= (checkpoint * 1e6)):\n saved_key_checkpoints[j] = True\n save_model(str(checkpoint) + 'M')\n\n save_model()\n\n env.close()\n return mean_rewards\n"}}},{"rowIdx":6,"cells":{"python_code":{"kind":"string","value":"import tensorflow as tf\nfrom mpi4py import MPI\nfrom coinrun.config import Config\nimport numpy as np\n\ndef clean_tb_dir():\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n if rank == 0:\n if tf.gfile.Exists(Config.TB_DIR):\n tf.gfile.DeleteRecursively(Config.TB_DIR) \n tf.gfile.MakeDirs(Config.TB_DIR)\n\n comm.Barrier()\n\nclass TB_Writer(object):\n def __init__(self, sess):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n clean_tb_dir()\n\n tb_writer = tf.summary.FileWriter(Config.TB_DIR + '/' + Config.RUN_ID + '_' + str(rank), sess.graph)\n total_steps = [0]\n\n should_log = (rank == 0 or Config.LOG_ALL_MPI)\n\n if should_log:\n hyperparams = np.array(Config.get_arg_text())\n hyperparams_tensor = tf.constant(hyperparams)\n\n summary_op = tf.summary.text(\"hyperparameters info\", hyperparams_tensor)\n summary = sess.run(summary_op)\n\n tb_writer.add_summary(summary)\n\n def add_summary(_merged, interval=1):\n if should_log:\n total_steps[0] += 1\n\n if total_steps[0] % interval == 0:\n tb_writer.add_summary(_merged, total_steps[0])\n tb_writer.flush()\n\n tuples = []\n\n def make_scalar_graph(name):\n scalar_ph = tf.placeholder(name='scalar_' + name, dtype=tf.float32)\n scalar_summary = tf.summary.scalar(name, scalar_ph)\n merged = tf.summary.merge([scalar_summary])\n tuples.append((scalar_ph, merged))\n\n name_dict = {}\n curr_name_idx = [0]\n\n def log_scalar(x, name, step=-1):\n if not name in name_dict:\n name_dict[name] = curr_name_idx[0]\n tf_name = (name + '_' + Config.RUN_ID) if curr_name_idx[0] == 0 else name\n make_scalar_graph(tf_name)\n curr_name_idx[0] += 1\n\n idx = name_dict[name]\n\n scalar_ph, merged = tuples[idx]\n \n if should_log:\n if step == -1:\n step = total_steps[0]\n total_steps[0] += 1\n\n _merged = sess.run(merged, {scalar_ph: x})\n\n tb_writer.add_summary(_merged, step)\n tb_writer.flush()\n \n self.add_summary = add_summary\n self.log_scalar = log_scalar\n"}}},{"rowIdx":7,"cells":{"python_code":{"kind":"string","value":"from .coinrunenv import init_args_and_threads\nfrom .coinrunenv import make\n\n__all__ = [\n 'init_args_and_threads',\n 'make'\n ]\n"}}},{"rowIdx":8,"cells":{"python_code":{"kind":"string","value":"import gym\nimport numpy as np\n\nclass EpsilonGreedyWrapper(gym.Wrapper):\n \"\"\"\n Wrapper to perform a random action each step instead of the requested action, \n with the provided probability.\n \"\"\"\n def __init__(self, env, prob=0.05):\n gym.Wrapper.__init__(self, env)\n self.prob = prob\n self.num_envs = env.num_envs\n\n def reset(self):\n return self.env.reset()\n\n def step(self, action):\n if np.random.uniform() 0\n\n if use_aux:\n for i, info in enumerate(infos):\n self.aux_rewards[i,:] += info['aux_rew']\n self.long_aux_rewards[i,:] += info['aux_rew']\n\n for i, d in enumerate(done):\n if d:\n epinfo = {'r': round(self.rewards[i], 6), 'l': self.lengths[i], 't': 0}\n aux_dict = {}\n\n for nr in range(self.num_aux_rews):\n aux_dict['aux_' + str(nr)] = self.aux_rewards[i,nr]\n\n if 'ale.lives' in infos[i]:\n game_over_rew = np.nan\n\n is_game_over = infos[i]['ale.lives'] == 0\n\n if is_game_over:\n game_over_rew = self.long_aux_rewards[i,0]\n self.long_aux_rewards[i,:] = 0\n\n aux_dict['game_over_rew'] = game_over_rew\n\n epinfo['aux_dict'] = aux_dict\n\n infos[i]['episode'] = epinfo\n\n self.rewards[i] = 0\n 
self.lengths[i] = 0\n self.aux_rewards[i,:] = 0\n\n return obs, rew, done, infos\n\n self.reset = reset\n self.step = step\n\ndef add_final_wrappers(env):\n env = EpisodeRewardWrapper(env)\n\n return env"}}},{"rowIdx":9,"cells":{"python_code":{"kind":"string","value":"\"\"\"\nRun a CoinRun environment in a window where you can interact with it using the keyboard\n\"\"\"\n\nfrom coinrun.coinrunenv import lib\nfrom coinrun import setup_utils\n\n\ndef main():\n setup_utils.setup_and_load(paint_vel_info=0)\n print(\"\"\"Control with arrow keys,\nF1, F2 -- switch resolution,\nF5, F6, F7, F8 -- zoom,\nF9 -- switch reconstruction target picture,\nF10 -- switch lasers\n \"\"\")\n lib.test_main_loop()\n\n\nif __name__ == '__main__':\n main()"}}},{"rowIdx":10,"cells":{"python_code":{"kind":"string","value":"import tensorflow as tf\nimport os\nimport joblib\nimport numpy as np\n\nfrom mpi4py import MPI\n\nfrom baselines.common.vec_env.vec_frame_stack import VecFrameStack\nfrom coinrun.config import Config\nfrom coinrun import setup_utils, wrappers\n\nimport platform\n\ndef make_general_env(num_env, seed=0, use_sub_proc=True):\n from coinrun import coinrunenv\n \n env = coinrunenv.make(Config.GAME_TYPE, num_env)\n\n if Config.FRAME_STACK > 1:\n env = VecFrameStack(env, Config.FRAME_STACK)\n\n epsilon = Config.EPSILON_GREEDY\n\n if epsilon > 0:\n env = wrappers.EpsilonGreedyWrapper(env, epsilon)\n\n return env\n\ndef file_to_path(filename):\n return setup_utils.file_to_path(filename)\n\ndef load_all_params(sess):\n load_params_for_scope(sess, 'model')\n\ndef load_params_for_scope(sess, scope, load_key='default'):\n load_data = Config.get_load_data(load_key)\n if load_data is None:\n return False\n\n params_dict = load_data['params']\n\n if scope in params_dict:\n print('Loading saved file for scope', scope)\n\n loaded_params = params_dict[scope]\n\n loaded_params, params = get_savable_params(loaded_params, scope, keep_heads=True)\n\n restore_params(sess, loaded_params, params)\n \n return True\n\ndef get_savable_params(loaded_params, scope, keep_heads=False):\n params = tf.trainable_variables(scope)\n filtered_params = []\n filtered_loaded = []\n\n if len(loaded_params) != len(params):\n print('param mismatch', len(loaded_params), len(params))\n assert(False)\n\n for p, loaded_p in zip(params, loaded_params):\n keep = True\n\n if any((scope + '/' + x) in p.name for x in ['v','pi']):\n keep = keep_heads\n\n if keep:\n filtered_params.append(p)\n filtered_loaded.append(loaded_p)\n else:\n print('drop', p)\n \n\n return filtered_loaded, filtered_params\n\ndef restore_params(sess, loaded_params, params):\n if len(loaded_params) != len(params):\n print('param mismatch', len(loaded_params), len(params))\n assert(False)\n\n restores = []\n for p, loaded_p in zip(params, loaded_params):\n print('restoring', p)\n restores.append(p.assign(loaded_p))\n sess.run(restores)\n\ndef save_params_in_scopes(sess, scopes, filename, base_dict=None):\n data_dict = {}\n\n if base_dict is not None:\n data_dict.update(base_dict)\n\n save_path = file_to_path(filename)\n\n data_dict['args'] = Config.get_args_dict()\n param_dict = {}\n\n for scope in scopes:\n params = tf.trainable_variables(scope)\n\n if len(params) > 0:\n print('saving scope', scope, filename)\n ps = sess.run(params)\n\n param_dict[scope] = ps\n \n data_dict['params'] = param_dict\n joblib.dump(data_dict, save_path)\n\ndef setup_mpi_gpus():\n if 'RCALL_NUM_GPU' not in os.environ:\n return\n num_gpus = int(os.environ['RCALL_NUM_GPU'])\n node_id = 
platform.node()\n nodes = MPI.COMM_WORLD.allgather(node_id)\n local_rank = len([n for n in nodes[:MPI.COMM_WORLD.Get_rank()] if n == node_id])\n os.environ['CUDA_VISIBLE_DEVICES'] = str(local_rank % num_gpus)\n\ndef is_mpi_root():\n return MPI.COMM_WORLD.Get_rank() == 0\n\ndef tf_initialize(sess):\n sess.run(tf.initialize_all_variables())\n sync_from_root(sess)\n \ndef sync_from_root(sess, vars=None):\n if vars is None:\n vars = tf.trainable_variables()\n\n if Config.SYNC_FROM_ROOT:\n rank = MPI.COMM_WORLD.Get_rank()\n print('sync from root', rank)\n for var in vars:\n if rank == 0:\n MPI.COMM_WORLD.bcast(sess.run(var))\n else:\n sess.run(tf.assign(var, MPI.COMM_WORLD.bcast(None)))\n\ndef mpi_average(values):\n return mpi_average_comm(values, MPI.COMM_WORLD)\n\ndef mpi_average_comm(values, comm):\n size = comm.size\n\n x = np.array(values)\n buf = np.zeros_like(x)\n comm.Allreduce(x, buf, op=MPI.SUM)\n buf = buf / size\n\n return buf\n\ndef mpi_average_train_test(values):\n return mpi_average_comm(values, Config.TRAIN_TEST_COMM)\n \ndef mpi_print(*args):\n rank = MPI.COMM_WORLD.Get_rank()\n\n if rank == 0:\n print(*args)\n\ndef process_ep_buf(epinfobuf, tb_writer=None, suffix='', step=0):\n rewards = [epinfo['r'] for epinfo in epinfobuf]\n rew_mean = np.nanmean(rewards)\n\n if Config.SYNC_FROM_ROOT:\n rew_mean = mpi_average_train_test([rew_mean])[0]\n\n if tb_writer is not None:\n tb_writer.log_scalar(rew_mean, 'rew_mean' + suffix, step)\n\n aux_dicts = []\n\n if len(epinfobuf) > 0 and 'aux_dict' in epinfobuf[0]:\n aux_dicts = [epinfo['aux_dict'] for epinfo in epinfobuf]\n\n if len(aux_dicts) > 0:\n keys = aux_dicts[0].keys()\n\n for key in keys:\n sub_rews = [aux_dict[key] for aux_dict in aux_dicts]\n sub_rew = np.nanmean(sub_rews)\n\n if tb_writer is not None:\n tb_writer.log_scalar(sub_rew, key, step)\n\n return rew_mean\n"}}},{"rowIdx":11,"cells":{"python_code":{"kind":"string","value":"from coinrun.config import Config\n\nimport os\nimport joblib\n\ndef load_for_setup_if_necessary():\n restore_file(Config.RESTORE_ID)\n\ndef restore_file(restore_id, load_key='default'):\n if restore_id is not None:\n load_file = Config.get_load_filename(restore_id=restore_id)\n filepath = file_to_path(load_file)\n load_data = joblib.load(filepath)\n\n Config.set_load_data(load_data, load_key=load_key)\n\n restored_args = load_data['args']\n sub_dict = {}\n res_keys = Config.RES_KEYS\n\n for key in res_keys:\n if key in restored_args:\n sub_dict[key] = restored_args[key]\n else:\n print('warning key %s not restored' % key)\n\n Config.parse_args_dict(sub_dict)\n \n from coinrun.coinrunenv import init_args_and_threads\n init_args_and_threads(4)\n\ndef setup_and_load(use_cmd_line_args=True, **kwargs):\n \"\"\"\n Initialize the global config using command line options, defaulting to the values in `config.py`.\n\n `use_cmd_line_args`: set to False to ignore command line arguments passed to the program\n `**kwargs`: override the defaults from `config.py` with these values\n \"\"\"\n args = Config.initialize_args(use_cmd_line_args=use_cmd_line_args, **kwargs)\n\n load_for_setup_if_necessary()\n\n return args\n\ndef file_to_path(filename):\n return os.path.join(Config.WORKDIR, filename)"}}},{"rowIdx":12,"cells":{"python_code":{"kind":"string","value":"from coinrun import random_agent\n\ndef test_coinrun():\n random_agent.random_agent(num_envs=16, max_steps=100)\n\n\nif __name__ == '__main__':\n test_coinrun()"}}},{"rowIdx":13,"cells":{"python_code":{"kind":"string","value":"import numpy as np\nimport tensorflow 
as tf\nfrom baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm\nfrom baselines.common.distributions import make_pdtype\nfrom baselines.common.input import observation_input\n\nfrom coinrun.config import Config\n\ndef impala_cnn(images, depths=[16, 32, 32]):\n \"\"\"\n Model used in the paper \"IMPALA: Scalable Distributed Deep-RL with \n Importance Weighted Actor-Learner Architectures\" https://arxiv.org/abs/1802.01561\n \"\"\"\n use_batch_norm = Config.USE_BATCH_NORM == 1\n\n dropout_layer_num = [0]\n dropout_assign_ops = []\n\n def dropout_layer(out):\n if Config.DROPOUT > 0:\n out_shape = out.get_shape().as_list()\n num_features = np.prod(out_shape[1:])\n\n var_name = 'mask_' + str(dropout_layer_num[0])\n batch_seed_shape = out_shape[1:]\n batch_seed = tf.get_variable(var_name, shape=batch_seed_shape, initializer=tf.random_uniform_initializer(minval=0, maxval=1), trainable=False)\n batch_seed_assign = tf.assign(batch_seed, tf.random_uniform(batch_seed_shape, minval=0, maxval=1))\n dropout_assign_ops.append(batch_seed_assign)\n\n curr_mask = tf.sign(tf.nn.relu(batch_seed[None,...] - Config.DROPOUT))\n\n curr_mask = curr_mask * (1.0 / (1.0 - Config.DROPOUT))\n\n out = out * curr_mask\n\n dropout_layer_num[0] += 1\n\n return out\n\n def conv_layer(out, depth):\n out = tf.layers.conv2d(out, depth, 3, padding='same')\n out = dropout_layer(out)\n\n if use_batch_norm:\n out = tf.contrib.layers.batch_norm(out, center=True, scale=True, is_training=True)\n\n return out\n\n def residual_block(inputs):\n depth = inputs.get_shape()[-1].value\n \n out = tf.nn.relu(inputs)\n\n out = conv_layer(out, depth)\n out = tf.nn.relu(out)\n out = conv_layer(out, depth)\n return out + inputs\n\n def conv_sequence(inputs, depth):\n out = conv_layer(inputs, depth)\n out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='same')\n out = residual_block(out)\n out = residual_block(out)\n return out\n\n out = images\n for depth in depths:\n out = conv_sequence(out, depth)\n\n out = tf.layers.flatten(out)\n out = tf.nn.relu(out)\n out = tf.layers.dense(out, 256, activation=tf.nn.relu)\n\n return out, dropout_assign_ops\n\ndef nature_cnn(scaled_images, **conv_kwargs):\n \"\"\"\n Model used in the paper \"Human-level control through deep reinforcement learning\" \n https://www.nature.com/articles/nature14236\n \"\"\"\n\n def activ(curr):\n return tf.nn.relu(curr)\n\n h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),\n **conv_kwargs))\n h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))\n h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))\n h3 = conv_to_fc(h3)\n return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))\n\ndef choose_cnn(images):\n arch = Config.ARCHITECTURE\n scaled_images = tf.cast(images, tf.float32) / 255.\n dropout_assign_ops = []\n\n if arch == 'nature':\n out = nature_cnn(scaled_images)\n elif arch == 'impala':\n out, dropout_assign_ops = impala_cnn(scaled_images)\n elif arch == 'impalalarge':\n out, dropout_assign_ops = impala_cnn(scaled_images, depths=[32, 64, 64, 64, 64])\n else:\n assert(False)\n\n return out, dropout_assign_ops\n\nclass LstmPolicy(object):\n\n def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, nlstm=256):\n nenv = nbatch // nsteps\n self.pdtype = make_pdtype(ac_space)\n X, processed_x = observation_input(ob_space, nbatch)\n\n M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)\n S = tf.placeholder(tf.float32, [nenv, 
nlstm*2]) #states\n with tf.variable_scope(\"model\", reuse=tf.AUTO_REUSE):\n h, self.dropout_assign_ops = choose_cnn(processed_x)\n xs = batch_to_seq(h, nenv, nsteps)\n ms = batch_to_seq(M, nenv, nsteps)\n h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm)\n h5 = seq_to_batch(h5)\n vf = fc(h5, 'v', 1)[:,0]\n self.pd, self.pi = self.pdtype.pdfromlatent(h5)\n\n a0 = self.pd.sample()\n neglogp0 = self.pd.neglogp(a0)\n self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)\n\n def step(ob, state, mask):\n return sess.run([a0, vf, snew, neglogp0], {X:ob, S:state, M:mask})\n\n def value(ob, state, mask):\n return sess.run(vf, {X:ob, S:state, M:mask})\n\n self.X = X\n self.M = M\n self.S = S\n self.vf = vf\n self.step = step\n self.value = value\n\nclass CnnPolicy(object):\n def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, **conv_kwargs): #pylint: disable=W0613\n self.pdtype = make_pdtype(ac_space)\n X, processed_x = observation_input(ob_space, nbatch)\n\n with tf.variable_scope(\"model\", reuse=tf.AUTO_REUSE):\n h, self.dropout_assign_ops = choose_cnn(processed_x)\n vf = fc(h, 'v', 1)[:,0]\n self.pd, self.pi = self.pdtype.pdfromlatent(h, init_scale=0.01)\n\n a0 = self.pd.sample()\n neglogp0 = self.pd.neglogp(a0)\n self.initial_state = None\n\n def step(ob, *_args, **_kwargs):\n a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})\n return a, v, self.initial_state, neglogp\n\n def value(ob, *_args, **_kwargs):\n return sess.run(vf, {X:ob})\n\n self.X = X\n self.vf = vf\n self.step = step\n self.value = value\n\n\ndef get_policy():\n use_lstm = Config.USE_LSTM\n \n if use_lstm == 1:\n policy = LstmPolicy\n elif use_lstm == 0:\n policy = CnnPolicy\n else:\n assert(False)\n\n return policy\n"}}},{"rowIdx":14,"cells":{"python_code":{"kind":"string","value":"\"\"\"\nPython interface to the CoinRun shared library using ctypes.\n\nOn import, this will attempt to build the shared library.\n\"\"\"\n\nimport os\nimport atexit\nimport random\nimport sys\nfrom ctypes import c_int, c_char_p, c_float, c_bool\n\nimport gym\nimport gym.spaces\nimport numpy as np\nimport numpy.ctypeslib as npct\nfrom baselines.common.vec_env import VecEnv\nfrom baselines import logger\n\nfrom coinrun.config import Config\n\nfrom mpi4py import MPI\nfrom baselines.common import mpi_util\n\n# if the environment is crashing, try using the debug build to get\n# a readable stack trace\nDEBUG = False\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\ngame_versions = {\n 'standard': 1000,\n 'platform': 1001,\n 'maze': 1002,\n}\n\ndef build():\n lrank, _lsize = mpi_util.get_local_rank_size(MPI.COMM_WORLD)\n if lrank == 0:\n dirname = os.path.dirname(__file__)\n if len(dirname):\n make_cmd = \"QT_SELECT=5 make -C %s\" % dirname\n else:\n make_cmd = \"QT_SELECT=5 make\"\n\n r = os.system(make_cmd)\n if r != 0:\n logger.error('coinrun: make failed')\n sys.exit(1)\n MPI.COMM_WORLD.barrier()\n\nbuild()\n\nif DEBUG:\n lib_path = '.build-debug/coinrun_cpp_d'\nelse:\n lib_path = '.build-release/coinrun_cpp'\n\nlib = npct.load_library(lib_path, os.path.dirname(__file__))\nlib.init.argtypes = [c_int]\nlib.get_NUM_ACTIONS.restype = c_int\nlib.get_RES_W.restype = c_int\nlib.get_RES_H.restype = c_int\nlib.get_VIDEORES.restype = c_int\n\nlib.vec_create.argtypes = [\n c_int, # game_type\n c_int, # nenvs\n c_int, # lump_n\n c_bool, # want_hires_render\n c_float, # default_zoom\n ]\nlib.vec_create.restype = c_int\n\nlib.vec_close.argtypes = [c_int]\n\nlib.vec_step_async_discrete.argtypes = [c_int, npct.ndpointer(dtype=np.int32, 
ndim=1)]\n\nlib.initialize_args.argtypes = [npct.ndpointer(dtype=np.int32, ndim=1)]\nlib.initialize_set_monitor_dir.argtypes = [c_char_p, c_int]\n\nlib.vec_wait.argtypes = [\n c_int,\n npct.ndpointer(dtype=np.uint8, ndim=4), # normal rgb\n npct.ndpointer(dtype=np.uint8, ndim=4), # larger rgb for render()\n npct.ndpointer(dtype=np.float32, ndim=1), # rew\n npct.ndpointer(dtype=np.bool, ndim=1), # done\n ]\n\nalready_inited = False\n\ndef init_args_and_threads(cpu_count=4,\n monitor_csv_policy='all',\n rand_seed=None):\n \"\"\"\n Perform one-time global init for the CoinRun library. This must be called\n before creating an instance of CoinRunVecEnv. You should not\n call this multiple times from the same process.\n \"\"\"\n os.environ['COINRUN_RESOURCES_PATH'] = os.path.join(SCRIPT_DIR, 'assets')\n is_high_difficulty = Config.HIGH_DIFFICULTY\n\n if rand_seed is None:\n rand_seed = random.SystemRandom().randint(0, 1000000000)\n\n # ensure different MPI processes get different seeds (just in case SystemRandom implementation is poor)\n mpi_rank, mpi_size = mpi_util.get_local_rank_size(MPI.COMM_WORLD)\n rand_seed = rand_seed - rand_seed % mpi_size + mpi_rank\n\n int_args = np.array([int(is_high_difficulty), Config.NUM_LEVELS, int(Config.PAINT_VEL_INFO), Config.USE_DATA_AUGMENTATION, game_versions[Config.GAME_TYPE], Config.SET_SEED, rand_seed]).astype(np.int32)\n\n lib.initialize_args(int_args)\n lib.initialize_set_monitor_dir(logger.get_dir().encode('utf-8'), {'off': 0, 'first_env': 1, 'all': 2}[monitor_csv_policy])\n\n global already_inited\n if already_inited:\n return\n\n lib.init(cpu_count)\n already_inited = True\n\n@atexit.register\ndef shutdown():\n global already_inited\n if not already_inited:\n return\n lib.coinrun_shutdown()\n\nclass CoinRunVecEnv(VecEnv):\n \"\"\"\n This is the CoinRun VecEnv, all CoinRun environments are just instances\n of this class with different values for `game_type`\n\n `game_type`: int game type corresponding to the game type to create, see `enum GameType` in `coinrun.cpp`\n `num_envs`: number of environments to create in this VecEnv\n `lump_n`: only used when the environment creates `monitor.csv` files\n `default_zoom`: controls how much of the level the agent can see\n \"\"\"\n def __init__(self, game_type, num_envs, lump_n=0, default_zoom=5.0):\n self.metadata = {'render.modes': []}\n self.reward_range = (-float('inf'), float('inf'))\n\n self.NUM_ACTIONS = lib.get_NUM_ACTIONS()\n self.RES_W = lib.get_RES_W()\n self.RES_H = lib.get_RES_H()\n self.VIDEORES = lib.get_VIDEORES()\n\n self.buf_rew = np.zeros([num_envs], dtype=np.float32)\n self.buf_done = np.zeros([num_envs], dtype=np.bool)\n self.buf_rgb = np.zeros([num_envs, self.RES_H, self.RES_W, 3], dtype=np.uint8)\n self.hires_render = Config.IS_HIGH_RES\n if self.hires_render:\n self.buf_render_rgb = np.zeros([num_envs, self.VIDEORES, self.VIDEORES, 3], dtype=np.uint8)\n else:\n self.buf_render_rgb = np.zeros([1, 1, 1, 1], dtype=np.uint8)\n\n num_channels = 1 if Config.USE_BLACK_WHITE else 3\n obs_space = gym.spaces.Box(0, 255, shape=[self.RES_H, self.RES_W, num_channels], dtype=np.uint8)\n\n super().__init__(\n num_envs=num_envs,\n observation_space=obs_space,\n action_space=gym.spaces.Discrete(self.NUM_ACTIONS),\n )\n self.handle = lib.vec_create(\n game_versions[game_type],\n self.num_envs,\n lump_n,\n self.hires_render,\n default_zoom)\n self.dummy_info = [{} for _ in range(num_envs)]\n\n def __del__(self):\n if hasattr(self, 'handle'):\n lib.vec_close(self.handle)\n self.handle = 0\n\n def 
close(self):\n lib.vec_close(self.handle)\n self.handle = 0\n\n def reset(self):\n print(\"CoinRun ignores resets\")\n obs, _, _, _ = self.step_wait()\n return obs\n\n def get_images(self):\n if self.hires_render:\n return self.buf_render_rgb\n else:\n return self.buf_rgb\n\n def step_async(self, actions):\n assert actions.dtype in [np.int32, np.int64]\n actions = actions.astype(np.int32)\n lib.vec_step_async_discrete(self.handle, actions)\n\n def step_wait(self):\n self.buf_rew = np.zeros_like(self.buf_rew)\n self.buf_done = np.zeros_like(self.buf_done)\n\n lib.vec_wait(\n self.handle,\n self.buf_rgb,\n self.buf_render_rgb,\n self.buf_rew,\n self.buf_done)\n\n obs_frames = self.buf_rgb\n\n if Config.USE_BLACK_WHITE:\n obs_frames = np.mean(obs_frames, axis=-1).astype(np.uint8)[...,None]\n\n return obs_frames, self.buf_rew, self.buf_done, self.dummy_info\n\ndef make(env_id, num_envs, **kwargs):\n assert env_id in game_versions, 'cannot find environment \"%s\", maybe you mean one of %s' % (env_id, list(game_versions.keys()))\n return CoinRunVecEnv(env_id, num_envs, **kwargs)\n"}}},{"rowIdx":15,"cells":{"python_code":{"kind":"string","value":"import json\nimport pickle\nimport math\nimport sys\nimport argparse\nimport warnings\n\nfrom os import makedirs\nfrom os.path import basename, join, exists, dirname, splitext, realpath\n\nfrom wikidata_linker_utils.progressbar import get_progress_bar\nfrom dataset import TSVDataset, CombinedDataset, H5Dataset, ClassificationHandler\nfrom batchifier import (iter_batches_single_threaded,\n requires_vocab,\n requires_character_convolution,\n get_feature_vocabs)\nimport tensorflow as tf\nimport numpy as np\n\ntry:\n RNNCell = tf.nn.rnn_cell.RNNCell\n TFLSTMCell = tf.nn.rnn_cell.LSTMCell\n MultiRNNCell = tf.nn.rnn_cell.MultiRNNCell\n LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple\n from tensorflow.contrib.cudnn_rnn import CudnnLSTM\nexcept AttributeError:\n RNNCell = tf.contrib.rnn.RNNCell\n TFLSTMCell = tf.contrib.rnn.LSTMCell\n MultiRNNCell = tf.contrib.rnn.MultiRNNCell\n LSTMStateTuple = tf.contrib.rnn.LSTMStateTuple\n from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnLSTM\n\nfrom tensorflow.python.client import device_lib\n\n\nclass LazyAdamOptimizer(tf.train.AdamOptimizer):\n \"\"\"Variant of the Adam optimizer that handles sparse updates more efficiently.\n\n The original Adam algorithm maintains two moving-average accumulators for\n each trainable variable; the accumulators are updated at every step.\n This class provides lazier handling of gradient updates for sparse variables.\n It only updates moving-average accumulators for sparse variable indices that\n appear in the current batch, rather than updating the accumulators for all\n indices. Compared with the original Adam optimizer, it can provide large\n improvements in model training throughput for some applications. 
However, it\n provides slightly different semantics than the original Adam algorithm, and\n may lead to different empirical results.\n \"\"\"\n\n def _apply_sparse(self, grad, var):\n beta1_power = tf.cast(self._beta1_power, var.dtype.base_dtype)\n beta2_power = tf.cast(self._beta2_power, var.dtype.base_dtype)\n lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)\n beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)\n beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)\n epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)\n lr = (lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power))\n\n # m := beta1 * m + (1 - beta1) * g_t\n # We use a slightly different version of the moving-average update formula\n # that does a better job of handling concurrent lockless updates:\n # m -= (1 - beta1) * (m - g_t)\n m = self.get_slot(var, \"m\")\n m_t_delta = tf.gather(m, grad.indices) - grad.values\n m_t = tf.scatter_sub(m, grad.indices,\n (1 - beta1_t) * m_t_delta,\n use_locking=self._use_locking)\n\n # v := beta2 * v + (1 - beta2) * (g_t * g_t)\n # We reformulate the update as:\n # v -= (1 - beta2) * (v - g_t * g_t)\n v = self.get_slot(var, \"v\")\n v_t_delta = tf.gather(v, grad.indices) - tf.square(grad.values)\n v_t = tf.scatter_sub(v, grad.indices,\n (1 - beta2_t) * v_t_delta,\n use_locking=self._use_locking)\n\n # variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t))\n m_t_slice = tf.gather(m_t, grad.indices)\n v_t_slice = tf.gather(v_t, grad.indices)\n denominator_slice = tf.sqrt(v_t_slice) + epsilon_t\n var_update = tf.scatter_sub(var, grad.indices,\n lr * m_t_slice / denominator_slice,\n use_locking=self._use_locking)\n return tf.group(var_update, m_t, v_t)\n\n\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\ndef split(values, axis, num_splits, name=None):\n return tf.split(values, num_splits, axis=axis, name=name)\n\ndef reverse(values, axis):\n return tf.reverse(values, [axis])\n\n\ndef sparse_softmax_cross_entropy_with_logits(logits, labels):\n return tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels)\n\n\ndef concat(values, axis, name=None):\n if len(values) == 1:\n return values[0]\n return tf.concat(values, axis, name=name)\n\n\ndef concat_tensor_array(values, name=None):\n return values.stack(name=name)\n\n\ndef batch_gather_3d(values, indices):\n return tf.gather(tf.reshape(values, [-1, tf.shape(values)[2]]),\n tf.range(0, tf.shape(values)[0]) * tf.shape(values)[1] +\n indices)\n\n\ndef batch_gather_2d(values, indices):\n return tf.gather(tf.reshape(values, [-1]),\n tf.range(0, tf.shape(values)[0]) * tf.shape(values)[1] +\n indices)\n\n\ndef viterbi_decode(score, transition_params, sequence_lengths, back_prop=False,\n parallel_iterations=1):\n \"\"\"Decode the highest scoring sequence of tags inside of TensorFlow!!!\n This can be used anytime.\n Args:\n score: A [batch, seq_len, num_tags] matrix of unary potentials.\n transition_params: A [num_tags, num_tags] matrix of binary potentials.\n sequence_lengths: A [batch] int32 vector of the length of each score\n sequence.\n Returns:\n viterbi: A [batch, seq_len] list of integers containing the highest\n scoring tag indices.\n viterbi_score: A vector of float containing the score for the Viterbi\n sequence.\n \"\"\"\n sequence_lengths = tf.convert_to_tensor(\n sequence_lengths, name=\"sequence_lengths\")\n score = tf.convert_to_tensor(score, name=\"score\")\n transition_params = 
tf.convert_to_tensor(\n transition_params, name=\"transition_params\")\n\n if sequence_lengths.dtype != tf.int32:\n sequence_lengths = tf.cast(sequence_lengths, tf.int32)\n\n def condition(t, *args):\n \"\"\"Stop when full score sequence has been read in.\"\"\"\n return tf.less(t, tf.shape(score)[1])\n\n def body(t, trellis, backpointers, trellis_val):\n \"\"\"Perform forward viterbi pass.\"\"\"\n v = tf.expand_dims(trellis_val, 2) + tf.expand_dims(transition_params, 0)\n new_trellis_val = score[:, t, :] + tf.reduce_max(v, axis=1)\n new_trellis = trellis.write(t, new_trellis_val)\n new_backpointers = backpointers.write(\n t, tf.cast(tf.argmax(v, axis=1), tf.int32))\n return t + 1, new_trellis, new_backpointers, new_trellis_val\n\n trellis_arr = tf.TensorArray(score.dtype, size=0,\n dynamic_size=True, clear_after_read=False, infer_shape=False)\n first_trellis_val = score[:, 0, :]\n trellis_arr = trellis_arr.write(0, first_trellis_val)\n\n backpointers_arr = tf.TensorArray(tf.int32, size=0,\n dynamic_size=True, clear_after_read=False, infer_shape=False)\n backpointers_arr = backpointers_arr.write(0,\n tf.zeros_like(score[:, 0, :], dtype=tf.int32))\n\n _, trellis_out, backpointers_out, _ = tf.while_loop(\n condition, body,\n (tf.constant(1, name=\"t\", dtype=tf.int32), trellis_arr, backpointers_arr, first_trellis_val),\n parallel_iterations=parallel_iterations,\n back_prop=back_prop)\n\n trellis_out = concat_tensor_array(trellis_out)\n backpointers_out = concat_tensor_array(backpointers_out)\n # make batch-major:\n trellis_out = tf.transpose(trellis_out, [1, 0, 2])\n backpointers_out = tf.transpose(backpointers_out, [1, 0, 2])\n\n def condition(t, *args):\n return tf.less(t, tf.shape(score)[1])\n\n def body(t, viterbi, last_decision):\n backpointers_timestep = batch_gather_3d(\n backpointers_out, tf.maximum(sequence_lengths - t, 0))\n new_last_decision = batch_gather_2d(\n backpointers_timestep, last_decision)\n new_viterbi = viterbi.write(t, new_last_decision)\n return t + 1, new_viterbi, new_last_decision\n\n last_timestep = batch_gather_3d(trellis_out, sequence_lengths - 1)\n # get scores for last timestep of each batch element inside\n # trellis:\n scores = tf.reduce_max(last_timestep, axis=1)\n # get choice index for last timestep:\n last_decision = tf.cast(tf.argmax(last_timestep, axis=1), tf.int32)\n\n # decode backwards using backpointers:\n viterbi = tf.TensorArray(tf.int32, size=0,\n dynamic_size=True, clear_after_read=False, infer_shape=False)\n viterbi = viterbi.write(0, last_decision)\n _, viterbi_out, _ = tf.while_loop(\n condition, body,\n (tf.constant(1, name=\"t\", dtype=tf.int32), viterbi, last_decision),\n parallel_iterations=parallel_iterations,\n back_prop=back_prop)\n viterbi_out = concat_tensor_array(viterbi_out)\n # make batch-major:\n viterbi_out = tf.transpose(viterbi_out, [1, 0])\n viterbi_out_fwd = tf.reverse_sequence(\n viterbi_out, sequence_lengths, seq_dim=1)\n return viterbi_out_fwd, scores\n\n\ndef sum_list(elements):\n total = elements[0]\n for el in elements[1:]:\n total += el\n return total\n\n\ndef explicitly_set_fields():\n received = set()\n for argument in sys.argv:\n if argument.startswith(\"--\"):\n received.add(argument[2:])\n if argument[2:].startswith(\"no\"):\n received.add(argument[4:])\n return received\n\n\ndef save_session(session, saver, path, verbose=False):\n \"\"\"\n Call save on tf.train.Saver on a specific path to store all the variables\n of the current tensorflow session to a file for later restoring.\n\n Arguments:\n session : 
tf.Session\n path : str, place to save session\n \"\"\"\n makedirs(path, exist_ok=True)\n if not path.endswith(\"/\"):\n path = path + \"/\"\n\n path = join(path, \"model.ckpt\")\n if verbose:\n print(\"Saving session under %r\" % (path,), flush=True)\n saver.save(session, path)\n print(\"Saved\", flush=True)\n\n### constants for saving & loading\n\n# model config:\nOBJECTIVE_NAMES = \"OBJECTIVE_NAMES\"\nOBJECTIVE_TYPES = \"OBJECTIVE_TYPES\"\n\n# inputs:\nINPUT_PLACEHOLDERS = \"INPUT_PLACEHOLDERS\"\nLABEL_PLACEHOLDERS = \"LABEL_PLACEHOLDERS\"\nLABEL_MASK_PLACEHOLDERS = \"LABEL_MASK_PLACEHOLDERS\"\nTRAIN_OP = \"TRAIN_OP\"\nSEQUENCE_LENGTHS = \"SEQUENCE_LENGTHS\"\nIS_TRAINING = \"IS_TRAINING\"\n\n# outputs:\nDECODED = \"DECODED\"\nDECODED_SCORES = \"DECODED_SCORES\"\nUNARY_SCORES = \"UNARY_SCORES\"\n\n# per objective metrics:\nTOKEN_CORRECT = \"TOKEN_CORRECT\"\nTOKEN_CORRECT_TOTAL = \"TOKEN_CORRECT_TOTAL\"\nSENTENCE_CORRECT = \"SENTENCE_CORRECT\"\nSENTENCE_CORRECT_TOTAL = \"SENTENCE_CORRECT_TOTAL\"\n\n# aggregate metrics over all objectives\nNLL = \"NLL\"\nNLL_TOTAL = \"NLL_TOTAL\"\nTOKEN_CORRECT_ALL = \"TOKEN_CORRECT_ALL\"\nTOKEN_CORRECT_ALL_TOTAL = \"TOKEN_CORRECT_ALL_TOTAL\"\nSENTENCE_CORRECT_ALL = \"SENTENCE_CORRECT_ALL\"\nSENTENCE_CORRECT_ALL_TOTAL = \"SENTENCE_CORRECT_ALL_TOTAL\"\nCONFUSION_MATRIX = \"CONFUSION_MATRIX\"\nGLOBAL_STEP = \"global_step\"\nSUMMARIES_ASSIGNS = \"SUMMARIES_ASSIGNS\"\nSUMMARIES_PLACEHOLDERS = \"SUMMARIES_PLACEHOLDERS\"\nSUMMARIES_NAMES = \"SUMMARIES_NAMES\"\nTRAIN_SUMMARIES = \"TRAIN_SUMMARIES\"\n\nTRUE_POSITIVES = \"TRUE_POSITIVES\"\nFALSE_POSITIVES = \"FALSE_POSITIVES\"\nFALSE_NEGATIVES = \"FALSE_NEGATIVES\"\n\ndef maybe_dropout(inputs, keep_prob, is_training):\n return tf.cond(is_training,\n lambda : tf.nn.dropout(inputs, keep_prob),\n lambda : inputs\n ) if keep_prob < 1 else inputs\n\n\ndef compute_sentence_correct(correct, sequence_mask):\n any_label = tf.reduce_max(tf.cast(sequence_mask, tf.int32), 1)\n sentence_correct_total = tf.reduce_sum(any_label)\n # is 1 when all is correct, 0 otherwise\n sentence_correct = tf.reduce_sum(tf.reduce_prod(\n tf.cast(\n tf.logical_or(correct, tf.logical_not(sequence_mask)),\n tf.int32\n ),\n 1\n ) * any_label)\n return sentence_correct, sentence_correct_total\n\n\ndef lstm_activation(inputs, input_h, input_c, W, b, activation):\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n cell_inputs = concat([inputs, input_h], axis=1)\n\n lstm_matrix = tf.nn.xw_plus_b(cell_inputs, W, b)\n preactiv = split(lstm_matrix, axis=1, num_splits=4)\n # from CUDNN docs:\n # Values 0 and 4 reference the input gate.\n # Values 1 and 5 reference the forget gate.\n # Values 2 and 6 reference the new memory gate.\n # Values 3 and 7 reference the output gate\n i, f, j, o = (\n preactiv[CUDNN_MAPPING[\"i\"]],\n preactiv[CUDNN_MAPPING[\"f\"]],\n preactiv[CUDNN_MAPPING[\"j\"]],\n preactiv[CUDNN_MAPPING[\"o\"]]\n )\n\n c = (tf.nn.sigmoid(f) * input_c +\n tf.nn.sigmoid(i) * activation(j))\n\n m = tf.nn.sigmoid(o) * activation(c)\n return (c, m)\n\n\nclass Logger(object):\n def __init__(self, session, writer):\n self.session = session\n self.writer = writer\n self._placeholders = {}\n summaries = tf.get_collection(SUMMARIES_ASSIGNS)\n summaries_pholders = tf.get_collection(SUMMARIES_PLACEHOLDERS)\n summaries_names = [name.decode(\"utf-8\")\n for name in tf.get_collection(SUMMARIES_NAMES)]\n\n for summary, pholder, name in zip(summaries, summaries_pholders, summaries_names):\n self._placeholders[name] = (pholder, summary)\n\n\n 
def log(self, name, value, step):\n if name not in self._placeholders:\n pholder = tf.placeholder(tf.float32, [], name=name)\n summary = tf.summary.scalar(name, pholder)\n tf.add_to_collection(SUMMARIES_ASSIGNS, summary)\n tf.add_to_collection(SUMMARIES_NAMES, name)\n tf.add_to_collection(SUMMARIES_PLACEHOLDERS, pholder)\n self._placeholders[name] = (pholder, summary)\n pholder, summary = self._placeholders[name]\n res = self.session.run(summary, {pholder:value})\n self.writer.add_summary(res, step)\n\n\nclass ParametrizedLSTMCell(RNNCell):\n def __init__(self, weights, biases, hidden_size):\n self._weights = weights\n self._biases = biases\n self.hidden_size = hidden_size\n\n @property\n def state_size(self):\n return (self.hidden_size, self.hidden_size)\n\n @property\n def output_size(self):\n return self.hidden_size\n\n def __call__(self, inputs, state, scope=None):\n input_h, input_c = state\n c, m = lstm_activation(inputs,\n input_h=input_h,\n input_c=input_c,\n b=self._biases,\n W=self._weights,\n activation=tf.nn.tanh)\n return m, (m, c)\n\n\nclass LSTMCell(TFLSTMCell):\n def __init__(self,\n num_units,\n keep_prob=1.0,\n is_training=False):\n self._is_training = is_training\n self._keep_prob = keep_prob\n TFLSTMCell.__init__(\n self,\n num_units=num_units,\n state_is_tuple=True\n )\n\n def __call__(self, inputs, state, scope=None):\n (c_prev, m_prev) = state\n\n dtype = inputs.dtype\n input_size = inputs.get_shape().with_rank(2)[1]\n if input_size.value is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n with tf.variable_scope(scope or type(self).__name__,\n initializer=self._initializer): # \"LSTMCell\"\n concat_w = _get_concat_variable(\n \"W\", [input_size.value + self._num_units, 4 * self._num_units],\n dtype, 1)\n\n b = tf.get_variable(\n \"B\", shape=[4 * self._num_units],\n initializer=tf.zeros_initializer(), dtype=dtype)\n\n c, m = lstm_activation(inputs,\n input_c=c_prev,\n input_h=m_prev,\n W=concat_w,\n b=b,\n activation=self._activation,\n keep_prob=self._keep_prob,\n is_training=self._is_training,\n forget_bias=self._forget_bias)\n return m, LSTMStateTuple(c, m)\n\n\n\ndef cudnn_lstm_parameter_size(input_size, hidden_size):\n \"\"\"Number of parameters in a single CuDNN LSTM cell.\"\"\"\n biases = 8 * hidden_size\n weights = 4 * (hidden_size * input_size) + 4 * (hidden_size * hidden_size)\n return biases + weights\n\n\ndef direction_to_num_directions(direction):\n if direction == \"unidirectional\":\n return 1\n elif direction == \"bidirectional\":\n return 2\n else:\n raise ValueError(\"Unknown direction: %r.\" % (direction,))\n\n\ndef estimate_cudnn_parameter_size(num_layers,\n input_size,\n hidden_size,\n input_mode,\n direction):\n \"\"\"\n Compute the number of parameters needed to\n construct a stack of LSTMs. 
Assumes the hidden states\n of bidirectional LSTMs are concatenated before being\n sent to the next layer up.\n \"\"\"\n num_directions = direction_to_num_directions(direction)\n params = 0\n isize = input_size\n for layer in range(num_layers):\n for direction in range(num_directions):\n params += cudnn_lstm_parameter_size(\n isize, hidden_size\n )\n isize = hidden_size * num_directions\n return params\n\n# cudnn conversion to dynamic RNN:\nCUDNN_LAYER_WEIGHT_ORDER = [\n \"x\", \"x\", \"x\", \"x\", \"h\", \"h\", \"h\", \"h\"\n]\nCUDNN_LAYER_BIAS_ORDER = [\n \"bx\", \"bx\", \"bx\", \"bx\", \"bh\", \"bh\", \"bh\", \"bh\"\n]\nCUDNN_TRANSPOSED = True\nCUDNN_MAPPING = {\"i\": 0, \"f\": 1, \"j\": 2, \"o\": 3}\n\n\ndef consume_biases_direction(params, old_offset, hidden_size, isize):\n offset = old_offset\n layer_biases_x = []\n layer_biases_h = []\n\n for piece in CUDNN_LAYER_BIAS_ORDER:\n if piece == \"bx\":\n layer_biases_x.append(\n params[offset:offset + hidden_size]\n )\n offset += hidden_size\n elif piece == \"bh\":\n layer_biases_h.append(\n params[offset:offset + hidden_size]\n )\n offset += hidden_size\n else:\n raise ValueError(\"Unknown cudnn piece %r.\" % (piece,))\n b = concat(layer_biases_x, axis=0) + concat(layer_biases_h, axis=0)\n return b, offset\n\n\ndef consume_weights_direction(params, old_offset, hidden_size, isize):\n offset = old_offset\n layer_weights_x = []\n layer_weights_h = []\n for piece in CUDNN_LAYER_WEIGHT_ORDER:\n if piece == \"x\":\n layer_weights_x.append(\n tf.reshape(\n params[offset:offset + hidden_size * isize],\n [hidden_size, isize] if CUDNN_TRANSPOSED else [isize, hidden_size]\n )\n )\n offset += hidden_size * isize\n elif piece == \"h\":\n layer_weights_h.append(\n tf.reshape(\n params[offset:offset + hidden_size * hidden_size],\n [hidden_size, hidden_size]\n )\n )\n offset += hidden_size * hidden_size\n else:\n raise ValueError(\"Unknown cudnn piece %r.\" % (piece,))\n if CUDNN_TRANSPOSED:\n W_T = concat([concat(layer_weights_x, axis=0), concat(layer_weights_h, axis=0)], axis=1)\n W = tf.transpose(W_T)\n else:\n W = concat([concat(layer_weights_x, axis=1), concat(layer_weights_h, axis=1)], axis=0)\n return W, offset\n\n\ndef decompose_layer_params(params, num_layers,\n hidden_size, cell_input_size,\n input_mode, direction, create_fn):\n \"\"\"\n This operation converts the opaque cudnn params into a set of\n usable weight matrices.\n Args:\n params : Tensor, opaque cudnn params tensor\n num_layers : int, number of stacked LSTMs.\n hidden_size : int, number of neurons in each LSTM.\n cell_input_size : int, input size for the LSTMs.\n input_mode: whether a pre-projection was used or not. Currently only\n 'linear_input' is supported (e.g. CuDNN does its own projection\n internally)\n direction : str, 'unidirectional' or 'bidirectional'.\n create_fn: callback for weight creation. 
Receives parameter slice (op),\n layer (int), direction (0 = fwd, 1 = bwd),\n parameter_index (0 = W, 1 = b).\n Returns:\n weights : list of lists of Tensors in the format:\n first list is indexed layers,\n inner list is indexed by direction (fwd, bwd),\n tensors in the inner list are (Weights, biases)\n \"\"\"\n if input_mode != \"linear_input\":\n raise ValueError(\"Only input_mode == linear_input supported for now.\")\n num_directions = direction_to_num_directions(direction)\n offset = 0\n all_weights = [[[] for j in range(num_directions)]\n for i in range(num_layers)]\n isize = cell_input_size\n with tf.variable_scope(\"DecomposeCudnnParams\"):\n for layer in range(num_layers):\n with tf.variable_scope(\"Layer{}\".format(layer)):\n for direction in range(num_directions):\n with tf.variable_scope(\"fwd\" if direction == 0 else \"bwd\"):\n with tf.variable_scope(\"weights\"):\n W, offset = consume_weights_direction(\n params,\n old_offset=offset,\n hidden_size=hidden_size,\n isize=isize)\n all_weights[layer][direction].append(\n create_fn(W, layer, direction, 0))\n isize = hidden_size * num_directions\n isize = cell_input_size\n for layer in range(num_layers):\n with tf.variable_scope(\"Layer{}\".format(layer)):\n for direction in range(num_directions):\n with tf.variable_scope(\"fwd\" if direction == 0 else \"bwd\"):\n with tf.variable_scope(\"biases\"):\n b, offset = consume_biases_direction(\n params,\n old_offset=offset,\n hidden_size=hidden_size,\n isize=isize)\n all_weights[layer][direction].append(\n create_fn(b, layer, direction, 1))\n isize = hidden_size * num_directions\n return all_weights\n\n\ndef create_decomposed_variable(param, lidx, didx, pidx):\n with tf.device(\"cpu\"):\n return tf.get_variable(\"w\" if pidx == 0 else \"b\",\n shape=param.get_shape().as_list(),\n dtype=param.dtype,\n trainable=False,\n collections=[tf.GraphKeys.GLOBAL_VARIABLES,\n \"excluded_variables\"])\n\n\ndef cpu_cudnn_params(params, num_layers, hidden_size, cell_input_size, input_mode,\n direction):\n \"\"\"\n This operation converts the opaque cudnn params into a set of\n usable weight matrices, and caches the conversion.\n Args:\n params : Tensor, opaque cudnn params tensor\n num_layers : int, number of stacked LSTMs.\n hidden_size : int, number of neurons in each LSTM.\n cell_input_size : int, input size for the LSTMs.\n input_mode: whether a pre-projection was used or not. Currently only\n 'linear_input' is supported (e.g. 
CuDNN does its own projection\n internally)\n direction : str, 'unidirectional' or 'bidirectional'.\n skip_creation : bool, whether to build variables.\n Returns:\n weights : list of lists of Tensors in the format:\n first list is indexed layers,\n inner list is indexed by direction (fwd, bwd),\n tensors in the inner list are (Weights, biases)\n \"\"\"\n # create a boolean status variable that checks whether the\n # weights have been converted to cpu format:\n with tf.device(\"cpu\"):\n cpu_conversion_status = tf.get_variable(\n name=\"CudnnConversionStatus\", dtype=tf.float32,\n initializer=tf.zeros_initializer(), shape=[],\n trainable=False,\n collections=[tf.GraphKeys.GLOBAL_VARIABLES])\n # create a fresh copy of the weights (not trainable)\n reshaped = decompose_layer_params(\n params,\n num_layers=num_layers,\n hidden_size=hidden_size,\n cell_input_size=cell_input_size,\n input_mode=input_mode,\n direction=direction,\n create_fn=create_decomposed_variable)\n\n def cpu_convert():\n all_assigns = decompose_layer_params(\n params,\n num_layers=num_layers,\n hidden_size=hidden_size,\n cell_input_size=cell_input_size,\n input_mode=input_mode,\n direction=direction,\n create_fn=lambda p, lidx, didx, pidx: tf.assign(reshaped[lidx][didx][pidx], p))\n all_assigns = [assign for layer_assign in all_assigns\n for dir_assign in layer_assign\n for assign in dir_assign]\n all_assigns.append(tf.assign(cpu_conversion_status, tf.constant(1.0, dtype=tf.float32)))\n all_assigns.append(tf.Print(cpu_conversion_status, [0],\n message=\"Converted cudnn weights to CPU format. \"))\n with tf.control_dependencies(all_assigns):\n ret = tf.identity(cpu_conversion_status)\n return ret\n # cache the reshaping/concatenating\n ensure_conversion = tf.cond(tf.greater(cpu_conversion_status, 0),\n lambda: cpu_conversion_status,\n cpu_convert)\n # if weights are already reshaped, go ahead:\n with tf.control_dependencies([ensure_conversion]):\n # wrap with identity to ensure there is a dependency between assignment\n # and using the weights:\n all_params = [[[tf.identity(p) for p in dir_param]\n for dir_param in layer_param]\n for layer_param in reshaped]\n return all_params\n\n\nclass CpuCudnnLSTM(object):\n def __init__(self, num_layers, hidden_size,\n cell_input_size, input_mode, direction):\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.cell_input_size = cell_input_size\n self.input_mode = input_mode\n self.direction = direction\n\n def __call__(self,\n inputs,\n input_h,\n input_c,\n params,\n is_training=True):\n layer_params = cpu_cudnn_params(params,\n num_layers=self.num_layers,\n hidden_size=self.hidden_size,\n cell_input_size=self.cell_input_size,\n input_mode=self.input_mode,\n direction=self.direction)\n REVERSED = 1\n layer_inputs = inputs\n cell_idx = 0\n for layer_param in layer_params:\n hidden_fwd_bwd = []\n final_output_c = []\n final_output_h = []\n for direction, (W, b) in enumerate(layer_param):\n if direction == REVERSED:\n layer_inputs = reverse(layer_inputs, axis=0)\n hiddens, (output_h, output_c) = tf.nn.dynamic_rnn(\n cell=ParametrizedLSTMCell(W, b, self.hidden_size),\n inputs=layer_inputs,\n dtype=inputs.dtype,\n time_major=True,\n initial_state=(input_h[cell_idx], input_c[cell_idx]))\n if direction == REVERSED:\n hiddens = reverse(hiddens, axis=0)\n hidden_fwd_bwd.append(hiddens)\n final_output_c.append(tf.expand_dims(output_c, 0))\n final_output_h.append(tf.expand_dims(output_h, 0))\n cell_idx += 1\n if len(hidden_fwd_bwd) > 1:\n layer_inputs = concat(hidden_fwd_bwd, 
axis=2)\n final_output_c = concat(final_output_c, axis=0)\n final_output_h = concat(final_output_h, axis=0)\n else:\n layer_inputs = hidden_fwd_bwd[0]\n final_output_c = final_output_c[0]\n final_output_h = final_output_h[0]\n return layer_inputs, final_output_h, final_output_c\n\n\ndef highway(x, activation_fn=tf.nn.relu, scope=None):\n size = x.get_shape()[-1].value\n with tf.variable_scope(scope or \"HighwayLayer\"):\n activ = tf.contrib.layers.fully_connected(\n x, size * 2, activation_fn=None, scope=\"FC\"\n )\n transform = tf.sigmoid(activ[..., :size], name=\"transform_gate\")\n hidden = activation_fn(activ[..., size:])\n carry = 1.0 - transform\n return tf.add(hidden * transform, x * carry, \"y\")\n\n\ndef conv2d(inputs, output_dim, k_h, k_w,\n stddev=0.02, scope=None,\n weight_noise=0.0, is_training=True):\n with tf.variable_scope(scope or \"Conv2D\"):\n w = tf.get_variable('w', [k_h, k_w, inputs.get_shape()[-1], output_dim],\n initializer=tf.truncated_normal_initializer(stddev=stddev))\n if weight_noise > 0 and not isinstance(is_training, bool):\n w = add_weight_noise(w, is_training=is_training, stddev=weight_noise)\n return tf.nn.conv2d(inputs, w, strides=[1, 1, 1, 1], padding=\"VALID\")\n\n\n\ndef character_convolution(inputs, feature):\n inputs_2d = tf.reshape(inputs,\n [tf.shape(inputs)[0] * tf.shape(inputs)[1], tf.shape(inputs)[2]]\n )\n inputs_3d = embedding_lookup(\n inputs_2d,\n dim=feature[\"dimension\"],\n # 255 different bytes (uint8)\n # & start and end symbol:\n size=257,\n dtype=tf.float32,\n mask_negative=True)\n inputs_4d = tf.expand_dims(inputs_3d, 1)\n feature_pools = []\n for idx, conv_filter in enumerate(feature[\"filters\"]):\n width, channels = conv_filter[\"width\"], conv_filter[\"channels\"]\n # [batch * time x 1 x word_length x embed_dim x feature_map_dim]\n conv = tf.squeeze(conv2d(inputs_4d, channels, 1, width, scope=\"CharacterConvolution%d\" % (idx,)), [1])\n # remove word dimension\n pool = tf.reduce_max(conv, 1)\n feature_pools.append(pool)\n activations = concat(feature_pools, axis=1)\n channels_out = sum(conv_filter[\"channels\"] for conv_filter in feature[\"filters\"])\n activations = tf.reshape(\n tf.tanh(activations),\n [tf.shape(inputs)[0], tf.shape(inputs)[1], channels_out],\n name=\"CharacterConvolutionPooled\")\n for idx in range(feature[\"highway_layers\"]):\n activations = highway(activations, scope=\"HighwayLayer%d\" % (idx,),\n activation_fn=tf.tanh)\n return activations\n\n\ndef feature_dtype(feat):\n if requires_vocab(feat):\n return tf.int32\n elif feat[\"type\"] in {\"digit\", \"punctuation_count\", \"uppercase\"}:\n return tf.float32\n elif requires_character_convolution(feat):\n return tf.int32\n else:\n raise ValueError(\"unknown feature %r.\" % (feat,))\n\n\ndef feature_shape(feature):\n if requires_vocab(feature) or feature[\"type\"] in {'digit', 'punctuation_count', 'uppercase'}:\n return [None, None]\n elif requires_character_convolution(feature):\n return [None, None, None]\n else:\n raise ValueError(\"unknown feature %r.\" % (feature,))\n\n\ndef build_inputs(features, objectives, fused, class_weights,\n class_weights_clipval):\n input_placeholders = []\n labels = []\n labels_mask = []\n labels_class_weights = []\n max_output_vocab = max(len(obj[\"vocab\"]) for obj in objectives)\n\n with tf.variable_scope(\"Inputs\"):\n is_training = tf.placeholder(tf.bool, [], name=\"is_training\")\n tf.add_to_collection(IS_TRAINING, is_training)\n for idx, feat in enumerate(features):\n input_placeholder = tf.placeholder(\n 
feature_dtype(feat), feature_shape(feat),\n name=\"input_placeholders_%d\" % (idx,)\n )\n input_placeholders.append(input_placeholder)\n tf.add_to_collection(INPUT_PLACEHOLDERS, input_placeholder)\n\n if fused:\n label_placeholder = tf.placeholder(\n tf.int32, [None, None, len(objectives)]\n )\n labels_mask_placeholder = tf.placeholder(\n tf.bool, [None, None, len(objectives)], name=\"labels_mask\"\n )\n\n labels.append(label_placeholder)\n labels_mask.append(labels_mask_placeholder)\n tf.add_to_collection(LABEL_PLACEHOLDERS, label_placeholder)\n tf.add_to_collection(LABEL_MASK_PLACEHOLDERS, labels_mask_placeholder)\n\n if class_weights:\n with tf.variable_scope(\"FusedClassWeights\"):\n init_class_weights = tf.get_variable(\n name=\"class_weights\",\n shape=[len(objectives) * max_output_vocab],\n initializer=tf.constant_initializer(1),\n dtype=tf.int64,\n trainable=False)\n init_class_count = tf.get_variable(\n name=\"class_weights_denominator\",\n shape=[len(objectives)],\n initializer=tf.constant_initializer(1),\n dtype=tf.int64,\n trainable=False)\n\n def update_class_weights():\n mask_as_ints = tf.cast(tf.reshape(labels_mask_placeholder, [-1, len(objectives)]), tf.int64)\n updated_cls_weights = tf.scatter_add(\n init_class_weights,\n tf.reshape(label_placeholder + tf.reshape(tf.range(len(objectives)) * max_output_vocab, [1, 1, len(objectives)]), [-1]),\n tf.reshape(mask_as_ints, [-1])\n )\n updated_class_count = tf.assign_add(init_class_count, tf.reduce_sum(mask_as_ints, 0))\n\n # class weight: weight_i = total / class_i\n weights = tf.clip_by_value(tf.expand_dims(updated_class_count, 1) /\n tf.reshape(updated_cls_weights, [len(objectives), max_output_vocab]),\n 1e-6, class_weights_clipval)\n return tf.cast(weights, tf.float32)\n\n def return_class_weights():\n # class weight: weight_i = total / class_i\n return tf.cast(\n tf.clip_by_value(tf.expand_dims(init_class_count, 1) /\n tf.reshape(init_class_weights, [len(objectives), max_output_vocab]),\n 1e-6, class_weights_clipval), tf.float32)\n\n labels_class_weights.append(\n tf.cond(is_training,\n update_class_weights,\n return_class_weights))\n else:\n labels_class_weights.append(None)\n else:\n for objective in objectives:\n with tf.variable_scope(objective[\"name\"]):\n label_placeholder = tf.placeholder(\n tf.int32, [None, None], name=\"labels\"\n )\n labels.append(label_placeholder)\n if objective[\"type\"] == \"crf\":\n labels_mask_placeholder = tf.placeholder(\n tf.bool, [None], name=\"labels_mask\"\n )\n labels_class_weights.append(None)\n elif objective[\"type\"] == \"softmax\":\n labels_mask_placeholder = tf.placeholder(\n tf.bool, [None, None], name=\"labels_mask\"\n )\n if class_weights:\n init_class_weights = tf.get_variable(\n name=\"class_weights\",\n shape=len(objective[\"vocab\"]),\n initializer=tf.constant_initializer(1),\n dtype=tf.int64,\n trainable=False)\n init_class_count = tf.get_variable(\n name=\"class_weights_denominator\",\n shape=[],\n initializer=tf.constant_initializer(1),\n dtype=tf.int64,\n trainable=False)\n\n def update_class_weights():\n mask_as_ints = tf.cast(tf.reshape(labels_mask_placeholder, [-1]), tf.int64)\n updated_cls_weights = tf.scatter_add(\n init_class_weights,\n tf.reshape(label_placeholder, [-1]),\n mask_as_ints\n )\n updated_class_count = tf.assign_add(init_class_count, tf.reduce_sum(mask_as_ints))\n\n # class weight: weight_i = total / class_i\n weights = tf.clip_by_value(updated_class_count / updated_cls_weights,\n 1e-6, class_weights_clipval)\n return tf.cast(weights, 
tf.float32)\n\n def return_class_weights():\n # class weight: weight_i = total / class_i\n return tf.cast(\n tf.clip_by_value(init_class_count / init_class_weights,\n 1e-6, class_weights_clipval), tf.float32)\n\n labels_class_weights.append(\n tf.cond(is_training, update_class_weights, return_class_weights)\n )\n else:\n labels_class_weights.append(None)\n else:\n raise ValueError(\n \"unknown objective type %r.\" % (\n objective[\"type\"]\n )\n )\n labels_mask.append(labels_mask_placeholder)\n tf.add_to_collection(LABEL_PLACEHOLDERS, label_placeholder)\n tf.add_to_collection(LABEL_MASK_PLACEHOLDERS, labels_mask_placeholder)\n sequence_lengths = tf.placeholder(tf.int32, [None],\n name=\"sequence_lengths\")\n tf.add_to_collection(SEQUENCE_LENGTHS, sequence_lengths)\n return (input_placeholders,\n labels,\n labels_mask,\n labels_class_weights,\n sequence_lengths,\n is_training)\n\n\ndef add_weight_noise(x, is_training, stddev):\n return tf.cond(is_training,\n lambda: x + tf.random_normal(\n shape=tf.shape(x), stddev=stddev),\n lambda: x)\n\n\ndef build_recurrent(inputs, cudnn, faux_cudnn, hidden_sizes, is_training,\n keep_prob, weight_noise):\n dtype = tf.float32\n if cudnn:\n if len(hidden_sizes) == 0:\n raise ValueError(\"hidden_sizes must be a list of length > 1.\")\n hidden_size = hidden_sizes[0]\n if any(hidden_size != hsize for hsize in hidden_sizes):\n raise ValueError(\"cudnn RNN requires all hidden units \"\n \"to be the same size (got %r)\" % (\n hidden_sizes,\n ))\n num_layers = len(hidden_sizes)\n cell_input_size = inputs.get_shape()[-1].value\n\n est_size = estimate_cudnn_parameter_size(\n num_layers=num_layers,\n hidden_size=hidden_size,\n input_size=cell_input_size,\n input_mode=\"linear_input\",\n direction=\"bidirectional\"\n )\n # autoswitch to GPUs based on availability of alternatives:\n cudnn_params = tf.get_variable(\"RNNParams\",\n shape=[est_size],\n dtype=tf.float32,\n initializer=tf.contrib.layers.variance_scaling_initializer())\n if weight_noise > 0:\n cudnn_params = add_weight_noise(cudnn_params,\n stddev=weight_noise, is_training=is_training)\n if faux_cudnn:\n cudnn_cell = CpuCudnnLSTM(num_layers,\n hidden_size,\n cell_input_size,\n input_mode=\"linear_input\",\n direction=\"bidirectional\")\n else:\n cpu_cudnn_params(cudnn_params,\n num_layers=num_layers,\n hidden_size=hidden_size,\n cell_input_size=cell_input_size,\n input_mode=\"linear_input\",\n direction=\"bidirectional\")\n cudnn_cell = CudnnLSTM(num_layers,\n hidden_size,\n cell_input_size,\n input_mode=\"linear_input\",\n direction=\"bidirectional\")\n init_state = tf.fill(\n (2 * num_layers, tf.shape(inputs)[1], hidden_size),\n tf.constant(np.float32(0.0)))\n hiddens, output_h, output_c = cudnn_cell(\n inputs,\n input_h=init_state,\n input_c=init_state,\n params=cudnn_params,\n is_training=True)\n hiddens = maybe_dropout(\n hiddens,\n keep_prob,\n is_training)\n else:\n cell = MultiRNNCell(\n [LSTMCell(hsize, is_training=is_training, keep_prob=keep_prob)\n for hsize in hidden_sizes]\n )\n hiddens, _ = bidirectional_dynamic_rnn(\n cell,\n inputs,\n time_major=True,\n dtype=dtype,\n swap_memory=True\n )\n return hiddens\n\n\ndef build_embed(inputs, features, index2words, keep_prob, is_training):\n embeddings = []\n for idx, (values, feature, index2word) in enumerate(zip(inputs, features, index2words)):\n if requires_vocab(feature):\n with tf.variable_scope(\"embedding_%d\" % (idx,)):\n embedding = embedding_lookup(\n values,\n dim=feature[\"dimension\"],\n size=len(index2word),\n dtype=tf.float32,\n 
mask_negative=True\n )\n embeddings.append(embedding)\n elif requires_character_convolution(feature):\n embeddings.append(\n character_convolution(values, feature)\n )\n else:\n embeddings.append(tf.expand_dims(values, 2))\n return maybe_dropout(concat(embeddings, axis=2), keep_prob, is_training)\n\n\ndef crf_metrics(unary_scores, labels, transition_params, sequence_lengths,\n mask):\n \"\"\"\n Computes CRF output metrics.\n Receives:\n unary_scores : batch-major order\n labels : batch-major order\n transition_params : nclasses x nclasses matrix.\n sequence_lengths : length of each time-sequence\n mask : batch-major example mask\n\n Returns:\n token_correct,\n token_correct_total,\n sentence_correct,\n sentence_correct_total\n \"\"\"\n classes = unary_scores.get_shape()[-1].value\n decoded, scores = viterbi_decode(unary_scores,\n transition_params,\n sequence_lengths)\n\n tf.add_to_collection(UNARY_SCORES, unary_scores)\n tf.add_to_collection(DECODED, decoded)\n tf.add_to_collection(DECODED_SCORES, scores)\n\n equals_label = tf.equal(labels, decoded)\n token_correct = tf.reduce_sum(\n tf.cast(\n tf.logical_and(equals_label, mask),\n tf.int32\n )\n )\n token_correct_total = tf.reduce_sum(tf.cast(mask, tf.int32))\n tf.add_to_collection(TOKEN_CORRECT, token_correct)\n tf.add_to_collection(TOKEN_CORRECT_TOTAL, token_correct_total)\n sentence_correct, _ = compute_sentence_correct(equals_label, mask)\n sentence_correct_total = tf.reduce_sum(tf.cast(mask[:, 0], tf.int32))\n\n tf.add_to_collection(SENTENCE_CORRECT, sentence_correct)\n tf.add_to_collection(SENTENCE_CORRECT_TOTAL, sentence_correct_total)\n\n build_true_false_positives(decoded, mask, labels,\n classes, equals_label)\n\n return (token_correct, token_correct_total,\n sentence_correct, sentence_correct_total)\n\n\ndef build_true_false_positives(decoded, mask_batch_major, labels_batch_major,\n classes, equals_label):\n masked_equals_label = tf.logical_and(equals_label, mask_batch_major)\n\n # now for each class compute tp, fp, fn\n # [nclasses x batch x time]\n masked_per_class = tf.logical_and(\n tf.equal(labels_batch_major[None, :, :], tf.range(classes)[:, None, None]),\n mask_batch_major)\n\n # correct, and on label\n correct = tf.reduce_sum(tf.cast(tf.logical_and(masked_per_class, equals_label[None, :, :]), tf.int32),\n axis=[1, 2])\n # predicted a particular class\n guessed = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(decoded[None, :, :], tf.range(classes)[:, None, None]), mask_batch_major), tf.int32),\n axis=[1, 2])\n total = tf.reduce_sum(tf.cast(masked_per_class, tf.int32), axis=[1, 2])\n tp, fp, fn = correct, guessed - correct, total - correct\n\n tf.add_to_collection(TRUE_POSITIVES, tp)\n tf.add_to_collection(FALSE_POSITIVES, fp)\n tf.add_to_collection(FALSE_NEGATIVES, fn)\n\n\ndef softmax_metrics(unary_scores, labels, mask):\n \"\"\"\n Compute softmax output stats for correct/accuracy per-token/per-sentence.\n Receive\n unary_scores : time-major\n labels : time-major\n mask : time-major\n Returns:\n token_correct,\n token_correct_total,\n sentence_correct,\n sentence_correct_total\n \"\"\"\n classes = unary_scores.get_shape()[-1].value\n unary_scores_batch_major = tf.transpose(unary_scores, [1, 0, 2])\n labels_batch_major = tf.transpose(labels, [1, 0])\n mask_batch_major = tf.transpose(mask, [1, 0])\n decoded = tf.cast(tf.argmax(unary_scores_batch_major, 2), labels.dtype)\n unary_probs_batch_major = tf.nn.softmax(unary_scores_batch_major)\n scores = tf.reduce_max(unary_probs_batch_major, 2)\n\n 
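# Expose probabilities, argmax predictions, and their confidence scores through graph\n    # collections so they can be recovered after a metagraph restore (see recover_graph_variables).\n    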
tf.add_to_collection(UNARY_SCORES, unary_probs_batch_major)\n tf.add_to_collection(DECODED, decoded)\n tf.add_to_collection(DECODED_SCORES, scores)\n\n equals_label = tf.equal(decoded, labels_batch_major)\n\n token_correct = tf.reduce_sum(\n tf.cast(\n tf.logical_and(\n equals_label,\n mask_batch_major\n ),\n tf.int32\n )\n )\n token_correct_total = tf.reduce_sum(tf.cast(mask, tf.int32))\n tf.add_to_collection(TOKEN_CORRECT, token_correct)\n tf.add_to_collection(TOKEN_CORRECT_TOTAL, token_correct_total)\n\n sentence_correct, sentence_correct_total = compute_sentence_correct(\n equals_label, mask_batch_major\n )\n tf.add_to_collection(SENTENCE_CORRECT, sentence_correct)\n tf.add_to_collection(SENTENCE_CORRECT_TOTAL, sentence_correct_total)\n\n build_true_false_positives(decoded, mask_batch_major, labels_batch_major,\n classes, equals_label)\n return (token_correct, token_correct_total,\n sentence_correct, sentence_correct_total)\n\n\ndef add_objective_names_types(objectives):\n for objective in objectives:\n with tf.variable_scope(objective[\"name\"]):\n # store objective names in graph:\n tf.add_to_collection(OBJECTIVE_NAMES,\n tf.constant(objective[\"name\"], name=\"objective_name\")\n )\n tf.add_to_collection(OBJECTIVE_TYPES,\n tf.constant(objective[\"type\"], name=\"objective_type\")\n )\n\n\ndef build_loss(inputs, objectives, labels, labels_mask,\n labels_class_weights, fused, sequence_lengths,\n class_weights_normalize):\n \"\"\"\n Compute loss function given the objectives.\n Assumes inputs are of the form [time, batch, features].\n\n Arguments:\n ----------\n inputs : tf.Tensor\n objectives : list, objective specs\n labels : list\n labels_mask : list\n labels_class_weights : list\n sequence_lengths : tf.Tensor\n\n Returns:\n loss : tf.Tensor (scalar)\n \"\"\"\n losses = []\n negative_log_likelihoods = []\n sentence_corrects = []\n sentence_corrects_total = []\n token_corrects = []\n token_corrects_total = []\n max_output_vocab = max(len(obj[\"vocab\"]) for obj in objectives)\n total_output_size = len(objectives) * max_output_vocab\n\n add_objective_names_types(objectives)\n\n if fused:\n with tf.variable_scope(\"FusedOutputs\"):\n objective_labels = labels[0]\n mask = labels_mask[0]\n objective_class_weights = labels_class_weights[0]\n # perform all classifications at once:\n unary_scores = tf.contrib.layers.fully_connected(\n inputs, total_output_size,\n activation_fn=None\n )\n\n unary_scores = tf.reshape(unary_scores,\n [tf.shape(unary_scores)[0],\n tf.shape(unary_scores)[1],\n len(objectives),\n max_output_vocab])\n negative_log_likelihood = sparse_softmax_cross_entropy_with_logits(\n logits=unary_scores,\n labels=objective_labels\n )\n labels_mask_casted = tf.cast(mask, negative_log_likelihood.dtype)\n masked_negative_log_likelihood = negative_log_likelihood * labels_mask_casted\n if objective_class_weights is not None:\n class_weights_mask = tf.gather(\n tf.reshape(objective_class_weights, [-1]),\n objective_labels +\n tf.reshape(tf.range(len(objectives)) * max_output_vocab, [1, 1, len(objectives)]))\n if class_weights_normalize:\n masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask\n num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted * class_weights_mask), 1e-6)\n normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives))\n else:\n masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask\n num_predictions = 
tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)\n normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives))\n else:\n masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood\n num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)\n normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives))\n\n masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood)\n losses.append(normed_loss)\n negative_log_likelihoods.append(masked_negative_log_likelihood_sum)\n\n for idx, objective in enumerate(objectives):\n with tf.variable_scope(objective[\"name\"]):\n (token_correct,\n token_correct_total,\n sentence_correct,\n sentence_correct_total) = softmax_metrics(unary_scores[:, :, idx, :len(objective[\"vocab\"])],\n labels=objective_labels[:, :, idx],\n mask=mask[:, :, idx])\n token_corrects.append(token_correct)\n token_corrects_total.append(token_correct_total)\n sentence_corrects.append(sentence_correct)\n sentence_corrects_total.append(sentence_correct_total)\n\n else:\n for objective, objective_labels, mask, objective_class_weights in zip(objectives, labels, labels_mask, labels_class_weights):\n with tf.variable_scope(objective[\"name\"]):\n if objective[\"type\"] == \"crf\":\n unary_scores = tf.contrib.layers.fully_connected(\n inputs,\n len(objective[\"vocab\"]),\n activation_fn=None\n )\n unary_scores_batch_major = tf.transpose(unary_scores, [1, 0, 2])\n labels_batch_major = tf.transpose(objective_labels, [1, 0])\n\n\n padded_unary_scores_batch_major = tf.cond(tf.greater(tf.shape(unary_scores_batch_major)[1], 1),\n lambda: unary_scores_batch_major,\n lambda: tf.pad(unary_scores_batch_major, [[0, 0], [0, 1], [0, 0]]))\n padded_labels_batch_major = tf.cond(tf.greater(tf.shape(labels_batch_major)[1], 1),\n lambda: labels_batch_major,\n lambda: tf.pad(labels_batch_major, [[0, 0], [0, 1]]))\n\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n padded_unary_scores_batch_major, padded_labels_batch_major, sequence_lengths\n )\n labels_mask_casted = tf.cast(mask, log_likelihood.dtype)\n masked_log_likelihood = (\n log_likelihood * labels_mask_casted\n )\n masked_negative_log_likelihood_sum = -tf.reduce_sum(masked_log_likelihood)\n num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)\n losses.append(masked_negative_log_likelihood_sum / num_predictions)\n negative_log_likelihoods.append(masked_negative_log_likelihood_sum)\n sequence_mask = tf.logical_and(\n tf.sequence_mask(sequence_lengths),\n # pad the time dimension:\n tf.expand_dims(mask, 1)\n )\n\n (token_correct,\n token_correct_total,\n sentence_correct,\n sentence_correct_total) = crf_metrics(unary_scores_batch_major,\n labels=labels_batch_major,\n mask=sequence_mask,\n transition_params=transition_params,\n sequence_lengths=sequence_lengths)\n elif objective[\"type\"] == 'softmax':\n unary_scores = tf.contrib.layers.fully_connected(\n inputs,\n len(objective[\"vocab\"]),\n activation_fn=None\n )\n negative_log_likelihood = sparse_softmax_cross_entropy_with_logits(\n logits=unary_scores,\n labels=objective_labels\n )\n labels_mask_casted = tf.cast(mask, negative_log_likelihood.dtype)\n masked_negative_log_likelihood = (\n negative_log_likelihood * labels_mask_casted\n )\n if objective_class_weights is not None:\n class_weights_mask = tf.gather(objective_class_weights, objective_labels)\n masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * 
class_weights_mask\n masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood)\n\n if class_weights_normalize:\n num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted * class_weights_mask), 1e-6)\n normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions\n else:\n num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)\n normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions\n else:\n masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood\n masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood)\n num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6)\n normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions\n\n losses.append(normed_loss)\n negative_log_likelihoods.append(masked_negative_log_likelihood_sum)\n\n (token_correct,\n token_correct_total,\n sentence_correct,\n sentence_correct_total) = softmax_metrics(unary_scores,\n labels=objective_labels,\n mask=mask)\n else:\n raise ValueError(\n \"unknown objective type %r\" % (objective[\"type\"],)\n )\n token_corrects.append(token_correct)\n token_corrects_total.append(token_correct_total)\n sentence_corrects.append(sentence_correct)\n sentence_corrects_total.append(sentence_correct_total)\n # aggregate metrics for all objectives:\n total_loss = tf.reduce_sum(sum_list(losses))\n tf.summary.scalar(\"BatchLoss\", total_loss)\n neg_log_likelihood_total = sum_list(negative_log_likelihoods)\n tf.summary.scalar(\"BatchNLL\", neg_log_likelihood_total)\n tf.add_to_collection(NLL, neg_log_likelihood_total)\n tf.add_to_collection(NLL_TOTAL, tf.shape(inputs)[1])\n\n sentence_corrects_total = sum_list(sentence_corrects_total)\n sentence_corrects = sum_list(sentence_corrects)\n tf.add_to_collection(SENTENCE_CORRECT_ALL, sentence_corrects)\n tf.add_to_collection(SENTENCE_CORRECT_ALL_TOTAL, sentence_corrects_total)\n\n token_corrects_total = sum_list(token_corrects_total)\n token_corrects = sum_list(token_corrects)\n tf.add_to_collection(TOKEN_CORRECT_ALL, token_corrects)\n tf.add_to_collection(TOKEN_CORRECT_ALL_TOTAL, token_corrects_total)\n return total_loss\n\n\ndef build_model(name,\n trainable,\n features,\n feature_index2words,\n objectives,\n keep_prob,\n input_keep_prob,\n hidden_sizes,\n freeze_rate,\n freeze_rate_anneal,\n solver,\n cudnn,\n fused,\n faux_cudnn,\n class_weights,\n class_weights_normalize,\n class_weights_clipval,\n lr,\n weight_noise,\n anneal_rate,\n clip_norm):\n # mixed output fusing is currently unsupported\n if fused and any(obj[\"type\"] != \"softmax\" for obj in objectives):\n raise ValueError(\"cannot fuse outputs and use non-softmax output.\")\n # clear all existing collections to ensure every new collection is\n # is created fresh\n graph = tf.get_default_graph()\n for collection_name in graph.get_all_collection_keys():\n graph.clear_collection(collection_name)\n\n # build a model under the model's name to prevent collisions\n # when multiple models are restored simultaneously\n with tf.variable_scope(name):\n global_step = tf.Variable(0, trainable=False, name=\"global_step\")\n tf.add_to_collection(GLOBAL_STEP, global_step)\n # model placeholders:\n (input_placeholders,\n labels,\n labels_mask,\n labels_class_weights,\n sequence_lengths,\n is_training) = build_inputs(features,\n objectives=objectives,\n fused=fused,\n class_weights=class_weights,\n class_weights_clipval=class_weights_clipval)\n embed = build_embed(input_placeholders,\n 
features=features,\n index2words=feature_index2words,\n is_training=is_training,\n keep_prob=input_keep_prob)\n hiddens = embed\n if len(hidden_sizes) > 0:\n hiddens = build_recurrent(hiddens,\n cudnn=cudnn,\n faux_cudnn=faux_cudnn,\n hidden_sizes=hidden_sizes,\n keep_prob=keep_prob,\n weight_noise=weight_noise,\n is_training=is_training)\n\n loss = build_loss(hiddens,\n objectives=objectives,\n fused=fused,\n labels=labels,\n labels_mask=labels_mask,\n labels_class_weights=labels_class_weights,\n class_weights_normalize=class_weights_normalize,\n sequence_lengths=sequence_lengths)\n if trainable:\n learning_rate = tf.train.exponential_decay(lr, global_step,\n 33000, anneal_rate, staircase=True)\n\n if solver == \"adam\":\n optimizer = LazyAdamOptimizer(learning_rate)\n elif solver == \"sgd\":\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n else:\n raise ValueError(\"Unknown solver %r.\" % (solver))\n\n grad_vars = optimizer.compute_gradients(loss)\n if clip_norm > 0:\n grad_vars = [(grad if isinstance(grad, tf.IndexedSlices) else tf.clip_by_norm(grad, clip_norm), var) for grad, var in grad_vars]\n train_op = optimizer.apply_gradients(grad_vars, global_step=global_step)\n else:\n train_op = tf.no_op()\n tf.add_to_collection(TRAIN_OP, train_op)\n tf.add_to_collection(TRAIN_SUMMARIES, tf.summary.merge_all())\n\n\ndef restore_session(session,\n path,\n replace_to=None,\n replace_from=None,\n verbose=False,\n use_metagraph=True,\n only_features=False):\n \"\"\"\n Call restore on tf.train.Saver on a specific path to store all the\n variables of the current tensorflow session to a file for later restoring.\n\n Arguments:\n session : tf.Session\n path : str, place containing the session data to restore\n verbose : bool, print status messages.\n use_metagraph : bool, restore by re-creating saved metagraph.\n\n Returns:\n bool : success or failure of the restoration\n \"\"\"\n makedirs(path, exist_ok=True)\n if not path.endswith(\"/\"):\n path = path + \"/\"\n checkpoint = tf.train.get_checkpoint_state(path)\n if verbose:\n print(\"Looking for saved session under %r\" % (path,), flush=True)\n if checkpoint is None or checkpoint.model_checkpoint_path is None:\n if verbose:\n print(\"No saved session found\", flush=True)\n return False\n fname = basename(checkpoint.model_checkpoint_path)\n if verbose:\n print(\"Restoring saved session from %r\" % (join(path, fname),), flush=True)\n\n if use_metagraph:\n param_saver = tf.train.import_meta_graph(join(path, fname + \".meta\"),\n clear_devices=True)\n missing_vars = []\n else:\n if only_features:\n to_restore = {}\n whitelist = [\"embedding\", \"/RNN/\", \"/RNNParams\", \"CharacterConvolution\", \"HighwayLayer\"]\n for var in tf.global_variables():\n if any(keyword in var.name for keyword in whitelist):\n to_restore[var.name[:-2]] = var\n param_saver = tf.train.Saver(to_restore)\n else:\n if replace_to is not None and replace_from is not None:\n to_restore = {}\n for var in tf.global_variables():\n var_name = var.name[:var.name.rfind(\":\")]\n old_name = var_name.replace(replace_to, replace_from)\n to_restore[old_name] = var\n param_saver = tf.train.Saver(to_restore)\n missing_vars = []\n else:\n reader = tf.train.NewCheckpointReader(join(path, fname))\n saved_shapes = reader.get_variable_to_shape_map()\n found_vars = [var for var in tf.global_variables()\n if var.name.split(':')[0] in saved_shapes]\n missing_vars = [var for var in tf.global_variables()\n if var.name.split(':')[0] not in saved_shapes]\n param_saver = 
tf.train.Saver(found_vars)\n param_saver.restore(session, join(path, fname))\n session.run([var.initializer for var in missing_vars])\n return True\n\n\ndef bidirectional_dynamic_rnn(cell, inputs, dtype, time_major=True, swap_memory=False):\n with tf.variable_scope(\"forward\"):\n out_fwd, final_fwd = tf.nn.dynamic_rnn(\n cell,\n inputs,\n time_major=time_major,\n dtype=dtype,\n swap_memory=swap_memory\n )\n\n if time_major:\n reverse_axis = 0\n else:\n reverse_axis = 1\n\n with tf.variable_scope(\"backward\"):\n out_bwd, final_bwd = tf.nn.dynamic_rnn(\n cell,\n reverse(inputs, axis=reverse_axis),\n time_major=time_major,\n dtype=dtype,\n swap_memory=swap_memory\n )\n\n out_bwd = reverse(out_bwd, axis=reverse_axis)\n return concat([out_fwd, out_bwd], axis=2), (final_fwd, final_bwd)\n\n\ndef get_embedding_lookup(size, dim, dtype, reuse=None, trainable=True):\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):\n W = tf.get_variable(\n name=\"embedding\",\n shape=[size, dim],\n dtype=dtype,\n initializer=tf.random_uniform_initializer(\n -1.0 / math.sqrt(dim),\n 1.0 / math.sqrt(dim)\n ),\n trainable=trainable\n )\n return W\n\n\ndef embedding_lookup(inputs,\n size,\n dim,\n dtype,\n reuse=None,\n mask_negative=False,\n trainable=True,\n place_on_cpu_if_big=True):\n \"\"\"\n Construct an Embedding layer that gathers\n elements from a matrix with `size` rows,\n and `dim` features using the indices stored in `x`.\n\n Arguments:\n ----------\n inputs : tf.Tensor, of integer type\n size : int, how many symbols in the lookup table\n dim : int, how many columns per symbol.\n dtype : data type for the lookup table (e.g. tf.float32)\n reuse : bool, (default None) whether the lookup table\n was already used before (thus this is weight sharing).\n mask_negative : bool, (default False) should -1s in the\n lookup input indicate padding (e.g. 
no lookup),\n and thus should those values be masked out post-lookup.\n trainable : bool (default True), whether the parameters of\n this lookup table can be backpropagated into (e.g.\n for Glove word vectors that are fixed pre-trained, this\n can be set to False).\n place_on_cpu_if_big : bool, if matrix is big, store it on cpu.\n Returns:\n --------\n tf.Tensor, result of tf.nn.embedding_lookup(LookupTable, inputs)\n \"\"\"\n W = get_embedding_lookup(size, dim, dtype, reuse, trainable=trainable)\n if mask_negative:\n embedded = tf.nn.embedding_lookup(W, tf.maximum(inputs, 0))\n null_mask = tf.expand_dims(\n tf.cast(\n tf.not_equal(inputs, -1),\n dtype\n ),\n -1\n )\n return embedded * null_mask\n else:\n return tf.nn.embedding_lookup(W, inputs)\n\n\ndef _get_sharded_variable(name, shape, dtype, num_shards):\n \"\"\"Get a list of sharded variables with the given dtype.\"\"\"\n if num_shards > shape[0]:\n raise ValueError(\"Too many shards: shape=%s, num_shards=%d\" %\n (shape, num_shards))\n unit_shard_size = int(math.floor(shape[0] / num_shards))\n remaining_rows = shape[0] - unit_shard_size * num_shards\n\n shards = []\n for i in range(num_shards):\n current_size = unit_shard_size\n if i < remaining_rows:\n current_size += 1\n shards.append(\n tf.get_variable(\n name + \"_%d\" % i,\n [current_size] + shape[1:],\n dtype=dtype\n )\n )\n return shards\n\n\ndef _get_concat_variable(name, shape, dtype, num_shards):\n \"\"\"Get a sharded variable concatenated into one tensor.\"\"\"\n sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)\n if len(sharded_variable) == 1:\n return sharded_variable[0]\n\n concat_name = name + \"/concat\"\n concat_full_name = tf.get_variable_scope().name + \"/\" + concat_name + \":0\"\n for value in tf.get_collection(tf.GraphKeys.CONCATENATED_VARIABLES):\n if value.name == concat_full_name:\n return value\n\n concat_variable = tf.concat_v2(sharded_variable, 0, name=concat_name)\n tf.add_to_collection(tf.GraphKeys.CONCATENATED_VARIABLES, concat_variable)\n return concat_variable\n\n\nclass SequenceModel(object):\n def __init__(self,\n objectives,\n features,\n feature_index2words,\n hidden_sizes,\n keep_prob,\n lr,\n solver,\n seed=1234,\n input_keep_prob=0.7,\n clip_norm=-1,\n name=\"SequenceTagger\",\n cudnn=False,\n anneal_rate=0.99,\n trainable=True,\n weight_noise=0.0,\n class_weights_normalize=False,\n faux_cudnn=False,\n class_weights=False,\n class_weights_clipval=1000.0,\n freeze_rate=1.0,\n fused=False,\n freeze_rate_anneal=0.8,\n create_variables=True):\n if fused and objectives[0][\"type\"] == \"crf\":\n fused = False\n\n self.keep_prob = keep_prob\n self.input_keep_prob = input_keep_prob\n self.hidden_sizes = hidden_sizes\n self.name = name\n self.objectives = objectives\n self.features = features\n self.feature_index2words = feature_index2words\n self.seed = seed\n self.lr = lr\n self.fused = fused\n self.weight_noise = weight_noise\n self.anneal_rate = anneal_rate\n self.clip_norm = clip_norm\n self.solver = solver\n self.class_weights_normalize = class_weights_normalize\n self.class_weights = class_weights\n self.class_weights_clipval = class_weights_clipval\n self.rng = np.random.RandomState(seed)\n self.cudnn = cudnn\n self.feature_word2index = [\n {w: k for k, w in enumerate(index2word)} if index2word is not None else None\n for index2word in self.feature_index2words\n ]\n self.label2index = [\n {w: k for k, w in enumerate(objective[\"vocab\"])}\n for objective in self.objectives\n ]\n\n if create_variables:\n # 1) build graph 
here (TF functional code pattern)\n build_model(name=self.name,\n trainable=trainable,\n objectives=self.objectives,\n features=self.features,\n feature_index2words=self.feature_index2words,\n hidden_sizes=self.hidden_sizes,\n keep_prob=self.keep_prob,\n solver=self.solver,\n freeze_rate=freeze_rate,\n class_weights_normalize=self.class_weights_normalize,\n class_weights=self.class_weights,\n class_weights_clipval=self.class_weights_clipval,\n freeze_rate_anneal=freeze_rate_anneal,\n cudnn=self.cudnn,\n lr=self.lr,\n fused=self.fused,\n weight_noise=self.weight_noise,\n anneal_rate=self.anneal_rate,\n input_keep_prob=self.input_keep_prob,\n faux_cudnn=faux_cudnn,\n clip_norm=self.clip_norm)\n\n # 2) and use meta graph to recover these fields:\n self.recover_graph_variables()\n\n\n def recover_graph_variables(self):\n \"\"\"Use TF meta graph to obtain key metrics\n and outputs from model.\"\"\"\n self.labels = tf.get_collection(LABEL_PLACEHOLDERS)\n self.labels_mask = tf.get_collection(LABEL_MASK_PLACEHOLDERS)\n self.input_placeholders = tf.get_collection(INPUT_PLACEHOLDERS)\n self.sequence_lengths = tf.get_collection(SEQUENCE_LENGTHS)[0]\n self.decoded = tf.get_collection(DECODED)\n self.decoded_scores = tf.get_collection(DECODED_SCORES)\n self.unary_scores = tf.get_collection(UNARY_SCORES)\n\n self.token_correct = tf.get_collection(TOKEN_CORRECT)\n self.token_correct_total = tf.get_collection(TOKEN_CORRECT_TOTAL)\n\n self.sentence_correct = tf.get_collection(SENTENCE_CORRECT)\n self.sentence_correct_total = tf.get_collection(SENTENCE_CORRECT_TOTAL)\n\n self.token_correct_all = tf.get_collection(TOKEN_CORRECT_ALL)[0]\n self.token_correct_all_total = tf.get_collection(TOKEN_CORRECT_ALL_TOTAL)[0]\n self.sentence_correct_all = tf.get_collection(SENTENCE_CORRECT_ALL)[0]\n self.sentence_correct_all_total = tf.get_collection(SENTENCE_CORRECT_ALL_TOTAL)[0]\n\n self.true_positives = tf.get_collection(TRUE_POSITIVES)\n self.false_positives = tf.get_collection(FALSE_POSITIVES)\n self.false_negatives = tf.get_collection(FALSE_NEGATIVES)\n\n if len(self.true_positives) == 0 and len(self.token_correct) != 0:\n self.true_positives = [None for _ in self.token_correct]\n self.false_positives = [None for _ in self.token_correct]\n self.false_negatives = [None for _ in self.token_correct]\n\n if len(tf.get_collection(GLOBAL_STEP)) > 0:\n self.global_step = tf.get_collection(GLOBAL_STEP)[0]\n else:\n try:\n self.global_step = tf.get_default_graph().get_tensor_by_name(\n self.name + \"/\" + \"global_step:0\")\n except KeyError:\n self.global_step = tf.Variable(0, trainable=False, name=\"global_step\")\n tf.add_to_collection(GLOBAL_STEP, self.global_step)\n\n self.is_training = tf.get_collection(IS_TRAINING)[0]\n self.noop = tf.no_op()\n self.train_op = tf.get_collection(TRAIN_OP)[0]\n train_summaries = tf.get_collection(TRAIN_SUMMARIES)\n self.train_summaries = train_summaries[0] if len(train_summaries) > 0 else None\n\n self.nll = tf.get_collection(NLL)[0]\n self.nll_total = tf.get_collection(NLL_TOTAL)[0]\n self.saver = tf.train.Saver()\n\n\n\n @classmethod\n def overrideable_fields(cls):\n return [\n \"keep_prob\",\n \"name\",\n \"lr\",\n \"clip_norm\",\n \"class_weights_normalize\",\n \"class_weights_clipval\",\n \"cudnn\",\n \"anneal_rate\",\n \"weight_noise\",\n \"input_keep_prob\"\n ]\n\n @classmethod\n def fields_to_save(cls):\n return [\n \"hidden_sizes\",\n \"objectives\",\n \"name\",\n \"cudnn\",\n \"class_weights\",\n \"features\",\n \"fused\",\n \"class_weights_normalize\",\n \"weight_noise\",\n 
\"anneal_rate\",\n \"feature_index2words\",\n \"solver\",\n \"lr\",\n \"clip_norm\",\n \"keep_prob\",\n \"input_keep_prob\",\n \"class_weights_clipval\"\n ]\n\n def predict(self, session, feed_dict):\n feed_dict[self.is_training] = False\n outputs, outputs_probs = session.run(\n (self.decoded, self.decoded_scores), feed_dict\n )\n predictions_out = {}\n for value, val_prob, objective in zip(outputs, outputs_probs, self.objectives):\n predictions_out[objective[\"name\"]] = (value, val_prob)\n return predictions_out\n\n def predict_proba(self, session, feed_dict):\n feed_dict[self.is_training] = False\n outputs = session.run(\n self.unary_scores, feed_dict\n )\n predictions_out = {}\n for value, objective in zip(outputs, self.objectives):\n predictions_out[objective[\"name\"]] = value\n return predictions_out\n\n def save(self, session, path):\n makedirs(path, exist_ok=True)\n with open(join(path, \"model.json\"), \"wt\") as fout:\n save_dict = {}\n for field in type(self).fields_to_save():\n save_dict[field] = getattr(self, field)\n json.dump(save_dict, fout)\n\n with open(join(path, \"rng.pkl\"), \"wb\") as fout:\n pickle.dump(self.rng, fout)\n\n save_session(session, self.saver, path, verbose=True)\n\n @classmethod\n def load(cls, session, path, args=None, verbose=True, trainable=True,\n rebuild_graph=False, faux_cudnn=False, replace_to=None, replace_from=None):\n \"\"\"Convenience method for using a tensorflow session to reload\n a previously saved + serialized model from disk.\"\"\"\n with open(join(path, \"model.json\"), \"rt\") as fin:\n model_props = json.load(fin)\n\n # update fields based on CLI:\n if args is not None:\n ex_fields = explicitly_set_fields()\n for field in cls.overrideable_fields():\n if field in ex_fields:\n model_props[field] = getattr(args, field)\n\n # prune old fields based on changes to saveable fields:\n relevant_props = {}\n for field in cls.fields_to_save():\n if field in model_props:\n relevant_props[field] = model_props[field]\n\n relevant_props[\"trainable\"] = trainable\n relevant_props[\"faux_cudnn\"] = faux_cudnn\n\n if rebuild_graph:\n print(\"Using rebuild_graph mode: creating a new graph.\", flush=True)\n relevant_props[\"create_variables\"] = True\n model = cls(**relevant_props)\n restore_session(\n session, path,\n replace_to=replace_to,\n replace_from=replace_from,\n verbose=verbose,\n use_metagraph=False\n )\n else:\n if model_props.get(\"cudnn\", False):\n import tensorflow.contrib.cudnn_rnn\n relevant_props[\"create_variables\"] = False\n restore_session(\n session, path,\n verbose=verbose,\n use_metagraph=True\n )\n model = cls(**relevant_props)\n\n rng_path = join(path, \"rng.pkl\")\n if exists(rng_path):\n # apply the saved random number generator to this\n # model:\n with open(rng_path, \"rb\") as fin:\n model.rng = pickle.load(fin)\n return model\n\n\ndef make_path_absolute(obj, basepath):\n copied = obj.copy()\n for key in [\"path\", \"vocab\"]:\n if key in copied:\n copied[key] = join(basepath, copied[key])\n return copied\n\n\nclass Config(object):\n def __init__(self, datasets, features, objectives,\n wikidata_path, classification_path):\n assert(len(features) > 0)\n self.datasets = datasets\n self.features = features\n self.objectives = objectives\n self.classifications = None\n self.wikidata_path = wikidata_path\n self.classification_path = classification_path\n\n # build the objective names:\n self._named_objectives = [obj[\"name\"] for obj in self.objectives]\n\n @classmethod\n def load(cls, path):\n with open(path, \"rt\") as 
fin:\n config = json.load(fin)\n config_dirname = dirname(path)\n return cls(\n datasets=[make_path_absolute(dataset, config_dirname) for dataset in config['datasets']],\n features=[make_path_absolute(feat, config_dirname) for feat in config['features']],\n objectives=[make_path_absolute(objective, config_dirname) for objective in config['objectives']],\n wikidata_path=config.get(\"wikidata_path\", None),\n classification_path=(\n join(config_dirname, config.get(\"classification_path\", None))\n if \"classification_path\" in config else None)\n )\n\n def load_dataset_separate(self, dataset_type):\n paths = [dataset for dataset in self.datasets if dataset[\"type\"] == dataset_type]\n all_examples = {}\n for dataset in paths:\n _, extension = splitext(dataset[\"path\"])\n if extension == \".h5\" or extension == \".hdf5\":\n if self.classifications is None:\n if self.wikidata_path is None or self.classification_path is None:\n raise ValueError(\"missing wikidata_path and \"\n \"classification_path, cannot \"\n \"construct H5Dataset.\")\n self.classifications = ClassificationHandler(\n self.wikidata_path,\n self.classification_path\n )\n examples = H5Dataset(\n dataset[\"path\"],\n dataset[\"x\"],\n dataset[\"y\"],\n self._named_objectives,\n ignore_value=dataset.get('ignore', None),\n classifications=self.classifications)\n else:\n examples = TSVDataset(\n dataset[\"path\"],\n dataset[\"x\"],\n dataset[\"y\"],\n self._named_objectives,\n comment=dataset.get('comment', '#'),\n ignore_value=dataset.get('ignore', None),\n retokenize=dataset.get('retokenize', False))\n title = dataset[\"path\"].split('/')[-1].split(\".\")[0]\n name = title\n iteration = 1\n while name in all_examples:\n name = title + \"-%d\" % (iteration,)\n iteration += 1\n all_examples[name] = examples\n return all_examples\n\n def load_dataset(self, dataset_type, merge=True):\n datasets = self.load_dataset_separate(dataset_type)\n if merge:\n return CombinedDataset(list(datasets.values()))\n return datasets\n\n\ndef boolean_argument(parser, name, default):\n parser.add_argument(\"--\" + name, action=\"store_true\", default=default)\n parser.add_argument(\"--no\" + name, action=\"store_false\", dest=name)\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('config', type=str)\n parser.add_argument('--lr', type=float, default=0.001)\n parser.add_argument('--anneal_rate', type=float, default=0.99)\n parser.add_argument('--clip_norm', type=float, default=-1)\n parser.add_argument('--weight_noise', type=float, default=0.0)\n parser.add_argument('--hidden_sizes', type=int, nargs=\"*\", default=[200, 200])\n parser.add_argument('--load_dir', type=str, default=None)\n parser.add_argument('--restore_input_features', type=str, default=None)\n parser.add_argument('--improvement_key', type=str, default=\"token_correct\")\n parser.add_argument('--freeze_rate', type=float, default=1.0)\n parser.add_argument('--freeze_rate_anneal', type=float, default=0.8)\n parser.add_argument('--save_dir', type=str, default=None)\n parser.add_argument('--max_epochs', type=int, default=1000)\n parser.add_argument('--test_every', type=int, default=10000,\n help=\"Number of training iterations after which testing should occur.\")\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--max_patience', type=int, default=10)\n parser.add_argument('--class_weights_clipval', type=float, default=1000.0)\n parser.add_argument('--device', type=str, default=\"gpu:0\")\n 
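# Dropout keep probabilities (hidden and input layers) and the optimizer choice.\n    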
parser.add_argument('--keep_prob', type=float, default=0.5)\n parser.add_argument('--input_keep_prob', type=float, default=0.7)\n parser.add_argument('--solver', type=str, default=\"adam\",\n choices=[\"adam\", \"sgd\"])\n parser.add_argument(\"--name\", type=str, default=\"SequenceTagger\")\n parser.add_argument(\"--old_name\", type=str, default=None)\n boolean_argument(parser, \"cudnn\", True)\n boolean_argument(parser, \"faux_cudnn\", False)\n boolean_argument(parser, \"class_weights\", False)\n boolean_argument(parser, \"rebuild_graph\", False)\n boolean_argument(parser, \"class_weights_normalize\", False)\n boolean_argument(parser, \"fused\", True)\n boolean_argument(parser, \"report_metrics_per_axis\", True)\n boolean_argument(parser, \"report_class_f1\", False)\n return parser.parse_args(args=args)\n\n\ndef get_vocab(dataset, max_vocab=-1, extra_words=None):\n index2word = []\n occurrence = {}\n for el in dataset:\n if el not in occurrence:\n index2word.append(el)\n occurrence[el] = 1\n else:\n occurrence[el] += 1\n index2word = sorted(index2word, key=lambda x: occurrence[x], reverse=True)\n if max_vocab > 0:\n index2word = index2word[:max_vocab]\n if extra_words is not None:\n index2word = extra_words + index2word\n return index2word\n\n\ndef get_objectives(objectives, dataset):\n out = []\n for obj_idx, objective in enumerate(objectives):\n if \"vocab\" in objective:\n with open(objective[\"vocab\"], \"rt\") as fin:\n vocab = fin.read().splitlines()\n else:\n vocab = get_vocab((w[obj_idx] for _, y in dataset for w in y if w[obj_idx] is not None), -1)\n\n out.append(\n {\n \"vocab\": vocab,\n \"type\": objective[\"type\"],\n \"name\": objective[\"name\"]\n }\n )\n return out\n\n\ndef merge_all_metrics(metrics):\n out = {}\n for key, metric in metrics.items():\n for subkey, submetric in metric.items():\n if len(key) > 0:\n out[key + \"_\" + subkey] = submetric\n if subkey not in out:\n out[subkey] = submetric\n else:\n out[subkey] += submetric\n else:\n out[subkey] = submetric\n return out\n\n\ndef log_outcome(logger, outcome, step, name):\n for k, v in sorted(outcome.items()):\n if \"total\" in k:\n continue\n else:\n total = outcome[k + \"_total\"]\n if total == 0:\n continue\n logger.log(k, v / total, step=step)\n logger.writer.flush()\n\n\ndef compute_f1(metrics, objectives, report_class_f1):\n total_f1 = 0.0\n total_precision = 0.0\n total_recall = 0.0\n total = 0\n for objective in objectives:\n name = objective[\"name\"]\n key = \"%s_true_positives\" % (name,)\n if key not in metrics:\n continue\n tp = metrics[key]\n fp = metrics[\"%s_false_positives\" % (name,)]\n fn = metrics[\"%s_false_negatives\" % (name,)]\n del metrics[key]\n del metrics[\"%s_false_positives\" % (name,)]\n del metrics[\"%s_false_negatives\" % (name,)]\n\n\n\n precision = 1.* tp / np.maximum((tp + fp), 1e-6)\n recall = 1. 
* tp / np.maximum((tp + fn), 1e-6)\n f1 = 2.0 * precision * recall / np.maximum((precision + recall), 1e-6)\n\n support = tp + fn\n\n full_f1 = np.average(f1, weights=support) * 100.0\n full_recall = np.average(recall, weights=support) * 100.0\n full_precision = np.average(precision, weights=support) * 100.0\n\n total_f1 += full_f1\n total_recall += full_recall\n total_precision += full_precision\n total += 1\n if report_class_f1:\n print(\"F1 %s: %r\" % (name, full_f1))\n print(\"Name\\tF1\\tTP\\tFP\\tFN\")\n rows = zip([label for label, has_support in zip(objective[\"vocab\"],\n support > 0)\n if has_support],\n f1, tp, fp, fn)\n for val, f1_val, val_tp, val_fp, val_fn in rows:\n print(\"%s\\t%r\\t%d\\t%d\\t%d\" % (\n val, f1_val, val_tp, val_fp, val_fn))\n print(\"\")\n if total > 0:\n metrics[\"F1\"] = total_f1\n metrics[\"recall\"] = total_recall\n metrics[\"precision\"] = total_precision\n metrics[\"F1_total\"] = total\n metrics[\"recall_total\"] = total\n metrics[\"precision_total\"] = total\n\n\ndef accuracy(model, session, datasets, batch_size, train,\n report_metrics_per_axis, report_class_f1,\n callback=None,\n callback_period=None, writer=None):\n pbar = get_progress_bar(\"train\" if train else \"validation\", item=\"batches\")\n if not isinstance(datasets, dict):\n datasets = {'':datasets}\n all_metrics_agg = {}\n\n if callback is not None:\n if callback_period is None:\n raise ValueError(\"callback_period cannot be None if \"\n \"callback is used.\")\n else:\n callback_period = None\n\n if train:\n train_op = model.train_op\n else:\n train_op = model.noop\n is_training = model.is_training\n metrics = {\"nll\": model.nll, \"nll_total\": model.nll_total}\n summaries = []\n\n if not train:\n metric_iter = zip(\n model.objectives,\n model.token_correct,\n model.token_correct_total,\n model.sentence_correct,\n model.sentence_correct_total,\n model.true_positives,\n model.false_positives,\n model.false_negatives\n )\n for metric_vars in metric_iter:\n (\n objective,\n token_correct,\n token_correct_total,\n sentence_correct,\n sentence_correct_total,\n true_positives,\n false_positives,\n false_negatives\n ) = metric_vars\n name = objective[\"name\"]\n if report_metrics_per_axis:\n metrics[\"%s_token_correct\" % (name,)] = token_correct\n metrics[\"%s_token_correct_total\" % (name,)] = token_correct_total\n metrics[\"%s_sentence_correct\" % (name,)] = sentence_correct\n metrics[\"%s_sentence_correct_total\" % (name,)] = sentence_correct_total\n if true_positives is not None:\n metrics[\"%s_true_positives\" % (name,)] = true_positives\n metrics[\"%s_false_positives\" % (name,)] = false_positives\n metrics[\"%s_false_negatives\" % (name,)] = false_negatives\n metrics[\"token_correct\"] = model.token_correct_all\n metrics[\"token_correct_total\"] = model.token_correct_all_total\n metrics[\"sentence_correct\"] = model.sentence_correct_all\n metrics[\"sentence_correct_total\"] = model.sentence_correct_all_total\n summaries = []\n else:\n if writer is not None and model.train_summaries is not None:\n summaries = model.train_summaries\n\n metrics_values = [v for _, v in sorted(metrics.items())]\n metrics_names = [name for name, _ in sorted(metrics.items())]\n outputs_val = [train_op, model.global_step, summaries, metrics_values]\n for title, dataset in datasets.items():\n batches = iter_batches_single_threaded(\n model=model,\n dataset=dataset,\n batch_size=batch_size,\n train=train,\n pbar=pbar\n )\n metrics_agg = {}\n iteration = 0\n for feed_dict in batches:\n feed_dict[is_training] = 
train\n _, step, summary_out, outputs = session.run(outputs_val, feed_dict)\n if writer is not None:\n writer.add_summary(summary_out, step)\n for key, value in zip(metrics_names, outputs[:len(metrics_names)]):\n if key not in metrics_agg:\n metrics_agg[key] = value\n else:\n metrics_agg[key] += value\n iteration += 1\n if callback_period is not None and iteration % callback_period == 0:\n callback(iteration)\n\n if np.isnan(metrics_agg['nll']):\n print(\"loss is NaN.\", flush=True, file=sys.stderr)\n sys.exit(1)\n\n compute_f1(metrics_agg, model.objectives, report_class_f1)\n all_metrics_agg[title] = metrics_agg\n del batches\n return merge_all_metrics(all_metrics_agg)\n\n\ndef present_outcome(outcome, epoch, name):\n string_rows = []\n for k, v in sorted(outcome.items()):\n if \"total\" in k:\n continue\n else:\n total = outcome[k + \"_total\"]\n if total == 0:\n continue\n if \"correct\" in k:\n string_rows.append(\n [\n k,\n \"%.2f%%\" % (100.0 * v / total),\n \"(%d correct / %d)\" % (v, total)\n ]\n )\n else:\n string_rows.append(\n [\n k,\n \"%.3f\" % (v / total),\n \"\"\n ]\n )\n max_len_cols = [\n max(len(row[colidx]) for row in string_rows)\n for colidx in range(len(string_rows[0]))\n ] if len(string_rows) > 0 else []\n rows = []\n for row in string_rows:\n rows.append(\n \" \".join(\n [col + \" \" * (max_len_cols[colidx] - len(col))\n for colidx, col in enumerate(row)]\n )\n )\n return \"\\n\".join([\"Epoch {epoch}: {name}\".format(epoch=epoch, name=name)] + rows)\n\n\ndef print_outcome(outcome, objectives, epoch, step, name, logger=None):\n outcome_report = present_outcome(outcome, epoch, name)\n if logger is not None:\n log_outcome(logger, outcome, step, name)\n print(outcome_report)\n\n\n\nclass SequenceTagger(object):\n def __init__(self, path, device=\"gpu\", faux_cudnn=False, rebuild_graph=False):\n tf.reset_default_graph()\n session_conf = tf.ConfigProto(\n allow_soft_placement=True\n )\n self.session = tf.InteractiveSession(config=session_conf)\n with tf.device(device):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n self._model = SequenceModel.load(\n self.session,\n path,\n args=None,\n verbose=False,\n trainable=False,\n rebuild_graph=rebuild_graph,\n faux_cudnn=faux_cudnn\n )\n\n @property\n def objectives(self):\n return self._model.objectives\n\n def predict_proba(self, tokens):\n blank_labels = tuple(None for _ in self._model.objectives)\n batches = list(iter_batches_single_threaded(\n model=self._model,\n dataset=[\n (tokens, [blank_labels for t in tokens])\n ],\n batch_size=1,\n train=False,\n autoresize=False\n ))\n outputs = []\n batches[0][self._model.is_training] = False\n probs_out = self._model.predict_proba(\n self.session, batches[0]\n )\n return probs_out\n\n\n def predict_proba_sentences(self, sentences):\n blank_labels = tuple(None for _ in self._model.objectives)\n batches = iter_batches_single_threaded(\n model=self._model,\n dataset=[\n (sentence, [blank_labels for t in sentence])\n for sentence in sentences\n ],\n batch_size=min(256, len(sentences)),\n train=False,\n autoresize=False\n )\n for batch in batches:\n batch[self._model.is_training] = False\n yield self._model.predict_proba(\n self.session, batch\n )\n\n def predict_topk_sentences(self, sentences, k=5):\n blank_labels = tuple(None for _ in self._model.objectives)\n batches = iter_batches_single_threaded(\n model=self._model,\n dataset=[\n (sentence, [blank_labels for t in sentence])\n for sentence in sentences\n ],\n batch_size=min(256, 
len(sentences)),\n train=False,\n autoresize=False\n )\n for batch in batches:\n outputs = self._model.predict_proba(\n self.session, batch\n )\n named_outputs = {}\n for objective in self._model.objectives:\n obj_name = objective[\"name\"]\n tags, scores = outputs[obj_name]\n if objective[\"type\"] == \"crf\":\n named_outputs[obj_name] = [\n [(token, [objective[\"vocab\"][tag]], [score]) for token, tag in zip(tokens, tags)]\n for tokens, tags, score in zip(sentences, tags, scores)\n ]\n elif objective[\"type\"] == 'softmax':\n all_sent_scores = []\n\n for tokens, scores in zip(sentences, scores):\n sent_scores = []\n for token, token_scores in zip(tokens, scores):\n topk = np.argsort(token_scores)[::-1][:k]\n sent_scores.append(\n (\n token,\n [objective[\"vocab\"][idx] for idx in topk],\n [token_scores[idx] for idx in topk]\n )\n )\n all_sent_scores.append(sent_scores)\n named_outputs[obj_name] = all_sent_scores\n else:\n raise ValueError(\"unknown objective type %r.\" % (objective[\"type\"],))\n yield named_outputs\n\n def tag_sentences(self, sentences):\n if len(sentences) == 0:\n return {\n objective[\"name\"]: []\n for objective in self._model.objectives\n }\n blank_labels = tuple(None for _ in self._model.objectives)\n batches = list(iter_batches_single_threaded(\n self._model,\n [\n (sentence, [blank_labels for t in sentence])\n for sentence in sentences\n ],\n batch_size=min(256, len(sentences)),\n train=False,\n autoresize=False\n ))\n\n named_outputs = {}\n sentence_idx = 0\n\n for batch in batches:\n outputs = self._model.predict(self.session, batch)\n for objective in self._model.objectives:\n obj_name = objective[\"name\"]\n if obj_name not in named_outputs:\n named_outputs[obj_name] = []\n tags, scores = outputs[obj_name]\n nsentences = len(tags)\n if objective[\"type\"] == \"crf\":\n named_outputs[obj_name].extend([\n [(token, objective[\"vocab\"][tag], score) for token, tag in zip(tokens, tags)]\n for tokens, tags, score in zip(sentences[sentence_idx:sentence_idx+nsentences], tags, scores)\n ])\n elif objective[\"type\"] == 'softmax':\n named_outputs[obj_name].extend([\n [(token, objective[\"vocab\"][tag], score)\n for token, tag, score in zip(tokens, tags, scores)]\n for tokens, tags, scores in zip(sentences[sentence_idx:sentence_idx+nsentences], tags, scores)\n ])\n else:\n raise ValueError(\"unknown objective type %r.\" % (objective[\"type\"],))\n sentence_idx += nsentences\n\n return named_outputs\n\n\ndef count_number_of_parameters():\n return int(sum([np.prod(var.get_shape().as_list())\n for var in tf.trainable_variables()]))\n\n\nclass TestCallback(object):\n def __init__(self, model, session, dataset, epoch, args, logger):\n self.model = model\n self.session = session\n self.dataset = dataset\n self.epoch = epoch\n self.args = args\n self.logger = logger\n self.report_metrics_per_axis = args.report_metrics_per_axis\n self.report_class_f1 = args.report_class_f1\n\n def test(self, iteration):\n dev_outcome = accuracy(self.model, self.session, self.dataset, self.args.batch_size,\n train=False, report_metrics_per_axis=self.report_metrics_per_axis,\n report_class_f1=self.report_class_f1)\n print_outcome(dev_outcome, self.model.objectives,\n epoch=\"{}-{}\".format(self.epoch, iteration),\n step=self.session.run(self.model.global_step),\n name=\"validation\",\n logger=self.logger\n )\n if self.args.save_dir is not None:\n self.model.save(self.session, self.args.save_dir)\n\n\ndef compute_epoch(session, model, train_set,\n validation_set, test_callback, epoch,\n 
train_writer, test_writer,\n args):\n test_callback.epoch = epoch\n train_outcome = accuracy(model,\n session,\n train_set,\n args.batch_size,\n train=True,\n callback_period=args.test_every,\n writer=train_writer.writer if train_writer is not None else None,\n report_metrics_per_axis=args.report_metrics_per_axis,\n report_class_f1=args.report_class_f1,\n callback=test_callback.test)\n global_step = session.run(model.global_step)\n print_outcome(train_outcome,\n model.objectives,\n epoch=epoch,\n name=\"train\",\n step=global_step,\n logger=train_writer)\n dev_outcome = accuracy(\n model, session, validation_set, args.batch_size,\n train=False,\n report_metrics_per_axis=args.report_metrics_per_axis,\n report_class_f1=args.report_class_f1)\n print_outcome(dev_outcome,\n model.objectives,\n epoch=epoch,\n step=global_step,\n name=\"validation\",\n logger=test_writer)\n if args.save_dir is not None:\n model.save(session, args.save_dir)\n return dev_outcome\n\n\ndef main():\n args = parse_args()\n config = Config.load(args.config)\n validation_set = config.load_dataset(\"dev\", merge=False)\n session_conf = tf.ConfigProto(allow_soft_placement=True)\n\n with tf.Session(config=session_conf) as session, tf.device(args.device):\n if args.load_dir is not None:\n model = SequenceModel.load(session, args.load_dir,\n args=args, rebuild_graph=args.rebuild_graph, faux_cudnn=args.faux_cudnn,\n replace_to=args.name,\n replace_from=args.old_name)\n dev_outcome = accuracy(\n model, session, validation_set, args.batch_size, train=False,\n report_metrics_per_axis=args.report_metrics_per_axis,\n report_class_f1=args.report_class_f1)\n print_outcome(dev_outcome,\n model.objectives, 0,\n name=\"loaded validation\",\n step=session.run(model.global_step),\n logger=None)\n # dev_outcome = None\n if args.rebuild_graph and args.save_dir is not None:\n model.save(session, args.save_dir)\n train_set = config.load_dataset(\"train\")\n else:\n # load classes and index2word from a file.\n dev_outcome = None\n train_set = config.load_dataset(\"train\")\n model = SequenceModel(\n objectives=get_objectives(config.objectives, train_set),\n features=config.features,\n feature_index2words=get_feature_vocabs(config.features, train_set, [\"\"]),\n lr=args.lr,\n anneal_rate=args.anneal_rate,\n weight_noise=args.weight_noise,\n freeze_rate=args.freeze_rate,\n freeze_rate_anneal=args.freeze_rate_anneal,\n clip_norm=args.clip_norm,\n hidden_sizes=args.hidden_sizes,\n solver=args.solver,\n fused=args.fused,\n class_weights_normalize=args.class_weights_normalize,\n class_weights=args.class_weights,\n class_weights_clipval=args.class_weights_clipval,\n keep_prob=args.keep_prob,\n input_keep_prob=args.input_keep_prob,\n name=args.name,\n cudnn=args.cudnn,\n faux_cudnn=args.faux_cudnn,\n create_variables=True)\n session.run(tf.global_variables_initializer())\n if args.restore_input_features is not None:\n restore_session(\n session, args.restore_input_features,\n verbose=True,\n use_metagraph=False,\n only_features=True)\n\n print(\"Model has {} trainable parameters.\".format(count_number_of_parameters()), flush=True)\n best_dev_score = 0.0\n patience = 0\n best_epoch = 0\n best_outcome = None\n improvement_key = args.improvement_key\n if dev_outcome is not None:\n best_dev_score = dev_outcome[improvement_key]\n best_epoch = -1\n best_outcome = dev_outcome\n\n if args.save_dir is not None:\n train_writer = Logger(session, tf.summary.FileWriter(join(args.save_dir, \"train\")))\n test_writer = Logger(session, 
tf.summary.FileWriter(join(args.save_dir, \"test\")))\n else:\n train_writer, test_writer = None, None\n\n test_callback = TestCallback(model,\n session,\n validation_set,\n -1,\n args,\n logger=test_writer)\n if len(train_set) > 0:\n train_set.set_randomize(True)\n train_set.set_rng(model.rng)\n for epoch in range(args.max_epochs):\n dev_outcome = compute_epoch(\n session, model,\n train_set=train_set, validation_set=validation_set,\n epoch=epoch, test_callback=test_callback,\n train_writer=train_writer,\n test_writer=test_writer,\n args=args)\n\n if dev_outcome[improvement_key] > best_dev_score:\n best_dev_score = dev_outcome[improvement_key]\n best_epoch = epoch\n best_outcome = dev_outcome\n patience = 0\n if args.save_dir is not None:\n model.save(session, join(args.save_dir, \"best\"))\n else:\n patience += 1\n if patience >= args.max_patience:\n print(\"No improvements for {} epochs. Stopping.\".format(args.max_patience))\n break\n del dev_outcome\n print_outcome(\n best_outcome,\n model.objectives,\n epoch=best_epoch,\n name=\"validation-best\",\n step=session.run(model.global_step),\n logger=None)\n\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":16,"cells":{"python_code":{"kind":"string","value":"import numpy as np\nimport subprocess\nimport h5py\nimport ciseau\nfrom os.path import exists, splitext, join\nfrom wikidata_linker_utils.wikidata_ids import load_wikidata_ids\n\ndef count_examples(lines, comment, ignore_value, column_indices):\n example_length = 0\n has_labels = False\n found = 0\n for line in lines:\n if len(line) == 0 or (comment is not None and line.startswith(comment)):\n if example_length > 0 and has_labels:\n found += 1\n example_length = 0\n has_labels = False\n else:\n example_length += 1\n if not has_labels:\n cols = line.split(\"\\t\")\n if len(cols) > 1:\n if ignore_value is not None:\n for col_index in column_indices:\n if cols[col_index] != ignore_value:\n has_labels = True\n break\n\n else:\n has_labels = True\n if example_length > 0 and has_labels:\n found += 1\n return found\n\n\ndef retokenize_example(x, y):\n tokens = ciseau.tokenize(\" \".join(w for w in x),\n normalize_ascii=False)\n out_y = []\n regular_cursor = 0\n tokens_length_total = 0\n regular_length_total = len(x[regular_cursor]) + 1 if len(x) > 0 else 0\n if regular_cursor + 1 == len(x):\n regular_length_total -= 1\n for i in range(len(tokens)):\n tokens_length_total = tokens_length_total + len(tokens[i])\n while regular_length_total < tokens_length_total:\n regular_cursor += 1\n regular_length_total = regular_length_total + len(x[regular_cursor]) + 1\n if regular_cursor + 1 == len(x):\n regular_length_total -= 1\n out_y.append(y[regular_cursor])\n assert(regular_cursor + 1 == len(x)), \"error with %r\" % (x,)\n return ([tok.rstrip() for tok in tokens], out_y)\n\n\ndef convert_lines_to_examples(lines, comment, ignore_value,\n column_indices, x_column, empty_column,\n retokenize=False):\n examples = []\n x = []\n y = []\n for line in lines:\n if len(line) == 0 or (comment is not None and line.startswith(comment)):\n if len(x) > 0:\n if not all(row == empty_column for row in y):\n examples.append((x, y))\n x = []\n y = []\n else:\n cols = line.split(\"\\t\")\n x.append(cols[x_column])\n if len(cols) == 1:\n y.append(empty_column)\n else:\n if ignore_value is not None:\n y.append(\n tuple(\n cols[col_index] if col_index is not None and cols[col_index] != ignore_value else None\n for col_index in column_indices\n )\n )\n else:\n y.append(\n tuple(\n cols[col_index] if col_index is not 
None else None\n for col_index in column_indices\n )\n )\n if len(x) > 0 and not all(row == empty_column for row in y):\n examples.append((x, y))\n if retokenize:\n examples = [retokenize_example(x, y) for x, y in examples]\n return examples\n\n\ndef load_tsv(path, x_column, y_columns, objective_names, comment, ignore_value,\n retokenize):\n \"\"\"\"\n Deprecated method for loading a tsv file as a training/test set for a model.\n\n Arguments:\n ----------\n path: str, location of tsv file\n x_column: int\n y_columns: list, objectives in this file along with their column.\n (e.g. `y_columns=[{\"objective\": \"POS\", \"column\": 2}, ...])`)\n objective_names: name of all desired columns\n comment: line beginning indicating it's okay to skip\n ignore_value: label value that should be treated as missing\n retokenize: run tokenizer again.\n Returns\n -------\n list : examples loaded into memory\n\n Note: can use a lot of memory since entire file is loaded.\n \"\"\"\n objective2column = {col['objective']: col['column'] for col in y_columns}\n column_indices = [objective2column.get(name, None) for name in objective_names]\n empty_column = tuple(None for _ in objective_names)\n\n if all(col_index is None for col_index in column_indices):\n return []\n\n with open(path, \"rt\") as fin:\n lines = fin.read().splitlines()\n\n return convert_lines_to_examples(lines,\n ignore_value=ignore_value,\n empty_column=empty_column,\n x_column=x_column,\n column_indices=column_indices,\n comment=comment,\n retokenize=retokenize)\n\n\nclass RandomizableDataset(object):\n def set_rng(self, rng):\n self.rng = rng\n\n def set_randomize(self, randomize):\n self.randomize = randomize\n\n def set_ignore_y(self, ignore):\n self.ignore_y = ignore\n\nclass TSVDataset(RandomizableDataset):\n _fhandle = None\n _fhandle_position = 0\n _examples = None\n _example_indices = None\n _example_index = 0\n _eof = False\n ignore_y = False\n def __init__(self, path, x_column, y_columns, objective_names, comment, ignore_value,\n retokenize=False, chunksize=50000000, randomize=False, rng=None):\n \"\"\"\"\n Arguments:\n ----------\n path: str, location of tsv file\n x_column: int\n y_columns: list, objectives in this file along with their column.\n (e.g. 
`y_columns=[{\"objective\": \"POS\", \"column\": 2}, ...])`)\n objective_names: name of all desired columns\n comment: line beginning indicating it's okay to skip\n ignore_value: label value that should be treated as missing\n chunksize: how many bytes to read from the file at a time.\n rng: numpy RandomState\n retokenize: run tokenizer on x again.\n \"\"\"\n self.path = path\n self.randomize = randomize\n self.x_column = x_column\n self.y_columns = y_columns\n self.objective_names = objective_names\n self.comment = comment\n self.ignore_value = ignore_value\n self.retokenize = retokenize\n self.chunksize = chunksize\n if rng is None:\n rng = np.random.RandomState(0)\n self.rng = rng\n # column picking setup:\n objective2column = {col['objective']: col['column'] for col in y_columns}\n self.column_indices = [objective2column.get(name, None) for name in objective_names]\n self.empty_column = tuple(None for _ in objective_names)\n if all(col_index is None for col_index in self.column_indices):\n self.length = 0\n else:\n self._compute_length()\n\n def _signature(self):\n try:\n file_sha1sum = subprocess.check_output(\n [\"sha1sum\", self.path], universal_newlines=True\n ).split(\" \")[0]\n except FileNotFoundError:\n file_sha1sum = subprocess.check_output(\n [\"shasum\", self.path], universal_newlines=True\n ).split(\" \")[0]\n sorted_cols = list(\n map(\n str,\n sorted(\n [col for col in self.column_indices if col is not None]\n )\n )\n )\n return \"-\".join([file_sha1sum] + sorted_cols)\n\n def _compute_length(self):\n length_file = (\n splitext(self.path)[0] +\n \"-length-\" +\n self._signature() + \".txt\"\n )\n if exists(length_file):\n with open(length_file, \"rt\") as fin:\n total = int(fin.read())\n else:\n total = 0\n while True:\n total += self._count_examples()\n if self._eof:\n break\n with open(length_file, \"wt\") as fout:\n fout.write(str(total) + \"\\n\")\n self.length = total\n\n def __len__(self):\n return self.length\n\n def close(self):\n if self._fhandle is not None:\n self._fhandle.close()\n self._fhandle = None\n self._fhandle_position = 0\n self._eof = False\n self._examples = None\n self._example_indices = None\n\n def __del__(self):\n self.close()\n\n def _read_file_until_newline(self):\n if self._fhandle is None:\n self._fhandle = open(self.path, \"rb\")\n if self._eof:\n self._fhandle_position = 0\n self._fhandle.seek(0)\n self._eof = False\n\n read_chunk = None\n while True:\n new_read_chunk = self._fhandle.read(self.chunksize)\n if read_chunk is None:\n read_chunk = new_read_chunk\n else:\n read_chunk += new_read_chunk\n if len(new_read_chunk) < self.chunksize:\n del new_read_chunk\n self._fhandle_position += len(read_chunk)\n self._eof = True\n break\n else:\n del new_read_chunk\n newline_pos = read_chunk.rfind(b\"\\n\\n\")\n if newline_pos != -1:\n # move to last line end position (so that we don't get\n # half an example.)\n self._fhandle.seek(self._fhandle_position + newline_pos + 2)\n self._fhandle_position += newline_pos + 2\n read_chunk = read_chunk[:newline_pos]\n break\n return read_chunk\n\n def _count_examples(self):\n read_chunk = self._read_file_until_newline()\n return count_examples(\n read_chunk.decode(\"utf-8\").splitlines(),\n ignore_value=self.ignore_value,\n column_indices=self.column_indices,\n comment=self.comment\n )\n\n def _load_examples(self):\n read_chunk = self._read_file_until_newline()\n if self._examples is not None:\n del self._examples\n self._examples = convert_lines_to_examples(\n read_chunk.decode(\"utf-8\").splitlines(),\n 
ignore_value=self.ignore_value,\n empty_column=self.empty_column,\n x_column=self.x_column,\n column_indices=self.column_indices,\n comment=self.comment,\n retokenize=self.retokenize\n )\n self._example_indices = np.arange(len(self._examples))\n if self.randomize:\n # access loaded data randomly:\n self.rng.shuffle(self._example_indices)\n self._example_index = 0\n\n def __getitem__(self, index):\n \"\"\"Retrieve the next example (index is ignored)\"\"\"\n if index >= self.length:\n raise StopIteration()\n if self._example_indices is None or self._example_index == len(self._example_indices):\n self._load_examples()\n while len(self._examples) == 0:\n self._load_examples()\n if len(self._examples) > 0:\n break\n if self._eof:\n raise StopIteration()\n ex = self._examples[self._example_indices[self._example_index]]\n self._example_index += 1\n return ex\n\n def set_randomize(self, randomize):\n if randomize != self.randomize:\n self.randomize = randomize\n\n def close(self):\n if self._fhandle is not None:\n self._fhandle.close()\n self._fhandle = None\n\n\nclass OracleClassification(object):\n def __init__(self, classes, classification, path):\n self.classes = classes\n self.classification = classification\n self.path = path\n self.contains_other = self.classes[-1] == \"other\"\n\n def classify(self, index):\n return self.classification[index]\n\ndef load_oracle_classification(path):\n with open(join(path, \"classes.txt\"), \"rt\", encoding=\"UTF-8\") as fin:\n classes = fin.read().splitlines()\n classification = np.load(join(path, \"classification.npy\"))\n return OracleClassification(classes, classification, path)\n\n\n\nclass ClassificationHandler(object):\n def __init__(self, wikidata_path, classification_path):\n self.classification_path = classification_path\n _, self.name2index = load_wikidata_ids(wikidata_path, verbose=False)\n self.classifiers = {}\n\n def get_classifier(self, name):\n if name not in self.classifiers:\n self.classifiers[name] = load_oracle_classification(\n join(self.classification_path, name)\n )\n return self.classifiers[name]\n\n\nclass H5Dataset(RandomizableDataset):\n handle_open = False\n ignore_y = False\n _max_generated_example = 0\n _min_generated_example = 0\n def __init__(self, path, x_column, y_columns, objective_names,\n classifications, ignore_value, randomize=False, rng=None):\n self.x_column = str(x_column)\n self.y_columns = y_columns\n self.ignore_value = ignore_value\n self.objective_names = objective_names\n self.randomize = randomize\n if rng is None:\n rng = np.random.RandomState(0)\n self.rng = rng\n self._classifications = classifications\n self.handle = h5py.File(path, \"r\")\n self.path = path\n self.handle_open = True\n self.length = len(self.handle[self.x_column])\n self.chunksize = self.handle[self.x_column].chunks[0]\n self._example_indices = None\n objective2column = {\n col['objective']: (\n str(col['column']),\n self._classifications.get_classifier(col['classification'])\n ) for col in y_columns\n }\n if self.ignore_value is not None:\n for _, classifier in objective2column.values():\n if self.ignore_value in classifier.classes:\n classifier.classes[classifier.classes.index(self.ignore_value)] = None\n\n self.column2col_indices = {}\n for col_idx, name in enumerate(self.objective_names):\n if name not in objective2column:\n continue\n column, classifier = objective2column[name]\n if column not in self.column2col_indices:\n self.column2col_indices[column] = [(classifier, col_idx)]\n else:\n 
self.column2col_indices[column].append((classifier, col_idx))\n\n def close(self):\n if self.handle_open:\n self.handle.close()\n self.handle_open = False\n\n def __del__(self):\n self.close()\n\n def __len__(self):\n return self.length\n\n def _build_examples(self, index):\n x = [x_chunk.split(\"\\n\") for x_chunk in self.handle[self.x_column][index:index + self.chunksize]]\n y = [[[None for k in range(len(self.objective_names))] for j in range(len(x[i]))] for i in range(len(x))]\n if not self.ignore_y:\n for handle_column, col_content in self.column2col_indices.items():\n col_ids = [[self._classifications.name2index[name] if name != \"\" else None\n for name in y_chunk.split(\"\\n\")]\n for y_chunk in self.handle[handle_column][index:index + self.chunksize]]\n for i in range(len(col_ids)):\n for j, idx in enumerate(col_ids[i]):\n if idx is not None:\n for classifier, k in col_content:\n y[i][j][k] = classifier.classify(idx)\n\n return x, y\n\n def set_randomize(self, randomize):\n if self.randomize != randomize:\n self.randomize = randomize\n if self._max_generated_example != self._min_generated_example:\n self.xorder = np.arange(self._min_generated_example, self._max_generated_example)\n self.rng.shuffle(self.xorder)\n\n\n def __getitem__(self, index):\n if index >= len(self):\n raise StopIteration()\n if self.randomize:\n if self._example_indices is None or index == 0:\n self._example_indices = np.arange(0, len(self), self.chunksize)\n self.rng.shuffle(self._example_indices)\n # transformed index:\n index = (self._example_indices[index // self.chunksize] + (index % self.chunksize)) % len(self)\n\n if index < self._min_generated_example or index >= self._max_generated_example:\n self.x, self.y = self._build_examples(index)\n # store bounds of generated data:\n self._min_generated_example = index\n self._max_generated_example = index + len(self.x)\n\n if self.randomize:\n self.xorder = np.arange(self._min_generated_example, self._max_generated_example)\n self.rng.shuffle(self.xorder)\n if self.randomize:\n index = self.xorder[index - self._min_generated_example]\n return self.x[index - self._min_generated_example], self.y[index - self._min_generated_example]\n\nclass CombinedDataset(object):\n _which_dataset = None\n _dataset_counters = None\n def set_rng(self, rng):\n self.rng = rng\n for dataset in self.datasets:\n dataset.rng = rng\n\n def set_randomize(self, randomize):\n self.randomize = randomize\n for dataset in self.datasets:\n dataset.set_randomize(randomize)\n\n def set_ignore_y(self, ignore):\n for dataset in self.datasets:\n dataset.set_ignore_y(ignore)\n\n def close(self):\n for dataset in self.datasets:\n dataset.close()\n\n def _build_which_dataset(self):\n self._which_dataset = np.empty(self.length, dtype=np.int16)\n self._dataset_counters = np.zeros(len(self.datasets), dtype=np.int64)\n offset = 0\n for index, dataset in enumerate(self.datasets):\n # ensure each dataset is seen as much as its content\n # says:\n self._which_dataset[offset:offset + len(dataset)] = index\n offset += len(dataset)\n\n def __getitem__(self, index):\n if index == 0:\n if self.randomize:\n # visit datasets in random orders:\n self.rng.shuffle(self._which_dataset)\n self._dataset_counters[:] = 0\n which = self._which_dataset[index]\n idx = self._dataset_counters[which]\n self._dataset_counters[which] += 1\n return self.datasets[which][idx]\n\n def __init__(self, datasets, rng=None, randomize=False):\n self.datasets = datasets\n if rng is None:\n rng = np.random.RandomState(0)\n 
self.set_rng(rng)\n self.set_randomize(randomize)\n self.length = sum(len(dataset) for dataset in datasets)\n self._build_which_dataset()\n\n def __len__(self):\n return self.length\n"}}},{"rowIdx":17,"cells":{"python_code":{"kind":"string","value":"import queue\nimport threading\n\n\ndef prefetch_generator(generator, to_fetch=10):\n q = queue.Queue(maxsize=to_fetch)\n\n def thread_worker(queue, gen):\n for val in gen:\n queue.put(val)\n queue.put(None)\n\n t = threading.Thread(target=thread_worker, args=(q, generator))\n some_exception = None\n try:\n t.start()\n while True:\n job = q.get()\n if job is None:\n break\n yield job\n del job\n # print(\"q.qsize() %d\" % (q.qsize(),), flush=True)\n except Exception as e:\n some_exception = e\n finally:\n if some_exception is not None:\n raise some_exception\n t.join()\n del t\n"}}},{"rowIdx":18,"cells":{"python_code":{"kind":"string","value":"\"\"\"\nObtain a learnability score for each type axis.\nTrains a binary classifier for each type and\ngets its AUC.\n\nUsage\n-----\n\n```\npython3 evaluate_learnability.py sample_data.tsv --out report.json --wikidata /path/to/wikidata\n```\n\n\"\"\"\nimport json\nimport time\nimport argparse\n\nfrom os.path import dirname, realpath, join\n\nSCRIPT_DIR = dirname(realpath(__file__))\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom sklearn import metrics\nfrom collections import Counter\n\nfrom wikidata_linker_utils.type_collection import TypeCollection, offset_values_mask\nimport wikidata_linker_utils.wikidata_properties as wprop\nfrom wikidata_linker_utils.progressbar import get_progress_bar\nfrom generator import prefetch_generator\n\n\ndef learnability(collection, lines, mask, truth_tables, qids, id2pos,\n epochs=5, batch_size=128, max_dataset_size=-1,\n max_vocab_size=10000, hidden_sizes=None, lr=0.001,\n window_size=5, input_size=5, keep_prob=0.5,\n verbose=True):\n if hidden_sizes is None:\n hidden_sizes = []\n tf.reset_default_graph()\n dset = list(get_windows(lines, mask, window_size, truth_tables, lambda x: id2pos[x]))\n if max_dataset_size > 0:\n dset = dset[:max_dataset_size]\n\n pos_num = np.zeros(len(qids))\n for _, labels in dset:\n pos_num += labels\n neg_num = np.ones(len(qids)) * len(dset) - pos_num\n pos_weight = (pos_num / (pos_num + neg_num))[None, :]\n\n vocab = [\"\"] + [w for w, _ in Counter(lines[:, 0]).most_common(max_vocab_size)]\n inv_vocab = {w: k for k, w in enumerate(vocab)}\n with tf.device(\"gpu\"):\n W = tf.get_variable(\n \"W\", shape=[len(vocab), input_size],\n dtype=tf.float32,\n initializer=tf.random_normal_initializer()\n )\n indices = tf.placeholder(tf.int32, [None, window_size*2], name=\"indices\")\n labels = tf.placeholder(tf.bool, [None, len(qids)], name=\"label\")\n keep_prob_pholder = tf.placeholder_with_default(keep_prob, [])\n lookup = tf.reshape(tf.nn.embedding_lookup(\n W, indices\n ), [tf.shape(indices)[0], input_size * window_size*2])\n lookup = tf.nn.dropout(lookup, keep_prob_pholder)\n hidden = lookup\n for layer_idx, hidden_size in enumerate(hidden_sizes):\n hidden = tf.contrib.layers.fully_connected(\n hidden,\n num_outputs=hidden_size,\n scope=\"FC%d\" % (layer_idx,)\n )\n out = tf.contrib.layers.fully_connected(\n hidden,\n num_outputs=len(qids),\n activation_fn=None)\n cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=out, labels=tf.cast(labels, tf.float32))\n cost = tf.where(tf.is_finite(cost), cost, tf.zeros_like(cost))\n cost_mean = tf.reduce_mean(\n (tf.cast(labels, tf.float32) * 1.0 / (pos_weight)) * cost +\n 
(tf.cast(tf.logical_not(labels), tf.float32) * 1.0 / (1.0 - pos_weight)) * cost\n )\n cost_sum = tf.reduce_sum(cost)\n size = tf.shape(indices)[0]\n noop = tf.no_op()\n correct = tf.reduce_sum(tf.cast(tf.equal(tf.greater_equal(out, 0), labels), tf.int32), 0)\n out_activated = tf.sigmoid(out)\n train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost_mean)\n session = tf.InteractiveSession()\n session.run(tf.global_variables_initializer())\n\n def accuracy(dataset, batch_size, train):\n epoch_correct = np.zeros(len(qids))\n epoch_nll = 0.0\n epoch_total = np.zeros(len(qids))\n op = train_op if train else noop\n all_labels = []\n all_preds = []\n for i in get_progress_bar(\"train\" if train else \"dev\", item=\"batches\")(range(0, len(dataset), batch_size)):\n batch_labels = [label for _, label in dataset[i:i+batch_size]]\n csum, corr, num_examples, preds, _ = session.run([cost_sum, correct, size, out_activated, op],\n feed_dict={\n indices: [[inv_vocab.get(w, 0) for w in window] for window, _ in dataset[i:i+batch_size]],\n labels: batch_labels,\n keep_prob_pholder: keep_prob if train else 1.0\n })\n epoch_correct += corr\n epoch_nll += csum\n epoch_total += num_examples\n all_labels.extend(batch_labels)\n all_preds.append(preds)\n return (epoch_nll, epoch_correct, epoch_total, np.vstack(all_preds), np.vstack(all_labels))\n\n\n dataset_indices = np.arange(len(dset))\n train_indices = dataset_indices[:int(0.8 * len(dset))]\n dev_indices = dataset_indices[int(0.8 * len(dset)):]\n train_dataset = [dset[idx] for idx in train_indices]\n dev_dataset = [dset[idx] for idx in dev_indices]\n learnability = []\n for epoch in range(epochs):\n t0 = time.time()\n train_epoch_nll, train_epoch_correct, train_epoch_total, _, _ = accuracy(train_dataset, batch_size, train=True)\n t1 = time.time()\n if verbose:\n print(\"epoch %d train: %.3f%% in %.3fs\" % (\n epoch, 100.0 * train_epoch_correct.sum() / train_epoch_total.sum(), t1 - t0),)\n t0 = time.time()\n dev_epoch_nll, dev_epoch_correct, dev_epoch_total, pred, y = accuracy(dev_dataset, batch_size, train=False)\n t1 = time.time()\n learnability = []\n for qidx in range(len(qids)):\n try:\n fpr, tpr, thresholds = metrics.roc_curve(y[:,qidx], pred[:,qidx], pos_label=1)\n auc = metrics.auc(fpr, tpr)\n if not np.isnan(auc):\n average_precision_score = metrics.average_precision_score(y[:,qidx], pred[:,qidx])\n learnability.append((qids[qidx],\n auc,\n average_precision_score,\n 100.0 * dev_epoch_correct[qidx] / dev_epoch_total[qidx],\n int(pos_num[qidx]),\n int(neg_num[qidx])))\n except ValueError:\n continue\n if verbose:\n learnability = sorted(learnability, key=lambda x: x[1], reverse=True)\n print(\"epoch %d dev: %.3fs\" % (epoch, t1-t0))\n for qid, auc, average_precision_score, acc, pos, neg in learnability:\n print(\" %r AUC: %.3f, APS: %.3f, %.3f%% positive: %d, negative: %d\" % (\n collection.ids[qid], auc, average_precision_score, acc, pos, neg))\n print(\"\")\n return learnability\n\n\ndef generate_training_data(collection, path):\n with open(path, \"rt\") as fin:\n lines = [row.split(\"\\t\")[:2] for row in fin.read().splitlines()]\n lines_arr = np.zeros((len(lines), 2), dtype=np.object)\n mask = np.zeros(len(lines), dtype=np.bool)\n for i, l in enumerate(lines):\n lines_arr[i, 0] = l[0]\n if len(l) > 1:\n lines_arr[i, 1] = collection.name2index[l[1]]\n mask[i] = True\n return lines_arr, mask\n\n\ndef get_proposal_sets(collection, article_ids, seed):\n np.random.seed(seed)\n relation = collection.relation(wprop.CATEGORY_LINK)\n\n relation_mask 
= offset_values_mask(relation.values, relation.offsets, article_ids)\n counts = np.bincount(relation.values[relation_mask])\n is_fp = collection.relation(wprop.FIXED_POINTS).edges() > 0\n is_fp = is_fp[:counts.shape[0]]\n counts = counts * is_fp\n topfields_fp = np.argsort(counts)[::-1][:(counts > 0).sum()]\n relation = collection.relation(wprop.INSTANCE_OF)\n\n relation_mask = offset_values_mask(relation.values, relation.offsets, article_ids)\n counts = np.bincount(relation.values[relation_mask])\n topfields_instance_of = np.argsort(counts)[::-1][:(counts > 0).sum()]\n\n np.random.shuffle(topfields_instance_of)\n np.random.shuffle(topfields_fp)\n\n return [(topfields_instance_of, wprop.INSTANCE_OF), (topfields_fp, wprop.CATEGORY_LINK)] \n\n\ndef build_truth_tables(collection, lines, qids, relation_name):\n truth_tables = []\n all_ids = list(sorted(set(lines[:, 1])))\n id2pos = {idx: pos for pos, idx in enumerate(all_ids)}\n for qid in qids:\n truth_tables.append(collection.satisfy([relation_name], [qid])[all_ids])\n collection.reset_cache()\n truth_tables = np.stack(truth_tables, axis=1)\n qid_sums = truth_tables.sum(axis=0)\n kept_qids = []\n kept_dims = []\n for i, (qid, qid_sum) in enumerate(zip(qids, qid_sums)):\n if qid_sum != 0 and qid_sum != truth_tables.shape[0]:\n kept_qids.append(qid)\n kept_dims.append(i)\n truth_tables = truth_tables[:, kept_dims]\n return truth_tables, kept_qids, id2pos\n\n\ndef get_windows(lines, mask, window, truth_table, id_mapper):\n for i in np.where(mask)[0]:\n if i >= window and i < len(lines) - window:\n yield (lines[max(0, i - window):i + window, 0],\n truth_table[id_mapper(lines[i, 1])])\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset\", type=str, required=True)\n parser.add_argument(\"--batch_size\", type=int, default=128)\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--max_epochs\", type=int, default=2)\n parser.add_argument(\"--max_vocab_size\", type=int, default=10000)\n parser.add_argument(\"--simultaneous_fields\", type=int, default=512)\n parser.add_argument(\"--window_size\", type=int, default=5)\n parser.add_argument(\"--input_size\", type=int, default=5)\n parser.add_argument(\"--wikidata\", type=str, required=True)\n parser.add_argument(\"--out\", type=str, required=True)\n return parser.parse_args()\n\n\ndef generate_truth_tables(collection, lines_arr, proposal_sets, simultaneous_fields):\n for topfields, relation_name in proposal_sets:\n for i in range(0, len(topfields), simultaneous_fields):\n truth_tables, qids, id2pos = build_truth_tables(\n collection,\n lines_arr,\n qids=topfields[i:i+simultaneous_fields],\n relation_name=relation_name)\n yield (topfields[i:i+simultaneous_fields],\n relation_name,\n truth_tables,\n qids,\n id2pos)\n\n\ndef main():\n args = parse_args()\n collection = TypeCollection(args.wikidata, num_names_to_load=0)\n collection.load_blacklist(join(dirname(SCRIPT_DIR), \"extraction\", \"blacklist.json\"))\n lines_arr, mask = generate_training_data(collection, args.dataset)\n article_ids = np.array(list(set(lines_arr[:, 1])), dtype=np.int32)\n proposal_sets = get_proposal_sets(collection, article_ids, args.seed)\n report = []\n total = sum(len(topfields) for topfields, _ in proposal_sets)\n seen = 0\n t0 = time.time()\n data_source = generate_truth_tables(collection, lines_arr, proposal_sets,\n args.simultaneous_fields)\n\n for topfields, relation_name, truth_tables, qids, id2pos in prefetch_generator(data_source):\n # for each of these 
properties and given relation\n # construct the truth table for each item and discover\n # their 'learnability':\n seen += len(topfields)\n field_auc_scores = learnability(\n collection,\n lines_arr,\n mask,\n qids=qids,\n truth_tables=truth_tables,\n id2pos=id2pos,\n batch_size=args.batch_size,\n epochs=args.max_epochs,\n input_size=args.input_size,\n window_size=args.window_size,\n max_vocab_size=args.max_vocab_size,\n verbose=True)\n for qid, auc, average_precision_score, correct, pos, neg in field_auc_scores:\n report.append(\n {\n \"qid\": collection.ids[qid],\n \"auc\": auc,\n \"average_precision_score\": average_precision_score,\n \"correct\": correct,\n \"relation\": relation_name,\n \"positive\": pos,\n \"negative\": neg\n }\n )\n with open(args.out, \"wt\") as fout:\n json.dump(report, fout)\n t1 = time.time()\n speed = seen / (t1 - t0)\n print(\"AUC obtained for %d / %d items (%.3f items/s)\" % (seen, total, speed), flush=True)\n\n\nif __name__ == \"__main__\":\n main()\n\n"}}},{"rowIdx":19,"cells":{"python_code":{"kind":"string","value":"import numpy as np\nimport string\nfrom dataset import TSVDataset, H5Dataset, CombinedDataset\nfrom generator import prefetch_generator\n\ndef word_dropout(inputs, rng, keep_prob):\n inputs_ndim = inputs.ndim\n mask_shape = [len(inputs)] + [1] * (inputs_ndim - 1)\n return (\n inputs *\n (\n rng.random_sample(size=mask_shape) <\n keep_prob\n )\n ).astype(inputs.dtype)\n\n\ndef extract_feat(feat):\n if feat[\"type\"] == \"word\":\n return lambda x: x\n elif feat[\"type\"] == \"suffix\":\n length = feat[\"length\"]\n return lambda x: x[-length:]\n elif feat[\"type\"] == \"prefix\":\n length = feat[\"length\"]\n return lambda x: x[:length]\n elif feat[\"type\"] == \"digit\":\n return lambda x: x.isdigit()\n elif feat[\"type\"] == \"punctuation_count\":\n return lambda x: sum(c in string.punctuation for c in x)\n elif feat[\"type\"] == \"uppercase\":\n return lambda x: len(x) > 0 and x[0].isupper()\n elif feat[\"type\"] == \"character-conv\":\n max_size = feat[\"max_word_length\"]\n def extract(x):\n x_bytes = x.encode(\"utf-8\")\n if len(x_bytes) > max_size:\n return np.concatenate(\n [\n [255],\n list(x_bytes[:max_size]),\n [256]\n ]\n )\n else:\n return np.concatenate(\n [\n [255],\n list(x_bytes),\n [256],\n -np.ones(max_size - len(x_bytes), dtype=np.int32),\n ]\n )\n return extract\n else:\n raise ValueError(\"unknown feature %r.\" % (feat,))\n\n\ndef extract_word_keep_prob(feat):\n return feat.get(\"word_keep_prob\", 0.85)\n\n\ndef extract_case_keep_prob(feat):\n return feat.get(\"case_keep_prob\", 0.95)\n\n\ndef extract_s_keep_prob(feat):\n return feat.get(\"s_keep_prob\", 0.95)\n\n\ndef apply_case_s_keep_prob(feat, rng, keep_case, keep_s):\n if len(feat) == 0:\n return feat\n if keep_case < 1 and feat[0].isupper() and rng.random_sample() >= keep_case:\n feat = feat.lower()\n if keep_s < 1 and feat.endswith(\"s\") and rng.random_sample() >= keep_s:\n feat = feat[:-1]\n return feat\n\n\ndef requires_character_convolution(feat):\n return feat[\"type\"] in {\"character-conv\"}\n\n\ndef requires_vocab(feat):\n return feat[\"type\"] in {\"word\", \"suffix\", \"prefix\"}\n\n\ndef feature_npdtype(feat):\n if requires_vocab(feat):\n return np.int32\n elif feat[\"type\"] in {\"digit\", \"punctuation_count\", \"uppercase\"}:\n return np.float32\n elif requires_character_convolution(feat):\n return np.int32\n else:\n raise ValueError(\"unknown feature %r.\" % (feat,))\n\n\ndef get_vocabs(dataset, max_vocabs, extra_words=None):\n index2words = [[] for i 
in range(len(max_vocabs))]\n occurrences = [{} for i in range(len(max_vocabs))]\n for els in dataset:\n for el, index2word, occurrence in zip(els, index2words, occurrences):\n if el not in occurrence:\n index2word.append(el)\n occurrence[el] = 1\n else:\n occurrence[el] += 1\n index2words = [\n sorted(index2word, key=lambda x: occurrence[x], reverse=True)\n for index2word, occurrence in zip(index2words, occurrences)\n ]\n index2words = [\n index2word[:max_vocab] if max_vocab > 0 else index2word\n for index2word, max_vocab in zip(index2words, max_vocabs)\n ]\n if extra_words is not None:\n index2words = [\n extra_words + index2word for index2word in index2words\n ]\n return index2words\n\n\ndef get_feature_vocabs(features, dataset, extra_words=None):\n out, feats_needing_vocab, feats_with_vocabs, vocabs = [], [], [], []\n if hasattr(dataset, \"set_ignore_y\"):\n dataset.set_ignore_y(True)\n try:\n for feat in features:\n if requires_vocab(feat):\n if feat.get(\"path\") is not None:\n with open(feat[\"path\"], \"rt\") as fin:\n index2word = fin.read().splitlines()\n if feat.get(\"max_vocab\", -1) > 0:\n index2word = index2word[:feat[\"max_vocab\"]]\n if extra_words is not None:\n index2word = extra_words + index2word\n feats_with_vocabs.append(index2word)\n else:\n feats_needing_vocab.append(feat)\n if len(feats_needing_vocab) > 0:\n extractors = tuple(\n [extract_feat(feat) for feat in feats_needing_vocab]\n )\n vocabs = get_vocabs(\n ((extractor(w) for extractor in extractors)\n for x, _ in dataset for w in x),\n max_vocabs=[feat.get(\"max_vocab\", -1) for feat in feats_needing_vocab],\n extra_words=extra_words\n )\n vocab_feature_idx = 0\n preexisting_vocab_feature_idx = 0\n for feat in features:\n if requires_vocab(feat):\n if feat.get(\"path\") is not None:\n out.append(feats_with_vocabs[preexisting_vocab_feature_idx])\n preexisting_vocab_feature_idx += 1\n else:\n out.append(vocabs[vocab_feature_idx])\n vocab_feature_idx+=1\n else:\n out.append(None)\n finally:\n if hasattr(dataset, \"set_ignore_y\"):\n dataset.set_ignore_y(False)\n return out\n\n\ndef pad_arrays_into_array(arrays, padding):\n out_ndim = arrays[0].ndim + 1\n out_shape = [0] * out_ndim\n out_shape[0] = len(arrays)\n for arr in arrays:\n for dim_idx in range(arr.ndim):\n out_shape[1 + dim_idx] = max(out_shape[1 + dim_idx], arr.shape[dim_idx])\n out = np.empty(out_shape, dtype=arrays[0].dtype)\n out.fill(padding)\n for arr_idx, array in enumerate(arrays):\n arr_slice = [arr_idx]\n for dim_idx in range(arr.ndim):\n arr_slice.append(slice(0, array.shape[dim_idx]))\n arr_slice = tuple(arr_slice)\n out[arr_slice] = array\n return out\n\n\ndef build_objective_mask(label_sequence, objective_idx, objective_type):\n if objective_type == 'crf':\n if len(label_sequence) == 0 or label_sequence[0][objective_idx] is None:\n return np.array(False, dtype=np.bool)\n else:\n return np.array(True, dtype=np.bool)\n elif objective_type == 'softmax':\n return np.array(\n [w[objective_idx] is not None for w in label_sequence], dtype=np.bool\n )\n else:\n raise ValueError(\n \"unknown objective type %r.\" % (objective_type,)\n )\n\n\ndef allocate_shrunk_batches(max_length, batch_size, lengths):\n typical_indices = max_length * batch_size\n i = 0\n ranges = []\n while i < len(lengths):\n j = i + 1\n current_batch_size = 1\n longest_ex = lengths[j - 1]\n while j < len(lengths) and j - i < batch_size:\n # can grow?\n new_batch_size = current_batch_size + 1\n new_j = j + 1\n if max(longest_ex, lengths[new_j - 1]) * new_batch_size < 
typical_indices:\n j = new_j\n longest_ex = max(longest_ex, lengths[new_j - 1])\n current_batch_size = new_batch_size\n else:\n break\n ranges.append((i, j))\n i = j\n return ranges\n\n\ndef convert_label_to_index(label, label2index):\n if label is None:\n return 0\n if isinstance(label, str):\n return label2index[label]\n return label\n\n\nclass Batchifier(object):\n def __init__(self, rng, feature_word2index, objective_types, label2index,\n fused, sequence_lengths, labels, labels_mask,\n input_placeholders, features, dataset, batch_size, train,\n autoresize=True, max_length=100):\n assert(batch_size > 0), (\n \"batch size must be strictly positive (got %r).\" % (batch_size,)\n )\n # dictionaries, strings defined by model:\n self.objective_types = objective_types\n self.label2index = label2index\n self.feature_word2index = feature_word2index\n self.rng = rng\n self.fused = fused\n\n # tf placeholders:\n self.sequence_lengths = sequence_lengths\n self.labels = labels\n self.labels_mask = labels_mask\n self.input_placeholders = input_placeholders\n\n self.dataset = dataset\n self.batch_size = batch_size\n self.train = train\n\n self.dataset_is_lazy = isinstance(dataset, (TSVDataset, H5Dataset, CombinedDataset))\n self.autoresize = autoresize\n self.max_length = max_length\n\n indices = np.arange(len(dataset))\n\n if train:\n if self.dataset_is_lazy:\n dataset.set_rng(rng)\n dataset.set_randomize(True)\n elif isinstance(dataset, list):\n rng.shuffle(indices)\n self.batch_indices = []\n if self.autoresize and not self.dataset_is_lazy:\n ranges = allocate_shrunk_batches(\n max_length=self.max_length,\n batch_size=self.batch_size,\n lengths=[len(dataset[indices[i]][0]) for i in range(len(indices))]\n )\n for i, j in ranges:\n self.batch_indices.append(indices[i:j])\n else:\n for i in range(0, len(indices), self.batch_size):\n self.batch_indices.append(indices[i:i + self.batch_size])\n self.extractors = [\n (extract_feat(feat), requires_vocab(feat), feature_npdtype(feat),\n extract_word_keep_prob(feat), extract_case_keep_prob(feat), extract_s_keep_prob(feat))\n for feat in features\n ]\n\n def generate_batch(self, examples):\n X = [[] for i in range(len(self.extractors))]\n Y = []\n Y_mask = []\n for ex, label in examples:\n for idx, (extractor, uses_vocab, dtype, word_keep_prob, case_keep_prob, s_keep_prob) in enumerate(self.extractors):\n if self.train and (case_keep_prob < 1 or s_keep_prob < 1):\n ex = [apply_case_s_keep_prob(w, self.rng, case_keep_prob, s_keep_prob) for w in ex]\n if uses_vocab:\n word_feats = np.array(\n [self.feature_word2index[idx].get(extractor(w), 0) for w in ex],\n dtype=dtype\n )\n else:\n word_feats = np.array([extractor(w) for w in ex], dtype=dtype)\n if self.train and word_keep_prob < 1:\n word_feats = word_dropout(\n word_feats, self.rng, word_keep_prob\n )\n X[idx].append(word_feats)\n Y.append(\n tuple(\n np.array([convert_label_to_index(w[objective_idx], label2index)\n for w in label], dtype=np.int32)\n for objective_idx, label2index in enumerate(self.label2index)\n )\n )\n\n Y_mask.append(\n tuple(\n build_objective_mask(label, objective_idx, objective_type)\n for objective_idx, objective_type in enumerate(self.objective_types)\n )\n )\n sequence_lengths = np.array([len(x) for x in X[0]], dtype=np.int32)\n X = [pad_arrays_into_array(x, -1) for x in X]\n Y = [\n pad_arrays_into_array([row[objective_idx] for row in Y], 0)\n for objective_idx in range(len(self.objective_types))\n ]\n Y_mask = [\n pad_arrays_into_array([row[objective_idx] for row in Y_mask], 
0.0)\n for objective_idx in range(len(self.objective_types))\n ]\n feed_dict = {\n self.sequence_lengths: sequence_lengths\n }\n if self.fused:\n feed_dict[self.labels[0]] = np.stack([y.T for y in Y], axis=-1)\n feed_dict[self.labels_mask[0]] = np.stack([y.T for y in Y_mask], axis=-1)\n else:\n for y, placeholder in zip(Y, self.labels):\n feed_dict[placeholder] = y.T\n for y, placeholder in zip(Y_mask, self.labels_mask):\n feed_dict[placeholder] = y.T\n for idx, x in enumerate(X):\n feed_dict[self.input_placeholders[idx]] = x.swapaxes(0, 1)\n return feed_dict\n\n def as_list(self):\n return list(self.iter_batches())\n\n def iter_batches(self, pbar=None):\n gen = range(len(self.batch_indices))\n if pbar is not None:\n pbar.max_value = len(self.batch_indices)\n pbar.value = 0\n gen = pbar(gen)\n if self.autoresize and self.dataset_is_lazy:\n for idx in gen:\n examples = [self.dataset[ex] for ex in self.batch_indices[idx]]\n ranges = allocate_shrunk_batches(\n max_length=self.max_length,\n batch_size=self.batch_size,\n lengths=[len(ex[0]) for ex in examples]\n )\n for i, j in ranges:\n yield self.generate_batch(examples[i:j])\n else:\n for idx in gen:\n yield self.generate_batch(\n [self.dataset[ex] for ex in self.batch_indices[idx]]\n )\n\n\ndef allocate_shrunk_batches(max_length, batch_size, lengths):\n typical_indices = max_length * batch_size\n i = 0\n ranges = []\n while i < len(lengths):\n j = i + 1\n current_batch_size = 1\n longest_ex = lengths[j - 1]\n while j < len(lengths) and j - i < batch_size:\n # can grow?\n new_batch_size = current_batch_size + 1\n new_j = j + 1\n if max(longest_ex, lengths[new_j - 1]) * new_batch_size < typical_indices:\n j = new_j\n longest_ex = max(longest_ex, lengths[new_j - 1])\n current_batch_size = new_batch_size\n else:\n break\n ranges.append((i, j))\n i = j\n return ranges\n\n\n\ndef batch_worker(rng,\n features,\n feature_word2index,\n objective_types,\n label2index,\n fused,\n sequence_lengths,\n labels,\n labels_mask,\n input_placeholders,\n autoresize,\n train,\n batch_size,\n max_length,\n dataset,\n pbar,\n batch_queue,\n death_event):\n batchifier = Batchifier(\n rng=rng,\n features=features,\n feature_word2index=feature_word2index,\n objective_types=objective_types,\n label2index=label2index,\n fused=fused,\n sequence_lengths=sequence_lengths,\n labels=labels,\n labels_mask=labels_mask,\n input_placeholders=input_placeholders,\n autoresize=autoresize,\n train=train,\n batch_size=batch_size,\n max_length=max_length,\n dataset=dataset\n )\n for batch in batchifier.iter_batches(pbar=pbar):\n if death_event.is_set():\n break\n batch_queue.put(batch)\n if not death_event.is_set():\n batch_queue.put(None)\n\n\ndef range_size(start, size):\n return [i for i in range(start, start + size)]\n\n\nclass ProcessHolder(object):\n def __init__(self, process, death_event, batch_queue):\n self.process = process\n self.batch_queue = batch_queue\n self.death_event = death_event\n\n def close(self):\n self.death_event.set()\n try:\n self.batch_queue.close()\n while True:\n self.batch_queue.get_nowait()\n except Exception as e:\n pass\n self.process.terminate()\n self.process.join()\n\n def __del__(self):\n self.close()\n\n\ndef iter_batches_single_threaded(model,\n dataset,\n batch_size,\n train,\n autoresize=True,\n max_length=100,\n pbar=None):\n tensorflow_placeholders = [model.sequence_lengths] + model.labels + model.labels_mask + model.input_placeholders\n labels_start = 1\n labels_mask_start = labels_start + len(model.labels)\n placeholder_start = 
labels_mask_start + len(model.labels_mask)\n batchifier = Batchifier(\n rng=model.rng,\n features=model.features,\n feature_word2index=model.feature_word2index,\n objective_types=[obj[\"type\"] for obj in model.objectives],\n label2index=model.label2index,\n fused=model.fused,\n sequence_lengths=0,\n labels=range_size(labels_start, len(model.labels)),\n labels_mask=range_size(labels_mask_start, len(model.labels_mask)),\n input_placeholders=range_size(placeholder_start, len(model.input_placeholders)),\n autoresize=autoresize,\n train=train,\n batch_size=batch_size,\n max_length=max_length,\n dataset=dataset\n )\n for batch in prefetch_generator(batchifier.iter_batches(pbar=pbar), to_fetch=100):\n feed_dict = {}\n for idx, key in enumerate(tensorflow_placeholders):\n feed_dict[key] = batch[idx]\n yield feed_dict\n\n\ndef iter_batches(model,\n dataset,\n batch_size,\n train,\n autoresize=True,\n max_length=100,\n pbar=None):\n import multiprocessing\n batch_queue = multiprocessing.Queue(maxsize=10)\n tensorflow_placeholders = [model.sequence_lengths] + model.labels + model.labels_mask + model.input_placeholders\n labels_start = 1\n labels_mask_start = labels_start + len(model.labels)\n placeholder_start = labels_mask_start + len(model.labels_mask)\n death_event = multiprocessing.Event()\n batch_process = ProcessHolder(multiprocessing.Process(\n target=batch_worker,\n daemon=True,\n args=(\n model.rng,\n model.features,\n model.feature_word2index,\n [obj[\"type\"] for obj in model.objectives],\n model.label2index,\n model.fused,\n 0,\n range_size(labels_start, len(model.labels)),\n range_size(labels_mask_start, len(model.labels_mask)),\n range_size(placeholder_start, len(model.input_placeholders)),\n autoresize,\n train,\n batch_size,\n max_length,\n dataset,\n pbar,\n batch_queue,\n death_event\n )\n ), death_event, batch_queue)\n batch_process.process.name = \"iter_batches\"\n batch_process.process.start()\n while True:\n batch = batch_queue.get()\n if batch is None:\n break\n else:\n feed_dict = {}\n for idx, key in enumerate(tensorflow_placeholders):\n feed_dict[key] = batch[idx]\n yield feed_dict\n del batch\n"}}},{"rowIdx":20,"cells":{"python_code":{"kind":"string","value":"import distutils.ccompiler\nimport distutils.sysconfig\nimport re\nimport numpy as np\nimport sys\nimport subprocess\nfrom setuptools import setup, find_packages\nfrom os.path import join, dirname, realpath, relpath, splitext, exists, getmtime, relpath, lexists, islink\nfrom os import walk, sep, remove, listdir, stat, symlink\n\nfrom Cython.Distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nfrom distutils.core import setup\nfrom distutils.command import build as build_module, clean as clean_module\nfrom distutils.spawn import find_executable\n\nSCRIPT_DIR = dirname(realpath(__file__))\nWIKIDATA_LINKER_SOURCE_DIR = join(SCRIPT_DIR, \"src\")\nWIKIDATA_LINKER_MODULE_NAME = \"wikidata_linker_utils\"\nWIKIDATA_LINKER_INTERNAL_MODULE_NAME = WIKIDATA_LINKER_MODULE_NAME\n\nversion_file = join(SCRIPT_DIR, \"VERSION\")\nif exists(version_file):\n with open(version_file) as f:\n VERSION = f.read().strip()\nelse:\n VERSION = \"1.0.0\"\n\n\ndef path_to_module_name(path):\n BASE_DIRS = [\"python\", \"cython\"]\n relative_path = relpath(path, join(WIKIDATA_LINKER_SOURCE_DIR))\n path_no_ext, _ = splitext(relative_path)\n for base_dir in BASE_DIRS:\n if path_no_ext.startswith(base_dir):\n return path_no_ext.lstrip(base_dir + sep).replace(sep, '.')\n raise Exception(\"Cannot convert path %r to module name\" % 
(relative_path,))\n\n\ndef find_files_by_suffix(path, suffix):\n \"\"\"Recursively find files with specific suffix in a directory\"\"\"\n for relative_path, dirs, files in walk(path):\n for fname in files:\n if fname.endswith(suffix):\n yield join(path, relative_path, fname)\n\n# Make a `cleanall` rule to get rid of intermediate and library files\nclass clean(clean_module.clean):\n def run(self):\n print(\"Cleaning up cython files...\")\n # Just in case the build directory was created by accident,\n # note that shell=True should be OK here because the command is constant.\n for place in [\"build\",\n join(\"src\", \"cython\", WIKIDATA_LINKER_INTERNAL_MODULE_NAME, \"*.c\"),\n join(\"src\", \"cython\", WIKIDATA_LINKER_INTERNAL_MODULE_NAME, \"*.cpp\"),\n join(\"src\", \"cython\", WIKIDATA_LINKER_INTERNAL_MODULE_NAME, \"*.so\")]:\n subprocess.Popen(\"rm -rf %s\" % (place,),\n shell=True,\n executable=\"/bin/bash\",\n cwd=SCRIPT_DIR)\n\ncompiler = distutils.ccompiler.new_compiler()\ndistutils.sysconfig.customize_compiler(compiler)\nBLACKLISTED_COMPILER_SO = ['-Wp,-D_FORTIFY_SOURCE=2']\nbuild_ext.compiler = compiler\n\next_modules = []\n\nfor pyx_file in find_files_by_suffix(join(WIKIDATA_LINKER_SOURCE_DIR, \"cython\"), \".pyx\"):\n # pxd files are like header files for pyx files\n # and they can also have relevant includes.\n relevant_files = [pyx_file]\n pxd_file = pyx_file[:-3] + \"pxd\"\n if exists(pxd_file):\n relevant_files.append(pxd_file)\n\n ext_modules.append(Extension(\n name=path_to_module_name(pyx_file),\n sources=[pyx_file],\n library_dirs=[],\n language='c++',\n extra_compile_args=['-std=c++11', '-Wno-unused-function',\n '-Wno-sign-compare', '-Wno-unused-local-typedef',\n '-Wno-undefined-bool-conversion', '-O3',\n '-Wno-reorder'],\n extra_link_args=[],\n libraries=[],\n extra_objects=[],\n include_dirs=[np.get_include()]\n ))\n\n################################################################################\n## FIND PYTHON PACKAGES ##\n################################################################################\n\npy_packages = []\nfor file in find_files_by_suffix(join(WIKIDATA_LINKER_SOURCE_DIR, \"python\"), \".py\"):\n module_path = dirname(file)\n py_packages.append(path_to_module_name(module_path))\n\n################################################################################\n## BUILD COMMAND WITH EXTRA WORK WHEN DONE ##\n################################################################################\n\ndef symlink_built_package(module_name, dest_directory):\n build_dir_contents = listdir(join(SCRIPT_DIR, \"build\"))\n lib_dot_fnames = []\n for name in build_dir_contents:\n if name.startswith(\"lib.\"):\n lib_dot_fnames.append(join(SCRIPT_DIR, \"build\", name))\n # get latest lib. 
file created and symlink it to the project\n # directory for easier testing\n lib_dot_fnames = sorted(\n lib_dot_fnames,\n key=lambda name: stat(name).st_mtime,\n reverse=True\n )\n if len(lib_dot_fnames) == 0:\n return\n\n most_recent_name = join(lib_dot_fnames[0], module_name)\n symlink_name = join(dest_directory, module_name)\n\n if lexists(symlink_name):\n if islink(symlink_name):\n remove(symlink_name)\n else:\n print(\n (\"non symlink file with name %r found in project directory.\"\n \" Please remove to create a symlink on build\") % (\n symlink_name,\n )\n )\n return\n\n symlink(most_recent_name,\n symlink_name,\n target_is_directory=True)\n print(\"Created symlink pointing to %r from %r\" % (\n most_recent_name,\n join(SCRIPT_DIR, module_name)\n ))\n\n\nclass build_with_posthooks(build_module.build):\n def run(self):\n build_module.build.run(self)\n\n\n# Make a `cleanall` rule to get rid of intermediate and library files\nclass clean_with_posthooks(clean_module.clean):\n def run(self):\n clean_module.clean.run(self)\n # remove cython generated sources\n for file_path in find_files_by_suffix(join(WIKIDATA_LINKER_SOURCE_DIR, 'cython'), '.cpp'):\n remove(file_path)\n\nsetup(name=WIKIDATA_LINKER_MODULE_NAME,\n version=VERSION,\n cmdclass={\"build\": build_with_posthooks, 'build_ext': build_ext, 'clean': clean_with_posthooks},\n install_requires=[\"numpy\"],\n extras_require={\"dev\": []},\n author=\"Jonathan Raiman\",\n language='c++',\n author_email=\"raiman@openai.com\",\n ext_modules=ext_modules,\n description=\"Generate data processing utilities for running DeepType.\",\n package_dir={'': join(WIKIDATA_LINKER_SOURCE_DIR, 'python')},\n packages=py_packages)\n"}}},{"rowIdx":21,"cells":{"python_code":{"kind":"string","value":"import re\n\nSTOP_WORDS = {'a', 'an', 'in', 'the', 'of', 'it', 'from', 'with', 'this', 'that', 'they', 'he',\n 'she', 'some', 'where', 'what', 'since', 'his', 'her', 'their', 'le', 'la', 'les', 'il',\n 'elle', 'ce', 'ça', 'ci', 'ceux', 'ceci', 'cela', 'celle', 'se', 'cet', 'cette',\n 'dans', 'avec', 'con', 'sans', 'pendant', 'durant', 'avant', 'après', 'puis', 'el', 'lo', 'la',\n 'ese', 'esto', 'que', 'qui', 'quoi', 'dont', 'ou', 'où', 'si', 'este', 'esta', 'cual',\n 'eso', 'ella', 'depuis', 'y', 'a', 'à', 'su', 'de', \"des\", 'du', 'los', 'las', 'un', 'une', 'una',\n 'uno', 'para', 'asi', 'later', 'into', 'dentro', 'dedans', 'depuis', 'después', 'desde',\n 'al', 'et', 'por', 'at', 'for', 'when', 'why', 'how', 'with', 'whether', 'if',\n 'thus', 'then', 'and', 'but', 'on', 'during', 'while', 'as', 'within', 'was', 'is',\n 'est', 'au', 'fait', 'font', 'va', 'vont', 'sur', 'en', 'pour', 'del', 'cuando',\n 'cuan', 'do', 'does', 'until', 'sinon', 'encore', 'to', 'by', 'be', 'which',\n 'have', 'not', 'were', 'has', 'also', 'its', 'isbn', 'pp.', \"&amp;\", \"p.\", 'ces', 'o'}\n\n\ndef starts_with_apostrophe_letter(word):\n return (\n word.startswith(\"l'\") or\n word.startswith(\"L'\") or\n word.startswith(\"d'\") or\n word.startswith(\"D'\") or\n word.startswith(\"j'\") or\n word.startswith(\"J'\") or\n word.startswith(\"t'\") or\n word.startswith(\"T'\")\n )\n\n\nPUNCTUATION = {\"'\", \",\", \"-\", \"!\", \".\", \"?\", \":\", \"’\"}\n\n\ndef clean_up_trie_source(source, lowercase=True):\n source = source.rstrip().strip('()[]')\n if len(source) > 0 and (source[-1] in PUNCTUATION or source[0] in PUNCTUATION):\n return \"\"\n # remove l'\n if starts_with_apostrophe_letter(source):\n source = source[2:]\n if source.endswith(\"'s\"):\n source = source[:-2]\n tokens = 
source.split()\n while len(tokens) > 0 and tokens[0].lower() in STOP_WORDS:\n tokens = tokens[1:]\n while len(tokens) > 0 and tokens[-1].lower() in STOP_WORDS:\n tokens = tokens[:-1]\n joined_tokens = \" \".join(tokens)\n if lowercase:\n return joined_tokens.lower()\n return joined_tokens\n\n\nORDINAL_ANCHOR = re.compile(\"^\\d+(st|th|nd|rd|er|eme|ème|ère)$\")\nNUMBER_PUNCTUATION = re.compile(\"^\\d+([\\/\\-,\\.:;%]\\d*)+$\")\n\n\ndef anchor_is_ordinal(anchor):\n return ORDINAL_ANCHOR.match(anchor) is not None\n\n\ndef anchor_is_numbers_slashes(anchor):\n return NUMBER_PUNCTUATION.match(anchor) is not None\n\n\ndef acceptable_anchor(anchor, anchor_trie, blacklist=None):\n return (len(anchor) > 0 and\n not anchor.isdigit() and\n not anchor_is_ordinal(anchor) and\n not anchor_is_numbers_slashes(anchor) and\n anchor in anchor_trie and\n (blacklist is None or anchor not in blacklist))\n"}}},{"rowIdx":22,"cells":{"python_code":{"kind":"string","value":"import sys\nimport importlib.util\nimport traceback\n\nfrom os.path import basename, splitext\n\n\ndef reload_module(path):\n module_name, extension = splitext(basename(path))\n if extension != \".py\":\n raise ValueError(\"path must have a .py extension (got %r)\" % (path,))\n spec = importlib.util.spec_from_file_location(module_name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\ndef enter_or_quit():\n wait = input(\"press any key to continue, q to quit.\")\n received = wait.rstrip()\n if received == 'q':\n print(\"Bye.\")\n sys.exit(0)\n else:\n return received\n\n\nALLOWED_RUNTIME_ERRORS = (\n TypeError,\n ValueError,\n IndexError,\n NameError,\n KeyError,\n AssertionError,\n AttributeError,\n ImportError,\n KeyboardInterrupt\n)\n\nALLOWED_IMPORT_ERRORS = (\n SyntaxError,\n NameError,\n ImportError\n)\n\n\ndef reload_run_retry(module_path, callback):\n while True:\n try:\n module = reload_module(module_path)\n except ALLOWED_IMPORT_ERRORS as e:\n print(\"issue reading %r, please fix.\" % (module_path,))\n print(str(e))\n traceback.print_exc(file=sys.stdout)\n enter_or_quit()\n continue\n try:\n result = callback(module)\n except ALLOWED_RUNTIME_ERRORS as e:\n print(\"issue running %r, please fix.\" % (module_path,))\n print(str(e))\n traceback.print_exc(file=sys.stdout)\n enter_or_quit()\n continue\n break\n return result\n"}}},{"rowIdx":23,"cells":{"python_code":{"kind":"string","value":"import subprocess\n\n\ndef execute_bash(command):\n \"\"\"\n Executes bash command, prints output and\n throws an exception on failure.\n \"\"\"\n process = subprocess.Popen(command,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n for line in process.stdout:\n print(line, end='', flush=True)\n process.wait()\n assert process.returncode == 0\n\n\ndef get_bash_result(command):\n \"\"\"\n Executes bash command, returns output and throws\n an exception on failure.\n \"\"\"\n process = subprocess.Popen(command,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n out = [line for line in process.stdout]\n process.wait()\n assert process.returncode == 0\n return out\n\n\ndef count_lines(path):\n return int(\n get_bash_result('wc -l %s' % (path,))[0].strip().split(' ')[0]\n )\n"}}},{"rowIdx":24,"cells":{"python_code":{"kind":"string","value":""}}},{"rowIdx":25,"cells":{"python_code":{"kind":"string","value":"import json\nimport msgpack\nimport bz2\n\n\ndef iterate_bytes_jsons(fin, batch_size=1000):\n current = 
[]\n for l in fin:\n if l.startswith(b'{'):\n current.append(l)\n if len(current) >= batch_size:\n docs = json.loads('[' + b\"\".join(current).decode('utf-8').rstrip(',\\n') + ']')\n for doc in docs:\n yield doc\n current = []\n if len(current) > 0:\n docs = json.loads('[' + b\"\".join(current).decode('utf-8').rstrip(',\\n') + ']')\n for doc in docs:\n yield doc\n current = []\n\n\ndef iterate_text_jsons(fin, batch_size=1000):\n current = []\n for l in fin:\n if l.startswith('{'):\n current.append(l)\n if len(current) >= batch_size:\n docs = json.loads('[' + \"\".join(current).rstrip(',\\n') + ']')\n for doc in docs:\n yield doc\n current = []\n if len(current) > 0:\n docs = json.loads('[' + \"\".join(current).rstrip(',\\n') + ']')\n for doc in docs:\n yield doc\n current = []\n\n\ndef iterate_message_packs(fin):\n\n unpacker = msgpack.Unpacker(fin, encoding='utf-8', use_list=False)\n for obj in unpacker:\n yield obj\n\n\ndef open_wikidata_file(path, batch_size):\n if path.endswith('bz2'):\n with bz2.open(path, 'rb') as fin:\n for obj in iterate_bytes_jsons(fin, batch_size):\n yield obj\n elif path.endswith('json'):\n with open(path, 'rt') as fin:\n for obj in iterate_text_jsons(fin, batch_size):\n yield obj\n elif path.endswith('mp'):\n with open(path, 'rb') as fin:\n for obj in iterate_message_packs(fin):\n yield obj\n else:\n raise ValueError(\n \"unknown extension for wikidata. \"\n \"Expecting bz2, json, or mp (msgpack).\"\n )\n"}}},{"rowIdx":26,"cells":{"python_code":{"kind":"string","value":"from numpy import logical_and, logical_not, logical_or\n\n\ndef logical_negate(truth, falses):\n out = truth\n for value in falses:\n out = logical_and(out, logical_not(value))\n return out\n\n\ndef logical_ors(values):\n assert(len(values) > 0), \"values cannot be empty.\"\n out = values[0]\n for val in values[1:]:\n out = logical_or(out, val)\n return out\n\n\ndef logical_ands(values):\n assert(len(values) > 0), \"values cannot be empty.\"\n out = values[0]\n for val in values[1:]:\n out = logical_and(out, val)\n return out\n"}}},{"rowIdx":27,"cells":{"python_code":{"kind":"string","value":"import json\nimport warnings\n\nfrom os.path import join, exists\nfrom functools import lru_cache\n\nimport marisa_trie\nimport requests\nimport numpy as np\n\nfrom .successor_mask import (\n successor_mask, invert_relation, offset_values_mask\n)\nfrom .offset_array import OffsetArray, SparseAttribute\nfrom .wikidata_ids import (\n load_wikidata_ids, load_names, property_names, temporal_property_names\n)\nfrom . 
import wikidata_properties as wprop\n\n\nclass CachedRelation(object):\n def __init__(self, use, state):\n self.use = use\n self.state = state\n\n\n@lru_cache(maxsize=None)\ndef get_name(wikidata_id):\n res = requests.get(\"https://www.wikidata.org/wiki/\" + wikidata_id)\n el = res.text.find('')\n el_end = res.text.find('')\n return res.text[el + len(''):el_end]\n\n\nclass TypeCollection(object):\n def __init__(self, path, num_names_to_load=100000, language_path=None, prefix=\"enwiki\", verbose=True,\n cache=True):\n self.cache = cache\n self.path = path\n self.verbose = verbose\n self.wikidata_names2prop_names = property_names(\n join(path, 'wikidata_property_names.json')\n )\n self.wikidata_names2temporal_prop_names = temporal_property_names(\n join(path, 'wikidata_time_property_names.json')\n )\n # add wikipedia english category links:\n self.wikidata_names2prop_names[wprop.CATEGORY_LINK] = \"category_link\"\n self.wikidata_names2prop_names[wprop.FIXED_POINTS] = \"fixed_points\"\n self.known_names = load_names(\n join(path, \"wikidata_wikititle2wikidata.tsv\"),\n num_names_to_load,\n prefix=prefix\n )\n self.num_names_to_load = num_names_to_load\n self.ids, self.name2index = load_wikidata_ids(path, verbose=self.verbose)\n self._relations = {}\n self._attributes = {}\n self._inverted_relations = {}\n self._article2id = None\n self._web_get_name = True\n self._satisfy_cache = {}\n\n # empty blacklist:\n self.set_bad_node(\n set(), set()\n )\n if language_path is not None:\n article_links = np.load(join(language_path, \"trie_index2indices_values.npy\"))\n article_links_counts = np.load(join(language_path, \"trie_index2indices_counts.npy\"))\n self._weighted_articles = np.bincount(article_links, weights=article_links_counts).astype(np.int32)\n if len(self._weighted_articles) != len(self.ids):\n self._weighted_articles = np.concatenate(\n [\n self._weighted_articles,\n np.zeros(len(self.ids) - len(self._weighted_articles), dtype=np.int32)\n ]\n )\n else:\n self._weighted_articles = None\n\n def attribute(self, name):\n if name not in self._attributes:\n is_temporal = name in self.wikidata_names2temporal_prop_names\n assert(is_temporal), \"load relations using `relation` method.\"\n if self.verbose:\n print('load %r (%r)' % (name, self.wikidata_names2prop_names[name],))\n self._attributes[name] = SparseAttribute.load(\n join(self.path, \"wikidata_%s\" % (name,))\n )\n return self._attributes[name]\n\n @property\n def article2id(self):\n if self._article2id is None:\n if self.verbose:\n print('load %r' % (\"article2id\",))\n self._article2id = marisa_trie.RecordTrie('i').load(\n join(self.path, \"wikititle2wikidata.marisa\")\n )\n if self.verbose:\n print(\"done.\")\n return self._article2id\n\n def relation(self, name):\n if name.endswith(\".inv\"):\n return self.get_inverted_relation(name[:-4])\n if name not in self._relations:\n is_temporal = name in self.wikidata_names2temporal_prop_names\n assert(not is_temporal), \"load attributes using `attribute` method.\"\n if self.verbose:\n print('load %r (%r)' % (name, self.wikidata_names2prop_names[name],))\n self._relations[name] = OffsetArray.load(\n join(self.path, \"wikidata_%s\" % (name,)),\n compress=True\n )\n return self._relations[name]\n\n def set_bad_node(self, bad_node, bad_node_pair):\n changed = False\n if hasattr(self, \"_bad_node\") and self._bad_node != bad_node:\n changed = True\n if hasattr(self, \"_bad_node_pair\") and self._bad_node_pair != bad_node_pair:\n changed = True\n\n self._bad_node = bad_node\n self._bad_node_pair = 
bad_node_pair\n self._bad_node_array = np.array(list(bad_node), dtype=np.int32)\n\n bad_node_pair_right = {}\n for node_left, node_right in self._bad_node_pair:\n if node_right not in bad_node_pair_right:\n bad_node_pair_right[node_right] = [node_left]\n else:\n bad_node_pair_right[node_right].append(node_left)\n bad_node_pair_right = {\n node_right: np.array(node_lefts, dtype=np.int32)\n for node_right, node_lefts in bad_node_pair_right.items()\n }\n self._bad_node_pair_right = bad_node_pair_right\n\n if changed:\n self.reset_cache()\n\n def get_name(self, identifier):\n if identifier >= self.num_names_to_load and self._web_get_name:\n try:\n return get_name(self.ids[identifier]) + \" (\" + self.ids[identifier] + \")\"\n except requests.exceptions.ConnectionError:\n self._web_get_name = False\n name = self.known_names.get(identifier, None)\n if name is None:\n return self.ids[identifier]\n else:\n return name + \" (\" + self.ids[identifier] + \")\"\n\n def describe_connection(self, source, destination, allowed_edges):\n if isinstance(source, str):\n if source in self.name2index:\n source_index = self.name2index[source]\n else:\n source_index = self.article2id[\"enwiki/\" + source][0][0]\n else:\n source_index = source\n\n if isinstance(destination, str):\n if destination in self.name2index:\n dest_index = self.name2index[destination]\n else:\n dest_index = self.article2id[\"enwiki/\" + destination][0][0]\n else:\n dest_index = destination\n\n found_path = self.is_member_with_path(\n source_index,\n allowed_edges,\n [dest_index]\n )\n if found_path is not None:\n _, path = found_path\n for el in path:\n if isinstance(el, str):\n print(\" \" + el)\n else:\n print(self.get_name(el), el)\n else:\n print('%r and %r are not connected' % (source, destination))\n\n def is_member_with_path(self, root, fields, member_fields, max_steps=float(\"inf\"), steps=0, visited=None, path=None):\n if steps >= max_steps:\n return None\n if visited is None:\n visited = set()\n\n if path is None:\n path = [root]\n else:\n path = path + [root]\n\n for field in fields:\n field_parents = self.relation(field)[root]\n for el in field_parents:\n if el in member_fields and el not in self._bad_node and (root, el) not in self._bad_node_pair:\n return True, path + [field, el]\n for el in field_parents:\n if el in visited or el in self._bad_node or (root, el) in self._bad_node_pair:\n continue\n visited.add(el)\n res = self.is_member_with_path(el, fields, member_fields, max_steps, steps=steps + 1, visited=visited, path=path + [field])\n if res is not None:\n return res\n return None\n\n def get_inverted_relation(self, relation_name):\n if relation_name.endswith(\".inv\"):\n return self.relation(relation_name[:-4])\n if relation_name not in self._inverted_relations:\n new_values_path = join(self.path, \"wikidata_inverted_%s_values.npy\" % (relation_name,))\n new_offsets_path = join(self.path, \"wikidata_inverted_%s_offsets.npy\" % (relation_name,))\n\n if not exists(new_values_path):\n relation = self.relation(relation_name)\n if self.verbose:\n print(\"inverting relation %r (%r)\" % (relation_name, self.wikidata_names2prop_names[relation_name],))\n new_values, new_offsets = invert_relation(\n relation.values,\n relation.offsets\n )\n np.save(new_values_path, new_values)\n np.save(new_offsets_path, new_offsets)\n if self.verbose:\n print(\"load inverted %r (%r)\" % (relation_name, self.wikidata_names2prop_names[relation_name]))\n self._inverted_relations[relation_name] = OffsetArray.load(\n join(self.path, 
\"wikidata_inverted_%s\" % (relation_name,)),\n compress=True\n )\n return self._inverted_relations[relation_name]\n\n def successor_mask(self, relation, active_nodes):\n if isinstance(active_nodes, list):\n active_nodes = np.array(active_nodes, dtype=np.int32)\n if active_nodes.dtype != np.int32:\n active_nodes = active_nodes.astype(np.int32)\n return successor_mask(\n relation.values, relation.offsets, self._bad_node_pair_right, active_nodes\n )\n\n def remove_blacklist(self, state):\n state[self._bad_node_array] = False\n\n def satisfy(self, relation_names, active_nodes, max_steps=None):\n assert(len(relation_names) > 0), (\n \"relation_names cannot be empty.\"\n )\n if self.cache and isinstance(active_nodes, (list, tuple)) and len(active_nodes) < 100:\n satisfy_key = (tuple(sorted(relation_names)), tuple(sorted(active_nodes)), max_steps)\n if satisfy_key in self._satisfy_cache:\n cached = self._satisfy_cache[satisfy_key]\n cached.use += 1\n return cached.state\n else:\n satisfy_key = None\n inverted_relations = [self.get_inverted_relation(relation_name) for relation_name in relation_names]\n state = np.zeros(inverted_relations[0].size(), dtype=np.bool)\n state[active_nodes] = True\n step = 0\n while len(active_nodes) > 0:\n succ = None\n for relation in inverted_relations:\n if succ is None:\n succ = self.successor_mask(relation, active_nodes)\n else:\n succ = succ | self.successor_mask(relation, active_nodes)\n new_state = state | succ\n self.remove_blacklist(new_state)\n (active_nodes,) = np.where(state != new_state)\n active_nodes = active_nodes.astype(np.int32)\n state = new_state\n step += 1\n if max_steps is not None and step >= max_steps:\n break\n if satisfy_key is not None:\n self._satisfy_cache[satisfy_key] = CachedRelation(1, state)\n return state\n\n def reset_cache(self):\n cache_keys = list(self._satisfy_cache.keys())\n for key in cache_keys:\n if self._satisfy_cache[key].use == 0:\n del self._satisfy_cache[key]\n else:\n self._satisfy_cache[key].use = 0\n\n def print_top_class_members(self, truth_table, name=\"Other\", topn=20):\n if self._weighted_articles is not None:\n print(\"%s category, highly linked articles in wikipedia:\" % (name,))\n sort_weight = self._weighted_articles * truth_table\n linked_articles = int((sort_weight > 0).sum())\n print(\"%s category, %d articles linked in wikipedia:\" % (name, linked_articles))\n top_articles = np.argsort(sort_weight)[::-1]\n for art in top_articles[:topn]:\n if not truth_table[art]:\n break\n print(\"%r (%d)\" % (self.get_name(art), self._weighted_articles[art]))\n print(\"\")\n else:\n print(\"%s category, sample of members:\" % (name,))\n top_articles = np.where(truth_table)[0]\n for art in top_articles[:topn]:\n print(\"%r\" % (self.get_name(art),))\n print(\"\")\n\n def class_report(self, relation_names, truth_table, name=\"Other\", topn=20):\n active_nodes = np.where(truth_table)[0].astype(np.int32)\n num_active_nodes = len(active_nodes)\n print(\"%s category contains %d unique items.\" % (name, num_active_nodes,))\n relations = [self.relation(relation_name) for relation_name in relation_names]\n for relation, relation_name in zip(relations, relation_names):\n mask = offset_values_mask(relation.values, relation.offsets, active_nodes)\n counts = np.bincount(relation.values[mask])\n topfields = np.argsort(counts)[::-1]\n print(\"%s category, most common %r:\" % (name, relation_name,))\n for field in topfields[:topn]:\n if counts[field] == 0:\n break\n print(\"%.3f%% (%d): %r\" % (100.0 * counts[field] / 
num_active_nodes,\n counts[field],\n self.get_name(field)))\n print(\"\")\n\n is_fp = np.logical_and(\n np.logical_or(\n self.relation(wprop.FIXED_POINTS + \".inv\").edges() > 0,\n self.relation(wprop.FIXED_POINTS).edges() > 0\n ),\n truth_table\n )\n self.print_top_class_members(\n is_fp, topn=topn, name=name + \" (fixed points)\"\n )\n if self._weighted_articles is not None:\n self.print_top_class_members(truth_table, topn=topn, name=name)\n\n def load_blacklist(self, path):\n with open(path, \"rt\") as fin:\n blacklist = json.load(fin)\n filtered_bad_node = []\n for el in blacklist[\"bad_node\"]:\n if el not in self.name2index:\n warnings.warn(\"Node %r under `bad_node` is not a known wikidata id.\" % (\n el\n ))\n continue\n filtered_bad_node.append(el)\n bad_node = set(self.name2index[el] for el in filtered_bad_node)\n\n filtered_bad_node_pair = []\n\n for el, oel in blacklist[\"bad_node_pair\"]:\n if el not in self.name2index:\n warnings.warn(\"Node %r under `bad_node_pair` is not a known wikidata id.\" % (\n el\n ))\n continue\n if oel not in self.name2index:\n warnings.warn(\"Node %r under `bad_node_pair` is not a known wikidata id.\" % (\n oel\n ))\n continue\n filtered_bad_node_pair.append((el, oel))\n bad_node_pair = set([(self.name2index[el], self.name2index[oel])\n for el, oel in filtered_bad_node_pair])\n self.set_bad_node(bad_node, bad_node_pair)\n"}}},{"rowIdx":28,"cells":{"python_code":{"kind":"string","value":"INSTANCE_OF = \"P31\"\nSUBCLASS_OF = \"P279\"\nPART_OF = \"P361\"\nOCCUPATION = \"P106\"\nFIELD_OF_WORK = \"P101\"\nFIELD_OF_THIS_OCCUPATION = \"P425\"\nMEDICAL_SPECIALITY = \"P1995\"\nGENRE = \"P136\"\nSEX_OR_GENDER = \"P21\"\nCOUNTRY_OF_CITIZENSHIP = \"P27\"\nCOUNTRY = \"P17\"\nCONTINENT = \"P30\"\nLOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY = \"P131\"\nSPORT = \"P641\"\nSTUDIES = \"P2578\"\nSERIES = \"P179\"\nUSE = \"P366\"\nLOCATION = \"P276\"\nFACET_OF = \"P1269\"\nIS_A_LIST_OF = \"P360\"\nCOUNTRY_OF_ORIGIN = \"P495\"\nPRODUCT_OR_MATERIAL_PRODUCED = \"P1056\"\nINDUSTRY = \"P452\"\nPARENT_TAXON = \"P171\"\nAPPLIES_TO_TERRITORIAL_JURISDICTION = \"P1001\"\nPOSITION_HELD = \"P39\"\nCATEGORYS_MAIN_TOPIC = \"P301\"\nPUBLICATION_DATE = \"P577\"\nDATE_OF_BIRTH = \"P569\"\nDATE_OF_DEATH = \"P570\"\nINCEPTION = \"P571\"\nDISSOLVED_OR_ABOLISHED = \"P576\"\nPOINT_IN_TIME = \"P585\"\nSTART_TIME = \"P580\"\nEND_TIME = \"P582\"\nCATEGORY_LINK = \"enwiki_category_links\"\nFIXED_POINTS = \"enwiki_fixed_points\"\n"}}},{"rowIdx":29,"cells":{"python_code":{"kind":"string","value":"from os.path import exists\nfrom os import stat\n\n\ndef true_exists(fname):\n return exists(fname) and stat(fname).st_size > 100\n"}}},{"rowIdx":30,"cells":{"python_code":{"kind":"string","value":"import progressbar\n\npercentage = progressbar.Percentage()\ncounter = progressbar.Counter()\nbar = progressbar.Bar()\nadaptive_eta = progressbar.AdaptiveETA()\n\n\nclass MessageProgressbar(progressbar.ProgressBar):\n def set_message(self, message):\n self.widgets[0] = message + \" \"\n\n def set_item(self, item):\n self.widgets[4] = \" %s) \" % (item,)\n\n\ndef get_progress_bar(message, max_value=None, item=\"lines\"):\n \"\"\"\n Construct a progressbar iterator wrapper\n with an ETA and percentage information.\n\n Arguments:\n ----------\n message : str, title for the progress bar.\n max_value : None or int\n\n Returns:\n --------\n ProgressBar : object that can wrap an iterator\n and print out duration estimates and\n iteration stats.\n \"\"\"\n widgets = [\n message + \" \",\n percentage,\n \" (\",\n 
counter,\n \" %s) \" % (item,),\n bar,\n adaptive_eta\n ]\n return MessageProgressbar(widgets=widgets, maxval=max_value)\n"}}},{"rowIdx":31,"cells":{"python_code":{"kind":"string","value":"from os.path import exists\nimport numpy as np\nfrom .successor_mask import (\n convert_to_offset_array, make_dense, make_sparse\n)\n\n\ndef count_non_zero(dense):\n return len(np.nonzero(dense[1:] - dense[:-1])[0]) + int(dense[0] != 0)\n\n\ndef should_compress(dense):\n nonzeros = count_non_zero(dense)\n return (2 * nonzeros + 1) < 0.5 * len(dense)\n\n\nclass OffsetArray(object):\n def __init__(self, values, offsets):\n self.values = values\n self.offsets = offsets\n\n def __getitem__(self, idx):\n end = self.offsets[idx]\n start = 0 if idx == 0 else self.offsets[idx - 1]\n return self.values[start:end]\n\n def is_empty(self, idx):\n end = self.offsets[idx]\n start = 0 if idx == 0 else self.offsets[idx - 1]\n return start == end\n\n def size(self):\n return self.offsets.shape[0]\n\n def edges(self):\n num_edges = np.zeros(len(self.offsets), dtype=np.int32)\n num_edges[0] = self.offsets[0]\n num_edges[1:] = self.offsets[1:] - self.offsets[:-1]\n return num_edges\n\n @classmethod\n def load(cls, path, compress=True):\n values = np.load(path + \"_values.npy\")\n if exists(path + \"_offsets.sparse.npy\"):\n offsets_compressed = np.load(path + \"_offsets.sparse.npy\")\n offsets = make_dense(offsets_compressed, cumsum=True)\n else:\n # legacy mode, load dense versions:\n offsets = np.load(path + \"_offsets.npy\")\n if compress:\n if should_compress(offsets):\n offsets_compressed = make_sparse(offsets)\n np.save(path + \"_offsets.sparse.npy\", offsets_compressed)\n # optionally delete the old version here\n return OffsetArray(\n values,\n offsets\n )\n\n\ndef convert_dict_to_offset_array(dictionary, num_values):\n offsets = np.zeros(num_values, dtype=np.int32)\n total_num_values = sum(len(v) for _, v in dictionary.items())\n values = np.zeros(total_num_values, dtype=np.int32)\n position = 0\n for key, value in sorted(dictionary.items(), key=lambda x: x[0]):\n values[position:position + len(value)] = value\n position += len(value)\n offsets[key] = len(value)\n np.cumsum(offsets, out=offsets)\n return values, offsets\n\n\ndef save_record_with_offset(path, index2indices, total_size=None):\n if isinstance(index2indices, dict):\n if total_size is None:\n raise ValueError(\"cannot leave total_size None \"\n \"when using a dict.\")\n values, offsets = convert_dict_to_offset_array(index2indices, total_size)\n else:\n values, offsets = convert_to_offset_array(index2indices)\n np.save(path + \"_values.npy\", values)\n if should_compress(offsets):\n compressed_offsets = make_sparse(offsets)\n np.save(path + \"_offsets.sparse.npy\", compressed_offsets)\n else:\n np.save(path + \"_offsets.npy\", offsets)\n\n\ndef load_sparse(path):\n compressed = np.load(path)\n dense = make_dense(compressed, cumsum=False)\n non_zero_indices = compressed[1::2]\n mask = np.zeros(len(dense), dtype=np.bool)\n mask[non_zero_indices] = True\n return dense, mask\n\n\nclass SparseAttribute(object):\n def __init__(self, dense, mask):\n self.dense = dense\n self.mask = mask\n\n def __lt__(self, value):\n return np.logical_and(self.dense < value, self.mask)\n\n def __le__(self, value):\n return np.logical_and(self.dense <= value, self.mask)\n\n def __gt__(self, value):\n return np.logical_and(self.dense > value, self.mask)\n\n def __ge__(self, value):\n return np.logical_and(self.dense >= value, self.mask)\n\n def __eq__(self, value):\n return 
np.logical_and(self.dense == value, self.mask)\n\n @classmethod\n def load(cls, path):\n dense, mask = load_sparse(path + \"_values.sparse.npy\")\n return SparseAttribute(dense, mask)\n"}}},{"rowIdx":32,"cells":{"python_code":{"kind":"string","value":"LANGUAGE_CODES = [\"en\", \"zh\", \"fr\", \"ja\",\n \"ru\", \"pt\", \"ca\", \"fa\",\n \"ar\", \"fi\", \"hu\", \"id\",\n \"es\", \"it\", \"war\", \"ceb\",\n \"nl\", \"de\", \"sv\", \"ro\",\n \"cs\", \"ko\", \"sr\", \"ms\",\n \"tr\", \"min\", \"eo\", \"eu\",\n \"kk\", \"da\", \"bg\", \"sk\",\n \"hy\", \"he\", \"lt\", \"sl\",\n \"et\", \"uz\", \"gl\", \"nn\",\n \"la\", \"vo\", \"simple\",\n \"el\", \"ce\", \"be\", \"ka\",\n \"hi\", \"az\", \"th\", \"ur\",\n \"oc\", \"mk\", \"ta\", \"mg\",\n \"new\", \"tt\", \"cy\", \"tl\",\n \"bs\", \"br\", \"ne\", \"gu\",\n \"io\", \"bpy\", \"nds\", \"ku\",\n \"als\", \"pa\", \"su\", \"kn\",\n \"bar\", \"ckb\", \"ia\", \"arz\"]\n"}}},{"rowIdx":33,"cells":{"python_code":{"kind":"string","value":"import json\n\nfrom collections import namedtuple\nfrom os.path import join, dirname\n\n\ndef dict_fix_relative_paths(basepath, relative_paths):\n if relative_paths is None:\n relative_paths = []\n\n def load(d):\n new_obj = d.copy()\n for key in relative_paths:\n if key in new_obj:\n if isinstance(new_obj[key], str):\n new_obj[key] = join(basepath, new_obj[key])\n elif isinstance(new_obj[key], list) and len(new_obj[key]) > 0 and isinstance(new_obj[key][0], str):\n new_obj[key] = [join(basepath, path) for path in new_obj[key]]\n return new_obj\n return load\n\n\ndef load_config(path, relative_paths=None, defaults=None, relative_to=None):\n if relative_to is None:\n relative_to = dirname(path)\n object_hook = dict_fix_relative_paths(relative_to, relative_paths)\n with open(path, \"rt\") as fin:\n obj = json.load(\n fin,\n object_hook=object_hook\n )\n if defaults is not None:\n for key, value in defaults.items():\n if key not in obj:\n obj[key] = value\n return json.loads(\n json.dumps(obj),\n object_hook=lambda d: namedtuple('X', d.keys())(*d.values())\n )\n\n\ndef json_loads(bytes):\n return json.loads(bytes.decode('utf-8'))\n\n\ndef json_serializer(x):\n return json.dumps(\n x, check_circular=False, separators=(',', ':')\n ).encode('utf-8')\n"}}},{"rowIdx":34,"cells":{"python_code":{"kind":"string","value":"from os.path import exists, join, dirname\nimport marisa_trie\nimport json\nfrom .file import true_exists\nfrom os import makedirs\n\n\nclass MarisaAsDict(object):\n def __init__(self, marisa):\n self.marisa = marisa\n\n def get(self, key, fallback):\n value = self.marisa.get(key, None)\n if value is None:\n return fallback\n else:\n return value[0][0]\n\n def __getitem__(self, key):\n value = self.marisa[key]\n return value[0][0]\n\n def __contains__(self, key):\n return key in self.marisa\n\n\ndef load_wikidata_ids(path, verbose=True):\n wikidata_ids_inverted_path = join(path, 'wikidata_ids_inverted.marisa')\n with open(join(path, \"wikidata_ids.txt\"), \"rt\") as fin:\n ids = fin.read().splitlines()\n if exists(wikidata_ids_inverted_path):\n if verbose:\n print(\"loading wikidata id -> index\")\n name2index = MarisaAsDict(marisa_trie.RecordTrie('i').load(wikidata_ids_inverted_path))\n if verbose:\n print(\"done\")\n else:\n if verbose:\n print(\"building trie\")\n\n name2index = MarisaAsDict(\n marisa_trie.RecordTrie('i', [(name, (k,)) for k, name in enumerate(ids)])\n )\n name2index.marisa.save(wikidata_ids_inverted_path)\n if verbose:\n print(\"done\")\n return (ids, name2index)\n\n\ndef load_names(path, num, 
prefix):\n names = {}\n if num > 0:\n with open(path, \"rt\", encoding=\"UTF-8\") as fin:\n for line in fin:\n try:\n name, number = line.rstrip('\\n').split('\\t')\n except ValueError:\n # skip malformed lines rather than reusing values from a previous iteration\n continue\n number = int(number)\n if number >= num:\n break\n else:\n if name.startswith(prefix):\n names[number] = name[len(prefix):]\n return names\n\n\ndef sparql_query(query):\n import requests\n wikidata_url = \"https://query.wikidata.org/sparql\"\n response = requests.get(\n wikidata_url,\n params={\n \"format\": \"json\",\n \"query\": query\n }\n ).json()\n out = {}\n for el in response[\"results\"]['bindings']:\n label = el['propertyLabel']['value']\n value = el['property']['value']\n if value.startswith(\"http://www.wikidata.org/entity/\"):\n value = value[len(\"http://www.wikidata.org/entity/\"):]\n out[value] = label\n return out\n\n\ndef saved_sparql_query(savename, query):\n directory = dirname(savename)\n makedirs(directory, exist_ok=True)\n if true_exists(savename):\n with open(savename, \"rt\") as fin:\n out = json.load(fin)\n return out\n else:\n out = sparql_query(query)\n with open(savename, \"wt\") as fout:\n json.dump(out, fout)\n return out\n\n\ndef property_names(prop_save_path):\n \"\"\"\n Retrieve the mapping between wikidata properties ids (e.g. \"P531\") and\n their human-readable names (e.g. \"diplomatic mission sent\").\n\n Returns:\n dict<str, str> : mapping from property id to property descriptor.\n \"\"\"\n return saved_sparql_query(\n prop_save_path,\n \"\"\"\n SELECT DISTINCT ?property ?propertyLabel\n WHERE\n {\n ?property a wikibase:Property .\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n }\n \"\"\"\n )\n\n\ndef temporal_property_names(prop_save_path):\n \"\"\"\n Retrieve the mapping between wikidata properties ids (e.g. \"P531\") and\n their human-readable names (e.g. 
\"diplomatic mission sent\") only\n for fields that are time-based.\n\n Returns:\n dict<str, str> : mapping from property id to property descriptor.\n \"\"\"\n return saved_sparql_query(\n prop_save_path,\n \"\"\"\n SELECT DISTINCT ?property ?propertyLabel\n WHERE\n {\n ?property a wikibase:Property .\n {?property wdt:P31 wd:Q18636219} UNION {?property wdt:P31 wd:Q22661913} .\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n }\n \"\"\"\n )\n"}}},{"rowIdx":35,"cells":{"python_code":{"kind":"string","value":"import re\n\nimport numpy as np\n\nfrom os.path import join\nfrom epub_conversion import convert_wiki_to_lines\nfrom epub_conversion.wiki_decoder import almost_smart_open\nfrom .wikipedia_language_codes import LANGUAGE_CODES\nfrom .file import true_exists\nfrom .bash import execute_bash\nfrom .successor_mask import (\n load_redirections, match_wikipedia_to_wikidata\n)\n\n\nBADS = [\"Wikipedia:\", \"WikipΓ©dia:\", \"File:\", \"Media:\", \"Help:\", \"User:\"]\n\n\ndef _lines_extractor(lines, article_name):\n \"\"\"\n Simply outputs lines\n \"\"\"\n yield (article_name, lines)\n\n\ndef _bad_link(link):\n return any(link.startswith(el) for el in BADS)\n\n\ndef iterate_articles(path):\n num_articles = 9999999999999\n with almost_smart_open(path, \"rb\") as wiki:\n for article_name, lines in convert_wiki_to_lines(\n wiki,\n max_articles=num_articles,\n clear_output=True,\n report_every=100,\n parse_special_pages=True,\n skip_templated_lines=False,\n line_converter=_lines_extractor):\n if not _bad_link(article_name):\n yield (article_name, lines)\n\n\ndef induce_wikipedia_prefix(wikiname):\n if wikiname in {code + \"wiki\" for code in LANGUAGE_CODES}:\n return wikiname\n else:\n raise ValueError(\"Could not determine prefix for wiki \"\n \"with name %r.\" % (wikiname,))\n\n\ndef convert_sql_to_lookup(props, propname):\n propname = b\",'\" + propname.encode(\"utf-8\") + b\"','\"\n ending = b\"',\"\n starting = b\"(\"\n lookup = {}\n offset = 0\n while True:\n newpos = props.find(propname, offset)\n if newpos == -1:\n break\n begin = props.rfind(starting, offset, newpos)\n end = props.find(ending, newpos + len(propname))\n key = props[begin + len(starting):newpos]\n value = props[newpos + len(propname):end]\n lookup[key.decode('utf-8')] = value.decode('utf-8')\n offset = end\n return lookup\n\n\ndef load_wikipedia_pageid_to_wikidata(data_dir):\n fname = join(data_dir, \"enwiki-latest-page_props.sql\")\n if not true_exists(fname):\n execute_bash(\n \"wget -O - https://dumps.wikimedia.org/enwiki/\"\n \"latest/enwiki-latest-page_props.sql.gz | gunzip > %s\" % (fname,)\n )\n with open(fname, \"rb\") as fin:\n props = fin.read()\n return convert_sql_to_lookup(props, \"wikibase_item\")\n\n\nlink_pattern = re.compile(r'\\[\\[([^\\]\\[:]*)\\]\\]')\n\n\nclass WikipediaDoc(object):\n def __init__(self, doc):\n self.doc = doc\n\n def links(self, wiki_trie, redirections, prefix):\n current_pos = 0\n for match in re.finditer(link_pattern, self.doc):\n match_string = match.group(1)\n start = match.start()\n end = match.end()\n if current_pos != start:\n yield self.doc[current_pos:start], None\n current_pos = end\n\n if \"|\" in match_string:\n link, anchor = match_string.rsplit(\"|\", 1)\n link = link.strip().split(\"#\")[0]\n else:\n anchor = match_string\n link = anchor.strip()\n\n if len(link) > 0:\n dest_index = match_wikipedia_to_wikidata(\n link,\n wiki_trie,\n redirections,\n prefix\n )\n yield anchor, dest_index\n else:\n yield anchor, None\n if current_pos != len(self.doc):\n 
yield self.doc[current_pos:], None\n\n\ndef load_wikipedia_docs(path, size):\n docs = []\n for article_name, doc in iterate_articles(path):\n docs.append(WikipediaDoc(doc))\n if len(docs) == size:\n break\n return docs\n\n\ndef transition_trie_index(anchor_idx, dest_index, transitions, all_options):\n \"\"\"\n Recover the new trie index for an index that has gone stale.\n Use a transitions array to know how original anchors now map to\n new trie indices.\n \"\"\"\n option_transitions = transitions[anchor_idx]\n dest_index = option_transitions[option_transitions[:, 0] == dest_index, 1]\n if len(dest_index) == 0:\n dest_index = -1\n else:\n dest_index = np.asscalar(dest_index)\n if dest_index != -1:\n if not np.any(all_options == dest_index):\n dest_index = -1\n return dest_index\n\n\n__all__ = [\"load_redirections\", \"induce_wikipedia_prefix\",\n \"load_wikipedia_docs\", \"WikipediaDoc\",\n \"transition_trie_index\", \"iterate_articles\"]\n"}}},{"rowIdx":36,"cells":{"python_code":{"kind":"string","value":"\"\"\"\nCompress a jsonl version of Wikidata by throwing about descriptions\nand converting file to msgpack format.\n\nUsage\n-----\n\n```\npython3 compress_wikidata_msgpack.py wikidata.json wikidata.msgpack\n```\n\n\"\"\"\nimport argparse\nimport msgpack\n\nfrom wikidata_linker_utils.wikidata_iterator import open_wikidata_file\nfrom wikidata_linker_utils.progressbar import get_progress_bar\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument('wikidata')\n parser.add_argument('out')\n return parser.parse_args(args=args)\n\n\ndef main():\n args = parse_args()\n approx_max_quantity = 24642416\n pbar = get_progress_bar('compress wikidata', max_value=approx_max_quantity, item='entities')\n pbar.start()\n seen = 0\n with open(args.out, \"wb\") as fout:\n for doc in open_wikidata_file(args.wikidata, 1000):\n seen += 1\n if 'descriptions' in doc:\n del doc['descriptions']\n if 'labels' in doc:\n del doc['labels']\n if 'aliases' in doc:\n del doc['aliases']\n for claims in doc['claims'].values():\n for claim in claims:\n if 'id' in claim:\n del claim['id']\n if 'rank' in claim:\n del claim['rank']\n if 'references' in claim:\n for ref in claim['references']:\n if 'hash' in ref:\n del ref['hash']\n if 'qualifiers' in claim:\n for qualifier in claim['qualifiers'].values():\n if 'hash' in qualifier:\n del qualifier['hash']\n fout.write(msgpack.packb(doc))\n if seen % 1000 == 0:\n if seen < approx_max_quantity:\n pbar.update(seen)\n pbar.finish()\n\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":37,"cells":{"python_code":{"kind":"string","value":"import argparse\n\nfrom os.path import join\nfrom os import makedirs\n\nimport marisa_trie\nimport numpy as np\n\nfrom wikidata_linker_utils.bash import count_lines\nfrom wikidata_linker_utils.progressbar import get_progress_bar\nfrom wikidata_linker_utils.wikipedia import match_wikipedia_to_wikidata, load_redirections\nfrom wikidata_linker_utils.successor_mask import construct_mapping, construct_anchor_trie\n\n\ndef parse_args(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"wikipedia2wikidata_trie\",\n help=\"Location of wikipedia -> wikidata mapping trie.\")\n parser.add_argument(\"prefix\", type=str,\n help=\"What language is being processed, e.g. 
enwiki, frwiki, etc.\")\n parser.add_argument(\"anchor_tags\", type=str,\n help=\"Location where anchor tags were saved (tsv).\")\n parser.add_argument(\"redirections\", type=str,\n help=\"Location where redirections were saved (tsv).\")\n parser.add_argument(\"out\", type=str,\n help=\"Directory to save trie/data in.\")\n return parser.parse_args(argv)\n\n\ndef main():\n args = parse_args()\n makedirs(args.out, exist_ok=True)\n wikipedia2wikidata_trie = marisa_trie.RecordTrie('i').load(\n args.wikipedia2wikidata_trie\n )\n print('loaded trie')\n redirections = load_redirections(args.redirections)\n anchor_trie = construct_anchor_trie(\n anchor_tags=args.anchor_tags,\n wikipedia2wikidata_trie=wikipedia2wikidata_trie,\n redirections=redirections,\n prefix=args.prefix\n )\n anchor_trie.save(join(args.out, 'trie.marisa'))\n (\n (\n trie_index2indices_offsets,\n trie_index2indices_values,\n trie_index2indices_counts\n ),\n (\n trie_index2contexts_offsets,\n trie_index2contexts_values,\n trie_index2contexts_counts\n )\n ) = construct_mapping(\n anchor_tags=args.anchor_tags,\n wikipedia2wikidata_trie=wikipedia2wikidata_trie,\n redirections=redirections,\n prefix=args.prefix,\n anchor_trie=anchor_trie\n )\n np.save(join(args.out, \"trie_index2indices_offsets.npy\"), trie_index2indices_offsets)\n np.save(join(args.out, \"trie_index2indices_values.npy\"), trie_index2indices_values)\n np.save(join(args.out, \"trie_index2indices_counts.npy\"), trie_index2indices_counts)\n\n np.save(join(args.out, \"trie_index2contexts_offsets.npy\"), trie_index2contexts_offsets)\n np.save(join(args.out, \"trie_index2contexts_values.npy\"), trie_index2contexts_values)\n np.save(join(args.out, \"trie_index2contexts_counts.npy\"), trie_index2contexts_counts)\n\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":38,"cells":{"python_code":{"kind":"string","value":"import argparse\nimport sys\nimport json\nimport time\nimport traceback\n\nfrom os import makedirs\nfrom os.path import join, dirname, realpath\nfrom wikidata_linker_utils.repl import (\n enter_or_quit, reload_module,\n ALLOWED_RUNTIME_ERRORS,\n ALLOWED_IMPORT_ERRORS\n)\nfrom wikidata_linker_utils.logic import logical_ors\nfrom wikidata_linker_utils.type_collection import TypeCollection\nimport wikidata_linker_utils.wikidata_properties as wprop\n\n\nimport numpy as np\n\nSCRIPT_DIR = dirname(realpath(__file__))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('wikidata', type=str,\n help=\"Location of wikidata properties.\")\n parser.add_argument('classifiers', type=str, nargs=\"+\",\n help=\"Filename(s) for Python script that classifies entities.\")\n parser.add_argument('--export_classification', type=str, nargs=\"+\",\n default=None,\n help=\"Location to save the result of the entity classification.\")\n parser.add_argument('--num_names_to_load', type=int, default=20000000,\n help=\"Number of names to load from disk to accelerate reporting.\")\n parser.add_argument('--language_path', type=str, default=None,\n help=\"Location of a language-wikipedia specific information set to \"\n \"provide language/wikipedia specific metrics.\")\n parser.add_argument('--interactive', action=\"store_true\", default=True,\n help=\"Operate in a REPL. 
Reload scripts on errors or on user prompt.\")\n parser.add_argument('--nointeractive', action=\"store_false\",\n dest=\"interactive\", help=\"Run classification without REPL.\")\n parser.add_argument('--use-cache', action=\"store_true\",\n dest=\"use_cache\", help=\"store satisfies in cache.\")\n parser.add_argument('--nouse-cache', action=\"store_false\",\n dest=\"use_cache\", help=\"not store satisfies in cache.\")\n return parser.parse_args()\n\n\ndef get_other_class(classification):\n if len(classification) == 0:\n return None\n return np.logical_not(logical_ors(\n list(classification.values())\n ))\n\n\ndef export_classification(classification, path):\n classes = sorted(list(classification.keys()))\n if len(classes) == 0:\n return\n makedirs(path, exist_ok=True)\n num_items = classification[classes[0]].shape[0]\n classid = np.zeros(num_items, dtype=np.int32)\n selected = np.zeros(num_items, dtype=np.bool)\n for index, classname in enumerate(classes):\n truth_table = classification[classname]\n selected = selected | truth_table\n classid = np.maximum(classid, truth_table.astype(np.int32) * index)\n\n other = np.logical_not(selected)\n if other.sum() > 0:\n classes_with_other = classes + [\"other\"]\n classid = np.maximum(classid, other.astype(np.int32) * len(classes))\n else:\n classes_with_other = classes\n\n with open(join(path, \"classes.txt\"), \"wt\") as fout:\n for classname in classes_with_other:\n fout.write(classname + \"\\n\")\n\n np.save(join(path, \"classification.npy\"), classid)\n\n\ndef main():\n args = parse_args()\n should_export = args.export_classification is not None\n if should_export and len(args.export_classification) != len(args.classifiers):\n raise ValueError(\"Must have as many export filenames as classifiers.\")\n collection = TypeCollection(\n args.wikidata,\n num_names_to_load=args.num_names_to_load,\n language_path=args.language_path,\n cache=args.use_cache\n )\n if args.interactive:\n alert_failure = enter_or_quit\n else:\n alert_failure = lambda: sys.exit(1)\n\n while True:\n try:\n collection.load_blacklist(join(SCRIPT_DIR, \"blacklist.json\"))\n except (ValueError,) as e:\n print(\"Issue reading blacklist, please fix.\")\n print(str(e))\n alert_failure()\n continue\n\n classifications = []\n for class_idx, classifier_fname in enumerate(args.classifiers):\n while True:\n try:\n classifier = reload_module(classifier_fname)\n except ALLOWED_IMPORT_ERRORS as e:\n print(\"issue reading %r, please fix.\" % (classifier_fname,))\n print(str(e))\n traceback.print_exc(file=sys.stdout)\n alert_failure()\n continue\n\n try:\n t0 = time.time()\n classification = classifier.classify(collection)\n classifications.append(classification)\n if class_idx == len(args.classifiers) - 1:\n collection.reset_cache()\n t1 = time.time()\n print(\"classification took %.3fs\" % (t1 - t0,))\n except ALLOWED_RUNTIME_ERRORS as e:\n print(\"issue running %r, please fix.\" % (classifier_fname,))\n print(str(e))\n traceback.print_exc(file=sys.stdout)\n alert_failure()\n continue\n break\n try:\n # show cardinality for each truth table:\n if args.interactive:\n mega_other_class = None\n for classification in classifications:\n for classname in sorted(classification.keys()):\n print(\"%r: %d members\" % (classname, int(classification[classname].sum())))\n print(\"\")\n summary = {}\n for classname, truth_table in classification.items():\n (members,) = np.where(truth_table)\n summary[classname] = [collection.get_name(int(member)) for member in members[:20]]\n print(json.dumps(summary, 
indent=4))\n\n other_class = get_other_class(classification)\n if other_class.sum() > 0:\n # there are missing items:\n to_report = (\n classifier.class_report if hasattr(classifier, \"class_report\") else\n [wprop.SUBCLASS_OF, wprop.INSTANCE_OF, wprop.OCCUPATION, wprop.CATEGORY_LINK]\n )\n collection.class_report(to_report, other_class, name=\"Other\")\n if mega_other_class is None:\n mega_other_class = other_class\n else:\n mega_other_class = np.logical_and(mega_other_class, other_class)\n if len(classifications) > 1:\n if mega_other_class.sum() > 0:\n # there are missing items:\n to_report = [wprop.SUBCLASS_OF, wprop.INSTANCE_OF, wprop.OCCUPATION, wprop.CATEGORY_LINK]\n collection.class_report(to_report, mega_other_class, name=\"Other-combined\")\n if should_export:\n assert(len(classifications) == len(args.export_classification)), (\n \"classification outputs missing for export.\"\n )\n for classification, savename in zip(classifications, args.export_classification):\n export_classification(classification, savename)\n except KeyboardInterrupt as e:\n pass\n\n if args.interactive:\n enter_or_quit()\n else:\n break\n\n\nif __name__ == \"__main__\":\n main()\n\n\n"}}},{"rowIdx":39,"cells":{"python_code":{"kind":"string","value":"import argparse\n\nfrom os import remove\nfrom wikidata_linker_utils.bash import execute_bash\nimport h5py\n\n\ndef produce_window_dataset(path, window_size, out):\n num_columns = 0\n with open(path, \"rt\") as fin:\n line_locations = []\n for idx, line in enumerate(fin):\n if \"\\t\" in line:\n line_locations.append(idx)\n if num_columns == 0:\n num_columns = len(line.split(\"\\t\"))\n if line == \"\\n\":\n line_locations.append(-1)\n groups = []\n current_group = []\n\n max_buffer_size = 250000\n read_size = 100000\n seen_classes = {}\n\n for line_location in line_locations:\n if line_location == -1:\n if len(current_group) > 0:\n groups.append(current_group)\n current_group = []\n else:\n if len(current_group) == 0:\n current_group.append(line_location)\n elif abs(current_group[-1] - line_location) <= window_size:\n current_group.append(line_location)\n else:\n groups.append(current_group)\n current_group = [line_location]\n if len(current_group) > 0:\n groups.append(current_group)\n\n num_examples = len(groups)\n EMPTY = \"\"\n\n with h5py.File(out, \"w\") as handle:\n datasets = []\n for col in range(num_columns):\n datasets.append(\n handle.create_dataset(\n str(col),\n (num_examples,),\n dtype=h5py.special_dtype(vlen=str),\n chunks=(1500,)\n # compression=\"gzip\",\n # compression_opts=9\n )\n )\n k = 0\n with open(path, \"rt\") as fin:\n current_location = 0\n current_lines = fin.readlines(read_size)\n current_end = current_location + len(current_lines)\n for group in groups:\n start = max(0, group[0] - window_size)\n end = group[-1] + window_size\n if end > current_end:\n # read more lines into buffer:\n current_lines = current_lines + fin.readlines(read_size)\n # advance buffer max location\n current_end = current_location + len(current_lines)\n if len(current_lines) > max_buffer_size:\n # compute how much to remove from buffer\n to_chop = len(current_lines) - max_buffer_size\n # move start location\n current_location += to_chop\n # remove extra buffer lines\n current_lines = current_lines[to_chop:]\n # ensure that we do not cross white space boundaries\n start_delay = 0\n for idx, line in enumerate(current_lines[start - current_location:group[0] - current_location]):\n if line == \"\\n\":\n start_delay = idx\n start += start_delay\n early_end = 
window_size\n for idx, line in enumerate(current_lines[group[-1] - current_location:end - current_location]):\n if line == \"\\n\":\n early_end = idx\n break\n end = group[-1] + early_end\n cols = [[] for i in range(num_columns)]\n for line in current_lines[start - current_location:end - current_location]:\n vals = line.rstrip().split(\"\\t\")\n for col_index in range(num_columns):\n if len(vals) > col_index:\n cols[col_index].append(vals[col_index])\n else:\n cols[col_index].append(EMPTY)\n for col_index, dataset in zip(cols, datasets):\n dataset[k] = \"\\n\".join(col_index)\n k += 1\n\n\ndef file_slice(path, start, end, destination, append):\n file_operator = \">>\" if append else \">\"\n delta = end - start\n command = \"head -n %d %s | tail -n %d %s %s\" % (\n end,\n path,\n delta,\n file_operator,\n destination\n )\n execute_bash(command)\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path\")\n parser.add_argument(\"out_train\")\n parser.add_argument(\"out_validation\")\n parser.add_argument(\"--window_size\", type=int, default=5)\n parser.add_argument(\"--total_size\", type=int, required=True)\n parser.add_argument(\"--validation_start\", type=int, required=True)\n parser.add_argument(\"--validation_size\", type=int, default=500000)\n return parser.parse_args(args=args)\n\n\ndef main():\n args = parse_args()\n if args.total_size < args.validation_size:\n raise ValueError(\"cannot have total_size (%d) < validation_size \"\n \"(%d)\" % (args.total_size, args.validation_size))\n if args.validation_start > args.total_size:\n raise ValueError(\"cannot have validation_start (%d) begin after \"\n \"total_size (%d)\" % (args.validation_start, args.total_size))\n if args.validation_start + args.validation_size > args.total_size:\n raise ValueError(\"cannot have validation_start + validation_size (%d)\"\n \" be larger than total_size (%d)\" % (\n args.validation_start + args.validation_size, args.total_size\n ))\n train_temp = args.out_train + \".train_temp\"\n try:\n file_slice(\n args.path,\n 0,\n args.validation_start,\n train_temp,\n append=False\n )\n file_slice(\n args.path,\n args.validation_start + args.validation_size,\n args.total_size,\n train_temp,\n append=True\n )\n print(\"created temp file %s\" % (train_temp))\n produce_window_dataset(\n train_temp, args.window_size, args.out_train\n )\n print(\"created windowed dataset for train\")\n finally:\n print(\"removing temp file %s\" % (train_temp))\n remove(train_temp)\n\n\n try:\n validation_temp = args.out_validation + \".validation_temp\"\n file_slice(\n args.path,\n args.validation_start,\n args.validation_start + args.validation_size,\n validation_temp,\n append=False\n )\n print(\"created temp file %s\" % (validation_temp))\n produce_window_dataset(validation_temp, args.window_size, args.out_validation)\n print(\"created windowed dataset for validation\")\n finally:\n print(\"removing temp file %s\" % (validation_temp))\n remove(validation_temp)\n\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":40,"cells":{"python_code":{"kind":"string","value":"import json\nimport argparse\nimport time\nimport random\nimport numpy as np\n\nfrom evaluate_type_system import fix_and_parse_tags\n\nfrom wikidata_linker_utils.json import load_config\nfrom wikidata_linker_utils.type_collection import TypeCollection\nfrom wikidata_linker_utils.progressbar import get_progress_bar\nfrom wikidata_linker_utils.wikipedia import induce_wikipedia_prefix\nfrom os.path import realpath, dirname, join, 
exists\nfrom wikidata_linker_utils.fast_disambiguate import (\n beam_project, cem_project, ga_project\n)\n\nSCRIPT_DIR = dirname(realpath(__file__))\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\", type=str)\n parser.add_argument(\"out\", type=str)\n parser.add_argument(\"--relative_to\", default=None, type=str)\n parser.add_argument(\"--penalty\", default=0.0005, type=float)\n parser.add_argument(\"--beam_width\", default=8, type=float)\n parser.add_argument(\"--beam_search_subset\", default=2000, type=int)\n parser.add_argument(\"--log\", default=None, type=str)\n parser.add_argument(\"--samples\", type=int, default=1000)\n parser.add_argument(\"--ngen\", type=int, default=40)\n parser.add_argument(\"--method\", type=str,\n choices=[\"cem\", \"greedy\", \"beam\", \"ga\"],\n default=\"greedy\")\n return parser.parse_args(args=args)\n\n\ndef load_aucs():\n paths = [\n \"/home/jonathanraiman/en_field_auc_w10_e10.json\",\n \"/home/jonathanraiman/en_field_auc_w10_e10-s1234.json\",\n \"/home/jonathanraiman/en_field_auc_w5_e5.json\",\n \"/home/jonathanraiman/en_field_auc_w5_e5-s1234.json\"\n ]\n aucs = {}\n for path in paths:\n with open(path, \"rt\") as fin:\n auc_report = json.load(fin)\n for report in auc_report:\n key = (report[\"qid\"], report[\"relation\"])\n if key in aucs:\n aucs[key].append(report[\"auc\"])\n else:\n aucs[key] = [report[\"auc\"]]\n for key in aucs.keys():\n aucs[key] = np.mean(aucs[key])\n return aucs\n\ndef greedy_disambiguate(tags):\n greedy_correct = 0\n total = 0\n for dest, other_dest, times_pointed in tags:\n total += 1\n if len(other_dest) == 1 and dest == other_dest[0]:\n greedy_correct += 1\n elif other_dest[np.argmax(times_pointed)] == dest:\n greedy_correct += 1\n return greedy_correct, total\n\n\ndef fast_disambiguate(tags, all_classifications):\n correct = 0\n total = 0\n for dest, other_dest, times_pointed in tags:\n total += 1\n if len(other_dest) == 1 and dest == other_dest[0]:\n correct += 1\n else:\n identities = np.all(all_classifications[other_dest, :] == all_classifications[dest, :], axis=1)\n matches = other_dest[identities]\n matches_counts = times_pointed[identities]\n if len(matches) == 1 and matches[0] == dest:\n correct += 1\n elif matches[np.argmax(matches_counts)] == dest:\n correct += 1\n return correct, total\n\n\ndef get_prefix(config):\n return config.prefix or induce_wikipedia_prefix(config.wiki)\n\n\nMAX_PICKS = 400.0\n\ndef rollout(cached_satisfy, key2row, tags, aucs, ids, sample,\n penalty, greedy_correct):\n mean_auc = 0.0\n sample_sum = sample.sum()\n if sample_sum == 0:\n total = len(tags)\n return (greedy_correct / total,\n greedy_correct / total)\n if sample_sum > MAX_PICKS:\n return 0.0, 0.0\n all_classifications = None\n if sample_sum > 0:\n all_classifications = np.zeros((len(ids), int(sample_sum)), dtype=np.bool)\n col = 0\n for picked, (key, auc) in zip(sample, aucs):\n if picked:\n all_classifications[:, col] = cached_satisfy[key2row[key]]\n col += 1\n mean_auc += auc\n mean_auc = mean_auc / sample_sum\n correct, total = fast_disambiguate(tags, all_classifications)\n # here's the benefit of using types:\n improvement = correct - greedy_correct\n # penalty for using unreliable types:\n objective = (\n (greedy_correct + improvement * mean_auc) / total -\n # number of items is penalized\n sample_sum * penalty\n )\n return objective, correct / total\n\n\ndef get_cached_satisfy(collection, aucs, ids, mmap=False):\n path = join(SCRIPT_DIR, \"cached_satisfy.npy\")\n if not 
exists(path):\n cached_satisfy = np.zeros((len(aucs), len(ids)), dtype=np.bool)\n for row, (qid, relation_name) in get_progress_bar(\"satisfy\", item=\"types\")(enumerate(sorted(aucs.keys()))):\n cached_satisfy[row, :] = collection.satisfy([relation_name], [collection.name2index[qid]])[ids]\n collection._satisfy_cache.clear()\n np.save(path, cached_satisfy)\n if mmap:\n del cached_satisfy\n cached_satisfy = np.load(path, mmap_mode=\"r\")\n else:\n if mmap:\n cached_satisfy = np.load(path, mmap_mode=\"r\")\n else:\n cached_satisfy = np.load(path)\n return cached_satisfy\n\n\ndef main():\n args = parse_args()\n config = load_config(\n args.config,\n [\"wiki\",\n \"language_path\",\n \"wikidata\",\n \"redirections\",\n \"classification\"],\n defaults={\n \"num_names_to_load\": 0,\n \"prefix\": None,\n \"sample_size\": 100,\n \"wiki\": None,\n \"fix_links\": False,\n \"min_count\": 0,\n \"min_percent\": 0.0\n },\n relative_to=args.relative_to\n )\n if config.wiki is None:\n raise ValueError(\"must provide path to 'wiki' in config.\")\n prefix = get_prefix(config)\n collection = TypeCollection(\n config.wikidata,\n num_names_to_load=config.num_names_to_load,\n prefix=prefix,\n verbose=True\n )\n collection.load_blacklist(join(SCRIPT_DIR, \"blacklist.json\"))\n\n fname = config.wiki\n test_tags = fix_and_parse_tags(config,\n collection,\n config.sample_size)\n aucs = load_aucs()\n ids = sorted(set([idx for doc_tags in test_tags\n for _, tag in doc_tags if tag is not None\n for idx in tag[2] if len(tag[2]) > 1]))\n id2pos = {idx: k for k, idx in enumerate(ids)}\n # use reduced identity system:\n remapped_tags = []\n for doc_tags in test_tags:\n for text, tag in doc_tags:\n if tag is not None:\n remapped_tags.append(\n (id2pos[tag[1]] if len(tag[2]) > 1 else tag[1],\n np.array([id2pos[idx] for idx in tag[2]]) if len(tag[2]) > 1 else tag[2],\n tag[3]))\n test_tags = remapped_tags\n\n aucs = {key: value for key, value in aucs.items() if value > 0.5}\n print(\"%d relations to pick from with %d ids.\" % (len(aucs), len(ids)), flush=True)\n cached_satisfy = get_cached_satisfy(collection, aucs, ids, mmap=args.method==\"greedy\")\n del collection\n key2row = {key: k for k, key in enumerate(sorted(aucs.keys()))}\n\n if args.method == \"greedy\":\n picks, _ = beam_project(\n cached_satisfy,\n key2row,\n remapped_tags,\n aucs,\n ids,\n beam_width=1,\n penalty=args.penalty,\n log=args.log\n )\n elif args.method == \"beam\":\n picks, _ = beam_project(\n cached_satisfy,\n key2row,\n remapped_tags,\n aucs,\n ids,\n beam_width=args.beam_width,\n penalty=args.penalty,\n log=args.log\n )\n elif args.method == \"cem\":\n picks, _ = cem_project(\n cached_satisfy,\n key2row,\n remapped_tags,\n aucs,\n ids,\n n_samples=args.samples,\n penalty=args.penalty,\n log=args.log\n )\n elif args.method == \"ga\":\n picks, _ = ga_project(\n cached_satisfy,\n key2row,\n remapped_tags,\n aucs,\n ids,\n ngen=args.ngen,\n n_samples=args.samples,\n penalty=args.penalty,\n log=args.log\n )\n else:\n raise ValueError(\"unknown method %r.\" % (args.method,))\n with open(args.out, \"wt\") as fout:\n json.dump(picks, fout)\n\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":41,"cells":{"python_code":{"kind":"string","value":"import sys\nimport pickle\nimport argparse\nimport requests\nimport marisa_trie\nimport traceback\nimport numpy as np\n\nfrom os.path import join, dirname, realpath, exists\nfrom os import stat\nfrom collections import Counter\nfrom itertools import product\n\nfrom wikidata_linker_utils.anchor_filtering import 
clean_up_trie_source, acceptable_anchor\nfrom wikidata_linker_utils.wikipedia import (\n load_wikipedia_docs, induce_wikipedia_prefix, load_redirections, transition_trie_index\n)\nfrom wikidata_linker_utils.json import load_config\n\nfrom wikidata_linker_utils.offset_array import OffsetArray\nfrom wikidata_linker_utils.repl import reload_run_retry, enter_or_quit\nfrom wikidata_linker_utils.progressbar import get_progress_bar\nfrom wikidata_linker_utils.type_collection import TypeCollection, get_name as web_get_name\n\n\nSCRIPT_DIR = dirname(realpath(__file__))\nPROJECT_DIR = dirname(SCRIPT_DIR)\n\nINTERNET = True\n\ndef maybe_web_get_name(s):\n global INTERNET\n if INTERNET:\n try:\n res = web_get_name(s)\n return res\n except requests.exceptions.ConnectionError:\n INTERNET = False\n return s\n\n\nclass OracleClassification(object):\n def __init__(self, classes, classification, path):\n self.classes = classes\n self.classification = classification\n self.path = path\n self.contains_other = self.classes[-1] == \"other\"\n\n def classify(self, index):\n return self.classification[index]\n\ndef load_oracle_classification(path):\n with open(join(path, \"classes.txt\"), \"rt\") as fin:\n classes = fin.read().splitlines()\n classification = np.load(join(path, \"classification.npy\"))\n return OracleClassification(classes, classification, path)\n\n\ndef can_disambiguate(oracles, truth, alternatives,\n times_pointed, count_threshold,\n ignore_other=False, keep_other=False):\n ambig = np.ones(len(alternatives), dtype=np.bool)\n for oracle in oracles:\n truth_pred = oracle.classify(truth)\n alt_preds = oracle.classify(alternatives)\n if keep_other and oracle.contains_other:\n if truth_pred == len(oracle.classes) - 1:\n continue\n else:\n ambig = np.logical_and(\n ambig,\n np.logical_or(\n np.equal(alt_preds, truth_pred),\n np.equal(alt_preds, len(oracle.classes) - 1)\n )\n )\n elif ignore_other and oracle.contains_other and np.any(alt_preds == len(oracle.classes) - 1):\n continue\n else:\n ambig = np.logical_and(ambig, np.equal(alt_preds, truth_pred))\n\n # apply type rules to disambiguate:\n alternatives_matching_type = alternatives[ambig]\n alternatives_matching_type_times_pointed = times_pointed[ambig]\n\n if len(alternatives_matching_type) <= 1:\n return alternatives_matching_type, alternatives_matching_type_times_pointed, False\n\n # apply rules for count thresholding:\n ordered_times_pointed = np.argsort(alternatives_matching_type_times_pointed)[::-1]\n top1count = alternatives_matching_type_times_pointed[ordered_times_pointed[0]]\n top2count = alternatives_matching_type_times_pointed[ordered_times_pointed[1]]\n if top1count > top2count + count_threshold and alternatives_matching_type[ordered_times_pointed[0]] == truth:\n return (\n alternatives_matching_type[ordered_times_pointed[0]:ordered_times_pointed[0]+1],\n alternatives_matching_type_times_pointed[ordered_times_pointed[0]:ordered_times_pointed[0]+1],\n True\n )\n return alternatives_matching_type, alternatives_matching_type_times_pointed, False\n\n\ndef disambiguate(tags, oracles):\n ambiguous = 0\n obvious = 0\n disambiguated_oracle = 0\n disambiguated_with_counts = 0\n disambiguated_greedy = 0\n disambiguated_with_background = 0\n count_threshold = 0\n ambiguous_tags = []\n obvious_tags = []\n non_obvious_tags = []\n\n disambiguated_oracle_ignore_other = 0\n disambiguated_oracle_keep_other = 0\n\n for text, tag in tags:\n if tag is None:\n continue\n anchor, dest, other_dest, times_pointed = tag\n if len(other_dest) == 1:\n obvious += 
1\n obvious_tags.append((anchor, dest, other_dest, times_pointed))\n else:\n ambiguous += 1\n non_obvious_tags.append((anchor, dest, other_dest, times_pointed))\n\n if other_dest[np.argmax(times_pointed)] == dest:\n disambiguated_greedy += 1\n\n matching_tags, times_pointed_subset, used_counts = can_disambiguate(\n oracles, dest, other_dest, times_pointed, count_threshold\n )\n if len(matching_tags) <= 1:\n if used_counts:\n disambiguated_with_counts += 1\n else:\n disambiguated_oracle += 1\n else:\n ambiguous_tags.append(\n (anchor, dest, matching_tags, times_pointed_subset)\n )\n\n matching_tags, times_pointed_subset, used_counts = can_disambiguate(\n oracles, dest, other_dest, times_pointed, count_threshold, ignore_other=True\n )\n if len(matching_tags) <= 1:\n disambiguated_oracle_ignore_other += 1\n\n matching_tags, times_pointed_subset, used_counts = can_disambiguate(\n oracles, dest, other_dest, times_pointed, count_threshold, keep_other=True\n )\n if len(matching_tags) <= 1:\n disambiguated_oracle_keep_other += 1\n\n report = {\n \"ambiguous\": ambiguous,\n \"obvious\": obvious,\n \"disambiguated oracle\": disambiguated_oracle,\n \"disambiguated greedy\": disambiguated_greedy,\n \"disambiguated oracle + counts\": disambiguated_oracle + disambiguated_with_counts,\n \"disambiguated oracle + counts + ignore other\": disambiguated_oracle_ignore_other,\n \"disambiguated oracle + counts + keep other\": disambiguated_oracle_keep_other\n }\n return (report, ambiguous_tags)\n\n\ndef disambiguate_batch(test_tags, train_tags, oracles):\n test_tags = test_tags\n total_report = {}\n ambiguous_tags = []\n for tags in get_progress_bar(\"disambiguating\", item=\"articles\")(test_tags):\n report, remainder = disambiguate(tags, oracles)\n ambiguous_tags.extend(remainder)\n for key, value in report.items():\n if key not in total_report:\n total_report[key] = value\n else:\n total_report[key] += value\n return total_report, ambiguous_tags\n\n\ndef obtain_tags(doc,\n wiki_trie,\n anchor_trie,\n trie_index2indices,\n trie_index2indices_counts,\n trie_index2indices_transitions,\n redirections,\n prefix,\n collection,\n first_names,\n min_count,\n min_percent):\n out_doc = []\n for anchor, dest_index in doc.links(wiki_trie, redirections, prefix):\n if dest_index is None:\n out_doc.append((anchor, None))\n continue\n anchor_stripped = anchor.strip()\n keep = False\n if len(anchor_stripped) > 0:\n anchor_stripped = clean_up_trie_source(anchor_stripped)\n if acceptable_anchor(anchor_stripped, anchor_trie, first_names):\n anchor_idx = anchor_trie[anchor_stripped]\n all_options = trie_index2indices[anchor_idx]\n all_counts = trie_index2indices_counts[anchor_idx]\n if len(all_options) > 0:\n if trie_index2indices_transitions is not None:\n old_dest_index = dest_index\n dest_index = transition_trie_index(\n anchor_idx, dest_index,\n trie_index2indices_transitions,\n all_options\n )\n if dest_index != -1:\n new_dest_index = dest_index\n keep = True\n if keep and (min_count > 0 or min_percent > 0):\n dest_count = all_counts[all_options==new_dest_index]\n if dest_count < min_count or (dest_count / sum(all_counts)) < min_percent:\n keep = False\n\n if keep:\n out_doc.append(\n (\n anchor,\n (anchor_stripped, new_dest_index, all_options, all_counts)\n )\n )\n if not keep:\n out_doc.append((anchor, None))\n return out_doc\n\n\ndef add_boolean(parser, name, default):\n parser.add_argument(\"--%s\" % (name,), action=\"store_true\", default=default)\n parser.add_argument(\"--no%s\" % (name,), action=\"store_false\", 
dest=name)\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\")\n parser.add_argument(\"--relative_to\", type=str, default=None)\n parser.add_argument(\"--log\", type=str, default=None)\n add_boolean(parser, \"verbose\", True)\n add_boolean(parser, \"interactive\", True)\n return parser\n\n\ndef parse_args(args=None):\n return get_parser().parse_args(args=args)\n\n\ndef summarize_disambiguation(total_report, file=None):\n if file is None:\n file = sys.stdout\n if total_report.get(\"ambiguous\", 0) > 0:\n for key, value in sorted(total_report.items(), key=lambda x : x[1]):\n if \"disambiguated\" in key:\n print(\"%.3f%% disambiguated by %s (%d / %d)\" % (\n 100.0 * value / total_report[\"ambiguous\"],\n key[len(\"disambiguated\"):].strip(),\n value, total_report[\"ambiguous\"]\n ), file=file\n )\n print(\"\", file=file)\n for key, value in sorted(total_report.items(), key=lambda x : x[1]):\n if \"disambiguated\" in key:\n print(\"%.3f%% disambiguated by %s [including single choice] (%d / %d)\" % (\n 100.0 * (\n (value + total_report[\"obvious\"]) /\n (total_report[\"ambiguous\"] + total_report[\"obvious\"])\n ),\n key[len(\"disambiguated\"):].strip(),\n value + total_report[\"obvious\"],\n total_report[\"ambiguous\"] + total_report[\"obvious\"]\n ), file=file\n )\n print(\"\", file=file)\n\n\ndef summarize_ambiguities(ambiguous_tags,\n oracles,\n get_name):\n class_ambiguities = {}\n for anchor, dest, other_dest, times_pointed in ambiguous_tags:\n class_ambig_name = []\n for oracle in oracles:\n class_ambig_name.append(oracle.classes[oracle.classify(dest)])\n class_ambig_name = \" and \".join(class_ambig_name)\n if class_ambig_name not in class_ambiguities:\n class_ambiguities[class_ambig_name] = {\n \"count\": 1,\n \"examples\": [(anchor, dest, other_dest, times_pointed)]\n }\n else:\n class_ambiguities[class_ambig_name][\"count\"] += 1\n class_ambiguities[class_ambig_name][\"examples\"].append((anchor, dest, other_dest, times_pointed))\n print(\"Ambiguity Report:\")\n for classname, ambiguity in sorted(class_ambiguities.items(), key=lambda x: x[0]):\n print(\" %s\" % (classname,))\n print(\" %d ambiguities\" % (ambiguity[\"count\"],))\n\n common_bad_anchors = Counter([anc for anc, _, _, _ in ambiguity[\"examples\"]]).most_common(6)\n anchor2example = {anc: (dest, other_dest, times_pointed) for anc, dest, other_dest, times_pointed in ambiguity[\"examples\"]}\n\n for bad_anchor, count in common_bad_anchors:\n dest, other_dest, times_pointed = anchor2example[bad_anchor]\n truth_times_pointed = int(times_pointed[np.equal(other_dest, dest)])\n only_alt = [(el, int(times_pointed[k])) for k, el in enumerate(other_dest) if el != dest]\n only_alt = sorted(only_alt, key=lambda x: x[1], reverse=True)\n print(\" %r (%d time%s)\" % (bad_anchor, count, 's' if count != 1 else ''))\n print(\" Actual: %r\" % ((get_name(dest), truth_times_pointed),))\n print(\" Others: %r\" % ([(get_name(el), c) for (el, c) in only_alt[:5]]))\n print(\"\")\n print(\"\")\n\n\ndef get_prefix(config):\n return config.prefix or induce_wikipedia_prefix(config.wiki)\n\n\n\ndef fix_and_parse_tags(config, collection, size):\n trie_index2indices = OffsetArray.load(\n join(config.language_path, \"trie_index2indices\"),\n compress=True\n )\n trie_index2indices_counts = OffsetArray(\n np.load(join(config.language_path, \"trie_index2indices_counts.npy\")),\n trie_index2indices.offsets\n )\n if exists(join(config.language_path, \"trie_index2indices_transition_values.npy\")):\n 
trie_index2indices_transitions = OffsetArray(\n np.load(join(config.language_path, \"trie_index2indices_transition_values.npy\")),\n np.load(join(config.language_path, \"trie_index2indices_transition_offsets.npy\")),\n )\n else:\n trie_index2indices_transitions = None\n\n\n anchor_trie = marisa_trie.Trie().load(join(config.language_path, \"trie.marisa\"))\n wiki_trie = marisa_trie.RecordTrie('i').load(\n join(config.wikidata, \"wikititle2wikidata.marisa\")\n )\n prefix = get_prefix(config)\n redirections = load_redirections(config.redirections)\n docs = load_wikipedia_docs(config.wiki, size)\n\n while True:\n try:\n collection.load_blacklist(join(SCRIPT_DIR, \"blacklist.json\"))\n except (ValueError,) as e:\n print(\"issue reading blacklist, please fix.\")\n print(str(e))\n enter_or_quit()\n continue\n break\n\n print(\"Load first_names\")\n with open(join(PROJECT_DIR, \"data\", \"first_names.txt\"), \"rt\") as fin:\n first_names = set(fin.read().splitlines())\n\n all_tags = []\n for doc in get_progress_bar('fixing links', item='article')(docs):\n tags = obtain_tags(\n doc,\n wiki_trie=wiki_trie,\n anchor_trie=anchor_trie,\n trie_index2indices=trie_index2indices,\n trie_index2indices_counts=trie_index2indices_counts,\n trie_index2indices_transitions=trie_index2indices_transitions,\n redirections=redirections,\n prefix=prefix,\n first_names=first_names,\n collection=collection,\n min_count=config.min_count,\n min_percent=config.min_percent)\n if any(x is not None for _, x in tags):\n all_tags.append(tags)\n collection.reset_cache()\n return all_tags\n\n\ndef main():\n args = parse_args()\n config = load_config(args.config,\n [\"wiki\",\n \"language_path\",\n \"wikidata\",\n \"redirections\",\n \"classification\",\n \"path\"],\n defaults={\"num_names_to_load\": 0,\n \"prefix\": None,\n \"sample_size\": 100,\n \"wiki\": None,\n \"min_count\": 0,\n \"min_percent\": 0.0},\n relative_to=args.relative_to)\n if config.wiki is None:\n raise ValueError(\"must provide path to 'wiki' in config.\")\n prefix = get_prefix(config)\n\n print(\"Load type_collection\")\n collection = TypeCollection(\n config.wikidata,\n num_names_to_load=config.num_names_to_load,\n prefix=prefix,\n verbose=True)\n\n fname = config.wiki\n all_tags = fix_and_parse_tags(config, collection, config.sample_size)\n test_tags = all_tags[:config.sample_size]\n train_tags = all_tags[config.sample_size:]\n\n oracles = [load_oracle_classification(classification)\n for classification in config.classification]\n\n def get_name(idx):\n if idx < config.num_names_to_load:\n if idx in collection.known_names:\n return collection.known_names[idx] + \" (%s)\" % (collection.ids[idx],)\n else:\n return collection.ids[idx]\n else:\n return maybe_web_get_name(collection.ids[idx]) + \" (%s)\" % (collection.ids[idx],)\n\n while True:\n total_report, ambiguous_tags = disambiguate_batch(\n test_tags, train_tags, oracles)\n summarize_disambiguation(total_report)\n if args.log is not None:\n with open(args.log, \"at\") as fout:\n summarize_disambiguation(total_report, file=fout)\n if args.verbose:\n try:\n summarize_ambiguities(\n ambiguous_tags,\n oracles,\n get_name\n )\n except KeyboardInterrupt as e:\n pass\n if args.interactive:\n enter_or_quit()\n else:\n break\n\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":42,"cells":{"python_code":{"kind":"string","value":"import argparse\nimport marisa_trie\n\nimport numpy as np\n\nfrom os.path import join\n\nfrom wikidata_linker_utils.progressbar import get_progress_bar\nfrom 
wikidata_linker_utils.bash import count_lines\nfrom wikidata_linker_utils.offset_array import save_record_with_offset\n\n\ndef parse_args(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"wikipedia2wikidata_trie\",\n help=\"Location of wikipedia -> wikidata mapping trie.\")\n parser.add_argument(\"wikidata_ids\")\n parser.add_argument(\"prefix\")\n parser.add_argument(\"category_links\")\n parser.add_argument(\"out\")\n return parser.parse_args(argv)\n\ndef main():\n args = parse_args()\n trie = marisa_trie.RecordTrie('i').load(args.wikipedia2wikidata_trie)\n print('loaded trie')\n\n num_lines = count_lines(args.category_links)\n num_ids = count_lines(args.wikidata_ids)\n missing = []\n num_missing = 0\n num_broken = 0\n all_category_links = [[] for i in range(num_ids)]\n with open(args.category_links, 'rt') as fin:\n fin_pbar = get_progress_bar('reading category_links', max_value=num_lines)(fin)\n for line in fin_pbar:\n try:\n origin, dest = line.rstrip('\\n').split('\\t')\n except:\n num_broken += 1\n continue\n if len(dest) == 0:\n num_broken += 1\n continue\n origin = args.prefix + '/' + origin\n prefixed_dest = args.prefix + '/' + dest\n origin_index = trie.get(origin, None)\n dest_index = trie.get(prefixed_dest, None)\n\n if dest_index is None:\n prefixed_dest = args.prefix + '/' + dest[0].upper() + dest[1:]\n dest_index = trie.get(prefixed_dest, None)\n\n if origin_index is None or dest_index is None:\n missing.append((origin, prefixed_dest))\n num_missing += 1\n else:\n all_category_links[origin_index[0][0]].append(dest_index[0][0])\n\n print(\"%d/%d category links could not be found in wikidata\" % (num_missing, num_lines))\n print(\"%d/%d category links were malformed\" % (num_broken, num_lines))\n print(\"Missing links sample:\")\n for origin, dest in missing[:10]:\n print(\"%r -> %r\" % (origin, dest))\n save_record_with_offset(\n join(args.out, \"wikidata_%s_category_links\" % (args.prefix,)),\n all_category_links\n )\n\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":43,"cells":{"python_code":{"kind":"string","value":"import argparse\nimport time\nimport marisa_trie\nimport numpy as np\nimport pandas\n\nfrom os.path import join, realpath, dirname\nfrom os import makedirs\n\nfrom wikidata_linker_utils.wikidata_iterator import open_wikidata_file\nfrom wikidata_linker_utils.file import true_exists\nfrom wikidata_linker_utils.bash import count_lines\nfrom wikidata_linker_utils.progressbar import get_progress_bar\nfrom wikidata_linker_utils.offset_array import save_record_with_offset\nfrom wikidata_linker_utils.wikidata_ids import (\n load_wikidata_ids, load_names, property_names, temporal_property_names\n)\nimport wikidata_linker_utils.wikidata_properties as wikidata_properties\n\nSCRIPT_DIR = dirname(realpath(__file__))\nPROJECT_DIR = dirname(SCRIPT_DIR)\n\nWIKITILE_2_WIKIDATA_TRIE_NAME = \"wikititle2wikidata.marisa\"\nWIKITILE_2_WIKIDATA_TSV_NAME = \"wikidata_wikititle2wikidata.tsv\"\nWIKIDATA_IDS_NAME = \"wikidata_ids.txt\"\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"wikidata_dump\", type=str,\n help=\"Path to wikidata dump file.\")\n parser.add_argument(\"wikidata\", type=str,\n help=\"Path to save location for wikidata properties.\")\n parser.add_argument(\"--batch_size\", type=int, default=1000)\n return parser.parse_args(args=args)\n\n\ndef get_related_nested_field(doc_claims, nested_field):\n out = []\n for claim in doc_claims:\n mainsnak = claim.get(\"mainsnak\", None)\n if mainsnak is 
None:\n continue\n datavalue = mainsnak.get(\"datavalue\", None)\n if datavalue is None:\n continue\n value = datavalue.get(\"value\", None)\n if value is None:\n continue\n value_id = value.get(nested_field, None)\n if value_id is None:\n continue\n out.append(value_id)\n return out\n\n\ndef get_related_entities(doc_claims):\n return get_related_nested_field(doc_claims, \"id\")\n\n\ndef get_claim_time(doc_claims):\n return get_related_nested_field(doc_claims, \"time\")\n\n\ndef get_wikidata_mapping(name2id_path,\n wikidata_ids_path,\n jsons,\n relation_names,\n verbose=False):\n approx_max_quantity = 24642416\n if verbose:\n pbar = None\n from IPython.display import clear_output\n else:\n pbar = get_progress_bar(\"collect wikilinks\", max_value=approx_max_quantity)\n pbar.start()\n clear_output = None\n wikidata_ids = []\n entity_types = []\n subclass = []\n seen = 0\n\n relations = {\n name: (open(outfile, \"wt\"), is_temporal) for name, outfile, is_temporal in relation_names\n }\n fout_name2id = None if true_exists(name2id_path) else open(name2id_path, \"wt\")\n fout_wikidata_ids = None if true_exists(wikidata_ids_path) else open(wikidata_ids_path, \"wt\")\n try:\n t_then = time.time()\n seen_last = 0\n speed = None\n index = 0\n for doc in jsons:\n seen += 1\n if seen % 2000 == 0:\n if verbose:\n t_now = time.time()\n new_speed = (seen - seen_last) / (t_now - t_then)\n if speed is None:\n speed = new_speed\n else:\n speed = 0.9 * speed + 0.1 * new_speed\n clear_output(wait=True)\n print(\"%.3f%% done (%d seen, %.3f docs/s, ETA: %ds)\" % (\n 100.0 * seen / approx_max_quantity,\n seen,\n speed,\n int((approx_max_quantity - seen) / speed)\n ), flush=True)\n seen_last = seen\n t_then = t_now\n else:\n if seen < approx_max_quantity:\n pbar.update(seen)\n if fout_name2id is not None:\n if \"sitelinks\" in doc:\n for key, value in doc[\"sitelinks\"].items():\n if key.endswith(\"wiki\"):\n fout_name2id.write(key + \"/\" + value[\"title\"] + \"\\t\" + str(index) + \"\\n\")\n index += 1\n if fout_wikidata_ids is not None:\n fout_wikidata_ids.write(doc[\"id\"] + \"\\n\")\n for name, (outfile, is_temporal) in relations.items():\n if is_temporal:\n outfile.write(\n \"\\t\".join(get_claim_time(doc[\"claims\"].get(name, []))) + \"\\n\"\n )\n else:\n outfile.write(\n \"\\t\".join(get_related_entities(doc[\"claims\"].get(name, []))) + \"\\n\"\n )\n if pbar is not None:\n pbar.finish()\n finally:\n for name, (outfile, _) in relations.items():\n outfile.close()\n if fout_name2id is not None:\n fout_name2id.close()\n if fout_wikidata_ids is not None:\n fout_wikidata_ids.close()\n\n\ndef convert_wikidata_ids_to_ids(id2index, wikidata_ids):\n return [[id2index.get(wikidata_id, -1) for wikidata_id in propgroup] for propgroup in wikidata_ids]\n\n\ndef parse_year(text):\n pos = text[1:].find(\"-\")\n return int(text[:pos+1])\n\n\ndef values_exist(path):\n return (\n true_exists(path + \"_values.npy\") or\n true_exists(path + \"_values.sparse.npy\")\n )\n\n\ndef line2indices(id2index, line):\n if len(line) == 0:\n return []\n out = []\n for el in line.split(\"\\t\"):\n idx = id2index.get(el, None)\n if idx is None:\n continue\n else:\n out.append(idx)\n return out\n\n\ndef fixed_point_name_alternates(name):\n if name.endswith(\")\"):\n pos_closing = name.rfind(\"(\")\n return (name, name[:pos_closing].strip())\n if name.endswith(\"ses\"):\n return (name, name[:-2] + \"is\")\n if name.endswith(\"ies\"):\n return (name, name[:-3] + \"y\")\n if name.endswith(\"s\"):\n return (name, name[:-1])\n return 
(name,)\n\n\ndef build_fixed_point(out, prefix):\n wiki_fixed_point_save = join(out, \"wikidata_%s_fixed_points_values.npy\" % (prefix,))\n if not true_exists(wiki_fixed_point_save):\n print(\"building %s fixed point property.\" % (prefix,))\n trie = marisa_trie.RecordTrie('i').load(join(out, WIKITILE_2_WIKIDATA_TRIE_NAME))\n num_items = count_lines(join(out, WIKIDATA_IDS_NAME))\n fixed_point_relation = {}\n\n category_prefix = \"%s/Category:\" % (prefix,)\n article_prefix = \"%s/\" % (prefix,)\n wikititle2wikidata_path = join(out, WIKITILE_2_WIKIDATA_TSV_NAME)\n relevant_items = trie.iteritems(category_prefix)\n\n for name, category_idx in relevant_items:\n article_name = article_prefix + name[len(category_prefix):]\n for fixed_point_name_alternate in fixed_point_name_alternates(article_name):\n matches = trie.get(fixed_point_name_alternate, None)\n if matches is not None and len(matches) > 0:\n fixed_point_relation[category_idx] = [matches[0][0]]\n break\n print(\"Found %d fixed point relations for %s\" % (len(fixed_point_relation), prefix,))\n save_record_with_offset(\n join(out, \"wikidata_%s_fixed_points\" % (prefix,)),\n fixed_point_relation,\n num_items\n )\n\n\ndef main():\n args = parse_args()\n makedirs(args.wikidata, exist_ok=True)\n\n wikidata_names2prop_names = property_names(\n join(PROJECT_DIR, \"data\", \"wikidata\", 'wikidata_property_names.json')\n )\n wikidata_names2temporal_prop_names = temporal_property_names(\n join(PROJECT_DIR, \"data\", \"wikidata\", 'wikidata_time_property_names.json')\n )\n # fields to make easily accessible:\n wikidata_important_properties = [\n wikidata_properties.INSTANCE_OF,\n wikidata_properties.SUBCLASS_OF,\n wikidata_properties.PART_OF,\n wikidata_properties.OCCUPATION,\n wikidata_properties.FIELD_OF_WORK,\n wikidata_properties.FIELD_OF_THIS_OCCUPATION,\n wikidata_properties.MEDICAL_SPECIALITY,\n wikidata_properties.GENRE,\n wikidata_properties.SEX_OR_GENDER,\n wikidata_properties.COUNTRY_OF_CITIZENSHIP,\n wikidata_properties.COUNTRY,\n wikidata_properties.CONTINENT,\n wikidata_properties.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY,\n wikidata_properties.SPORT,\n wikidata_properties.STUDIES,\n wikidata_properties.SERIES,\n wikidata_properties.USE,\n wikidata_properties.LOCATION,\n wikidata_properties.FACET_OF,\n wikidata_properties.IS_A_LIST_OF,\n wikidata_properties.COUNTRY_OF_ORIGIN,\n wikidata_properties.PRODUCT_OR_MATERIAL_PRODUCED,\n wikidata_properties.INDUSTRY,\n wikidata_properties.PARENT_TAXON,\n wikidata_properties.APPLIES_TO_TERRITORIAL_JURISDICTION,\n wikidata_properties.POSITION_HELD,\n wikidata_properties.CATEGORYS_MAIN_TOPIC,\n # temporal properties\n wikidata_properties.PUBLICATION_DATE,\n wikidata_properties.DATE_OF_BIRTH,\n wikidata_properties.DATE_OF_DEATH,\n wikidata_properties.INCEPTION,\n wikidata_properties.DISSOLVED_OR_ABOLISHED,\n wikidata_properties.POINT_IN_TIME,\n wikidata_properties.START_TIME,\n wikidata_properties.END_TIME\n ]\n wikidata_important_properties_fnames = [\n (name, join(args.wikidata, \"wikidata_%s.txt\" % (name,)), name in wikidata_names2temporal_prop_names)\n for name in wikidata_important_properties\n ]\n\n missing_wikidata_important_properties_fnames = [\n (name, outfile, is_temporal)\n for name, outfile, is_temporal in wikidata_important_properties_fnames\n if not true_exists(outfile)\n ]\n\n wikidata_ids_path = join(args.wikidata, WIKIDATA_IDS_NAME)\n wikititle2wikidata_path = join(args.wikidata, WIKITILE_2_WIKIDATA_TSV_NAME)\n\n work_to_be_done = (\n not true_exists(wikidata_ids_path) 
or\n not true_exists(wikititle2wikidata_path) or\n len(missing_wikidata_important_properties_fnames) > 0\n )\n\n if work_to_be_done:\n get_wikidata_mapping(\n wikititle2wikidata_path,\n wikidata_ids_path,\n open_wikidata_file(args.wikidata_dump, args.batch_size),\n missing_wikidata_important_properties_fnames\n )\n\n numpy_wikidata_important_properties_fnames = [\n (name, outfile, is_temporal)\n for name, outfile, is_temporal in wikidata_important_properties_fnames\n if not values_exist(join(args.wikidata, \"wikidata_%s\" % (name,)))\n ]\n\n # obtain a mapping from id -> number\n if len(numpy_wikidata_important_properties_fnames) > 0:\n _, id2index = load_wikidata_ids(args.wikidata)\n # make relations numerical:\n for relname, outfile, is_temporal in numpy_wikidata_important_properties_fnames:\n with open(outfile, \"rt\") as fin:\n lines = fin.read().splitlines()\n fin_pbar = get_progress_bar(\"loading relation %r\" % (relname,))(lines)\n if is_temporal:\n value = np.zeros(len(lines) * 2 + 1, dtype=np.int32)\n position = 1\n seen = 0\n for idx, line in enumerate(fin_pbar):\n for wikidata_id in line.split('\\t'):\n if len(wikidata_id) > 0:\n value[position] = idx\n value[position + 1] = parse_year(wikidata_id)\n position += 2\n seen += 1\n break\n value[0] = len(lines)\n value = value[:position]\n np.save(join(args.wikidata, \"wikidata_%s_values.sparse.npy\" % (relname,)), value)\n else:\n relation = [\n line2indices(id2index, line) for line in fin_pbar\n ]\n save_record_with_offset(\n join(args.wikidata, \"wikidata_%s\" % (relname,)),\n relation\n )\n del id2index\n\n # convert the mapping from wikinames to integer values:\n trie_save_path = join(args.wikidata, WIKITILE_2_WIKIDATA_TRIE_NAME)\n if not true_exists(trie_save_path):\n print(\"loading wikipedia name -> wikidata\")\n name2id = pandas.read_csv(wikititle2wikidata_path, sep=\"\\t\", encoding='utf-8')\n print(\"loaded\")\n trie = marisa_trie.RecordTrie(\n 'i',\n get_progress_bar(\"convert to trie\", max_value=name2id.shape[0])(\n (key, (value,)) for _, key, value in name2id.itertuples()\n )\n )\n trie.save(trie_save_path)\n\n build_fixed_point(args.wikidata, \"enwiki\")\n\n\nif __name__ == '__main__':\n main()\n"}}},{"rowIdx":44,"cells":{"python_code":{"kind":"string","value":"import json\nimport time\nimport re\nimport argparse\n\nfrom wikidata_linker_utils.wikipedia import iterate_articles\n\nfrom multiprocessing import Pool\n\nCATEGORY_PREFIXES = [\n \"Category:\",\n \"CatΓ©gorie:\",\n \"Categorie:\",\n \"CategorΓ­a:\",\n \"Categoria:\",\n \"Kategorie:\",\n \"Kategoria:\",\n \"ΠšΠ°Ρ‚Π΅Π³ΠΎΡ€ΠΈΡ:\",\n \"Kategori:\"\n]\n\ncategory_link_pattern = re.compile(\n r\"\\[\\[((?:\" + \"|\".join(CATEGORY_PREFIXES) + r\")[^\\]\\[]*)\\]\\]\"\n)\nredirection_link_pattern = re.compile(r\"(?:#REDIRECT|#weiterleitung|#REDIRECCIΓ“N|REDIRECIONAMENTO)\\s*\\[\\[([^\\]\\[]*)\\]\\]\", re.IGNORECASE)\nanchor_link_pattern = re.compile(r\"\\[\\[([^\\]\\[:]*)\\]\\]\")\n\n\ndef category_link_job(args):\n \"\"\"\n Performing map-processing on different articles\n (in this case, just remove internal links)\n \"\"\"\n article_name, lines = args\n found_tags = []\n for match in re.finditer(category_link_pattern, lines):\n match_string = match.group(1).strip()\n if \"|\" in match_string:\n link, _ = match_string.rsplit(\"|\", 1)\n link = link.strip().split(\"#\")[0]\n else:\n link = match_string\n\n if len(link) > 0:\n found_tags.append(link)\n return (article_name, found_tags)\n\ndef redirection_link_job(args):\n \"\"\"\n Performing map-processing on 
different articles\n (in this case, just remove internal links)\n \"\"\"\n article_name, lines = args\n found_tags = []\n for match in re.finditer(redirection_link_pattern, lines):\n if match is None:\n continue\n if match.group(1) is None:\n continue\n match_string = match.group(1).strip()\n if \"|\" in match_string:\n link, _ = match_string.rsplit(\"|\", 1)\n link = link.strip().split(\"#\")[0]\n else:\n link = match_string\n\n if len(link) > 0:\n found_tags.append(link)\n return (article_name, found_tags)\n\n\ndef anchor_finding_job(args):\n \"\"\"\n Performing map-processing on different articles\n (in this case, just remove internal links)\n \"\"\"\n article_name, lines = args\n found_tags = []\n for match in re.finditer(anchor_link_pattern, lines):\n match_string = match.group(1).strip()\n\n if \"|\" in match_string:\n link, anchor = match_string.rsplit(\"|\", 1)\n link = link.strip().split(\"#\")[0]\n anchor = anchor.strip()\n else:\n anchor = match_string\n link = match_string\n\n if len(anchor) > 0 and len(link) > 0:\n found_tags.append((anchor, link))\n return (article_name, found_tags)\n\n\n\ndef anchor_category_redirection_link_job(args):\n article_name, found_redirections = redirection_link_job(args)\n article_name, found_categories = category_link_job(args)\n article_name, found_anchors = anchor_finding_job(args)\n return (article_name, (found_anchors, found_redirections, found_categories))\n\n\ndef run_jobs(worker_pool, pool_jobs, outfile_anchors, outfile_redirections, outfile_category_links):\n results = worker_pool.map(anchor_category_redirection_link_job, pool_jobs)\n for article_name, result in results:\n anchor_links, redirect_links, category_links = result\n for link in redirect_links:\n outfile_redirections.write(article_name + \"\\t\" + link + \"\\n\")\n for link in category_links:\n outfile_category_links.write(article_name + \"\\t\" + link + \"\\n\")\n if \":\" not in article_name:\n outfile_anchors.write(article_name + \"\\t\" + article_name + \"\\t\" + article_name + \"\\n\")\n for anchor, link in anchor_links:\n outfile_anchors.write(article_name + \"\\t\" + anchor + \"\\t\" + link + \"\\n\")\n\n\ndef parse_wiki(path,\n anchors_path,\n redirections_path,\n category_links_path,\n threads=1,\n max_jobs=10):\n t0 = time.time()\n jobs = []\n pool = Pool(processes=threads)\n try:\n with open(redirections_path, \"wt\") as fout_redirections, open(category_links_path, \"wt\") as fout_category_links, open(anchors_path, \"wt\") as fout_anchors:\n for article_name, lines in iterate_articles(path):\n jobs.append((article_name, lines))\n if len(jobs) >= max_jobs:\n run_jobs(pool, jobs, fout_anchors, fout_redirections, fout_category_links)\n jobs = []\n if len(jobs) > 0:\n run_jobs(pool, jobs, fout_anchors, fout_redirections, fout_category_links)\n jobs = []\n finally:\n pool.close()\n t1 = time.time()\n print(\"%.3fs elapsed.\" % (t1 - t0,))\n\n\ndef parse_args(argv=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"wiki\",\n help=\"Wikipedia dump file (xml).\")\n parser.add_argument(\"out_anchors\",\n help=\"File where anchor information should be saved (tsv).\")\n parser.add_argument(\"out_redirections\",\n help=\"File where redirection information should be saved (tsv).\")\n parser.add_argument(\"out_category_links\",\n help=\"File where category link information should be saved (tsv).\")\n\n def add_int_arg(name, default):\n parser.add_argument(\"--%s\" % (name,), type=int, default=default)\n\n add_int_arg(\"threads\", 8)\n add_int_arg(\"max_jobs\", 
10000)\n return parser.parse_args(argv)\n\n\ndef main(argv=None):\n args = parse_args(argv)\n parse_wiki(\n path=args.wiki,\n anchors_path=args.out_anchors,\n redirections_path=args.out_redirections,\n category_links_path=args.out_category_links,\n threads=args.threads,\n max_jobs=args.max_jobs\n )\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":45,"cells":{"python_code":{"kind":"string","value":"\"\"\"\nCreate a tsv file where where the first column is a token and second column\nis the QID (wikidata internal id for entities). This can then be used\nby evaluate_learnability or from training a type model.\n\nUsage\n-----\n\n```\npython3 produce_wikidata_tsv.py configs/en_export_config.json en_wikipedia.tsv\n```\n\nUse `--relative_to` argument to specify the base directory for relative paths in the\nconfig file.\n\n\"\"\"\nimport argparse\nimport re\nimport json\n\nfrom os.path import join, dirname, realpath, exists\n\nimport marisa_trie\nimport ciseau\nimport numpy as np\n\nfrom wikidata_linker_utils.wikipedia import (\n iterate_articles, induce_wikipedia_prefix, load_redirections,\n transition_trie_index\n)\nfrom wikidata_linker_utils.json import load_config\nfrom wikidata_linker_utils.offset_array import OffsetArray\nfrom wikidata_linker_utils.type_collection import TypeCollection\nfrom wikidata_linker_utils.anchor_filtering import acceptable_anchor, clean_up_trie_source\nfrom wikidata_linker_utils.wikipedia import match_wikipedia_to_wikidata\n\nSCRIPT_DIR = dirname(realpath(__file__))\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\")\n parser.add_argument(\"out\")\n parser.add_argument(\"--relative_to\", type=str, default=None)\n return parser.parse_args(args=args)\n\n\nlink_pattern = re.compile(r\"\\[\\[([^\\]\\[:]*)\\]\\]\")\nref_pattern = re.compile(r\"<ref[^<>]*>[^<]+</ref>\")\ndouble_bracket_pattern = re.compile(r\"{{[^{}]+}}\")\ntitle_pattern = re.compile(r\"==+([^=]+)==+\")\nbullet_point_pattern = re.compile(r\"^([*#])\", re.MULTILINE)\n\n\ndef merge_tags(words, tags, start_sent):\n out = [(w, []) for w in words]\n for tag_start, tag_end, tag in tags:\n so_far = start_sent\n for k, word in enumerate(words):\n begins = tag_start <= so_far or (tag_start > so_far and tag_start < so_far + len(word))\n ends = (so_far + len(word) <= tag_end) or (tag_end < so_far + len(word) and tag_end > so_far)\n if begins and ends:\n out[k][1].append(tag)\n so_far += len(word)\n if so_far >= tag_end:\n break\n return out\n\n\ndef pick_relevant_tags(tagged_sequence, char_offset, char_offset_end):\n relevant_tags = []\n for word, tags in tagged_sequence:\n if tags is not None:\n start, end, dest_index = tags\n if start >= char_offset and start < char_offset_end:\n relevant_tags.append((start, end, dest_index))\n if start >= char_offset_end:\n break\n return relevant_tags\n\n\ndef convert_document_to_labeled_tags(annotated, sentences):\n paragraphs = []\n paragraph = []\n char_offset = 0\n for sentence in sentences:\n sentence_length = sum(len(w) for w in sentence)\n sentence_tags = pick_relevant_tags(\n annotated,\n char_offset,\n char_offset + sentence_length\n )\n sentence_with_tags = merge_tags(\n sentence,\n sentence_tags,\n char_offset\n )\n sentence_with_tags = [\n (\n w,\n [tags[0]] if len(tags) > 0 else []\n ) for w, tags in sentence_with_tags\n ]\n if \"\\n\" in sentence[-1]:\n paragraph.extend(sentence_with_tags)\n paragraphs.append(paragraph)\n paragraph = []\n else:\n paragraph.extend(sentence_with_tags)\n char_offset += 
sentence_length\n if len(paragraph) > 0:\n paragraphs.append(paragraph)\n return paragraphs\n\n\ndef annotate_document(doc,\n collection,\n wiki_trie,\n anchor_trie,\n trie_index2indices,\n trie_index2indices_counts,\n trie_index2indices_transitions,\n redirections,\n prefix):\n out = []\n current_position = 0\n current_position_no_brackets = 0\n for match in re.finditer(link_pattern, doc):\n start = match.start()\n end = match.end()\n\n if current_position != start:\n out.append(\n (doc[current_position:start], None)\n )\n current_position_no_brackets += start - current_position\n current_position = end\n\n match_string = match.group(1).strip()\n if \"|\" in match_string:\n link, anchor = match_string.rsplit(\"|\", 1)\n link = link.strip().split(\"#\")[0]\n anchor = anchor\n anchor_stripped = anchor.strip()\n else:\n anchor = match_string\n anchor_stripped = match_string.strip()\n link = anchor_stripped\n\n if len(anchor) > 0 and len(link) > 0:\n anchor = clean_up_trie_source(anchor, lowercase=False)\n lowercase_anchor = anchor.lower()\n if acceptable_anchor(lowercase_anchor, anchor_trie):\n anchor_idx = anchor_trie[lowercase_anchor]\n dest_index = match_wikipedia_to_wikidata(link, wiki_trie, redirections, prefix)\n if dest_index is not None:\n all_options = trie_index2indices[anchor_idx]\n if len(all_options) > 0:\n if trie_index2indices_transitions is not None:\n dest_index = transition_trie_index(\n anchor_idx, dest_index,\n trie_index2indices_transitions,\n all_options\n )\n try:\n new_dest_index = dest_index\n keep = True\n\n if keep:\n out.append(\n (\n anchor,\n (\n current_position_no_brackets,\n current_position_no_brackets + len(anchor),\n collection.ids[new_dest_index]\n )\n )\n )\n current_position_no_brackets += len(anchor)\n continue\n except IndexError:\n # missing element\n pass\n current_position_no_brackets += len(anchor)\n out.append(\n (anchor, None)\n )\n\n if current_position != len(doc):\n out.append(\n (doc[current_position:len(doc)], None)\n )\n return out\n\n\ndef convert(article_name,\n doc,\n collection,\n wiki_trie,\n anchor_trie,\n trie_index2indices,\n trie_index2indices_counts,\n trie_index2indices_transitions,\n redirections,\n prefix):\n doc = doc.replace(\"\\t\", \" \")\n # remove ref tags:\n doc = re.sub(ref_pattern, \" \", doc)\n doc = re.sub(double_bracket_pattern, \" \", doc)\n doc = re.sub(title_pattern, r\"\\n\\n\\1\\. 
\", doc)\n doc = re.sub(bullet_point_pattern, r\"\\1 \", doc)\n\n article_index = match_wikipedia_to_wikidata(\n article_name, wiki_trie, redirections, prefix\n )\n # find location of tagged items in wikipedia:\n annotated = annotate_document(doc,\n collection,\n wiki_trie,\n anchor_trie,\n trie_index2indices,\n trie_index2indices_counts,\n trie_index2indices_transitions,\n redirections,\n prefix)\n text_without_brackets = \"\".join(text for text, _ in annotated)\n sentences = ciseau.sent_tokenize(\n text_without_brackets,\n normalize_ascii=False,\n keep_whitespace=True\n )\n return (\n convert_document_to_labeled_tags(\n annotated, sentences\n ),\n collection.ids[article_index] if article_index is not None else \"other\"\n )\n\n\n\ndef main():\n args = parse_args()\n config = load_config(\n args.config,\n [\"wiki\", \"language_path\", \"wikidata\", \"redirections\"],\n defaults={\n \"num_names_to_load\": 0,\n \"prefix\": None,\n \"sample_size\": 100\n },\n relative_to=args.relative_to\n )\n prefix = config.prefix or induce_wikipedia_prefix(config.wiki)\n\n collection = TypeCollection(\n config.wikidata,\n num_names_to_load=0\n )\n collection.load_blacklist(join(SCRIPT_DIR, \"blacklist.json\"))\n\n trie_index2indices = OffsetArray.load(\n join(config.language_path, \"trie_index2indices\"),\n compress=True\n )\n trie_index2indices_counts = OffsetArray(\n np.load(join(config.language_path, \"trie_index2indices_counts.npy\")),\n trie_index2indices.offsets\n )\n if exists(join(config.language_path, \"trie_index2indices_transition_values.npy\")):\n trie_index2indices_transitions = OffsetArray(\n np.load(join(config.language_path, \"trie_index2indices_transition_values.npy\")),\n np.load(join(config.language_path, \"trie_index2indices_transition_offsets.npy\")),\n )\n else:\n trie_index2indices_transitions = None\n\n anchor_trie = marisa_trie.Trie().load(join(config.language_path, \"trie.marisa\"))\n wiki_trie = marisa_trie.RecordTrie('i').load(\n join(config.wikidata, \"wikititle2wikidata.marisa\")\n )\n redirections = load_redirections(config.redirections)\n\n seen = 0\n with open(args.out, \"wt\") as fout:\n try:\n for article_name, article in iterate_articles(config.wiki):\n fixed_article, article_qid = convert(\n article_name,\n article,\n collection=collection,\n anchor_trie=anchor_trie,\n wiki_trie=wiki_trie,\n trie_index2indices=trie_index2indices,\n trie_index2indices_counts=trie_index2indices_counts,\n trie_index2indices_transitions=trie_index2indices_transitions,\n redirections=redirections,\n prefix=prefix)\n for paragraph in fixed_article:\n for word, qids in paragraph:\n if len(qids) > 0:\n fout.write(word.rstrip() + \"\\t\" + \"\\t\".join(qids + [article_qid]) + \"\\n\")\n else:\n fout.write(word.rstrip() + \"\\n\")\n fout.write(\"\\n\")\n seen += 1\n if seen >= config.sample_size:\n break\n finally:\n fout.flush()\n fout.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n"}}},{"rowIdx":46,"cells":{"python_code":{"kind":"string","value":"\"\"\"\nPerform a reduction on the anchors to articles relation\nby finding different articles refering to the same item\nand making the anchor point to the most common version,\nor by using the wikidata graph to find instance of, and\nother parent-child relations that allow one article to\nencompass or be more generic than its co-triggerable\narticles.\n\nUsage:\n------\n\n```\nDATA_DIR=data/wikidata\nLANG_DIR=data/en_trie\nFIXED_LANG_DIR=data/en_trie_fixed\npython3 fast_link_fixer.py ${WIKIDATA_PATH} ${LANG_DIR} 
${FIXED_LANG_DIR}\n```\n\"\"\"\nimport argparse\nimport time\nimport shutil\n\nfrom os.path import join, realpath, dirname\nfrom os import makedirs\n\nimport numpy as np\nimport marisa_trie\n\nfrom wikidata_linker_utils.type_collection import get_name, TypeCollection\nfrom wikidata_linker_utils.logic import logical_and, logical_ands, logical_not, logical_or, logical_ors\nfrom wikidata_linker_utils.progressbar import get_progress_bar\nfrom wikidata_linker_utils.offset_array import OffsetArray\nfrom wikidata_linker_utils.file import true_exists\nimport wikidata_linker_utils.wikidata_properties as wprop\n\nfrom wikidata_linker_utils.successor_mask import (\n related_promote_highest, extend_relations, reduce_values,\n remap_offset_array\n)\n\nSCRIPT_DIR = dirname(realpath(__file__))\n\nfrom numpy import logical_not, logical_or, logical_and\nfrom wikidata_linker_utils.logic import logical_ors\nIS_HISTORY = None\nIS_PEOPLE = None\nIS_BREED = None\nIS_PEOPLE_GROUP = None\nIS_LIST_ARTICLE = None\nIS_LANGUAGE_ALPHABET = None\nIS_SPORTS_TEAM = None\nIS_CARDINAL_DIRECTION = None\nIS_POLITICAL_PARTY = None\nIS_SOCIETY = None\nIS_POSITION = None\nIS_CHARACTER_HUMAN = None\nIS_POLITICAL_ORGANIZATION = None\nIS_LANDFORM = None\nIS_THING = None\nIS_BATTLE = None\nIS_EVENT = None\nIS_ACTIVITY = None\nIS_THOROUGHFARE = None\nIS_KINSHIP = None\nIS_EPISODE_LIST = None\n\ndef wkp(c, name):\n return c.article2id['enwiki/' + name][0][0]\n\ndef wkd(c, name):\n return c.name2index[name]\n\ndef initialize_globals(c):\n \"\"\"global variables that guide the metonymy/anaphora removal process.\"\"\"\n global IS_HISTORY\n global IS_PEOPLE\n global IS_PEOPLE_GROUP\n global IS_LIST_ARTICLE\n global IS_COUNTRY\n global IS_BREED\n global IS_EVENT_SPORT\n global IS_LANGUAGE_ALPHABET\n global IS_SPORTS_TEAM\n global IS_CARDINAL_DIRECTION\n global IS_ACTIVITY\n global IS_POLITICAL_PARTY\n global IS_SOCIETY\n global IS_BATTLE\n global IS_POSITION\n global IS_LANDFORM\n global IS_CHARACTER_HUMAN\n global IS_POLITICAL_ORGANIZATION\n global IS_THING\n global IS_THOROUGHFARE\n global IS_EVENT\n global IS_KINSHIP\n global IS_EPISODE_LIST\n PEOPLE = wkd(c, \"Q2472587\")\n NATIONALITY = wkd(c, \"Q231002\")\n ASPECT_OF_HIST = wkd(c, \"Q17524420\")\n HISTORY = wkd(c, \"Q309\")\n LIST_ARTICLE = wkd(c, \"Q13406463\")\n WAR = wkd(c, \"Q198\")\n COUNTRY = wkd(c, \"Q6256\")\n FORMER_COUNTRY = wkd(c, \"Q3024240\")\n DOMINION = wkd(c, \"Q223832\")\n LANGUAGE = wkd(c, \"Q34770\")\n ALPHABET = wkd(c, \"Q9779\")\n COLONY = wkd(c, \"Q133156\")\n GOVERNORATE = wkd(c, \"Q1798622\")\n SPORTS_TEAM = wkd(c, \"Q12973014\")\n ATHLETIC_CONFERENCE = wkd(c, \"Q2992826\")\n CARDINAL_DIRECTION = wkd(c, \"Q23718\")\n POLITICAL_PARTY = wkd(c, \"Q7278\")\n STATE = wkd(c, \"Q7275\")\n DYNASTY = wkd(c, \"Q164950\")\n SOCIETY = wkd(c, \"Q8425\")\n MENS_SINGLES = wkd(c, \"Q16893072\")\n SPORT = wkd(c, \"Q349\")\n POSITION = wkd(c, \"Q4164871\")\n HUMAN = wkd(c, \"Q5\")\n FICTIONAL_CHARACTER = wkd(c, \"Q95074\")\n BREED = wkd(c, \"Q38829\")\n ORTHOGRAPHY = wkd(c, \"Q43091\")\n POLITICAL_ORGANIZATION = wkd(c, \"Q7210356\")\n GROUP_OF_HUMANS = wkd(c, \"Q16334295\")\n LANDFORM = wkd(c, \"Q271669\")\n BATTLE = wkd(c, \"Q178561\")\n FOOD = wkd(c, \"Q2095\")\n DRINK = wkd(c, \"Q40050\")\n ANIMAL = wkd(c, \"Q16521\")\n WORK = wkd(c, \"Q386724\")\n AUTOMOBILE_MODEL = wkd(c, \"Q3231690\")\n GOOD = wkd(c, \"Q28877\")\n VEHICLE = wkd(c, \"Q42889\")\n PUBLICATION = wkd(c, \"Q732577\")\n AUDIOVISUAL = wkd(c, \"Q2431196\")\n TERRITORIAL_ENTITY = wkd(c, \"Q15642541\")\n 
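# Explanatory note (hedged; inferred from the wkd/wkp helpers defined above and the usage in fix() below): wkd(c, ...) resolves a Wikidata QID string to an integer index via c.name2index, and wkp(c, ...) does the same for an enwiki article title via c.article2id.\n    # The IS_* globals assigned further down come from c.satisfy(relations, targets), which appears to return a boolean numpy mask with one entry per item, so it can be indexed directly by arrays of item ids, e.g. IS_HISTORY[new_values].\n    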
GEOGRAPHIC_OBJECT = wkd(c, \"Q618123\")\n ASTRO_OBJECT = wkd(c, \"Q17444909\")\n EVENT_SPORTING = wkd(c, \"Q1656682\")\n EVENT_OCCURRENCE = wkd(c, \"Q1190554\")\n ELECTROMAGNETIC_SPECTRUM = wkd(c, \"Q133139\")\n MAGICAL_ORG = wkd(c, \"Q14946195\")\n AUTONOM_CHURCH = wkd(c, \"Q20871948\")\n SIGN = wkd(c, \"Q3695082\")\n FORM_OF_GOVERNMENT = wkd(c, \"Q1307214\")\n SPORTS_ORG = wkd(c, \"Q4438121\")\n RECURRING_SPORTING_EVENT = wkd(c, \"Q18608583\")\n CLASS_SCHEME = wkd(c, \"Q5962346\")\n STYLE = wkd(c, \"Q1292119\")\n SIGN_SYSTEM = wkd(c, \"Q7512598\")\n PHYSICAL_PHENOMENON = wkd(c, \"Q1293220\")\n LAW = wkd(c, \"Q7748\")\n WATERCOURSE = wkd(c, \"Q355304\")\n BODY_OF_WATER = wkd(c, \"Q15324\")\n CHEMICAL_SUBSTANCE = wkd(c, \"Q79529\")\n HISTORICAL_PERIOD = wkd(c, \"Q11514315\")\n ACTIVITY = wkd(c, \"Q815962\")\n THOROUGHFARE = wkd(c, \"Q83620\")\n KINSHIP = wkd(c, \"Q171318\")\n FICTIONAL_HUMAN = wkd(c, \"Q15632617\")\n EPISODE = wkd(c, \"Q1983062\")\n\n IS_CHARACTER_HUMAN = c.satisfy(\n [wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.IS_A_LIST_OF],\n [HUMAN, FICTIONAL_HUMAN, FICTIONAL_CHARACTER]\n )\n # to be a history you must be an aspect of history\n # but not a history itself:\n IS_HISTORY = logical_and(\n c.satisfy([wprop.INSTANCE_OF], [ASPECT_OF_HIST]),\n logical_not(c.satisfy([wprop.INSTANCE_OF], [HISTORY]))\n )\n IS_PEOPLE = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [PEOPLE, NATIONALITY])\n IS_PEOPLE_GROUP = np.logical_or(\n IS_PEOPLE,\n c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [GROUP_OF_HUMANS, MAGICAL_ORG, AUTONOM_CHURCH])\n )\n IS_LIST_ARTICLE = c.satisfy([wprop.INSTANCE_OF], [LIST_ARTICLE])\n IS_LANGUAGE_ALPHABET = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF],\n [LANGUAGE, ALPHABET, ORTHOGRAPHY, SIGN_SYSTEM]\n )\n IS_COUNTRY = c.satisfy([wprop.INSTANCE_OF], [COUNTRY, FORMER_COUNTRY, DOMINION, COLONY, STATE, DYNASTY, GOVERNORATE])\n IS_SPORTS_TEAM = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.PART_OF], [SPORTS_TEAM, ATHLETIC_CONFERENCE, SPORTS_ORG, RECURRING_SPORTING_EVENT])\n IS_CARDINAL_DIRECTION = c.satisfy([wprop.INSTANCE_OF], [CARDINAL_DIRECTION])\n IS_POLITICAL_PARTY = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POLITICAL_PARTY])\n IS_SOCIETY = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [SOCIETY, HISTORICAL_PERIOD])\n IS_POSITION = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POSITION])\n IS_BREED = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [BREED])\n IS_POLITICAL_ORGANIZATION = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POLITICAL_ORGANIZATION, FORM_OF_GOVERNMENT])\n IS_LANDFORM = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [LANDFORM, TERRITORIAL_ENTITY, GEOGRAPHIC_OBJECT, ASTRO_OBJECT, WATERCOURSE, BODY_OF_WATER])\n IS_EVENT_SPORT = c.satisfy([wprop.SUBCLASS_OF, wprop.PART_OF, wprop.INSTANCE_OF], [EVENT_SPORTING, SPORT])\n IS_THING = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF],\n [\n AUTOMOBILE_MODEL,\n FOOD,\n DRINK,\n STYLE,\n ANIMAL,\n GOOD,\n LAW,\n CHEMICAL_SUBSTANCE,\n SIGN,\n VEHICLE,\n PHYSICAL_PHENOMENON,\n PUBLICATION,\n AUDIOVISUAL,\n CLASS_SCHEME,\n WORK,\n ELECTROMAGNETIC_SPECTRUM\n ]\n )\n IS_THOROUGHFARE = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [THOROUGHFARE])\n IS_ACTIVITY = c.satisfy([wprop.INSTANCE_OF], [ACTIVITY])\n IS_EVENT = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [EVENT_OCCURRENCE])\n IS_BATTLE = c.satisfy([wprop.SUBCLASS_OF, wprop.INSTANCE_OF], [BATTLE])\n IS_KINSHIP = c.satisfy([wprop.INSTANCE_OF], [KINSHIP])\n IS_EPISODE_LIST = 
c.satisfy([wprop.IS_A_LIST_OF], [EPISODE])\n\n\ndef get_relation_data(collection, relation_paths):\n \"\"\"Prepare relations for usage inside extend_relations.\"\"\"\n out = []\n for path in relation_paths:\n promote = path.get(\"promote\", False)\n numpy_path = []\n for step in path[\"steps\"]:\n if isinstance(step, str):\n step_name, max_usage = step, 1\n else:\n step_name, max_usage = step\n relation = collection.relation(step_name)\n numpy_path.append((relation.offsets, relation.values, max_usage))\n inv_relation = collection.get_inverted_relation(step_name).edges() > 0\n out.append((numpy_path, inv_relation, promote))\n return out\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"wikidata\")\n parser.add_argument(\"language_path\")\n parser.add_argument(\"new_language_path\")\n parser.add_argument(\"--steps\", type=int, default=3,\n help=\"how many time should fixing be recursed (takes \"\n \"about 2mn per step. Has diminishing returns).\")\n return parser.parse_args()\n\n\ndef get_trie_properties(trie, offsets, values):\n \"\"\"Obtain the length of every trigger in the trie.\"\"\"\n anchor_length = np.zeros(len(values), dtype=np.int32)\n start, end = 0, 0\n for idx, key in enumerate(trie.iterkeys()):\n end = offsets[idx]\n anchor_length[start:end] = len(key)\n start = end\n return anchor_length\n\n\ndef fix(collection,\n offsets,\n values,\n counts,\n anchor_length,\n num_category_link=8,\n keep_min=5):\n relations_that_can_extend = [\n {\"steps\": [wprop.INSTANCE_OF]},\n {\"steps\": [wprop.INSTANCE_OF, (wprop.SUBCLASS_OF, 2)]},\n {\"steps\": [wprop.INSTANCE_OF, wprop.FACET_OF]},\n {\"steps\": [(wprop.SUBCLASS_OF, 3)]},\n {\"steps\": [wprop.OCCUPATION], \"promote\": True},\n {\"steps\": [wprop.POSITION_HELD], \"promote\": True},\n {\"steps\": [wprop.PART_OF, wprop.INSTANCE_OF]},\n {\"steps\": [wprop.SERIES, wprop.INSTANCE_OF]},\n {\"steps\": [wprop.SERIES, wprop.LOCATION]},\n {\"steps\": [wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY]},\n {\"steps\": [wprop.COUNTRY]},\n {\"steps\": [wprop.CATEGORY_LINK, wprop.CATEGORYS_MAIN_TOPIC]},\n {\"steps\": [(wprop.CATEGORY_LINK, num_category_link), wprop.FIXED_POINTS]},\n {\"steps\": [wprop.CATEGORY_LINK, wprop.FIXED_POINTS, wprop.IS_A_LIST_OF]},\n {\"steps\": [wprop.IS_A_LIST_OF, (wprop.SUBCLASS_OF, 2)]}\n ]\n relation_data = get_relation_data(collection, relations_that_can_extend)\n new_values = values\n # get rid of History of BLAH where link also points to BLAH:\n\n is_history = IS_HISTORY[new_values]\n is_people_mask = IS_PEOPLE[new_values]\n is_list = IS_LIST_ARTICLE[new_values]\n new_values = related_promote_highest(\n new_values,\n offsets,\n counts,\n condition=is_history,\n alternative=is_people_mask,\n keep_min=keep_min\n )\n unchanged = values == new_values\n is_not_history_or_list = logical_and(\n logical_not(is_history), logical_not(is_list)\n )\n new_values = related_promote_highest(\n new_values,\n offsets,\n counts,\n condition=logical_and(is_history, unchanged),\n alternative=is_not_history_or_list,\n keep_min=keep_min\n )\n\n is_sport_or_thoroughfare = logical_or(\n IS_EVENT_SPORT, IS_THOROUGHFARE\n )[new_values]\n\n # delete these references:\n new_values[anchor_length < 2] = -1\n # get rid of shorthand for sports:\n new_values[logical_and(is_sport_or_thoroughfare, anchor_length <= 2)] = -1\n # remove lists of episodes:\n is_episode_list = IS_EPISODE_LIST[new_values]\n new_values[is_episode_list] = -1\n\n # get rid of \"car\" -> \"Renault Megane\", when \"car\" -> \"Car\",\n # and 
\"Renault Megane\" is instance of \"Car\":\n is_not_people = logical_not(IS_PEOPLE)[new_values]\n new_values = extend_relations(\n relation_data,\n new_values,\n offsets,\n counts,\n alternative=is_not_people,\n pbar=get_progress_bar(\"extend_relations\", max_value=len(offsets), item=\"links\"),\n keep_min=keep_min\n )\n unchanged = values == new_values\n # remove all non-modified values that are\n # not instances of anything, nor subclasses of anything:\n new_values[logical_ands(\n [\n logical_ands([\n collection.relation(wprop.INSTANCE_OF).edges() == 0,\n collection.relation(wprop.SUBCLASS_OF).edges() == 0,\n collection.relation(wprop.PART_OF).edges() == 0,\n collection.relation(wprop.CATEGORY_LINK).edges() == 0\n ])[new_values],\n unchanged\n ])] = -1\n\n is_kinship = IS_KINSHIP[new_values]\n is_human = IS_CHARACTER_HUMAN[new_values]\n new_values = related_promote_highest(\n new_values,\n offsets,\n counts,\n condition=is_human,\n alternative=is_kinship,\n keep_min=keep_min\n )\n\n # replace elements by a country\n # if a better alternative is present,\n # counts is less than 100:\n should_replace_by_country = logical_ands(\n [\n logical_not(\n logical_ors([\n IS_POLITICAL_ORGANIZATION,\n IS_CARDINAL_DIRECTION,\n IS_LANGUAGE_ALPHABET,\n IS_COUNTRY,\n IS_PEOPLE_GROUP,\n IS_BREED,\n IS_BATTLE,\n IS_SOCIETY,\n IS_POSITION,\n IS_POLITICAL_PARTY,\n IS_SPORTS_TEAM,\n IS_CHARACTER_HUMAN,\n IS_LANDFORM,\n IS_ACTIVITY\n ])\n )[new_values],\n counts < 100\n ]\n )\n\n # turn this into a promote highest in this order:\n is_country_or_cardinal = [\n IS_CARDINAL_DIRECTION,\n IS_COUNTRY,\n IS_POLITICAL_ORGANIZATION\n ]\n for i, alternative in enumerate(is_country_or_cardinal):\n unchanged = values == new_values\n should_replace_by_country = logical_and(\n should_replace_by_country, unchanged\n )\n new_values = related_promote_highest(\n new_values,\n offsets,\n counts,\n condition=should_replace_by_country,\n alternative=alternative[new_values],\n keep_min=keep_min\n )\n\n new_offsets, new_values, new_counts, location_shift = reduce_values(\n offsets, new_values, counts)\n\n return (new_offsets, new_values, new_counts), location_shift\n\n\ndef filter_trie(trie, values):\n return marisa_trie.Trie((trie.restore_key(value) for value in values))\n\n\ndef remap_trie_offset_array(old_trie, new_trie, offsets_values_counts):\n mapping = np.zeros(len(new_trie), dtype=np.int32)\n t0 = time.time()\n for new_index in range(len(new_trie)):\n mapping[new_index] = old_trie[new_trie.restore_key(new_index)]\n t1 = time.time()\n print(\"Got mapping from old trie to new trie in %.3fs\" % (t1 - t0,))\n ported = []\n for offsets, values, counts in offsets_values_counts:\n new_offsets, new_values, new_counts = remap_offset_array(\n mapping, offsets, values, counts\n )\n ported.append((new_offsets, new_values, new_counts))\n t2 = time.time()\n print(\"Ported counts and values across tries in %.3fs\" % (t2 - t1,))\n return ported\n\n\ndef main():\n args = parse_args()\n if args.new_language_path == args.language_path:\n raise ValueError(\"new_language_path and language_path must be \"\n \"different: cannot generate a fixed trie in \"\n \"the same directory as the original trie.\")\n\n c = TypeCollection(args.wikidata, num_names_to_load=0)\n c.load_blacklist(join(SCRIPT_DIR, \"blacklist.json\"))\n original_values = np.load(\n join(args.language_path, \"trie_index2indices_values.npy\"))\n original_offsets = np.load(\n join(args.language_path, \"trie_index2indices_offsets.npy\"))\n original_counts = np.load(\n 
join(args.language_path, \"trie_index2indices_counts.npy\"))\n original_trie_path = join(args.language_path, 'trie.marisa')\n trie = marisa_trie.Trie().load(original_trie_path)\n initialize_globals(c)\n t0 = time.time()\n\n old_location_shift = None\n values, offsets, counts = original_values, original_offsets, original_counts\n for step in range(args.steps):\n anchor_length = get_trie_properties(trie, offsets, values)\n (offsets, values, counts), location_shift = fix(\n collection=c,\n offsets=offsets,\n values=values,\n counts=counts,\n anchor_length=anchor_length,\n num_category_link=8\n )\n if old_location_shift is not None:\n # see where newly shifted values are now pointing\n # to (extra indirection level):\n location_shift = location_shift[old_location_shift]\n location_shift[old_location_shift == -1] = -1\n old_location_shift = location_shift\n pre_reduced_values = values[location_shift]\n pre_reduced_values[location_shift == -1] = -1\n num_changes = int((pre_reduced_values != original_values).sum())\n change_volume = int((original_counts[pre_reduced_values != original_values].sum()))\n print(\"step %d with %d changes, %d total links\" % (\n step, num_changes, change_volume)\n )\n pre_reduced_values = values[location_shift]\n pre_reduced_values[location_shift == -1] = -1\n t1 = time.time()\n num_changes = int((pre_reduced_values != original_values).sum())\n print(\"Done with link fixing in %.3fs, with %d changes.\" % (\n t1 - t0, num_changes)\n )\n\n # show some remappings:\n np.random.seed(1234)\n num_samples = 10\n samples = np.random.choice(\n np.where(\n np.logical_and(\n np.logical_and(\n pre_reduced_values != original_values,\n pre_reduced_values != -1\n ),\n original_values != -1\n )\n )[0],\n size=num_samples,\n replace=False\n )\n print(\"Sample fixes:\")\n for index in samples:\n print(\" %r (%d) -> %r (%d)\" % (\n c.get_name(int(original_values[index])),\n int(original_values[index]),\n c.get_name(int(pre_reduced_values[index])),\n int(pre_reduced_values[index])\n )\n )\n print(\"\")\n\n samples = np.random.choice(\n np.where(\n OffsetArray(values, offsets).edges() == 0\n )[0],\n size=num_samples,\n replace=False\n )\n print(\"Sample deletions:\")\n for index in samples:\n print(\" %r\" % (trie.restore_key(int(index))))\n\n # prune out anchors where there are no more linked items:\n print(\"Removing empty anchors from trie...\")\n t0 = time.time()\n non_empty_offsets = np.where(\n OffsetArray(values, offsets).edges() != 0\n )[0]\n fixed_trie = filter_trie(trie, non_empty_offsets)\n\n contexts_found = true_exists(\n join(args.language_path, \"trie_index2contexts_values.npy\")\n )\n if contexts_found:\n contexts_values = np.load(\n join(args.language_path, \"trie_index2contexts_values.npy\"))\n contexts_offsets = np.load(\n join(args.language_path, \"trie_index2contexts_offsets.npy\"))\n contexts_counts = np.load(\n join(args.language_path, \"trie_index2contexts_counts.npy\"))\n\n to_port = [\n (offsets, values, counts),\n (original_offsets, pre_reduced_values, original_values)\n ]\n if contexts_found:\n to_port.append(\n (contexts_offsets, contexts_values, contexts_counts)\n )\n\n ported = remap_trie_offset_array(trie, fixed_trie, to_port)\n offsets, values, counts = ported[0]\n original_offsets, pre_reduced_values, original_values = ported[1]\n t1 = time.time()\n print(\"Removed %d empty anchors from trie in %.3fs\" % (\n len(trie) - len(fixed_trie), t1 - t0,)\n )\n\n print(\"Saving...\")\n makedirs(args.new_language_path, exist_ok=True)\n\n 
np.save(join(args.new_language_path, \"trie_index2indices_values.npy\"),\n values)\n np.save(join(args.new_language_path, \"trie_index2indices_offsets.npy\"),\n offsets)\n np.save(join(args.new_language_path, \"trie_index2indices_counts.npy\"),\n counts)\n if contexts_found:\n contexts_offsets, contexts_values, contexts_counts = ported[2]\n np.save(join(args.new_language_path, \"trie_index2contexts_values.npy\"),\n contexts_values)\n np.save(join(args.new_language_path, \"trie_index2contexts_offsets.npy\"),\n contexts_offsets)\n np.save(join(args.new_language_path, \"trie_index2contexts_counts.npy\"),\n contexts_counts)\n new_trie_path = join(args.new_language_path, 'trie.marisa')\n fixed_trie.save(new_trie_path)\n\n transition = np.vstack([original_values, pre_reduced_values]).T\n np.save(join(args.new_language_path, \"trie_index2indices_transition_values.npy\"),\n transition)\n np.save(join(args.new_language_path, \"trie_index2indices_transition_offsets.npy\"),\n original_offsets)\n print(\"Done.\")\n\nif __name__ == \"__main__\":\n main()\n"}}},{"rowIdx":47,"cells":{"python_code":{"kind":"string","value":"\"\"\"\nObtain a coarse-grained classification of places and entities according to their associated\ncontinent/country.\n\"\"\"\nfrom numpy import (\n logical_and, logical_or, logical_not, logical_xor, where\n)\nfrom wikidata_linker_utils.logic import logical_negate\nimport wikidata_linker_utils.wikidata_properties as wprop\n\n\ndef wkp(c, name):\n \"\"\"Convert a string wikipedia article name to its Wikidata index.\"\"\"\n return c.article2id[\"enwiki/\" + name][0][0]\n\n\ndef wkd(c, name):\n \"\"\"Convert a wikidata QID to its wikidata index.\"\"\"\n return c.name2index[name]\n\n\ndef classify(c):\n EUROPE = wkp(c, 'Europe')\n AFRICA = wkp(c, 'Africa')\n ASIA = wkp(c, 'Asia')\n NORTH_AMERICA = wkp(c, 'North America')\n SOUTH_AMERICA = wkp(c, 'South America')\n OCEANIA = wkp(c, 'Oceania')\n ANTARCTICA = wkp(c, 'Antarctica')\n CONTINENT = wkp(c, wprop.CONTINENT)\n OUTERSPACE = wkp(c, 'Astronomical object')\n EARTH = wkp(c, \"Earth\")\n GEOGRAPHIC_LOCATION = wkd(c, \"Q2221906\")\n POPULATED_PLACE = wkd(c, 'Q486972')\n\n MIDDLE_EAST = [\n wkp(c, \"Bahrain\"),\n wkp(c, \"Cyprus\"),\n wkp(c, \"Turkish\"),\n wkp(c, \"Egypt\"),\n wkp(c, \"Iran\"),\n wkp(c, \"Iraq\"),\n wkp(c, \"Kurdish\"),\n wkp(c, \"Israel\"),\n wkp(c, \"Arabic\"),\n wkp(c, \"Jordan\"),\n wkp(c, \"Kuwait\"),\n wkp(c, \"Lebanon\"),\n wkp(c, \"Oman\"),\n wkp(c, \"Palestine\"),\n wkp(c, \"Jordanian\"),\n wkp(c, \"Qatar\"),\n wkp(c, \"Saudi Arabia\"),\n wkp(c, \"Syria\"),\n wkp(c, \"Turkey\"),\n wkp(c, \"United Arab Emirates\"),\n wkp(c, \"Yemen\")\n ]\n\n\n TRAVERSIBLE = [\n wprop.INSTANCE_OF,\n wprop.SUBCLASS_OF,\n wprop.CONTINENT,\n wprop.PART_OF,\n wprop.COUNTRY_OF_CITIZENSHIP,\n wprop.COUNTRY,\n wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY\n ]\n # c.describe_connection(\"Q55\", \"North America\", TRAVERSIBLE)\n # return {}\n print(\"is_in_middle_east\")\n is_in_middle_east = c.satisfy(TRAVERSIBLE, MIDDLE_EAST)\n print(\"is_in_europe\")\n is_in_europe = c.satisfy(TRAVERSIBLE, [EUROPE])\n is_in_europe_only = logical_negate(is_in_europe, [is_in_middle_east])\n print(\"is_in_asia\")\n is_in_asia = c.satisfy(TRAVERSIBLE, [ASIA])\n is_in_asia_only = logical_negate(is_in_asia, [is_in_europe, is_in_middle_east])\n print(\"is_in_africa\")\n is_in_africa = c.satisfy(TRAVERSIBLE, [AFRICA])\n is_in_africa_only = logical_negate(is_in_africa, [is_in_europe, is_in_asia, is_in_middle_east])\n print(\"is_in_north_america\")\n 
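# The same recipe repeats for every region in this function (sketch of intent only; REGION and earlier_masks are placeholder names):\n    #   mask = c.satisfy(TRAVERSIBLE, [REGION])        items linked to REGION over the listed relations\n    #   only = logical_negate(mask, [earlier_masks])   presumably mask minus anything already claimed, so each\n    #                                                  item lands in a single bucket of the returned dict\n    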
is_in_north_america = c.satisfy(TRAVERSIBLE, [NORTH_AMERICA])\n is_in_north_america_only = logical_negate(is_in_north_america, [is_in_europe, is_in_asia, is_in_middle_east])\n print(\"is_in_south_america\")\n is_in_south_america = c.satisfy(TRAVERSIBLE, [SOUTH_AMERICA])\n print(\"is_in_antarctica\")\n is_in_antarctica = c.satisfy(TRAVERSIBLE, [ANTARCTICA])\n is_in_antarctica_only = logical_negate(is_in_antarctica, [is_in_europe, is_in_north_america, is_in_asia, is_in_middle_east])\n print(\"is_in_oceania\")\n is_in_oceania = c.satisfy(TRAVERSIBLE, [OCEANIA])\n is_in_oceania_only = logical_negate(is_in_oceania, [is_in_europe, is_in_north_america, is_in_asia, is_in_middle_east])\n print(\"is_in_outer_space\")\n is_in_outer_space = c.satisfy(TRAVERSIBLE, [OUTERSPACE])\n print(\"part_of_earth\")\n part_of_earth = c.satisfy(\n [wprop.INSTANCE_OF, wprop.PART_OF, wprop.CONTINENT, wprop.COUNTRY_OF_CITIZENSHIP, wprop.COUNTRY, wprop.SUBCLASS_OF],\n [GEOGRAPHIC_LOCATION, EARTH]\n )\n print(\"is_in_outer_space_not_earth\")\n is_in_outer_space_not_earth = logical_negate(\n is_in_outer_space, [part_of_earth]\n )\n print(\"is_a_populated_place\")\n is_populated_place = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POPULATED_PLACE])\n is_unlocalized_populated_place = logical_negate( is_populated_place, [is_in_europe, is_in_asia, is_in_antarctica, is_in_oceania, is_in_outer_space, is_in_south_america, is_in_north_america])\n\n return {\n \"europe\": is_in_europe_only,\n \"asia\": is_in_asia_only,\n \"africa\": is_in_africa_only,\n \"middle_east\": is_in_middle_east,\n \"north_america\": is_in_north_america_only,\n \"south_america\": is_in_south_america,\n \"antarctica\": is_in_antarctica_only,\n \"oceania\": is_in_oceania_only,\n \"outer_space\": is_in_outer_space_not_earth,\n # \"populated_space\": is_populated_place,\n \"populated_place_unlocalized\": is_unlocalized_populated_place\n }\n"}}},{"rowIdx":48,"cells":{"python_code":{"kind":"string","value":"\"\"\"\nObtain a finer-grained classification of places and entities according to their associated\ncountry/region.\n\"\"\"\nfrom numpy import (\n logical_and, logical_or, logical_not, logical_xor, where\n)\nfrom wikidata_linker_utils.logic import logical_negate, logical_ors\nimport wikidata_linker_utils.wikidata_properties as wprop\n\n\ndef wkp(c, name):\n \"\"\"Convert a string wikipedia article name to its Wikidata index.\"\"\"\n return c.article2id[\"enwiki/\" + name][0][0]\n\ndef wkd(c, name):\n \"\"\"Convert a wikidata QID to its wikidata index.\"\"\"\n return c.name2index[name]\n\n\ndef classify(c):\n TRAVERSIBLE_BASIC = [wprop.INSTANCE_OF, wprop.SUBCLASS_OF]\n TRAVERSIBLE_COUNTRY = [\n wprop.INSTANCE_OF,\n wprop.SUBCLASS_OF,\n wprop.COUNTRY_OF_CITIZENSHIP,\n wprop.COUNTRY,\n wprop.LOCATION,\n wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY\n ]\n TRAVERSIBLE_PART_OF = [\n wprop.INSTANCE_OF,\n wprop.SUBCLASS_OF,\n wprop.CONTINENT,\n wprop.PART_OF,\n wprop.COUNTRY_OF_CITIZENSHIP,\n wprop.COUNTRY,\n wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY\n ]\n TRAVERSIBLE_TOPIC = [\n wprop.INSTANCE_OF, wprop.SUBCLASS_OF,\n wprop.STUDIES, wprop.FIELD_OF_THIS_OCCUPATION, wprop.OCCUPATION,\n wprop.FIELD_OF_WORK, wprop.INDUSTRY]\n\n ASSOCIATION_FOOTBALL_PLAYER = wkd(c,\"Q937857\")\n PAINTER = wkd(c,\"Q1028181\")\n POLITICIAN = wkd(c,\"Q82955\")\n ARTICLE = wkd(c,\"Q191067\")\n VIDEO_GAME = wkd(c,\"Q7889\")\n FILM = wkd(c,\"Q11424\")\n FICTIONAL_CHARACTER = wkd(c,\"Q95074\")\n POEM = wkd(c,\"Q482\")\n BOOK = wkd(c,\"Q571\")\n DISEASE = 
wkd(c,\"Q12136\")\n PAINTING = wkd(c,\"Q3305213\")\n VISUAL_ART_WORK = wkd(c,\"Q4502142\")\n MUSIC_WORK = wkd(c,\"Q2188189\")\n SCIENTIFIC_ARTICLE = wkd(c,\"Q13442814\")\n PROTEIN_FAMILY = wkd(c,\"Q417841\")\n PROTEIN_COMPLEX = wkd(c,\"Q420927\")\n GENE = wkd(c,\"Q7187\")\n CHEMICAL_SUBSTANCE = wkd(c,\"Q79529\")\n PROTEIN = wkd(c,\"Q8054\")\n TAXON = wkd(c,\"Q16521\")\n PHYSICAL_OBJECT = wkd(c,\"Q223557\")\n OUTERSPACE = wkp(c, 'Astronomical object')\n #INTERNATIONAL_ORGANISATION = wkd(c,\"\")\n HUMAN = wkp(c,\"Human\")\n HUMAN_SETTLMENT = wkd(c,\"Q486972\")\n DICTIONARY = wkd(c,\"Q23622\")\n ABRREVIATION = wkd(c,\"Q102786\")\n POPULATED_PLACE = wkd(c,\"Q486972\")\n TERRITORIAL_ENTITY = wkd(c, \"Q1496967\")\n DESA = wkd(c,\"Q26211545\")\n TOWN_IN_CHINA = wkd(c,\"Q735428\")\n ADMIN_DIVISION_CHINA = wkd(c,\"Q50231\")\n COUNTRY = wkd(c,\"Q6256\")\n MOUNTAIN_RANGE = wkd(c,\"Q46831\")\n EARTH = wkp(c, \"Earth\")\n GEOGRAPHIC_LOCATION = wkd(c, \"Q2221906\")\n\n is_politician = c.satisfy([wprop.OCCUPATION], [POLITICIAN])\n is_painter = c.satisfy([wprop.OCCUPATION], [PAINTER])\n is_association_football_player = c.satisfy([wprop.OCCUPATION],[ASSOCIATION_FOOTBALL_PLAYER])\n\n is_populated_place = c.satisfy(\n [wprop.INSTANCE_OF, wprop.PART_OF, wprop.CONTINENT, wprop.COUNTRY_OF_CITIZENSHIP,\n wprop.COUNTRY, wprop.SUBCLASS_OF],\n [GEOGRAPHIC_LOCATION, EARTH, HUMAN_SETTLMENT])\n\n is_taxon = c.satisfy(\n [wprop.INSTANCE_OF, wprop.PART_OF, wprop.SUBCLASS_OF],\n [TAXON])\n is_other_wkd= c.satisfy(\n [wprop.INSTANCE_OF, wprop.PART_OF, wprop.SUBCLASS_OF],\n [GENE, CHEMICAL_SUBSTANCE, SCIENTIFIC_ARTICLE,\n PROTEIN, DISEASE, PROTEIN_FAMILY,PROTEIN_COMPLEX,\n BOOK, MUSIC_WORK, PAINTING, VISUAL_ART_WORK, POEM, FILM,\n FICTIONAL_CHARACTER,VIDEO_GAME,SCIENTIFIC_ARTICLE,ARTICLE])\n is_gene_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Genes\")], max_steps=5)\n is_chromosome_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Chromosomes\")], max_steps=5)\n is_protein_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Proteins\")], max_steps=5)\n is_other= logical_ors([is_other_wkd, is_gene_wkp, is_chromosome_wkp,\n is_protein_wkp ])\n\n\n\n\n print(\"WIKI Links\")\n WIKIPEDIA_DISAMBIGUATION_PAGE = wkd(c,\"Q4167410\")\n SCIENTIFIC_JOURNAL = wkd(c,\"Q5633421\")\n SURNAME = wkd(c,\"Q101352\")\n WIKI_NEWS_ARTICLE = wkd(c,\"Q17633526\")\n WIKIMEDIA_CATEGORY = wkd(c,\"Q4167836\")\n WIKIPEDIA_TEMPLATE_NAMESPACE = wkd(c,\"Q11266439\")\n WIKIPEDIA_LIST = wkd(c,\"Q13406463\")\n ENCYCLOPEDIA_ARTICLE = wkd(c,\"Q17329259\")\n WIKIMEDIA_PROJECT_PAGE = wkd(c,\"Q14204246\")\n RURAL_COMUNE_VIETNAM = wkd(c,\"Q2389082\")\n TERRITORIAL_ENTITY = wkd(c,\"Q1496967\")\n is_Wiki_Links = c.satisfy(TRAVERSIBLE_TOPIC,\n [WIKIPEDIA_DISAMBIGUATION_PAGE,\n SURNAME,\n WIKIMEDIA_CATEGORY,\n WIKIPEDIA_TEMPLATE_NAMESPACE,\n WIKIPEDIA_LIST,\n ENCYCLOPEDIA_ARTICLE,\n WIKIMEDIA_PROJECT_PAGE,\n WIKI_NEWS_ARTICLE\n ])\n\n\n print(\"is_in_outer_space\")\n is_in_outer_space = c.satisfy(TRAVERSIBLE_PART_OF, [OUTERSPACE])\n print(\"part_of_earth\")\n part_of_earth = c.satisfy(\n [wprop.INSTANCE_OF, wprop.PART_OF, wprop.CONTINENT, wprop.COUNTRY_OF_CITIZENSHIP, wprop.COUNTRY, wprop.SUBCLASS_OF, wprop.LOCATION],\n [GEOGRAPHIC_LOCATION, EARTH])\n print(\"is_in_outer_space_not_earth\")\n is_in_outer_space_not_earth = logical_negate(\n is_in_outer_space, [part_of_earth])\n\n\n print(\"African countries\")\n ALGERIA = wkp(c,\"Algeria\")\n ANGOLA = wkp(c,\"Angola\")\n BENIN = wkp(c,\"Benin\")\n BOTSWANA = wkd(c,\"Q963\")\n BURKINA_FASO = 
wkd(c,\"Q965\")\n BURUNDI = wkd(c,\"Q967\")\n CAMEROON = wkd(c,\"Q1009\")\n CAPE_VERDE = wkd(c,\"Q1011\")\n CHAD = wkd(c,\"Q657\")\n CENTRAL_AFRICAN_REPUBLIC = wkd(c,\"Q929\")\n COMOROS = wkd(c,\"Q970\")\n DEMOCRATIC_REPUBLIC_OF_CONGO = wkd(c,\"Q974\")\n REPUBLIC_OF_CONGO = wkd(c,\"Q971\")\n DJIBOUTI = wkd(c,\"Q977\")\n EGYPT = wkd(c,\"Q79\")\n RASHIDUN_CALIPHATE = wkd(c,\"Q12490507\")\n EQUATORIAL_GUINEA = wkd(c,\"Q983\")\n ERITREA = wkd(c,\"Q986\")\n ETHIOPIA = wkd(c,\"Q115\")\n GABON = wkd(c,\"Q1000\")\n THE_GAMBIA = wkd(c,\"Q1005\")\n GHANA = wkd(c,\"Q117\")\n GUINEA = wkd(c,\"Q1006\")\n GUINEA_BISSAU = wkd(c,\"Q1007\")\n IVORY_COAST = wkd(c,\"Q1008\")\n KENYA = wkd(c,\"Q114\")\n LESOTHO = wkd(c,\"Q1013\")\n LIBERIA = wkd(c,\"Q1014\")\n LIBYA = wkd(c,\"Q1016\")\n MADAGASCAR = wkd(c,\"Q1019\")\n MALAWI = wkd(c,\"Q1020\")\n MALI = wkd(c,\"Q912\")\n MAURITANIA = wkd(c,\"Q1025\")\n MAURITIUS = wkd(c,\"Q1027\")\n MOROCCO = wkd(c,\"Q1028\")\n MOZAMBIQUE = wkd(c,\"Q1029\")\n NAMIBIA = wkd(c,\"Q1030\")\n NIGER = wkd(c,\"Q1032\")\n NIGERIA = wkd(c,\"Q1033\")\n RWANDA = wkd(c,\"Q1037\")\n SAHARI_ARAB_DEOMOCRATIC_REPUBLIC = wkd(c,\"Q40362\")\n SAO_TOME_AND_PRINCIPE= wkd(c,\"Q1039\")\n SENEGAL = wkd(c,\"Q1041\")\n SEYCHELLES = wkd(c,\"Q1042\")\n SIERRA_LEONE = wkd(c,\"Q1044\")\n SOMALIA = wkd(c,\"Q1045\")\n SOUTH_AFRICA = wkd(c,\"Q258\")\n SOUTHSUDAN = wkd(c,\"Q958\")\n SUDAN = wkd(c,\"Q1049\")\n SWAZILAND= wkd(c,\"Q1050\")\n TANZANIA = wkd(c,\"Q924\")\n TOGO = wkd(c,\"Q945\")\n TUNISIA= wkd(c,\"Q948\")\n UGANDA = wkd(c,\"Q1036\")\n WESTERN_SAHARA = wkd(c,\"Q6250\")\n ZAMBIA = wkd(c,\"Q953\")\n ZIMBABWE = wkd(c,\"Q954\")\n SOMALI_LAND = wkd(c,\"Q34754\")\n\n\n in_algeria_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ALGERIA])\n in_algeria_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Algeria stubs\")], max_steps=4)\n in_algeria_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Politics of Algeria\")], max_steps=3)\n in_algeria_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Roads in Algeria\")], max_steps=3)\n in_algeria = logical_ors([in_algeria_wkd, in_algeria_stubs, in_algeria_politics, in_algeria_roads])\n in_angola_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ANGOLA])\n in_angola_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Angola stubs\")], max_steps=4)\n in_angola_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Politics of Angola\")], max_steps=3)\n in_angola_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Roads in Angola\")], max_steps=3)\n in_angola = logical_ors([in_angola_wkd , in_angola_stubs, in_angola_politics, in_angola_roads])\n in_benin_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BENIN])\n in_benin_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Benin stubs\")], max_steps=4)\n in_benin_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Politics of Benin\")], max_steps=3)\n in_benin_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Roads in Benin\")], max_steps=3)\n in_benin = logical_ors([in_benin_wkd, in_benin_stubs, in_benin_politics, in_benin_roads])\n in_botswana_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BOTSWANA])\n in_botswana_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Botswana stubs\")], max_steps=4)\n in_botswana_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Politics of Botswana\")], max_steps=3)\n in_botswana_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Roads in Botswana\")], max_steps=3)\n in_botswana = logical_ors([in_botswana_wkd, 
in_botswana_stubs, in_botswana_politics,in_botswana_roads])\n in_burkina_faso_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BURKINA_FASO])\n in_bburkina_faso_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Burkina Faso stubs\")], max_steps=4)\n in_bburkina_faso_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Politics of Botswana\")], max_steps=3)\n in_burkina_faso = logical_ors([in_burkina_faso_wkd , in_botswana_stubs, in_botswana_politics])\n in_burundi_politics_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Politics of Burkina Faso\")], max_steps=4)\n in_burundi_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BURUNDI])\n in_burundi = logical_ors([in_burundi_wkd,in_burundi_politics_wkp])\n in_cameroon = c.satisfy(TRAVERSIBLE_COUNTRY, [CAMEROON])\n in_cape_verde= c.satisfy(TRAVERSIBLE_COUNTRY, [CAPE_VERDE])\n in_chad = c.satisfy(TRAVERSIBLE_COUNTRY, [CHAD])\n in_central_african_republic = c.satisfy(TRAVERSIBLE_COUNTRY, [CENTRAL_AFRICAN_REPUBLIC])\n in_comoros = c.satisfy(TRAVERSIBLE_COUNTRY, [COMOROS])\n in_democratic_republic_congo = c.satisfy(TRAVERSIBLE_COUNTRY, [DEMOCRATIC_REPUBLIC_OF_CONGO])\n in_republic_of_congo = c.satisfy(TRAVERSIBLE_COUNTRY, [REPUBLIC_OF_CONGO])\n in_djibouti = c.satisfy(TRAVERSIBLE_COUNTRY, [DJIBOUTI])\n in_egypt_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [EGYPT])\n in_ancient_egypt = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Ancient Egypt\")], max_steps=6)\n in_Rashidun_Caliphate = c.satisfy(TRAVERSIBLE_COUNTRY, [RASHIDUN_CALIPHATE])\n egyptian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Egyptian people\")], max_steps=6)\n in_egypt = logical_ors([in_egypt_wkd, in_egypt_wkd,in_Rashidun_Caliphate, egyptian_people])\n in_equatorial_guinea = c.satisfy(TRAVERSIBLE_COUNTRY, [EQUATORIAL_GUINEA])\n in_eritrea = c.satisfy(TRAVERSIBLE_COUNTRY, [ERITREA])\n in_ethiopia = c.satisfy(TRAVERSIBLE_COUNTRY, [ETHIOPIA])\n in_gabon = c.satisfy(TRAVERSIBLE_COUNTRY, [GABON])\n in_the_gambia = c.satisfy(TRAVERSIBLE_COUNTRY, [THE_GAMBIA])\n in_ghana = c.satisfy(TRAVERSIBLE_COUNTRY, [GHANA])\n in_guinea = c.satisfy(TRAVERSIBLE_COUNTRY, [GUINEA])\n in_guinea_bissau = c.satisfy(TRAVERSIBLE_COUNTRY, [GUINEA_BISSAU])\n in_ivory_coast = c.satisfy(TRAVERSIBLE_COUNTRY, [IVORY_COAST])\n in_lesotho = c.satisfy(TRAVERSIBLE_COUNTRY, [LESOTHO])\n in_kenya = c.satisfy(TRAVERSIBLE_COUNTRY, [KENYA])\n in_liberia = c.satisfy(TRAVERSIBLE_COUNTRY, [LIBERIA])\n in_libya = c.satisfy(TRAVERSIBLE_COUNTRY, [LIBYA])\n in_madagascar = c.satisfy(TRAVERSIBLE_COUNTRY, [MADAGASCAR])\n in_malawi = c.satisfy(TRAVERSIBLE_COUNTRY, [MALAWI])\n in_mali = c.satisfy(TRAVERSIBLE_COUNTRY, [MALI])\n in_mauritania = c.satisfy(TRAVERSIBLE_COUNTRY, [MAURITANIA])\n in_mauritius = c.satisfy(TRAVERSIBLE_COUNTRY, [MAURITIUS])\n in_morrocco = c.satisfy(TRAVERSIBLE_COUNTRY, [MOROCCO])\n in_mozambique = c.satisfy(TRAVERSIBLE_COUNTRY, [MOZAMBIQUE])\n in_namibia = c.satisfy(TRAVERSIBLE_COUNTRY, [NAMIBIA])\n in_niger = c.satisfy(TRAVERSIBLE_COUNTRY, [NIGER])\n in_nigeria = c.satisfy(TRAVERSIBLE_COUNTRY, [NIGERIA])\n in_rwanda = c.satisfy(TRAVERSIBLE_COUNTRY, [RWANDA])\n in_sadr = c.satisfy(TRAVERSIBLE_COUNTRY, [SAHARI_ARAB_DEOMOCRATIC_REPUBLIC])\n in_stap = c.satisfy(TRAVERSIBLE_COUNTRY, [SAO_TOME_AND_PRINCIPE])\n in_senegal = c.satisfy(TRAVERSIBLE_COUNTRY, [SENEGAL])\n in_seychelles = c.satisfy(TRAVERSIBLE_COUNTRY, [SEYCHELLES])\n in_sierra_leone = c.satisfy(TRAVERSIBLE_COUNTRY, [SIERRA_LEONE])\n in_somalia = c.satisfy(TRAVERSIBLE_COUNTRY, [SOMALIA])\n in_somali_land = 
c.satisfy(TRAVERSIBLE_COUNTRY, [SOMALI_LAND])\n in_south_africa = c.satisfy(TRAVERSIBLE_COUNTRY, [SOUTH_AFRICA])\n in_ssudan= c.satisfy(TRAVERSIBLE_COUNTRY, [SOUTHSUDAN])\n in_sudan= c.satisfy(TRAVERSIBLE_COUNTRY, [SUDAN])\n in_swaziland= c.satisfy(TRAVERSIBLE_COUNTRY, [SWAZILAND])\n in_tanzania_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Sports competitions in Tanzania\")], max_steps=4)\n in_tanzania_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [TANZANIA])\n in_tanzania = logical_ors([in_tanzania_wkp,in_tanzania_wkd])\n in_togo = c.satisfy(TRAVERSIBLE_COUNTRY, [TOGO])\n in_tunisia = c.satisfy(TRAVERSIBLE_COUNTRY, [TUNISIA])\n in_uganda = c.satisfy(TRAVERSIBLE_COUNTRY, [UGANDA])\n in_western_sahara = c.satisfy(TRAVERSIBLE_COUNTRY, [WESTERN_SAHARA])\n in_zambia_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ZAMBIA])\n zambian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Zambian people\")], max_steps=4)\n in_zambia = logical_ors([in_zambia_wkd, zambian_people])\n in_zimbabwe = c.satisfy(TRAVERSIBLE_COUNTRY, [ZIMBABWE])\n in_africa = logical_ors([\n in_botswana,\n in_burkina_faso,\n in_burundi,\n in_cameroon,\n in_cape_verde,\n in_chad,\n in_central_african_republic,\n in_comoros,\n in_democratic_republic_congo,\n in_republic_of_congo,\n in_djibouti,\n in_egypt,\n in_equatorial_guinea,\n in_eritrea,\n in_ethiopia,\n in_gabon,\n in_the_gambia,\n in_ghana,\n in_guinea,\n in_guinea_bissau,\n in_ivory_coast,\n in_lesotho,\n in_kenya,\n in_liberia,\n in_libya,\n in_madagascar,\n in_malawi\n ])\n\n print(\"Oceanian countries\")\n AUSTRALIA = wkd(c,\"Q408\")\n FIJI = wkd(c,\"Q712\")\n INDONESIA = wkd(c,\"Q252\")\n KIRIBATI= wkd(c,\"Q710\")\n MARSHALL_ISLANDS= wkd(c,\"Q709\")\n FEDERATED_STATES_OF_MICRONESIA= wkd(c,\"Q702\")\n NAURU= wkd(c,\"Q697\")\n PALAU= wkd(c,\"Q695\")\n PAPUA_NEW_GUINEA= wkd(c,\"Q691\")\n SAMOA = wkd(c,\"Q683\")\n SOLOMON_ISLANDS= wkd(c,\"Q685\")\n VANUATU = wkd(c,\"Q686\")\n NEW_ZEALAND = wkd(c,\"Q664\")\n\n in_australia_athletes = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Australian sportspeople\")], max_steps=5)\n in_australia_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [AUSTRALIA])\n in_australia = logical_ors([in_australia_wkd, in_australia_athletes])\n in_fiji = c.satisfy(TRAVERSIBLE_COUNTRY, [FIJI])\n in_indonesia = c.satisfy(TRAVERSIBLE_COUNTRY, [INDONESIA])\n in_kiribati = c.satisfy(TRAVERSIBLE_COUNTRY, [KIRIBATI])\n in_marshall_islands = c.satisfy(TRAVERSIBLE_COUNTRY, [MARSHALL_ISLANDS])\n in_federates_states_of_micronesia = c.satisfy(TRAVERSIBLE_COUNTRY, [FEDERATED_STATES_OF_MICRONESIA])\n in_nauru = c.satisfy(TRAVERSIBLE_COUNTRY, [NAURU])\n in_palau = c.satisfy(TRAVERSIBLE_COUNTRY, [PALAU])\n in_papua_new_guinea = c.satisfy(TRAVERSIBLE_COUNTRY, [PAPUA_NEW_GUINEA])\n in_samoa_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Samoa\")], max_steps=5)\n in_samoa_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SAMOA])\n in_samoa = logical_ors([in_samoa_wkd, in_samoa_wkp])\n in_solomon_islands = c.satisfy(TRAVERSIBLE_COUNTRY, [SOLOMON_ISLANDS])\n in_vanuatu = c.satisfy(TRAVERSIBLE_COUNTRY, [VANUATU])\n in_new_zealand = c.satisfy(TRAVERSIBLE_COUNTRY, [NEW_ZEALAND])\n\n print(\"South American countries\")\n ARGENTINA = wkd(c,\"Q414\")\n BOLIVIA = wkd(c,\"Q750\")\n BRAZIL = wkd(c,\"Q155\")\n CHILE = wkd(c,\"Q298\")\n COLOMBIA = wkd(c,\"Q739\")\n ECUADOR = wkd(c,\"Q736\")\n GUYANA = wkd(c,\"Q734\")\n PARAGUAY = wkd(c,\"Q733\")\n PERU = wkd(c,\"Q419\")\n SURINAME = wkd(c,\"Q730\")\n TRINIDAD_AND_TOBAGO = wkd(c,\"Q754\")\n URUGUAY = wkd(c,\"Q77\")\n VENEZUELA = 
wkd(c,\"Q717\")\n\n in_argentina = c.satisfy(TRAVERSIBLE_COUNTRY, [ARGENTINA])\n in_bolivia = c.satisfy(TRAVERSIBLE_COUNTRY, [BOLIVIA])\n in_brazil = c.satisfy(TRAVERSIBLE_COUNTRY, [BRAZIL])\n in_chile = c.satisfy(TRAVERSIBLE_COUNTRY, [CHILE])\n in_colombia = c.satisfy(TRAVERSIBLE_COUNTRY, [COLOMBIA])\n in_ecuador = c.satisfy(TRAVERSIBLE_COUNTRY, [ECUADOR])\n in_guyana = c.satisfy(TRAVERSIBLE_COUNTRY, [GUYANA])\n in_paraguay = c.satisfy(TRAVERSIBLE_COUNTRY, [PARAGUAY])\n in_peru = c.satisfy(TRAVERSIBLE_COUNTRY, [PERU])\n in_suriname = c.satisfy(TRAVERSIBLE_COUNTRY, [SURINAME])\n in_trinidad_and_tobago = c.satisfy(TRAVERSIBLE_COUNTRY, [TRINIDAD_AND_TOBAGO])\n in_uruguay = c.satisfy(TRAVERSIBLE_COUNTRY, [URUGUAY])\n in_venezuela = c.satisfy(TRAVERSIBLE_COUNTRY, [VENEZUELA])\n\n print(\"Central American countries\")\n BELIZE = wkd(c,\"Q242\")\n COSTA_RICA = wkd(c,\"Q800\")\n EL_SALVADOR = wkd(c,\"Q792\")\n GUATEMALA = wkd(c,\"Q774\")\n HONDURAS = wkd(c,\"Q783\")\n NICARAGUA = wkd(c,\"Q811\")\n PANAMA = wkd(c,\"Q804\")\n\n in_belize = c.satisfy(TRAVERSIBLE_COUNTRY, [BELIZE])\n in_costa_rica = c.satisfy(TRAVERSIBLE_COUNTRY, [COSTA_RICA])\n in_el_salvador = c.satisfy(TRAVERSIBLE_COUNTRY, [EL_SALVADOR])\n in_guatemala = c.satisfy(TRAVERSIBLE_COUNTRY, [GUATEMALA])\n in_honduras = c.satisfy(TRAVERSIBLE_COUNTRY, [HONDURAS])\n in_nicaragua = c.satisfy(TRAVERSIBLE_COUNTRY, [NICARAGUA])\n in_panama = c.satisfy(TRAVERSIBLE_COUNTRY, [PANAMA])\n\n print(\"North American countries\")\n ANTIGUA_BARBUDA = wkd(c,\"Q781\")\n BAHAMAS = wkd(c,\"Q778\")\n BARBADOS = wkd(c,\"Q244\")\n BELIZE = wkd(c,\"Q242\")\n CANADA = wkd(c,\"Q16\")\n COSTA_RICA = wkd(c,\"Q800\")\n CUBA = wkd(c,\"Q241\")\n DOMINICAN_REPUBLIC = wkd(c,\"Q786\")\n EL_SALVADOR = wkd(c,\"Q792\")\n GRENADA = wkd(c,\"Q769\")\n GUATEMALA = wkd(c,\"Q774\")\n HAITI = wkd(c,\"Q790\")\n HONDURAS = wkd(c,\"Q783\")\n JAMAICA = wkd(c,\"Q766\")\n MEXICO = wkd(c,\"Q96\")\n NICARAGUA = wkd(c,\"Q811\")\n PANAMA = wkd(c,\"Q804\")\n SAINT_KITTS_AND_NEVIS = wkd(c,\"Q763\")\n SAINT_LUCIA = wkd(c,\"Q760\")\n SAINT_VINCENT_AND_GRENADINES = wkd(c,\"Q757\")\n UNITED_STATES = wkd(c,\"Q30\")\n\n in_antigua_barbuda = c.satisfy(TRAVERSIBLE_COUNTRY, [ANTIGUA_BARBUDA])\n in_bahamas = c.satisfy(TRAVERSIBLE_COUNTRY, [BAHAMAS])\n in_barbados = c.satisfy(TRAVERSIBLE_COUNTRY, [BARBADOS])\n in_belize = c.satisfy(TRAVERSIBLE_COUNTRY, [BELIZE])\n canadians = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Canadian people by occupation\")], max_steps=5)\n in_canada_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [CANADA])\n in_canada = logical_ors([canadians, in_canada_wkd])\n\n in_costa_rica = c.satisfy(TRAVERSIBLE_COUNTRY, [COSTA_RICA])\n in_cuba = c.satisfy(TRAVERSIBLE_COUNTRY, [CUBA])\n in_dominican_republic = c.satisfy(TRAVERSIBLE_COUNTRY, [DOMINICAN_REPUBLIC])\n in_el_salvador = c.satisfy(TRAVERSIBLE_COUNTRY, [EL_SALVADOR])\n in_grenada = c.satisfy(TRAVERSIBLE_COUNTRY, [GRENADA])\n in_guatemala = c.satisfy(TRAVERSIBLE_COUNTRY, [GUATEMALA])\n in_haiti = c.satisfy(TRAVERSIBLE_COUNTRY, [HAITI])\n in_honduras = c.satisfy(TRAVERSIBLE_COUNTRY, [HONDURAS])\n in_jamaica = c.satisfy(TRAVERSIBLE_COUNTRY, [JAMAICA])\n in_mexico = c.satisfy(TRAVERSIBLE_COUNTRY, [MEXICO])\n in_nicaragua = c.satisfy(TRAVERSIBLE_COUNTRY, [NICARAGUA])\n in_panama = c.satisfy(TRAVERSIBLE_COUNTRY, [PANAMA])\n in_Saint_Kitts_and_Nevis = c.satisfy(TRAVERSIBLE_COUNTRY, [SAINT_KITTS_AND_NEVIS])\n in_saint_lucia = c.satisfy(TRAVERSIBLE_COUNTRY, [SAINT_LUCIA])\n in_saint_vincent_and_grenadines = c.satisfy(TRAVERSIBLE_COUNTRY, 
[SAINT_VINCENT_AND_GRENADINES])\n in_usa_sports = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:History of sports in the United States\")], max_steps=7)\n years_in_usa = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Years in the United States\")], max_steps=7)\n in_usa_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Roads in the United States\")], max_steps=7)\n in_united_states_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [UNITED_STATES])\n in_united_states = logical_ors([in_usa_sports,in_united_states_wkd, years_in_usa])\n\n print(\"Asian countries\")\n FOURTH_ADMIN_DIVISION_INDONESIA = wkd(c,\"Q2225692\")\n RURAL_COMUNE_VIETNAM = wkd(c,\"Q2389082\")\n AFGHANISTAN = wkd(c,\"Q889\")\n KINGDOM_OF_AFGHANISTAN = wkd(c,\"Q1138904\")\n REPUBLIC_OF_AFGHANISTAN = wkd(c,\"Q1415128\")\n DEMOCRATIC_REPUBLIC_OF_AFGHANISTAN = wkd(c,\"Q476757\")\n BANGLADESH = wkd(c,\"Q902\")\n BHUTAN = wkd(c,\"Q917\")\n BRUNEI = wkd(c,\"Q921\")\n CAMBODIA = wkd(c,\"Q424\")\n CHINA = wkd(c,\"Q148\")\n EAST_TIMOR = wkd(c,\"Q574\")\n INDIA = wkd(c,\"Q668\")\n INDONESIA = wkd(c,\"Q252\")\n IRAN = wkd(c,\"Q794\")\n IRAQ = wkd(c,\"Q796\")\n KURDISTAN = wkd(c,\"Q41470\")\n ISRAEL = wkd(c,\"Q801\")\n JAPAN = wkd(c,\"Q17\")\n JORDAN = wkd(c,\"Q810\")\n KAZAKHSTAN = wkd(c,\"Q232\")\n KUWAIT = wkd(c,\"Q817\")\n KYRGYZSTAN = wkd(c,\"Q813\")\n LAOS = wkd(c,\"Q819\")\n LEBANON = wkd(c,\"Q822\")\n MALAYSIA = wkd(c,\"Q833\")\n MALDIVES = wkd(c,\"Q826\")\n MONGOLIA = wkd(c,\"Q711\")\n MYANMAR = wkd(c,\"Q836\")\n NEPAL = wkd(c,\"Q837\")\n NORTH_KOREA = wkd(c,\"Q423\")\n OMAN = wkd(c,\"Q842\")\n PALESTINE = wkd(c,\"Q219060\")\n PAKISTAN = wkd(c,\"Q843\")\n PHILIPPINES = wkd(c,\"Q928\")\n QATAR = wkd(c,\"Q846\")\n SAUDI_ARABIA = wkd(c,\"Q851\")\n SINGAPORE = wkd(c,\"Q334\")\n SOUTH_KOREA = wkd(c,\"Q884\")\n SRI_LANKA = wkd(c,\"Q854\")\n SYRIA = wkd(c,\"Q858\")\n TAIWAN = wkd(c,\"Q865\")\n TAJIKISTAN = wkd(c,\"Q863\")\n THAILAND = wkd(c,\"Q869\")\n TURKMENISTAN = wkd(c,\"Q874\")\n UNITED_ARAB_EMIRATES = wkd(c,\"Q878\")\n UZBEKISTAN = wkd(c,\"Q265\")\n VIETNAM = wkd(c,\"Q881\")\n YEMEN = wkd(c,\"Q805\")\n\n\n in_afghanistan = c.satisfy(TRAVERSIBLE_COUNTRY, [AFGHANISTAN, REPUBLIC_OF_AFGHANISTAN, DEMOCRATIC_REPUBLIC_OF_AFGHANISTAN])\n in_bangladesh = c.satisfy(TRAVERSIBLE_COUNTRY, [BANGLADESH])\n in_bhutan = c.satisfy(TRAVERSIBLE_COUNTRY, [BHUTAN])\n in_brunei = c.satisfy(TRAVERSIBLE_COUNTRY, [BRUNEI])\n in_cambodia = c.satisfy(TRAVERSIBLE_COUNTRY, [CAMBODIA])\n\n years_in_china = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Years in China\")], max_steps=6)\n chinese_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Chinese people by occupation\")], max_steps=6)\n is_tibetan_politician = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Tibetan politicians\")], max_steps=6)\n in_china_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [CHINA])\n in_china = logical_ors([in_china_wkd,years_in_china,is_tibetan_politician, chinese_people])\n\n\n in_east_timor = c.satisfy(TRAVERSIBLE_COUNTRY, [EAST_TIMOR])\n in_india = c.satisfy(TRAVERSIBLE_COUNTRY, [INDIA])\n in_indonesia = c.satisfy(TRAVERSIBLE_COUNTRY, [INDONESIA,FOURTH_ADMIN_DIVISION_INDONESIA])\n in_iran = c.satisfy(TRAVERSIBLE_COUNTRY, [IRAN])\n in_iraq = c.satisfy(TRAVERSIBLE_COUNTRY, [IRAQ, KURDISTAN])\n in_israel = c.satisfy(TRAVERSIBLE_COUNTRY, [ISRAEL])\n in_japan = c.satisfy(TRAVERSIBLE_COUNTRY, [JAPAN])\n in_jordan = c.satisfy(TRAVERSIBLE_COUNTRY, [JORDAN])\n in_kazakhstan = c.satisfy(TRAVERSIBLE_COUNTRY, [KAZAKHSTAN])\n in_kuwait = 
c.satisfy(TRAVERSIBLE_COUNTRY, [KUWAIT])\n in_kyrgyzstan = c.satisfy(TRAVERSIBLE_COUNTRY, [KYRGYZSTAN])\n in_laos = c.satisfy(TRAVERSIBLE_COUNTRY, [LAOS])\n in_lebanon = c.satisfy(TRAVERSIBLE_COUNTRY, [LEBANON])\n in_malaysia = c.satisfy(TRAVERSIBLE_COUNTRY, [MALAYSIA])\n in_maldives = c.satisfy(TRAVERSIBLE_COUNTRY, [MALDIVES])\n in_mongolia = c.satisfy(TRAVERSIBLE_COUNTRY, [MONGOLIA])\n in_myanmar = c.satisfy(TRAVERSIBLE_COUNTRY, [MYANMAR])\n in_nepal = c.satisfy(TRAVERSIBLE_COUNTRY, [NEPAL])\n in_north_korea = c.satisfy(TRAVERSIBLE_COUNTRY, [NORTH_KOREA])\n in_oman = c.satisfy(TRAVERSIBLE_COUNTRY, [OMAN])\n in_palestine = c.satisfy(TRAVERSIBLE_COUNTRY, [PALESTINE])\n in_pakistan = c.satisfy(TRAVERSIBLE_COUNTRY, [PAKISTAN])\n in_philippines = c.satisfy(TRAVERSIBLE_COUNTRY, [PHILIPPINES])\n in_qatar = c.satisfy(TRAVERSIBLE_COUNTRY, [QATAR])\n in_saudi_arabia = c.satisfy(TRAVERSIBLE_COUNTRY, [SAUDI_ARABIA])\n in_singapore = c.satisfy(TRAVERSIBLE_COUNTRY, [SINGAPORE])\n in_south_korea_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SOUTH_KOREA])\n korean_rulers = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Korean rulers\")], max_steps=6)\n south_korea_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:South Korea\")], max_steps=6)\n south_korean_rulers = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Korean rulers\")], max_steps=6)\n in_south_korea = logical_ors([in_south_korea_wkd, korean_rulers])\n in_sri_lanka = c.satisfy(TRAVERSIBLE_COUNTRY, [SRI_LANKA])\n in_syria_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SYRIA])\n ancient_syria = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Ancient Syria\")], max_steps=6)\n in_syria = logical_ors([in_syria_wkd,ancient_syria])\n in_taiwan = c.satisfy(TRAVERSIBLE_COUNTRY, [TAIWAN])\n in_tajikistan = c.satisfy(TRAVERSIBLE_COUNTRY, [TAJIKISTAN])\n in_thailand = c.satisfy(TRAVERSIBLE_COUNTRY, [THAILAND])\n in_turkmenistan = c.satisfy(TRAVERSIBLE_COUNTRY, [TURKMENISTAN])\n in_united_arab_emirates = c.satisfy(TRAVERSIBLE_COUNTRY, [UNITED_ARAB_EMIRATES])\n in_uzbekistan = c.satisfy(TRAVERSIBLE_COUNTRY, [UZBEKISTAN])\n in_vietnam = c.satisfy(TRAVERSIBLE_COUNTRY, [VIETNAM, RURAL_COMUNE_VIETNAM])\n in_yemen = c.satisfy(TRAVERSIBLE_COUNTRY, [YEMEN])\n\n\n print(\"European countries\")\n ALBANIA = wkd(c,\"Q222\")\n ANDORRA = wkd(c,\"Q228\")\n ARMENIA = wkd(c,\"Q399\")\n AUSTRIA = wkd(c,\"Q40\")\n AUSTRIA_HUNGARY = wkd(c,\"Q28513\")\n AZERBAIJAN = wkd(c,\"Q227\")\n BELARUS = wkd(c,\"Q184\")\n BELGIUM = wkd(c,\"Q31\")\n BOSNIA = wkd(c,\"Q225\")\n BULGARIA = wkd(c,\"Q219\")\n CROATIA = wkd(c,\"Q224\")\n CYPRUS = wkd(c,\"Q229\")\n CZECH_REPUBLIC = wkd(c,\"Q213\")\n CZECHOSLOVAKIA = wkd(c,\"Q33946\")\n DENMARK = wkd(c,\"Q35\")\n ESTONIA = wkd(c,\"Q191\")\n FINLAND = wkd(c,\"Q33\")\n FRANCE = wkd(c,\"Q142\")\n GEORGIA = wkd(c,\"Q230\")\n GERMANY = wkd(c,\"Q183\")\n GERMANY_NAZI = wkd(c,\"Q7318\")\n GERMAN_EMPIRE = wkd(c,\"Q43287\")\n GERMAN_CONFEDERATION = wkd(c,\"Q151624\")\n EAST_GERMANY = wkd(c,\"Q16957\")\n GREECE = wkd(c,\"Q41\")\n HUNGARY = wkd(c,\"Q28\")\n ICELAND = wkd(c,\"Q189\")\n IRELAND = wkd(c,\"Q27\")\n ITALY = wkd(c,\"Q38\")\n ROMAN_EMPIRE = wkd(c,\"Q2277\")\n ANCIENT_ROME = wkd(c,\"Q1747689\")\n KINGDOM_OF_ITALY = wkd(c,\"Q172579\")\n NATIONAL_FASCIST_PARTY = wkd(c,\"Q139596\")\n KAZAKHSTAN = wkd(c,\"Q232\")\n KOSOVO = wkd(c,\"Q1246\")\n LATVIA = wkd(c,\"Q211\")\n LIECHTENSTEIN = wkd(c,\"Q347\")\n LITHUANIA = wkd(c,\"Q37\")\n LUXEMBOURG = wkd(c,\"Q32\")\n MACEDONIA = wkd(c,\"Q221\")\n MALTA = wkd(c,\"Q233\")\n MOLDOVA = wkd(c,\"Q217\")\n 
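# Editor's note (illustrative, not part of the original rules): each constant in this block binds a Wikidata QID to its internal index via wkd(c, ...); country membership is then computed with c.satisfy(TRAVERSIBLE_COUNTRY, [...]), which flags every item whose country-like edges reach one of the targets, and logical_ors(...) merges alternative sources of evidence such as category links. A hypothetical extra rule would look like:\n # in_monaco_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Monegasque people\")], max_steps=4)\n # in_monaco_extended = logical_ors([c.satisfy(TRAVERSIBLE_COUNTRY, [MONACO]), in_monaco_people])\n 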
MONACO = wkd(c,\"Q235\")\n MONTENEGRO = wkd(c,\"Q236\")\n NETHERLANDS = wkd(c,\"Q55\")\n SOUTHERN_NETHERLANDS = wkd(c,\"Q6581823\")\n KINGDOM_OF_NETHERLANDS = wkd(c,\"Q29999\")\n NORWAY = wkd(c,\"Q20\")\n POLAND = wkd(c,\"Q36\")\n PORTUGAL = wkd(c,\"Q45\")\n ROMANIA = wkd(c,\"Q218\")\n RUSSIA = wkd(c,\"Q159\")\n SOVIET_UNION =wkd(c,\"Q15180\")\n RUSSIAN_EMPIRE = wkd(c,\"Q34266\")\n SAN_MARINO = wkd(c,\"Q238\")\n SERBIA = wkd(c,\"Q403\")\n YOUGOSLAVIA = wkd(c,\"Q36704\")\n SLOVAKIA = wkd(c,\"Q214\")\n SLOVENIA = wkd(c,\"Q215\")\n SPAIN = wkd(c,\"Q29\")\n KINGDOM_OF_CASTILLE = wkd(c,\"Q179293\")\n SWEDEN = wkd(c,\"Q34\")\n SWITZERLAND = wkd(c,\"Q39\")\n TURKEY = wkd(c,\"Q43\")\n OTTOMAN_EMPIRE = wkd(c,\"Q12560\")\n UKRAINE = wkd(c,\"Q212\")\n UNITED_KINGDOM = wkd(c,\"Q145\")\n UNITED_KINGDOM_OLD = wkd(c,\"Q174193\")\n KINGDOM_OF_ENGLAND = wkd(c,\"Q179876\")\n KINGDOM_OF_GREAT_BRITAIN = wkd(c,\"Q161885\")\n VATICAN_CITY = wkd(c,\"Q237\")\n\n\n in_albania = c.satisfy(TRAVERSIBLE_COUNTRY, [ALBANIA])\n in_andorra = c.satisfy(TRAVERSIBLE_COUNTRY, [ANDORRA])\n in_armenia = c.satisfy(TRAVERSIBLE_COUNTRY, [ARMENIA])\n\n in_austria_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [AUSTRIA, AUSTRIA_HUNGARY])\n is_austria_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Austrian people by occupation\")], max_steps=5)\n in_austria = logical_ors([in_austria_wkd, is_austria_people])\n in_azerbaijan = c.satisfy(TRAVERSIBLE_COUNTRY, [AZERBAIJAN])\n in_belarus = c.satisfy(TRAVERSIBLE_COUNTRY, [BELARUS])\n in_belgium = c.satisfy(TRAVERSIBLE_COUNTRY, [BELGIUM])\n in_bosnia = c.satisfy(TRAVERSIBLE_COUNTRY, [BOSNIA])\n in_bulgaria = c.satisfy(TRAVERSIBLE_COUNTRY, [BULGARIA])\n in_croatia = c.satisfy(TRAVERSIBLE_COUNTRY, [CROATIA])\n in_cyprus = c.satisfy(TRAVERSIBLE_COUNTRY, [CYPRUS])\n in_czech_republic_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [CZECH_REPUBLIC,CZECHOSLOVAKIA])\n czhec_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Czechoslovak people\")], max_steps=5)\n in_czech_republic = logical_ors([in_czech_republic_wkd, czhec_people])\n in_denmark_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [DENMARK])\n is_danish_legendary_figure = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Danish legendary figures\")], max_steps=5)\n in_denmark = logical_ors([in_denmark_wkd,is_danish_legendary_figure])\n\n in_estonia = c.satisfy(TRAVERSIBLE_COUNTRY, [ESTONIA])\n in_finland = c.satisfy(TRAVERSIBLE_COUNTRY, [FINLAND])\n\n\n years_in_france = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Years in France\")], max_steps=5)\n in_france_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [FRANCE])\n in_france = logical_ors([in_france_wkd,years_in_france])\n\n in_georgia = c.satisfy(TRAVERSIBLE_COUNTRY, [GEORGIA])\n\n years_in_germany = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Years in Germany\")], max_steps=5)\n nazis = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Nazis\")], max_steps=5)\n german_nobility = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:German nobility\")], max_steps=7)\n in_germany_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [GERMANY, GERMANY_NAZI, GERMAN_EMPIRE, GERMAN_CONFEDERATION, EAST_GERMANY])\n in_germany = logical_ors([in_germany_wkd, years_in_germany, nazis, german_nobility])\n\n years_in_greece = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Years in Greece\")], max_steps=5)\n ancient_greeks = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Ancient Greeks\")], max_steps=7)\n greek_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Greek people by occupation\")], 
max_steps=7)\n in_greece_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [GREECE])\n in_greece = logical_ors([in_greece_wkd,years_in_greece, ancient_greeks, greek_people])\n\n in_hungary = c.satisfy(TRAVERSIBLE_COUNTRY, [HUNGARY])\n in_iceland = c.satisfy(TRAVERSIBLE_COUNTRY, [ICELAND])\n in_ireland = c.satisfy(TRAVERSIBLE_COUNTRY, [IRELAND])\n in_italy_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ITALY,NATIONAL_FASCIST_PARTY, KINGDOM_OF_ITALY, ROMAN_EMPIRE, ANCIENT_ROME])\n is_italian_politician = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Italian politicians\")], max_steps=6)\n in_roman_empire = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Roman Empire\")], max_steps=6)\n in_history_of_italy = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:History of Italy by region\")], max_steps=6)\n italian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Italian people by occupation\")], max_steps=6)\n ancient_romans = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Ancient Romans\")], max_steps=8)\n in_italy = logical_ors([in_italy_wkd, in_roman_empire, in_history_of_italy,\n is_italian_politician, italian_people, ancient_romans])\n in_kazakhstan = c.satisfy(TRAVERSIBLE_COUNTRY, [KAZAKHSTAN])\n in_kosovo = c.satisfy(TRAVERSIBLE_COUNTRY, [KOSOVO])\n in_latvia = c.satisfy(TRAVERSIBLE_COUNTRY, [LATVIA])\n in_liectenstein = c.satisfy(TRAVERSIBLE_COUNTRY, [LIECHTENSTEIN])\n in_lithuania = c.satisfy(TRAVERSIBLE_COUNTRY, [LITHUANIA])\n in_luxembourg = c.satisfy(TRAVERSIBLE_COUNTRY, [LUXEMBOURG])\n in_macedonia = c.satisfy(TRAVERSIBLE_COUNTRY, [MACEDONIA])\n in_malta = c.satisfy(TRAVERSIBLE_COUNTRY, [MALTA])\n in_moldova = c.satisfy(TRAVERSIBLE_COUNTRY, [MOLDOVA])\n in_monaco = c.satisfy(TRAVERSIBLE_COUNTRY, [MONACO])\n in_montenegro = c.satisfy(TRAVERSIBLE_COUNTRY, [MONTENEGRO])\n in_netherlands_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [NETHERLANDS, KINGDOM_OF_NETHERLANDS, SOUTHERN_NETHERLANDS])\n dutch_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Dutch people by occupation\")], max_steps=5)\n in_netherlands = logical_ors([in_netherlands_wkd, dutch_people])\n in_norway = c.satisfy(TRAVERSIBLE_COUNTRY, [NORWAY])\n in_poland = c.satisfy(TRAVERSIBLE_COUNTRY, [POLAND])\n in_portugal = c.satisfy(TRAVERSIBLE_COUNTRY, [PORTUGAL])\n in_romania = c.satisfy(TRAVERSIBLE_COUNTRY, [ROMANIA])\n russian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Russian people by occupation\")], max_steps=7)\n sport_in_the_soviet_union = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Sport in the Soviet Union\")], max_steps=7)\n in_russia_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [RUSSIA, RUSSIAN_EMPIRE, SOVIET_UNION])\n in_russia = logical_ors([in_russia_wkd, russian_people, sport_in_the_soviet_union])\n in_san_marino = c.satisfy(TRAVERSIBLE_COUNTRY, [SAN_MARINO])\n in_serbia = c.satisfy(TRAVERSIBLE_COUNTRY, [SERBIA, YOUGOSLAVIA])\n in_slovakia = c.satisfy(TRAVERSIBLE_COUNTRY, [SLOVAKIA])\n in_slovenia = c.satisfy(TRAVERSIBLE_COUNTRY, [SLOVENIA])\n years_in_spain = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Years in Spain\")], max_steps=5)\n in_spain_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SPAIN, KINGDOM_OF_CASTILLE])\n in_spain = logical_ors([in_spain_wkd, years_in_spain])\n years_in_sweden = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Years in Sweden\")], max_steps=5)\n in_sweden_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SWEDEN])\n in_sweden = logical_ors([in_sweden_wkd, years_in_sweden])\n years_in_switzerland = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Years in 
Switzerland\")], max_steps=5)\n in_switzerland_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SWITZERLAND])\n in_switzerland = logical_ors([in_switzerland_wkd, years_in_switzerland ])\n in_turkey = c.satisfy(TRAVERSIBLE_COUNTRY, [TURKEY, OTTOMAN_EMPIRE])\n in_ukraine = c.satisfy(TRAVERSIBLE_COUNTRY, [UKRAINE])\n in_united_kingdom = c.satisfy(TRAVERSIBLE_COUNTRY,\n [UNITED_KINGDOM, UNITED_KINGDOM_OLD, KINGDOM_OF_ENGLAND, KINGDOM_OF_GREAT_BRITAIN])\n popes = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Popes\")], max_steps=5)\n in_vatican_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [VATICAN_CITY])\n in_vatican = logical_ors([popes, in_vatican_wkd])\n\n\n print(\"Artic and others\")\n ARCTIC = wkd(c,\"Q25322\")\n INUIT = wkd(c,\"Q189975\")\n FAROE_ISLANDS = wkd(c,\"Q4628\")\n TONGA = wkd(c,\"Q678\")\n in_faroe_islands_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Faroe Islands\")], max_steps=5)\n in_faroe_islands_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [FAROE_ISLANDS])\n in_faroe_islands = logical_ors([in_faroe_islands_wkp, in_faroe_islands_wkd])\n in_arctic = c.satisfy(TRAVERSIBLE_COUNTRY, [ARCTIC,INUIT])\n in_tonga_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [TONGA])\n in_tonga_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Tonga\")], max_steps=5)\n in_tonga = logical_ors([in_tonga_wkd,in_tonga_wkp])\n\n\n\n is_unlocated = logical_ors([is_Wiki_Links,is_taxon])\n is_unlocated_not = logical_negate(is_unlocated,[is_populated_place,\n is_in_outer_space_not_earth,in_tanzania])\n is_unlocated_only = logical_ors([is_unlocated_not,is_other])\n\n\n COUNTRIES = [ALGERIA, ANGOLA, BENIN, BOTSWANA, BURKINA_FASO, BURUNDI, CAPE_VERDE, CAMEROON, CHAD,\n CENTRAL_AFRICAN_REPUBLIC, COMOROS, DEMOCRATIC_REPUBLIC_OF_CONGO, REPUBLIC_OF_CONGO, DJIBOUTI,\n EGYPT, EQUATORIAL_GUINEA, ERITREA, ETHIOPIA, GABON, THE_GAMBIA, GHANA, GUINEA, GUINEA_BISSAU, IVORY_COAST,\n LESOTHO, KENYA, LIBERIA, LIBYA, MADAGASCAR, MALAWI, MALI, MAURITANIA,MAURITIUS, MOROCCO, MOZAMBIQUE,\n NAMIBIA, NIGER, NIGERIA, RWANDA,SAHARI_ARAB_DEOMOCRATIC_REPUBLIC, SAO_TOME_AND_PRINCIPE, SENEGAL,\n SEYCHELLES, SIERRA_LEONE, SOMALIA, SOMALI_LAND, SOUTH_AFRICA, SUDAN, TANZANIA, TOGO,\n TUNISIA, UGANDA, WESTERN_SAHARA, ZAMBIA, ZIMBABWE,\n AUSTRALIA, FIJI,INDONESIA,KIRIBATI, MARSHALL_ISLANDS,\n FEDERATED_STATES_OF_MICRONESIA, NAURU, NEW_ZEALAND, PAPUA_NEW_GUINEA, SAMOA, SOLOMON_ISLANDS, VANUATU,\n ARGENTINA, BOLIVIA, BRAZIL, CHILE, COLOMBIA, ECUADOR, GUYANA, PARAGUAY, PERU, SURINAME, TRINIDAD_AND_TOBAGO,\n URUGUAY, VENEZUELA,\n BELIZE, COSTA_RICA,EL_SALVADOR, GUATEMALA, HONDURAS, NICARAGUA, PANAMA,\n ANTIGUA_BARBUDA, BAHAMAS, BARBADOS, CANADA, CUBA, DOMINICAN_REPUBLIC, GRENADA, GUATEMALA, HAITI, JAMAICA, MEXICO,\n SAINT_KITTS_AND_NEVIS, SAINT_LUCIA, SAINT_VINCENT_AND_GRENADINES, UNITED_STATES,\n ALBANIA, ANDORRA, ARMENIA, AUSTRIA, AUSTRIA_HUNGARY, AZERBAIJAN, BELARUS, BELGIUM, BOSNIA, BULGARIA, CROATIA,\n CYPRUS,\n CZECH_REPUBLIC, CZECHOSLOVAKIA,\n DENMARK, ESTONIA, FINLAND, FRANCE, GEORGIA, GERMANY, GERMANY_NAZI, GREECE, HUNGARY, ICELAND,\n IRELAND, ITALY, NATIONAL_FASCIST_PARTY, KINGDOM_OF_ITALY, ROMAN_EMPIRE,\n KAZAKHSTAN, KOSOVO, LATVIA, LIECHTENSTEIN, LITHUANIA, LUXEMBOURG, MACEDONIA, MALTA,\n MOLDOVA, MONACO, MONTENEGRO, NORWAY,\n NETHERLANDS, KINGDOM_OF_NETHERLANDS, SOUTHERN_NETHERLANDS,\n POLAND, PORTUGAL, ROMANIA,\n RUSSIA, RUSSIAN_EMPIRE, SOVIET_UNION,\n SAN_MARINO,\n SERBIA, YOUGOSLAVIA,\n SLOVAKIA,\n SLOVENIA, SPAIN, SWEDEN, SWITZERLAND,\n TURKEY, OTTOMAN_EMPIRE, UKRAINE,\n UNITED_KINGDOM, UNITED_KINGDOM_OLD, KINGDOM_OF_ENGLAND, 
KINGDOM_OF_GREAT_BRITAIN,\n AFGHANISTAN, BANGLADESH, BRUNEI, CAMBODIA, CHINA, CYPRUS, EAST_TIMOR, EGYPT, GEORGIA, INDIA, INDONESIA,\n IRAN, IRAQ, ISRAEL, JAPAN, KAZAKHSTAN, KUWAIT, KYRGYZSTAN, LAOS, LEBANON, MALAYSIA, MALDIVES, MONGOLIA,\n MYANMAR, NEPAL, NORTH_KOREA, OMAN, PALESTINE, PAKISTAN, PHILIPPINES, QATAR, SAUDI_ARABIA, SINGAPORE, SOUTH_KOREA, SRI_LANKA,\n SYRIA, TAJIKISTAN, TAIWAN, THAILAND, TURKMENISTAN, UNITED_ARAB_EMIRATES, UZBEKISTAN, VIETNAM, YEMEN,\n VATICAN_CITY,\n ARCTIC, FAROE_ISLANDS, TONGA\n ]\n\n\n\n located_somewhere_wkd = c.satisfy([wprop.COUNTRY_OF_CITIZENSHIP, wprop.COUNTRY], COUNTRIES)\n located_somewhere = logical_ors([ located_somewhere_wkd, in_austria, in_afghanistan, in_china, in_france,\n in_sweden, in_china, in_switzerland, in_germany, years_in_usa, in_greece,\n in_south_korea, in_italy,\n in_denmark, in_spain, in_iraq, in_egypt, in_vatican, in_canada,\n in_faroe_islands, in_netherlands, in_russia, in_samoa, in_syria, in_tonga, in_zambia ])\n\n is_unlocated_politician = logical_negate(is_politician,[located_somewhere])\n\n is_unlocated_painter = logical_negate(is_painter, [located_somewhere])\n\n is_unlocated_association_football_player = logical_negate(is_association_football_player, [located_somewhere])\n\n\n return {\n \"Algeria\": in_algeria,\n \"Angola\": in_angola,\n \"Benin\": in_benin,\n \"BOSTWANA\": in_botswana,\n \"BURKINA_FASO\": in_burkina_faso,\n \"BURUNDI\": in_burundi,\n \"CAPE_VERDE\": in_cape_verde,\n \"CAMEROON\": in_cameroon,\n \"CHAD\": in_chad,\n \"CENTRAL AFRICAN REPUBLIC\": in_central_african_republic,\n \"COMOROS\": in_comoros,\n \"DEMOCRATIC_REPUBLIC_OF_CONGO\": in_democratic_republic_congo,\n \"REPUBLIC_OF_CONGO\": in_republic_of_congo,\n \"DJIBOUTI\": in_djibouti,\n \"EGYPT\": in_egypt,\n \"EQUATORIAL_GUINEA\": in_equatorial_guinea,\n \"ERITREA\": in_eritrea,\n \"ETHIOPIA\": in_ethiopia,\n \"GABON\": in_gabon,\n \"THE_GAMBIA\": in_the_gambia,\n \"GHANA\": in_ghana,\n \"GUINEA\": in_guinea,\n \"GUINEA_BISSAU\": in_guinea_bissau,\n \"IVORY_COAST\": in_ivory_coast,\n \"LESOTHO\": in_lesotho,\n \"KENYA\": in_kenya,\n \"LIBERIA\": in_liberia,\n \"LIBYA\": in_libya,\n \"Madagascar\": in_madagascar,\n \"Malawi\": in_malawi,\n \"Mali\": in_mali,\n \"Mauritania\": in_mauritania,\n \"Mauritius\": in_mauritius,\n \"Morocco\": in_morrocco,\n \"Mozambique\": in_mozambique,\n \"Namibia\": in_namibia,\n \"Niger\": in_niger,\n \"Nigeria\": in_nigeria,\n \"Rwanda\": in_rwanda,\n \"Sahrawi_Arab_Democratic_Republic\": in_sadr,\n \"Sao_Tome_and_Principe\": in_stap,\n \"Senegal\": in_senegal,\n \"Seychelles\": in_seychelles,\n \"Sierra_Leone\": in_sierra_leone,\n \"Somalia\": in_somalia,\n \"Somalilandβ€Ž\": in_somali_land,\n \"South_Africaβ€Ž\": in_south_africa,\n \"South_Sudanβ€Ž\": in_ssudan,\n \"Sudan\": in_sudan,\n \"SWAZILAND\": in_swaziland,\n \"TANZANIA\": in_tanzania,\n \"TOGO\": in_togo,\n \"TUNISIA\": in_tunisia,\n \"Uganda\": in_uganda,\n \"Western Sahara\": in_western_sahara,\n \"Zambia\": in_zambia,\n \"Zimbabwe\": in_zimbabwe,\n\n\n \"AUSTRALIA\": in_australia,\n \"FIJI\": in_fiji,\n \"INDONESIA\": in_indonesia,\n \"KIRIBATI\": in_kiribati,\n \"MARSHALL_ISLANDS\": in_marshall_islands,\n \"FEDERATED_STATES_OF_MICRONESIA\": in_federates_states_of_micronesia,\n \"NAURU\": in_nauru,\n \"NEW_ZEALAND\": in_new_zealand,\n \"PAPUA_NEW_GUINEA\": in_papua_new_guinea,\n \"SAMOA\": in_samoa,\n \"SOLOMON_ISLANDS\": in_solomon_islands,\n \"VANUATU\": in_vanuatu,\n\n\n \"ARGENTINA\": in_argentina,\n \"BOLIVIA\": in_bolivia,\n \"BRAZIL\": in_brazil,\n 
\"CHILE\": in_chile,\n \"COLOMBIA\": in_colombia,\n \"ECUADOR\": in_ecuador,\n \"GUYANA\": in_guyana,\n \"PARAGUAY\": in_paraguay,\n \"PERU\": in_peru,\n \"SURINAME\": in_suriname,\n \"TRINIDAD_AND_TOBAGO\": in_trinidad_and_tobago,\n \"URUGUAY\": in_uruguay,\n \"VENEZUELA\": in_venezuela,\n\n\n \"BELIZE\": in_belize,\n \"COSTA_RICA\": in_costa_rica,\n \"EL_SALVADOR\": in_el_salvador,\n \"GUATEMALA\": in_guatemala,\n \"HONDURAS\": in_honduras,\n \"NICARAGUA\": in_nicaragua,\n \"PANAMA\": in_panama,\n\n\n \"ANTIGUA_BARBUDA\": in_antigua_barbuda,\n \"BAHAMAS\": in_bahamas,\n \"BARBADOS\": in_barbados,\n \"CANADA\": in_canada,\n \"CUBA\": in_cuba,\n \"DOMINICAN REPUBLIC\": in_dominican_republic,\n \"GRENADA\": in_grenada,\n \"GUATEMALA\": in_guatemala,\n \"HAITI\": in_haiti,\n \"JAMAICA\": in_jamaica,\n \"MEXICO\": in_mexico,\n \"SAINT_KITTS_AND_NEVIS\": in_Saint_Kitts_and_Nevis,\n \"SAINT_LUCIA\": in_saint_lucia,\n \"SAINT_VINCENT_AND_GRENADINES\": in_saint_vincent_and_grenadines,\n \"UNITED_STATES\": in_united_states,\n\n\n \"ALBANIA\": in_albania,\n \"ANDORRA\": in_andorra,\n \"ARMENIA\": in_armenia,\n \"AUSTRIA\": in_austria,\n \"AZERBAIJAN\": in_azerbaijan,\n \"BELARUS\": in_belarus,\n \"BELGIUM\": in_belgium,\n \"BOSNIA\": in_bosnia,\n \"BULGARIA\": in_bulgaria,\n \"CROATIA\": in_croatia,\n \"CYPRUS\": in_cyprus,\n \"CZECH REPUBLIC\": in_czech_republic,\n \"DENMARK\": in_denmark,\n \"ESTONIA\": in_estonia,\n \"FINLAND\": in_finland,\n \"FRANCE\": in_france,\n \"GEORGIA\": in_georgia,\n \"GERMANY\": in_germany,\n \"GREECE\": in_greece,\n \"HUNGARY\": in_hungary,\n \"ICELAND\": in_iceland,\n \"IRELAND\": in_ireland,\n \"ITALY\": in_italy,\n \"KAZAKHSTAN\": in_kazakhstan,\n \"KOSOVO\": in_kosovo,\n \"LATVIA\": in_latvia,\n \"LIECHTENSTEIN\": in_liectenstein,\n \"LITHUANIA\": in_lithuania,\n \"LUXEMBOURG\": in_luxembourg,\n \"MACEDONIA\": in_macedonia,\n \"MALTA\": in_malta,\n \"MOLDOVA\": in_moldova,\n \"MONACO\": in_monaco,\n \"MONTENEGRO\": in_montenegro,\n \"NORWAY\": in_norway,\n \"NETHERLANDS\": in_netherlands,\n \"POLAND\": in_poland,\n \"PORTUGAL\": in_portugal,\n \"ROMANIA\": in_romania,\n \"RUSSIA\": in_russia,\n \"SAN MARINO\": in_san_marino,\n \"SERBIA\": in_serbia,\n \"SLOVAKIA\": in_slovakia,\n \"SLOVENIA\": in_slovenia,\n \"SPAIN\": in_spain,\n \"SWEDEN\": in_sweden,\n \"SWITZERLAND\": in_switzerland,\n \"TURKEY\": in_turkey,\n \"UKRAINE\": in_ukraine,\n \"UNITED KINGDOM\": in_united_kingdom,\n\n\n \"AFGHANISTAN\": in_afghanistan,\n \"BANGLADESH\": in_bangladesh,\n \"BHUTAN\": in_bhutan,\n \"BRUNEI\": in_brunei,\n \"CAMBODIA\": in_cambodia,\n \"CHINA\": in_china,\n \"CYPRUS\": in_cyprus,\n \"EAST TIMOR\": in_east_timor,\n \"EGYPT\": in_egypt,\n \"GEORGIA\": in_georgia,\n \"INDIA\": in_india,\n \"INDONESIA\": in_indonesia,\n \"IRAN\": in_iran,\n \"IRAQ\": in_iraq,\n \"ISRAEL\": in_israel,\n \"JAPAN\": in_japan,\n \"JORDAN\": in_jordan,\n \"KAZAKHSTAN\": in_kazakhstan,\n \"KUWAIT\": in_kuwait,\n \"KYRGYZSTAN\": in_kyrgyzstan,\n \"LAOS\": in_laos,\n \"LEBANON\": in_lebanon,\n \"MALAYSIA\": in_malaysia,\n \"MALDIVES\": in_maldives,\n \"MONGOLIA\": in_mongolia,\n \"MYANMAR\": in_myanmar,\n \"NEPAL\": in_nepal,\n \"NORTH_KOREA\": in_north_korea,\n \"OMAN\": in_oman,\n \"PALESTINE\": in_palestine,\n \"PAKISTAN\": in_pakistan,\n \"PHILIPPINES\": in_philippines,\n \"QATAR\": in_qatar,\n \"SAUDI_ARABIA\": in_saudi_arabia,\n \"SINGAPORE\": in_singapore,\n \"SOUTH_KOREA\": in_south_korea,\n \"SRI LANKA\": in_sri_lanka,\n \"SYRIA\": in_syria,\n \"TAJIKISTAN\": in_tajikistan,\n \"TAIWAN\": 
in_taiwan,\n \"THAILAND\": in_thailand,\n \"TURKMENISTAN\": in_turkmenistan,\n \"UNITED_ARAB_EMIRATES\": in_united_arab_emirates,\n \"UZBEKISTAN\": in_uzbekistan,\n \"VIETNAM\": in_vietnam,\n \"YEMEN\": in_yemen,\n \"OUTERSPACE\": is_in_outer_space_not_earth,\n\n \"ARCTIC\": in_arctic,\n \"FAROE_ISLANDS\": in_faroe_islands,\n \"TONGA\": in_tonga,\n\n \"UNLOCATED\": is_unlocated_only,\n \"USA_ROADS\": in_usa_roads,\n \"POLITICIAN\": is_politician,\n \"UNLOCATED_POLITICIAN\": is_unlocated_politician,\n \"UNLOCATED_PAINTER\": is_unlocated_painter,\n \"UNLOCATED_ASSOCIATION_FOOTBALL_PLAYER\": is_unlocated_association_football_player\n }\n"}}},{"rowIdx":49,"cells":{"python_code":{"kind":"string","value":"\"\"\"\nCreate membership rules for entities based on their date of existence/birth/etc.\nMore classes can be created by selecting other key dates as hyperplanes.\n\"\"\"\nfrom numpy import (\n logical_and, logical_or, logical_not, logical_xor, where\n)\nfrom wikidata_linker_utils.logic import logical_negate, logical_ors, logical_ands\nimport wikidata_linker_utils.wikidata_properties as wprop\n\n\ndef wkp(c, name):\n \"\"\"Convert a string wikipedia article name to its Wikidata index.\"\"\"\n return c.article2id[\"enwiki/\" + name][0][0]\n\n\ndef wkd(c, name):\n \"\"\"Convert a wikidata QID to its wikidata index.\"\"\"\n return c.name2index[name]\n\n\ndef classify(c):\n D1950 = 1950\n\n pre_1950 = logical_ors([\n c.attribute(wprop.PUBLICATION_DATE) < D1950,\n c.attribute(wprop.DATE_OF_BIRTH) < D1950,\n c.attribute(wprop.INCEPTION) < D1950,\n c.attribute(wprop.DISSOLVED_OR_ABOLISHED) < D1950,\n c.attribute(wprop.POINT_IN_TIME) < D1950,\n c.attribute(wprop.START_TIME) < D1950\n ])\n\n post_1950 = logical_and(logical_ors([\n c.attribute(wprop.PUBLICATION_DATE) >= D1950,\n c.attribute(wprop.DATE_OF_BIRTH) >= D1950,\n c.attribute(wprop.INCEPTION) >= D1950,\n c.attribute(wprop.DISSOLVED_OR_ABOLISHED) >= D1950,\n c.attribute(wprop.POINT_IN_TIME) >= D1950,\n c.attribute(wprop.START_TIME) >= D1950\n ]), logical_not(pre_1950))\n\n # some elements are neither pre 1950 or post 1950, they are \"undated\"\n # (e.g. no value was provided for any of the time attributes used\n # above)\n undated = logical_and(logical_not(pre_1950), logical_not(post_1950))\n print(\"%d items have no date information\" % (undated.sum(),))\n return {\n \"pre-1950\": pre_1950,\n \"post-1950\": post_1950\n }\n"}}},{"rowIdx":50,"cells":{"python_code":{"kind":"string","value":"\"\"\"\nAssociate to each entity a type (exclusive membership). Association is imperfect\n(e.g. 
some false positives, false negatives), however the majority of entities\nare covered under this umbrella and thus a model can learn to predict several\nof the attributes listed below.\n\"\"\"\nfrom numpy import (\n logical_and, logical_or, logical_not, logical_xor, where\n)\nfrom wikidata_linker_utils.logic import logical_negate, logical_ors, logical_ands\nimport wikidata_linker_utils.wikidata_properties as wprop\n\ndef wkp(c, name):\n return c.article2id['enwiki/' + name][0][0]\n\ndef wkd(c, name):\n return c.name2index[name]\n\n\ndef classify(c):\n TRAVERSIBLE = [wprop.INSTANCE_OF, wprop.SUBCLASS_OF]\n TRAVERSIBLE_LO = [wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.IS_A_LIST_OF]\n\n MALE = wkd(c,\"Q6581097\")\n FEMALE = wkd(c,\"Q6581072\")\n HUMAN = wkp(c, \"Human\")\n TAXON = wkd(c, \"Q16521\")\n HORSE = wkd(c, \"Q726\")\n RACE_HORSE = wkd(c, \"Q10855242\")\n FOSSIL_TAXON = wkd(c, \"Q23038290\")\n MONOTYPIC_TAXON = wkd(c, \"Q310890\")\n FOOD = wkp(c, \"Food\")\n DRINK = wkp(c, \"Drink\")\n BIOLOGY = wkp(c, \"Biology\")\n GEOGRAPHICAL_OBJECT = wkd(c, \"Q618123\")\n LOCATION_GEOGRAPHY = wkd(c, \"Q2221906\")\n ORGANISATION = wkp(c, 'Organization')\n MUSICAL_WORK = wkd(c, 'Q2188189')\n AUDIO_VISUAL_WORK = wkd(c,'Q2431196')\n ART_WORK = wkd(c,'Q838948')\n PHYSICAL_OBJECT = wkp(c, \"Physical body\")\n VALUE = wkd(c, 'Q614112')\n TIME_INTERVAL = wkd(c, 'Q186081')\n EVENT = wkd(c, 'Q1656682')\n POPULATED_PLACE = wkd(c, 'Q486972')\n ACTIVITY = wkd(c, \"Q1914636\")\n PROCESS = wkd(c, \"Q3249551\")\n BODY_OF_WATER = wkd(c, \"Q15324\")\n PEOPLE = wkd(c, \"Q2472587\")\n LANGUAGE = wkd(c, \"Q34770\")\n ALPHABET = wkd(c, \"Q9779\")\n SPEECH = wkd(c, \"Q861911\")\n GAS = wkd(c, \"Q11432\")\n CHEMICAL_COMPOUND = wkd(c, \"Q11173\")\n DRUG = wkd(c, \"Q8386\")\n GEOMETRIC_SHAPE = wkd(c, \"Q815741\")\n MIND = wkd(c, \"Q450\")\n TV_STATION = wkd(c, \"Q1616075\")\n\n AWARD_CEREMONY = wkd(c, \"Q4504495\")\n SONG = wkd(c, \"Q7366\")\n SINGLE = wkd(c, \"Q134556\")\n CHESS_OPENING = wkd(c, \"Q103632\")\n BATTLE = wkd(c, \"Q178561\")\n BLOCKADE = wkd(c, \"Q273976\")\n MILITARY_OFFENSIVE = wkd(c, \"Q2001676\")\n DEVELOPMENT_BIOLOGY = wkd(c, \"Q213713\")\n UNIT_OF_MASS = wkd(c, \"Q3647172\")\n WATERCOURSE = wkd(c, \"Q355304\")\n VOLCANO = wkd(c, \"Q8072\")\n LAKE = wkd(c, \"Q23397\")\n SEA = wkd(c, \"Q165\")\n BRAND = wkd(c, \"Q431289\")\n AUTOMOBILE_MANUFACTURER = wkd(c, \"Q786820\")\n MOUNTAIN = wkd(c, \"Q8502\")\n MASSIF = wkd(c, \"Q1061151\")\n WAR = wkd(c, \"Q198\")\n CRIME = wkd(c, \"Q83267\")\n GENE = wkd(c, \"Q7187\")\n CHROMOSOME = wkd(c, \"Q37748\")\n DISEASE = wkd(c, \"Q12136\")\n ASTEROID = wkd(c, \"Q3863\")\n COMET = wkd(c, \"Q3559\")\n PLANET = wkd(c, \"Q634\")\n GALAXY = wkd(c, \"Q318\")\n ASTRONOMICAL_OBJECT = wkd(c, \"Q6999\")\n FICTIONAL_ASTRONOMICAL_OBJECT = wkd(c, \"Q15831598\")\n MATHEMATICAL_OBJECT = wkd(c, \"Q246672\")\n REGION = wkd(c, \"Q82794\")\n PHYSICAL_QUANTITY = wkd(c, \"Q107715\")\n NUMBER = wkd(c, \"Q11563\")\n NATURAL_PHENOMENON = wkd(c, \"Q1322005\")\n GEOLOGICAL_FORMATION = wkd(c, \"Q736917\")\n CURRENCY = wkd(c, \"Q8142\")\n MONEY = wkd(c, \"Q1368\")\n LANDFORM = wkd(c, \"Q271669\")\n COUNTRY = wkd(c, \"Q6256\")\n FICTIONAL_HUMAN = wkd(c, \"Q15632617\")\n AWARD = wkd(c, \"Q618779\")\n RELIGIOUS_TEXT = wkd(c, \"Q179461\")\n OCCUPATION = wkd(c, \"Q12737077\")\n PROFESSION = wkd(c, \"Q28640\")\n POSITION = wkd(c, \"Q4164871\")\n RELIGION = wkd(c, \"Q9174\")\n SOFTWARE = wkd(c, \"Q7397\")\n ELECTRONIC_GAME = wkd(c, \"Q2249149\")\n GAME = wkd(c, \"Q11410\")\n VIDEO_GAME_FRANCHISES = 
wkd(c, \"Q7213857\")\n TRAIN_STATION = wkd(c, \"Q55488\")\n BRIDGE = wkd(c, \"Q12280\")\n AIRPORT = wkd(c, \"Q62447\")\n SURNAME = wkd(c, \"Q101352\")\n GIVEN_NAME = wkd(c, \"Q202444\")\n FEMALE_GIVEN_NAME = wkd(c, \"Q11879590\")\n MALE_GIVEN_NAME = wkd(c, \"Q12308941\")\n GIVEN_NAME = wkd(c, \"Q202444\")\n MOLECULE = wkd(c, \"Q11369\")\n PROTEIN_FAMILY = wkd(c, \"Q417841\")\n PROTEIN_DOMAIN = wkd(c, \"Q898273\")\n MULTIPROTEIN_COMPLEX = wkd(c, \"Q420927\")\n LAW = wkd(c, \"Q7748\")\n VEHICLE = wkd(c, \"Q42889\")\n MODE_OF_TRANSPORT = wkd(c, \"Q334166\")\n WATERCRAFT = wkd(c, \"Q1229765\")\n AIRCRAFT = wkd(c, \"Q11436\")\n ROAD_VEHICLE = wkd(c, \"Q1515493\")\n AUTOMOBILE_MODEL = wkd(c, \"Q3231690\")\n AUTOMOBILE = wkd(c, \"Q1420\")\n TRUCK = wkd(c, \"Q43193\")\n MOTORCYCLE_MODEL = wkd(c, \"Q23866334\")\n TANK = wkd(c, \"Q12876\")\n FIRE_ENGINE = wkd(c, \"Q208281\")\n AMBULANCE = wkd(c, \"Q180481\")\n RAILROAD = wkd(c, \"Q22667\")\n RADIO_PROGRAM = wkd(c, \"Q1555508\")\n DISCOGRAPHY = wkd(c, \"Q273057\")\n WEBSITE = wkd(c, \"Q35127\")\n WEAPON = wkd(c, \"Q728\")\n PUBLICATION = wkd(c, \"Q732577\")\n ARTICLE = wkd(c, \"Q191067\")\n FAMILY = wkd(c, \"Q8436\")\n FICTIONAL_CHARACTER = wkd(c, \"Q95074\")\n FACILITY = wkd(c, \"Q13226383\")\n CONCEPT = wkd(c, \"Q151885\")\n PROVERB = wkd(c, \"Q35102\")\n ANATOMICAL_STRUCTURE = wkd(c, \"Q4936952\")\n BREED = wkd(c, \"Q38829\")\n PLANT_STRUCTURE = wkd(c, \"Q25571752\")\n PLANT = wkd(c, \"Q756\")\n SPECIAL_FIELD = wkd(c, \"Q1047113\")\n ACADEMIC_DISCIPLINE = wkd(c, \"Q11862829\")\n TERM = wkd(c, \"Q1969448\")\n SEXUAL_ORIENTATION = wkd(c, \"Q17888\")\n PARADIGM = wkd(c, \"Q28643\")\n LEGAL_CASE = wkd(c, \"Q2334719\")\n SPORT = wkd(c, \"Q349\")\n RECURRING_SPORTING_EVENT = wkd(c, \"Q18608583\")\n ART_GENRE = wkd(c, \"Q1792379\")\n SPORTING_EVENT = wkd(c, \"Q16510064\")\n COMIC = wkd(c, \"Q1004\")\n CHARACTER = wkd(c, \"Q3241972\")\n PERSON = wkd(c, \"Q215627\")\n NATIONAL_HERITAGE_SITE = wkd(c, \"Q358\")\n ESTATE = wkd(c, \"Q2186896\")\n ELECTION = wkd(c, \"Q40231\")\n LEGISLATIVE_TERM = wkd(c, \"Q15238777\")\n COMPETITION = wkd(c, \"Q476300\")\n LEGAL_ACTION = wkd(c, \"Q27095657\")\n SEX_TOY = wkd(c, \"Q10816\")\n MONUMENT = wkd(c, \"Q4989906\")\n ASSOCIATION_FOOTBALL_POSITION = wkd(c, \"Q4611891\")\n # ICE_HOCKEY_POSITION = wkd(c, \"Q18533987\")\n # PART_OF_LAND = wkd(c, \"Q23001306\")\n MUSIC_DOWNLOAD = wkd(c, \"Q6473564\")\n OCCUPATION = wkd(c, \"Q12737077\")\n KINSHIP = wkd(c, \"Q171318\")\n KIN = wkd(c, \"Q21073947\")\n PSEUDONYM = wkd(c, \"Q61002\")\n STOCK_CHARACTER = wkd(c, \"Q162244\")\n TITLE = wkd(c, \"Q4189293\")\n DATA_FORMAT = wkd(c, \"Q494823\")\n ELECTROMAGNETIC_WAVE = wkd(c, \"Q11386\")\n POSTAL_CODE = wkd(c, \"Q37447\")\n CLOTHING = wkd(c, \"Q11460\")\n NATIONALITY = wkd(c, \"Q231002\")\n BASEBALL_POSITION = wkd(c, \"Q1151733\")\n AMERICAN_FOOTBALL_POSITIONS = wkd(c, \"Q694589\")\n POSITION_TEAM_SPORTS = wkd(c, \"Q1781513\")\n FILE_FORMAT_FAMILY = wkd(c, \"Q26085352\")\n FILE_FORMAT = wkd(c, \"Q235557\")\n TAXONOMIC_RANK = wkd(c, \"Q427626\")\n ORDER_HONOUR = wkd(c, \"Q193622\")\n BRANCH_OF_SCIENCE = wkd(c, \"Q2465832\")\n RESEARCH = wkd(c, \"Q42240\")\n METHOD = wkd(c, \"Q1799072\")\n ALGORITHM = wkd(c, \"Q8366\")\n PROPOSITION = wkd(c, \"Q108163\")\n SPORTSPERSON = wkd(c, \"Q2066131\")\n LAKES_MINESOTTA = wkd(c, \"Q8580663\")\n NAMED_PASSENGER_TRAIN_INDIA = wkd(c, \"Q9260591\")\n TOWNSHIPS_MISOURI = wkd(c, \"Q8861637\")\n RACE_ETHNICITY_USA = wkd(c, \"Q2035701\")\n RECORD_CHART = wkd(c, \"Q373899\")\n SINGLE_ENGINE_AIRCRAFT = 
wkd(c, \"Q7405339\")\n SIGNIFICANT_OTHER = wkd(c, \"Q841509\")\n BILLBOARDS = wkd(c, \"Q19754079\")\n RADIO_STATION = wkd(c, \"Q19754079\")\n RADIO_STATION2 = wkd(c, \"Q1474493\")\n NOBLE_TITLE = wkd(c, \"Q216353\")\n HOUSES_NATIONAL_REGISTER_ARKANSAS = wkd(c, \"Q8526394\")\n CLADE = wkd(c, \"Q713623\")\n BOARD_GAMES = wkd(c, \"Q131436\")\n CLAN = wkd(c, \"Q211503\")\n ACCIDENT = wkd(c, \"Q171558\")\n MASSACRE = wkd(c, \"Q3199915\")\n TORNADO = wkd(c, \"Q8081\")\n NATURAL_DISASTER = wkd(c, \"Q8065\")\n SPORTS_TEAM = wkd(c, \"Q12973014\")\n BAND_ROCK_AND_POP = wkd(c, \"Q215380\")\n ORGANIZATION_OTHER = wkd(c, \"Q43229\")\n POLITICAL_PARTY = wkd(c, \"Q7278\")\n SPECIES = wkd(c, \"Q7432\")\n CHEMICAL_SUBSTANCE = wkd(c, \"Q79529\")\n\n THREATENED_SPECIES = wkd(c, \"Q515487\")\n HYPOTHETICAL_SPECIES = wkd(c, \"Q5961273\")\n\n CONFLICT = wkd(c, \"Q180684\")\n PRIVATE_USE_AREAS = wkd(c, \"Q11152836\")\n\n BARONETCIES_IN_UK = wkd(c, \"Q8290061\")\n EXTINCT_BARONETCIES_ENGLAND = wkd(c, \"Q8432223\")\n EXTINCT_BARONETCIES_UK = wkd(c, \"Q8432226\")\n\n WIKIPEDIA_DISAMBIGUATION = wkd(c, \"Q4167410\")\n WIKIPEDIA_TEMPLATE_NAMESPACE = wkd(c, \"Q11266439\")\n WIKIPEDIA_LIST = wkd(c, \"Q13406463\")\n WIKIPEDIA_PROJECT_PAGE = wkd(c, \"Q14204246\")\n WIKIMEDIA_CATEGORY_PAGE = wkd(c, \"Q4167836\")\n WIKIPEDIA_USER_LANGUAGE_TEMPLATE = wkd(c, \"Q19842659\")\n WIKIDATA_PROPERTY = wkd(c, \"Q18616576\")\n COLLEGIATE_ATHLETICS_PROGRAM = wkd(c, \"Q5146583\")\n SPORTS_TRANSFER_AF = wkd(c, \"Q1811518\")\n DEMOGRAPHICS_OF_NORWAY = wkd(c, \"Q7664203\")\n DOCUMENT = wkd(c, \"Q49848\")\n BASIC_STAT_UNIT_NORWAY = wkd(c, \"Q4580177\")\n PUBLIC_TRANSPORT = wkd(c, \"Q178512\")\n HAZARD = wkd(c, \"Q1132455\")\n BASEBALL_RULES = wkd(c, \"Q1153773\")\n HIT_BASEBALL = wkd(c, \"Q713493\")\n OUT_BASEBALL = wkd(c, \"Q1153773\")\n LAWS_OF_ASSOCIATION_FOOTBALL = wkd(c, \"Q7215850\")\n CRICKET_LAWS_AND_REGULATION = wkd(c, \"Q8427034\")\n MEASUREMENTS_OF_POVERTY = wkd(c, \"Q8614855\")\n PROFESSIONAL_WRESTLING_MATCH_TYPES = wkd(c, \"Q679633\")\n CITATION = wkd(c, \"Q1713\")\n INTERNATIONAL_RELATIONS = wkd(c, \"Q166542\")\n WORLD_VIEW = wkd(c, \"Q49447\")\n ROCK_GEOLOGY = wkd(c, \"Q8063\")\n BASEBALL_STATISTIC = wkd(c, \"Q8291081\")\n BASEBALL_STATISTICS = wkd(c, \"Q809898\")\n TRAIN_ACCIDENT = wkd(c, \"Q1078765\")\n CIRCUS_SKILLS = wkd(c, \"Q4990963\")\n FOLKLORE = wkd(c, \"Q36192\")\n NEWS_BUREAU = wkd(c, \"Q19824398\")\n RECESSION = wkd(c, \"Q176494\")\n NYC_BALLET = wkd(c, \"Q1336942\")\n SPORTS_RECORD = wkd(c, \"Q1241356\")\n WINGSPAN = wkd(c, \"Q245097\")\n WIN_LOSS_RECORD_PITCHING = wkd(c, \"Q1202506\")\n CRICKET_TERMINOLOGY = wkd(c, \"Q8427141\")\n UNION_ARMY = wkd(c, \"Q1752901\")\n POPULATION = wkd(c, \"Q33829\")\n WIND = wkd(c, \"Q8094\")\n TORPEDO_TUBE = wkd(c, \"Q1330003\")\n WEAPONS_PLATFORM = wkd(c, \"Q7978115\")\n COLOR = wkd(c, \"Q1075\")\n SOCIAL_SCIENCE = wkd(c, \"Q34749\")\n DISCIPLINE_ACADEMIA = wkd(c, \"Q11862829\")\n FORMAL_SCIENCE = wkd(c, \"Q816264\")\n ASPHALT = wkd(c, \"Q167510\")\n TALK_RADIO = wkd(c, \"Q502319\")\n ART_MOVEMENT = wkd(c, \"Q968159\")\n IDEOLOGY = wkd(c, \"Q7257\")\n\n # print([c.get_name(idx) for idx in c.relation(wprop.INSTANCE_OF)[wkd(c, \"Q14934048\")]])\n # print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.INSTANCE_OF)[wkd(c, \"Q14934048\")]])\n\n # print([c.get_name(idx) for idx in c.relation(wprop.PART_OF)[wkd(c, \"Q14934048\")]])\n # print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.PART_OF)[wkd(c, \"Q14934048\")]])\n\n # print([c.get_name(idx) for idx in 
c.relation(wprop.SUBCLASS_OF)[wkd(c, \"Q14934048\")]])\n # print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.SUBCLASS_OF)[wkd(c, \"Q14934048\")]])\n\n # print([c.get_name(idx) for idx in c.relation(wprop.CATEGORY_LINK)[wkd(c, \"Q14934048\")]])\n # print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.CATEGORY_LINK)[wkd(c, \"Q14934048\")]])\n\n is_sports_terminology = logical_or(\n c.satisfy(TRAVERSIBLE_LO, [OUT_BASEBALL, HIT_BASEBALL]),\n c.satisfy(\n [wprop.CATEGORY_LINK],\n [\n BASEBALL_RULES,\n LAWS_OF_ASSOCIATION_FOOTBALL,\n CRICKET_LAWS_AND_REGULATION,\n PROFESSIONAL_WRESTLING_MATCH_TYPES,\n CRICKET_TERMINOLOGY\n ],\n max_steps=1\n )\n )\n is_accident = c.satisfy(TRAVERSIBLE_LO, [ACCIDENT])\n is_taxon = c.satisfy([wprop.INSTANCE_OF, wprop.IS_A_LIST_OF],\n [\n TAXON, FOSSIL_TAXON, MONOTYPIC_TAXON, HORSE, RACE_HORSE, CLADE, SPECIES,\n THREATENED_SPECIES, HYPOTHETICAL_SPECIES\n ]\n )\n is_breed = c.satisfy(TRAVERSIBLE_LO, [BREED])\n is_taxon_or_breed = logical_or(is_taxon, is_breed)\n is_human = c.satisfy(TRAVERSIBLE_LO, [HUMAN, FICTIONAL_HUMAN])\n is_country = c.satisfy(TRAVERSIBLE_LO, [COUNTRY])\n is_people = c.satisfy(\n TRAVERSIBLE_LO,\n [\n PEOPLE,\n NATIONALITY,\n SPORTS_TRANSFER_AF,\n POPULATION\n ]\n )\n is_populated_place = logical_or(\n c.satisfy(TRAVERSIBLE_LO, [POPULATED_PLACE]),\n c.satisfy([wprop.CATEGORY_LINK], [TOWNSHIPS_MISOURI], max_steps=1)\n )\n is_organization = c.satisfy(\n TRAVERSIBLE_LO,\n [\n POLITICAL_PARTY,\n COLLEGIATE_ATHLETICS_PROGRAM,\n ORGANIZATION_OTHER,\n ORGANISATION,\n SPORTS_TEAM,\n BAND_ROCK_AND_POP,\n NEWS_BUREAU,\n NYC_BALLET,\n UNION_ARMY\n ]\n )\n is_position = c.satisfy(\n TRAVERSIBLE_LO,\n [\n POSITION,\n OCCUPATION,\n POSITION_TEAM_SPORTS,\n AMERICAN_FOOTBALL_POSITIONS,\n ASSOCIATION_FOOTBALL_POSITION,\n BASEBALL_POSITION,\n # ICE_HOCKEY_POSITION,\n SPORTSPERSON\n ]\n )\n is_kinship = c.satisfy(TRAVERSIBLE_LO, [KINSHIP])\n is_kin = c.satisfy([wprop.SUBCLASS_OF, wprop.IS_A_LIST_OF], [KIN])\n is_title = logical_or(\n c.satisfy(TRAVERSIBLE_LO, [TITLE, NOBLE_TITLE]),\n c.satisfy([wprop.CATEGORY_LINK], [BARONETCIES_IN_UK, EXTINCT_BARONETCIES_UK, EXTINCT_BARONETCIES_ENGLAND], max_steps=1)\n )\n is_art_work = c.satisfy(TRAVERSIBLE_LO, [ART_WORK, COMIC])\n is_audio_visual_work = c.satisfy(TRAVERSIBLE_LO, [AUDIO_VISUAL_WORK, TV_STATION])\n is_fictional_character = c.satisfy(TRAVERSIBLE_LO, [FICTIONAL_CHARACTER])\n is_name = c.satisfy(TRAVERSIBLE_LO, [GIVEN_NAME, SURNAME, FEMALE_GIVEN_NAME, MALE_GIVEN_NAME, PSEUDONYM])\n is_stock_character = c.satisfy([wprop.INSTANCE_OF, wprop.IS_A_LIST_OF], [STOCK_CHARACTER])\n is_family = c.satisfy(TRAVERSIBLE_LO, [FAMILY, CLAN])\n is_award = c.satisfy(TRAVERSIBLE_LO, [AWARD])\n is_electromagnetic_wave = c.satisfy(TRAVERSIBLE_LO, [ELECTROMAGNETIC_WAVE])\n is_geographical_object = c.satisfy(\n TRAVERSIBLE_LO,\n [\n GEOGRAPHICAL_OBJECT,\n BODY_OF_WATER,\n LOCATION_GEOGRAPHY,\n GEOLOGICAL_FORMATION,\n NATIONAL_HERITAGE_SITE,\n ESTATE,\n # PART_OF_LAND,\n PRIVATE_USE_AREAS\n ]\n )\n is_postal_code = c.satisfy(TRAVERSIBLE_LO, [POSTAL_CODE])\n is_person = c.satisfy(TRAVERSIBLE_LO, [PERSON])\n is_person_only = logical_or(\n logical_negate(\n is_person,\n [\n is_human,\n is_people,\n is_populated_place,\n is_organization,\n is_position,\n is_title,\n is_kinship,\n is_kin,\n is_country,\n is_geographical_object,\n is_art_work,\n is_audio_visual_work,\n is_fictional_character,\n is_name,\n is_family,\n is_award\n ]\n ), is_stock_character)\n\n is_male = c.satisfy([wprop.SEX_OR_GENDER], [MALE])\n 
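# Editor's note (illustrative): logical_negate(base, others) appears to keep the entries of base that fall in none of the other masks; it is the device used throughout this file (e.g. is_person_only above) to make the returned classes roughly mutually exclusive. A hypothetical use, kept commented out so behaviour is unchanged:\n # is_male_human_only = logical_negate(logical_and(is_human, is_male), [is_fictional_character])\n 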
is_female = c.satisfy([wprop.SEX_OR_GENDER], [FEMALE])\n is_human_male = logical_and(is_human, is_male)\n is_human_female = logical_and(is_human, is_female)\n\n is_musical_work = c.satisfy(TRAVERSIBLE_LO, [MUSICAL_WORK, DISCOGRAPHY])\n is_song = c.satisfy(TRAVERSIBLE_LO, [SONG, SINGLE])\n is_radio_program = c.satisfy(\n TRAVERSIBLE_LO,\n [\n RADIO_PROGRAM,\n RADIO_STATION,\n RADIO_STATION2,\n TALK_RADIO\n ]\n )\n is_sexual_orientation = c.satisfy(TRAVERSIBLE_LO, [SEXUAL_ORIENTATION])\n\n is_taxonomic_rank = c.satisfy([wprop.INSTANCE_OF], [TAXONOMIC_RANK])\n is_order = c.satisfy(TRAVERSIBLE_LO, [ORDER_HONOUR])\n\n is_train_station = c.satisfy(TRAVERSIBLE_LO, [TRAIN_STATION])\n is_bridge = c.satisfy(TRAVERSIBLE_LO, [BRIDGE])\n is_airport = c.satisfy(TRAVERSIBLE_LO, [AIRPORT])\n\n is_sex_toy = c.satisfy(TRAVERSIBLE_LO, [SEX_TOY])\n is_monument = c.satisfy(TRAVERSIBLE_LO, [MONUMENT])\n\n is_physical_object = c.satisfy(\n TRAVERSIBLE_LO,\n [\n PHYSICAL_OBJECT,\n BOARD_GAMES,\n ELECTRONIC_GAME,\n GAME,\n ROCK_GEOLOGY,\n ASPHALT\n ]\n )\n is_clothing = c.satisfy(TRAVERSIBLE_LO, [CLOTHING])\n\n is_mathematical_object = c.satisfy(TRAVERSIBLE_LO, [MATHEMATICAL_OBJECT])\n is_physical_quantity = logical_or(\n c.satisfy(\n TRAVERSIBLE_LO,\n [\n PHYSICAL_QUANTITY,\n BASIC_STAT_UNIT_NORWAY,\n SPORTS_RECORD,\n WINGSPAN,\n WIN_LOSS_RECORD_PITCHING,\n BASEBALL_STATISTICS\n ]\n ),\n c.satisfy(\n [wprop.CATEGORY_LINK],\n [\n DEMOGRAPHICS_OF_NORWAY,\n MEASUREMENTS_OF_POVERTY,\n BASEBALL_STATISTIC\n ],\n max_steps=1\n )\n )\n is_number = c.satisfy(TRAVERSIBLE_LO, [NUMBER])\n is_astronomical_object = c.satisfy(\n TRAVERSIBLE_LO,\n [\n ASTEROID,\n COMET,\n PLANET,\n GALAXY,\n ASTRONOMICAL_OBJECT,\n FICTIONAL_ASTRONOMICAL_OBJECT\n ]\n )\n is_hazard = c.satisfy(TRAVERSIBLE_LO, [HAZARD, TRAIN_ACCIDENT])\n is_date = c.satisfy(TRAVERSIBLE_LO, [TIME_INTERVAL])\n is_algorithm = c.satisfy(TRAVERSIBLE_LO, [ALGORITHM])\n is_value = c.satisfy(TRAVERSIBLE_LO, [VALUE])\n is_currency = c.satisfy(TRAVERSIBLE_LO, [CURRENCY, MONEY])\n is_event = c.satisfy(TRAVERSIBLE_LO, [EVENT, RECESSION])\n is_election = c.satisfy(TRAVERSIBLE_LO, [ELECTION])\n is_legislative_term = c.satisfy(TRAVERSIBLE_LO, [LEGISLATIVE_TERM])\n is_activity = c.satisfy([wprop.INSTANCE_OF, wprop.IS_A_LIST_OF], [ACTIVITY, MUSIC_DOWNLOAD, CIRCUS_SKILLS])\n is_activity_subclass = c.satisfy([wprop.SUBCLASS_OF], [ACTIVITY, MUSIC_DOWNLOAD, CIRCUS_SKILLS])\n is_food = c.satisfy([wprop.INSTANCE_OF, wprop.PART_OF, wprop.SUBCLASS_OF], [FOOD, DRINK])\n is_wikidata_prop = c.satisfy(TRAVERSIBLE_LO, [WIKIDATA_PROPERTY])\n is_wikipedia_disambiguation = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_DISAMBIGUATION])\n is_wikipedia_template_namespace = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_TEMPLATE_NAMESPACE])\n is_wikipedia_list = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_LIST])\n is_wikipedia_project_page = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_PROJECT_PAGE])\n is_wikipedia_user_language_template = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_USER_LANGUAGE_TEMPLATE])\n is_wikimedia_category_page = c.satisfy([wprop.INSTANCE_OF], [WIKIMEDIA_CATEGORY_PAGE])\n is_legal_case = c.satisfy(TRAVERSIBLE_LO, [LEGAL_CASE])\n is_sport = c.satisfy(TRAVERSIBLE_LO, [SPORT])\n is_data_format = c.satisfy(TRAVERSIBLE_LO, [DATA_FORMAT, FILE_FORMAT_FAMILY, FILE_FORMAT])\n is_research_method = c.satisfy(TRAVERSIBLE_LO, [RESEARCH, METHOD, RACE_ETHNICITY_USA])\n is_proposition = c.satisfy(TRAVERSIBLE_LO, [PROPOSITION])\n is_record_chart = c.satisfy(TRAVERSIBLE_LO, [RECORD_CHART, BILLBOARDS])\n 
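# Editor's note (illustrative): TRAVERSIBLE_LO is [wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.IS_A_LIST_OF], so c.satisfy(TRAVERSIBLE_LO, [X]) marks every item whose instance-of/subclass-of chain reaches the class X. A new coarse class could be added the same way, e.g. (hypothetical; Q33506 is assumed to be the Wikidata museum class):\n # MUSEUM = wkd(c, \"Q33506\")\n # is_museum = c.satisfy(TRAVERSIBLE_LO, [MUSEUM])\n 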
is_international_relations = c.satisfy(TRAVERSIBLE_LO, [INTERNATIONAL_RELATIONS])\n\n is_union = c.satisfy(TRAVERSIBLE_LO, [SIGNIFICANT_OTHER])\n\n is_recurring_sporting_event = c.satisfy(\n TRAVERSIBLE_LO,\n [RECURRING_SPORTING_EVENT]\n )\n is_sport_event = logical_or(\n logical_and(\n is_sport,\n c.satisfy([wprop.PART_OF, wprop.IS_A_LIST_OF], where(is_recurring_sporting_event)[0])\n ),\n c.satisfy(TRAVERSIBLE_LO, [SPORTING_EVENT, COMPETITION])\n )\n\n is_genre = c.satisfy(TRAVERSIBLE_LO, [ART_GENRE, ART_MOVEMENT])\n\n is_landform = c.satisfy(TRAVERSIBLE_LO, [LANDFORM])\n is_language = c.satisfy(TRAVERSIBLE_LO, [LANGUAGE])\n is_alphabet = c.satisfy(TRAVERSIBLE_LO, [ALPHABET])\n is_railroad = logical_or(\n c.satisfy(TRAVERSIBLE_LO, [RAILROAD]),\n c.satisfy([wprop.CATEGORY_LINK], [NAMED_PASSENGER_TRAIN_INDIA], max_steps=1)\n )\n is_speech = c.satisfy(TRAVERSIBLE_LO, [SPEECH])\n is_language_only = logical_negate(is_language, [is_speech])\n is_alphabet_only = logical_negate(is_alphabet, [is_speech, is_language])\n is_war = c.satisfy(TRAVERSIBLE_LO, [WAR])\n is_battle = c.satisfy(TRAVERSIBLE_LO, [BATTLE, BLOCKADE, MILITARY_OFFENSIVE, CONFLICT, MASSACRE])\n is_crime = c.satisfy(TRAVERSIBLE_LO, [CRIME])\n is_gas = c.satisfy(TRAVERSIBLE_LO, [GAS])\n is_chemical_compound = c.satisfy(TRAVERSIBLE_LO, [CHEMICAL_COMPOUND, DRUG, CHEMICAL_SUBSTANCE])\n is_chemical_compound_only = logical_negate(is_chemical_compound, [is_food])\n is_gas_only = logical_negate(is_gas, [is_chemical_compound])\n is_geometric_shape = c.satisfy(TRAVERSIBLE_LO, [GEOMETRIC_SHAPE])\n is_award_ceremony = c.satisfy(TRAVERSIBLE_LO, [AWARD_CEREMONY])\n is_strategy = c.satisfy(TRAVERSIBLE_LO, [CHESS_OPENING])\n is_gene = c.satisfy(TRAVERSIBLE_LO, [GENE, CHROMOSOME])\n is_character = c.satisfy(TRAVERSIBLE_LO, [CHARACTER])\n is_law = c.satisfy(TRAVERSIBLE_LO, [LAW])\n is_legal_action = c.satisfy(TRAVERSIBLE_LO, [LEGAL_ACTION])\n is_facility = logical_or(\n c.satisfy(TRAVERSIBLE_LO, [FACILITY]),\n c.satisfy([wprop.CATEGORY_LINK], [HOUSES_NATIONAL_REGISTER_ARKANSAS], max_steps=1)\n )\n is_molecule = c.satisfy(TRAVERSIBLE_LO, [MOLECULE, PROTEIN_FAMILY, PROTEIN_DOMAIN, MULTIPROTEIN_COMPLEX])\n is_disease = c.satisfy(TRAVERSIBLE_LO, [DISEASE])\n is_mind = c.satisfy(TRAVERSIBLE_LO, [MIND])\n is_religion = c.satisfy(TRAVERSIBLE_LO, [RELIGION])\n is_natural_phenomenon = c.satisfy(TRAVERSIBLE_LO, [NATURAL_PHENOMENON, NATURAL_DISASTER, WIND])\n is_anatomical_structure = c.satisfy(TRAVERSIBLE_LO, [ANATOMICAL_STRUCTURE])\n is_plant = c.satisfy(TRAVERSIBLE_LO + [wprop.PARENT_TAXON], [PLANT_STRUCTURE, PLANT])\n is_region = c.satisfy(TRAVERSIBLE_LO, [REGION])\n is_software = logical_or(\n c.satisfy(TRAVERSIBLE_LO, [SOFTWARE]),\n c.satisfy([wprop.CATEGORY_LINK], [VIDEO_GAME_FRANCHISES], max_steps=1)\n )\n is_website = c.satisfy(TRAVERSIBLE_LO, [WEBSITE])\n is_river = logical_and(c.satisfy(TRAVERSIBLE_LO, [WATERCOURSE]), is_geographical_object)\n is_lake = logical_or(\n logical_and(c.satisfy(TRAVERSIBLE_LO, [LAKE]), is_geographical_object),\n c.satisfy([wprop.CATEGORY_LINK], [LAKES_MINESOTTA], max_steps=1)\n )\n is_sea = logical_and(c.satisfy(TRAVERSIBLE_LO, [SEA]), is_geographical_object)\n is_volcano = logical_and(c.satisfy(TRAVERSIBLE_LO, [VOLCANO]), is_geographical_object)\n\n is_development_biology = c.satisfy([wprop.PART_OF, wprop.SUBCLASS_OF, wprop.INSTANCE_OF], [DEVELOPMENT_BIOLOGY, BIOLOGY])\n is_unit_of_mass = c.satisfy(TRAVERSIBLE_LO, [UNIT_OF_MASS])\n is_vehicle = c.satisfy(TRAVERSIBLE_LO, [VEHICLE, MODE_OF_TRANSPORT, PUBLIC_TRANSPORT])\n 
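# Editor's note (illustrative): rules built on wprop.CATEGORY_LINK pass max_steps, which appears to bound how many hops of the Wikipedia category graph are followed; small values keep matches precise, larger ones trade precision for recall. Hypothetical example, commented out:\n # is_ship_categorized = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, \"Category:Ships\")], max_steps=2)\n 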
is_watercraft = c.satisfy(TRAVERSIBLE_LO, [WATERCRAFT])\n is_aircraft = logical_or(\n c.satisfy(TRAVERSIBLE_LO, [AIRCRAFT]),\n c.satisfy([wprop.CATEGORY_LINK], [SINGLE_ENGINE_AIRCRAFT], max_steps=1)\n )\n is_road_vehicle = c.satisfy(\n TRAVERSIBLE_LO,\n [\n ROAD_VEHICLE,\n TANK,\n FIRE_ENGINE,\n AMBULANCE,\n AUTOMOBILE_MODEL,\n MOTORCYCLE_MODEL\n ]\n )\n is_weapon = c.satisfy(TRAVERSIBLE_LO, [WEAPON, TORPEDO_TUBE, WEAPONS_PLATFORM])\n is_book_magazine_article_proverb = c.satisfy(\n TRAVERSIBLE_LO,\n [\n PUBLICATION,\n ARTICLE,\n RELIGIOUS_TEXT,\n PROVERB,\n DOCUMENT,\n CITATION,\n FOLKLORE\n ]\n )\n is_brand = c.satisfy(TRAVERSIBLE_LO, [BRAND])\n is_concept = logical_or(\n c.satisfy([wprop.INSTANCE_OF],\n [TERM, ACADEMIC_DISCIPLINE, SPECIAL_FIELD, BRANCH_OF_SCIENCE, WORLD_VIEW]\n ),\n c.satisfy([wprop.SUBCLASS_OF], [SOCIAL_SCIENCE, DISCIPLINE_ACADEMIA, FORMAL_SCIENCE, IDEOLOGY])\n )\n is_color = c.satisfy(TRAVERSIBLE_LO, [COLOR])\n is_paradigm = c.satisfy(TRAVERSIBLE_LO, [PARADIGM])\n is_vehicle_brand = logical_or(\n logical_and(c.satisfy([wprop.PRODUCT_OR_MATERIAL_PRODUCED], [AUTOMOBILE, TRUCK]), is_brand),\n c.satisfy(TRAVERSIBLE_LO, [AUTOMOBILE_MANUFACTURER])\n )\n is_mountain_massif = logical_and(c.satisfy(TRAVERSIBLE_LO, [MOUNTAIN, MASSIF]), is_geographical_object)\n is_mountain_only = logical_negate(\n is_mountain_massif,\n [\n is_volcano\n ]\n )\n is_physical_object_only = logical_negate(\n is_physical_object,\n [\n is_audio_visual_work,\n is_art_work,\n is_musical_work,\n is_geographical_object,\n is_currency,\n is_gas,\n is_clothing,\n is_chemical_compound,\n is_electromagnetic_wave,\n is_song,\n is_food,\n is_character,\n is_law,\n is_software,\n is_website,\n is_vehicle,\n is_lake,\n is_landform,\n is_railroad,\n is_airport,\n is_aircraft,\n is_watercraft,\n is_sex_toy,\n is_data_format,\n is_date,\n is_research_method,\n is_sport,\n is_watercraft,\n is_aircraft,\n is_brand,\n is_vehicle_brand,\n is_road_vehicle,\n is_railroad,\n is_radio_program,\n is_weapon,\n is_book_magazine_article_proverb,\n is_brand,\n is_organization,\n is_facility,\n is_anatomical_structure,\n is_gene,\n is_monument\n ]\n )\n is_musical_work_only = logical_negate(\n is_musical_work,\n [\n is_song\n ]\n )\n is_geographical_object_only = logical_negate(\n is_geographical_object,\n [\n is_river,\n is_lake,\n is_sea,\n is_volcano,\n is_mountain_only,\n is_region,\n is_monument,\n is_country,\n is_facility,\n is_food,\n is_airport,\n is_bridge,\n is_train_station\n ]\n )\n\n is_event_election_only = logical_negate(\n logical_ors([is_event, is_election, is_accident]),\n [\n is_award_ceremony,\n is_war,\n is_natural_phenomenon\n ]\n )\n is_region_only = logical_negate(\n is_region,\n [\n is_populated_place,\n is_country,\n is_lake,\n is_river,\n is_sea,\n is_volcano,\n is_mountain_only\n ]\n )\n is_astronomical_object_only = logical_negate(\n is_astronomical_object,\n [\n is_geographical_object\n ]\n )\n\n is_date_only = logical_negate(\n is_date,\n [\n is_strategy,\n is_development_biology\n ]\n )\n is_development_biology_date = logical_and(is_development_biology, is_date)\n is_value_only = logical_negate(\n is_value,\n [\n is_unit_of_mass,\n is_event,\n is_election,\n is_currency,\n is_number,\n is_physical_quantity,\n is_award,\n is_date,\n is_postal_code\n ]\n )\n is_activity_subclass_only = logical_negate(\n logical_or(is_activity_subclass, is_activity),\n [\n is_crime,\n is_war,\n is_chemical_compound,\n is_gene,\n is_molecule,\n is_mathematical_object,\n is_sport,\n is_sport_event,\n is_event,\n 
is_paradigm,\n is_position,\n is_title,\n is_algorithm,\n is_order,\n is_organization,\n is_research_method,\n is_proposition,\n is_taxonomic_rank,\n is_algorithm,\n is_event,\n is_election,\n is_genre,\n is_concept\n ]\n )\n is_crime_only = logical_negate(\n is_crime,\n [\n is_war\n ]\n )\n is_number_only = logical_negate(\n is_number,\n [\n is_physical_quantity\n ]\n )\n is_molecule_only = logical_negate(\n is_molecule,\n [\n is_gene,\n is_chemical_compound\n ]\n )\n # VEHICLES:\n is_vehicle_only = logical_negate(\n is_vehicle,\n [\n is_watercraft,\n is_aircraft,\n is_road_vehicle\n ]\n )\n is_watercraft_only = logical_negate(\n is_watercraft,\n [\n is_aircraft\n ]\n )\n is_road_vehicle_only = logical_negate(\n is_road_vehicle,\n [\n is_aircraft,\n is_watercraft,\n ]\n )\n # remove groups that have occupations from mathematical objects:\n is_object_with_occupation = c.satisfy([wprop.INSTANCE_OF, wprop.OCCUPATION], [OCCUPATION, PROFESSION, POSITION])\n is_mathematical_object_only = logical_negate(\n is_mathematical_object,\n [\n is_geometric_shape,\n is_physical_quantity,\n is_number,\n is_object_with_occupation,\n is_landform\n ]\n )\n is_organization_only = logical_negate(\n is_organization,\n [\n is_country,\n is_geographical_object,\n is_family,\n is_people\n ]\n )\n is_art_work_only = logical_negate(\n is_art_work,\n [\n is_musical_work,\n is_audio_visual_work,\n is_sex_toy,\n is_monument\n ]\n )\n is_software_only = logical_negate(\n is_software,\n [\n is_language,\n is_organization,\n is_website\n ]\n )\n is_website_only = logical_negate(\n is_website,\n [\n is_organization,\n is_language\n ]\n )\n is_taxon_or_breed_only = logical_negate(\n is_taxon_or_breed,\n [\n is_human,\n is_plant\n ]\n )\n is_human_only = logical_negate(\n is_human,\n [\n is_male,\n is_female,\n is_kin,\n is_kinship,\n is_title\n ]\n )\n is_weapon_only = logical_negate(\n is_weapon,\n [\n is_software,\n is_website,\n is_vehicle\n ]\n )\n is_book_magazine_article_proverb_only = logical_negate(\n is_book_magazine_article_proverb,\n [\n is_software,\n is_website,\n is_musical_work,\n is_song,\n is_law,\n is_legal_action\n ]\n )\n is_fictional_character_only = logical_negate(\n is_fictional_character,\n [\n is_human,\n is_stock_character\n ]\n )\n is_battle_only = logical_negate(\n is_battle,\n [\n is_war,\n is_crime\n ]\n )\n is_brand_only = logical_negate(\n is_brand,\n [\n is_vehicle,\n is_aircraft,\n is_watercraft,\n is_website,\n is_software,\n is_vehicle_brand\n ]\n )\n is_vehicle_brand_only = logical_negate(\n is_vehicle_brand,\n [\n is_vehicle,\n is_aircraft,\n is_watercraft,\n is_website,\n is_software\n ]\n )\n is_concept_paradigm_proposition_only = logical_negate(\n logical_ors([is_concept, is_paradigm, is_proposition]),\n [\n is_physical_object,\n is_physical_quantity,\n is_software,\n is_website,\n is_color,\n is_vehicle,\n is_electromagnetic_wave,\n is_brand,\n is_vehicle_brand,\n is_currency,\n is_fictional_character,\n is_human,\n is_aircraft,\n is_geographical_object,\n is_geometric_shape,\n is_mathematical_object,\n is_musical_work,\n is_mountain_massif,\n is_lake,\n is_landform,\n is_language,\n is_anatomical_structure,\n is_book_magazine_article_proverb,\n is_development_biology,\n is_plant,\n is_sexual_orientation,\n is_genre,\n is_legislative_term\n ]\n )\n is_anatomical_structure_only = logical_negate(\n is_anatomical_structure,\n [\n is_plant\n ]\n )\n is_facility_only = logical_negate(\n is_facility,\n [\n is_train_station,\n is_aircraft,\n is_airport,\n is_bridge,\n is_vehicle,\n 
is_astronomical_object,\n is_railroad,\n is_monument\n ]\n )\n is_wikipedia_list_only = logical_negate(\n is_wikipedia_list,\n [\n is_activity_subclass,\n is_alphabet,\n is_art_work,\n is_astronomical_object,\n is_audio_visual_work,\n is_award,\n is_character,\n is_character,\n is_chemical_compound,\n is_color,\n is_currency,\n is_disease,\n is_election,\n is_electromagnetic_wave,\n is_facility,\n is_fictional_character,\n is_gene,\n is_genre,\n is_geographical_object,\n is_human,\n is_language,\n is_law,\n is_law,\n is_legal_action,\n is_legal_case,\n is_legislative_term,\n is_mathematical_object,\n is_mind,\n is_people,\n is_person,\n is_person,\n is_physical_object,\n is_populated_place,\n is_position,\n is_region,\n is_religion,\n is_research_method,\n is_sexual_orientation,\n is_software,\n is_speech,\n is_sport,\n is_sport_event,\n is_stock_character,\n is_strategy,\n is_taxon_or_breed,\n is_value,\n is_vehicle,\n is_wikidata_prop,\n is_weapon\n ]\n )\n is_sport_only = logical_negate(\n is_sport,\n [\n is_sport_event\n ]\n )\n is_legal_action_only = logical_negate(\n is_legal_action,\n [\n is_law,\n is_election\n ]\n )\n is_genre_only = logical_negate(\n is_genre,\n [\n is_physical_object,\n is_audio_visual_work,\n is_art_work,\n is_book_magazine_article_proverb,\n is_concept\n ]\n )\n is_plant_only = logical_negate(\n is_plant,\n [\n is_food,\n is_human,\n is_organization\n ]\n )\n is_kinship_kin_only = logical_negate(\n logical_or(is_kinship, is_kin),\n [\n is_family\n ]\n )\n is_position_only = logical_negate(\n is_position,\n [\n is_organization,\n is_human\n ]\n )\n is_radio_program_only = logical_negate(\n is_radio_program,\n [\n is_audio_visual_work,\n ]\n )\n is_taxonomic_rank_only = logical_negate(\n is_taxonomic_rank,\n [\n is_order\n ]\n )\n is_research_method_only = logical_negate(\n is_research_method,\n [\n is_audio_visual_work,\n is_book_magazine_article_proverb,\n is_art_work,\n is_concept,\n is_crime,\n is_war,\n is_algorithm,\n is_law,\n is_legal_action,\n is_legal_case\n ]\n )\n is_algorithm_only = logical_negate(\n is_algorithm,\n [\n is_concept,\n is_paradigm\n ]\n )\n\n is_union_only = logical_negate(\n is_union,\n [\n is_kinship,\n is_human,\n is_person\n ]\n )\n # get all the wikidata items that are disconnected:\n no_instance_subclass_or_cat_link = logical_ands(\n [\n c.relation(relation_name).edges() == 0\n for relation_name in [wprop.PART_OF, wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.CATEGORY_LINK]\n ]\n )\n is_sports_terminology_only = logical_negate(\n is_sports_terminology,\n [\n is_organization,\n is_human,\n is_person,\n is_activity,\n is_title,\n is_physical_quantity\n ]\n )\n\n out = {\n \"aaa_wikidata_prop\": is_wikidata_prop,\n \"aaa_wikipedia_disambiguation\": is_wikipedia_disambiguation,\n \"aaa_wikipedia_template_namespace\": is_wikipedia_template_namespace,\n \"aaa_wikipedia_user_language_template\": is_wikipedia_user_language_template,\n \"aaa_wikipedia_list\": is_wikipedia_list_only,\n \"aaa_wikipedia_project_page\": is_wikipedia_project_page,\n \"aaa_wikimedia_category_page\": is_wikimedia_category_page,\n \"aaa_no_instance_subclass_or_link\": no_instance_subclass_or_cat_link,\n \"taxon\": is_taxon_or_breed_only,\n \"human_male\": is_human_male,\n \"human_female\": is_human_female,\n \"human\": is_human_only,\n \"fictional_character\": is_fictional_character_only,\n \"people\": is_people,\n \"language\": is_language_only,\n \"alphabet\": is_alphabet_only,\n \"speech\": is_speech,\n \"gas\": is_gas_only,\n \"gene\": is_gene,\n \"molecule\": 
is_molecule_only,\n \"astronomical_object\": is_astronomical_object_only,\n \"disease\": is_disease,\n \"mind\": is_mind,\n \"song\": is_song,\n \"radio_program\": is_radio_program_only,\n \"law\": is_law,\n \"legal_action\": is_legal_action_only,\n \"book_magazine_article\": is_book_magazine_article_proverb_only,\n \"chemical_compound\": is_chemical_compound_only,\n \"geometric_shape\": is_geometric_shape,\n \"mathematical_object\": is_mathematical_object_only,\n \"physical_quantity\": is_physical_quantity,\n \"number\": is_number_only,\n \"geographical_object\": is_geographical_object_only,\n \"train_station\": is_train_station,\n \"railroad\": is_railroad,\n \"concept\": is_concept_paradigm_proposition_only,\n \"genre\": is_genre_only,\n \"sexual_orientation\": is_sexual_orientation,\n \"bridge\": is_bridge,\n \"airport\": is_airport,\n \"river\": is_river,\n \"lake\": is_lake,\n \"sea\": is_sea,\n \"weapon\": is_weapon_only,\n \"region\": is_region_only,\n \"country\": is_country,\n \"software\": is_software_only,\n \"website\": is_website_only,\n \"volcano\": is_volcano,\n \"mountain\": is_mountain_only,\n \"religion\": is_religion,\n \"organization\": is_organization_only,\n \"musical_work\": is_musical_work_only,\n \"other_art_work\": is_art_work_only,\n \"audio_visual_work\": is_audio_visual_work,\n \"physical_object\": is_physical_object_only,\n \"record_chart\": is_record_chart,\n \"clothing\": is_clothing,\n \"plant\": is_plant_only,\n \"anatomical_structure\": is_anatomical_structure_only,\n \"facility\": is_facility_only,\n \"monument\": is_monument,\n \"vehicle\": is_vehicle_only,\n \"watercraft\": is_watercraft_only,\n \"road_vehicle\": is_road_vehicle_only,\n \"vehicle_brand\": is_vehicle_brand_only,\n \"brand\": is_brand_only,\n \"aircraft\": is_aircraft,\n \"legal_case\": is_legal_case,\n \"position\": is_position_only,\n \"person_role\": is_person_only,\n \"populated_place\": is_populated_place,\n \"value\": is_value_only,\n \"unit_of_mass\": is_unit_of_mass,\n \"currency\": is_currency,\n \"postal_code\": is_postal_code,\n \"name\": is_name,\n \"data_format\": is_data_format,\n \"character\": is_character,\n \"family\": is_family,\n \"sport\": is_sport_only,\n \"taxonomic_rank\": is_taxonomic_rank,\n \"sex_toy\": is_sex_toy,\n \"legislative_term\": is_legislative_term,\n \"sport_event\": is_sport_event,\n \"date\": is_date_only,\n \"kinship\": is_kinship_kin_only,\n \"union\": is_union_only,\n \"research\": is_research_method_only,\n \"title\": is_title,\n \"hazard\": is_hazard,\n \"color\": is_color,\n \"sports_terminology\": is_sports_terminology_only,\n \"developmental_biology_period\": is_development_biology_date,\n \"strategy\": is_strategy,\n \"event\": is_event_election_only,\n \"natural_phenomenon\": is_natural_phenomenon,\n \"electromagnetic_wave\": is_electromagnetic_wave,\n \"war\": is_war,\n \"award\": is_award,\n \"crime\": is_crime_only,\n \"battle\": is_battle_only,\n \"international_relations\": is_international_relations,\n \"food\": is_food,\n \"algorithm\": is_algorithm,\n \"activity\": is_activity_subclass_only,\n \"award_ceremony\": is_award_ceremony\n }\n # is_other = logical_not(logical_ors([val for key, val in out.items() if key != \"aaa_wikipedia_list\"]))\n # c.class_report([wprop.IS_A_LIST_OF, wprop.CATEGORY_LINK], logical_and(\n # is_other,\n # is_wikipedia_list_only\n # ), name=\"remaining lists\")\n return out\n"}}},{"rowIdx":51,"cells":{"python_code":{"kind":"string","value":"import os\nimport argparse\nimport numpy as np\nimport gym\nfrom 
gym.envs.atari.atari_env import ACTION_MEANING\nimport pygame\nfrom atari_demo.wrappers import AtariDemo\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-g', '--game', type=str, default='MontezumaRevenge')\nparser.add_argument('-f', '--frame_rate', type=int, default=60)\nparser.add_argument('-y', '--screen_height', type=int, default=840)\nparser.add_argument('-d', '--save_dir', type=str, default=None)\nparser.add_argument('-s', '--frame_skip', type=int, default=4)\nargs = parser.parse_args()\n\nif args.save_dir is None:\n save_dir = os.path.join(os.getcwd(), 'demos')\nelse:\n save_dir = args.save_dir\nif not os.path.exists(save_dir):\n os.makedirs(save_dir)\ndemo_file_name = os.path.join(save_dir, args.game + '.demo')\n\n\n# //////// set up gym + atari part /////////\nACTION_KEYS = {\n \"NOOP\" : set(),\n \"FIRE\" : {'space'},\n \"UP\" : {'up'},\n \"RIGHT\": {'right'},\n \"LEFT\" : {'left'},\n \"DOWN\" : {'down'},\n \"UPRIGHT\" : {'up', 'right'},\n \"UPLEFT\" : {'up', 'left'},\n \"DOWNRIGHT\" : {'down', 'right'},\n \"DOWNLEFT\" : {'down', 'left'},\n \"UPFIRE\" : {'up', 'space'},\n \"RIGHTFIRE\" : {'right', 'space'},\n \"LEFTFIRE\" : {'left', 'space'},\n \"DOWNFIRE\" : {'down', 'space'},\n \"UPRIGHTFIRE\" : {'up', 'right', 'space'},\n \"UPLEFTFIRE\" : {'up', 'left', 'space'},\n \"DOWNRIGHTFIRE\" : {'down', 'right', 'space'},\n \"DOWNLEFTFIRE\" : {'down', 'left', 'space'},\n \"TIMETRAVEL\": {'b'}\n}\n\nenv = AtariDemo(gym.make(args.game + 'NoFrameskip-v4'))\navailable_actions = [ACTION_MEANING[i] for i in env.unwrapped._action_set] + [\"TIMETRAVEL\"]\nenv.reset()\nloaded_previous = False\nif os.path.exists(demo_file_name):\n env.load_from_file(demo_file_name)\n loaded_previous = True\n\ndef get_gym_action(key_presses):\n action = 0\n for i,action_name in enumerate(available_actions):\n if ACTION_KEYS[action_name].issubset(key_presses):\n action = i\n return action\n\n\n# ///////// set up pygame part //////////\npygame.init()\nscreen_size = (int((args.screen_height/210)*160),args.screen_height)\nscreen = pygame.display.set_mode(screen_size)\nsmall_screen = pygame.transform.scale(screen.copy(), (160,210))\nclock = pygame.time.Clock()\npygame.display.set_caption(\"Recording demonstration for \" + args.game)\n\ndef show_text(text_lines):\n screen.fill((255, 255, 255))\n f1 = pygame.font.SysFont(\"\", 30)\n for i, line in enumerate(text_lines):\n text = f1.render(line, True, (0, 0, 0))\n screen.blit(text, (50, 100 + 50 * i))\n pygame.display.flip()\n\ndef show_start_screen():\n text_lines = [\"Recording demo for \" + args.game,\n \"Control the game using the arrow keys and space bar\",\n \"Hold <b> to go backward in time to fix mistakes\",\n \"Press <s> to save the demo and exit\",\n \"Press <SPACE BAR> to get started\"]\n if loaded_previous:\n text_lines = text_lines[:1] + [\"Continuing from previously recorded demo\"] + text_lines[1:]\n show_text(text_lines)\n started = False\n while not started:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n key_name = pygame.key.name(event.key)\n if key_name == 'space':\n started = True\n clock.tick(args.frame_rate)\n\ndef show_end_screen():\n text_lines = [\"GAME OVER\",\n \"Hold <b> to go backward in time\",\n \"Press <s> to save the demo and exit\"]\n show_text(text_lines)\n\ndef show_game_screen(observation):\n pygame.surfarray.blit_array(small_screen, np.transpose(observation,[1,0,2]))\n pygame.transform.scale(small_screen, screen_size, screen)\n pygame.display.flip()\n\nkey_is_pressed = set()\ndef 
process_key_presses():\n key_presses = set()\n quit = False\n save = False\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit = True\n elif event.type == pygame.KEYDOWN:\n key_name = pygame.key.name(event.key)\n key_presses.add(key_name)\n key_is_pressed.add(key_name)\n elif event.type == pygame.KEYUP:\n key_name = pygame.key.name(event.key)\n if key_name in key_is_pressed:\n key_is_pressed.remove(key_name)\n if key_name == 's':\n save = True\n key_presses.update(key_is_pressed)\n\n return key_presses, quit, save\n\n\n# //////// run the game and record the demo! /////////\nquit = False\ndone = False\nshow_start_screen()\nwhile not quit:\n\n # process key presses & save when requested\n key_presses, quit, save = process_key_presses()\n if save:\n env.save_to_file(demo_file_name)\n quit = True\n\n # advance gym env\n action = get_gym_action(key_presses)\n for step in range(args.frame_skip):\n observation, reward, done, info = env.step(action)\n\n # show screen\n if done:\n show_end_screen()\n else:\n show_game_screen(observation)\n\n clock.tick(float(args.frame_rate)/args.frame_skip)\n"}}},{"rowIdx":52,"cells":{"python_code":{"kind":"string","value":"import numpy as np\nfrom multiprocessing import Process, Pipe\nimport gym\nfrom baselines.common.vec_env.subproc_vec_env import CloudpickleWrapper\n\nclass ClonedEnv(gym.Wrapper):\n def __init__(self, env, possible_actions_dict, best_action_dict, seed):\n gym.Wrapper.__init__(self, env)\n self.possible_actions_dict = possible_actions_dict\n self.best_action_dict = best_action_dict\n self.state = None\n self.rng = np.random.RandomState(seed)\n self.just_initialized = True\n self.l = 0\n self.r = 0\n\n def step(self, action=None):\n if self.state in self.possible_actions_dict:\n possible_actions = list(self.possible_actions_dict[self.state])\n action = possible_actions[self.rng.randint(len(possible_actions))]\n obs, reward, done, info = self.env.step(action)\n self.l += 1\n self.r += reward\n self.state = self.env.unwrapped._get_ram().tostring()\n if self.state in self.possible_actions_dict: # still in known territory\n info['possible_actions'] = self.possible_actions_dict[self.state]\n if self.state in self.best_action_dict:\n info['best_action'] = self.best_action_dict[self.state]\n else:\n done = True\n past_l = self.l\n past_r = self.r\n self.l = 0\n self.r = 0\n if past_l > 0:\n info['episode'] = {'r': past_r, 'l': past_l}\n else:\n raise Exception('stepping cloned env without resetting')\n\n return obs, reward, done, info\n\n def reset(self):\n obs = self.env.reset()\n if isinstance(obs, tuple):\n obs,info = obs\n else:\n info = {}\n\n self.state = self.env.unwrapped._get_ram().tostring()\n if self.state in self.best_action_dict:\n info['best_action'] = self.best_action_dict[self.state]\n for randop in range(self.rng.randint(30)): # randomize starting point\n obs, reward, done, info = self._step(None)\n\n if self.just_initialized:\n self.just_initialized = False\n for randops in range(self.rng.randint(50000)): # randomize starting point further\n obs, reward, done, info = self._step(None)\n if done:\n obs, info = self._reset()\n\n return obs, info\n\ndef get_best_actions_from_infos(infos):\n k = len(infos)\n best_actions = [0] * k\n action_masks = [1] * k\n for i in range(k):\n if 'best_action' in infos[i]:\n best_actions[i] = infos[i]['best_action']\n action_masks[i] = 0\n return best_actions, action_masks\n\ndef get_available_actions_from_infos(infos, n_actions):\n k = len(infos)\n best_actions = 
np.zeros((k,n_actions), dtype=np.uint8)\n action_masks = [1] * k\n for i in range(k):\n if 'possible_actions' in infos[i]:\n action_masks[i] = 0\n for j in infos[i]['possible_actions']:\n best_actions[i,j] = 1\n return best_actions, action_masks\n\ndef worker2(nr, remote, env_fn_wrapper, mode):\n env = env_fn_wrapper.x()\n while True:\n cmd,count = remote.recv()\n if cmd == 'step':\n obs = []\n rews = []\n dones = []\n infos = []\n for step in range(count):\n ob, reward, done, info = env.step(0) # action is ignored in ClonedEnv downstream\n if done:\n ob = env.reset()\n if isinstance(ob, tuple):\n ob, new_info = ob\n info.update(new_info)\n if 'episode' in info:\n epinfo = info['episode']\n print('simulator thread %d completed demo run with total return %d obtained in %d steps' % (nr, epinfo[\"r\"], epinfo[\"l\"]))\n obs.append(ob)\n rews.append(reward)\n dones.append(done)\n infos.append(info)\n if mode == 'best':\n best_actions, action_masks = get_best_actions_from_infos(infos)\n else:\n best_actions, action_masks = get_available_actions_from_infos(infos, env.action_space.n)\n remote.send((obs, rews, dones, best_actions, action_masks))\n elif cmd == 'reset':\n ob = env.reset()\n if isinstance(ob, tuple):\n ob,info = ob\n else:\n info = {}\n remote.send((ob,info))\n elif cmd == 'close':\n remote.close()\n break\n elif cmd == 'get_spaces':\n remote.send((env.action_space, env.observation_space))\n else:\n raise NotImplementedError(str(cmd) + ' action not implemented in worker')\n\nclass ClonedVecEnv(object):\n def __init__(self, env_fns, mode='best'):\n self.nenvs = len(env_fns)\n self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(self.nenvs)])\n self.ps = [Process(target=worker2, args=(nr, work_remote, CloudpickleWrapper(env_fn), mode))\n for (nr, work_remote, env_fn) in zip(range(self.nenvs), self.work_remotes, env_fns)]\n for p in self.ps:\n p.start()\n self.remotes[0].send(('get_spaces', None))\n self.action_space, self.observation_space = self.remotes[0].recv()\n self.steps_taken = 0\n\n def step(self, time_steps=128):\n for remote in self.remotes:\n remote.send(('step', time_steps))\n results = [remote.recv() for remote in self.remotes]\n obs, rews, dones, best_actions, action_masks = [np.stack(x) for x in zip(*results)]\n return obs, rews, dones, best_actions, action_masks\n\n def reset(self):\n for remote in self.remotes:\n remote.send(('reset', None))\n results = [remote.recv() for remote in self.remotes]\n obs, infos = zip(*results)\n best_actions, action_masks = [np.stack(x) for x in get_best_actions_from_infos(infos)]\n return np.stack(obs), best_actions, action_masks\n\n def close(self):\n for remote in self.remotes:\n remote.send(('close', None))\n for p in self.ps:\n p.join()\n\ndef make_cloned_vec_env(nenvs, env_id, possible_actions_dict, best_action_dict, wrappers, mode='best'):\n def make_env(rank):\n def env_fn():\n env = gym.make(env_id)\n env = ClonedEnv(env, possible_actions_dict, best_action_dict, rank)\n env = wrappers(env)\n return env\n return env_fn\n\n return ClonedVecEnv([make_env(i) for i in range(nenvs)], mode)\n\n"}}},{"rowIdx":53,"cells":{"python_code":{"kind":"string","value":""}}},{"rowIdx":54,"cells":{"python_code":{"kind":"string","value":"import pickle\nimport sys\nimport os\n\ndef save_as_pickled_object(obj, filepath):\n \"\"\"\n This is a defensive way to write pickle.write, allowing for very large files on all platforms\n \"\"\"\n max_bytes = 2**31 - 1\n bytes_out = pickle.dumps(obj)\n n_bytes = sys.getsizeof(bytes_out)\n with 
open(filepath, 'wb') as f_out:\n for idx in range(0, n_bytes, max_bytes):\n f_out.write(bytes_out[idx:idx+max_bytes])\n\n\ndef load_as_pickled_object(filepath):\n \"\"\"\n This is a defensive way to write pickle.load, allowing for very large files on all platforms\n \"\"\"\n max_bytes = 2**31 - 1\n try:\n input_size = os.path.getsize(filepath)\n bytes_in = bytearray(0)\n with open(filepath, 'rb') as f_in:\n for _ in range(0, input_size, max_bytes):\n bytes_in += f_in.read(max_bytes)\n obj = pickle.loads(bytes_in)\n except:\n return None\n return obj\n\n"}}},{"rowIdx":55,"cells":{"python_code":{"kind":"string","value":"import pickle\nimport gym\nfrom gym import spaces\n\nclass AtariDemo(gym.Wrapper):\n \"\"\"\n Records actions taken, creates checkpoints, allows time travel, restoring and saving of states\n \"\"\"\n\n def __init__(self, env, disable_time_travel=False):\n super(AtariDemo, self).__init__(env)\n self.action_space = spaces.Discrete(len(env.unwrapped._action_set)+1) # add \"time travel\" action\n self.save_every_k = 100\n self.max_time_travel_steps = 10000\n self.disable_time_travel = disable_time_travel\n\n def step(self, action):\n if action >= len(self.env.unwrapped._action_set):\n if self.disable_time_travel:\n obs, reward, done, info = self.env.step(0)\n else:\n obs, reward, done, info = self.time_travel()\n\n else:\n if self.steps_in_the_past > 0:\n self.restore_past_state()\n\n if len(self.done)>0 and self.done[-1]:\n obs = self.obs[-1]\n reward = 0\n done = True\n info = None\n\n else:\n self.lives.append(self.env.unwrapped.ale.lives())\n\n obs, reward, done, info = self.env.step(action)\n\n self.actions.append(action)\n self.obs.append(obs)\n self.rewards.append(reward)\n self.done.append(done)\n self.info.append(info)\n\n # periodic checkpoint saving\n if not done:\n if (len(self.checkpoint_action_nr)>0 and len(self.actions) >= self.checkpoint_action_nr[-1] + self.save_every_k) \\\n or (len(self.checkpoint_action_nr)==0 and len(self.actions) >= self.save_every_k):\n self.save_checkpoint()\n\n return obs, reward, done, info\n\n def reset(self):\n obs = self.env.reset()\n self.actions = []\n self.lives = []\n self.checkpoints = []\n self.checkpoint_action_nr = []\n self.obs = [obs]\n self.rewards = []\n self.done = [False]\n self.info = [None]\n self.steps_in_the_past = 0\n return obs\n\n def time_travel(self):\n if len(self.obs) > 1:\n reward = self.rewards.pop()\n self.obs.pop()\n self.done.pop()\n self.info.pop()\n self.lives.pop()\n obs = self.obs[-1]\n done = self.done[-1]\n info = self.info[-1]\n self.steps_in_the_past += 1\n\n else: # reached time travel limit\n reward = 0\n obs = self.obs[0]\n done = self.done[0]\n info = self.info[0]\n\n # rewards are differences in subsequent state values, and so should get reversed sign when going backward in time\n reward = -reward\n\n return obs, reward, done, info\n\n def save_to_file(self, file_name):\n dat = {'actions': self.actions, 'checkpoints': self.checkpoints, 'checkpoint_action_nr': self.checkpoint_action_nr,\n 'rewards': self.rewards, 'lives': self.lives}\n with open(file_name, \"wb\") as f:\n pickle.dump(dat, f)\n\n def load_from_file(self, file_name):\n self.reset()\n with open(file_name, \"rb\") as f:\n dat = pickle.load(f)\n self.actions = dat['actions']\n self.checkpoints = dat['checkpoints']\n self.checkpoint_action_nr = dat['checkpoint_action_nr']\n self.rewards = dat['rewards']\n self.lives = dat['lives']\n self.load_state_and_walk_forward()\n\n def save_checkpoint(self):\n chk_pnt = 
self.env.unwrapped.clone_state()\n self.checkpoints.append(chk_pnt)\n self.checkpoint_action_nr.append(len(self.actions))\n\n def restore_past_state(self):\n self.actions = self.actions[:-self.steps_in_the_past]\n while len(self.checkpoints)>0 and self.checkpoint_action_nr[-1]>len(self.actions):\n self.checkpoints.pop()\n self.checkpoint_action_nr.pop()\n self.load_state_and_walk_forward()\n self.steps_in_the_past = 0\n\n def load_state_and_walk_forward(self):\n if len(self.checkpoints)==0:\n self.env.reset()\n time_step = 0\n else:\n self.env.unwrapped.restore_state(self.checkpoints[-1])\n time_step = self.checkpoint_action_nr[-1]\n\n for a in self.actions[time_step:]:\n action = self.env.unwrapped._action_set[a]\n self.env.unwrapped.ale.act(action)\n"}}},{"rowIdx":56,"cells":{"python_code":{"kind":"string","value":"import distutils.util\nplatform = distutils.util.get_platform()\n\n# technically, our platform is not actually multilinux... so this may fail in some distros\n# however, tested in python:3.6 docker image (by construction)\n# and in ubuntu:16.04\nplatform = platform.replace('linux', 'manylinux1')\n\nprint(platform)\n"}}},{"rowIdx":57,"cells":{"python_code":{"kind":"string","value":"import os\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\nimport subprocess\nimport sys\n\nwith open(os.path.join(os.path.dirname(__file__), 'atari_py', 'package_data.txt')) as f:\n package_data = [line.rstrip() for line in f.readlines()]\n\n\nclass Build(build_ext):\n def run(self):\n if os.name != 'posix' and not self.inplace:\n # silly patch to disable build steps on windows, as we are doing compilation externally\n return\n try:\n cwd = os.path.join('' if self.inplace else self.build_lib, 'atari_py', 'ale_interface', 'build')\n if not os.path.exists(cwd):\n os.makedirs(cwd)\n subprocess.check_call(['cmake', '..'], cwd=cwd)\n subprocess.check_call(['cmake', '--build', '.'], cwd=cwd)\n except subprocess.CalledProcessError as e:\n sys.stderr.write(\"Could not build atari-py: %s. (HINT: are you sure cmake is installed? You might also be missing a library. Atari-py requires: zlib [installable as 'apt-get install zlib1g-dev' on Ubuntu].)\\n\" % e)\n raise\n \nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=''):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\nsetup(name='atari-py',\n version='0.3.0',\n description='Python bindings to Atari games',\n url='https://github.com/openai/atari-py',\n author='OpenAI',\n author_email='info@openai.com',\n license='',\n packages=['atari_py'],\n package_data={'atari_py': package_data},\n ext_modules=[CMakeExtension('atari_py')],\n cmdclass={'build_ext': Build},\n install_requires=['numpy', 'six'],\n tests_require=['nose2']\n)\n"}}},{"rowIdx":58,"cells":{"python_code":{"kind":"string","value":"import sys\n\nfrom .ale_python_interface import *\nfrom .games import get_game_path, list_games\n\nprint(\n \"[NOTICE] atari-py is deprecated in favor ale-py \"\n \"and will no longer receive further maintenance or critical updates. \"\n \"ale-py is fully backwards compatible with atari-py. 
\"\n \"If you're using Gym, you can simply upgrade via pip install -U gym[atari]\",\n file=sys.stderr,\n)\n\n\n# default to only logging errors\nALEInterface.setLoggerMode(ALEInterface.Logger.Error)\n"}}},{"rowIdx":59,"cells":{"python_code":{"kind":"string","value":"import os\n\n\nSCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))\n\ntry:\n import atari_py_roms\n _games_dir = os.path.join(atari_py_roms.__path__[0], \"atari_roms\")\nexcept ImportError:\n _games_dir = os.path.join(SCRIPT_DIR, \"atari_roms\")\n\n\ndef get_games_dir():\n return _games_dir\n\n\ndef get_game_path(game_name):\n path = os.path.join(_games_dir, game_name) + \".bin\"\n if not os.path.exists(path):\n raise Exception('ROM is missing for %s, see https://github.com/openai/atari-py#roms for instructions' % (game_name,))\n return path\n\ndef list_games():\n files = os.listdir(_games_dir)\n return [os.path.basename(f).split(\".\")[0] for f in files]"}}},{"rowIdx":60,"cells":{"python_code":{"kind":"string","value":"# ale_python_interface.py\n# Author: Ben Goodrich\n# This directly implements a python version of the arcade learning\n# environment interface.\n__all__ = ['ALEInterface']\n\nfrom ctypes import *\nimport numpy as np\nfrom numpy.ctypeslib import as_ctypes\nimport os\nimport six\n\nif os.name == 'posix':\n ale_lib = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__),\n 'ale_interface/libale_c.so'))\nelse:\n ale_lib = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__),\n 'ale_interface/ale_c.dll'))\n\nale_lib.ALE_new.argtypes = None\nale_lib.ALE_new.restype = c_void_p\nale_lib.ALE_del.argtypes = [c_void_p]\nale_lib.ALE_del.restype = None\nale_lib.getString.argtypes = [c_void_p, c_char_p]\nale_lib.getString.restype = c_char_p\nale_lib.getInt.argtypes = [c_void_p, c_char_p]\nale_lib.getInt.restype = c_int\nale_lib.getBool.argtypes = [c_void_p, c_char_p]\nale_lib.getBool.restype = c_bool\nale_lib.getFloat.argtypes = [c_void_p, c_char_p]\nale_lib.getFloat.restype = c_float\nale_lib.setString.argtypes = [c_void_p, c_char_p, c_char_p]\nale_lib.setString.restype = None\nale_lib.setInt.argtypes = [c_void_p, c_char_p, c_int]\nale_lib.setInt.restype = None\nale_lib.setBool.argtypes = [c_void_p, c_char_p, c_bool]\nale_lib.setBool.restype = None\nale_lib.setFloat.argtypes = [c_void_p, c_char_p, c_float]\nale_lib.setFloat.restype = None\nale_lib.loadROM.argtypes = [c_void_p, c_char_p]\nale_lib.loadROM.restype = None\nale_lib.act.argtypes = [c_void_p, c_int]\nale_lib.act.restype = c_int\nale_lib.game_over.argtypes = [c_void_p]\nale_lib.game_over.restype = c_bool\nale_lib.reset_game.argtypes = [c_void_p]\nale_lib.reset_game.restype = None\nale_lib.getAvailableModes.argtypes = [c_void_p, c_void_p]\nale_lib.getAvailableModes.restype = None\nale_lib.getAvailableModesSize.argtypes = [c_void_p]\nale_lib.getAvailableModesSize.restype = c_int\nale_lib.setMode.argtypes = [c_void_p, c_int]\nale_lib.setMode.restype = None\nale_lib.getAvailableDifficulties.argtypes = [c_void_p, c_void_p]\nale_lib.getAvailableDifficulties.restype = None\nale_lib.getAvailableDifficultiesSize.argtypes = [c_void_p]\nale_lib.getAvailableDifficultiesSize.restype = c_int\nale_lib.setDifficulty.argtypes = [c_void_p, c_int]\nale_lib.setDifficulty.restype = None\nale_lib.getLegalActionSet.argtypes = [c_void_p, c_void_p]\nale_lib.getLegalActionSet.restype = None\nale_lib.getLegalActionSize.argtypes = [c_void_p]\nale_lib.getLegalActionSize.restype = c_int\nale_lib.getMinimalActionSet.argtypes = [c_void_p, c_void_p]\nale_lib.getMinimalActionSet.restype = 
None\nale_lib.getMinimalActionSize.argtypes = [c_void_p]\nale_lib.getMinimalActionSize.restype = c_int\nale_lib.getFrameNumber.argtypes = [c_void_p]\nale_lib.getFrameNumber.restype = c_int\nale_lib.lives.argtypes = [c_void_p]\nale_lib.lives.restype = c_int\nale_lib.getEpisodeFrameNumber.argtypes = [c_void_p]\nale_lib.getEpisodeFrameNumber.restype = c_int\nale_lib.getScreen.argtypes = [c_void_p, c_void_p]\nale_lib.getScreen.restype = None\nale_lib.getRAM.argtypes = [c_void_p, c_void_p]\nale_lib.getRAM.restype = None\nale_lib.getRAMSize.argtypes = [c_void_p]\nale_lib.getRAMSize.restype = c_int\nale_lib.getScreenWidth.argtypes = [c_void_p]\nale_lib.getScreenWidth.restype = c_int\nale_lib.getScreenHeight.argtypes = [c_void_p]\nale_lib.getScreenHeight.restype = c_int\nale_lib.getScreenRGB.argtypes = [c_void_p, c_void_p]\nale_lib.getScreenRGB.restype = None\nale_lib.getScreenRGB2.argtypes = [c_void_p, c_void_p]\nale_lib.getScreenRGB2.restype = None\nale_lib.getScreenGrayscale.argtypes = [c_void_p, c_void_p]\nale_lib.getScreenGrayscale.restype = None\nale_lib.saveState.argtypes = [c_void_p]\nale_lib.saveState.restype = None\nale_lib.loadState.argtypes = [c_void_p]\nale_lib.loadState.restype = None\nale_lib.cloneState.argtypes = [c_void_p]\nale_lib.cloneState.restype = c_void_p\nale_lib.restoreState.argtypes = [c_void_p, c_void_p]\nale_lib.restoreState.restype = None\nale_lib.cloneSystemState.argtypes = [c_void_p]\nale_lib.cloneSystemState.restype = c_void_p\nale_lib.restoreSystemState.argtypes = [c_void_p, c_void_p]\nale_lib.restoreSystemState.restype = None\nale_lib.deleteState.argtypes = [c_void_p]\nale_lib.deleteState.restype = None\nale_lib.saveScreenPNG.argtypes = [c_void_p, c_char_p]\nale_lib.saveScreenPNG.restype = None\nale_lib.encodeState.argtypes = [c_void_p, c_void_p, c_int]\nale_lib.encodeState.restype = None\nale_lib.encodeStateLen.argtypes = [c_void_p]\nale_lib.encodeStateLen.restype = c_int\nale_lib.decodeState.argtypes = [c_void_p, c_int]\nale_lib.decodeState.restype = c_void_p\nale_lib.setLoggerMode.argtypes = [c_int]\nale_lib.setLoggerMode.restype = None\n\ndef _as_bytes(s):\n if hasattr(s, 'encode'):\n return s.encode('utf8')\n return s\n\nclass ALEInterface(object):\n # Logger enum\n class Logger:\n Info = 0\n Warning = 1\n Error = 2\n\n def __init__(self):\n self.obj = ale_lib.ALE_new()\n\n def getString(self, key):\n return ale_lib.getString(self.obj, _as_bytes(key))\n def getInt(self, key):\n return ale_lib.getInt(self.obj, _as_bytes(key))\n def getBool(self, key):\n return ale_lib.getBool(self.obj, _as_bytes(key))\n def getFloat(self, key):\n return ale_lib.getFloat(self.obj, _as_bytes(key))\n\n def setString(self, key, value):\n ale_lib.setString(self.obj, _as_bytes(key), _as_bytes(value))\n def setInt(self, key, value):\n ale_lib.setInt(self.obj, _as_bytes(key), int(value))\n def setBool(self, key, value):\n ale_lib.setBool(self.obj, _as_bytes(key), bool(value))\n def setFloat(self, key, value):\n ale_lib.setFloat(self.obj, _as_bytes(key), float(value))\n\n def loadROM(self, rom_file):\n ale_lib.loadROM(self.obj, _as_bytes(rom_file))\n\n def act(self, action):\n return ale_lib.act(self.obj, int(action))\n\n def game_over(self):\n return ale_lib.game_over(self.obj)\n\n def reset_game(self):\n ale_lib.reset_game(self.obj)\n\n def getLegalActionSet(self):\n act_size = ale_lib.getLegalActionSize(self.obj)\n act = np.zeros((act_size), dtype=np.intc)\n ale_lib.getLegalActionSet(self.obj, as_ctypes(act))\n return act\n\n def getMinimalActionSet(self):\n act_size = 
ale_lib.getMinimalActionSize(self.obj)\n act = np.zeros((act_size), dtype=np.intc)\n ale_lib.getMinimalActionSet(self.obj, as_ctypes(act))\n return act\n\n def getAvailableModes(self):\n modes_size = ale_lib.getAvailableModesSize(self.obj)\n modes = np.zeros((modes_size), dtype=np.intc)\n ale_lib.getAvailableModes(self.obj, as_ctypes(modes))\n return modes\n\n def setMode(self, mode):\n ale_lib.setMode(self.obj, int(mode))\n\n def getAvailableDifficulties(self):\n difficulties_size = ale_lib.getAvailableDifficultiesSize(self.obj)\n difficulties = np.zeros((difficulties_size), dtype=np.intc)\n ale_lib.getAvailableDifficulties(self.obj, as_ctypes(difficulties))\n return difficulties\n\n def setDifficulty(self, difficulty):\n ale_lib.setDifficulty(self.obj, int(difficulty))\n\n def getLegalActionSet(self):\n act_size = ale_lib.getLegalActionSize(self.obj)\n act = np.zeros((act_size), dtype=np.intc)\n ale_lib.getLegalActionSet(self.obj, as_ctypes(act))\n return act\n\n def getMinimalActionSet(self):\n act_size = ale_lib.getMinimalActionSize(self.obj)\n act = np.zeros((act_size), dtype=np.intc)\n ale_lib.getMinimalActionSet(self.obj, as_ctypes(act))\n return act\n\n def getFrameNumber(self):\n return ale_lib.getFrameNumber(self.obj)\n\n def lives(self):\n return ale_lib.lives(self.obj)\n\n def getEpisodeFrameNumber(self):\n return ale_lib.getEpisodeFrameNumber(self.obj)\n\n def getScreenDims(self):\n \"\"\"returns a tuple that contains (screen_width, screen_height)\n \"\"\"\n width = ale_lib.getScreenWidth(self.obj)\n height = ale_lib.getScreenHeight(self.obj)\n return (width, height)\n\n def getScreen(self, screen_data=None):\n \"\"\"This function fills screen_data with the RAW Pixel data\n screen_data MUST be a numpy array of uint8/int8. This could be initialized like so:\n screen_data = np.empty(w*h, dtype=np.uint8)\n Notice, it must be width*height in size also\n If it is None, then this function will initialize it\n Note: This is the raw pixel values from the atari, before any RGB palette transformation takes place\n \"\"\"\n if(screen_data is None):\n width = ale_lib.getScreenWidth(self.obj)\n height = ale_lib.getScreenHeight(self.obj)\n screen_data = np.zeros(width*height, dtype=np.uint8)\n ale_lib.getScreen(self.obj, as_ctypes(screen_data))\n return screen_data\n\n def getScreenRGB(self, screen_data=None):\n \"\"\"This function fills screen_data with the data in RGB format\n screen_data MUST be a numpy array of uint8. This can be initialized like so:\n screen_data = np.empty((height,width,3), dtype=np.uint8)\n If it is None, then this function will initialize it.\n On little-endian machines like x86, the channels are BGR order:\n screen_data[x, y, 0:3] is [blue, green, red]\n On big-endian machines (rare in 2017) the channels would be the opposite order.\n There's not much error checking here: if you supply an array that's too small\n this function will produce undefined behavior.\n \"\"\"\n if(screen_data is None):\n width = ale_lib.getScreenWidth(self.obj)\n height = ale_lib.getScreenHeight(self.obj)\n screen_data = np.empty((height, width,3), dtype=np.uint8)\n ale_lib.getScreenRGB(self.obj, as_ctypes(screen_data[:]))\n return screen_data\n\n def getScreenRGB2(self, screen_data=None):\n \"\"\"This function fills screen_data with the data in RGB format.\n screen_data MUST be a numpy array of uint8. 
This can be initialized like so:\n screen_data = np.empty((height,width,3), dtype=np.uint8)\n If it is None, then this function will initialize it.\n On all architectures, the channels are RGB order:\n screen_data[x, y, :] is [red, green, blue]\n There's not much error checking here: if you supply an array that's too small\n this function will produce undefined behavior.\n \"\"\"\n if(screen_data is None):\n width = ale_lib.getScreenWidth(self.obj)\n height = ale_lib.getScreenHeight(self.obj)\n screen_data = np.empty((height, width, 3), dtype=np.uint8)\n assert screen_data.strides == (480, 3, 1)\n ale_lib.getScreenRGB2(self.obj, as_ctypes(screen_data[:]))\n return screen_data\n\n def getScreenGrayscale(self, screen_data=None):\n \"\"\"This function fills screen_data with the data in grayscale\n screen_data MUST be a numpy array of uint8. This can be initialized like so:\n screen_data = np.empty((height,width,1), dtype=np.uint8)\n If it is None, then this function will initialize it.\n \"\"\"\n if(screen_data is None):\n width = ale_lib.getScreenWidth(self.obj)\n height = ale_lib.getScreenHeight(self.obj)\n screen_data = np.empty((height, width,1), dtype=np.uint8)\n ale_lib.getScreenGrayscale(self.obj, as_ctypes(screen_data[:]))\n return screen_data\n\n def getRAMSize(self):\n return ale_lib.getRAMSize(self.obj)\n\n def getRAM(self, ram=None):\n \"\"\"This function grabs the atari RAM.\n ram MUST be a numpy array of uint8/int8. This can be initialized like so:\n ram = np.array(ram_size, dtype=uint8)\n Notice: It must be ram_size where ram_size can be retrieved via the getRAMSize function.\n If it is None, then this function will initialize it.\n \"\"\"\n if(ram is None):\n ram_size = ale_lib.getRAMSize(self.obj)\n ram = np.zeros(ram_size, dtype=np.uint8)\n ale_lib.getRAM(self.obj, as_ctypes(ram))\n return ram\n\n def saveScreenPNG(self, filename):\n \"\"\"Save the current screen as a png file\"\"\"\n return ale_lib.saveScreenPNG(self.obj, _as_bytes(filename))\n\n def saveState(self):\n \"\"\"Saves the state of the system\"\"\"\n return ale_lib.saveState(self.obj)\n\n def loadState(self):\n \"\"\"Loads the state of the system\"\"\"\n return ale_lib.loadState(self.obj)\n\n def cloneState(self):\n \"\"\"This makes a copy of the environment state. This copy does *not*\n include pseudorandomness, making it suitable for planning\n purposes. By contrast, see cloneSystemState.\n \"\"\"\n return ale_lib.cloneState(self.obj)\n\n def restoreState(self, state):\n \"\"\"Reverse operation of cloneState(). This does not restore\n pseudorandomness, so that repeated calls to restoreState() in\n the stochastic controls setting will not lead to the same\n outcomes. By contrast, see restoreSystemState.\n \"\"\"\n ale_lib.restoreState(self.obj, state)\n\n def cloneSystemState(self):\n \"\"\"This makes a copy of the system & environment state, suitable for\n serialization. 
This includes pseudorandomness and so is *not*\n suitable for planning purposes.\n \"\"\"\n return ale_lib.cloneSystemState(self.obj)\n\n def restoreSystemState(self, state):\n \"\"\"Reverse operation of cloneSystemState.\"\"\"\n ale_lib.restoreSystemState(self.obj, state)\n\n def deleteState(self, state):\n \"\"\" Deallocates the ALEState \"\"\"\n ale_lib.deleteState(state)\n\n def encodeStateLen(self, state):\n return ale_lib.encodeStateLen(state)\n\n def encodeState(self, state, buf=None):\n if buf == None:\n length = ale_lib.encodeStateLen(state)\n buf = np.zeros(length, dtype=np.uint8)\n ale_lib.encodeState(state, as_ctypes(buf), c_int(len(buf)))\n return buf\n\n def decodeState(self, serialized):\n return ale_lib.decodeState(as_ctypes(serialized), len(serialized))\n\n def __del__(self):\n ale_lib.ALE_del(self.obj)\n\n @staticmethod\n def setLoggerMode(mode):\n dic = {'info': 0, 'warning': 1, 'error': 2}\n mode = dic.get(mode, mode)\n assert mode in [0, 1, 2], \"Invalid Mode! Mode must be one of 0: info, 1: warning, 2: error\"\n ale_lib.setLoggerMode(mode)\n"}}},{"rowIdx":61,"cells":{"python_code":{"kind":"string","value":"import os\nimport hashlib\nimport shutil\nimport zipfile\nimport argparse\nimport io\n\nfrom .games import get_games_dir\n\n\nSCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))\nMD5_CHUNK_SIZE = 8096\n\n\ndef _check_zipfile(f, process_f):\n with zipfile.ZipFile(f) as zf:\n for entry in zf.infolist():\n _root, ext = os.path.splitext(entry.filename)\n with zf.open(entry) as innerf:\n if ext == \".zip\":\n _check_zipfile(innerf, process_f)\n else:\n process_f(innerf)\n\n\ndef _calc_md5(f):\n h = hashlib.md5()\n while True:\n chunk = f.read(MD5_CHUNK_SIZE)\n if chunk == b'':\n break\n h.update(chunk)\n return h.hexdigest()\n\n\ndef import_roms(dirpath=\".\"):\n md5s = {}\n copied_md5s = set()\n\n with open(os.path.join(SCRIPT_DIR, \"ale_interface\", \"md5.txt\")) as f:\n f.readline()\n f.readline()\n for line in f:\n hexdigest, filename = line.strip().split(' ')\n md5s[hexdigest] = os.path.join(get_games_dir(), filename)\n\n def save_if_matches(f):\n hexdigest = _calc_md5(f)\n if hexdigest == \"ce5cc62608be2cd3ed8abd844efb8919\":\n # the ALE version of road_runner.bin is not easily available\n # patch this file instead to match the correct data\n delta = {4090: 216, 4091: 111, 4092: 216, 4093: 111, 4094: 216, 4095: 111, 8186: 18, 8187: 43, 8188: -216, 8189: 49, 8190: -216, 8191: 49, 12281: 234, 12282: 18, 12283: 11, 12284: -216, 12285: 17, 12286: -216, 12287: 17, 16378: 18, 16379: -21, 16380: -216, 16381: -15, 16382: -216, 16383: -15}\n f.seek(0)\n data = bytearray(f.read())\n for index, offset in delta.items():\n data[index] += offset\n name = f\"patched version of {f.name}\"\n f = io.BytesIO(bytes(data))\n f.name = name\n hexdigest = _calc_md5(f)\n\n if hexdigest in md5s and hexdigest not in copied_md5s:\n copied_md5s.add(hexdigest)\n rom_path = md5s[hexdigest]\n print(f\"copying {os.path.basename(rom_path)} from {f.name} to {rom_path}\")\n os.makedirs(os.path.dirname(rom_path), exist_ok=True)\n f.seek(0)\n with open(rom_path, \"wb\") as out_f:\n shutil.copyfileobj(f, out_f)\n \n for root, dirs, files in os.walk(dirpath):\n for filename in files:\n filepath = os.path.join(root, filename)\n with open(filepath, \"rb\") as f:\n _root, ext = os.path.splitext(filename)\n if ext == \".zip\":\n try:\n _check_zipfile(f, save_if_matches)\n except zipfile.BadZipFile:\n pass\n else:\n save_if_matches(f)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n 
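# The single positional argument is the directory that import_roms() walks recursively\n    # (including nested .zip archives) while matching files against the known ROM md5s.\n    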
parser.add_argument(\"dirpath\", help=\"path to directory containing extracted ROM files\")\n args = parser.parse_args()\n import_roms(args.dirpath)\n\n\nif __name__ == \"__main__\":\n main()"}}},{"rowIdx":62,"cells":{"python_code":{"kind":"string","value":""}}},{"rowIdx":63,"cells":{"python_code":{"kind":"string","value":"import atari_py\nimport numpy as np\n\ndef test_smoke():\n game_path = atari_py.get_game_path('tetris')\n ale = atari_py.ALEInterface()\n ale.loadROM(game_path)\n action_set = ale.getMinimalActionSet()\n\n # Test stepping\n ale.act(action_set[0])\n\n # Test screen capture\n (screen_width,screen_height) = ale.getScreenDims()\n arr = np.zeros((screen_height, screen_width, 4), dtype=np.uint8)\n ale.getScreenRGB(arr)\n\nif __name__ == '__main__':\n print('smoke test')\n test_smoke()\n print('done!')\n"}}},{"rowIdx":64,"cells":{"python_code":{"kind":"string","value":"#!/usr/bin/env python\n# python_example.py\n# Author: Ben Goodrich\n#\n# This is a direct port to python of the shared library example from\n# ALE provided in doc/examples/sharedLibraryInterfaceExample.cpp\nfrom __future__ import print_function\n\nimport sys\nfrom random import randrange\nfrom atari_py import ALEInterface\n\nif len(sys.argv) < 2:\n print('Usage:', sys.argv[0], 'rom_file')\n sys.exit()\n\nale = ALEInterface()\n\n# Get & Set the desired settings\nale.setInt('random_seed', 123)\n\n# Set USE_SDL to true to display the screen. ALE must be compilied\n# with SDL enabled for this to work. On OSX, pygame init is used to\n# proxy-call SDL_main.\nUSE_SDL = False\nif USE_SDL:\n if sys.platform == 'darwin':\n import pygame\n pygame.init()\n ale.setBool('sound', False) # Sound doesn't work on OSX\n elif sys.platform.startswith('linux'):\n ale.setBool('sound', True)\n ale.setBool('display_screen', True)\n\n# Load the ROM file\nale.loadROM(sys.argv[1])\n\n# Get the list of legal actions\nlegal_actions = ale.getLegalActionSet()\n\n# Play 10 episodes\nfor episode in range(10):\n total_reward = 0\n while not ale.game_over():\n a = legal_actions[randrange(len(legal_actions))]\n # Apply an action and get the resulting reward\n reward = ale.act(a);\n total_reward += reward\n print('Episode', episode, 'ended with score:', total_reward)\n ale.reset_game()\n"}}},{"rowIdx":65,"cells":{"python_code":{"kind":"string","value":"# TODO: the code below does not work!\ndef detect_even_palindrome(arr):\n \"\"\"\n You're given an array of strings,\n your task is to return an array of all palindromes of even length\n in the same order of appearance.\n Consider the empty string as not palindrome.\n\n Examples:\n * detect_even_palindrome([\"AabcbaA\", \"abccdA\", \"1221\"]) => [\"1221\"]\n * detect_even_palindrome([\"rebber\", \"madam\"]) => [\"rebber\"]\n * detect_even_palindrome([\"\", \"124\", \"a\"]) => []\n * detect_even_palindrome([]) => []\n \"\"\"\n # END OF CONTEXT\n return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != '']\n # END OF SOLUTION\n\ndef check(candidate):\n assert candidate([\"AabcbaA\", \"abccdA\", \"1221\"]) == [\"1221\"]\n assert candidate([\"rebber\", \"madam\"]) == [\"rebber\"]\n assert candidate([\"\", \"124\", \"a\"]) == []\n assert candidate([]) == []\n assert candidate([\"\"]) == []\n assert candidate([\"as\"]) == []\n assert candidate([\"asd\"]) == []\n assert candidate([\"asd\", \"asd\"]) == []\n assert candidate([\"rubbur\", \"rubbur\"]) == [\"rubbur\", \"rubbur\"]\n assert candidate([\"rubber\"]) == []\n\nif __name__ == '__main__':\n 
check(detect_even_palindrome)\n"}}},{"rowIdx":66,"cells":{"python_code":{"kind":"string","value":"# sdfljafowejidsfjospadjcfaopwjeopfsjsadkl;fjaowejfopjdksaldfjopweajfojasdfkljafpo2wqd;lcmpovnteoirdpsafd\n# sdf9wjfaowiejf-0j23w9-eafjidosjf023qjiobgkf023w8hger90fivdfginb0qaerpoeprg0jegar0-3wjfiiewrowqeoiwer\n# f0-23rnfer0-wfaeijoafweop32023lnfewopiagsd9234toerg9uegapjr3bng4eropgeojsfaewneffa0rq32fwiojwefniaggerj\n# f03j4efqpwjdf902a3jwopfvjae09fj q9p23wjdoJDF\n##WF0923JWEPOFJAF\n#[[[[[[[\n# {{{\n\n\ndef detect_even_palindrome(arr):\n \"\"\"\n You're given an array of strings,\n your task is to return an array of all palindromes of even length\n in the same order of appearance.\n Consider the empty string as not palindrome.\n\n Examples:\n * detect_even_palindrome([\"AabcbaA\", \"abccdA\", \"1221\"]) => [\"1221\"]\n * detect_even_palindrome([\"rebber\", \"madam\"]) => [\"rebber\"]\n * detect_even_palindrome([\"\", \"124\", \"a\"]) => []\n * detect_even_palindrome([]) => []\n \"\"\"\n # END OF CONTEXT\n return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != '']\n # END OF SOLUTION\n\n\ndef check(candidate):\n assert candidate([\"AabcbaA\", \"abccdA\", \"1221\"]) == [\"1221\"]\n assert candidate([\"rebber\", \"madam\"]) == [\"rebber\"]\n assert candidate([\"\", \"124\", \"a\"]) == []\n assert candidate([]) == []\n assert candidate([\"\"]) == []\n assert candidate([\"as\"]) == []\n assert candidate([\"asd\"]) == []\n assert candidate([\"asd\", \"asd\"]) == []\n assert candidate([\"rubbur\", \"rubbur\"]) == [\"rubbur\", \"rubbur\"]\n assert candidate([\"rubber\"]) == []\n\nif __name__ == '__main__':\n check(detect_even_palindrome)\n"}}},{"rowIdx":67,"cells":{"python_code":{"kind":"string","value":"def genpassword(wlc,maxchar,txt,List,verbose):\n word = \"\"\n i1 = i2 = i3 = i4 = i5 = i6 = i6 = i7 = i8 = i9 = i10 = i11 = i12 = i13 = i14 = i15 = 0\n txtfile = open(txt,'w')\n\n i = 0\n mc = int(maxchar) - 1\n lword = [0]\n for i in range(mc):\n lword += [0] \n \n for i1 in range(len(wlc)):\n for i2 in range(len(wlc)):\n for i3 in range(len(wlc)):\n for i4 in range(len(wlc)):\n for i5 in range(len(wlc)):\n for i6 in range(len(wlc)):\n for i7 in range(len(wlc)):\n for i8 in range(len(wlc)):\n for i9 in range(len(wlc)):\n for i10 in range(len(wlc)):\n for i11 in range(len(wlc)):\n for i12 in range(len(wlc)):\n for i13 in range(len(wlc)):\n for i14 in range(len(wlc)):\n for i15 in range(len(wlc)):\n if int(maxchar) == 1 :\n word = wlc[i15]\n if int(maxchar) == 2 :\n word = wlc[i14] + wlc[i15]\n if int(maxchar) == 3 :\n word = wlc[i13] + wlc[i14] + wlc[i15]\n if int(maxchar) == 4 :\n word = wlc[i12] + wlc[i13] + wlc[i14] + wlc[i15]\n if int(maxchar) == 5 :\n word = wlc[i11] + wlc[i12] + wlc[i13] + wlc[i14] \\\n + wlc[i15]\n if int(maxchar) == 6 :\n word = wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \\\n + wlc[i14] + wlc[i15]\n if int(maxchar) == 7 :\n word = wlc[i9] + wlc[i10] + wlc[i11] + wlc[i12] \\\n + wlc[i13] + wlc[i14] + wlc[i15]\n if int(maxchar) == 8 :\n word = wlc[i8] + wlc[i9] + wlc[i10] + wlc[i11] \\\n + wlc[i12] + wlc[i13] + wlc[i14] + wlc[i15]\n if int(maxchar) == 9 :\n word = wlc[i7] + wlc[i8] + wlc[i9] + wlc[i10] \\\n + wlc[i11] + wlc[i12] + wlc[i13] + wlc[i14] + wlc[i15]\n if int(maxchar) == 10 :\n word = wlc[i6] + wlc[i7] + wlc[i8] + wlc[i9] \\\n + wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] + wlc[i14] \\\n + wlc[i15]\n if int(maxchar) == 11 :\n word = wlc[i5] + wlc[i6] + wlc[i7] + wlc[i8] \\\n + wlc[i9] + wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \\\n + wlc[i14] + wlc[i15]\n 
if int(maxchar) == 12 :\n word = wlc[i4] + wlc[i5] + wlc[i6] + wlc[i7] + wlc[i8] \\\n + wlc[i9] + wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \\\n + wlc[i14] + wlc[i15]\n if int(maxchar) == 13 :\n word = wlc[i3] + wlc[i4] + wlc[i5] + wlc[i6] \\\n + wlc[i7] + wlc[i8] + wlc[i9] + wlc[i10]\\\n + wlc[i11] + wlc[i12] + wlc[i13] \\\n + wlc[i14] + wlc[i15]\n if int(maxchar) == 14 :\n word = wlc[i2] +wlc[i3] + wlc[i4] + wlc[i5] + wlc[i6] \\\n + wlc[i7] + wlc[i8] + wlc[i9] + wlc[i10]\\\n + wlc[i11] + wlc[i12] + wlc[i13] \\\n + wlc[i14] + wlc[i15]\n if int(maxchar) == 15 :\n word = wlc[i1] + wlc[i2] + wlc[i3] + wlc[i4] \\\n + wlc[i5] + wlc[i6] + wlc[i7] + wlc[i8] + wlc[i9] \\\n + wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \\\n + wlc[i14] + wlc[i15]\n \n if int(verbose) == 1:\n print(word)\n \n txtfile.writelines(word + \"\\n\")\n\n i = 0\n end = 0\n if int(List) == 1 :\n for i in range(len(word)):\n lword[i] = \"9\"\n if str(lword) == str(list(word)):\n end = 1\n elif (int(List) == 2):\n for i in range(len(word)):\n lword[i] = \"z\"\n if str(lword) == str(list(word)):\n end = 1\n elif (int(List) == 3):\n for i in range(len(word)):\n lword[i] = \"Z\"\n if str(lword) == str(list(word)):\n end = 1\n elif (int(List) == 4):\n for i in range(len(word)):\n lword[i] = \"z\"\n if str(lword) == str(list(word)):\n end = 1\n elif (int(List) == 5):\n for i in range(len(word)):\n lword[i] = \"Z\"\n if str(lword) == str(list(word)):\n end = 1\n elif (int(List) == 6):\n for i in range(len(word)):\n lword[i] = \"Z\"\n if str(lword) == str(list(word)):\n end = 1\n \n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n if end == 1 : break\n \n txtfile.close()\n\ndef detect_even_palindrome(arr):\n \"\"\"\n You're given an array of strings,\n your task is to return an array of all palindromes of even length\n in the same order of appearance.\n Consider the empty string as not palindrome.\n\n Examples:\n * detect_even_palindrome([\"AabcbaA\", \"abccdA\", \"1221\"]) => [\"1221\"]\n * detect_even_palindrome([\"rebber\", \"madam\"]) => [\"rebber\"]\n * detect_even_palindrome([\"\", \"124\", \"a\"]) => []\n * detect_even_palindrome([]) => []\n \"\"\"\n # END OF CONTEXT\n return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != '']\n # END OF SOLUTION\n\ndef check(candidate):\n assert candidate([\"AabcbaA\", \"abccdA\", \"1221\"]) == [\"1221\"]\n assert candidate([\"rebber\", \"madam\"]) == [\"rebber\"]\n assert candidate([\"\", \"124\", \"a\"]) == []\n assert candidate([]) == []\n assert candidate([\"\"]) == []\n assert candidate([\"as\"]) == []\n assert candidate([\"asd\"]) == []\n assert candidate([\"asd\", \"asd\"]) == []\n assert candidate([\"rubbur\", \"rubbur\"]) == [\"rubbur\", \"rubbur\"]\n assert candidate([\"rubber\"]) == []\n\nif __name__ == '__main__':\n check(detect_even_palindrome)\n"}}},{"rowIdx":68,"cells":{"python_code":{"kind":"string","value":"def convert_time_zone(time, zone):\n \"\"\"\n Convert time from UTC to a given time zone.\n \"\"\"\n return 'y' % 5 and 'wtf'\n halt()\n zone and 59\n\ndef reverse_string(s):\n \"\"\"\n Reverse a string.\n \"\"\"\n s[1]\n return 'y' % 5 and 'wtf'\n\ndef detect_even_palindrome(arr):\n \"\"\"\n You're given an array of strings,\n your task is to return an array of all palindromes of even length\n in the same 
order of appearance.\n Consider the empty string as not palindrome.\n\n Examples:\n * detect_even_palindrome([\"AabcbaA\", \"abccdA\", \"1221\"]) => [\"1221\"]\n * detect_even_palindrome([\"rebber\", \"madam\"]) => [\"rebber\"]\n * detect_even_palindrome([\"\", \"124\", \"a\"]) => []\n * detect_even_palindrome([]) => []\n \"\"\"\n # END OF CONTEXT\n return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != '']\n # END OF SOLUTION\n\ndef check(candidate):\n assert candidate([\"AabcbaA\", \"abccdA\", \"1221\"]) == [\"1221\"]\n assert candidate([\"rebber\", \"madam\"]) == [\"rebber\"]\n assert candidate([\"\", \"124\", \"a\"]) == []\n assert candidate([]) == []\n assert candidate([\"\"]) == []\n assert candidate([\"as\"]) == []\n assert candidate([\"asd\"]) == []\n assert candidate([\"asd\", \"asd\"]) == []\n assert candidate([\"rubbur\", \"rubbur\"]) == [\"rubbur\", \"rubbur\"]\n assert candidate([\"rubber\"]) == []\n\nif __name__ == '__main__':\n check(detect_even_palindrome)\n"}}},{"rowIdx":69,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'factorize'\n#[PROMPT]\nfrom typing import List\n\n\ndef factorize(n: int) -> List[int]:\n \"\"\" Return list of prime factors of given integer in the order from smallest to largest.\n Each of the factors should be listed number of times corresponding to how many times it appeares in factorization.\n Input number should be equal to the product of all factors\n >>> factorize(8)\n [2, 2, 2]\n >>> factorize(25)\n [5, 5]\n >>> factorize(70)\n [2, 5, 7]\n \"\"\"\n#[SOLUTION]\n import math\n fact = []\n i = 2\n while i <= int(math.sqrt(n) + 1):\n if n % i == 0:\n fact.append(i)\n n //= i\n else:\n i += 1\n\n if n > 1:\n fact.append(n)\n return fact\n#[CHECK]\n\n\nMETADATA = {\n 'author': 'jt',\n 'dataset': 'test'\n}\n\n\ndef check(candidate):\n assert candidate(2) == [2]\n assert candidate(4) == [2, 2]\n assert candidate(8) == [2, 2, 2]\n assert candidate(3 * 19) == [3, 19]\n assert candidate(3 * 19 * 3 * 19) == [3, 3, 19, 19]\n assert candidate(3 * 19 * 3 * 19 * 3 * 19) == [3, 3, 3, 19, 19, 19]\n assert candidate(3 * 19 * 19 * 19) == [3, 19, 19, 19]\n assert candidate(3 * 2 * 3) == [2, 3, 3]\n"}}},{"rowIdx":70,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'is_simple_power'\n#[PROMPT]\n\ndef is_simple_power(x, n):\n \"\"\"Your task is to write a function that returns true if a number x is a simple\n power of n and false in other cases.\n x is a simple power of n if n**int=x\n For example:\n is_simple_power(1, 4) => true\n is_simple_power(2, 2) => true\n is_simple_power(8, 2) => true\n is_simple_power(3, 2) => false\n is_simple_power(3, 1) => false\n is_simple_power(5, 3) => false\n \"\"\"\n#[SOLUTION]\n if (n == 1): \n return (x == 1) \n power = 1\n while (power < x): \n power = power * n \n return (power == x) \n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate(16, 2)== True, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate(143214, 16)== False, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate(4, 2)==True, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate(9, 3)==True, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate(16, 4)==True, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate(24, 2)==False, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate(128, 4)==False, \"This prints if this assert fails 1 (good for 
debugging!)\"\n assert candidate(12, 6)==False, \"This prints if this assert fails 1 (good for debugging!)\"\n\n # Check some edge cases that are easy to work out by hand.\n assert candidate(1, 1)==True, \"This prints if this assert fails 2 (also good for debugging!)\"\n assert candidate(1, 12)==True, \"This prints if this assert fails 2 (also good for debugging!)\"\n\n"}}},{"rowIdx":71,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'solve'\n#[PROMPT]\n\ndef solve(N):\n \"\"\"Given a positive integer N, return the total sum of its digits in binary.\n \n Example\n For N = 1000, the sum of digits will be 1 the output should be \"1\".\n For N = 150, the sum of digits will be 6 the output should be \"110\".\n For N = 147, the sum of digits will be 12 the output should be \"1100\".\n \n Variables:\n @N integer\n Constraints: 0 ≀ N ≀ 10000.\n Output:\n a string of binary number\n \"\"\"\n#[SOLUTION]\n return bin(sum(int(i) for i in str(N)))[2:]\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert True, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate(1000) == \"1\", \"Error\"\n assert candidate(150) == \"110\", \"Error\"\n assert candidate(147) == \"1100\", \"Error\"\n\n # Check some edge cases that are easy to work out by hand.\n assert True, \"This prints if this assert fails 2 (also good for debugging!)\"\n assert candidate(333) == \"1001\", \"Error\"\n assert candidate(963) == \"10010\", \"Error\"\n\n"}}},{"rowIdx":72,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'correct_bracketing'\n#[PROMPT]\n\n\ndef correct_bracketing(brackets: str):\n \"\"\" brackets is a string of \"(\" and \")\".\n return True if every opening bracket has a corresponding closing bracket.\n\n >>> correct_bracketing(\"(\")\n False\n >>> correct_bracketing(\"()\")\n True\n >>> correct_bracketing(\"(()())\")\n True\n >>> correct_bracketing(\")(()\")\n False\n \"\"\"\n#[SOLUTION]\n depth = 0\n for b in brackets:\n if b == \"(\":\n depth += 1\n else:\n depth -= 1\n if depth < 0:\n return False\n return depth == 0\n#[CHECK]\n\n\nMETADATA = {}\n\n\ndef check(candidate):\n assert candidate(\"()\")\n assert candidate(\"(()())\")\n assert candidate(\"()()(()())()\")\n assert candidate(\"()()((()()())())(()()(()))\")\n assert not candidate(\"((()())))\")\n assert not candidate(\")(()\")\n assert not candidate(\"(\")\n assert not candidate(\"((((\")\n assert not candidate(\")\")\n assert not candidate(\"(()\")\n assert not candidate(\"()()(()())())(()\")\n assert not candidate(\"()()(()())()))()\")\n\n"}}},{"rowIdx":73,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'pluck'\n#[PROMPT]\n\ndef pluck(arr):\n \"\"\"\n \"Given an array representing a branch of a tree that has non-negative integer nodes\n your task is to pluck one of the nodes and return it.\n The plucked node should be the node with the smallest even value.\n If multiple nodes with the same smallest even value are found return the node that has smallest index.\n\n The plucked node should be returned in a list, [ smalest_value, its index ],\n If there are no even values or the given array is empty, return [].\n\n Example 1:\n Input: [4,2,3]\n Output: [2, 1]\n Explanation: 2 has the smallest even value, and 2 has the smallest index.\n\n Example 2:\n Input: [1,2,3]\n Output: [2, 1]\n Explanation: 2 has the smallest even value, and 2 has the smallest index. 
\n\n Example 3:\n Input: []\n Output: []\n \n Example 4:\n Input: [5, 0, 3, 0, 4, 2]\n Output: [0, 1]\n Explanation: 0 is the smallest value, but there are two zeros,\n so we will choose the first zero, which has the smallest index.\n\n Constraints:\n * 1 <= nodes.length <= 10000\n * 0 <= node.value\n \"\"\"\n#[SOLUTION]\n if(len(arr) == 0): return []\n evens = list(filter(lambda x: x%2 == 0, arr))\n if(evens == []): return []\n return [min(evens), arr.index(min(evens))]\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert True, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate([4,2,3]) == [2, 1], \"Error\"\n assert candidate([1,2,3]) == [2, 1], \"Error\"\n assert candidate([]) == [], \"Error\"\n assert candidate([5, 0, 3, 0, 4, 2]) == [0, 1], \"Error\"\n\n # Check some edge cases that are easy to work out by hand.\n assert True, \"This prints if this assert fails 2 (also good for debugging!)\"\n assert candidate([1, 2, 3, 0, 5, 3]) == [0, 3], \"Error\"\n assert candidate([5, 4, 8, 4 ,8]) == [4, 1], \"Error\"\n assert candidate([7, 6, 7, 1]) == [6, 1], \"Error\"\n assert candidate([7, 9, 7, 1]) == [], \"Error\"\n\n"}}},{"rowIdx":74,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'add'\n#[PROMPT]\n\ndef add(lst):\n \"\"\"Given a non-empty list of integers lst. add the even elements that are at odd indices..\n\n\n Examples:\n add([4, 2, 6, 7]) ==> 2 \n \"\"\"\n#[SOLUTION]\n return sum([lst[i] for i in range(1, len(lst), 2) if lst[i]%2 == 0])\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate([4, 88]) == 88\n assert candidate([4, 5, 6, 7, 2, 122]) == 122\n assert candidate([4, 0, 6, 7]) == 0\n assert candidate([4, 4, 6, 8]) == 12\n\n # Check some edge cases that are easy to work out by hand.\n \n"}}},{"rowIdx":75,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'any_int'\n#[PROMPT]\n\ndef any_int(x, y, z):\n '''\n Create a function that takes 3 numbers.\n Returns true if one of the numbers is equal to the sum of the other two, and all numbers are integers.\n Returns false in any other cases.\n \n Examples\n any_int(5, 2, 7) ➞ True\n \n any_int(3, 2, 2) ➞ False\n\n any_int(3, -2, 1) ➞ True\n \n any_int(3.6, -2.2, 2) ➞ False\n \n\n \n '''\n#[SOLUTION]\n \n if isinstance(x,int) and isinstance(y,int) and isinstance(z,int):\n if (x+y==z) or (x+z==y) or (y+z==x):\n return True\n return False\n return False\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate(2, 3, 1)==True, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate(2.5, 2, 3)==False, \"This prints if this assert fails 2 (good for debugging!)\"\n assert candidate(1.5, 5, 3.5)==False, \"This prints if this assert fails 3 (good for debugging!)\"\n assert candidate(2, 6, 2)==False, \"This prints if this assert fails 4 (good for debugging!)\"\n assert candidate(4, 2, 2)==True, \"This prints if this assert fails 5 (good for debugging!)\"\n assert candidate(2.2, 2.2, 2.2)==False, \"This prints if this assert fails 6 (good for debugging!)\"\n assert candidate(-4, 6, 2)==True, \"This prints if this assert fails 7 (good for debugging!)\"\n\n # Check some edge cases that are easy to work out by hand.\n assert candidate(2,1,1)==True, \"This prints if this assert fails 8 (also good for debugging!)\"\n assert candidate(3,4,7)==True, \"This prints if this assert fails 9 (also good for debugging!)\"\n\n"}}},{"rowIdx":76,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 
'exchange'\n#[PROMPT]\n\ndef exchange(lst1, lst2):\n \"\"\"In this problem, you will implement a function that takes two lists of numbers,\n and determines whether it is possible to perform an exchange of elements\n between them to make lst1 a list of only even numbers.\n There is no limit on the number of exchanged elements between lst1 and lst2.\n If it is possible to exchange elements between the lst1 and lst2 to make\n all the elements of lst1 to be even, return \"YES\".\n Otherwise, return \"NO\".\n For example:\n exchange([1, 2, 3, 4], [1, 2, 3, 4]) => \"YES\"\n exchange([1, 2, 3, 4], [1, 5, 3, 4]) => \"NO\"\n It is assumed that the input lists will be non-empty.\n \"\"\"\n#[SOLUTION]\n odd = 0\n even = 0\n for i in lst1:\n if i%2 == 1:\n odd += 1\n for i in lst2:\n if i%2 == 0:\n even += 1\n if even >= odd:\n return \"YES\"\n return \"NO\"\n \n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate([1, 2, 3, 4], [1, 2, 3, 4]) == \"YES\"\n assert candidate([1, 2, 3, 4], [1, 5, 3, 4]) == \"NO\"\n assert candidate([1, 2, 3, 4], [2, 1, 4, 3]) == \"YES\" \n assert candidate([5, 7, 3], [2, 6, 4]) == \"YES\"\n assert candidate([5, 7, 3], [2, 6, 3]) == \"NO\" \n assert candidate([3, 2, 6, 1, 8, 9], [3, 5, 5, 1, 1, 1]) == \"NO\"\n\n # Check some edge cases that are easy to work out by hand.\n assert candidate([100, 200], [200, 200]) == \"YES\"\n\n"}}},{"rowIdx":77,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'reverse_delete'\n#[PROMPT]\n\ndef reverse_delete(s,c):\n \"\"\"Task\n We are given two strings s and c, you have to deleted all the characters in s that are equal to any character in c\n then check if the result string is palindrome.\n A string is called palindrome if it reads the same backward as forward.\n You should return a tuple containing the result string and True/False for the check.\n Example\n For s = \"abcde\", c = \"ae\", the result should be ('bcd',False)\n For s = \"abcdef\", c = \"b\" the result should be ('acdef',False)\n For s = \"abcdedcba\", c = \"ab\", the result should be ('cdedc',True)\n \"\"\"\n#[SOLUTION]\n s = ''.join([char for char in s if char not in c])\n return (s,s[::-1] == s)\n#[CHECK]\ndef check(candidate):\n\n assert candidate(\"abcde\",\"ae\") == ('bcd',False)\n assert candidate(\"abcdef\", \"b\") == ('acdef',False)\n assert candidate(\"abcdedcba\",\"ab\") == ('cdedc',True)\n assert candidate(\"dwik\",\"w\") == ('dik',False)\n assert candidate(\"a\",\"a\") == ('',True)\n assert candidate(\"abcdedcba\",\"\") == ('abcdedcba',True)\n assert candidate(\"abcdedcba\",\"v\") == ('abcdedcba',True)\n"}}},{"rowIdx":78,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'strange_sort_list'\n#[PROMPT]\n\ndef strange_sort_list(lst):\n '''\n Given list of integers, return list in strange order.\n Strange sorting, is when you start with the minimum value,\n then maximum of the remaining integers, then minimum and so on.\n\n Examples:\n strange_sort_list([1, 2, 3, 4]) == [1, 4, 2, 3]\n strange_sort_list([5, 5, 5, 5]) == [5, 5, 5, 5]\n strange_sort_list([]) == []\n '''\n#[SOLUTION]\n res, switch = [], True\n while lst:\n res.append(min(lst) if switch else max(lst))\n lst.remove(res[-1])\n switch = not switch\n return res\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate([1, 2, 3, 4]) == [1, 4, 2, 3]\n assert candidate([5, 6, 7, 8, 9]) == [5, 9, 6, 8, 7]\n assert candidate([1, 2, 3, 4, 5]) == [1, 5, 2, 4, 3]\n assert candidate([5, 6, 7, 8, 9, 1]) == [1, 9, 5, 8, 6, 7]\n assert 
candidate([5, 5, 5, 5]) == [5, 5, 5, 5]\n assert candidate([]) == []\n assert candidate([1,2,3,4,5,6,7,8]) == [1, 8, 2, 7, 3, 6, 4, 5]\n assert candidate([0,2,2,2,5,5,-5,-5]) == [-5, 5, -5, 5, 0, 2, 2, 2]\n assert candidate([111111]) == [111111]\n\n # Check some edge cases that are easy to work out by hand.\n assert True\n\n"}}},{"rowIdx":79,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'words_in_sentence'\n#[PROMPT]\n\ndef words_in_sentence(sentence):\n \"\"\"\n You are given a string representing a sentence,\n the sentence contains some words separated by a space,\n and you have to return a string that contains the words from the original sentence,\n whose lengths are prime numbers,\n the order of the words in the new string should be the same as the original one.\n\n Example 1:\n Input: sentence = \"This is a test\"\n Output: \"is\"\n\n Example 2:\n Input: sentence = \"lets go for swimming\"\n Output: \"go for\"\n\n Constraints:\n * 1 <= len(sentence) <= 100\n * sentence contains only letters\n \"\"\"\n#[SOLUTION]\n new_lst = []\n for word in sentence.split():\n flg = 0\n if len(word) == 1:\n flg = 1\n for i in range(2, len(word)):\n if len(word)%i == 0:\n flg = 1\n if flg == 0 or len(word) == 2:\n new_lst.append(word)\n return \" \".join(new_lst)\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate(\"This is a test\") == \"is\"\n assert candidate(\"lets go for swimming\") == \"go for\"\n assert candidate(\"there is no place available here\") == \"there is no place\"\n assert candidate(\"Hi I am Hussein\") == \"Hi am Hussein\"\n assert candidate(\"go for it\") == \"go for it\"\n\n # Check some edge cases that are easy to work out by hand.\n assert candidate(\"here\") == \"\"\n assert candidate(\"here is\") == \"is\"\n\n"}}},{"rowIdx":80,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'check_if_last_char_is_a_letter'\n#[PROMPT]\n\ndef check_if_last_char_is_a_letter(txt):\n '''\n Create a function that returns True if the last character\n of a given string is an alphabetical character and is not\n a part of a word, and False otherwise.\n Note: \"word\" is a group of characters separated by space.\n\n Examples:\n check_if_last_char_is_a_letter(\"apple pie\") ➞ False\n check_if_last_char_is_a_letter(\"apple pi e\") ➞ True\n check_if_last_char_is_a_letter(\"apple pi e \") ➞ False\n check_if_last_char_is_a_letter(\"\") ➞ False \n '''\n#[SOLUTION]\n \n check = txt.split(' ')[-1]\n return True if len(check) == 1 and (97 <= ord(check.lower()) <= 122) else False\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate(\"apple\") == False\n assert candidate(\"apple pi e\") == True\n assert candidate(\"eeeee\") == False\n assert candidate(\"A\") == True\n assert candidate(\"Pumpkin pie \") == False\n assert candidate(\"Pumpkin pie 1\") == False\n assert candidate(\"\") == False\n assert candidate(\"eeeee e \") == False\n assert candidate(\"apple pie\") == False\n assert candidate(\"apple pi e \") == False\n\n # Check some edge cases that are easy to work out by hand.\n assert True\n\n"}}},{"rowIdx":81,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'multiply'\n#[PROMPT]\n\ndef multiply(a, b):\n \"\"\"Complete the function that takes two integers and returns \n the product of their unit digits.\n Assume the input is always valid.\n Examples:\n multiply(148, 412) should return 16.\n multiply(19, 28) should return 72.\n multiply(2020, 1851) should return 0.\n multiply(14,-15) should return 20.\n 
\"\"\"\n#[SOLUTION]\n return abs(a % 10) * abs(b % 10)\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate(148, 412) == 16, \"First test error: \" + str(candidate(148, 412)) \n assert candidate(19, 28) == 72, \"Second test error: \" + str(candidate(19, 28)) \n assert candidate(2020, 1851) == 0, \"Third test error: \" + str(candidate(2020, 1851))\n assert candidate(14,-15) == 20, \"Fourth test error: \" + str(candidate(14,-15)) \n assert candidate(76, 67) == 42, \"Fifth test error: \" + str(candidate(76, 67)) \n assert candidate(17, 27) == 49, \"Sixth test error: \" + str(candidate(17, 27)) \n\n\n # Check some edge cases that are easy to work out by hand.\n assert candidate(0, 1) == 0, \"1st edge test error: \" + str(candidate(0, 1))\n assert candidate(0, 0) == 0, \"2nd edge test error: \" + str(candidate(0, 0))\n\n"}}},{"rowIdx":82,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'can_arrange'\nFIX = \"\"\"\nFixed typo arange -> arrange\nRemove semicolon from solution\n\"\"\"\n\n#[PROMPT]\n\ndef can_arrange(arr):\n \"\"\"Create a function which returns the index of the element such that after \n removing that element the remaining array is itself sorted in ascending order.\n If the given array is already sorted in ascending order then return -1.\n Note: It is guaranteed that the array arr will either be sorted or it will\n have only one element such that after its removal the given array\n will become sorted in ascending order.\n - The given array will not contain duplicate values.\n Examples:\n can_arrange([1,2,4,3,5]) = 3\n can_arrange([1,2,3]) = -1\n \"\"\"\n#[SOLUTION]\n ind=-1\n i=1\n while i<len(arr):\n if arr[i]<arr[i-1]:\n ind=i\n i+=1\n return ind\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate([1,2,4,3,5])==3\n assert candidate([1,2,4,5])==-1\n assert candidate([1,4,2,5,6,7,8,9,10])==2\n\n # Check some edge cases that are easy to work out by hand.\n assert candidate([])==-1\n\n"}}},{"rowIdx":83,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'sorted_list_sum'\n\nFIX = \"\"\"\nAdd test case when input strings with equal length are not in sorted order.\n\"\"\"\n\n#[PROMPT]\n\ndef sorted_list_sum(lst):\n \"\"\"Write a function that accepts a list of strings as a parameter,\n deletes the strings that have odd lengths from it,\n and returns the resulted list with a sorted order,\n The list is always a list of strings and never an array of numbers,\n and it may contain duplicates.\n The order of the list should be ascending by length of each word, and you\n should return the list sorted by that rule.\n If two words have the same length, sort the list alphabetically.\n The function should return a list of strings in sorted order.\n You may assume that all words will have the same length.\n For example:\n assert list_sort([\"aa\", \"a\", \"aaa\"]) => [\"aa\"]\n assert list_sort([\"ab\", \"a\", \"aaa\", \"cd\"]) => [\"ab\", \"cd\"]\n \"\"\"\n#[SOLUTION]\n lst.sort()\n new_lst = []\n for i in lst:\n if len(i)%2 == 0:\n new_lst.append(i)\n return sorted(new_lst, key=len)\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate([\"aa\", \"a\", \"aaa\"]) == [\"aa\"]\n assert candidate([\"school\", \"AI\", \"asdf\", \"b\"]) == [\"AI\", \"asdf\", \"school\"]\n assert candidate([\"d\", \"b\", \"c\", \"a\"]) == []\n assert candidate([\"d\", \"dcba\", \"abcd\", \"a\"]) == [\"abcd\", \"dcba\"]\n\n # Check some edge cases that are easy to work out by hand.\n assert candidate([\"AI\", 
\"ai\", \"au\"]) == [\"AI\", \"ai\", \"au\"]\n assert candidate([\"a\", \"b\", \"b\", \"c\", \"c\", \"a\"]) == []\n assert candidate(['aaaa', 'bbbb', 'dd', 'cc']) == [\"cc\", \"dd\", \"aaaa\", \"bbbb\"]\n\n"}}},{"rowIdx":84,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'max_element'\n#[PROMPT]\n\n\ndef max_element(l: list):\n \"\"\"Return maximum element in the list.\n >>> max_element([1, 2, 3])\n 3\n >>> max_element([5, 3, -5, 2, -3, 3, 9, 0, 123, 1, -10])\n 123\n \"\"\"\n#[SOLUTION]\n m = l[0]\n for e in l:\n if e > m:\n m = e\n return m\n#[CHECK]\n\n\nMETADATA = {}\n\n\ndef check(candidate):\n assert candidate([1, 2, 3]) == 3\n assert candidate([5, 3, -5, 2, -3, 3, 9, 0, 123, 1, -10]) == 123\n\n"}}},{"rowIdx":85,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'sort_even'\n\nFIX = \"\"\"\nRemove sort helper function\n\"\"\"\n\n#[PROMPT]\n\n\ndef sort_even(l: list):\n \"\"\"This function takes a list l and returns a list l' such that\n l' is identical to l in the odd indicies, while its values at the even indicies are equal\n to the values of the even indicies of l, but sorted.\n >>> sort_even([1, 2, 3])\n [1, 2, 3]\n >>> sort_even([5, 6, 3, 4])\n [3, 6, 5, 4]\n \"\"\"\n#[SOLUTION]\n evens = l[::2]\n odds = l[1::2]\n evens.sort()\n ans = []\n for e, o in zip(evens, odds):\n ans.extend([e, o])\n if len(evens) > len(odds):\n ans.append(evens[-1])\n return ans\n#[CHECK]\n\n\nMETADATA = {}\n\n\ndef check(candidate):\n assert tuple(candidate([1, 2, 3])) == tuple([1, 2, 3])\n assert tuple(candidate([5, 3, -5, 2, -3, 3, 9, 0, 123, 1, -10])) == tuple([-10, 3, -5, 2, -3, 3, 5, 0, 9, 1, 123])\n assert tuple(candidate([5, 8, -12, 4, 23, 2, 3, 11, 12, -10])) == tuple([-12, 8, 3, 4, 5, 2, 12, 11, 23, -10])\n\n"}}},{"rowIdx":86,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'parse_nested_parens'\n#[PROMPT]\nfrom typing import List\n\n\ndef parse_nested_parens(paren_string: str) -> List[int]:\n \"\"\" Input to this function is a string represented multiple groups for nested parentheses separated by spaces.\n For each of the group, output the deepest level of nesting of parentheses.\n E.g. 
(()()) has maximum two levels of nesting while ((())) has three.\n\n >>> parse_nested_parens('(()()) ((())) () ((())()())')\n [2, 3, 1, 3]\n \"\"\"\n#[SOLUTION]\n def parse_paren_group(s):\n depth = 0\n max_depth = 0\n for c in s:\n if c == '(':\n depth += 1\n max_depth = max(depth, max_depth)\n else:\n depth -= 1\n\n return max_depth\n\n return [parse_paren_group(x) for x in paren_string.split(' ') if x]\n#[CHECK]\n\n\nMETADATA = {\n 'author': 'jt',\n 'dataset': 'test'\n}\n\n\ndef check(candidate):\n assert candidate('(()()) ((())) () ((())()())') == [2, 3, 1, 3]\n assert candidate('() (()) ((())) (((())))') == [1, 2, 3, 4]\n assert candidate('(()(())((())))') == [4]\n"}}},{"rowIdx":87,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'triangle_area'\n#[PROMPT]\n\n\ndef triangle_area(a, h):\n \"\"\"Given length of a side and high return area for a triangle.\n >>> triangle_area(5, 3)\n 7.5\n \"\"\"\n#[SOLUTION]\n return a * h / 2.0\n#[CHECK]\n\n\nMETADATA = {}\n\n\ndef check(candidate):\n assert candidate(5, 3) == 7.5\n assert candidate(2, 2) == 2.0\n assert candidate(10, 8) == 40.0\n\n"}}},{"rowIdx":88,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'fizz_buzz'\nFIX = \"\"\"\nUpdate doc string to remove requirement for print.\n\"\"\"\n\n#[PROMPT]\n\n\ndef fizz_buzz(n: int):\n \"\"\"Return the number of times the digit 7 appears in integers less than n which are divisible by 11 or 13.\n >>> fizz_buzz(50)\n 0\n >>> fizz_buzz(78)\n 2\n >>> fizz_buzz(79)\n 3\n \"\"\"\n#[SOLUTION]\n ns = []\n for i in range(n):\n if i % 11 == 0 or i % 13 == 0:\n ns.append(i)\n s = ''.join(list(map(str, ns)))\n ans = 0\n for c in s:\n ans += (c == '7')\n return ans\n#[CHECK]\n\n\nMETADATA = {}\n\n\ndef check(candidate):\n assert candidate(50) == 0\n assert candidate(78) == 2\n assert candidate(79) == 3\n assert candidate(100) == 3\n assert candidate(200) == 6\n assert candidate(4000) == 192\n assert candidate(10000) == 639\n assert candidate(100000) == 8026\n\n"}}},{"rowIdx":89,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'car_race_collision'\n#[PROMPT]\n\n\ndef car_race_collision(n: int):\n \"\"\"\n Imagine a road that's a perfectly straight infinitely long line.\n n cars are driving left to right; simultaneously, a different set of n cars\n are driving right to left. The two sets of cars start out being very far from\n each other. All cars move in the same speed. 
Two cars are said to collide\n when a car that's moving left to right hits a car that's moving right to left.\n However, the cars are infinitely sturdy and strong; as a result, they continue moving\n in their trajectory as if they did not collide.\n\n This function outputs the number of such collisions.\n \"\"\"\n#[SOLUTION]\n return n**2\n#[CHECK]\n\n\nMETADATA = {}\n\n\ndef check(candidate):\n assert candidate(2) == 4\n assert candidate(3) == 9\n assert candidate(4) == 16\n assert candidate(8) == 64\n assert candidate(10) == 100\n\n"}}},{"rowIdx":90,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'monotonic'\nFIX = \"\"\"\nAdd a few more tests.\n\"\"\"\n\n#[PROMPT]\n\n\ndef monotonic(l: list):\n \"\"\"Return True is list elements are monotonically increasing or decreasing.\n >>> monotonic([1, 2, 4, 20])\n True\n >>> monotonic([1, 20, 4, 10])\n False\n >>> monotonic([4, 1, 0, -10])\n True\n \"\"\"\n#[SOLUTION]\n if l == sorted(l) or l == sorted(l, reverse=True):\n return True\n return False\n#[CHECK]\n\n\nMETADATA = {}\n\n\ndef check(candidate):\n assert candidate([1, 2, 4, 10]) == True\n assert candidate([1, 2, 4, 20]) == True\n assert candidate([1, 20, 4, 10]) == False\n assert candidate([4, 1, 0, -10]) == True\n assert candidate([4, 1, 1, 0]) == True\n assert candidate([1, 2, 3, 2, 5, 60]) == False\n assert candidate([1, 2, 3, 4, 5, 60]) == True\n assert candidate([9, 9, 9, 9]) == True\n\n"}}},{"rowIdx":91,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'decode_shift'\n#[PROMPT]\n\n\ndef encode_shift(s: str):\n \"\"\"\n returns encoded string by shifting every character by 5 in the alphabet.\n \"\"\"\n return \"\".join([chr(((ord(ch) + 5 - ord(\"a\")) % 26) + ord(\"a\")) for ch in s])\n\n\ndef decode_shift(s: str):\n \"\"\"\n takes as input string encoded with encode_shift function. 
Returns decoded string.\n \"\"\"\n#[SOLUTION]\n return \"\".join([chr(((ord(ch) - 5 - ord(\"a\")) % 26) + ord(\"a\")) for ch in s])\n#[CHECK]\n\n\nMETADATA = {}\n\n\ndef check(candidate):\n from random import randint, choice\n import copy\n import string\n\n letters = string.ascii_lowercase\n for _ in range(100):\n str = ''.join(choice(letters) for i in range(randint(10, 20)))\n encoded_str = encode_shift(str)\n assert candidate(copy.deepcopy(encoded_str)) == str\n\n"}}},{"rowIdx":92,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'digitSum'\n#[PROMPT]\n\ndef digitSum(s):\n \"\"\"Task\n Write a function that takes a string as input and returns the sum of the upper characters only'\n ASCII codes.\n\n Examples:\n digitSum(\"\") => 0\n digitSum(\"abAB\") => 131\n digitSum(\"abcCd\") => 67\n digitSum(\"helloE\") => 69\n digitSum(\"woArBld\") => 131\n digitSum(\"aAaaaXa\") => 153\n \"\"\"\n#[SOLUTION]\n if s == \"\": return 0\n return sum(ord(char) if char.isupper() else 0 for char in s)\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert True, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate(\"\") == 0, \"Error\"\n assert candidate(\"abAB\") == 131, \"Error\"\n assert candidate(\"abcCd\") == 67, \"Error\"\n assert candidate(\"helloE\") == 69, \"Error\"\n assert candidate(\"woArBld\") == 131, \"Error\"\n assert candidate(\"aAaaaXa\") == 153, \"Error\"\n\n # Check some edge cases that are easy to work out by hand.\n assert True, \"This prints if this assert fails 2 (also good for debugging!)\"\n assert candidate(\" How are yOu?\") == 151, \"Error\"\n assert candidate(\"You arE Very Smart\") == 327, \"Error\"\n\n"}}},{"rowIdx":93,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'x_or_y'\n#[PROMPT]\n\ndef x_or_y(n, x, y):\n \"\"\"A simple program which should return the value of x if n is \n a prime number and should return the value of y otherwise.\n\n Examples:\n for x_or_y(7, 34, 12) == 34\n for x_or_y(15, 8, 5) == 5\n \n \"\"\"\n#[SOLUTION]\n if n == 1:\n return y\n for i in range(2, n):\n if n % i == 0:\n return y\n break\n else:\n return x\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate(7, 34, 12) == 34\n assert candidate(15, 8, 5) == 5\n assert candidate(3, 33, 5212) == 33\n assert candidate(1259, 3, 52) == 3\n assert candidate(7919, -1, 12) == -1\n assert candidate(3609, 1245, 583) == 583\n assert candidate(91, 56, 129) == 129\n assert candidate(6, 34, 1234) == 1234\n \n\n # Check some edge cases that are easy to work out by hand.\n assert candidate(1, 2, 0) == 0\n assert candidate(2, 2, 0) == 2\n\n"}}},{"rowIdx":94,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'choose_num'\n#[PROMPT]\n\ndef choose_num(x, y):\n \"\"\"This function takes two positive numbers x and y and returns the\n biggest even integer number that is in the range [x, y] inclusive. 
If \n there's no such number, then the function should return -1.\n\n For example:\n choose_num(12, 15) = 14\n choose_num(13, 12) = -1\n \"\"\"\n#[SOLUTION]\n if x > y:\n return -1\n if y % 2 == 0:\n return y\n if x == y:\n return -1\n return y - 1\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate(12, 15) == 14\n assert candidate(13, 12) == -1\n assert candidate(33, 12354) == 12354\n assert candidate(5234, 5233) == -1\n assert candidate(6, 29) == 28\n assert candidate(27, 10) == -1\n\n # Check some edge cases that are easy to work out by hand.\n assert candidate(7, 7) == -1\n assert candidate(546, 546) == 546\n\n"}}},{"rowIdx":95,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'move_one_ball'\n#[PROMPT]\n\ndef move_one_ball(arr):\n \"\"\"We have an array 'arr' of N integers arr[1], arr[2], ..., arr[N].The\n numbers in the array will be randomly ordered. Your task is to determine if\n it is possible to get an array sorted in non-decreasing order by performing \n the following operation on the given array:\n You are allowed to perform right shift operation any number of times.\n \n One right shift operation means shifting all elements of the array by one\n position in the right direction. The last element of the array will be moved to\n the starting position in the array i.e. 0th index. \n\n If it is possible to obtain the sorted array by performing the above operation\n then return True else return False.\n If the given array is empty then return True.\n\n Note: The given list is guaranteed to have unique elements.\n\n For Example:\n \n move_one_ball([3, 4, 5, 1, 2])==>True\n Explanation: By performin 2 right shift operations, non-decreasing order can\n be achieved for the given array.\n move_one_ball([3, 5, 4, 1, 2])==>False\n Explanation:It is not possible to get non-decreasing order for the given\n array by performing any number of right shift operations.\n \n \"\"\"\n#[SOLUTION]\n if len(arr)==0:\n return True\n sorted_array=sorted(arr)\n my_arr=[]\n \n min_value=min(arr)\n min_index=arr.index(min_value)\n my_arr=arr[min_index:]+arr[0:min_index]\n for i in range(len(arr)):\n if my_arr[i]!=sorted_array[i]:\n return False\n return True\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate([3, 4, 5, 1, 2])==True, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate([3, 5, 10, 1, 2])==True\n # Check some edge cases that are easy to work out by hand.\n assert candidate([3, 5, 4, 1, 2])==False, \"This prints if this assert fails 2 (also good for debugging!)\"\n assert candidate([])==True\n"}}},{"rowIdx":96,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'minSubArraySum'\n#[PROMPT]\n\ndef minSubArraySum(nums):\n \"\"\"\n Given an array of integers nums, find the minimum sum of any non-empty sub-array\n of nums.\n Example\n minSubArraySum([2, 3, 4, 1, 2, 4]) == 1\n minSubArraySum([-1, -2, -3]) == -6\n \"\"\"\n#[SOLUTION]\n max_sum = 0\n s = 0\n for num in nums:\n s += -num\n if (s < 0):\n s = 0\n max_sum = max(s, max_sum)\n if max_sum == 0:\n max_sum = max(-i for i in nums)\n min_sum = -max_sum\n return min_sum\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate([2, 3, 4, 1, 2, 4]) == 1, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate([-1, -2, -3]) == -6\n assert candidate([-1, -2, -3, 2, -10]) == -14\n assert candidate([-9999999999999999]) == -9999999999999999\n assert candidate([0, 10, 20, 1000000]) == 0\n assert 
candidate([-1, -2, -3, 10, -5]) == -6\n assert candidate([100, -1, -2, -3, 10, -5]) == -6\n assert candidate([10, 11, 13, 8, 3, 4]) == 3\n assert candidate([100, -33, 32, -1, 0, -2]) == -33\n\n # Check some edge cases that are easy to work out by hand.\n assert candidate([-10]) == -10, \"This prints if this assert fails 2 (also good for debugging!)\"\n assert candidate([7]) == 7\n assert candidate([1, -1]) == -1\n"}}},{"rowIdx":97,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'rolling_max'\n#[PROMPT]\nfrom typing import List, Tuple\n\n\ndef rolling_max(numbers: List[int]) -> List[int]:\n \"\"\" From a given list of integers, generate a list of rolling maximum element found until given moment\n in the sequence.\n >>> rolling_max([1, 2, 3, 2, 3, 4, 2])\n [1, 2, 3, 3, 3, 4, 4]\n \"\"\"\n#[SOLUTION]\n running_max = None\n result = []\n\n for n in numbers:\n if running_max is None:\n running_max = n\n else:\n running_max = max(running_max, n)\n\n result.append(running_max)\n\n return result\n#[CHECK]\n\n\nMETADATA = {\n 'author': 'jt',\n 'dataset': 'test'\n}\n\n\ndef check(candidate):\n assert candidate([]) == []\n assert candidate([1, 2, 3, 4]) == [1, 2, 3, 4]\n assert candidate([4, 3, 2, 1]) == [4, 4, 4, 4]\n assert candidate([3, 2, 3, 100, 3]) == [3, 3, 3, 100, 100]\n"}}},{"rowIdx":98,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'is_bored'\n#[PROMPT]\n\ndef is_bored(S):\n \"\"\"\n You'll be given a string of words, and your task is to count the number\n of boredoms. A boredom is a sentence that starts with the word \"I\".\n Sentences are delimited by '.', '?' or '!'.\n \n For example:\n >>> is_bored(\"Hello world\")\n 0\n >>> is_bored(\"The sky is blue. The sun is shining. I love this weather\")\n 1\n \"\"\"\n#[SOLUTION]\n import re\n sentences = re.split(r'[.?!]\\s*', S)\n return sum(sentence[0:2] == 'I ' for sentence in sentences)\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert candidate(\"Hello world\") == 0, \"Test 1\"\n assert candidate(\"Is the sky blue?\") == 0, \"Test 2\"\n assert candidate(\"I love It !\") == 1, \"Test 3\"\n assert candidate(\"bIt\") == 0, \"Test 4\"\n assert candidate(\"I feel good today. I will be productive. 
will kill It\") == 2, \"Test 5\"\n assert candidate(\"You and I are going for a walk\") == 0, \"Test 6\"\n\n # Check some edge cases that are easy to work out by hand.\n assert True, \"This prints if this assert fails 2 (also good for debugging!)\"\n\n"}}},{"rowIdx":99,"cells":{"python_code":{"kind":"string","value":"ENTRY_POINT = 'starts_one_ends'\n#[PROMPT]\n\ndef starts_one_ends(n):\n \"\"\"\n Given a positive integer n, return the count of the numbers of n-digit\n positive integers that start or end with 1.\n \"\"\"\n#[SOLUTION]\n if n == 1: return 1\n return 18 * (10 ** (n - 2))\n#[CHECK]\ndef check(candidate):\n\n # Check some simple cases\n assert True, \"This prints if this assert fails 1 (good for debugging!)\"\n assert candidate(1) == 1\n assert candidate(2) == 18\n assert candidate(3) == 180\n assert candidate(4) == 1800\n assert candidate(5) == 18000\n\n # Check some edge cases that are easy to work out by hand.\n assert True, \"This prints if this assert fails 2 (also good for debugging!)\"\n\n"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":0,"numItemsPerPage":100,"numTotalItems":7605,"offset":0,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NTMwNDY0Miwic3ViIjoiL2RhdGFzZXRzL2t5ZS9hbGwtb3BlbmFpLWdpdGh1Yi1jb2RlIiwiZXhwIjoxNzU1MzA4MjQyLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.vMoCBqvCOGUrtv02PPxEakkstQPpcb2RCg2X0_Fr1oauXNylmg8rlognQj3REACNF32f54ru1Vb2XE20P1sQDg","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}"><div><header class="bg-linear-to-t border-b border-gray-100 pt-4 xl:pt-0 from-purple-500/8 dark:from-purple-500/20 to-white to-70% dark:to-gray-950"><div class="mx-4 relative flex flex-col xl:flex-row"><h1 class="flex flex-wrap items-center max-md:leading-tight gap-y-1 text-lg xl:flex-none"><a href="https://huggingface.co/datasets" class="group flex items-center"><svg class="sm:mr-1 -mr-1 text-gray-400" style="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 25 25"><ellipse cx="12.5" cy="5" fill="currentColor" fill-opacity="0.25" rx="7.5" ry="2"></ellipse><path d="M12.5 15C16.6421 15 20 14.1046 20 13V20C20 21.1046 16.6421 22 12.5 22C8.35786 22 5 21.1046 5 20V13C5 14.1046 8.35786 15 12.5 15Z" fill="currentColor" opacity="0.5"></path><path d="M12.5 7C16.6421 7 20 6.10457 20 5V11.5C20 12.6046 16.6421 13.5 12.5 13.5C8.35786 13.5 5 12.6046 5 11.5V5C5 6.10457 8.35786 7 12.5 7Z" fill="currentColor" opacity="0.5"></path><path d="M5.23628 12C5.08204 12.1598 5 12.8273 5 13C5 14.1046 8.35786 15 12.5 15C16.6421 15 20 14.1046 20 13C20 12.8273 19.918 12.1598 19.7637 12C18.9311 12.8626 15.9947 13.5 12.5 13.5C9.0053 13.5 6.06886 12.8626 5.23628 12Z" fill="currentColor"></path></svg> <span class="mr-2.5 font-semibold text-gray-400 group-hover:text-gray-500 max-sm:hidden">Datasets:</span></a> <hr class="mx-1.5 h-2 translate-y-px rounded-sm border-r dark:border-gray-600 sm:hidden"> <div class="group flex flex-none items-center"><div class="relative mr-1 flex items-center"> <span class="inline-block "><span class="contents"><a href="https://huggingface.co/kye" class="text-gray-400 hover:text-blue-600"><img alt="" class="size-3.5 rounded-full flex-none" 
src="https://cdn-avatars.huggingface.co/v1/production/uploads/6270324ebecab9e2dcf245de/cMbtWSasyNlYc9hvsEEzt.jpeg" crossorigin="anonymous"></a></span> </span></div> <span class="inline-block "><span class="contents"><a href="https://huggingface.co/kye" class="text-gray-400 hover:text-blue-600">kye</a></span> </span> <div class="mx-0.5 text-gray-300">/</div></div> <div class="max-w-full xl:flex xl:min-w-0 xl:flex-nowrap xl:items-center xl:gap-x-1"><a class="break-words font-mono font-semibold hover:text-blue-600 text-[1.07rem] xl:truncate" href="https://huggingface.co/datasets/kye/all-openai-github-code">all-openai-github-code</a> <button class="text-xs mr-3 focus:outline-hidden inline-flex cursor-pointer items-center text-sm mx-0.5 text-gray-600 " title="Copy dataset name to clipboard" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> </button></div> <div class="inline-flex items-center overflow-hidden whitespace-nowrap rounded-md border bg-white text-sm leading-none text-gray-500 mr-2"><button class="relative flex items-center overflow-hidden from-red-50 to-transparent dark:from-red-900 px-1.5 py-1 hover:bg-linear-to-t focus:outline-hidden" title="Like"><svg class="left-1.5 absolute" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32" fill="currentColor"><path d="M22.45,6a5.47,5.47,0,0,1,3.91,1.64,5.7,5.7,0,0,1,0,8L16,26.13,5.64,15.64a5.7,5.7,0,0,1,0-8,5.48,5.48,0,0,1,7.82,0L16,10.24l2.53-2.58A5.44,5.44,0,0,1,22.45,6m0-2a7.47,7.47,0,0,0-5.34,2.24L16,7.36,14.89,6.24a7.49,7.49,0,0,0-10.68,0,7.72,7.72,0,0,0,0,10.82L16,29,27.79,17.06a7.72,7.72,0,0,0,0-10.82A7.49,7.49,0,0,0,22.45,4Z"></path></svg> <span class="ml-4 pl-0.5 ">like</span></button> <button class="focus:outline-hidden flex items-center border-l px-1.5 py-1 text-gray-400 hover:bg-gray-50 focus:bg-gray-100 dark:hover:bg-gray-900 dark:focus:bg-gray-800" title="See users who liked this repository">1</button></div> </h1> <div class="flex flex-col-reverse gap-x-2 sm:flex-row sm:items-center sm:justify-between xl:ml-auto"><div class="-mb-px flex h-12 items-center overflow-x-auto overflow-y-hidden "> <a class="tab-alternate" href="https://huggingface.co/datasets/kye/all-openai-github-code"><svg class="mr-1.5 text-gray-400 flex-none" style="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-quaternary" d="M20.23 7.24L12 12L3.77 7.24a1.98 1.98 0 0 1 .7-.71L11 2.76c.62-.35 1.38-.35 2 0l6.53 3.77c.29.173.531.418.7.71z" opacity=".25" fill="currentColor"></path><path class="uim-tertiary" d="M12 12v9.5a2.09 2.09 0 0 1-.91-.21L4.5 17.48a2.003 2.003 0 0 1-1-1.73v-7.5a2.06 2.06 0 0 1 .27-1.01L12 12z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M20.5 8.25v7.5a2.003 2.003 0 0 1-1 1.73l-6.62 3.82c-.275.13-.576.198-.88.2V12l8.23-4.76c.175.308.268.656.27 1.01z" 
fill="currentColor"></path></svg> Dataset card </a><a class="tab-alternate active" href="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/"><svg class="mr-1.5 text-gray-400 flex-none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 12 12"><path fill="currentColor" d="M2.5 2h7a1 1 0 0 1 1 1v6a1 1 0 0 1-1 1h-7a1 1 0 0 1-1-1V3a1 1 0 0 1 1-1Zm0 2v2h3V4h-3Zm4 0v2h3V4h-3Zm-4 3v2h3V7h-3Zm4 0v2h3V7h-3Z"></path></svg> Data Studio </a><a class="tab-alternate" href="https://huggingface.co/datasets/kye/all-openai-github-code/tree/main"><svg class="mr-1.5 text-gray-400 flex-none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><path class="uim-tertiary" d="M21 19h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2zm0-4h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2zm0-8h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2zm0 4h-8a1 1 0 0 1 0-2h8a1 1 0 0 1 0 2z" opacity=".5" fill="currentColor"></path><path class="uim-primary" d="M9 19a1 1 0 0 1-1-1V6a1 1 0 0 1 2 0v12a1 1 0 0 1-1 1zm-6-4.333a1 1 0 0 1-.64-1.769L3.438 12l-1.078-.898a1 1 0 0 1 1.28-1.538l2 1.667a1 1 0 0 1 0 1.538l-2 1.667a.999.999 0 0 1-.64.231z" fill="currentColor"></path></svg> <span class="xl:hidden">Files</span> <span class="hidden xl:inline">Files and versions</span> <span class="inline-block "><span class="contents"><div slot="anchor" class="shadow-purple-500/10 ml-2 inline-flex -translate-y-px items-center gap-0.5 rounded-md border bg-white px-1 py-0.5 align-middle text-xs font-semibold leading-none text-gray-800 shadow-sm dark:border-gray-700 dark:bg-gradient-to-b dark:from-gray-925 dark:to-gray-925 dark:text-gray-300"><svg class="size-3 " xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 12 12"><path fill-rule="evenodd" clip-rule="evenodd" d="M6.14 3.64 5.1 4.92 2.98 2.28h2.06l1.1 1.36Zm0 4.72-1.1 1.36H2.98l2.13-2.64 1.03 1.28Zm4.9 1.36L8.03 6l3-3.72H8.96L5.97 6l3 3.72h2.06Z" fill="#7875FF"></path><path d="M4.24 6 2.6 8.03.97 6 2.6 3.97 4.24 6Z" fill="#FF7F41" opacity="1"></path></svg> <span>xet</span> </div></span> </span> </a><a class="tab-alternate" href="https://huggingface.co/datasets/kye/all-openai-github-code/discussions"><svg class="mr-1.5 text-gray-400 flex-none" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 
9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path><path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path></svg> Community </a></div> </div></div></header> </div> <div class="flex flex-col w-full"> <div class="flex h-full flex-1"> <div class="flex flex-1 flex-col overflow-hidden " style="height: calc(100vh - 48px)"><div class="flex flex-col overflow-hidden h-full "> <div class="flex flex-1 flex-col overflow-hidden "><div class="flex flex-1 flex-col overflow-hidden"><div class="flex min-h-0 flex-1"><div class="flex flex-1 flex-col overflow-hidden"><div class="md:shadow-xs dark:border-gray-800 md:my-4 md:ml-4 md:rounded-lg md:border flex min-w-0 flex-wrap "><div class="flex min-w-0 flex-1 flex-wrap"><div class="grid flex-1 grid-cols-1 overflow-hidden text-sm md:grid-cols-2 md:place-content-center md:rounded-lg"><label class="relative block flex-1 px-3 py-2 hover:bg-gray-50 dark:border-gray-850 dark:hover:bg-gray-950 md:border-r md:border-r-0 hidden" title="default"><span class="text-gray-500">Subset (1)</span> <div class="flex items-center whitespace-nowrap"><span class="truncate">default</span> <span class="mx-2 text-gray-500">Β·</span> <span class="text-gray-500">7.61k rows</span> <svg class="ml-auto min-w-6 pl-2" width="1em" height="1em" viewBox="0 0 12 7" 
fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" stroke="currentColor"></path></svg></div> <select class="absolute inset-0 z-10 w-full cursor-pointer border-0 bg-white text-base opacity-0"><optgroup label="Subset (1)"><option value="default" selected>default (7.61k rows)</option></optgroup></select></label> <label class="relative block flex-1 px-3 py-2 hover:bg-gray-50 dark:border-gray-850 dark:hover:bg-gray-900 md:border-r md:border-r" title="train"><div class="text-gray-500">Split (1)</div> <div class="flex items-center overflow-hidden whitespace-nowrap"><span class="truncate">train</span> <span class="mx-2 text-gray-500">Β·</span> <span class="text-gray-500">7.61k rows</span> <svg class="ml-auto min-w-6 pl-2" width="1em" height="1em" viewBox="0 0 12 7" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" stroke="currentColor"></path></svg></div> <select class="absolute inset-0 z-10 w-full cursor-pointer border-0 bg-white text-base opacity-0"><optgroup label="Split (1)"><option value="train" selected>train (7.61k rows)</option></optgroup></select></label></div></div> <div class="hidden flex-none flex-col items-center gap-0.5 border-l px-1 md:flex justify-end"> <span class="inline-block "><span class="contents"><div slot="anchor"><button class="group text-gray-500 hover:text-gray-700" aria-label="Hide sidepanel"><div class="rounded-xs flex size-4 items-center justify-center border border-gray-400 bg-gray-100 hover:border-gray-600 hover:bg-blue-50 dark:border-gray-600 dark:bg-gray-800 dark:hover:bg-gray-700 dark:group-hover:border-gray-400"><div class="float-left h-full w-[65%]"></div> <div class="float-right h-full w-[35%] bg-gray-400 group-hover:bg-gray-600 dark:bg-gray-600 dark:group-hover:bg-gray-400"></div></div></button></div></span> </span> <div class="relative "> <button class="btn px-0.5 py-0.5 " type="button"> <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="p-0.5" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><circle cx="16" cy="7" r="3" fill="currentColor"></circle><circle cx="16" cy="16" r="3" fill="currentColor"></circle><circle cx="16" cy="25" r="3" fill="currentColor"></circle></svg> </button> </div></div></div> <div class="flex min-h-0 flex-1 flex-col border dark:border-gray-800 md:mb-4 md:ml-4 md:rounded-lg"> <div class="bg-linear-to-r text-smd relative flex items-center dark:border-gray-900 dark:bg-gray-950 false rounded-t-lg [&:has(:focus)]:from-gray-50 [&:has(:focus)]:to-transparent [&:has(:focus)]:to-20% dark:[&:has(:focus)]:from-gray-900"><form class="flex-1"><svg class="absolute left-3 top-1/2 transform -translate-y-1/2 pointer-events-none text-gray-400" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M30 28.59L22.45 21A11 11 0 1 0 21 22.45L28.59 30zM5 14a9 9 0 1 1 9 9a9 9 0 0 1-9-9z" fill="currentColor"></path></svg> <input disabled class="outline-hidden h-9 w-full border-none bg-transparent px-1 pl-9 pr-3 placeholder:text-gray-400 " placeholder="Search this dataset" dir="auto"></form> <div class="flex items-center gap-2 px-2 py-1"><button type="button" class="hover:bg-yellow-200/70 flex items-center gap-1 rounded-md border border-yellow-200 bg-yellow-100 pl-0.5 pr-1 text-[.8rem] leading-normal text-gray-700 dark:border-orange-500/25 dark:bg-orange-500/20 
dark:text-gray-300 dark:hover:brightness-110 md:hidden"><div class="rounded-sm bg-yellow-300 px-1 font-mono text-[.7rem] font-bold text-black dark:bg-yellow-700 dark:text-gray-200">SQL </div> Console </button></div></div> <div class="flex flex-1 flex-col overflow-hidden min-h-64 flex w-full flex-col border-t md:rounded-b-lg md:shadow-lg"> <div class="flex-1 relative overflow-auto"><table class="w-full table-auto rounded-lg font-mono text-xs text-gray-900"><thead class="shadow-xs sticky left-0 right-0 top-0 z-1 bg-white align-top"><tr class="space-y-54 h-full min-w-fit divide-x border-b text-left"><th class="h-full max-w-sm p-2 text-left relative w-full"><div class="flex h-full flex-col flex-nowrap justify-between"><div><div class="flex items-center justify-between">python_code <form class="flex flex-col"><button id="asc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="-rotate-180 transform text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button> <button id="desc" class="-mr-1 ml-2 h-[0.4rem] w-[0.8rem] transition ease-in-out"><svg class="text-gray-300 hover:text-gray-500" xmlns="http://www.w3.org/2000/svg" viewBox="0 64 256 128" fill="currentColor" aria-hidden="true"><path d="M213.65674,101.657l-80,79.99976a7.99945,7.99945,0,0,1-11.31348,0l-80-79.99976A8,8,0,0,1,48,88H208a8,8,0,0,1,5.65674,13.657Z"></path></svg></button></form></div> <div class="mb-2 whitespace-nowrap text-xs font-normal text-gray-500"><span>string</span><span class="italic text-gray-400 before:mx-1 before:content-['Β·']">lengths</span></div></div> <div><div class="" style="height: 40px; padding-top: 2px"><svg width="130" height="28"><g><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="0" y="0" width="11.2" height="30" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="13.2" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="26.4" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="39.599999999999994" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="52.8" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="66" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="79.19999999999999" y="25" width="11.2" height="5" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="92.39999999999999" y="26" width="11.2" height="4" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="105.6" y="26" width="11.2" height="4" fill-opacity="1"></rect><rect class="fill-gray-400 dark:fill-gray-500/80" rx="2" x="118.8" y="25" width="11.2" height="5" fill-opacity="1"></rect></g><rect class="fill-white dark:fill-gray-900" x="0" y="26" width="130" height="2" stroke-opacity="1"></rect><line class="stroke-gray-100 dark:stroke-gray-500/20" x1="0" y1="27.5" x2="130" y2="27.5" stroke-opacity="1"></line><g><rect class="fill-indigo-500 cursor-pointer" x="-1" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="12.2" y="0" width="13.2" 
height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="25.4" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="38.599999999999994" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="51.8" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="65" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="78.19999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="91.39999999999999" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="104.6" y="0" width="13.2" height="30" fill-opacity="0"></rect><rect class="fill-indigo-500 cursor-pointer" x="117.8" y="0" width="13.2" height="30" fill-opacity="0"></rect></g></svg> <div class="relative font-light text-gray-400" style="height: 10px; width: 130px;"><div class="absolute left-0 overflow-hidden text-ellipsis whitespace-nowrap" style="max-width: 60px">0</div> <div class="absolute overflow-hidden text-ellipsis whitespace-nowrap" style="right: 0px; max-width: 60px">869k</div> </div></div></div></div> </th></tr></thead> <tbody class="h-16 overflow-scroll"><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="0"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from setuptools import setup, find_packages setup( name='coinrun', packages=find_packages(), version='0.0.1', ) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="1"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import numpy as np from coinrun import setup_utils, make def random_agent(num_envs=1, max_steps=100000): setup_utils.setup_and_load(use_cmd_line_args=False) env = make('standard', num_envs=num_envs) for step in range(max_steps): acts = np.array([env.action_space.sample() for _ in range(env.num_envs)]) _obs, rews, _dones, _infos = env.step(acts) print("step", step, "rews", rews) env.close() if __name__ == '__main__': random_agent()</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="2"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" Load an agent trained with train_agent.py and """ import time import tensorflow as tf import numpy as np from coinrun import setup_utils import coinrun.main_utils as utils from coinrun.config import Config from coinrun import policies, wrappers mpi_print = utils.mpi_print def create_act_model(sess, env, nenvs): ob_space = env.observation_space ac_space = env.action_space policy = policies.get_policy() act = policy(sess, ob_space, ac_space, nenvs, 1, reuse=False) return act def enjoy_env_sess(sess): should_render = True should_eval = Config.TRAIN_EVAL or Config.TEST_EVAL rep_count = Config.REP if should_eval: env = 
utils.make_general_env(Config.NUM_EVAL) should_render = False else: env = utils.make_general_env(1) env = wrappers.add_final_wrappers(env) if should_render: from gym.envs.classic_control import rendering nenvs = env.num_envs agent = create_act_model(sess, env, nenvs) sess.run(tf.global_variables_initializer()) loaded_params = utils.load_params_for_scope(sess, 'model') if not loaded_params: print('NO SAVED PARAMS LOADED') obs = env.reset() t_step = 0 if should_render: viewer = rendering.SimpleImageViewer() should_render_obs = not Config.IS_HIGH_RES def maybe_render(info=None): if should_render and not should_render_obs: env.render() maybe_render() scores = np.array([0] * nenvs) score_counts = np.array([0] * nenvs) curr_rews = np.zeros((nenvs, 3)) def should_continue(): if should_eval: return np.sum(score_counts) < rep_count * nenvs return True state = agent.initial_state done = np.zeros(nenvs) while should_continue(): action, values, state, _ = agent.step(obs, state, done) obs, rew, done, info = env.step(action) if should_render and should_render_obs: if np.shape(obs)[-1] % 3 == 0: ob_frame = obs[0,:,:,-3:] else: ob_frame = obs[0,:,:,-1] ob_frame = np.stack([ob_frame] * 3, axis=2) viewer.imshow(ob_frame) curr_rews[:,0] += rew for i, d in enumerate(done): if d: if score_counts[i] < rep_count: score_counts[i] += 1 if 'episode' in info[i]: scores[i] += info[i].get('episode')['r'] if t_step % 100 == 0: mpi_print('t', t_step, values[0], done[0], rew[0], curr_rews[0], np.shape(obs)) maybe_render(info[0]) t_step += 1 if should_render: time.sleep(.02) if done[0]: if should_render: mpi_print('ep_rew', curr_rews) curr_rews[:] = 0 result = 0 if should_eval: mean_score = np.mean(scores) / rep_count max_idx = np.argmax(scores) mpi_print('scores', scores / rep_count) print('mean_score', mean_score) mpi_print('max idx', max_idx) mpi_mean_score = utils.mpi_average([mean_score]) mpi_print('mpi_mean', mpi_mean_score) result = mean_score return result def main(): utils.setup_mpi_gpus() setup_utils.setup_and_load() with tf.Session() as sess: enjoy_env_sess(sess) if __name__ == '__main__': main()</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="3"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" Train an agent using a PPO2 based on OpenAI Baselines. 
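
# Illustrative sketch (not part of the CoinRun source): the act-model interface
# used above. Both CnnPolicy and LstmPolicy expose
# step(obs, state, done) -> (action, value, next_state, neglogp), so evaluation
# code can stay agnostic to whether an LSTM is in use (state is simply None for
# the CNN policy). This mirrors the loop in enjoy_env_sess; the `rollout`
# helper itself is hypothetical.
import numpy as np

def rollout(agent, env, num_steps=100):
    obs = env.reset()
    state = agent.initial_state          # None for CnnPolicy, an array for LstmPolicy
    done = np.zeros(env.num_envs)
    total_rew = np.zeros(env.num_envs)

    for _ in range(num_steps):
        action, _value, state, _neglogp = agent.step(obs, state, done)
        obs, rew, done, _info = env.step(action)
        total_rew += rew

    return total_rew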
""" import time from mpi4py import MPI import tensorflow as tf from baselines.common import set_global_seeds import coinrun.main_utils as utils from coinrun import setup_utils, policies, wrappers, ppo2 from coinrun.config import Config def main(): args = setup_utils.setup_and_load() comm = MPI.COMM_WORLD rank = comm.Get_rank() seed = int(time.time()) % 10000 set_global_seeds(seed * 100 + rank) utils.setup_mpi_gpus() config = tf.ConfigProto() config.gpu_options.allow_growth = True # pylint: disable=E1101 nenvs = Config.NUM_ENVS total_timesteps = int(256e6) save_interval = args.save_interval env = utils.make_general_env(nenvs, seed=rank) with tf.Session(config=config): env = wrappers.add_final_wrappers(env) policy = policies.get_policy() ppo2.learn(policy=policy, env=env, save_interval=save_interval, nsteps=Config.NUM_STEPS, nminibatches=Config.NUM_MINIBATCHES, lam=0.95, gamma=Config.GAMMA, noptepochs=Config.PPO_EPOCHS, log_interval=1, ent_coef=Config.ENTROPY_COEFF, lr=lambda f : f * Config.LEARNING_RATE, cliprange=lambda f : f * 0.2, total_timesteps=total_timesteps) if __name__ == '__main__': main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="4"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from mpi4py import MPI import argparse import os class ConfigSingle(object): """ A global config object that can be initialized from command line arguments or keyword arguments. """ def __init__(self): self.WORKDIR = './saved_models/' self.TB_DIR = '/tmp/tensorflow' if not os.path.exists(self.WORKDIR): os.makedirs(self.WORKDIR, exist_ok=True) self.LOG_ALL_MPI = True self.SYNC_FROM_ROOT = True arg_keys = [] bool_keys = [] type_keys = [] # The runid, used to determine the name for save files. type_keys.append(('runid', 'run_id', str, 'tmp')) # The runid whose parameters and settings you want to load. type_keys.append(('resid', 'restore_id', str, None)) # The game to be played. # One of {'standard', 'platform', 'maze'} (for CoinRun, CoinRun-Platforms, Random-Mazes) type_keys.append(('gamet', 'game_type', str, 'standard', True)) # The convolutional architecture to use # One of {'nature', 'impala', 'impalalarge'} type_keys.append(('arch', 'architecture', str, 'impala', True)) # Should the model include an LSTM type_keys.append(('lstm', 'use_lstm', int, 0, True)) # The number of parallel environments to run type_keys.append(('ne', 'num_envs', int, 32, True)) # The number of levels in the training set. # If NUM_LEVELS = 0, the training set is unbounded. All level seeds will be randomly generated. # Use SET_SEED = -1 and NUM_LEVELS = 500 to train with the same levels in the paper. type_keys.append(('nlev', 'num_levels', int, 0, True)) # Provided as a seed for training set generation. # If SET_SEED = -1, this seed is not used and level seeds with be drawn from the range [0, NUM_LEVELS). # Use SET_SEED = -1 and NUM_LEVELS = 500 to train with the same levels in the paper. # NOTE: This value must and will be saved, in order to use the same training set for evaluation and/or visualization. 
type_keys.append(('set-seed', 'set_seed', int, -1, True)) # PPO Hyperparameters type_keys.append(('ns', 'num_steps', int, 256)) type_keys.append(('nmb', 'num_minibatches', int, 8)) type_keys.append(('ppoeps', 'ppo_epochs', int, 3)) type_keys.append(('ent', 'entropy_coeff', float, .01)) type_keys.append(('lr', 'learning_rate', float, 5e-4)) type_keys.append(('gamma', 'gamma', float, 0.999)) # Should the agent's velocity be painted in the upper left corner of observations. # 1/0 means True/False # PAINT_VEL_INFO = -1 uses smart defaulting -- will default to 1 if GAME_TYPE is 'standard' (CoinRun), 0 otherwise type_keys.append(('pvi', 'paint_vel_info', int, -1, True)) # Should batch normalization be used after each convolutional layer # 1/0 means True/False # This code only supports training-mode batch normalization (normalizing with statistics of the current batch). # In practice, we found this is nearly as effective as tracking the moving average of the statistics. # NOTE: Only applies to IMPALA and IMPALA-Large architectures type_keys.append(('norm', 'use_batch_norm', int, 0, True)) # What dropout probability to use after each convolutional layer # NOTE: Only applies to IMPALA and IMPALA-Large architectures type_keys.append(('dropout', 'dropout', float, 0.0, True)) # Should data augmentation be used # 1/0 means True/False type_keys.append(('uda', 'use_data_augmentation', int, 0)) # The l2 penalty to use during training type_keys.append(('l2', 'l2_weight', float, 0.0)) # The probability the agent's action is replaced with a random action type_keys.append(('eps', 'epsilon_greedy', float, 0.0)) # The number of frames to stack for each observation. # No frame stack is necessary if PAINT_VEL_INFO = 1 type_keys.append(('fs', 'frame_stack', int, 1, True)) # Should observations be transformed to grayscale # 1/0 means True/False type_keys.append(('ubw', 'use_black_white', int, 0, True)) # Overwrite the latest save file after this many updates type_keys.append(('si', 'save_interval', int, 10)) # The number of evaluation environments to use type_keys.append(('num-eval', 'num_eval', int, 20, True)) # The number of episodes to evaluate with each evaluation environment type_keys.append(('rep', 'rep', int, 1)) # Should half the workers act solely has test workers for evaluation # These workers will run on test levels and not contributing to training bool_keys.append(('test', 'test')) # Perform evaluation with all levels sampled from the training set bool_keys.append(('train-eval', 'train_eval')) # Perform evaluation with all levels sampled from the test set (unseen levels of high difficulty) bool_keys.append(('test-eval', 'test_eval')) # Only generate high difficulty levels bool_keys.append(('highd', 'high_difficulty')) # Use high resolution images for rendering bool_keys.append(('hres', 'is_high_res')) self.RES_KEYS = [] for tk in type_keys: arg_keys.append(self.process_field(tk[1])) if (len(tk) > 4) and tk[4]: self.RES_KEYS.append(tk[1]) for bk in bool_keys: arg_keys.append(bk[1]) if (len(bk) > 2) and bk[2]: self.RES_KEYS.append(bk[1]) self.arg_keys = arg_keys self.bool_keys = bool_keys self.type_keys = type_keys self.load_data = {} self.args_dict = {} def is_test_rank(self): if self.TEST: rank = MPI.COMM_WORLD.Get_rank() return rank % 2 == 1 return False def get_test_frac(self): return .5 if self.TEST else 0 def get_load_data(self, load_key='default'): if not load_key in self.load_data: return None return self.load_data[load_key] def set_load_data(self, ld, load_key='default'): self.load_data[load_key] 
= ld def process_field(self, name): return name.replace('-','_') def deprocess_field(self, name): return name.replace('_','-') def parse_all_args(self, args): assert isinstance(args, argparse.Namespace), 'expected argparse.Namespace object' update_dict = vars(args) self.parse_args_dict(update_dict) def parse_args_dict(self, update_dict): self.args_dict.update(update_dict) for ak in self.args_dict: val = self.args_dict[ak] if isinstance(val, str): val = self.process_field(val) setattr(self, ak.upper(), val) self.compute_args_dependencies() def compute_args_dependencies(self): if self.is_test_rank(): self.NUM_LEVELS = 0 self.USE_DATA_AUGMENTATION = 0 self.EPSILON_GREEDY = 0 self.HIGH_DIFFICULTY = 1 if self.PAINT_VEL_INFO < 0: if self.GAME_TYPE == 'standard': self.PAINT_VEL_INFO = 1 else: self.PAINT_VEL_INFO = 0 if self.TEST_EVAL: self.NUM_LEVELS = 0 self.HIGH_DIFFICULTY = 1 self.TRAIN_TEST_COMM = MPI.COMM_WORLD.Split(1 if self.is_test_rank() else 0, 0) def get_load_filename(self, base_name=None, restore_id=None): if restore_id is None: restore_id = Config.RESTORE_ID if restore_id is None: return None filename = Config.get_save_file_for_rank(0, self.process_field(restore_id), base_name=base_name) return filename def get_save_path(self, runid=None): return self.WORKDIR + self.get_save_file(runid) def get_save_file_for_rank(self, rank, runid=None, base_name=None): if runid is None: runid = self.RUN_ID extra = '' if base_name is not None: extra = '_' + base_name return 'sav_' + runid + extra + '_' + str(rank) def get_save_file(self, runid=None, base_name=None): comm = MPI.COMM_WORLD rank = comm.Get_rank() return self.get_save_file_for_rank(rank, runid, base_name=base_name) def get_arg_text(self): arg_strs = [] for key in self.args_dict: arg_strs.append(key + '=' + str(self.args_dict[key])) return arg_strs def get_args_dict(self): _args_dict = {} _args_dict.update(self.args_dict) return _args_dict def initialize_args(self, use_cmd_line_args=True, **kwargs): default_args = {} for tk in self.type_keys: default_args[self.process_field(tk[1])] = tk[3] for bk in self.bool_keys: default_args[bk[1]] = False default_args.update(kwargs) parser = argparse.ArgumentParser() for tk in self.type_keys: parser.add_argument('-' + tk[0], '--' + self.deprocess_field(tk[1]), type=tk[2], default=default_args[tk[1]]) for bk in self.bool_keys: parser.add_argument('--' + bk[0], dest=bk[1], action="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/store_true") bk_kwargs = {bk[1]: default_args[bk[1]]} parser.set_defaults(**bk_kwargs) if use_cmd_line_args: args = parser.parse_args() else: args = parser.parse_args(args=[]) self.parse_all_args(args) return args Config = ConfigSingle() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="5"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" This is a copy of PPO from openai/baselines (https://github.com/openai/baselines/blob/52255beda5f5c8760b0ae1f676aa656bb1a61f80/baselines/ppo2/ppo2.py) with some minor changes. 
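
# Illustrative sketch (not part of the CoinRun source): configuring a run
# programmatically instead of via the command line. setup_and_load forwards
# keyword arguments to Config.initialize_args, so any option defined above
# (num_levels, set_seed, paint_vel_info, ...) can be overridden in code.
# The specific option values below are arbitrary examples.
import numpy as np
from coinrun import setup_utils, make

setup_utils.setup_and_load(use_cmd_line_args=False,
                           num_levels=500,
                           set_seed=13,
                           paint_vel_info=1)

env = make('standard', num_envs=4)
acts = np.array([env.action_space.sample() for _ in range(env.num_envs)])
obs, rews, dones, infos = env.step(acts)
print(obs.shape, rews.shape)   # per-env HxWx3 uint8 frames and per-env rewards
env.close()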
""" import time import joblib import numpy as np import tensorflow as tf from collections import deque from mpi4py import MPI from coinrun.tb_utils import TB_Writer import coinrun.main_utils as utils from coinrun.config import Config mpi_print = utils.mpi_print from baselines.common.runners import AbstractEnvRunner from baselines.common.tf_util import initialize from baselines.common.mpi_util import sync_from_root class MpiAdamOptimizer(tf.train.AdamOptimizer): """Adam optimizer that averages gradients across mpi processes.""" def __init__(self, comm, **kwargs): self.comm = comm self.train_frac = 1.0 - Config.get_test_frac() tf.train.AdamOptimizer.__init__(self, **kwargs) def compute_gradients(self, loss, var_list, **kwargs): grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs) grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None] flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0) if Config.is_test_rank(): flat_grad = tf.zeros_like(flat_grad) shapes = [v.shape.as_list() for g, v in grads_and_vars] sizes = [int(np.prod(s)) for s in shapes] num_tasks = self.comm.Get_size() buf = np.zeros(sum(sizes), np.float32) def _collect_grads(flat_grad): self.comm.Allreduce(flat_grad, buf, op=MPI.SUM) np.divide(buf, float(num_tasks) * self.train_frac, out=buf) return buf avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32) avg_flat_grad.set_shape(flat_grad.shape) avg_grads = tf.split(avg_flat_grad, sizes, axis=0) avg_grads_and_vars = [(tf.reshape(g, v.shape), v) for g, (_, v) in zip(avg_grads, grads_and_vars)] return avg_grads_and_vars class Model(object): def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train, nsteps, ent_coef, vf_coef, max_grad_norm): sess = tf.get_default_session() train_model = policy(sess, ob_space, ac_space, nbatch_train, nsteps) norm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) act_model = policy(sess, ob_space, ac_space, nbatch_act, 1) A = train_model.pdtype.sample_placeholder([None]) ADV = tf.placeholder(tf.float32, [None]) R = tf.placeholder(tf.float32, [None]) OLDNEGLOGPAC = tf.placeholder(tf.float32, [None]) OLDVPRED = tf.placeholder(tf.float32, [None]) LR = tf.placeholder(tf.float32, []) CLIPRANGE = tf.placeholder(tf.float32, []) neglogpac = train_model.pd.neglogp(A) entropy = tf.reduce_mean(train_model.pd.entropy()) vpred = train_model.vf vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE) vf_losses1 = tf.square(vpred - R) vf_losses2 = tf.square(vpredclipped - R) vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2)) ratio = tf.exp(OLDNEGLOGPAC - neglogpac) pg_losses = -ADV * ratio pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE) pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2)) approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC)) clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE))) params = tf.trainable_variables() weight_params = [v for v in params if '/b' not in v.name] total_num_params = 0 for p in params: shape = p.get_shape().as_list() num_params = np.prod(shape) mpi_print('param', p, num_params) total_num_params += num_params mpi_print('total num params:', total_num_params) l2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in weight_params]) loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef + l2_loss * Config.L2_WEIGHT if Config.SYNC_FROM_ROOT: trainer = MpiAdamOptimizer(MPI.COMM_WORLD, 
learning_rate=LR, epsilon=1e-5) else: trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5) grads_and_var = trainer.compute_gradients(loss, params) grads, var = zip(*grads_and_var) if max_grad_norm is not None: grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm) grads_and_var = list(zip(grads, var)) _train = trainer.apply_gradients(grads_and_var) def train(lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None): advs = returns - values adv_mean = np.mean(advs, axis=0, keepdims=True) adv_std = np.std(advs, axis=0, keepdims=True) advs = (advs - adv_mean) / (adv_std + 1e-8) td_map = {train_model.X:obs, A:actions, ADV:advs, R:returns, LR:lr, CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values} if states is not None: td_map[train_model.S] = states td_map[train_model.M] = masks return sess.run( [pg_loss, vf_loss, entropy, approxkl, clipfrac, l2_loss, _train], td_map )[:-1] self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac', 'l2_loss'] def save(save_path): ps = sess.run(params) joblib.dump(ps, save_path) def load(load_path): loaded_params = joblib.load(load_path) restores = [] for p, loaded_p in zip(params, loaded_params): restores.append(p.assign(loaded_p)) sess.run(restores) self.train = train self.train_model = train_model self.act_model = act_model self.step = act_model.step self.value = act_model.value self.initial_state = act_model.initial_state self.save = save self.load = load if Config.SYNC_FROM_ROOT: if MPI.COMM_WORLD.Get_rank() == 0: initialize() global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="") sync_from_root(sess, global_variables) #pylint: disable=E1101 else: initialize() class Runner(AbstractEnvRunner): def __init__(self, *, env, model, nsteps, gamma, lam): super().__init__(env=env, model=model, nsteps=nsteps) self.lam = lam self.gamma = gamma def run(self): # Here, we init the lists that will contain the mb of experiences mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[] mb_states = self.states epinfos = [] # For n in range number of steps for _ in range(self.nsteps): # Given observations, get action value and neglopacs # We already have self.obs because Runner superclass run self.obs[:] = env.reset() on init actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones) mb_obs.append(self.obs.copy()) mb_actions.append(actions) mb_values.append(values) mb_neglogpacs.append(neglogpacs) mb_dones.append(self.dones) # Take actions in env and look the results # Infos contains a ton of useful informations self.obs[:], rewards, self.dones, infos = self.env.step(actions) for info in infos: maybeepinfo = info.get('episode') if maybeepinfo: epinfos.append(maybeepinfo) mb_rewards.append(rewards) #batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype) mb_rewards = np.asarray(mb_rewards, dtype=np.float32) mb_actions = np.asarray(mb_actions) mb_values = np.asarray(mb_values, dtype=np.float32) mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32) mb_dones = np.asarray(mb_dones, dtype=np.bool) last_values = self.model.value(self.obs, self.states, self.dones) # discount/bootstrap off value fn mb_returns = np.zeros_like(mb_rewards) mb_advs = np.zeros_like(mb_rewards) lastgaelam = 0 for t in reversed(range(self.nsteps)): if t == self.nsteps - 1: nextnonterminal = 1.0 - self.dones nextvalues = last_values else: nextnonterminal = 1.0 - mb_dones[t+1] nextvalues = mb_values[t+1] 
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t] mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam mb_returns = mb_advs + mb_values return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)), mb_states, epinfos) def sf01(arr): """ swap and then flatten axes 0 and 1 """ s = arr.shape return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:]) def constfn(val): def f(_): return val return f def learn(*, policy, env, nsteps, total_timesteps, ent_coef, lr, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, save_interval=0, load_path=None): comm = MPI.COMM_WORLD rank = comm.Get_rank() mpi_size = comm.Get_size() sess = tf.get_default_session() tb_writer = TB_Writer(sess) if isinstance(lr, float): lr = constfn(lr) else: assert callable(lr) if isinstance(cliprange, float): cliprange = constfn(cliprange) else: assert callable(cliprange) total_timesteps = int(total_timesteps) nenvs = env.num_envs ob_space = env.observation_space ac_space = env.action_space nbatch = nenvs * nsteps nbatch_train = nbatch // nminibatches model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm) utils.load_all_params(sess) runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam) epinfobuf10 = deque(maxlen=10) epinfobuf100 = deque(maxlen=100) tfirststart = time.time() active_ep_buf = epinfobuf100 nupdates = total_timesteps//nbatch mean_rewards = [] datapoints = [] run_t_total = 0 train_t_total = 0 can_save = True checkpoints = [32, 64] saved_key_checkpoints = [False] * len(checkpoints) if Config.SYNC_FROM_ROOT and rank != 0: can_save = False def save_model(base_name=None): base_dict = {'datapoints': datapoints} utils.save_params_in_scopes(sess, ['model'], Config.get_save_file(base_name=base_name), base_dict) for update in range(1, nupdates+1): assert nbatch % nminibatches == 0 nbatch_train = nbatch // nminibatches tstart = time.time() frac = 1.0 - (update - 1.0) / nupdates lrnow = lr(frac) cliprangenow = cliprange(frac) mpi_print('collecting rollouts...') run_tstart = time.time() obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() epinfobuf10.extend(epinfos) epinfobuf100.extend(epinfos) run_elapsed = time.time() - run_tstart run_t_total += run_elapsed mpi_print('rollouts complete') mblossvals = [] mpi_print('updating parameters...') train_tstart = time.time() if states is None: # nonrecurrent version inds = np.arange(nbatch) for _ in range(noptepochs): np.random.shuffle(inds) for start in range(0, nbatch, nbatch_train): end = start + nbatch_train mbinds = inds[start:end] slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mblossvals.append(model.train(lrnow, cliprangenow, *slices)) else: # recurrent version assert nenvs % nminibatches == 0 envinds = np.arange(nenvs) flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps) envsperbatch = nbatch_train // nsteps for _ in range(noptepochs): np.random.shuffle(envinds) for start in range(0, nenvs, envsperbatch): end = start + envsperbatch mbenvinds = envinds[start:end] mbflatinds = flatinds[mbenvinds].ravel() slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs)) mbstates = states[mbenvinds] mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates)) # update the 
dropout mask sess.run([model.train_model.dropout_assign_ops]) train_elapsed = time.time() - train_tstart train_t_total += train_elapsed mpi_print('update complete') lossvals = np.mean(mblossvals, axis=0) tnow = time.time() fps = int(nbatch / (tnow - tstart)) if update % log_interval == 0 or update == 1: step = update*nbatch rew_mean_10 = utils.process_ep_buf(active_ep_buf, tb_writer=tb_writer, suffix='', step=step) ep_len_mean = np.nanmean([epinfo['l'] for epinfo in active_ep_buf]) mpi_print('\n----', update) mean_rewards.append(rew_mean_10) datapoints.append([step, rew_mean_10]) tb_writer.log_scalar(ep_len_mean, 'ep_len_mean') tb_writer.log_scalar(fps, 'fps') mpi_print('time_elapsed', tnow - tfirststart, run_t_total, train_t_total) mpi_print('timesteps', update*nsteps, total_timesteps) mpi_print('eplenmean', ep_len_mean) mpi_print('eprew', rew_mean_10) mpi_print('fps', fps) mpi_print('total_timesteps', update*nbatch) mpi_print([epinfo['r'] for epinfo in epinfobuf10]) if len(mblossvals): for (lossval, lossname) in zip(lossvals, model.loss_names): mpi_print(lossname, lossval) tb_writer.log_scalar(lossval, lossname) mpi_print('----\n') if can_save: if save_interval and (update % save_interval == 0): save_model() for j, checkpoint in enumerate(checkpoints): if (not saved_key_checkpoints[j]) and (step >= (checkpoint * 1e6)): saved_key_checkpoints[j] = True save_model(str(checkpoint) + 'M') save_model() env.close() return mean_rewards </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="6"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import tensorflow as tf from mpi4py import MPI from coinrun.config import Config import numpy as np def clean_tb_dir(): comm = MPI.COMM_WORLD rank = comm.Get_rank() if rank == 0: if tf.gfile.Exists(Config.TB_DIR): tf.gfile.DeleteRecursively(Config.TB_DIR) tf.gfile.MakeDirs(Config.TB_DIR) comm.Barrier() class TB_Writer(object): def __init__(self, sess): comm = MPI.COMM_WORLD rank = comm.Get_rank() clean_tb_dir() tb_writer = tf.summary.FileWriter(Config.TB_DIR + '/' + Config.RUN_ID + '_' + str(rank), sess.graph) total_steps = [0] should_log = (rank == 0 or Config.LOG_ALL_MPI) if should_log: hyperparams = np.array(Config.get_arg_text()) hyperparams_tensor = tf.constant(hyperparams) summary_op = tf.summary.text("hyperparameters info", hyperparams_tensor) summary = sess.run(summary_op) tb_writer.add_summary(summary) def add_summary(_merged, interval=1): if should_log: total_steps[0] += 1 if total_steps[0] % interval == 0: tb_writer.add_summary(_merged, total_steps[0]) tb_writer.flush() tuples = [] def make_scalar_graph(name): scalar_ph = tf.placeholder(name='scalar_' + name, dtype=tf.float32) scalar_summary = tf.summary.scalar(name, scalar_ph) merged = tf.summary.merge([scalar_summary]) tuples.append((scalar_ph, merged)) name_dict = {} curr_name_idx = [0] def log_scalar(x, name, step=-1): if not name in name_dict: name_dict[name] = curr_name_idx[0] tf_name = (name + '_' + Config.RUN_ID) if curr_name_idx[0] == 0 else name make_scalar_graph(tf_name) curr_name_idx[0] += 1 idx = name_dict[name] scalar_ph, merged = tuples[idx] if should_log: if step == -1: step = total_steps[0] total_steps[0] += 1 _merged = sess.run(merged, {scalar_ph: x}) tb_writer.add_summary(_merged, step) tb_writer.flush() self.add_summary = add_summary 
sync_from_root(sess, vars=None): if vars is None: vars = tf.trainable_variables() if Config.SYNC_FROM_ROOT: rank = MPI.COMM_WORLD.Get_rank() print('sync from root', rank) for var in vars: if rank == 0: MPI.COMM_WORLD.bcast(sess.run(var)) else: sess.run(tf.assign(var, MPI.COMM_WORLD.bcast(None))) def mpi_average(values): return mpi_average_comm(values, MPI.COMM_WORLD) def mpi_average_comm(values, comm): size = comm.size x = np.array(values) buf = np.zeros_like(x) comm.Allreduce(x, buf, op=MPI.SUM) buf = buf / size return buf def mpi_average_train_test(values): return mpi_average_comm(values, Config.TRAIN_TEST_COMM) def mpi_print(*args): rank = MPI.COMM_WORLD.Get_rank() if rank == 0: print(*args) def process_ep_buf(epinfobuf, tb_writer=None, suffix='', step=0): rewards = [epinfo['r'] for epinfo in epinfobuf] rew_mean = np.nanmean(rewards) if Config.SYNC_FROM_ROOT: rew_mean = mpi_average_train_test([rew_mean])[0] if tb_writer is not None: tb_writer.log_scalar(rew_mean, 'rew_mean' + suffix, step) aux_dicts = [] if len(epinfobuf) > 0 and 'aux_dict' in epinfobuf[0]: aux_dicts = [epinfo['aux_dict'] for epinfo in epinfobuf] if len(aux_dicts) > 0: keys = aux_dicts[0].keys() for key in keys: sub_rews = [aux_dict[key] for aux_dict in aux_dicts] sub_rew = np.nanmean(sub_rews) if tb_writer is not None: tb_writer.log_scalar(sub_rew, key, step) return rew_mean </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="11"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from coinrun.config import Config import os import joblib def load_for_setup_if_necessary(): restore_file(Config.RESTORE_ID) def restore_file(restore_id, load_key='default'): if restore_id is not None: load_file = Config.get_load_filename(restore_id=restore_id) filepath = file_to_path(load_file) load_data = joblib.load(filepath) Config.set_load_data(load_data, load_key=load_key) restored_args = load_data['args'] sub_dict = {} res_keys = Config.RES_KEYS for key in res_keys: if key in restored_args: sub_dict[key] = restored_args[key] else: print('warning key %s not restored' % key) Config.parse_args_dict(sub_dict) from coinrun.coinrunenv import init_args_and_threads init_args_and_threads(4) def setup_and_load(use_cmd_line_args=True, **kwargs): """ Initialize the global config using command line options, defaulting to the values in `config.py`. 
`use_cmd_line_args`: set to False to ignore command line arguments passed to the program `**kwargs`: override the defaults from `config.py` with these values """ args = Config.initialize_args(use_cmd_line_args=use_cmd_line_args, **kwargs) load_for_setup_if_necessary() return args def file_to_path(filename): return os.path.join(Config.WORKDIR, filename)</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="12"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from coinrun import random_agent def test_coinrun(): random_agent.random_agent(num_envs=16, max_steps=100) if __name__ == '__main__': test_coinrun()</span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="13"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import numpy as np import tensorflow as tf from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm from baselines.common.distributions import make_pdtype from baselines.common.input import observation_input from coinrun.config import Config def impala_cnn(images, depths=[16, 32, 32]): """ Model used in the paper "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner Architectures" https://arxiv.org/abs/1802.01561 """ use_batch_norm = Config.USE_BATCH_NORM == 1 dropout_layer_num = [0] dropout_assign_ops = [] def dropout_layer(out): if Config.DROPOUT > 0: out_shape = out.get_shape().as_list() num_features = np.prod(out_shape[1:]) var_name = 'mask_' + str(dropout_layer_num[0]) batch_seed_shape = out_shape[1:] batch_seed = tf.get_variable(var_name, shape=batch_seed_shape, initializer=tf.random_uniform_initializer(minval=0, maxval=1), trainable=False) batch_seed_assign = tf.assign(batch_seed, tf.random_uniform(batch_seed_shape, minval=0, maxval=1)) dropout_assign_ops.append(batch_seed_assign) curr_mask = tf.sign(tf.nn.relu(batch_seed[None,...] 
- Config.DROPOUT)) curr_mask = curr_mask * (1.0 / (1.0 - Config.DROPOUT)) out = out * curr_mask dropout_layer_num[0] += 1 return out def conv_layer(out, depth): out = tf.layers.conv2d(out, depth, 3, padding='same') out = dropout_layer(out) if use_batch_norm: out = tf.contrib.layers.batch_norm(out, center=True, scale=True, is_training=True) return out def residual_block(inputs): depth = inputs.get_shape()[-1].value out = tf.nn.relu(inputs) out = conv_layer(out, depth) out = tf.nn.relu(out) out = conv_layer(out, depth) return out + inputs def conv_sequence(inputs, depth): out = conv_layer(inputs, depth) out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='same') out = residual_block(out) out = residual_block(out) return out out = images for depth in depths: out = conv_sequence(out, depth) out = tf.layers.flatten(out) out = tf.nn.relu(out) out = tf.layers.dense(out, 256, activation=tf.nn.relu) return out, dropout_assign_ops def nature_cnn(scaled_images, **conv_kwargs): """ Model used in the paper "Human-level control through deep reinforcement learning" https://www.nature.com/articles/nature14236 """ def activ(curr): return tf.nn.relu(curr) h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs)) h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs)) h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs)) h3 = conv_to_fc(h3) return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))) def choose_cnn(images): arch = Config.ARCHITECTURE scaled_images = tf.cast(images, tf.float32) / 255. dropout_assign_ops = [] if arch == 'nature': out = nature_cnn(scaled_images) elif arch == 'impala': out, dropout_assign_ops = impala_cnn(scaled_images) elif arch == 'impalalarge': out, dropout_assign_ops = impala_cnn(scaled_images, depths=[32, 64, 64, 64, 64]) else: assert(False) return out, dropout_assign_ops class LstmPolicy(object): def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, nlstm=256): nenv = nbatch // nsteps self.pdtype = make_pdtype(ac_space) X, processed_x = observation_input(ob_space, nbatch) M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1) S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states with tf.variable_scope("model", reuse=tf.AUTO_REUSE): h, self.dropout_assign_ops = choose_cnn(processed_x) xs = batch_to_seq(h, nenv, nsteps) ms = batch_to_seq(M, nenv, nsteps) h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm) h5 = seq_to_batch(h5) vf = fc(h5, 'v', 1)[:,0] self.pd, self.pi = self.pdtype.pdfromlatent(h5) a0 = self.pd.sample() neglogp0 = self.pd.neglogp(a0) self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32) def step(ob, state, mask): return sess.run([a0, vf, snew, neglogp0], {X:ob, S:state, M:mask}) def value(ob, state, mask): return sess.run(vf, {X:ob, S:state, M:mask}) self.X = X self.M = M self.S = S self.vf = vf self.step = step self.value = value class CnnPolicy(object): def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, **conv_kwargs): #pylint: disable=W0613 self.pdtype = make_pdtype(ac_space) X, processed_x = observation_input(ob_space, nbatch) with tf.variable_scope("model", reuse=tf.AUTO_REUSE): h, self.dropout_assign_ops = choose_cnn(processed_x) vf = fc(h, 'v', 1)[:,0] self.pd, self.pi = self.pdtype.pdfromlatent(h, init_scale=0.01) a0 = self.pd.sample() neglogp0 = self.pd.neglogp(a0) self.initial_state = None def step(ob, *_args, **_kwargs): a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob}) return a, v, self.initial_state, 
neglogp def value(ob, *_args, **_kwargs): return sess.run(vf, {X:ob}) self.X = X self.vf = vf self.step = step self.value = value def get_policy(): use_lstm = Config.USE_LSTM if use_lstm == 1: policy = LstmPolicy elif use_lstm == 0: policy = CnnPolicy else: assert(False) return policy </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="14"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" Python interface to the CoinRun shared library using ctypes. On import, this will attempt to build the shared library. """ import os import atexit import random import sys from ctypes import c_int, c_char_p, c_float, c_bool import gym import gym.spaces import numpy as np import numpy.ctypeslib as npct from baselines.common.vec_env import VecEnv from baselines import logger from coinrun.config import Config from mpi4py import MPI from baselines.common import mpi_util # if the environment is crashing, try using the debug build to get # a readable stack trace DEBUG = False SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) game_versions = { 'standard': 1000, 'platform': 1001, 'maze': 1002, } def build(): lrank, _lsize = mpi_util.get_local_rank_size(MPI.COMM_WORLD) if lrank == 0: dirname = os.path.dirname(__file__) if len(dirname): make_cmd = "QT_SELECT=5 make -C %s" % dirname else: make_cmd = "QT_SELECT=5 make" r = os.system(make_cmd) if r != 0: logger.error('coinrun: make failed') sys.exit(1) MPI.COMM_WORLD.barrier() build() if DEBUG: lib_path = '.build-debug/coinrun_cpp_d' else: lib_path = '.build-release/coinrun_cpp' lib = npct.load_library(lib_path, os.path.dirname(__file__)) lib.init.argtypes = [c_int] lib.get_NUM_ACTIONS.restype = c_int lib.get_RES_W.restype = c_int lib.get_RES_H.restype = c_int lib.get_VIDEORES.restype = c_int lib.vec_create.argtypes = [ c_int, # game_type c_int, # nenvs c_int, # lump_n c_bool, # want_hires_render c_float, # default_zoom ] lib.vec_create.restype = c_int lib.vec_close.argtypes = [c_int] lib.vec_step_async_discrete.argtypes = [c_int, npct.ndpointer(dtype=np.int32, ndim=1)] lib.initialize_args.argtypes = [npct.ndpointer(dtype=np.int32, ndim=1)] lib.initialize_set_monitor_dir.argtypes = [c_char_p, c_int] lib.vec_wait.argtypes = [ c_int, npct.ndpointer(dtype=np.uint8, ndim=4), # normal rgb npct.ndpointer(dtype=np.uint8, ndim=4), # larger rgb for render() npct.ndpointer(dtype=np.float32, ndim=1), # rew npct.ndpointer(dtype=np.bool, ndim=1), # done ] already_inited = False def init_args_and_threads(cpu_count=4, monitor_csv_policy='all', rand_seed=None): """ Perform one-time global init for the CoinRun library. This must be called before creating an instance of CoinRunVecEnv. You should not call this multiple times from the same process. 
""" os.environ['COINRUN_RESOURCES_PATH'] = os.path.join(SCRIPT_DIR, 'assets') is_high_difficulty = Config.HIGH_DIFFICULTY if rand_seed is None: rand_seed = random.SystemRandom().randint(0, 1000000000) # ensure different MPI processes get different seeds (just in case SystemRandom implementation is poor) mpi_rank, mpi_size = mpi_util.get_local_rank_size(MPI.COMM_WORLD) rand_seed = rand_seed - rand_seed % mpi_size + mpi_rank int_args = np.array([int(is_high_difficulty), Config.NUM_LEVELS, int(Config.PAINT_VEL_INFO), Config.USE_DATA_AUGMENTATION, game_versions[Config.GAME_TYPE], Config.SET_SEED, rand_seed]).astype(np.int32) lib.initialize_args(int_args) lib.initialize_set_monitor_dir(logger.get_dir().encode('utf-8'), {'off': 0, 'first_env': 1, 'all': 2}[monitor_csv_policy]) global already_inited if already_inited: return lib.init(cpu_count) already_inited = True @atexit.register def shutdown(): global already_inited if not already_inited: return lib.coinrun_shutdown() class CoinRunVecEnv(VecEnv): """ This is the CoinRun VecEnv, all CoinRun environments are just instances of this class with different values for `game_type` `game_type`: int game type corresponding to the game type to create, see `enum GameType` in `coinrun.cpp` `num_envs`: number of environments to create in this VecEnv `lump_n`: only used when the environment creates `monitor.csv` files `default_zoom`: controls how much of the level the agent can see """ def __init__(self, game_type, num_envs, lump_n=0, default_zoom=5.0): self.metadata = {'render.modes': []} self.reward_range = (-float('inf'), float('inf')) self.NUM_ACTIONS = lib.get_NUM_ACTIONS() self.RES_W = lib.get_RES_W() self.RES_H = lib.get_RES_H() self.VIDEORES = lib.get_VIDEORES() self.buf_rew = np.zeros([num_envs], dtype=np.float32) self.buf_done = np.zeros([num_envs], dtype=np.bool) self.buf_rgb = np.zeros([num_envs, self.RES_H, self.RES_W, 3], dtype=np.uint8) self.hires_render = Config.IS_HIGH_RES if self.hires_render: self.buf_render_rgb = np.zeros([num_envs, self.VIDEORES, self.VIDEORES, 3], dtype=np.uint8) else: self.buf_render_rgb = np.zeros([1, 1, 1, 1], dtype=np.uint8) num_channels = 1 if Config.USE_BLACK_WHITE else 3 obs_space = gym.spaces.Box(0, 255, shape=[self.RES_H, self.RES_W, num_channels], dtype=np.uint8) super().__init__( num_envs=num_envs, observation_space=obs_space, action_space=gym.spaces.Discrete(self.NUM_ACTIONS), ) self.handle = lib.vec_create( game_versions[game_type], self.num_envs, lump_n, self.hires_render, default_zoom) self.dummy_info = [{} for _ in range(num_envs)] def __del__(self): if hasattr(self, 'handle'): lib.vec_close(self.handle) self.handle = 0 def close(self): lib.vec_close(self.handle) self.handle = 0 def reset(self): print("CoinRun ignores resets") obs, _, _, _ = self.step_wait() return obs def get_images(self): if self.hires_render: return self.buf_render_rgb else: return self.buf_rgb def step_async(self, actions): assert actions.dtype in [np.int32, np.int64] actions = actions.astype(np.int32) lib.vec_step_async_discrete(self.handle, actions) def step_wait(self): self.buf_rew = np.zeros_like(self.buf_rew) self.buf_done = np.zeros_like(self.buf_done) lib.vec_wait( self.handle, self.buf_rgb, self.buf_render_rgb, self.buf_rew, self.buf_done) obs_frames = self.buf_rgb if Config.USE_BLACK_WHITE: obs_frames = np.mean(obs_frames, axis=-1).astype(np.uint8)[...,None] return obs_frames, self.buf_rew, self.buf_done, self.dummy_info def make(env_id, num_envs, **kwargs): assert env_id in game_versions, 'cannot find environment "%s", 
maybe you mean one of %s' % (env_id, list(game_versions.keys())) return CoinRunVecEnv(env_id, num_envs, **kwargs) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="15"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import json import pickle import math import sys import argparse import warnings from os import makedirs from os.path import basename, join, exists, dirname, splitext, realpath from wikidata_linker_utils.progressbar import get_progress_bar from dataset import TSVDataset, CombinedDataset, H5Dataset, ClassificationHandler from batchifier import (iter_batches_single_threaded, requires_vocab, requires_character_convolution, get_feature_vocabs) import tensorflow as tf import numpy as np try: RNNCell = tf.nn.rnn_cell.RNNCell TFLSTMCell = tf.nn.rnn_cell.LSTMCell MultiRNNCell = tf.nn.rnn_cell.MultiRNNCell LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple from tensorflow.contrib.cudnn_rnn import CudnnLSTM except AttributeError: RNNCell = tf.contrib.rnn.RNNCell TFLSTMCell = tf.contrib.rnn.LSTMCell MultiRNNCell = tf.contrib.rnn.MultiRNNCell LSTMStateTuple = tf.contrib.rnn.LSTMStateTuple from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnLSTM from tensorflow.python.client import device_lib class LazyAdamOptimizer(tf.train.AdamOptimizer): """Variant of the Adam optimizer that handles sparse updates more efficiently. The original Adam algorithm maintains two moving-average accumulators for each trainable variable; the accumulators are updated at every step. This class provides lazier handling of gradient updates for sparse variables. It only updates moving-average accumulators for sparse variable indices that appear in the current batch, rather than updating the accumulators for all indices. Compared with the original Adam optimizer, it can provide large improvements in model training throughput for some applications. However, it provides slightly different semantics than the original Adam algorithm, and may lead to different empirical results. 
""" def _apply_sparse(self, grad, var): beta1_power = tf.cast(self._beta1_power, var.dtype.base_dtype) beta2_power = tf.cast(self._beta2_power, var.dtype.base_dtype) lr_t = tf.cast(self._lr_t, var.dtype.base_dtype) beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype) beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype) epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype) lr = (lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power)) # m := beta1 * m + (1 - beta1) * g_t # We use a slightly different version of the moving-average update formula # that does a better job of handling concurrent lockless updates: # m -= (1 - beta1) * (m - g_t) m = self.get_slot(var, "m") m_t_delta = tf.gather(m, grad.indices) - grad.values m_t = tf.scatter_sub(m, grad.indices, (1 - beta1_t) * m_t_delta, use_locking=self._use_locking) # v := beta2 * v + (1 - beta2) * (g_t * g_t) # We reformulate the update as: # v -= (1 - beta2) * (v - g_t * g_t) v = self.get_slot(var, "v") v_t_delta = tf.gather(v, grad.indices) - tf.square(grad.values) v_t = tf.scatter_sub(v, grad.indices, (1 - beta2_t) * v_t_delta, use_locking=self._use_locking) # variable -= learning_rate * m_t / (epsilon_t + sqrt(v_t)) m_t_slice = tf.gather(m_t, grad.indices) v_t_slice = tf.gather(v_t, grad.indices) denominator_slice = tf.sqrt(v_t_slice) + epsilon_t var_update = tf.scatter_sub(var, grad.indices, lr * m_t_slice / denominator_slice, use_locking=self._use_locking) return tf.group(var_update, m_t, v_t) def get_available_gpus(): local_device_protos = device_lib.list_local_devices() return [x.name for x in local_device_protos if x.device_type == 'GPU'] def split(values, axis, num_splits, name=None): return tf.split(values, num_splits, axis=axis, name=name) def reverse(values, axis): return tf.reverse(values, [axis]) def sparse_softmax_cross_entropy_with_logits(logits, labels): return tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels) def concat(values, axis, name=None): if len(values) == 1: return values[0] return tf.concat(values, axis, name=name) def concat_tensor_array(values, name=None): return values.stack(name=name) def batch_gather_3d(values, indices): return tf.gather(tf.reshape(values, [-1, tf.shape(values)[2]]), tf.range(0, tf.shape(values)[0]) * tf.shape(values)[1] + indices) def batch_gather_2d(values, indices): return tf.gather(tf.reshape(values, [-1]), tf.range(0, tf.shape(values)[0]) * tf.shape(values)[1] + indices) def viterbi_decode(score, transition_params, sequence_lengths, back_prop=False, parallel_iterations=1): """Decode the highest scoring sequence of tags inside of TensorFlow!!! This can be used anytime. Args: score: A [batch, seq_len, num_tags] matrix of unary potentials. transition_params: A [num_tags, num_tags] matrix of binary potentials. sequence_lengths: A [batch] int32 vector of the length of each score sequence. Returns: viterbi: A [batch, seq_len] list of integers containing the highest scoring tag indices. viterbi_score: A vector of float containing the score for the Viterbi sequence. 
""" sequence_lengths = tf.convert_to_tensor( sequence_lengths, name="sequence_lengths") score = tf.convert_to_tensor(score, name="score") transition_params = tf.convert_to_tensor( transition_params, name="transition_params") if sequence_lengths.dtype != tf.int32: sequence_lengths = tf.cast(sequence_lengths, tf.int32) def condition(t, *args): """Stop when full score sequence has been read in.""" return tf.less(t, tf.shape(score)[1]) def body(t, trellis, backpointers, trellis_val): """Perform forward viterbi pass.""" v = tf.expand_dims(trellis_val, 2) + tf.expand_dims(transition_params, 0) new_trellis_val = score[:, t, :] + tf.reduce_max(v, axis=1) new_trellis = trellis.write(t, new_trellis_val) new_backpointers = backpointers.write( t, tf.cast(tf.argmax(v, axis=1), tf.int32)) return t + 1, new_trellis, new_backpointers, new_trellis_val trellis_arr = tf.TensorArray(score.dtype, size=0, dynamic_size=True, clear_after_read=False, infer_shape=False) first_trellis_val = score[:, 0, :] trellis_arr = trellis_arr.write(0, first_trellis_val) backpointers_arr = tf.TensorArray(tf.int32, size=0, dynamic_size=True, clear_after_read=False, infer_shape=False) backpointers_arr = backpointers_arr.write(0, tf.zeros_like(score[:, 0, :], dtype=tf.int32)) _, trellis_out, backpointers_out, _ = tf.while_loop( condition, body, (tf.constant(1, name="t", dtype=tf.int32), trellis_arr, backpointers_arr, first_trellis_val), parallel_iterations=parallel_iterations, back_prop=back_prop) trellis_out = concat_tensor_array(trellis_out) backpointers_out = concat_tensor_array(backpointers_out) # make batch-major: trellis_out = tf.transpose(trellis_out, [1, 0, 2]) backpointers_out = tf.transpose(backpointers_out, [1, 0, 2]) def condition(t, *args): return tf.less(t, tf.shape(score)[1]) def body(t, viterbi, last_decision): backpointers_timestep = batch_gather_3d( backpointers_out, tf.maximum(sequence_lengths - t, 0)) new_last_decision = batch_gather_2d( backpointers_timestep, last_decision) new_viterbi = viterbi.write(t, new_last_decision) return t + 1, new_viterbi, new_last_decision last_timestep = batch_gather_3d(trellis_out, sequence_lengths - 1) # get scores for last timestep of each batch element inside # trellis: scores = tf.reduce_max(last_timestep, axis=1) # get choice index for last timestep: last_decision = tf.cast(tf.argmax(last_timestep, axis=1), tf.int32) # decode backwards using backpointers: viterbi = tf.TensorArray(tf.int32, size=0, dynamic_size=True, clear_after_read=False, infer_shape=False) viterbi = viterbi.write(0, last_decision) _, viterbi_out, _ = tf.while_loop( condition, body, (tf.constant(1, name="t", dtype=tf.int32), viterbi, last_decision), parallel_iterations=parallel_iterations, back_prop=back_prop) viterbi_out = concat_tensor_array(viterbi_out) # make batch-major: viterbi_out = tf.transpose(viterbi_out, [1, 0]) viterbi_out_fwd = tf.reverse_sequence( viterbi_out, sequence_lengths, seq_dim=1) return viterbi_out_fwd, scores def sum_list(elements): total = elements[0] for el in elements[1:]: total += el return total def explicitly_set_fields(): received = set() for argument in sys.argv: if argument.startswith("--"): received.add(argument[2:]) if argument[2:].startswith("no"): received.add(argument[4:]) return received def save_session(session, saver, path, verbose=False): """ Call save on tf.train.Saver on a specific path to store all the variables of the current tensorflow session to a file for later restoring. 
Arguments: session : tf.Session path : str, place to save session """ makedirs(path, exist_ok=True) if not path.endswith("/"): path = path + "/" path = join(path, "model.ckpt") if verbose: print("Saving session under %r" % (path,), flush=True) saver.save(session, path) print("Saved", flush=True) ### constants for saving & loading # model config: OBJECTIVE_NAMES = "OBJECTIVE_NAMES" OBJECTIVE_TYPES = "OBJECTIVE_TYPES" # inputs: INPUT_PLACEHOLDERS = "INPUT_PLACEHOLDERS" LABEL_PLACEHOLDERS = "LABEL_PLACEHOLDERS" LABEL_MASK_PLACEHOLDERS = "LABEL_MASK_PLACEHOLDERS" TRAIN_OP = "TRAIN_OP" SEQUENCE_LENGTHS = "SEQUENCE_LENGTHS" IS_TRAINING = "IS_TRAINING" # outputs: DECODED = "DECODED" DECODED_SCORES = "DECODED_SCORES" UNARY_SCORES = "UNARY_SCORES" # per objective metrics: TOKEN_CORRECT = "TOKEN_CORRECT" TOKEN_CORRECT_TOTAL = "TOKEN_CORRECT_TOTAL" SENTENCE_CORRECT = "SENTENCE_CORRECT" SENTENCE_CORRECT_TOTAL = "SENTENCE_CORRECT_TOTAL" # aggregate metrics over all objectives NLL = "NLL" NLL_TOTAL = "NLL_TOTAL" TOKEN_CORRECT_ALL = "TOKEN_CORRECT_ALL" TOKEN_CORRECT_ALL_TOTAL = "TOKEN_CORRECT_ALL_TOTAL" SENTENCE_CORRECT_ALL = "SENTENCE_CORRECT_ALL" SENTENCE_CORRECT_ALL_TOTAL = "SENTENCE_CORRECT_ALL_TOTAL" CONFUSION_MATRIX = "CONFUSION_MATRIX" GLOBAL_STEP = "global_step" SUMMARIES_ASSIGNS = "SUMMARIES_ASSIGNS" SUMMARIES_PLACEHOLDERS = "SUMMARIES_PLACEHOLDERS" SUMMARIES_NAMES = "SUMMARIES_NAMES" TRAIN_SUMMARIES = "TRAIN_SUMMARIES" TRUE_POSITIVES = "TRUE_POSITIVES" FALSE_POSITIVES = "FALSE_POSITIVES" FALSE_NEGATIVES = "FALSE_NEGATIVES" def maybe_dropout(inputs, keep_prob, is_training): return tf.cond(is_training, lambda : tf.nn.dropout(inputs, keep_prob), lambda : inputs ) if keep_prob < 1 else inputs def compute_sentence_correct(correct, sequence_mask): any_label = tf.reduce_max(tf.cast(sequence_mask, tf.int32), 1) sentence_correct_total = tf.reduce_sum(any_label) # is 1 when all is correct, 0 otherwise sentence_correct = tf.reduce_sum(tf.reduce_prod( tf.cast( tf.logical_or(correct, tf.logical_not(sequence_mask)), tf.int32 ), 1 ) * any_label) return sentence_correct, sentence_correct_total def lstm_activation(inputs, input_h, input_c, W, b, activation): # i = input_gate, j = new_input, f = forget_gate, o = output_gate cell_inputs = concat([inputs, input_h], axis=1) lstm_matrix = tf.nn.xw_plus_b(cell_inputs, W, b) preactiv = split(lstm_matrix, axis=1, num_splits=4) # from CUDNN docs: # Values 0 and 4 reference the input gate. # Values 1 and 5 reference the forget gate. # Values 2 and 6 reference the new memory gate. 
# Values 3 and 7 reference the output gate i, f, j, o = ( preactiv[CUDNN_MAPPING["i"]], preactiv[CUDNN_MAPPING["f"]], preactiv[CUDNN_MAPPING["j"]], preactiv[CUDNN_MAPPING["o"]] ) c = (tf.nn.sigmoid(f) * input_c + tf.nn.sigmoid(i) * activation(j)) m = tf.nn.sigmoid(o) * activation(c) return (c, m) class Logger(object): def __init__(self, session, writer): self.session = session self.writer = writer self._placeholders = {} summaries = tf.get_collection(SUMMARIES_ASSIGNS) summaries_pholders = tf.get_collection(SUMMARIES_PLACEHOLDERS) summaries_names = [name.decode("utf-8") for name in tf.get_collection(SUMMARIES_NAMES)] for summary, pholder, name in zip(summaries, summaries_pholders, summaries_names): self._placeholders[name] = (pholder, summary) def log(self, name, value, step): if name not in self._placeholders: pholder = tf.placeholder(tf.float32, [], name=name) summary = tf.summary.scalar(name, pholder) tf.add_to_collection(SUMMARIES_ASSIGNS, summary) tf.add_to_collection(SUMMARIES_NAMES, name) tf.add_to_collection(SUMMARIES_PLACEHOLDERS, pholder) self._placeholders[name] = (pholder, summary) pholder, summary = self._placeholders[name] res = self.session.run(summary, {pholder:value}) self.writer.add_summary(res, step) class ParametrizedLSTMCell(RNNCell): def __init__(self, weights, biases, hidden_size): self._weights = weights self._biases = biases self.hidden_size = hidden_size @property def state_size(self): return (self.hidden_size, self.hidden_size) @property def output_size(self): return self.hidden_size def __call__(self, inputs, state, scope=None): input_h, input_c = state c, m = lstm_activation(inputs, input_h=input_h, input_c=input_c, b=self._biases, W=self._weights, activation=tf.nn.tanh) return m, (m, c) class LSTMCell(TFLSTMCell): def __init__(self, num_units, keep_prob=1.0, is_training=False): self._is_training = is_training self._keep_prob = keep_prob TFLSTMCell.__init__( self, num_units=num_units, state_is_tuple=True ) def __call__(self, inputs, state, scope=None): (c_prev, m_prev) = state dtype = inputs.dtype input_size = inputs.get_shape().with_rank(2)[1] if input_size.value is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") with tf.variable_scope(scope or type(self).__name__, initializer=self._initializer): # "LSTMCell" concat_w = _get_concat_variable( "W", [input_size.value + self._num_units, 4 * self._num_units], dtype, 1) b = tf.get_variable( "B", shape=[4 * self._num_units], initializer=tf.zeros_initializer(), dtype=dtype) c, m = lstm_activation(inputs, input_c=c_prev, input_h=m_prev, W=concat_w, b=b, activation=self._activation, keep_prob=self._keep_prob, is_training=self._is_training, forget_bias=self._forget_bias) return m, LSTMStateTuple(c, m) def cudnn_lstm_parameter_size(input_size, hidden_size): """Number of parameters in a single CuDNN LSTM cell.""" biases = 8 * hidden_size weights = 4 * (hidden_size * input_size) + 4 * (hidden_size * hidden_size) return biases + weights def direction_to_num_directions(direction): if direction == "unidirectional": return 1 elif direction == "bidirectional": return 2 else: raise ValueError("Unknown direction: %r." % (direction,)) def estimate_cudnn_parameter_size(num_layers, input_size, hidden_size, input_mode, direction): """ Compute the number of parameters needed to construct a stack of LSTMs. Assumes the hidden states of bidirectional LSTMs are concatenated before being sent to the next layer up. 
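# --- Illustrative sketch (added; not part of the original module) ---
# Worked example of the count produced by estimate_cudnn_parameter_size for a
# bidirectional stack: per layer and direction there are 4 input-to-hidden
# weight blocks (hidden * input), 4 hidden-to-hidden blocks (hidden * hidden)
# and 8 bias vectors of length hidden; layers above the first see the
# concatenated forward/backward states, so their input size is 2 * hidden.
# The sizes below are made-up illustration values.
def _example_bidirectional_param_count(input_size=100, hidden_size=200, num_layers=2):
    total = 0
    isize = input_size
    for _ in range(num_layers):
        per_direction = 8 * hidden_size + 4 * (hidden_size * isize + hidden_size * hidden_size)
        total += 2 * per_direction      # forward + backward direction
        isize = 2 * hidden_size         # next layer consumes concatenated states
    return total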
""" num_directions = direction_to_num_directions(direction) params = 0 isize = input_size for layer in range(num_layers): for direction in range(num_directions): params += cudnn_lstm_parameter_size( isize, hidden_size ) isize = hidden_size * num_directions return params # cudnn conversion to dynamic RNN: CUDNN_LAYER_WEIGHT_ORDER = [ "x", "x", "x", "x", "h", "h", "h", "h" ] CUDNN_LAYER_BIAS_ORDER = [ "bx", "bx", "bx", "bx", "bh", "bh", "bh", "bh" ] CUDNN_TRANSPOSED = True CUDNN_MAPPING = {"i": 0, "f": 1, "j": 2, "o": 3} def consume_biases_direction(params, old_offset, hidden_size, isize): offset = old_offset layer_biases_x = [] layer_biases_h = [] for piece in CUDNN_LAYER_BIAS_ORDER: if piece == "bx": layer_biases_x.append( params[offset:offset + hidden_size] ) offset += hidden_size elif piece == "bh": layer_biases_h.append( params[offset:offset + hidden_size] ) offset += hidden_size else: raise ValueError("Unknown cudnn piece %r." % (piece,)) b = concat(layer_biases_x, axis=0) + concat(layer_biases_h, axis=0) return b, offset def consume_weights_direction(params, old_offset, hidden_size, isize): offset = old_offset layer_weights_x = [] layer_weights_h = [] for piece in CUDNN_LAYER_WEIGHT_ORDER: if piece == "x": layer_weights_x.append( tf.reshape( params[offset:offset + hidden_size * isize], [hidden_size, isize] if CUDNN_TRANSPOSED else [isize, hidden_size] ) ) offset += hidden_size * isize elif piece == "h": layer_weights_h.append( tf.reshape( params[offset:offset + hidden_size * hidden_size], [hidden_size, hidden_size] ) ) offset += hidden_size * hidden_size else: raise ValueError("Unknown cudnn piece %r." % (piece,)) if CUDNN_TRANSPOSED: W_T = concat([concat(layer_weights_x, axis=0), concat(layer_weights_h, axis=0)], axis=1) W = tf.transpose(W_T) else: W = concat([concat(layer_weights_x, axis=1), concat(layer_weights_h, axis=1)], axis=0) return W, offset def decompose_layer_params(params, num_layers, hidden_size, cell_input_size, input_mode, direction, create_fn): """ This operation converts the opaque cudnn params into a set of usable weight matrices. Args: params : Tensor, opaque cudnn params tensor num_layers : int, number of stacked LSTMs. hidden_size : int, number of neurons in each LSTM. cell_input_size : int, input size for the LSTMs. input_mode: whether a pre-projection was used or not. Currently only 'linear_input' is supported (e.g. CuDNN does its own projection internally) direction : str, 'unidirectional' or 'bidirectional'. create_fn: callback for weight creation. Receives parameter slice (op), layer (int), direction (0 = fwd, 1 = bwd), parameter_index (0 = W, 1 = b). 
Returns: weights : list of lists of Tensors in the format: first list is indexed layers, inner list is indexed by direction (fwd, bwd), tensors in the inner list are (Weights, biases) """ if input_mode != "linear_input": raise ValueError("Only input_mode == linear_input supported for now.") num_directions = direction_to_num_directions(direction) offset = 0 all_weights = [[[] for j in range(num_directions)] for i in range(num_layers)] isize = cell_input_size with tf.variable_scope("DecomposeCudnnParams"): for layer in range(num_layers): with tf.variable_scope("Layer{}".format(layer)): for direction in range(num_directions): with tf.variable_scope("fwd" if direction == 0 else "bwd"): with tf.variable_scope("weights"): W, offset = consume_weights_direction( params, old_offset=offset, hidden_size=hidden_size, isize=isize) all_weights[layer][direction].append( create_fn(W, layer, direction, 0)) isize = hidden_size * num_directions isize = cell_input_size for layer in range(num_layers): with tf.variable_scope("Layer{}".format(layer)): for direction in range(num_directions): with tf.variable_scope("fwd" if direction == 0 else "bwd"): with tf.variable_scope("biases"): b, offset = consume_biases_direction( params, old_offset=offset, hidden_size=hidden_size, isize=isize) all_weights[layer][direction].append( create_fn(b, layer, direction, 1)) isize = hidden_size * num_directions return all_weights def create_decomposed_variable(param, lidx, didx, pidx): with tf.device("cpu"): return tf.get_variable("w" if pidx == 0 else "b", shape=param.get_shape().as_list(), dtype=param.dtype, trainable=False, collections=[tf.GraphKeys.GLOBAL_VARIABLES, "excluded_variables"]) def cpu_cudnn_params(params, num_layers, hidden_size, cell_input_size, input_mode, direction): """ This operation converts the opaque cudnn params into a set of usable weight matrices, and caches the conversion. Args: params : Tensor, opaque cudnn params tensor num_layers : int, number of stacked LSTMs. hidden_size : int, number of neurons in each LSTM. cell_input_size : int, input size for the LSTMs. input_mode: whether a pre-projection was used or not. Currently only 'linear_input' is supported (e.g. CuDNN does its own projection internally) direction : str, 'unidirectional' or 'bidirectional'. skip_creation : bool, whether to build variables. 
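# --- Added explanatory note (not in the original source) ---
# The body below caches this conversion: a non-trainable scalar variable
# ("CudnnConversionStatus") records whether the opaque CuDNN buffer has
# already been sliced into per-layer weight/bias variables, and the
# tf.cond / control_dependencies pair makes every consumer wait on that
# assignment while skipping the reshaping work once the flag is set.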
Returns: weights : list of lists of Tensors in the format: first list is indexed layers, inner list is indexed by direction (fwd, bwd), tensors in the inner list are (Weights, biases) """ # create a boolean status variable that checks whether the # weights have been converted to cpu format: with tf.device("cpu"): cpu_conversion_status = tf.get_variable( name="CudnnConversionStatus", dtype=tf.float32, initializer=tf.zeros_initializer(), shape=[], trainable=False, collections=[tf.GraphKeys.GLOBAL_VARIABLES]) # create a fresh copy of the weights (not trainable) reshaped = decompose_layer_params( params, num_layers=num_layers, hidden_size=hidden_size, cell_input_size=cell_input_size, input_mode=input_mode, direction=direction, create_fn=create_decomposed_variable) def cpu_convert(): all_assigns = decompose_layer_params( params, num_layers=num_layers, hidden_size=hidden_size, cell_input_size=cell_input_size, input_mode=input_mode, direction=direction, create_fn=lambda p, lidx, didx, pidx: tf.assign(reshaped[lidx][didx][pidx], p)) all_assigns = [assign for layer_assign in all_assigns for dir_assign in layer_assign for assign in dir_assign] all_assigns.append(tf.assign(cpu_conversion_status, tf.constant(1.0, dtype=tf.float32))) all_assigns.append(tf.Print(cpu_conversion_status, [0], message="Converted cudnn weights to CPU format. ")) with tf.control_dependencies(all_assigns): ret = tf.identity(cpu_conversion_status) return ret # cache the reshaping/concatenating ensure_conversion = tf.cond(tf.greater(cpu_conversion_status, 0), lambda: cpu_conversion_status, cpu_convert) # if weights are already reshaped, go ahead: with tf.control_dependencies([ensure_conversion]): # wrap with identity to ensure there is a dependency between assignment # and using the weights: all_params = [[[tf.identity(p) for p in dir_param] for dir_param in layer_param] for layer_param in reshaped] return all_params class CpuCudnnLSTM(object): def __init__(self, num_layers, hidden_size, cell_input_size, input_mode, direction): self.num_layers = num_layers self.hidden_size = hidden_size self.cell_input_size = cell_input_size self.input_mode = input_mode self.direction = direction def __call__(self, inputs, input_h, input_c, params, is_training=True): layer_params = cpu_cudnn_params(params, num_layers=self.num_layers, hidden_size=self.hidden_size, cell_input_size=self.cell_input_size, input_mode=self.input_mode, direction=self.direction) REVERSED = 1 layer_inputs = inputs cell_idx = 0 for layer_param in layer_params: hidden_fwd_bwd = [] final_output_c = [] final_output_h = [] for direction, (W, b) in enumerate(layer_param): if direction == REVERSED: layer_inputs = reverse(layer_inputs, axis=0) hiddens, (output_h, output_c) = tf.nn.dynamic_rnn( cell=ParametrizedLSTMCell(W, b, self.hidden_size), inputs=layer_inputs, dtype=inputs.dtype, time_major=True, initial_state=(input_h[cell_idx], input_c[cell_idx])) if direction == REVERSED: hiddens = reverse(hiddens, axis=0) hidden_fwd_bwd.append(hiddens) final_output_c.append(tf.expand_dims(output_c, 0)) final_output_h.append(tf.expand_dims(output_h, 0)) cell_idx += 1 if len(hidden_fwd_bwd) > 1: layer_inputs = concat(hidden_fwd_bwd, axis=2) final_output_c = concat(final_output_c, axis=0) final_output_h = concat(final_output_h, axis=0) else: layer_inputs = hidden_fwd_bwd[0] final_output_c = final_output_c[0] final_output_h = final_output_h[0] return layer_inputs, final_output_h, final_output_c def highway(x, activation_fn=tf.nn.relu, scope=None): size = x.get_shape()[-1].value with 
tf.variable_scope(scope or "HighwayLayer"): activ = tf.contrib.layers.fully_connected( x, size * 2, activation_fn=None, scope="FC" ) transform = tf.sigmoid(activ[..., :size], name="transform_gate") hidden = activation_fn(activ[..., size:]) carry = 1.0 - transform return tf.add(hidden * transform, x * carry, "y") def conv2d(inputs, output_dim, k_h, k_w, stddev=0.02, scope=None, weight_noise=0.0, is_training=True): with tf.variable_scope(scope or "Conv2D"): w = tf.get_variable('w', [k_h, k_w, inputs.get_shape()[-1], output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) if weight_noise > 0 and not isinstance(is_training, bool): w = add_weight_noise(w, is_training=is_training, stddev=weight_noise) return tf.nn.conv2d(inputs, w, strides=[1, 1, 1, 1], padding="VALID") def character_convolution(inputs, feature): inputs_2d = tf.reshape(inputs, [tf.shape(inputs)[0] * tf.shape(inputs)[1], tf.shape(inputs)[2]] ) inputs_3d = embedding_lookup( inputs_2d, dim=feature["dimension"], # 255 different bytes (uint8) # & start and end symbol: size=257, dtype=tf.float32, mask_negative=True) inputs_4d = tf.expand_dims(inputs_3d, 1) feature_pools = [] for idx, conv_filter in enumerate(feature["filters"]): width, channels = conv_filter["width"], conv_filter["channels"] # [batch * time x 1 x word_length x embed_dim x feature_map_dim] conv = tf.squeeze(conv2d(inputs_4d, channels, 1, width, scope="CharacterConvolution%d" % (idx,)), [1]) # remove word dimension pool = tf.reduce_max(conv, 1) feature_pools.append(pool) activations = concat(feature_pools, axis=1) channels_out = sum(conv_filter["channels"] for conv_filter in feature["filters"]) activations = tf.reshape( tf.tanh(activations), [tf.shape(inputs)[0], tf.shape(inputs)[1], channels_out], name="CharacterConvolutionPooled") for idx in range(feature["highway_layers"]): activations = highway(activations, scope="HighwayLayer%d" % (idx,), activation_fn=tf.tanh) return activations def feature_dtype(feat): if requires_vocab(feat): return tf.int32 elif feat["type"] in {"digit", "punctuation_count", "uppercase"}: return tf.float32 elif requires_character_convolution(feat): return tf.int32 else: raise ValueError("unknown feature %r." % (feat,)) def feature_shape(feature): if requires_vocab(feature) or feature["type"] in {'digit', 'punctuation_count', 'uppercase'}: return [None, None] elif requires_character_convolution(feature): return [None, None, None] else: raise ValueError("unknown feature %r." 
% (feature,)) def build_inputs(features, objectives, fused, class_weights, class_weights_clipval): input_placeholders = [] labels = [] labels_mask = [] labels_class_weights = [] max_output_vocab = max(len(obj["vocab"]) for obj in objectives) with tf.variable_scope("Inputs"): is_training = tf.placeholder(tf.bool, [], name="is_training") tf.add_to_collection(IS_TRAINING, is_training) for idx, feat in enumerate(features): input_placeholder = tf.placeholder( feature_dtype(feat), feature_shape(feat), name="input_placeholders_%d" % (idx,) ) input_placeholders.append(input_placeholder) tf.add_to_collection(INPUT_PLACEHOLDERS, input_placeholder) if fused: label_placeholder = tf.placeholder( tf.int32, [None, None, len(objectives)] ) labels_mask_placeholder = tf.placeholder( tf.bool, [None, None, len(objectives)], name="labels_mask" ) labels.append(label_placeholder) labels_mask.append(labels_mask_placeholder) tf.add_to_collection(LABEL_PLACEHOLDERS, label_placeholder) tf.add_to_collection(LABEL_MASK_PLACEHOLDERS, labels_mask_placeholder) if class_weights: with tf.variable_scope("FusedClassWeights"): init_class_weights = tf.get_variable( name="class_weights", shape=[len(objectives) * max_output_vocab], initializer=tf.constant_initializer(1), dtype=tf.int64, trainable=False) init_class_count = tf.get_variable( name="class_weights_denominator", shape=[len(objectives)], initializer=tf.constant_initializer(1), dtype=tf.int64, trainable=False) def update_class_weights(): mask_as_ints = tf.cast(tf.reshape(labels_mask_placeholder, [-1, len(objectives)]), tf.int64) updated_cls_weights = tf.scatter_add( init_class_weights, tf.reshape(label_placeholder + tf.reshape(tf.range(len(objectives)) * max_output_vocab, [1, 1, len(objectives)]), [-1]), tf.reshape(mask_as_ints, [-1]) ) updated_class_count = tf.assign_add(init_class_count, tf.reduce_sum(mask_as_ints, 0)) # class weight: weight_i = total / class_i weights = tf.clip_by_value(tf.expand_dims(updated_class_count, 1) / tf.reshape(updated_cls_weights, [len(objectives), max_output_vocab]), 1e-6, class_weights_clipval) return tf.cast(weights, tf.float32) def return_class_weights(): # class weight: weight_i = total / class_i return tf.cast( tf.clip_by_value(tf.expand_dims(init_class_count, 1) / tf.reshape(init_class_weights, [len(objectives), max_output_vocab]), 1e-6, class_weights_clipval), tf.float32) labels_class_weights.append( tf.cond(is_training, update_class_weights, return_class_weights)) else: labels_class_weights.append(None) else: for objective in objectives: with tf.variable_scope(objective["name"]): label_placeholder = tf.placeholder( tf.int32, [None, None], name="labels" ) labels.append(label_placeholder) if objective["type"] == "crf": labels_mask_placeholder = tf.placeholder( tf.bool, [None], name="labels_mask" ) labels_class_weights.append(None) elif objective["type"] == "softmax": labels_mask_placeholder = tf.placeholder( tf.bool, [None, None], name="labels_mask" ) if class_weights: init_class_weights = tf.get_variable( name="class_weights", shape=len(objective["vocab"]), initializer=tf.constant_initializer(1), dtype=tf.int64, trainable=False) init_class_count = tf.get_variable( name="class_weights_denominator", shape=[], initializer=tf.constant_initializer(1), dtype=tf.int64, trainable=False) def update_class_weights(): mask_as_ints = tf.cast(tf.reshape(labels_mask_placeholder, [-1]), tf.int64) updated_cls_weights = tf.scatter_add( init_class_weights, tf.reshape(label_placeholder, [-1]), mask_as_ints ) updated_class_count = 
tf.assign_add(init_class_count, tf.reduce_sum(mask_as_ints)) # class weight: weight_i = total / class_i weights = tf.clip_by_value(updated_class_count / updated_cls_weights, 1e-6, class_weights_clipval) return tf.cast(weights, tf.float32) def return_class_weights(): # class weight: weight_i = total / class_i return tf.cast( tf.clip_by_value(init_class_count / init_class_weights, 1e-6, class_weights_clipval), tf.float32) labels_class_weights.append( tf.cond(is_training, update_class_weights, return_class_weights) ) else: labels_class_weights.append(None) else: raise ValueError( "unknown objective type %r." % ( objective["type"] ) ) labels_mask.append(labels_mask_placeholder) tf.add_to_collection(LABEL_PLACEHOLDERS, label_placeholder) tf.add_to_collection(LABEL_MASK_PLACEHOLDERS, labels_mask_placeholder) sequence_lengths = tf.placeholder(tf.int32, [None], name="sequence_lengths") tf.add_to_collection(SEQUENCE_LENGTHS, sequence_lengths) return (input_placeholders, labels, labels_mask, labels_class_weights, sequence_lengths, is_training) def add_weight_noise(x, is_training, stddev): return tf.cond(is_training, lambda: x + tf.random_normal( shape=tf.shape(x), stddev=stddev), lambda: x) def build_recurrent(inputs, cudnn, faux_cudnn, hidden_sizes, is_training, keep_prob, weight_noise): dtype = tf.float32 if cudnn: if len(hidden_sizes) == 0: raise ValueError("hidden_sizes must be a list of length > 1.") hidden_size = hidden_sizes[0] if any(hidden_size != hsize for hsize in hidden_sizes): raise ValueError("cudnn RNN requires all hidden units " "to be the same size (got %r)" % ( hidden_sizes, )) num_layers = len(hidden_sizes) cell_input_size = inputs.get_shape()[-1].value est_size = estimate_cudnn_parameter_size( num_layers=num_layers, hidden_size=hidden_size, input_size=cell_input_size, input_mode="linear_input", direction="bidirectional" ) # autoswitch to GPUs based on availability of alternatives: cudnn_params = tf.get_variable("RNNParams", shape=[est_size], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer()) if weight_noise > 0: cudnn_params = add_weight_noise(cudnn_params, stddev=weight_noise, is_training=is_training) if faux_cudnn: cudnn_cell = CpuCudnnLSTM(num_layers, hidden_size, cell_input_size, input_mode="linear_input", direction="bidirectional") else: cpu_cudnn_params(cudnn_params, num_layers=num_layers, hidden_size=hidden_size, cell_input_size=cell_input_size, input_mode="linear_input", direction="bidirectional") cudnn_cell = CudnnLSTM(num_layers, hidden_size, cell_input_size, input_mode="linear_input", direction="bidirectional") init_state = tf.fill( (2 * num_layers, tf.shape(inputs)[1], hidden_size), tf.constant(np.float32(0.0))) hiddens, output_h, output_c = cudnn_cell( inputs, input_h=init_state, input_c=init_state, params=cudnn_params, is_training=True) hiddens = maybe_dropout( hiddens, keep_prob, is_training) else: cell = MultiRNNCell( [LSTMCell(hsize, is_training=is_training, keep_prob=keep_prob) for hsize in hidden_sizes] ) hiddens, _ = bidirectional_dynamic_rnn( cell, inputs, time_major=True, dtype=dtype, swap_memory=True ) return hiddens def build_embed(inputs, features, index2words, keep_prob, is_training): embeddings = [] for idx, (values, feature, index2word) in enumerate(zip(inputs, features, index2words)): if requires_vocab(feature): with tf.variable_scope("embedding_%d" % (idx,)): embedding = embedding_lookup( values, dim=feature["dimension"], size=len(index2word), dtype=tf.float32, mask_negative=True ) embeddings.append(embedding) elif 
requires_character_convolution(feature): embeddings.append( character_convolution(values, feature) ) else: embeddings.append(tf.expand_dims(values, 2)) return maybe_dropout(concat(embeddings, axis=2), keep_prob, is_training) def crf_metrics(unary_scores, labels, transition_params, sequence_lengths, mask): """ Computes CRF output metrics. Receives: unary_scores : batch-major order labels : batch-major order transition_params : nclasses x nclasses matrix. sequence_lengths : length of each time-sequence mask : batch-major example mask Returns: token_correct, token_correct_total, sentence_correct, sentence_correct_total """ classes = unary_scores.get_shape()[-1].value decoded, scores = viterbi_decode(unary_scores, transition_params, sequence_lengths) tf.add_to_collection(UNARY_SCORES, unary_scores) tf.add_to_collection(DECODED, decoded) tf.add_to_collection(DECODED_SCORES, scores) equals_label = tf.equal(labels, decoded) token_correct = tf.reduce_sum( tf.cast( tf.logical_and(equals_label, mask), tf.int32 ) ) token_correct_total = tf.reduce_sum(tf.cast(mask, tf.int32)) tf.add_to_collection(TOKEN_CORRECT, token_correct) tf.add_to_collection(TOKEN_CORRECT_TOTAL, token_correct_total) sentence_correct, _ = compute_sentence_correct(equals_label, mask) sentence_correct_total = tf.reduce_sum(tf.cast(mask[:, 0], tf.int32)) tf.add_to_collection(SENTENCE_CORRECT, sentence_correct) tf.add_to_collection(SENTENCE_CORRECT_TOTAL, sentence_correct_total) build_true_false_positives(decoded, mask, labels, classes, equals_label) return (token_correct, token_correct_total, sentence_correct, sentence_correct_total) def build_true_false_positives(decoded, mask_batch_major, labels_batch_major, classes, equals_label): masked_equals_label = tf.logical_and(equals_label, mask_batch_major) # now for each class compute tp, fp, fn # [nclasses x batch x time] masked_per_class = tf.logical_and( tf.equal(labels_batch_major[None, :, :], tf.range(classes)[:, None, None]), mask_batch_major) # correct, and on label correct = tf.reduce_sum(tf.cast(tf.logical_and(masked_per_class, equals_label[None, :, :]), tf.int32), axis=[1, 2]) # predicted a particular class guessed = tf.reduce_sum(tf.cast(tf.logical_and(tf.equal(decoded[None, :, :], tf.range(classes)[:, None, None]), mask_batch_major), tf.int32), axis=[1, 2]) total = tf.reduce_sum(tf.cast(masked_per_class, tf.int32), axis=[1, 2]) tp, fp, fn = correct, guessed - correct, total - correct tf.add_to_collection(TRUE_POSITIVES, tp) tf.add_to_collection(FALSE_POSITIVES, fp) tf.add_to_collection(FALSE_NEGATIVES, fn) def softmax_metrics(unary_scores, labels, mask): """ Compute softmax output stats for correct/accuracy per-token/per-sentence. 
Receive unary_scores : time-major labels : time-major mask : time-major Returns: token_correct, token_correct_total, sentence_correct, sentence_correct_total """ classes = unary_scores.get_shape()[-1].value unary_scores_batch_major = tf.transpose(unary_scores, [1, 0, 2]) labels_batch_major = tf.transpose(labels, [1, 0]) mask_batch_major = tf.transpose(mask, [1, 0]) decoded = tf.cast(tf.argmax(unary_scores_batch_major, 2), labels.dtype) unary_probs_batch_major = tf.nn.softmax(unary_scores_batch_major) scores = tf.reduce_max(unary_probs_batch_major, 2) tf.add_to_collection(UNARY_SCORES, unary_probs_batch_major) tf.add_to_collection(DECODED, decoded) tf.add_to_collection(DECODED_SCORES, scores) equals_label = tf.equal(decoded, labels_batch_major) token_correct = tf.reduce_sum( tf.cast( tf.logical_and( equals_label, mask_batch_major ), tf.int32 ) ) token_correct_total = tf.reduce_sum(tf.cast(mask, tf.int32)) tf.add_to_collection(TOKEN_CORRECT, token_correct) tf.add_to_collection(TOKEN_CORRECT_TOTAL, token_correct_total) sentence_correct, sentence_correct_total = compute_sentence_correct( equals_label, mask_batch_major ) tf.add_to_collection(SENTENCE_CORRECT, sentence_correct) tf.add_to_collection(SENTENCE_CORRECT_TOTAL, sentence_correct_total) build_true_false_positives(decoded, mask_batch_major, labels_batch_major, classes, equals_label) return (token_correct, token_correct_total, sentence_correct, sentence_correct_total) def add_objective_names_types(objectives): for objective in objectives: with tf.variable_scope(objective["name"]): # store objective names in graph: tf.add_to_collection(OBJECTIVE_NAMES, tf.constant(objective["name"], name="objective_name") ) tf.add_to_collection(OBJECTIVE_TYPES, tf.constant(objective["type"], name="objective_type") ) def build_loss(inputs, objectives, labels, labels_mask, labels_class_weights, fused, sequence_lengths, class_weights_normalize): """ Compute loss function given the objectives. Assumes inputs are of the form [time, batch, features]. 
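# --- Added explanatory note (not in the original source) ---
# In the fused branch below, all objectives share one projection of width
# len(objectives) * max_output_vocab; the logits are reshaped to
# [time, batch, n_objectives, max_output_vocab] so a single
# sparse_softmax_cross_entropy_with_logits call scores every objective at
# once, and per-objective metrics are later sliced back out with
# unary_scores[:, :, idx, :len(objective["vocab"])].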
Arguments: ---------- inputs : tf.Tensor objectives : list<dict>, objective specs labels : list<tf.Tensor> labels_mask : list<tf.Tensor> labels_class_weights : list<tf.Tensor> sequence_lengths : tf.Tensor Returns: loss : tf.Tensor (scalar) """ losses = [] negative_log_likelihoods = [] sentence_corrects = [] sentence_corrects_total = [] token_corrects = [] token_corrects_total = [] max_output_vocab = max(len(obj["vocab"]) for obj in objectives) total_output_size = len(objectives) * max_output_vocab add_objective_names_types(objectives) if fused: with tf.variable_scope("FusedOutputs"): objective_labels = labels[0] mask = labels_mask[0] objective_class_weights = labels_class_weights[0] # perform all classifications at once: unary_scores = tf.contrib.layers.fully_connected( inputs, total_output_size, activation_fn=None ) unary_scores = tf.reshape(unary_scores, [tf.shape(unary_scores)[0], tf.shape(unary_scores)[1], len(objectives), max_output_vocab]) negative_log_likelihood = sparse_softmax_cross_entropy_with_logits( logits=unary_scores, labels=objective_labels ) labels_mask_casted = tf.cast(mask, negative_log_likelihood.dtype) masked_negative_log_likelihood = negative_log_likelihood * labels_mask_casted if objective_class_weights is not None: class_weights_mask = tf.gather( tf.reshape(objective_class_weights, [-1]), objective_labels + tf.reshape(tf.range(len(objectives)) * max_output_vocab, [1, 1, len(objectives)])) if class_weights_normalize: masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted * class_weights_mask), 1e-6) normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives)) else: masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6) normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives)) else: masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6) normed_loss = masked_weighed_negative_log_likelihood_sum / (num_predictions / len(objectives)) masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood) losses.append(normed_loss) negative_log_likelihoods.append(masked_negative_log_likelihood_sum) for idx, objective in enumerate(objectives): with tf.variable_scope(objective["name"]): (token_correct, token_correct_total, sentence_correct, sentence_correct_total) = softmax_metrics(unary_scores[:, :, idx, :len(objective["vocab"])], labels=objective_labels[:, :, idx], mask=mask[:, :, idx]) token_corrects.append(token_correct) token_corrects_total.append(token_correct_total) sentence_corrects.append(sentence_correct) sentence_corrects_total.append(sentence_correct_total) else: for objective, objective_labels, mask, objective_class_weights in zip(objectives, labels, labels_mask, labels_class_weights): with tf.variable_scope(objective["name"]): if objective["type"] == "crf": unary_scores = tf.contrib.layers.fully_connected( inputs, len(objective["vocab"]), activation_fn=None ) unary_scores_batch_major = tf.transpose(unary_scores, [1, 0, 2]) labels_batch_major = tf.transpose(objective_labels, [1, 0]) padded_unary_scores_batch_major = tf.cond(tf.greater(tf.shape(unary_scores_batch_major)[1], 1), lambda: unary_scores_batch_major, lambda: tf.pad(unary_scores_batch_major, [[0, 0], [0, 1], [0, 0]])) 
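# --- Added explanatory note (not in the original source) ---
# The tf.cond padding above (and the matching label padding that follows)
# appends a dummy time step whenever the batch's maximum sequence length is
# 1, presumably because crf_log_likelihood needs at least two time steps to
# score transitions; the extra step does not affect the loss since
# sequence_lengths still reports the true lengths.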
padded_labels_batch_major = tf.cond(tf.greater(tf.shape(labels_batch_major)[1], 1), lambda: labels_batch_major, lambda: tf.pad(labels_batch_major, [[0, 0], [0, 1]])) log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood( padded_unary_scores_batch_major, padded_labels_batch_major, sequence_lengths ) labels_mask_casted = tf.cast(mask, log_likelihood.dtype) masked_log_likelihood = ( log_likelihood * labels_mask_casted ) masked_negative_log_likelihood_sum = -tf.reduce_sum(masked_log_likelihood) num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6) losses.append(masked_negative_log_likelihood_sum / num_predictions) negative_log_likelihoods.append(masked_negative_log_likelihood_sum) sequence_mask = tf.logical_and( tf.sequence_mask(sequence_lengths), # pad the time dimension: tf.expand_dims(mask, 1) ) (token_correct, token_correct_total, sentence_correct, sentence_correct_total) = crf_metrics(unary_scores_batch_major, labels=labels_batch_major, mask=sequence_mask, transition_params=transition_params, sequence_lengths=sequence_lengths) elif objective["type"] == 'softmax': unary_scores = tf.contrib.layers.fully_connected( inputs, len(objective["vocab"]), activation_fn=None ) negative_log_likelihood = sparse_softmax_cross_entropy_with_logits( logits=unary_scores, labels=objective_labels ) labels_mask_casted = tf.cast(mask, negative_log_likelihood.dtype) masked_negative_log_likelihood = ( negative_log_likelihood * labels_mask_casted ) if objective_class_weights is not None: class_weights_mask = tf.gather(objective_class_weights, objective_labels) masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood * class_weights_mask masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood) if class_weights_normalize: num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted * class_weights_mask), 1e-6) normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions else: num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6) normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions else: masked_weighed_negative_log_likelihood_sum = masked_negative_log_likelihood masked_negative_log_likelihood_sum = tf.reduce_sum(masked_negative_log_likelihood) num_predictions = tf.maximum(tf.reduce_sum(labels_mask_casted), 1e-6) normed_loss = masked_weighed_negative_log_likelihood_sum / num_predictions losses.append(normed_loss) negative_log_likelihoods.append(masked_negative_log_likelihood_sum) (token_correct, token_correct_total, sentence_correct, sentence_correct_total) = softmax_metrics(unary_scores, labels=objective_labels, mask=mask) else: raise ValueError( "unknown objective type %r" % (objective["type"],) ) token_corrects.append(token_correct) token_corrects_total.append(token_correct_total) sentence_corrects.append(sentence_correct) sentence_corrects_total.append(sentence_correct_total) # aggregate metrics for all objectives: total_loss = tf.reduce_sum(sum_list(losses)) tf.summary.scalar("BatchLoss", total_loss) neg_log_likelihood_total = sum_list(negative_log_likelihoods) tf.summary.scalar("BatchNLL", neg_log_likelihood_total) tf.add_to_collection(NLL, neg_log_likelihood_total) tf.add_to_collection(NLL_TOTAL, tf.shape(inputs)[1]) sentence_corrects_total = sum_list(sentence_corrects_total) sentence_corrects = sum_list(sentence_corrects) tf.add_to_collection(SENTENCE_CORRECT_ALL, sentence_corrects) tf.add_to_collection(SENTENCE_CORRECT_ALL_TOTAL, sentence_corrects_total) token_corrects_total = 
sum_list(token_corrects_total) token_corrects = sum_list(token_corrects) tf.add_to_collection(TOKEN_CORRECT_ALL, token_corrects) tf.add_to_collection(TOKEN_CORRECT_ALL_TOTAL, token_corrects_total) return total_loss def build_model(name, trainable, features, feature_index2words, objectives, keep_prob, input_keep_prob, hidden_sizes, freeze_rate, freeze_rate_anneal, solver, cudnn, fused, faux_cudnn, class_weights, class_weights_normalize, class_weights_clipval, lr, weight_noise, anneal_rate, clip_norm): # mixed output fusing is currently unsupported if fused and any(obj["type"] != "softmax" for obj in objectives): raise ValueError("cannot fuse outputs and use non-softmax output.") # clear all existing collections to ensure every new collection is # is created fresh graph = tf.get_default_graph() for collection_name in graph.get_all_collection_keys(): graph.clear_collection(collection_name) # build a model under the model's name to prevent collisions # when multiple models are restored simultaneously with tf.variable_scope(name): global_step = tf.Variable(0, trainable=False, name="global_step") tf.add_to_collection(GLOBAL_STEP, global_step) # model placeholders: (input_placeholders, labels, labels_mask, labels_class_weights, sequence_lengths, is_training) = build_inputs(features, objectives=objectives, fused=fused, class_weights=class_weights, class_weights_clipval=class_weights_clipval) embed = build_embed(input_placeholders, features=features, index2words=feature_index2words, is_training=is_training, keep_prob=input_keep_prob) hiddens = embed if len(hidden_sizes) > 0: hiddens = build_recurrent(hiddens, cudnn=cudnn, faux_cudnn=faux_cudnn, hidden_sizes=hidden_sizes, keep_prob=keep_prob, weight_noise=weight_noise, is_training=is_training) loss = build_loss(hiddens, objectives=objectives, fused=fused, labels=labels, labels_mask=labels_mask, labels_class_weights=labels_class_weights, class_weights_normalize=class_weights_normalize, sequence_lengths=sequence_lengths) if trainable: learning_rate = tf.train.exponential_decay(lr, global_step, 33000, anneal_rate, staircase=True) if solver == "adam": optimizer = LazyAdamOptimizer(learning_rate) elif solver == "sgd": optimizer = tf.train.GradientDescentOptimizer(learning_rate) else: raise ValueError("Unknown solver %r." % (solver)) grad_vars = optimizer.compute_gradients(loss) if clip_norm > 0: grad_vars = [(grad if isinstance(grad, tf.IndexedSlices) else tf.clip_by_norm(grad, clip_norm), var) for grad, var in grad_vars] train_op = optimizer.apply_gradients(grad_vars, global_step=global_step) else: train_op = tf.no_op() tf.add_to_collection(TRAIN_OP, train_op) tf.add_to_collection(TRAIN_SUMMARIES, tf.summary.merge_all()) def restore_session(session, path, replace_to=None, replace_from=None, verbose=False, use_metagraph=True, only_features=False): """ Call restore on tf.train.Saver on a specific path to store all the variables of the current tensorflow session to a file for later restoring. Arguments: session : tf.Session path : str, place containing the session data to restore verbose : bool, print status messages. use_metagraph : bool, restore by re-creating saved metagraph. 
Returns: bool : success or failure of the restoration """ makedirs(path, exist_ok=True) if not path.endswith("/"): path = path + "/" checkpoint = tf.train.get_checkpoint_state(path) if verbose: print("Looking for saved session under %r" % (path,), flush=True) if checkpoint is None or checkpoint.model_checkpoint_path is None: if verbose: print("No saved session found", flush=True) return False fname = basename(checkpoint.model_checkpoint_path) if verbose: print("Restoring saved session from %r" % (join(path, fname),), flush=True) if use_metagraph: param_saver = tf.train.import_meta_graph(join(path, fname + ".meta"), clear_devices=True) missing_vars = [] else: if only_features: to_restore = {} whitelist = ["embedding", "/RNN/", "/RNNParams", "CharacterConvolution", "HighwayLayer"] for var in tf.global_variables(): if any(keyword in var.name for keyword in whitelist): to_restore[var.name[:-2]] = var param_saver = tf.train.Saver(to_restore) else: if replace_to is not None and replace_from is not None: to_restore = {} for var in tf.global_variables(): var_name = var.name[:var.name.rfind(":")] old_name = var_name.replace(replace_to, replace_from) to_restore[old_name] = var param_saver = tf.train.Saver(to_restore) missing_vars = [] else: reader = tf.train.NewCheckpointReader(join(path, fname)) saved_shapes = reader.get_variable_to_shape_map() found_vars = [var for var in tf.global_variables() if var.name.split(':')[0] in saved_shapes] missing_vars = [var for var in tf.global_variables() if var.name.split(':')[0] not in saved_shapes] param_saver = tf.train.Saver(found_vars) param_saver.restore(session, join(path, fname)) session.run([var.initializer for var in missing_vars]) return True def bidirectional_dynamic_rnn(cell, inputs, dtype, time_major=True, swap_memory=False): with tf.variable_scope("forward"): out_fwd, final_fwd = tf.nn.dynamic_rnn( cell, inputs, time_major=time_major, dtype=dtype, swap_memory=swap_memory ) if time_major: reverse_axis = 0 else: reverse_axis = 1 with tf.variable_scope("backward"): out_bwd, final_bwd = tf.nn.dynamic_rnn( cell, reverse(inputs, axis=reverse_axis), time_major=time_major, dtype=dtype, swap_memory=swap_memory ) out_bwd = reverse(out_bwd, axis=reverse_axis) return concat([out_fwd, out_bwd], axis=2), (final_fwd, final_bwd) def get_embedding_lookup(size, dim, dtype, reuse=None, trainable=True): with tf.variable_scope(tf.get_variable_scope(), reuse=reuse): W = tf.get_variable( name="embedding", shape=[size, dim], dtype=dtype, initializer=tf.random_uniform_initializer( -1.0 / math.sqrt(dim), 1.0 / math.sqrt(dim) ), trainable=trainable ) return W def embedding_lookup(inputs, size, dim, dtype, reuse=None, mask_negative=False, trainable=True, place_on_cpu_if_big=True): """ Construct an Embedding layer that gathers elements from a matrix with `size` rows, and `dim` features using the indices stored in `x`. Arguments: ---------- inputs : tf.Tensor, of integer type size : int, how many symbols in the lookup table dim : int, how many columns per symbol. dtype : data type for the lookup table (e.g. tf.float32) reuse : bool, (default None) whether the lookup table was already used before (thus this is weight sharing). mask_negative : bool, (default False) should -1s in the lookup input indicate padding (e.g. no lookup), and thus should those values be masked out post-lookup. trainable : bool (default True), whether the parameters of this lookup table can be backpropagated into (e.g. for Glove word vectors that are fixed pre-trained, this can be set to False). 
place_on_cpu_if_big : bool, if matrix is big, store it on cpu. Returns: -------- tf.Tensor, result of tf.nn.embedding_lookup(LookupTable, inputs) """ W = get_embedding_lookup(size, dim, dtype, reuse, trainable=trainable) if mask_negative: embedded = tf.nn.embedding_lookup(W, tf.maximum(inputs, 0)) null_mask = tf.expand_dims( tf.cast( tf.not_equal(inputs, -1), dtype ), -1 ) return embedded * null_mask else: return tf.nn.embedding_lookup(W, inputs) def _get_sharded_variable(name, shape, dtype, num_shards): """Get a list of sharded variables with the given dtype.""" if num_shards > shape[0]: raise ValueError("Too many shards: shape=%s, num_shards=%d" % (shape, num_shards)) unit_shard_size = int(math.floor(shape[0] / num_shards)) remaining_rows = shape[0] - unit_shard_size * num_shards shards = [] for i in range(num_shards): current_size = unit_shard_size if i < remaining_rows: current_size += 1 shards.append( tf.get_variable( name + "_%d" % i, [current_size] + shape[1:], dtype=dtype ) ) return shards def _get_concat_variable(name, shape, dtype, num_shards): """Get a sharded variable concatenated into one tensor.""" sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards) if len(sharded_variable) == 1: return sharded_variable[0] concat_name = name + "/concat" concat_full_name = tf.get_variable_scope().name + "/" + concat_name + ":0" for value in tf.get_collection(tf.GraphKeys.CONCATENATED_VARIABLES): if value.name == concat_full_name: return value concat_variable = tf.concat_v2(sharded_variable, 0, name=concat_name) tf.add_to_collection(tf.GraphKeys.CONCATENATED_VARIABLES, concat_variable) return concat_variable class SequenceModel(object): def __init__(self, objectives, features, feature_index2words, hidden_sizes, keep_prob, lr, solver, seed=1234, input_keep_prob=0.7, clip_norm=-1, name="SequenceTagger", cudnn=False, anneal_rate=0.99, trainable=True, weight_noise=0.0, class_weights_normalize=False, faux_cudnn=False, class_weights=False, class_weights_clipval=1000.0, freeze_rate=1.0, fused=False, freeze_rate_anneal=0.8, create_variables=True): if fused and objectives[0]["type"] == "crf": fused = False self.keep_prob = keep_prob self.input_keep_prob = input_keep_prob self.hidden_sizes = hidden_sizes self.name = name self.objectives = objectives self.features = features self.feature_index2words = feature_index2words self.seed = seed self.lr = lr self.fused = fused self.weight_noise = weight_noise self.anneal_rate = anneal_rate self.clip_norm = clip_norm self.solver = solver self.class_weights_normalize = class_weights_normalize self.class_weights = class_weights self.class_weights_clipval = class_weights_clipval self.rng = np.random.RandomState(seed) self.cudnn = cudnn self.feature_word2index = [ {w: k for k, w in enumerate(index2word)} if index2word is not None else None for index2word in self.feature_index2words ] self.label2index = [ {w: k for k, w in enumerate(objective["vocab"])} for objective in self.objectives ] if create_variables: # 1) build graph here (TF functional code pattern) build_model(name=self.name, trainable=trainable, objectives=self.objectives, features=self.features, feature_index2words=self.feature_index2words, hidden_sizes=self.hidden_sizes, keep_prob=self.keep_prob, solver=self.solver, freeze_rate=freeze_rate, class_weights_normalize=self.class_weights_normalize, class_weights=self.class_weights, class_weights_clipval=self.class_weights_clipval, freeze_rate_anneal=freeze_rate_anneal, cudnn=self.cudnn, lr=self.lr, fused=self.fused, 
weight_noise=self.weight_noise, anneal_rate=self.anneal_rate, input_keep_prob=self.input_keep_prob, faux_cudnn=faux_cudnn, clip_norm=self.clip_norm) # 2) and use meta graph to recover these fields: self.recover_graph_variables() def recover_graph_variables(self): """Use TF meta graph to obtain key metrics and outputs from model.""" self.labels = tf.get_collection(LABEL_PLACEHOLDERS) self.labels_mask = tf.get_collection(LABEL_MASK_PLACEHOLDERS) self.input_placeholders = tf.get_collection(INPUT_PLACEHOLDERS) self.sequence_lengths = tf.get_collection(SEQUENCE_LENGTHS)[0] self.decoded = tf.get_collection(DECODED) self.decoded_scores = tf.get_collection(DECODED_SCORES) self.unary_scores = tf.get_collection(UNARY_SCORES) self.token_correct = tf.get_collection(TOKEN_CORRECT) self.token_correct_total = tf.get_collection(TOKEN_CORRECT_TOTAL) self.sentence_correct = tf.get_collection(SENTENCE_CORRECT) self.sentence_correct_total = tf.get_collection(SENTENCE_CORRECT_TOTAL) self.token_correct_all = tf.get_collection(TOKEN_CORRECT_ALL)[0] self.token_correct_all_total = tf.get_collection(TOKEN_CORRECT_ALL_TOTAL)[0] self.sentence_correct_all = tf.get_collection(SENTENCE_CORRECT_ALL)[0] self.sentence_correct_all_total = tf.get_collection(SENTENCE_CORRECT_ALL_TOTAL)[0] self.true_positives = tf.get_collection(TRUE_POSITIVES) self.false_positives = tf.get_collection(FALSE_POSITIVES) self.false_negatives = tf.get_collection(FALSE_NEGATIVES) if len(self.true_positives) == 0 and len(self.token_correct) != 0: self.true_positives = [None for _ in self.token_correct] self.false_positives = [None for _ in self.token_correct] self.false_negatives = [None for _ in self.token_correct] if len(tf.get_collection(GLOBAL_STEP)) > 0: self.global_step = tf.get_collection(GLOBAL_STEP)[0] else: try: self.global_step = tf.get_default_graph().get_tensor_by_name( self.name + "/" + "global_step:0") except KeyError: self.global_step = tf.Variable(0, trainable=False, name="global_step") tf.add_to_collection(GLOBAL_STEP, self.global_step) self.is_training = tf.get_collection(IS_TRAINING)[0] self.noop = tf.no_op() self.train_op = tf.get_collection(TRAIN_OP)[0] train_summaries = tf.get_collection(TRAIN_SUMMARIES) self.train_summaries = train_summaries[0] if len(train_summaries) > 0 else None self.nll = tf.get_collection(NLL)[0] self.nll_total = tf.get_collection(NLL_TOTAL)[0] self.saver = tf.train.Saver() @classmethod def overrideable_fields(cls): return [ "keep_prob", "name", "lr", "clip_norm", "class_weights_normalize", "class_weights_clipval", "cudnn", "anneal_rate", "weight_noise", "input_keep_prob" ] @classmethod def fields_to_save(cls): return [ "hidden_sizes", "objectives", "name", "cudnn", "class_weights", "features", "fused", "class_weights_normalize", "weight_noise", "anneal_rate", "feature_index2words", "solver", "lr", "clip_norm", "keep_prob", "input_keep_prob", "class_weights_clipval" ] def predict(self, session, feed_dict): feed_dict[self.is_training] = False outputs, outputs_probs = session.run( (self.decoded, self.decoded_scores), feed_dict ) predictions_out = {} for value, val_prob, objective in zip(outputs, outputs_probs, self.objectives): predictions_out[objective["name"]] = (value, val_prob) return predictions_out def predict_proba(self, session, feed_dict): feed_dict[self.is_training] = False outputs = session.run( self.unary_scores, feed_dict ) predictions_out = {} for value, objective in zip(outputs, self.objectives): predictions_out[objective["name"]] = value return predictions_out def save(self, session, path): 
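# Added comment (not in the original source): save() persists three
# artifacts side by side under `path`: model.json with the constructor
# fields listed in fields_to_save(), rng.pkl with the pickled NumPy
# RandomState, and a TensorFlow checkpoint written via save_session().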
makedirs(path, exist_ok=True) with open(join(path, "model.json"), "wt") as fout: save_dict = {} for field in type(self).fields_to_save(): save_dict[field] = getattr(self, field) json.dump(save_dict, fout) with open(join(path, "rng.pkl"), "wb") as fout: pickle.dump(self.rng, fout) save_session(session, self.saver, path, verbose=True) @classmethod def load(cls, session, path, args=None, verbose=True, trainable=True, rebuild_graph=False, faux_cudnn=False, replace_to=None, replace_from=None): """Convenience method for using a tensorflow session to reload a previously saved + serialized model from disk.""" with open(join(path, "model.json"), "rt") as fin: model_props = json.load(fin) # update fields based on CLI: if args is not None: ex_fields = explicitly_set_fields() for field in cls.overrideable_fields(): if field in ex_fields: model_props[field] = getattr(args, field) # prune old fields based on changes to saveable fields: relevant_props = {} for field in cls.fields_to_save(): if field in model_props: relevant_props[field] = model_props[field] relevant_props["trainable"] = trainable relevant_props["faux_cudnn"] = faux_cudnn if rebuild_graph: print("Using rebuild_graph mode: creating a new graph.", flush=True) relevant_props["create_variables"] = True model = cls(**relevant_props) restore_session( session, path, replace_to=replace_to, replace_from=replace_from, verbose=verbose, use_metagraph=False ) else: if model_props.get("cudnn", False): import tensorflow.contrib.cudnn_rnn relevant_props["create_variables"] = False restore_session( session, path, verbose=verbose, use_metagraph=True ) model = cls(**relevant_props) rng_path = join(path, "rng.pkl") if exists(rng_path): # apply the saved random number generator to this # model: with open(rng_path, "rb") as fin: model.rng = pickle.load(fin) return model def make_path_absolute(obj, basepath): copied = obj.copy() for key in ["path", "vocab"]: if key in copied: copied[key] = join(basepath, copied[key]) return copied class Config(object): def __init__(self, datasets, features, objectives, wikidata_path, classification_path): assert(len(features) > 0) self.datasets = datasets self.features = features self.objectives = objectives self.classifications = None self.wikidata_path = wikidata_path self.classification_path = classification_path # build the objective names: self._named_objectives = [obj["name"] for obj in self.objectives] @classmethod def load(cls, path): with open(path, "rt") as fin: config = json.load(fin) config_dirname = dirname(path) return cls( datasets=[make_path_absolute(dataset, config_dirname) for dataset in config['datasets']], features=[make_path_absolute(feat, config_dirname) for feat in config['features']], objectives=[make_path_absolute(objective, config_dirname) for objective in config['objectives']], wikidata_path=config.get("wikidata_path", None), classification_path=( join(config_dirname, config.get("classification_path", None)) if "classification_path" in config else None) ) def load_dataset_separate(self, dataset_type): paths = [dataset for dataset in self.datasets if dataset["type"] == dataset_type] all_examples = {} for dataset in paths: _, extension = splitext(dataset["path"]) if extension == ".h5" or extension == ".hdf5": if self.classifications is None: if self.wikidata_path is None or self.classification_path is None: raise ValueError("missing wikidata_path and " "classification_path, cannot " "construct H5Dataset.") self.classifications = ClassificationHandler( self.wikidata_path, self.classification_path ) 
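# Added comment (not in the original source): the ClassificationHandler is
# constructed lazily, only when the first HDF5 dataset is encountered, and
# the same instance is then shared by every subsequent H5Dataset built from
# this config.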
examples = H5Dataset( dataset["path"], dataset["x"], dataset["y"], self._named_objectives, ignore_value=dataset.get('ignore', None), classifications=self.classifications) else: examples = TSVDataset( dataset["path"], dataset["x"], dataset["y"], self._named_objectives, comment=dataset.get('comment', '#'), ignore_value=dataset.get('ignore', None), retokenize=dataset.get('retokenize', False)) title = dataset["path"].split('/')[-1].split(".")[0] name = title iteration = 1 while name in all_examples: name = title + "-%d" % (iteration,) iteration += 1 all_examples[name] = examples return all_examples def load_dataset(self, dataset_type, merge=True): datasets = self.load_dataset_separate(dataset_type) if merge: return CombinedDataset(list(datasets.values())) return datasets def boolean_argument(parser, name, default): parser.add_argument("--" + name, action="store_true", default=default) parser.add_argument("--no" + name, action="store_false", dest=name) def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument('config', type=str) parser.add_argument('--lr', type=float, default=0.001) parser.add_argument('--anneal_rate', type=float, default=0.99) parser.add_argument('--clip_norm', type=float, default=-1) parser.add_argument('--weight_noise', type=float, default=0.0) parser.add_argument('--hidden_sizes', type=int, nargs="*", default=[200, 200]) parser.add_argument('--load_dir', type=str, default=None) parser.add_argument('--restore_input_features', type=str, default=None) parser.add_argument('--improvement_key', type=str, default="token_correct") parser.add_argument('--freeze_rate', type=float, default=1.0) parser.add_argument('--freeze_rate_anneal', type=float, default=0.8) parser.add_argument('--save_dir', type=str, default=None) parser.add_argument('--max_epochs', type=int, default=1000) parser.add_argument('--test_every', type=int, default=10000, help="Number of training iterations after which testing should occur.") parser.add_argument('--batch_size', type=int, default=128) parser.add_argument('--max_patience', type=int, default=10) parser.add_argument('--class_weights_clipval', type=float, default=1000.0) parser.add_argument('--device', type=str, default="gpu:0") parser.add_argument('--keep_prob', type=float, default=0.5) parser.add_argument('--input_keep_prob', type=float, default=0.7) parser.add_argument('--solver', type=str, default="adam", choices=["adam", "sgd"]) parser.add_argument("--name", type=str, default="SequenceTagger") parser.add_argument("--old_name", type=str, default=None) boolean_argument(parser, "cudnn", True) boolean_argument(parser, "faux_cudnn", False) boolean_argument(parser, "class_weights", False) boolean_argument(parser, "rebuild_graph", False) boolean_argument(parser, "class_weights_normalize", False) boolean_argument(parser, "fused", True) boolean_argument(parser, "report_metrics_per_axis", True) boolean_argument(parser, "report_class_f1", False) return parser.parse_args(args=args) def get_vocab(dataset, max_vocab=-1, extra_words=None): index2word = [] occurrence = {} for el in dataset: if el not in occurrence: index2word.append(el) occurrence[el] = 1 else: occurrence[el] += 1 index2word = sorted(index2word, key=lambda x: occurrence[x], reverse=True) if max_vocab > 0: index2word = index2word[:max_vocab] if extra_words is not None: index2word = extra_words + index2word return index2word def 
get_objectives(objectives, dataset): out = [] for obj_idx, objective in enumerate(objectives): if "vocab" in objective: with open(objective["vocab"], "rt") as fin: vocab = fin.read().splitlines() else: vocab = get_vocab((w[obj_idx] for _, y in dataset for w in y if w[obj_idx] is not None), -1) out.append( { "vocab": vocab, "type": objective["type"], "name": objective["name"] } ) return out def merge_all_metrics(metrics): out = {} for key, metric in metrics.items(): for subkey, submetric in metric.items(): if len(key) > 0: out[key + "_" + subkey] = submetric if subkey not in out: out[subkey] = submetric else: out[subkey] += submetric else: out[subkey] = submetric return out def log_outcome(logger, outcome, step, name): for k, v in sorted(outcome.items()): if "total" in k: continue else: total = outcome[k + "_total"] if total == 0: continue logger.log(k, v / total, step=step) logger.writer.flush() def compute_f1(metrics, objectives, report_class_f1): total_f1 = 0.0 total_precision = 0.0 total_recall = 0.0 total = 0 for objective in objectives: name = objective["name"] key = "%s_true_positives" % (name,) if key not in metrics: continue tp = metrics[key] fp = metrics["%s_false_positives" % (name,)] fn = metrics["%s_false_negatives" % (name,)] del metrics[key] del metrics["%s_false_positives" % (name,)] del metrics["%s_false_negatives" % (name,)] precision = 1.* tp / np.maximum((tp + fp), 1e-6) recall = 1. * tp / np.maximum((tp + fn), 1e-6) f1 = 2.0 * precision * recall / np.maximum((precision + recall), 1e-6) support = tp + fn full_f1 = np.average(f1, weights=support) * 100.0 full_recall = np.average(recall, weights=support) * 100.0 full_precision = np.average(precision, weights=support) * 100.0 total_f1 += full_f1 total_recall += full_recall total_precision += full_precision total += 1 if report_class_f1: print("F1 %s: %r" % (name, full_f1)) print("Name\tF1\tTP\tFP\tFN") rows = zip([label for label, has_support in zip(objective["vocab"], support > 0) if has_support], f1, tp, fp, fn) for val, f1_val, val_tp, val_fp, val_fn in rows: print("%s\t%r\t%d\t%d\t%d" % ( val, f1_val, val_tp, val_fp, val_fn)) print("") if total > 0: metrics["F1"] = total_f1 metrics["recall"] = total_recall metrics["precision"] = total_precision metrics["F1_total"] = total metrics["recall_total"] = total metrics["precision_total"] = total def accuracy(model, session, datasets, batch_size, train, report_metrics_per_axis, report_class_f1, callback=None, callback_period=None, writer=None): pbar = get_progress_bar("train" if train else "validation", item="batches") if not isinstance(datasets, dict): datasets = {'':datasets} all_metrics_agg = {} if callback is not None: if callback_period is None: raise ValueError("callback_period cannot be None if " "callback is used.") else: callback_period = None if train: train_op = model.train_op else: train_op = model.noop is_training = model.is_training metrics = {"nll": model.nll, "nll_total": model.nll_total} summaries = [] if not train: metric_iter = zip( model.objectives, model.token_correct, model.token_correct_total, model.sentence_correct, model.sentence_correct_total, model.true_positives, model.false_positives, model.false_negatives ) for metric_vars in metric_iter: ( objective, token_correct, token_correct_total, sentence_correct, sentence_correct_total, true_positives, false_positives, false_negatives ) = metric_vars name = objective["name"] if report_metrics_per_axis: metrics["%s_token_correct" % (name,)] = token_correct metrics["%s_token_correct_total" % (name,)] = 
token_correct_total metrics["%s_sentence_correct" % (name,)] = sentence_correct metrics["%s_sentence_correct_total" % (name,)] = sentence_correct_total if true_positives is not None: metrics["%s_true_positives" % (name,)] = true_positives metrics["%s_false_positives" % (name,)] = false_positives metrics["%s_false_negatives" % (name,)] = false_negatives metrics["token_correct"] = model.token_correct_all metrics["token_correct_total"] = model.token_correct_all_total metrics["sentence_correct"] = model.sentence_correct_all metrics["sentence_correct_total"] = model.sentence_correct_all_total summaries = [] else: if writer is not None and model.train_summaries is not None: summaries = model.train_summaries metrics_values = [v for _, v in sorted(metrics.items())] metrics_names = [name for name, _ in sorted(metrics.items())] outputs_val = [train_op, model.global_step, summaries, metrics_values] for title, dataset in datasets.items(): batches = iter_batches_single_threaded( model=model, dataset=dataset, batch_size=batch_size, train=train, pbar=pbar ) metrics_agg = {} iteration = 0 for feed_dict in batches: feed_dict[is_training] = train _, step, summary_out, outputs = session.run(outputs_val, feed_dict) if writer is not None: writer.add_summary(summary_out, step) for key, value in zip(metrics_names, outputs[:len(metrics_names)]): if key not in metrics_agg: metrics_agg[key] = value else: metrics_agg[key] += value iteration += 1 if callback_period is not None and iteration % callback_period == 0: callback(iteration) if np.isnan(metrics_agg['nll']): print("loss is NaN.", flush=True, file=sys.stderr) sys.exit(1) compute_f1(metrics_agg, model.objectives, report_class_f1) all_metrics_agg[title] = metrics_agg del batches return merge_all_metrics(all_metrics_agg) def present_outcome(outcome, epoch, name): string_rows = [] for k, v in sorted(outcome.items()): if "total" in k: continue else: total = outcome[k + "_total"] if total == 0: continue if "correct" in k: string_rows.append( [ k, "%.2f%%" % (100.0 * v / total), "(%d correct / %d)" % (v, total) ] ) else: string_rows.append( [ k, "%.3f" % (v / total), "" ] ) max_len_cols = [ max(len(row[colidx]) for row in string_rows) for colidx in range(len(string_rows[0])) ] if len(string_rows) > 0 else [] rows = [] for row in string_rows: rows.append( " ".join( [col + " " * (max_len_cols[colidx] - len(col)) for colidx, col in enumerate(row)] ) ) return "\n".join(["Epoch {epoch}: {name}".format(epoch=epoch, name=name)] + rows) def print_outcome(outcome, objectives, epoch, step, name, logger=None): outcome_report = present_outcome(outcome, epoch, name) if logger is not None: log_outcome(logger, outcome, step, name) print(outcome_report) class SequenceTagger(object): def __init__(self, path, device="gpu", faux_cudnn=False, rebuild_graph=False): tf.reset_default_graph() session_conf = tf.ConfigProto( allow_soft_placement=True ) self.session = tf.InteractiveSession(config=session_conf) with tf.device(device): with warnings.catch_warnings(): warnings.simplefilter("ignore", UserWarning) self._model = SequenceModel.load( self.session, path, args=None, verbose=False, trainable=False, rebuild_graph=rebuild_graph, faux_cudnn=faux_cudnn ) @property def objectives(self): return self._model.objectives def predict_proba(self, tokens): blank_labels = tuple(None for _ in self._model.objectives) batches = list(iter_batches_single_threaded( model=self._model, dataset=[ (tokens, [blank_labels for t in tokens]) ], batch_size=1, train=False, autoresize=False )) outputs = [] 
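# ---------------------------------------------------------------------------
# [Illustrative aside -- not part of the original file.] A standalone sketch of
# the support-weighted F1 aggregation that compute_f1 above applies to the
# accumulated per-class true/false positive/negative counters. Kept as
# comments so it does not interfere with the surrounding method body; the toy
# counts are invented for the example.
# ---------------------------------------------------------------------------
# import numpy as np
#
# tp = np.array([90.0, 10.0, 5.0])   # true positives per class
# fp = np.array([10.0,  5.0, 0.0])   # false positives per class
# fn = np.array([ 5.0, 20.0, 5.0])   # false negatives per class
#
# precision = tp / np.maximum(tp + fp, 1e-6)
# recall    = tp / np.maximum(tp + fn, 1e-6)
# f1        = 2.0 * precision * recall / np.maximum(precision + recall, 1e-6)
# support   = tp + fn                       # gold-label frequency per class
# weighted_f1 = np.average(f1, weights=support) * 100.0   # value reported as "F1"
# ---------------------------------------------------------------------------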
batches[0][self._model.is_training] = False probs_out = self._model.predict_proba( self.session, batches[0] ) return probs_out def predict_proba_sentences(self, sentences): blank_labels = tuple(None for _ in self._model.objectives) batches = iter_batches_single_threaded( model=self._model, dataset=[ (sentence, [blank_labels for t in sentence]) for sentence in sentences ], batch_size=min(256, len(sentences)), train=False, autoresize=False ) for batch in batches: batch[self._model.is_training] = False yield self._model.predict_proba( self.session, batch ) def predict_topk_sentences(self, sentences, k=5): blank_labels = tuple(None for _ in self._model.objectives) batches = iter_batches_single_threaded( model=self._model, dataset=[ (sentence, [blank_labels for t in sentence]) for sentence in sentences ], batch_size=min(256, len(sentences)), train=False, autoresize=False ) for batch in batches: outputs = self._model.predict_proba( self.session, batch ) named_outputs = {} for objective in self._model.objectives: obj_name = objective["name"] tags, scores = outputs[obj_name] if objective["type"] == "crf": named_outputs[obj_name] = [ [(token, [objective["vocab"][tag]], [score]) for token, tag in zip(tokens, tags)] for tokens, tags, score in zip(sentences, tags, scores) ] elif objective["type"] == 'softmax': all_sent_scores = [] for tokens, scores in zip(sentences, scores): sent_scores = [] for token, token_scores in zip(tokens, scores): topk = np.argsort(token_scores)[::-1][:k] sent_scores.append( ( token, [objective["vocab"][idx] for idx in topk], [token_scores[idx] for idx in topk] ) ) all_sent_scores.append(sent_scores) named_outputs[obj_name] = all_sent_scores else: raise ValueError("unknown objective type %r." % (objective["type"],)) yield named_outputs def tag_sentences(self, sentences): if len(sentences) == 0: return { objective["name"]: [] for objective in self._model.objectives } blank_labels = tuple(None for _ in self._model.objectives) batches = list(iter_batches_single_threaded( self._model, [ (sentence, [blank_labels for t in sentence]) for sentence in sentences ], batch_size=min(256, len(sentences)), train=False, autoresize=False )) named_outputs = {} sentence_idx = 0 for batch in batches: outputs = self._model.predict(self.session, batch) for objective in self._model.objectives: obj_name = objective["name"] if obj_name not in named_outputs: named_outputs[obj_name] = [] tags, scores = outputs[obj_name] nsentences = len(tags) if objective["type"] == "crf": named_outputs[obj_name].extend([ [(token, objective["vocab"][tag], score) for token, tag in zip(tokens, tags)] for tokens, tags, score in zip(sentences[sentence_idx:sentence_idx+nsentences], tags, scores) ]) elif objective["type"] == 'softmax': named_outputs[obj_name].extend([ [(token, objective["vocab"][tag], score) for token, tag, score in zip(tokens, tags, scores)] for tokens, tags, scores in zip(sentences[sentence_idx:sentence_idx+nsentences], tags, scores) ]) else: raise ValueError("unknown objective type %r." 
% (objective["type"],)) sentence_idx += nsentences return named_outputs def count_number_of_parameters(): return int(sum([np.prod(var.get_shape().as_list()) for var in tf.trainable_variables()])) class TestCallback(object): def __init__(self, model, session, dataset, epoch, args, logger): self.model = model self.session = session self.dataset = dataset self.epoch = epoch self.args = args self.logger = logger self.report_metrics_per_axis = args.report_metrics_per_axis self.report_class_f1 = args.report_class_f1 def test(self, iteration): dev_outcome = accuracy(self.model, self.session, self.dataset, self.args.batch_size, train=False, report_metrics_per_axis=self.report_metrics_per_axis, report_class_f1=self.report_class_f1) print_outcome(dev_outcome, self.model.objectives, epoch="{}-{}".format(self.epoch, iteration), step=self.session.run(self.model.global_step), name="validation", logger=self.logger ) if self.args.save_dir is not None: self.model.save(self.session, self.args.save_dir) def compute_epoch(session, model, train_set, validation_set, test_callback, epoch, train_writer, test_writer, args): test_callback.epoch = epoch train_outcome = accuracy(model, session, train_set, args.batch_size, train=True, callback_period=args.test_every, writer=train_writer.writer if train_writer is not None else None, report_metrics_per_axis=args.report_metrics_per_axis, report_class_f1=args.report_class_f1, callback=test_callback.test) global_step = session.run(model.global_step) print_outcome(train_outcome, model.objectives, epoch=epoch, name="train", step=global_step, logger=train_writer) dev_outcome = accuracy( model, session, validation_set, args.batch_size, train=False, report_metrics_per_axis=args.report_metrics_per_axis, report_class_f1=args.report_class_f1) print_outcome(dev_outcome, model.objectives, epoch=epoch, step=global_step, name="validation", logger=test_writer) if args.save_dir is not None: model.save(session, args.save_dir) return dev_outcome def main(): args = parse_args() config = Config.load(args.config) validation_set = config.load_dataset("dev", merge=False) session_conf = tf.ConfigProto(allow_soft_placement=True) with tf.Session(config=session_conf) as session, tf.device(args.device): if args.load_dir is not None: model = SequenceModel.load(session, args.load_dir, args=args, rebuild_graph=args.rebuild_graph, faux_cudnn=args.faux_cudnn, replace_to=args.name, replace_from=args.old_name) dev_outcome = accuracy( model, session, validation_set, args.batch_size, train=False, report_metrics_per_axis=args.report_metrics_per_axis, report_class_f1=args.report_class_f1) print_outcome(dev_outcome, model.objectives, 0, name="loaded validation", step=session.run(model.global_step), logger=None) # dev_outcome = None if args.rebuild_graph and args.save_dir is not None: model.save(session, args.save_dir) train_set = config.load_dataset("train") else: # load classes and index2word from a file. 
dev_outcome = None train_set = config.load_dataset("train") model = SequenceModel( objectives=get_objectives(config.objectives, train_set), features=config.features, feature_index2words=get_feature_vocabs(config.features, train_set, ["<UNK>"]), lr=args.lr, anneal_rate=args.anneal_rate, weight_noise=args.weight_noise, freeze_rate=args.freeze_rate, freeze_rate_anneal=args.freeze_rate_anneal, clip_norm=args.clip_norm, hidden_sizes=args.hidden_sizes, solver=args.solver, fused=args.fused, class_weights_normalize=args.class_weights_normalize, class_weights=args.class_weights, class_weights_clipval=args.class_weights_clipval, keep_prob=args.keep_prob, input_keep_prob=args.input_keep_prob, name=args.name, cudnn=args.cudnn, faux_cudnn=args.faux_cudnn, create_variables=True) session.run(tf.global_variables_initializer()) if args.restore_input_features is not None: restore_session( session, args.restore_input_features, verbose=True, use_metagraph=False, only_features=True) print("Model has {} trainable parameters.".format(count_number_of_parameters()), flush=True) best_dev_score = 0.0 patience = 0 best_epoch = 0 best_outcome = None improvement_key = args.improvement_key if dev_outcome is not None: best_dev_score = dev_outcome[improvement_key] best_epoch = -1 best_outcome = dev_outcome if args.save_dir is not None: train_writer = Logger(session, tf.summary.FileWriter(join(args.save_dir, "train"))) test_writer = Logger(session, tf.summary.FileWriter(join(args.save_dir, "test"))) else: train_writer, test_writer = None, None test_callback = TestCallback(model, session, validation_set, -1, args, logger=test_writer) if len(train_set) > 0: train_set.set_randomize(True) train_set.set_rng(model.rng) for epoch in range(args.max_epochs): dev_outcome = compute_epoch( session, model, train_set=train_set, validation_set=validation_set, epoch=epoch, test_callback=test_callback, train_writer=train_writer, test_writer=test_writer, args=args) if dev_outcome[improvement_key] > best_dev_score: best_dev_score = dev_outcome[improvement_key] best_epoch = epoch best_outcome = dev_outcome patience = 0 if args.save_dir is not None: model.save(session, join(args.save_dir, "best")) else: patience += 1 if patience >= args.max_patience: print("No improvements for {} epochs. 
Stopping.".format(args.max_patience)) break del dev_outcome print_outcome( best_outcome, model.objectives, epoch=best_epoch, name="validation-best", step=session.run(model.global_step), logger=None) if __name__ == "__main__": main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="16"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import numpy as np import subprocess import h5py import ciseau from os.path import exists, splitext, join from wikidata_linker_utils.wikidata_ids import load_wikidata_ids def count_examples(lines, comment, ignore_value, column_indices): example_length = 0 has_labels = False found = 0 for line in lines: if len(line) == 0 or (comment is not None and line.startswith(comment)): if example_length > 0 and has_labels: found += 1 example_length = 0 has_labels = False else: example_length += 1 if not has_labels: cols = line.split("\t") if len(cols) > 1: if ignore_value is not None: for col_index in column_indices: if cols[col_index] != ignore_value: has_labels = True break else: has_labels = True if example_length > 0 and has_labels: found += 1 return found def retokenize_example(x, y): tokens = ciseau.tokenize(" ".join(w for w in x), normalize_ascii=False) out_y = [] regular_cursor = 0 tokens_length_total = 0 regular_length_total = len(x[regular_cursor]) + 1 if len(x) > 0 else 0 if regular_cursor + 1 == len(x): regular_length_total -= 1 for i in range(len(tokens)): tokens_length_total = tokens_length_total + len(tokens[i]) while regular_length_total < tokens_length_total: regular_cursor += 1 regular_length_total = regular_length_total + len(x[regular_cursor]) + 1 if regular_cursor + 1 == len(x): regular_length_total -= 1 out_y.append(y[regular_cursor]) assert(regular_cursor + 1 == len(x)), "error with %r" % (x,) return ([tok.rstrip() for tok in tokens], out_y) def convert_lines_to_examples(lines, comment, ignore_value, column_indices, x_column, empty_column, retokenize=False): examples = [] x = [] y = [] for line in lines: if len(line) == 0 or (comment is not None and line.startswith(comment)): if len(x) > 0: if not all(row == empty_column for row in y): examples.append((x, y)) x = [] y = [] else: cols = line.split("\t") x.append(cols[x_column]) if len(cols) == 1: y.append(empty_column) else: if ignore_value is not None: y.append( tuple( cols[col_index] if col_index is not None and cols[col_index] != ignore_value else None for col_index in column_indices ) ) else: y.append( tuple( cols[col_index] if col_index is not None else None for col_index in column_indices ) ) if len(x) > 0 and not all(row == empty_column for row in y): examples.append((x, y)) if retokenize: examples = [retokenize_example(x, y) for x, y in examples] return examples def load_tsv(path, x_column, y_columns, objective_names, comment, ignore_value, retokenize): """" Deprecated method for loading a tsv file as a training/test set for a model. Arguments: ---------- path: str, location of tsv file x_column: int y_columns: list<dict>, objectives in this file along with their column. (e.g. `y_columns=[{"objective": "POS", "column": 2}, ...])`) objective_names: name of all desired columns comment: line beginning indicating it's okay to skip ignore_value: label value that should be treated as missing retokenize: run tokenizer again. 
Returns ------- list<tuple> : examples loaded into memory Note: can use a lot of memory since entire file is loaded. """ objective2column = {col['objective']: col['column'] for col in y_columns} column_indices = [objective2column.get(name, None) for name in objective_names] empty_column = tuple(None for _ in objective_names) if all(col_index is None for col_index in column_indices): return [] with open(path, "rt") as fin: lines = fin.read().splitlines() return convert_lines_to_examples(lines, ignore_value=ignore_value, empty_column=empty_column, x_column=x_column, column_indices=column_indices, comment=comment, retokenize=retokenize) class RandomizableDataset(object): def set_rng(self, rng): self.rng = rng def set_randomize(self, randomize): self.randomize = randomize def set_ignore_y(self, ignore): self.ignore_y = ignore class TSVDataset(RandomizableDataset): _fhandle = None _fhandle_position = 0 _examples = None _example_indices = None _example_index = 0 _eof = False ignore_y = False def __init__(self, path, x_column, y_columns, objective_names, comment, ignore_value, retokenize=False, chunksize=50000000, randomize=False, rng=None): """" Arguments: ---------- path: str, location of tsv file x_column: int y_columns: list<dict>, objectives in this file along with their column. (e.g. `y_columns=[{"objective": "POS", "column": 2}, ...])`) objective_names: name of all desired columns comment: line beginning indicating it's okay to skip ignore_value: label value that should be treated as missing chunksize: how many bytes to read from the file at a time. rng: numpy RandomState retokenize: run tokenizer on x again. """ self.path = path self.randomize = randomize self.x_column = x_column self.y_columns = y_columns self.objective_names = objective_names self.comment = comment self.ignore_value = ignore_value self.retokenize = retokenize self.chunksize = chunksize if rng is None: rng = np.random.RandomState(0) self.rng = rng # column picking setup: objective2column = {col['objective']: col['column'] for col in y_columns} self.column_indices = [objective2column.get(name, None) for name in objective_names] self.empty_column = tuple(None for _ in objective_names) if all(col_index is None for col_index in self.column_indices): self.length = 0 else: self._compute_length() def _signature(self): try: file_sha1sum = subprocess.check_output( ["sha1sum", self.path], universal_newlines=True ).split(" ")[0] except FileNotFoundError: file_sha1sum = subprocess.check_output( ["shasum", self.path], universal_newlines=True ).split(" ")[0] sorted_cols = list( map( str, sorted( [col for col in self.column_indices if col is not None] ) ) ) return "-".join([file_sha1sum] + sorted_cols) def _compute_length(self): length_file = ( splitext(self.path)[0] + "-length-" + self._signature() + ".txt" ) if exists(length_file): with open(length_file, "rt") as fin: total = int(fin.read()) else: total = 0 while True: total += self._count_examples() if self._eof: break with open(length_file, "wt") as fout: fout.write(str(total) + "\n") self.length = total def __len__(self): return self.length def close(self): if self._fhandle is not None: self._fhandle.close() self._fhandle = None self._fhandle_position = 0 self._eof = False self._examples = None self._example_indices = None def __del__(self): self.close() def _read_file_until_newline(self): if self._fhandle is None: self._fhandle = open(self.path, "rb") if self._eof: self._fhandle_position = 0 self._fhandle.seek(0) self._eof = False read_chunk = None while True: new_read_chunk = 
self._fhandle.read(self.chunksize) if read_chunk is None: read_chunk = new_read_chunk else: read_chunk += new_read_chunk if len(new_read_chunk) < self.chunksize: del new_read_chunk self._fhandle_position += len(read_chunk) self._eof = True break else: del new_read_chunk newline_pos = read_chunk.rfind(b"\n\n") if newline_pos != -1: # move to last line end position (so that we don't get # half an example.) self._fhandle.seek(self._fhandle_position + newline_pos + 2) self._fhandle_position += newline_pos + 2 read_chunk = read_chunk[:newline_pos] break return read_chunk def _count_examples(self): read_chunk = self._read_file_until_newline() return count_examples( read_chunk.decode("utf-8").splitlines(), ignore_value=self.ignore_value, column_indices=self.column_indices, comment=self.comment ) def _load_examples(self): read_chunk = self._read_file_until_newline() if self._examples is not None: del self._examples self._examples = convert_lines_to_examples( read_chunk.decode("utf-8").splitlines(), ignore_value=self.ignore_value, empty_column=self.empty_column, x_column=self.x_column, column_indices=self.column_indices, comment=self.comment, retokenize=self.retokenize ) self._example_indices = np.arange(len(self._examples)) if self.randomize: # access loaded data randomly: self.rng.shuffle(self._example_indices) self._example_index = 0 def __getitem__(self, index): """Retrieve the next example (index is ignored)""" if index >= self.length: raise StopIteration() if self._example_indices is None or self._example_index == len(self._example_indices): self._load_examples() while len(self._examples) == 0: self._load_examples() if len(self._examples) > 0: break if self._eof: raise StopIteration() ex = self._examples[self._example_indices[self._example_index]] self._example_index += 1 return ex def set_randomize(self, randomize): if randomize != self.randomize: self.randomize = randomize def close(self): if self._fhandle is not None: self._fhandle.close() self._fhandle = None class OracleClassification(object): def __init__(self, classes, classification, path): self.classes = classes self.classification = classification self.path = path self.contains_other = self.classes[-1] == "other" def classify(self, index): return self.classification[index] def load_oracle_classification(path): with open(join(path, "classes.txt"), "rt", encoding="UTF-8") as fin: classes = fin.read().splitlines() classification = np.load(join(path, "classification.npy")) return OracleClassification(classes, classification, path) class ClassificationHandler(object): def __init__(self, wikidata_path, classification_path): self.classification_path = classification_path _, self.name2index = load_wikidata_ids(wikidata_path, verbose=False) self.classifiers = {} def get_classifier(self, name): if name not in self.classifiers: self.classifiers[name] = load_oracle_classification( join(self.classification_path, name) ) return self.classifiers[name] class H5Dataset(RandomizableDataset): handle_open = False ignore_y = False _max_generated_example = 0 _min_generated_example = 0 def __init__(self, path, x_column, y_columns, objective_names, classifications, ignore_value, randomize=False, rng=None): self.x_column = str(x_column) self.y_columns = y_columns self.ignore_value = ignore_value self.objective_names = objective_names self.randomize = randomize if rng is None: rng = np.random.RandomState(0) self.rng = rng self._classifications = classifications self.handle = h5py.File(path, "r") self.path = path self.handle_open = True self.length = 
len(self.handle[self.x_column]) self.chunksize = self.handle[self.x_column].chunks[0] self._example_indices = None objective2column = { col['objective']: ( str(col['column']), self._classifications.get_classifier(col['classification']) ) for col in y_columns } if self.ignore_value is not None: for _, classifier in objective2column.values(): if self.ignore_value in classifier.classes: classifier.classes[classifier.classes.index(self.ignore_value)] = None self.column2col_indices = {} for col_idx, name in enumerate(self.objective_names): if name not in objective2column: continue column, classifier = objective2column[name] if column not in self.column2col_indices: self.column2col_indices[column] = [(classifier, col_idx)] else: self.column2col_indices[column].append((classifier, col_idx)) def close(self): if self.handle_open: self.handle.close() self.handle_open = False def __del__(self): self.close() def __len__(self): return self.length def _build_examples(self, index): x = [x_chunk.split("\n") for x_chunk in self.handle[self.x_column][index:index + self.chunksize]] y = [[[None for k in range(len(self.objective_names))] for j in range(len(x[i]))] for i in range(len(x))] if not self.ignore_y: for handle_column, col_content in self.column2col_indices.items(): col_ids = [[self._classifications.name2index[name] if name != "" else None for name in y_chunk.split("\n")] for y_chunk in self.handle[handle_column][index:index + self.chunksize]] for i in range(len(col_ids)): for j, idx in enumerate(col_ids[i]): if idx is not None: for classifier, k in col_content: y[i][j][k] = classifier.classify(idx) return x, y def set_randomize(self, randomize): if self.randomize != randomize: self.randomize = randomize if self._max_generated_example != self._min_generated_example: self.xorder = np.arange(self._min_generated_example, self._max_generated_example) self.rng.shuffle(self.xorder) def __getitem__(self, index): if index >= len(self): raise StopIteration() if self.randomize: if self._example_indices is None or index == 0: self._example_indices = np.arange(0, len(self), self.chunksize) self.rng.shuffle(self._example_indices) # transformed index: index = (self._example_indices[index // self.chunksize] + (index % self.chunksize)) % len(self) if index < self._min_generated_example or index >= self._max_generated_example: self.x, self.y = self._build_examples(index) # store bounds of generated data: self._min_generated_example = index self._max_generated_example = index + len(self.x) if self.randomize: self.xorder = np.arange(self._min_generated_example, self._max_generated_example) self.rng.shuffle(self.xorder) if self.randomize: index = self.xorder[index - self._min_generated_example] return self.x[index - self._min_generated_example], self.y[index - self._min_generated_example] class CombinedDataset(object): _which_dataset = None _dataset_counters = None def set_rng(self, rng): self.rng = rng for dataset in self.datasets: dataset.rng = rng def set_randomize(self, randomize): self.randomize = randomize for dataset in self.datasets: dataset.set_randomize(randomize) def set_ignore_y(self, ignore): for dataset in self.datasets: dataset.set_ignore_y(ignore) def close(self): for dataset in self.datasets: dataset.close() def _build_which_dataset(self): self._which_dataset = np.empty(self.length, dtype=np.int16) self._dataset_counters = np.zeros(len(self.datasets), dtype=np.int64) offset = 0 for index, dataset in enumerate(self.datasets): # ensure each dataset is seen as much as its content # says: 
self._which_dataset[offset:offset + len(dataset)] = index offset += len(dataset) def __getitem__(self, index): if index == 0: if self.randomize: # visit datasets in random orders: self.rng.shuffle(self._which_dataset) self._dataset_counters[:] = 0 which = self._which_dataset[index] idx = self._dataset_counters[which] self._dataset_counters[which] += 1 return self.datasets[which][idx] def __init__(self, datasets, rng=None, randomize=False): self.datasets = datasets if rng is None: rng = np.random.RandomState(0) self.set_rng(rng) self.set_randomize(randomize) self.length = sum(len(dataset) for dataset in datasets) self._build_which_dataset() def __len__(self): return self.length </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="17"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import queue import threading def prefetch_generator(generator, to_fetch=10): q = queue.Queue(maxsize=to_fetch) def thread_worker(queue, gen): for val in gen: queue.put(val) queue.put(None) t = threading.Thread(target=thread_worker, args=(q, generator)) some_exception = None try: t.start() while True: job = q.get() if job is None: break yield job del job # print("q.qsize() %d" % (q.qsize(),), flush=True) except Exception as e: some_exception = e finally: if some_exception is not None: raise some_exception t.join() del t </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="18"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" Obtain a learnability score for each type axis. Trains a binary classifier for each type and gets its AUC. 
Usage ----- ``` python3 evaluate_learnability.py sample_data.tsv --out report.json --wikidata /path/to/wikidata ``` """ import json import time import argparse from os.path import dirname, realpath, join SCRIPT_DIR = dirname(realpath(__file__)) import numpy as np import tensorflow as tf from sklearn import metrics from collections import Counter from wikidata_linker_utils.type_collection import TypeCollection, offset_values_mask import wikidata_linker_utils.wikidata_properties as wprop from wikidata_linker_utils.progressbar import get_progress_bar from generator import prefetch_generator def learnability(collection, lines, mask, truth_tables, qids, id2pos, epochs=5, batch_size=128, max_dataset_size=-1, max_vocab_size=10000, hidden_sizes=None, lr=0.001, window_size=5, input_size=5, keep_prob=0.5, verbose=True): if hidden_sizes is None: hidden_sizes = [] tf.reset_default_graph() dset = list(get_windows(lines, mask, window_size, truth_tables, lambda x: id2pos[x])) if max_dataset_size > 0: dset = dset[:max_dataset_size] pos_num = np.zeros(len(qids)) for _, labels in dset: pos_num += labels neg_num = np.ones(len(qids)) * len(dset) - pos_num pos_weight = (pos_num / (pos_num + neg_num))[None, :] vocab = ["<UNK>"] + [w for w, _ in Counter(lines[:, 0]).most_common(max_vocab_size)] inv_vocab = {w: k for k, w in enumerate(vocab)} with tf.device("gpu"): W = tf.get_variable( "W", shape=[len(vocab), input_size], dtype=tf.float32, initializer=tf.random_normal_initializer() ) indices = tf.placeholder(tf.int32, [None, window_size*2], name="indices") labels = tf.placeholder(tf.bool, [None, len(qids)], name="label") keep_prob_pholder = tf.placeholder_with_default(keep_prob, []) lookup = tf.reshape(tf.nn.embedding_lookup( W, indices ), [tf.shape(indices)[0], input_size * window_size*2]) lookup = tf.nn.dropout(lookup, keep_prob_pholder) hidden = lookup for layer_idx, hidden_size in enumerate(hidden_sizes): hidden = tf.contrib.layers.fully_connected( hidden, num_outputs=hidden_size, scope="FC%d" % (layer_idx,) ) out = tf.contrib.layers.fully_connected( hidden, num_outputs=len(qids), activation_fn=None) cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=out, labels=tf.cast(labels, tf.float32)) cost = tf.where(tf.is_finite(cost), cost, tf.zeros_like(cost)) cost_mean = tf.reduce_mean( (tf.cast(labels, tf.float32) * 1.0 / (pos_weight)) * cost + (tf.cast(tf.logical_not(labels), tf.float32) * 1.0 / (1.0 - pos_weight)) * cost ) cost_sum = tf.reduce_sum(cost) size = tf.shape(indices)[0] noop = tf.no_op() correct = tf.reduce_sum(tf.cast(tf.equal(tf.greater_equal(out, 0), labels), tf.int32), 0) out_activated = tf.sigmoid(out) train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost_mean) session = tf.InteractiveSession() session.run(tf.global_variables_initializer()) def accuracy(dataset, batch_size, train): epoch_correct = np.zeros(len(qids)) epoch_nll = 0.0 epoch_total = np.zeros(len(qids)) op = train_op if train else noop all_labels = [] all_preds = [] for i in get_progress_bar("train" if train else "dev", item="batches")(range(0, len(dataset), batch_size)): batch_labels = [label for _, label in dataset[i:i+batch_size]] csum, corr, num_examples, preds, _ = session.run([cost_sum, correct, size, out_activated, op], feed_dict={ indices: [[inv_vocab.get(w, 0) for w in window] for window, _ in dataset[i:i+batch_size]], labels: batch_labels, keep_prob_pholder: keep_prob if train else 1.0 }) epoch_correct += corr epoch_nll += csum epoch_total += num_examples all_labels.extend(batch_labels) all_preds.append(preds) 
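# ---------------------------------------------------------------------------
# [Illustrative aside -- not part of the original file.] The per-type
# "learnability" score computed a few lines below reduces to the standard
# scikit-learn ROC-AUC and average-precision calls. Kept as comments so it
# does not interfere with the surrounding closure; the arrays are invented.
# ---------------------------------------------------------------------------
# import numpy as np
# from sklearn import metrics
#
# y_true  = np.array([0, 0, 1, 1])            # gold membership in one type
# y_score = np.array([0.1, 0.4, 0.35, 0.8])   # classifier probabilities
# fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score, pos_label=1)
# auc = metrics.auc(fpr, tpr)                              # 0.75 here
# aps = metrics.average_precision_score(y_true, y_score)   # ~0.83 here
# ---------------------------------------------------------------------------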
return (epoch_nll, epoch_correct, epoch_total, np.vstack(all_preds), np.vstack(all_labels)) dataset_indices = np.arange(len(dset)) train_indices = dataset_indices[:int(0.8 * len(dset))] dev_indices = dataset_indices[int(0.8 * len(dset)):] train_dataset = [dset[idx] for idx in train_indices] dev_dataset = [dset[idx] for idx in dev_indices] learnability = [] for epoch in range(epochs): t0 = time.time() train_epoch_nll, train_epoch_correct, train_epoch_total, _, _ = accuracy(train_dataset, batch_size, train=True) t1 = time.time() if verbose: print("epoch %d train: %.3f%% in %.3fs" % ( epoch, 100.0 * train_epoch_correct.sum() / train_epoch_total.sum(), t1 - t0),) t0 = time.time() dev_epoch_nll, dev_epoch_correct, dev_epoch_total, pred, y = accuracy(dev_dataset, batch_size, train=False) t1 = time.time() learnability = [] for qidx in range(len(qids)): try: fpr, tpr, thresholds = metrics.roc_curve(y[:,qidx], pred[:,qidx], pos_label=1) auc = metrics.auc(fpr, tpr) if not np.isnan(auc): average_precision_score = metrics.average_precision_score(y[:,qidx], pred[:,qidx]) learnability.append((qids[qidx], auc, average_precision_score, 100.0 * dev_epoch_correct[qidx] / dev_epoch_total[qidx], int(pos_num[qidx]), int(neg_num[qidx]))) except ValueError: continue if verbose: learnability = sorted(learnability, key=lambda x: x[1], reverse=True) print("epoch %d dev: %.3fs" % (epoch, t1-t0)) for qid, auc, average_precision_score, acc, pos, neg in learnability: print(" %r AUC: %.3f, APS: %.3f, %.3f%% positive: %d, negative: %d" % ( collection.ids[qid], auc, average_precision_score, acc, pos, neg)) print("") return learnability def generate_training_data(collection, path): with open(path, "rt") as fin: lines = [row.split("\t")[:2] for row in fin.read().splitlines()] lines_arr = np.zeros((len(lines), 2), dtype=np.object) mask = np.zeros(len(lines), dtype=np.bool) for i, l in enumerate(lines): lines_arr[i, 0] = l[0] if len(l) > 1: lines_arr[i, 1] = collection.name2index[l[1]] mask[i] = True return lines_arr, mask def get_proposal_sets(collection, article_ids, seed): np.random.seed(seed) relation = collection.relation(wprop.CATEGORY_LINK) relation_mask = offset_values_mask(relation.values, relation.offsets, article_ids) counts = np.bincount(relation.values[relation_mask]) is_fp = collection.relation(wprop.FIXED_POINTS).edges() > 0 is_fp = is_fp[:counts.shape[0]] counts = counts * is_fp topfields_fp = np.argsort(counts)[::-1][:(counts > 0).sum()] relation = collection.relation(wprop.INSTANCE_OF) relation_mask = offset_values_mask(relation.values, relation.offsets, article_ids) counts = np.bincount(relation.values[relation_mask]) topfields_instance_of = np.argsort(counts)[::-1][:(counts > 0).sum()] np.random.shuffle(topfields_instance_of) np.random.shuffle(topfields_fp) return [(topfields_instance_of, wprop.INSTANCE_OF), (topfields_fp, wprop.CATEGORY_LINK)] def build_truth_tables(collection, lines, qids, relation_name): truth_tables = [] all_ids = list(sorted(set(lines[:, 1]))) id2pos = {idx: pos for pos, idx in enumerate(all_ids)} for qid in qids: truth_tables.append(collection.satisfy([relation_name], [qid])[all_ids]) collection.reset_cache() truth_tables = np.stack(truth_tables, axis=1) qid_sums = truth_tables.sum(axis=0) kept_qids = [] kept_dims = [] for i, (qid, qid_sum) in enumerate(zip(qids, qid_sums)): if qid_sum != 0 and qid_sum != truth_tables.shape[0]: kept_qids.append(qid) kept_dims.append(i) truth_tables = truth_tables[:, kept_dims] return truth_tables, kept_qids, id2pos def get_windows(lines, mask, 
window, truth_table, id_mapper): for i in np.where(mask)[0]: if i >= window and i < len(lines) - window: yield (lines[max(0, i - window):i + window, 0], truth_table[id_mapper(lines[i, 1])]) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--dataset", type=str, required=True) parser.add_argument("--batch_size", type=int, default=128) parser.add_argument("--seed", type=int, default=0) parser.add_argument("--max_epochs", type=int, default=2) parser.add_argument("--max_vocab_size", type=int, default=10000) parser.add_argument("--simultaneous_fields", type=int, default=512) parser.add_argument("--window_size", type=int, default=5) parser.add_argument("--input_size", type=int, default=5) parser.add_argument("--wikidata", type=str, required=True) parser.add_argument("--out", type=str, required=True) return parser.parse_args() def generate_truth_tables(collection, lines_arr, proposal_sets, simultaneous_fields): for topfields, relation_name in proposal_sets: for i in range(0, len(topfields), simultaneous_fields): truth_tables, qids, id2pos = build_truth_tables( collection, lines_arr, qids=topfields[i:i+simultaneous_fields], relation_name=relation_name) yield (topfields[i:i+simultaneous_fields], relation_name, truth_tables, qids, id2pos) def main(): args = parse_args() collection = TypeCollection(args.wikidata, num_names_to_load=0) collection.load_blacklist(join(dirname(SCRIPT_DIR), "extraction", "blacklist.json")) lines_arr, mask = generate_training_data(collection, args.dataset) article_ids = np.array(list(set(lines_arr[:, 1])), dtype=np.int32) proposal_sets = get_proposal_sets(collection, article_ids, args.seed) report = [] total = sum(len(topfields) for topfields, _ in proposal_sets) seen = 0 t0 = time.time() data_source = generate_truth_tables(collection, lines_arr, proposal_sets, args.simultaneous_fields) for topfields, relation_name, truth_tables, qids, id2pos in prefetch_generator(data_source): # for each of these properties and given relation # construct the truth table for each item and discover # their 'learnability': seen += len(topfields) field_auc_scores = learnability( collection, lines_arr, mask, qids=qids, truth_tables=truth_tables, id2pos=id2pos, batch_size=args.batch_size, epochs=args.max_epochs, input_size=args.input_size, window_size=args.window_size, max_vocab_size=args.max_vocab_size, verbose=True) for qid, auc, average_precision_score, correct, pos, neg in field_auc_scores: report.append( { "qid": collection.ids[qid], "auc": auc, "average_precision_score": average_precision_score, "correct": correct, "relation": relation_name, "positive": pos, "negative": neg } ) with open(args.out, "wt") as fout: json.dump(report, fout) t1 = time.time() speed = seen / (t1 - t0) print("AUC obtained for %d / %d items (%.3f items/s)" % (seen, total, speed), flush=True) if __name__ == "__main__": main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="19"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import numpy as np import string from dataset import TSVDataset, H5Dataset, CombinedDataset from generator import prefetch_generator def word_dropout(inputs, rng, keep_prob): inputs_ndim = inputs.ndim mask_shape = [len(inputs)] + [1] * (inputs_ndim - 1) return ( inputs * ( rng.random_sample(size=mask_shape) < keep_prob ) 
).astype(inputs.dtype) def extract_feat(feat): if feat["type"] == "word": return lambda x: x elif feat["type"] == "suffix": length = feat["length"] return lambda x: x[-length:] elif feat["type"] == "prefix": length = feat["length"] return lambda x: x[:length] elif feat["type"] == "digit": return lambda x: x.isdigit() elif feat["type"] == "punctuation_count": return lambda x: sum(c in string.punctuation for c in x) elif feat["type"] == "uppercase": return lambda x: len(x) > 0 and x[0].isupper() elif feat["type"] == "character-conv": max_size = feat["max_word_length"] def extract(x): x_bytes = x.encode("utf-8") if len(x_bytes) > max_size: return np.concatenate( [ [255], list(x_bytes[:max_size]), [256] ] ) else: return np.concatenate( [ [255], list(x_bytes), [256], -np.ones(max_size - len(x_bytes), dtype=np.int32), ] ) return extract else: raise ValueError("unknown feature %r." % (feat,)) def extract_word_keep_prob(feat): return feat.get("word_keep_prob", 0.85) def extract_case_keep_prob(feat): return feat.get("case_keep_prob", 0.95) def extract_s_keep_prob(feat): return feat.get("s_keep_prob", 0.95) def apply_case_s_keep_prob(feat, rng, keep_case, keep_s): if len(feat) == 0: return feat if keep_case < 1 and feat[0].isupper() and rng.random_sample() >= keep_case: feat = feat.lower() if keep_s < 1 and feat.endswith("s") and rng.random_sample() >= keep_s: feat = feat[:-1] return feat def requires_character_convolution(feat): return feat["type"] in {"character-conv"} def requires_vocab(feat): return feat["type"] in {"word", "suffix", "prefix"} def feature_npdtype(feat): if requires_vocab(feat): return np.int32 elif feat["type"] in {"digit", "punctuation_count", "uppercase"}: return np.float32 elif requires_character_convolution(feat): return np.int32 else: raise ValueError("unknown feature %r." 
% (feat,)) def get_vocabs(dataset, max_vocabs, extra_words=None): index2words = [[] for i in range(len(max_vocabs))] occurrences = [{} for i in range(len(max_vocabs))] for els in dataset: for el, index2word, occurrence in zip(els, index2words, occurrences): if el not in occurrence: index2word.append(el) occurrence[el] = 1 else: occurrence[el] += 1 index2words = [ sorted(index2word, key=lambda x: occurrence[x], reverse=True) for index2word, occurrence in zip(index2words, occurrences) ] index2words = [ index2word[:max_vocab] if max_vocab > 0 else index2word for index2word, max_vocab in zip(index2words, max_vocabs) ] if extra_words is not None: index2words = [ extra_words + index2word for index2word in index2words ] return index2words def get_feature_vocabs(features, dataset, extra_words=None): out, feats_needing_vocab, feats_with_vocabs, vocabs = [], [], [], [] if hasattr(dataset, "set_ignore_y"): dataset.set_ignore_y(True) try: for feat in features: if requires_vocab(feat): if feat.get("path") is not None: with open(feat["path"], "rt") as fin: index2word = fin.read().splitlines() if feat.get("max_vocab", -1) > 0: index2word = index2word[:feat["max_vocab"]] if extra_words is not None: index2word = extra_words + index2word feats_with_vocabs.append(index2word) else: feats_needing_vocab.append(feat) if len(feats_needing_vocab) > 0: extractors = tuple( [extract_feat(feat) for feat in feats_needing_vocab] ) vocabs = get_vocabs( ((extractor(w) for extractor in extractors) for x, _ in dataset for w in x), max_vocabs=[feat.get("max_vocab", -1) for feat in feats_needing_vocab], extra_words=extra_words ) vocab_feature_idx = 0 preexisting_vocab_feature_idx = 0 for feat in features: if requires_vocab(feat): if feat.get("path") is not None: out.append(feats_with_vocabs[preexisting_vocab_feature_idx]) preexisting_vocab_feature_idx += 1 else: out.append(vocabs[vocab_feature_idx]) vocab_feature_idx+=1 else: out.append(None) finally: if hasattr(dataset, "set_ignore_y"): dataset.set_ignore_y(False) return out def pad_arrays_into_array(arrays, padding): out_ndim = arrays[0].ndim + 1 out_shape = [0] * out_ndim out_shape[0] = len(arrays) for arr in arrays: for dim_idx in range(arr.ndim): out_shape[1 + dim_idx] = max(out_shape[1 + dim_idx], arr.shape[dim_idx]) out = np.empty(out_shape, dtype=arrays[0].dtype) out.fill(padding) for arr_idx, array in enumerate(arrays): arr_slice = [arr_idx] for dim_idx in range(arr.ndim): arr_slice.append(slice(0, array.shape[dim_idx])) arr_slice = tuple(arr_slice) out[arr_slice] = array return out def build_objective_mask(label_sequence, objective_idx, objective_type): if objective_type == 'crf': if len(label_sequence) == 0 or label_sequence[0][objective_idx] is None: return np.array(False, dtype=np.bool) else: return np.array(True, dtype=np.bool) elif objective_type == 'softmax': return np.array( [w[objective_idx] is not None for w in label_sequence], dtype=np.bool ) else: raise ValueError( "unknown objective type %r." % (objective_type,) ) def allocate_shrunk_batches(max_length, batch_size, lengths): typical_indices = max_length * batch_size i = 0 ranges = [] while i < len(lengths): j = i + 1 current_batch_size = 1 longest_ex = lengths[j - 1] while j < len(lengths) and j - i < batch_size: # can grow? 
new_batch_size = current_batch_size + 1 new_j = j + 1 if max(longest_ex, lengths[new_j - 1]) * new_batch_size < typical_indices: j = new_j longest_ex = max(longest_ex, lengths[new_j - 1]) current_batch_size = new_batch_size else: break ranges.append((i, j)) i = j return ranges def convert_label_to_index(label, label2index): if label is None: return 0 if isinstance(label, str): return label2index[label] return label class Batchifier(object): def __init__(self, rng, feature_word2index, objective_types, label2index, fused, sequence_lengths, labels, labels_mask, input_placeholders, features, dataset, batch_size, train, autoresize=True, max_length=100): assert(batch_size > 0), ( "batch size must be strictly positive (got %r)." % (batch_size,) ) # dictionaries, strings defined by model: self.objective_types = objective_types self.label2index = label2index self.feature_word2index = feature_word2index self.rng = rng self.fused = fused # tf placeholders: self.sequence_lengths = sequence_lengths self.labels = labels self.labels_mask = labels_mask self.input_placeholders = input_placeholders self.dataset = dataset self.batch_size = batch_size self.train = train self.dataset_is_lazy = isinstance(dataset, (TSVDataset, H5Dataset, CombinedDataset)) self.autoresize = autoresize self.max_length = max_length indices = np.arange(len(dataset)) if train: if self.dataset_is_lazy: dataset.set_rng(rng) dataset.set_randomize(True) elif isinstance(dataset, list): rng.shuffle(indices) self.batch_indices = [] if self.autoresize and not self.dataset_is_lazy: ranges = allocate_shrunk_batches( max_length=self.max_length, batch_size=self.batch_size, lengths=[len(dataset[indices[i]][0]) for i in range(len(indices))] ) for i, j in ranges: self.batch_indices.append(indices[i:j]) else: for i in range(0, len(indices), self.batch_size): self.batch_indices.append(indices[i:i + self.batch_size]) self.extractors = [ (extract_feat(feat), requires_vocab(feat), feature_npdtype(feat), extract_word_keep_prob(feat), extract_case_keep_prob(feat), extract_s_keep_prob(feat)) for feat in features ] def generate_batch(self, examples): X = [[] for i in range(len(self.extractors))] Y = [] Y_mask = [] for ex, label in examples: for idx, (extractor, uses_vocab, dtype, word_keep_prob, case_keep_prob, s_keep_prob) in enumerate(self.extractors): if self.train and (case_keep_prob < 1 or s_keep_prob < 1): ex = [apply_case_s_keep_prob(w, self.rng, case_keep_prob, s_keep_prob) for w in ex] if uses_vocab: word_feats = np.array( [self.feature_word2index[idx].get(extractor(w), 0) for w in ex], dtype=dtype ) else: word_feats = np.array([extractor(w) for w in ex], dtype=dtype) if self.train and word_keep_prob < 1: word_feats = word_dropout( word_feats, self.rng, word_keep_prob ) X[idx].append(word_feats) Y.append( tuple( np.array([convert_label_to_index(w[objective_idx], label2index) for w in label], dtype=np.int32) for objective_idx, label2index in enumerate(self.label2index) ) ) Y_mask.append( tuple( build_objective_mask(label, objective_idx, objective_type) for objective_idx, objective_type in enumerate(self.objective_types) ) ) sequence_lengths = np.array([len(x) for x in X[0]], dtype=np.int32) X = [pad_arrays_into_array(x, -1) for x in X] Y = [ pad_arrays_into_array([row[objective_idx] for row in Y], 0) for objective_idx in range(len(self.objective_types)) ] Y_mask = [ pad_arrays_into_array([row[objective_idx] for row in Y_mask], 0.0) for objective_idx in range(len(self.objective_types)) ] feed_dict = { self.sequence_lengths: sequence_lengths } if 
self.fused: feed_dict[self.labels[0]] = np.stack([y.T for y in Y], axis=-1) feed_dict[self.labels_mask[0]] = np.stack([y.T for y in Y_mask], axis=-1) else: for y, placeholder in zip(Y, self.labels): feed_dict[placeholder] = y.T for y, placeholder in zip(Y_mask, self.labels_mask): feed_dict[placeholder] = y.T for idx, x in enumerate(X): feed_dict[self.input_placeholders[idx]] = x.swapaxes(0, 1) return feed_dict def as_list(self): return list(self.iter_batches()) def iter_batches(self, pbar=None): gen = range(len(self.batch_indices)) if pbar is not None: pbar.max_value = len(self.batch_indices) pbar.value = 0 gen = pbar(gen) if self.autoresize and self.dataset_is_lazy: for idx in gen: examples = [self.dataset[ex] for ex in self.batch_indices[idx]] ranges = allocate_shrunk_batches( max_length=self.max_length, batch_size=self.batch_size, lengths=[len(ex[0]) for ex in examples] ) for i, j in ranges: yield self.generate_batch(examples[i:j]) else: for idx in gen: yield self.generate_batch( [self.dataset[ex] for ex in self.batch_indices[idx]] ) def allocate_shrunk_batches(max_length, batch_size, lengths): typical_indices = max_length * batch_size i = 0 ranges = [] while i < len(lengths): j = i + 1 current_batch_size = 1 longest_ex = lengths[j - 1] while j < len(lengths) and j - i < batch_size: # can grow? new_batch_size = current_batch_size + 1 new_j = j + 1 if max(longest_ex, lengths[new_j - 1]) * new_batch_size < typical_indices: j = new_j longest_ex = max(longest_ex, lengths[new_j - 1]) current_batch_size = new_batch_size else: break ranges.append((i, j)) i = j return ranges def batch_worker(rng, features, feature_word2index, objective_types, label2index, fused, sequence_lengths, labels, labels_mask, input_placeholders, autoresize, train, batch_size, max_length, dataset, pbar, batch_queue, death_event): batchifier = Batchifier( rng=rng, features=features, feature_word2index=feature_word2index, objective_types=objective_types, label2index=label2index, fused=fused, sequence_lengths=sequence_lengths, labels=labels, labels_mask=labels_mask, input_placeholders=input_placeholders, autoresize=autoresize, train=train, batch_size=batch_size, max_length=max_length, dataset=dataset ) for batch in batchifier.iter_batches(pbar=pbar): if death_event.is_set(): break batch_queue.put(batch) if not death_event.is_set(): batch_queue.put(None) def range_size(start, size): return [i for i in range(start, start + size)] class ProcessHolder(object): def __init__(self, process, death_event, batch_queue): self.process = process self.batch_queue = batch_queue self.death_event = death_event def close(self): self.death_event.set() try: self.batch_queue.close() while True: self.batch_queue.get_nowait() except Exception as e: pass self.process.terminate() self.process.join() def __del__(self): self.close() def iter_batches_single_threaded(model, dataset, batch_size, train, autoresize=True, max_length=100, pbar=None): tensorflow_placeholders = [model.sequence_lengths] + model.labels + model.labels_mask + model.input_placeholders labels_start = 1 labels_mask_start = labels_start + len(model.labels) placeholder_start = labels_mask_start + len(model.labels_mask) batchifier = Batchifier( rng=model.rng, features=model.features, feature_word2index=model.feature_word2index, objective_types=[obj["type"] for obj in model.objectives], label2index=model.label2index, fused=model.fused, sequence_lengths=0, labels=range_size(labels_start, len(model.labels)), labels_mask=range_size(labels_mask_start, len(model.labels_mask)), 
input_placeholders=range_size(placeholder_start, len(model.input_placeholders)), autoresize=autoresize, train=train, batch_size=batch_size, max_length=max_length, dataset=dataset ) for batch in prefetch_generator(batchifier.iter_batches(pbar=pbar), to_fetch=100): feed_dict = {} for idx, key in enumerate(tensorflow_placeholders): feed_dict[key] = batch[idx] yield feed_dict def iter_batches(model, dataset, batch_size, train, autoresize=True, max_length=100, pbar=None): import multiprocessing batch_queue = multiprocessing.Queue(maxsize=10) tensorflow_placeholders = [model.sequence_lengths] + model.labels + model.labels_mask + model.input_placeholders labels_start = 1 labels_mask_start = labels_start + len(model.labels) placeholder_start = labels_mask_start + len(model.labels_mask) death_event = multiprocessing.Event() batch_process = ProcessHolder(multiprocessing.Process( target=batch_worker, daemon=True, args=( model.rng, model.features, model.feature_word2index, [obj["type"] for obj in model.objectives], model.label2index, model.fused, 0, range_size(labels_start, len(model.labels)), range_size(labels_mask_start, len(model.labels_mask)), range_size(placeholder_start, len(model.input_placeholders)), autoresize, train, batch_size, max_length, dataset, pbar, batch_queue, death_event ) ), death_event, batch_queue) batch_process.process.name = "iter_batches" batch_process.process.start() while True: batch = batch_queue.get() if batch is None: break else: feed_dict = {} for idx, key in enumerate(tensorflow_placeholders): feed_dict[key] = batch[idx] yield feed_dict del batch </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="20"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import distutils.ccompiler import distutils.sysconfig import re import numpy as np import sys import subprocess from setuptools import setup, find_packages from os.path import join, dirname, realpath, relpath, splitext, exists, getmtime, relpath, lexists, islink from os import walk, sep, remove, listdir, stat, symlink from Cython.Distutils.extension import Extension from Cython.Distutils import build_ext from distutils.core import setup from distutils.command import build as build_module, clean as clean_module from distutils.spawn import find_executable SCRIPT_DIR = dirname(realpath(__file__)) WIKIDATA_LINKER_SOURCE_DIR = join(SCRIPT_DIR, "src") WIKIDATA_LINKER_MODULE_NAME = "wikidata_linker_utils" WIKIDATA_LINKER_INTERNAL_MODULE_NAME = WIKIDATA_LINKER_MODULE_NAME version_file = join(SCRIPT_DIR, "VERSION") if exists(version_file): with open(version_file) as f: VERSION = f.read().strip() else: VERSION = "1.0.0" def path_to_module_name(path): BASE_DIRS = ["python", "cython"] relative_path = relpath(path, join(WIKIDATA_LINKER_SOURCE_DIR)) path_no_ext, _ = splitext(relative_path) for base_dir in BASE_DIRS: if path_no_ext.startswith(base_dir): return path_no_ext.lstrip(base_dir + sep).replace(sep, '.') raise Exception("Cannot convert path %r to module name" % (relative_path,)) def find_files_by_suffix(path, suffix): """Recursively find files with specific suffix in a directory""" for relative_path, dirs, files in walk(path): for fname in files: if fname.endswith(suffix): yield join(path, relative_path, fname) # Make a `cleanall` rule to get rid of intermediate and library files 
class clean(clean_module.clean): def run(self): print("Cleaning up cython files...") # Just in case the build directory was created by accident, # note that shell=True should be OK here because the command is constant. for place in ["build", join("src", "cython", WIKIDATA_LINKER_INTERNAL_MODULE_NAME, "*.c"), join("src", "cython", WIKIDATA_LINKER_INTERNAL_MODULE_NAME, "*.cpp"), join("src", "cython", WIKIDATA_LINKER_INTERNAL_MODULE_NAME, "*.so")]: subprocess.Popen("rm -rf %s" % (place,), shell=True, executable="/bin/bash", cwd=SCRIPT_DIR) compiler = distutils.ccompiler.new_compiler() distutils.sysconfig.customize_compiler(compiler) BLACKLISTED_COMPILER_SO = ['-Wp,-D_FORTIFY_SOURCE=2'] build_ext.compiler = compiler ext_modules = [] for pyx_file in find_files_by_suffix(join(WIKIDATA_LINKER_SOURCE_DIR, "cython"), ".pyx"): # pxd files are like header files for pyx files # and they can also have relevant includes. relevant_files = [pyx_file] pxd_file = pyx_file[:-3] + "pxd" if exists(pxd_file): relevant_files.append(pxd_file) ext_modules.append(Extension( name=path_to_module_name(pyx_file), sources=[pyx_file], library_dirs=[], language='c++', extra_compile_args=['-std=c++11', '-Wno-unused-function', '-Wno-sign-compare', '-Wno-unused-local-typedef', '-Wno-undefined-bool-conversion', '-O3', '-Wno-reorder'], extra_link_args=[], libraries=[], extra_objects=[], include_dirs=[np.get_include()] )) ################################################################################ ## FIND PYTHON PACKAGES ## ################################################################################ py_packages = [] for file in find_files_by_suffix(join(WIKIDATA_LINKER_SOURCE_DIR, "python"), ".py"): module_path = dirname(file) py_packages.append(path_to_module_name(module_path)) ################################################################################ ## BUILD COMMAND WITH EXTRA WORK WHEN DONE ## ################################################################################ def symlink_built_package(module_name, dest_directory): build_dir_contents = listdir(join(SCRIPT_DIR, "build")) lib_dot_fnames = [] for name in build_dir_contents: if name.startswith("lib."): lib_dot_fnames.append(join(SCRIPT_DIR, "build", name)) # get latest lib. file created and symlink it to the project # directory for easier testing lib_dot_fnames = sorted( lib_dot_fnames, key=lambda name: stat(name).st_mtime, reverse=True ) if len(lib_dot_fnames) == 0: return most_recent_name = join(lib_dot_fnames[0], module_name) symlink_name = join(dest_directory, module_name) if lexists(symlink_name): if islink(symlink_name): remove(symlink_name) else: print( ("non symlink file with name %r found in project directory." 
" Please remove to create a symlink on build") % ( symlink_name, ) ) return symlink(most_recent_name, symlink_name, target_is_directory=True) print("Created symlink pointing to %r from %r" % ( most_recent_name, join(SCRIPT_DIR, module_name) )) class build_with_posthooks(build_module.build): def run(self): build_module.build.run(self) # Make a `cleanall` rule to get rid of intermediate and library files class clean_with_posthooks(clean_module.clean): def run(self): clean_module.clean.run(self) # remove cython generated sources for file_path in find_files_by_suffix(join(WIKIDATA_LINKER_SOURCE_DIR, 'cython'), '.cpp'): remove(file_path) setup(name=WIKIDATA_LINKER_MODULE_NAME, version=VERSION, cmdclass={"build": build_with_posthooks, 'build_ext': build_ext, 'clean': clean_with_posthooks}, install_requires=["numpy"], extras_require={"dev": []}, author="Jonathan Raiman", language='c++', author_email="raiman@openai.com", ext_modules=ext_modules, description="Generate data processing utilities for running DeepType.", package_dir={'': join(WIKIDATA_LINKER_SOURCE_DIR, 'python')}, packages=py_packages) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="21"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import re STOP_WORDS = {'a', 'an', 'in', 'the', 'of', 'it', 'from', 'with', 'this', 'that', 'they', 'he', 'she', 'some', 'where', 'what', 'since', 'his', 'her', 'their', 'le', 'la', 'les', 'il', 'elle', 'ce', 'Γ§a', 'ci', 'ceux', 'ceci', 'cela', 'celle', 'se', 'cet', 'cette', 'dans', 'avec', 'con', 'sans', 'pendant', 'durant', 'avant', 'aprΓ¨s', 'puis', 'el', 'lo', 'la', 'ese', 'esto', 'que', 'qui', 'quoi', 'dont', 'ou', 'oΓΉ', 'si', 'este', 'esta', 'cual', 'eso', 'ella', 'depuis', 'y', 'a', 'Γ ', 'su', 'de', "des", 'du', 'los', 'las', 'un', 'une', 'una', 'uno', 'para', 'asi', 'later', 'into', 'dentro', 'dedans', 'depuis', 'despuΓ©s', 'desde', 'al', 'et', 'por', 'at', 'for', 'when', 'why', 'how', 'with', 'whether', 'if', 'thus', 'then', 'and', 'but', 'on', 'during', 'while', 'as', 'within', 'was', 'is', 'est', 'au', 'fait', 'font', 'va', 'vont', 'sur', 'en', 'pour', 'del', 'cuando', 'cuan', 'do', 'does', 'until', 'sinon', 'encore', 'to', 'by', 'be', 'which', 'have', 'not', 'were', 'has', 'also', 'its', 'isbn', 'pp.', "&amp;", "p.", 'ces', 'o'} def starts_with_apostrophe_letter(word): return ( word.startswith("l'") or word.startswith("L'") or word.startswith("d'") or word.startswith("D'") or word.startswith("j'") or word.startswith("J'") or word.startswith("t'") or word.startswith("T'") ) PUNCTUATION = {"'", ",", "-", "!", ".", "?", ":", "’"} def clean_up_trie_source(source, lowercase=True): source = source.rstrip().strip('()[]') if len(source) > 0 and (source[-1] in PUNCTUATION or source[0] in PUNCTUATION): return "" # remove l' if starts_with_apostrophe_letter(source): source = source[2:] if source.endswith("'s"): source = source[:-2] tokens = source.split() while len(tokens) > 0 and tokens[0].lower() in STOP_WORDS: tokens = tokens[1:] while len(tokens) > 0 and tokens[-1].lower() in STOP_WORDS: tokens = tokens[:-1] joined_tokens = " ".join(tokens) if lowercase: return joined_tokens.lower() return joined_tokens ORDINAL_ANCHOR = re.compile("^\d+(st|th|nd|rd|er|eme|Γ¨me|Γ¨re)$") NUMBER_PUNCTUATION = re.compile("^\d+([\/\-,\.:;%]\d*)+$") def 
anchor_is_ordinal(anchor):
    return ORDINAL_ANCHOR.match(anchor) is not None


def anchor_is_numbers_slashes(anchor):
    return NUMBER_PUNCTUATION.match(anchor) is not None


def acceptable_anchor(anchor, anchor_trie, blacklist=None):
    return (len(anchor) > 0 and
            not anchor.isdigit() and
            not anchor_is_ordinal(anchor) and
            not anchor_is_numbers_slashes(anchor) and
            anchor in anchor_trie and
            (blacklist is None or anchor not in blacklist))


# ---- dataset row 22 ----

import sys
import importlib.util
import traceback
from os.path import basename, splitext


def reload_module(path):
    module_name, extension = splitext(basename(path))
    if extension != ".py":
        raise ValueError("path must have a .py extension (got %r)" % (path,))
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module


def enter_or_quit():
    wait = input("press any key to continue, q to quit.")
    received = wait.rstrip()
    if received == 'q':
        print("Bye.")
        sys.exit(0)
    else:
        return received


ALLOWED_RUNTIME_ERRORS = (
    TypeError, ValueError, IndexError, NameError, KeyError,
    AssertionError, AttributeError, ImportError, KeyboardInterrupt
)
ALLOWED_IMPORT_ERRORS = (
    SyntaxError, NameError, ImportError
)


def reload_run_retry(module_path, callback):
    while True:
        try:
            module = reload_module(module_path)
        except ALLOWED_IMPORT_ERRORS as e:
            print("issue reading %r, please fix." % (module_path,))
            print(str(e))
            traceback.print_exc(file=sys.stdout)
            enter_or_quit()
            continue
        try:
            result = callback(module)
        except ALLOWED_RUNTIME_ERRORS as e:
            print("issue running %r, please fix." % (module_path,))
            print(str(e))
            traceback.print_exc(file=sys.stdout)
            enter_or_quit()
            continue
        break
    return result


# ---- dataset row 23 ----

import subprocess


def execute_bash(command):
    """
    Executes bash command, prints output and throws an exception on failure.
    """
    process = subprocess.Popen(command,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               universal_newlines=True)
    for line in process.stdout:
        print(line, end='', flush=True)
    process.wait()
    assert process.returncode == 0


def get_bash_result(command):
    """
    Executes bash command, returns output and throws an exception on failure.
    """
    process = subprocess.Popen(command,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               universal_newlines=True)
    out = [line for line in process.stdout]
    process.wait()
    assert process.returncode == 0
    return out


def count_lines(path):
    return int(
        get_bash_result('wc -l %s' % (path,))[0].strip().split(' ')[0]
    )


# ---- dataset row 24 (empty) ----

# ---- dataset row 25 ----

import json
import msgpack
import bz2


def iterate_bytes_jsons(fin, batch_size=1000):
    current = []
    for l in fin:
        if l.startswith(b'{'):
            current.append(l)
            if len(current) >= batch_size:
                docs = json.loads('[' + b"".join(current).decode('utf-8').rstrip(',\n') + ']')
                for doc in docs:
                    yield doc
                current = []
    if len(current) > 0:
        docs = json.loads('[' + b"".join(current).decode('utf-8').rstrip(',\n') + ']')
        for doc in docs:
            yield doc
        current = []


def iterate_text_jsons(fin, batch_size=1000):
    current = []
    for l in fin:
        if l.startswith('{'):
            current.append(l)
            if len(current) >= batch_size:
                docs = json.loads('[' + "".join(current).rstrip(',\n') + ']')
                for doc in docs:
                    yield doc
                current = []
    if len(current) > 0:
        docs = json.loads('[' + "".join(current).rstrip(',\n') + ']')
        for doc in docs:
            yield doc
        current = []


def iterate_message_packs(fin):
    unpacker = msgpack.Unpacker(fin, encoding='utf-8', use_list=False)
    for obj in unpacker:
        yield obj


def open_wikidata_file(path, batch_size):
    if path.endswith('bz2'):
        with bz2.open(path, 'rb') as fin:
            for obj in iterate_bytes_jsons(fin, batch_size):
                yield obj
    elif path.endswith('json'):
        with open(path, 'rt') as fin:
            for obj in iterate_text_jsons(fin, batch_size):
                yield obj
    elif path.endswith('mp'):
        with open(path, 'rb') as fin:
            for obj in iterate_message_packs(fin):
                yield obj
    else:
        raise ValueError(
            "unknown extension for wikidata. "
            "Expecting bz2, json, or mp (msgpack)."
        )


# ---- dataset row 26 ----

from numpy import logical_and, logical_not, logical_or


def logical_negate(truth, falses):
    out = truth
    for value in falses:
        out = logical_and(out, logical_not(value))
    return out


def logical_ors(values):
    assert(len(values) > 0), "values cannot be empty."
    out = values[0]
    for val in values[1:]:
        out = logical_or(out, val)
    return out


def logical_ands(values):
    assert(len(values) > 0), "values cannot be empty."
out = values[0] for val in values[1:]: out = logical_and(out, val) return out </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="27"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import json import warnings from os.path import join, exists from functools import lru_cache import marisa_trie import requests import numpy as np from .successor_mask import ( successor_mask, invert_relation, offset_values_mask ) from .offset_array import OffsetArray, SparseAttribute from .wikidata_ids import ( load_wikidata_ids, load_names, property_names, temporal_property_names ) from . import wikidata_properties as wprop class CachedRelation(object): def __init__(self, use, state): self.use = use self.state = state @lru_cache(maxsize=None) def get_name(wikidata_id): res = requests.get("https://www.wikidata.org/wiki/" + wikidata_id) el = res.text.find('<title>') el_end = res.text.find('</title>') return res.text[el + len('<title>'):el_end] class TypeCollection(object): def __init__(self, path, num_names_to_load=100000, language_path=None, prefix="enwiki", verbose=True, cache=True): self.cache = cache self.path = path self.verbose = verbose self.wikidata_names2prop_names = property_names( join(path, 'wikidata_property_names.json') ) self.wikidata_names2temporal_prop_names = temporal_property_names( join(path, 'wikidata_time_property_names.json') ) # add wikipedia english category links: self.wikidata_names2prop_names[wprop.CATEGORY_LINK] = "category_link" self.wikidata_names2prop_names[wprop.FIXED_POINTS] = "fixed_points" self.known_names = load_names( join(path, "wikidata_wikititle2wikidata.tsv"), num_names_to_load, prefix=prefix ) self.num_names_to_load = num_names_to_load self.ids, self.name2index = load_wikidata_ids(path, verbose=self.verbose) self._relations = {} self._attributes = {} self._inverted_relations = {} self._article2id = None self._web_get_name = True self._satisfy_cache = {} # empty blacklist: self.set_bad_node( set(), set() ) if language_path is not None: article_links = np.load(join(language_path, "trie_index2indices_values.npy")) article_links_counts = np.load(join(language_path, "trie_index2indices_counts.npy")) self._weighted_articles = np.bincount(article_links, weights=article_links_counts).astype(np.int32) if len(self._weighted_articles) != len(self.ids): self._weighted_articles = np.concatenate( [ self._weighted_articles, np.zeros(len(self.ids) - len(self._weighted_articles), dtype=np.int32) ] ) else: self._weighted_articles = None def attribute(self, name): if name not in self._attributes: is_temporal = name in self.wikidata_names2temporal_prop_names assert(is_temporal), "load relations using `relation` method." 
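        # Usage sketch (assumed variable names, not original comments): temporal
        # properties such as wprop.DATE_OF_BIRTH are loaded here as a
        # SparseAttribute, which supports <, <=, >, >=, == comparisons, while
        # graph-valued properties such as wprop.INSTANCE_OF go through
        # relation(), which returns an OffsetArray of neighboring entity ids,
        # e.g.:
        #
        #     collection.attribute(wprop.DATE_OF_BIRTH) < some_value
        #     collection.relation(wprop.INSTANCE_OF)[some_index]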
if self.verbose: print('load %r (%r)' % (name, self.wikidata_names2prop_names[name],)) self._attributes[name] = SparseAttribute.load( join(self.path, "wikidata_%s" % (name,)) ) return self._attributes[name] @property def article2id(self): if self._article2id is None: if self.verbose: print('load %r' % ("article2id",)) self._article2id = marisa_trie.RecordTrie('i').load( join(self.path, "wikititle2wikidata.marisa") ) if self.verbose: print("done.") return self._article2id def relation(self, name): if name.endswith(".inv"): return self.get_inverted_relation(name[:-4]) if name not in self._relations: is_temporal = name in self.wikidata_names2temporal_prop_names assert(not is_temporal), "load attributes using `attribute` method." if self.verbose: print('load %r (%r)' % (name, self.wikidata_names2prop_names[name],)) self._relations[name] = OffsetArray.load( join(self.path, "wikidata_%s" % (name,)), compress=True ) return self._relations[name] def set_bad_node(self, bad_node, bad_node_pair): changed = False if hasattr(self, "_bad_node") and self._bad_node != bad_node: changed = True if hasattr(self, "_bad_node_pair") and self._bad_node_pair != bad_node_pair: changed = True self._bad_node = bad_node self._bad_node_pair = bad_node_pair self._bad_node_array = np.array(list(bad_node), dtype=np.int32) bad_node_pair_right = {} for node_left, node_right in self._bad_node_pair: if node_right not in bad_node_pair_right: bad_node_pair_right[node_right] = [node_left] else: bad_node_pair_right[node_right].append(node_left) bad_node_pair_right = { node_right: np.array(node_lefts, dtype=np.int32) for node_right, node_lefts in bad_node_pair_right.items() } self._bad_node_pair_right = bad_node_pair_right if changed: self.reset_cache() def get_name(self, identifier): if identifier >= self.num_names_to_load and self._web_get_name: try: return get_name(self.ids[identifier]) + " (" + self.ids[identifier] + ")" except requests.exceptions.ConnectionError: self._web_get_name = False name = self.known_names.get(identifier, None) if name is None: return self.ids[identifier] else: return name + " (" + self.ids[identifier] + ")" def describe_connection(self, source, destination, allowed_edges): if isinstance(source, str): if source in self.name2index: source_index = self.name2index[source] else: source_index = self.article2id["enwiki/" + source][0][0] else: source_index = source if isinstance(destination, str): if destination in self.name2index: dest_index = self.name2index[destination] else: dest_index = self.article2id["enwiki/" + destination][0][0] else: dest_index = destination found_path = self.is_member_with_path( source_index, allowed_edges, [dest_index] ) if found_path is not None: _, path = found_path for el in path: if isinstance(el, str): print(" " + el) else: print(self.get_name(el), el) else: print('%r and %r are not connected' % (source, destination)) def is_member_with_path(self, root, fields, member_fields, max_steps=float("inf"), steps=0, visited=None, path=None): if steps >= max_steps: return None if visited is None: visited = set() if path is None: path = [root] else: path = path + [root] for field in fields: field_parents = self.relation(field)[root] for el in field_parents: if el in member_fields and el not in self._bad_node and (root, el) not in self._bad_node_pair: return True, path + [field, el] for el in field_parents: if el in visited or el in self._bad_node or (root, el) in self._bad_node_pair: continue visited.add(el) res = self.is_member_with_path(el, fields, member_fields, max_steps, 
steps=steps + 1, visited=visited, path=path + [field]) if res is not None: return res return None def get_inverted_relation(self, relation_name): if relation_name.endswith(".inv"): return self.relation(relation_name[:-4]) if relation_name not in self._inverted_relations: new_values_path = join(self.path, "wikidata_inverted_%s_values.npy" % (relation_name,)) new_offsets_path = join(self.path, "wikidata_inverted_%s_offsets.npy" % (relation_name,)) if not exists(new_values_path): relation = self.relation(relation_name) if self.verbose: print("inverting relation %r (%r)" % (relation_name, self.wikidata_names2prop_names[relation_name],)) new_values, new_offsets = invert_relation( relation.values, relation.offsets ) np.save(new_values_path, new_values) np.save(new_offsets_path, new_offsets) if self.verbose: print("load inverted %r (%r)" % (relation_name, self.wikidata_names2prop_names[relation_name])) self._inverted_relations[relation_name] = OffsetArray.load( join(self.path, "wikidata_inverted_%s" % (relation_name,)), compress=True ) return self._inverted_relations[relation_name] def successor_mask(self, relation, active_nodes): if isinstance(active_nodes, list): active_nodes = np.array(active_nodes, dtype=np.int32) if active_nodes.dtype != np.int32: active_nodes = active_nodes.astype(np.int32) return successor_mask( relation.values, relation.offsets, self._bad_node_pair_right, active_nodes ) def remove_blacklist(self, state): state[self._bad_node_array] = False def satisfy(self, relation_names, active_nodes, max_steps=None): assert(len(relation_names) > 0), ( "relation_names cannot be empty." ) if self.cache and isinstance(active_nodes, (list, tuple)) and len(active_nodes) < 100: satisfy_key = (tuple(sorted(relation_names)), tuple(sorted(active_nodes)), max_steps) if satisfy_key in self._satisfy_cache: cached = self._satisfy_cache[satisfy_key] cached.use += 1 return cached.state else: satisfy_key = None inverted_relations = [self.get_inverted_relation(relation_name) for relation_name in relation_names] state = np.zeros(inverted_relations[0].size(), dtype=np.bool) state[active_nodes] = True step = 0 while len(active_nodes) > 0: succ = None for relation in inverted_relations: if succ is None: succ = self.successor_mask(relation, active_nodes) else: succ = succ | self.successor_mask(relation, active_nodes) new_state = state | succ self.remove_blacklist(new_state) (active_nodes,) = np.where(state != new_state) active_nodes = active_nodes.astype(np.int32) state = new_state step += 1 if max_steps is not None and step >= max_steps: break if satisfy_key is not None: self._satisfy_cache[satisfy_key] = CachedRelation(1, state) return state def reset_cache(self): cache_keys = list(self._satisfy_cache.keys()) for key in cache_keys: if self._satisfy_cache[key].use == 0: del self._satisfy_cache[key] else: self._satisfy_cache[key].use = 0 def print_top_class_members(self, truth_table, name="Other", topn=20): if self._weighted_articles is not None: print("%s category, highly linked articles in wikipedia:" % (name,)) sort_weight = self._weighted_articles * truth_table linked_articles = int((sort_weight > 0).sum()) print("%s category, %d articles linked in wikipedia:" % (name, linked_articles)) top_articles = np.argsort(sort_weight)[::-1] for art in top_articles[:topn]: if not truth_table[art]: break print("%r (%d)" % (self.get_name(art), self._weighted_articles[art])) print("") else: print("%s category, sample of members:" % (name,)) top_articles = np.where(truth_table)[0] for art in top_articles[:topn]: 
print("%r" % (self.get_name(art),)) print("") def class_report(self, relation_names, truth_table, name="Other", topn=20): active_nodes = np.where(truth_table)[0].astype(np.int32) num_active_nodes = len(active_nodes) print("%s category contains %d unique items." % (name, num_active_nodes,)) relations = [self.relation(relation_name) for relation_name in relation_names] for relation, relation_name in zip(relations, relation_names): mask = offset_values_mask(relation.values, relation.offsets, active_nodes) counts = np.bincount(relation.values[mask]) topfields = np.argsort(counts)[::-1] print("%s category, most common %r:" % (name, relation_name,)) for field in topfields[:topn]: if counts[field] == 0: break print("%.3f%% (%d): %r" % (100.0 * counts[field] / num_active_nodes, counts[field], self.get_name(field))) print("") is_fp = np.logical_and( np.logical_or( self.relation(wprop.FIXED_POINTS + ".inv").edges() > 0, self.relation(wprop.FIXED_POINTS).edges() > 0 ), truth_table ) self.print_top_class_members( is_fp, topn=topn, name=name + " (fixed points)" ) if self._weighted_articles is not None: self.print_top_class_members(truth_table, topn=topn, name=name) def load_blacklist(self, path): with open(path, "rt") as fin: blacklist = json.load(fin) filtered_bad_node = [] for el in blacklist["bad_node"]: if el not in self.name2index: warnings.warn("Node %r under `bad_node` is not a known wikidata id." % ( el )) continue filtered_bad_node.append(el) bad_node = set(self.name2index[el] for el in filtered_bad_node) filtered_bad_node_pair = [] for el, oel in blacklist["bad_node_pair"]: if el not in self.name2index: warnings.warn("Node %r under `bad_node_pair` is not a known wikidata id." % ( el )) continue if oel not in self.name2index: warnings.warn("Node %r under `bad_node_pair` is not a known wikidata id." 
                % (
                    oel
                ))
                continue
            filtered_bad_node_pair.append((el, oel))
        bad_node_pair = set([(self.name2index[el], self.name2index[oel])
                             for el, oel in filtered_bad_node_pair])
        self.set_bad_node(bad_node, bad_node_pair)


# ---- dataset row 28 ----

INSTANCE_OF = "P31"
SUBCLASS_OF = "P279"
PART_OF = "P361"
OCCUPATION = "P106"
FIELD_OF_WORK = "P101"
FIELD_OF_THIS_OCCUPATION = "P425"
MEDICAL_SPECIALITY = "P1995"
GENRE = "P136"
SEX_OR_GENDER = "P21"
COUNTRY_OF_CITIZENSHIP = "P27"
COUNTRY = "P17"
CONTINENT = "P30"
LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY = "P131"
SPORT = "P641"
STUDIES = "P2578"
SERIES = "P179"
USE = "P366"
LOCATION = "P276"
FACET_OF = "P1269"
IS_A_LIST_OF = "P360"
COUNTRY_OF_ORIGIN = "P495"
PRODUCT_OR_MATERIAL_PRODUCED = "P1056"
INDUSTRY = "P452"
PARENT_TAXON = "P171"
APPLIES_TO_TERRITORIAL_JURISDICTION = "P1001"
POSITION_HELD = "P39"
CATEGORYS_MAIN_TOPIC = "P301"
PUBLICATION_DATE = "P577"
DATE_OF_BIRTH = "P569"
DATE_OF_DEATH = "P570"
INCEPTION = "P571"
DISSOLVED_OR_ABOLISHED = "P576"
POINT_IN_TIME = "P585"
START_TIME = "P580"
END_TIME = "P582"
CATEGORY_LINK = "enwiki_category_links"
FIXED_POINTS = "enwiki_fixed_points"

# ---- dataset row 29 ----

from os.path import exists
from os import stat


def true_exists(fname):
    return exists(fname) and stat(fname).st_size > 100

# ---- dataset row 30 ----

import progressbar

percentage = progressbar.Percentage()
counter = progressbar.Counter()
bar = progressbar.Bar()
adaptive_eta = progressbar.AdaptiveETA()


class MessageProgressbar(progressbar.ProgressBar):
    def set_message(self, message):
        self.widgets[0] = message + " "

    def set_item(self, item):
        self.widgets[4] = " %s) " % (item,)


def get_progress_bar(message, max_value=None, item="lines"):
    """
    Construct a progressbar iterator wrapper with an ETA
    and percentage information.

    Arguments:
    ----------
        message : str, title for the progress bar.
        max_value : None or int

    Returns:
    --------
        ProgressBar : object that can wrap an iterator and print out
        duration estimates and iteration stats.
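
    Example (illustrative sketch; assumes the same `progressbar` package API
    used elsewhere in this repository):

        pbar = get_progress_bar("parsing", max_value=1000, item="lines")
        pbar.start()
        for i in range(1000):
            pbar.update(i)
        pbar.finish()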
""" widgets = [ message + " ", percentage, " (", counter, " %s) " % (item,), bar, adaptive_eta ] return MessageProgressbar(widgets=widgets, maxval=max_value) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="31"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from os.path import exists import numpy as np from .successor_mask import ( convert_to_offset_array, make_dense, make_sparse ) def count_non_zero(dense): return len(np.nonzero(dense[1:] - dense[:-1])[0]) + int(dense[0] != 0) def should_compress(dense): nonzeros = count_non_zero(dense) return (2 * nonzeros + 1) < 0.5 * len(dense) class OffsetArray(object): def __init__(self, values, offsets): self.values = values self.offsets = offsets def __getitem__(self, idx): end = self.offsets[idx] start = 0 if idx == 0 else self.offsets[idx - 1] return self.values[start:end] def is_empty(self, idx): end = self.offsets[idx] start = 0 if idx == 0 else self.offsets[idx - 1] return start == end def size(self): return self.offsets.shape[0] def edges(self): num_edges = np.zeros(len(self.offsets), dtype=np.int32) num_edges[0] = self.offsets[0] num_edges[1:] = self.offsets[1:] - self.offsets[:-1] return num_edges @classmethod def load(cls, path, compress=True): values = np.load(path + "_values.npy") if exists(path + "_offsets.sparse.npy"): offsets_compressed = np.load(path + "_offsets.sparse.npy") offsets = make_dense(offsets_compressed, cumsum=True) else: # legacy mode, load dense versions: offsets = np.load(path + "_offsets.npy") if compress: if should_compress(offsets): offsets_compressed = make_sparse(offsets) np.save(path + "_offsets.sparse.npy", offsets_compressed) # optionally delete the old version here return OffsetArray( values, offsets ) def convert_dict_to_offset_array(dictionary, num_values): offsets = np.zeros(num_values, dtype=np.int32) total_num_values = sum(len(v) for _, v in dictionary.items()) values = np.zeros(total_num_values, dtype=np.int32) position = 0 for key, value in sorted(dictionary.items(), key=lambda x: x[0]): values[position:position + len(value)] = value position += len(value) offsets[key] = len(value) np.cumsum(offsets, out=offsets) return values, offsets def save_record_with_offset(path, index2indices, total_size=None): if isinstance(index2indices, dict): if total_size is None: raise ValueError("cannot leave total_size None " "when using a dict.") values, offsets = convert_dict_to_offset_array(index2indices, total_size) else: values, offsets = convert_to_offset_array(index2indices) np.save(path + "_values.npy", values) if should_compress(offsets): compressed_offsets = make_sparse(offsets) np.save(path + "_offsets.sparse.npy", compressed_offsets) else: np.save(path + "_offsets.npy", offsets) def load_sparse(path): compressed = np.load(path) dense = make_dense(compressed, cumsum=False) non_zero_indices = compressed[1::2] mask = np.zeros(len(dense), dtype=np.bool) mask[non_zero_indices] = True return dense, mask class SparseAttribute(object): def __init__(self, dense, mask): self.dense = dense self.mask = mask def __lt__(self, value): return np.logical_and(self.dense < value, self.mask) def __le__(self, value): return np.logical_and(self.dense <= value, self.mask) def __gt__(self, value): return np.logical_and(self.dense > value, self.mask) def __ge__(self, value): return 
np.logical_and(self.dense >= value, self.mask) def __eq__(self, value): return np.logical_and(self.dense == value, self.mask) @classmethod def load(cls, path): dense, mask = load_sparse(path + "_values.sparse.npy") return SparseAttribute(dense, mask) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="32"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">LANGUAGE_CODES = ["en", "zh", "fr", "ja", "ru", "pt", "ca", "fa", "ar", "fi", "hu", "id", "es", "it", "war", "ceb", "nl", "de", "sv", "ro", "cs", "ko", "sr", "ms", "tr", "min", "eo", "eu", "kk", "da", "bg", "sk", "hy", "he", "lt", "sl", "et", "uz", "gl", "nn", "la", "vo", "simple", "el", "ce", "be", "ka", "hi", "az", "th", "ur", "oc", "mk", "ta", "mg", "new", "tt", "cy", "tl", "bs", "br", "ne", "gu", "io", "bpy", "nds", "ku", "als", "pa", "su", "kn", "bar", "ckb", "ia", "arz"] </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="33"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import json from collections import namedtuple from os.path import join, dirname def dict_fix_relative_paths(basepath, relative_paths): if relative_paths is None: relative_paths = [] def load(d): new_obj = d.copy() for key in relative_paths: if key in new_obj: if isinstance(new_obj[key], str): new_obj[key] = join(basepath, new_obj[key]) elif isinstance(new_obj[key], list) and len(new_obj[key]) > 0 and isinstance(new_obj[key][0], str): new_obj[key] = [join(basepath, path) for path in new_obj[key]] return new_obj return load def load_config(path, relative_paths=None, defaults=None, relative_to=None): if relative_to is None: relative_to = dirname(path) object_hook = dict_fix_relative_paths(relative_to, relative_paths) with open(path, "rt") as fin: obj = json.load( fin, object_hook=object_hook ) if defaults is not None: for key, value in defaults.items(): if key not in obj: obj[key] = value return json.loads( json.dumps(obj), object_hook=lambda d: namedtuple('X', d.keys())(*d.values()) ) def json_loads(bytes): return json.loads(bytes.decode('utf-8')) def json_serializer(x): return json.dumps( x, check_circular=False, separators=(',', ':') ).encode('utf-8') </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="34"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">from os.path import exists, join, dirname import marisa_trie import json from .file import true_exists from os import makedirs class MarisaAsDict(object): def __init__(self, marisa): self.marisa = marisa def get(self, key, fallback): value = self.marisa.get(key, None) if value is None: return fallback else: return value[0][0] def __getitem__(self, key): value = self.marisa[key] return value[0][0] def __contains__(self, key): return key in self.marisa def load_wikidata_ids(path, verbose=True): wikidata_ids_inverted_path = join(path, 'wikidata_ids_inverted.marisa') with 
open(join(path, "wikidata_ids.txt"), "rt") as fin: ids = fin.read().splitlines() if exists(wikidata_ids_inverted_path): if verbose: print("loading wikidata id -> index") name2index = MarisaAsDict(marisa_trie.RecordTrie('i').load(wikidata_ids_inverted_path)) if verbose: print("done") else: if verbose: print("building trie") name2index = MarisaAsDict( marisa_trie.RecordTrie('i', [(name, (k,)) for k, name in enumerate(ids)]) ) name2index.marisa.save(wikidata_ids_inverted_path) if verbose: print("done") return (ids, name2index) def load_names(path, num, prefix): names = {} errors = 0 # debug if num > 0: with open(path, "rt", encoding="UTF-8") as fin: for line in fin: try: name, number = line.rstrip('\n').split('\t') except ValueError: errors += 1 number = int(number) if number >= num: break else: if name.startswith(prefix): names[number] = name[7:] print(errors) # debug return names def sparql_query(query): import requests wikidata_url = "https://query.wikidata.org/sparql" response = requests.get( wikidata_url, params={ "format": "json", "query": query } ).json() out = {} for el in response["results"]['bindings']: label = el['propertyLabel']['value'] value = el['property']['value'] if value.startswith("http://www.wikidata.org/entity/"): value = value[len("http://www.wikidata.org/entity/"):] out[value] = label return out def saved_sparql_query(savename, query): directory = dirname(savename) makedirs(directory, exist_ok=True) if true_exists(savename): with open(savename, "rt") as fin: out = json.load(fin) return out else: out = sparql_query(query) with open(savename, "wt") as fout: json.dump(out, fout) return out def property_names(prop_save_path): """" Retrieve the mapping between wikidata properties ids (e.g. "P531") and their human-readable names (e.g. "diplomatic mission sent"). Returns: dict<str, str> : mapping from property id to property descriptor. """ return saved_sparql_query( prop_save_path, """ SELECT DISTINCT ?property ?propertyLabel WHERE { ?property a wikibase:Property . SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } } """ ) def temporal_property_names(prop_save_path): """" Retrieve the mapping between wikidata properties ids (e.g. "P531") and their human-readable names (e.g. "diplomatic mission sent") only for fields that are time-based. Returns: dict<str, str> : mapping from property id to property descriptor. """ return saved_sparql_query( prop_save_path, """ SELECT DISTINCT ?property ?propertyLabel WHERE { ?property a wikibase:Property . {?property wdt:P31 wd:Q18636219} UNION {?property wdt:P31 wd:Q22661913} . 
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } } """ ) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="35"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import re import numpy as np from os.path import join from epub_conversion import convert_wiki_to_lines from epub_conversion.wiki_decoder import almost_smart_open from .wikipedia_language_codes import LANGUAGE_CODES from .file import true_exists from .bash import execute_bash from .successor_mask import ( load_redirections, match_wikipedia_to_wikidata ) BADS = ["Wikipedia:", "WikipΓ©dia:", "File:", "Media:", "Help:", "User:"] def _lines_extractor(lines, article_name): """ Simply outputs lines """ yield (article_name, lines) def _bad_link(link): return any(link.startswith(el) for el in BADS) def iterate_articles(path): num_articles = 9999999999999 with almost_smart_open(path, "rb") as wiki: for article_name, lines in convert_wiki_to_lines( wiki, max_articles=num_articles, clear_output=True, report_every=100, parse_special_pages=True, skip_templated_lines=False, line_converter=_lines_extractor): if not _bad_link(article_name): yield (article_name, lines) def induce_wikipedia_prefix(wikiname): if wikiname in {code + "wiki" for code in LANGUAGE_CODES}: return wikiname else: raise ValueError("Could not determine prefix for wiki " "with name %r." % (wikiname,)) def convert_sql_to_lookup(props, propname): propname = b",'" + propname.encode("utf-8") + b"','" ending = b"'," starting = b"(" lookup = {} offset = 0 while True: newpos = props.find(propname, offset) if newpos == -1: break begin = props.rfind(starting, offset, newpos) end = props.find(ending, newpos + len(propname)) key = props[begin + len(starting):newpos] value = props[newpos + len(propname):end] lookup[key.decode('utf-8')] = value.decode('utf-8') offset = end return lookup def load_wikipedia_pageid_to_wikidata(data_dir): fname = join(data_dir, "enwiki-latest-page_props.sql") if not true_exists(fname): execute_bash( "wget -O - https://dumps.wikimedia.org/enwiki/" "latest/enwiki-latest-page_props.sql.gz | gunzip > %s" % (fname,) ) with open(fname, "rb") as fin: props = fin.read() return convert_sql_to_lookup(props, "wikibase_item") link_pattern = re.compile(r'\[\[([^\]\[:]*)\]\]') class WikipediaDoc(object): def __init__(self, doc): self.doc = doc def links(self, wiki_trie, redirections, prefix): current_pos = 0 for match in re.finditer(link_pattern, self.doc): match_string = match.group(1) start = match.start() end = match.end() if current_pos != start: yield self.doc[current_pos:start], None current_pos = end if "|" in match_string: link, anchor = match_string.rsplit("|", 1) link = link.strip().split("#")[0] else: anchor = match_string link = anchor.strip() if len(link) > 0: dest_index = match_wikipedia_to_wikidata( link, wiki_trie, redirections, prefix ) yield anchor, dest_index else: yield anchor, None if current_pos != len(self.doc): yield self.doc[current_pos:], None def load_wikipedia_docs(path, size): docs = [] for article_name, doc in iterate_articles(path): docs.append(WikipediaDoc(doc)) if len(docs) == size: break return docs def transition_trie_index(anchor_idx, dest_index, transitions, all_options): """ Recover the new trie index for an index that has gone stale. 
    Use a transitions array to know how original anchors now map
    to new trie indices.
    """
    option_transitions = transitions[anchor_idx]
    dest_index = option_transitions[option_transitions[:, 0] == dest_index, 1]
    if len(dest_index) == 0:
        dest_index = -1
    else:
        dest_index = np.asscalar(dest_index)
    if dest_index != -1:
        if not np.any(all_options == dest_index):
            dest_index = -1
    return dest_index


__all__ = ["load_redirections", "induce_wikipedia_prefix", "load_wikipedia_docs",
           "WikipediaDoc", "transition_trie_index", "iterate_articles"]

# ---- dataset row 36 ----

"""
Compress a jsonl version of Wikidata by throwing away descriptions and
converting the file to msgpack format.

Usage
-----

```
python3 compress_wikidata_msgpack.py wikidata.json wikidata.msgpack
```
"""
import argparse
import msgpack

from wikidata_linker_utils.wikidata_iterator import open_wikidata_file
from wikidata_linker_utils.progressbar import get_progress_bar


def parse_args(args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('wikidata')
    parser.add_argument('out')
    return parser.parse_args(args=args)


def main():
    args = parse_args()
    approx_max_quantity = 24642416
    pbar = get_progress_bar('compress wikidata', max_value=approx_max_quantity, item='entities')
    pbar.start()
    seen = 0
    with open(args.out, "wb") as fout:
        for doc in open_wikidata_file(args.wikidata, 1000):
            seen += 1
            if 'descriptions' in doc:
                del doc['descriptions']
            if 'labels' in doc:
                del doc['labels']
            if 'aliases' in doc:
                del doc['aliases']
            for claims in doc['claims'].values():
                for claim in claims:
                    if 'id' in claim:
                        del claim['id']
                    if 'rank' in claim:
                        del claim['rank']
                    if 'references' in claim:
                        for ref in claim['references']:
                            if 'hash' in ref:
                                del ref['hash']
                    if 'qualifiers' in claim:
                        for qualifier in claim['qualifiers'].values():
                            if 'hash' in qualifier:
                                del qualifier['hash']
            fout.write(msgpack.packb(doc))
            if seen % 1000 == 0:
                if seen < approx_max_quantity:
                    pbar.update(seen)
    pbar.finish()


if __name__ == "__main__":
    main()

# ---- dataset row 37 ----

import argparse

from os.path import join
from os import makedirs

import marisa_trie
import numpy as np

from wikidata_linker_utils.bash import count_lines
from wikidata_linker_utils.progressbar import get_progress_bar
from wikidata_linker_utils.wikipedia import match_wikipedia_to_wikidata, load_redirections
from wikidata_linker_utils.successor_mask import construct_mapping, construct_anchor_trie


def parse_args(argv=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("wikipedia2wikidata_trie",
                        help="Location of wikipedia -> wikidata mapping trie.")
    parser.add_argument("prefix", type=str,
                        help="What language is being processed, e.g. enwiki, frwiki, etc.")
    parser.add_argument("anchor_tags", type=str,
                        help="Location where anchor tags were saved (tsv).")
    parser.add_argument("redirections", type=str,
                        help="Location where redirections were saved (tsv).")
    parser.add_argument("out", type=str,
                        help="Directory to save trie/data in.")
    return parser.parse_args(argv)


def main():
    args = parse_args()
    makedirs(args.out, exist_ok=True)
    wikipedia2wikidata_trie = marisa_trie.RecordTrie('i').load(
        args.wikipedia2wikidata_trie
    )
    print('loaded trie')
    redirections = load_redirections(args.redirections)
    anchor_trie = construct_anchor_trie(
        anchor_tags=args.anchor_tags,
        wikipedia2wikidata_trie=wikipedia2wikidata_trie,
        redirections=redirections,
        prefix=args.prefix
    )
    anchor_trie.save(join(args.out, 'trie.marisa'))
    (
        (
            trie_index2indices_offsets,
            trie_index2indices_values,
            trie_index2indices_counts
        ),
        (
            trie_index2contexts_offsets,
            trie_index2contexts_values,
            trie_index2contexts_counts
        )
    ) = construct_mapping(
        anchor_tags=args.anchor_tags,
        wikipedia2wikidata_trie=wikipedia2wikidata_trie,
        redirections=redirections,
        prefix=args.prefix,
        anchor_trie=anchor_trie
    )
    np.save(join(args.out, "trie_index2indices_offsets.npy"), trie_index2indices_offsets)
    np.save(join(args.out, "trie_index2indices_values.npy"), trie_index2indices_values)
    np.save(join(args.out, "trie_index2indices_counts.npy"), trie_index2indices_counts)
    np.save(join(args.out, "trie_index2contexts_offsets.npy"), trie_index2contexts_offsets)
    np.save(join(args.out, "trie_index2contexts_values.npy"), trie_index2contexts_values)
    np.save(join(args.out, "trie_index2contexts_counts.npy"), trie_index2contexts_counts)


if __name__ == "__main__":
    main()

# ---- dataset row 38 ----

import argparse
import sys
import json
import time
import traceback

from os import makedirs
from os.path import join, dirname, realpath

from wikidata_linker_utils.repl import (
    enter_or_quit,
    reload_module,
    ALLOWED_RUNTIME_ERRORS,
    ALLOWED_IMPORT_ERRORS
)
from wikidata_linker_utils.logic import logical_ors
from wikidata_linker_utils.type_collection import TypeCollection
import wikidata_linker_utils.wikidata_properties as wprop

import numpy as np

SCRIPT_DIR = dirname(realpath(__file__))


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('wikidata', type=str,
                        help="Location of wikidata properties.")
    parser.add_argument('classifiers', type=str, nargs="+",
                        help="Filename(s) for Python script that classifies entities.")
    parser.add_argument('--export_classification', type=str, nargs="+", default=None,
                        help="Location to save the result of the entity classification.")
    parser.add_argument('--num_names_to_load', type=int, default=20000000,
                        help="Number of names to load from disk to accelerate reporting.")
    parser.add_argument('--language_path', type=str, default=None,
                        help="Location of a language-wikipedia specific information set to "
                             "provide language/wikipedia specific metrics.")
    parser.add_argument('--interactive', action="store_true", default=True,
                        help="Operate in a REPL.
Reload scripts on errors or on user prompt.") parser.add_argument('--nointeractive', action="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/store_false", dest="interactive", help="Run classification without REPL.") parser.add_argument('--use-cache', action="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/store_true", dest="use_cache", help="store satisfies in cache.") parser.add_argument('--nouse-cache', action="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/store_false", dest="use_cache", help="not store satisfies in cache.") return parser.parse_args() def get_other_class(classification): if len(classification) == 0: return None return np.logical_not(logical_ors( list(classification.values()) )) def export_classification(classification, path): classes = sorted(list(classification.keys())) if len(classes) == 0: return makedirs(path, exist_ok=True) num_items = classification[classes[0]].shape[0] classid = np.zeros(num_items, dtype=np.int32) selected = np.zeros(num_items, dtype=np.bool) for index, classname in enumerate(classes): truth_table = classification[classname] selected = selected | truth_table classid = np.maximum(classid, truth_table.astype(np.int32) * index) other = np.logical_not(selected) if other.sum() > 0: classes_with_other = classes + ["other"] classid = np.maximum(classid, other.astype(np.int32) * len(classes)) else: classes_with_other = classes with open(join(path, "classes.txt"), "wt") as fout: for classname in classes_with_other: fout.write(classname + "\n") np.save(join(path, "classification.npy"), classid) def main(): args = parse_args() should_export = args.export_classification is not None if should_export and len(args.export_classification) != len(args.classifiers): raise ValueError("Must have as many export filenames as classifiers.") collection = TypeCollection( args.wikidata, num_names_to_load=args.num_names_to_load, language_path=args.language_path, cache=args.use_cache ) if args.interactive: alert_failure = enter_or_quit else: alert_failure = lambda: sys.exit(1) while True: try: collection.load_blacklist(join(SCRIPT_DIR, "blacklist.json")) except (ValueError,) as e: print("Issue reading blacklist, please fix.") print(str(e)) alert_failure() continue classifications = [] for class_idx, classifier_fname in enumerate(args.classifiers): while True: try: classifier = reload_module(classifier_fname) except ALLOWED_IMPORT_ERRORS as e: print("issue reading %r, please fix." % (classifier_fname,)) print(str(e)) traceback.print_exc(file=sys.stdout) alert_failure() continue try: t0 = time.time() classification = classifier.classify(collection) classifications.append(classification) if class_idx == len(args.classifiers) - 1: collection.reset_cache() t1 = time.time() print("classification took %.3fs" % (t1 - t0,)) except ALLOWED_RUNTIME_ERRORS as e: print("issue running %r, please fix." 
% (classifier_fname,)) print(str(e)) traceback.print_exc(file=sys.stdout) alert_failure() continue break try: # show cardinality for each truth table: if args.interactive: mega_other_class = None for classification in classifications: for classname in sorted(classification.keys()): print("%r: %d members" % (classname, int(classification[classname].sum()))) print("") summary = {} for classname, truth_table in classification.items(): (members,) = np.where(truth_table) summary[classname] = [collection.get_name(int(member)) for member in members[:20]] print(json.dumps(summary, indent=4)) other_class = get_other_class(classification) if other_class.sum() > 0: # there are missing items: to_report = ( classifier.class_report if hasattr(classifier, "class_report") else [wprop.SUBCLASS_OF, wprop.INSTANCE_OF, wprop.OCCUPATION, wprop.CATEGORY_LINK] ) collection.class_report(to_report, other_class, name="Other") if mega_other_class is None: mega_other_class = other_class else: mega_other_class = np.logical_and(mega_other_class, other_class) if len(classifications) > 1: if mega_other_class.sum() > 0: # there are missing items: to_report = [wprop.SUBCLASS_OF, wprop.INSTANCE_OF, wprop.OCCUPATION, wprop.CATEGORY_LINK] collection.class_report(to_report, mega_other_class, name="Other-combined") if should_export: assert(len(classifications) == len(args.export_classification)), ( "classification outputs missing for export." ) for classification, savename in zip(classifications, args.export_classification): export_classification(classification, savename) except KeyboardInterrupt as e: pass if args.interactive: enter_or_quit() else: break if __name__ == "__main__": main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="39"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import argparse from os import remove from wikidata_linker_utils.bash import execute_bash import h5py def produce_window_dataset(path, window_size, out): num_columns = 0 with open(path, "rt") as fin: line_locations = [] for idx, line in enumerate(fin): if "\t" in line: line_locations.append(idx) if num_columns == 0: num_columns = len(line.split("\t")) if line == "\n": line_locations.append(-1) groups = [] current_group = [] max_buffer_size = 250000 read_size = 100000 seen_classes = {} for line_location in line_locations: if line_location == -1: if len(current_group) > 0: groups.append(current_group) current_group = [] else: if len(current_group) == 0: current_group.append(line_location) elif abs(current_group[-1] - line_location) <= window_size: current_group.append(line_location) else: groups.append(current_group) current_group = [line_location] if len(current_group) > 0: groups.append(current_group) num_examples = len(groups) EMPTY = "" with h5py.File(out, "w") as handle: datasets = [] for col in range(num_columns): datasets.append( handle.create_dataset( str(col), (num_examples,), dtype=h5py.special_dtype(vlen=str), chunks=(1500,) # compression="gzip", # compression_opts=9 ) ) k = 0 with open(path, "rt") as fin: current_location = 0 current_lines = fin.readlines(read_size) current_end = current_location + len(current_lines) for group in groups: start = max(0, group[0] - window_size) end = group[-1] + window_size if end > current_end: # read more lines into buffer: current_lines = 
current_lines + fin.readlines(read_size) # advance buffer max location current_end = current_location + len(current_lines) if len(current_lines) > max_buffer_size: # compute how much to remove from buffer to_chop = len(current_lines) - max_buffer_size # move start location current_location += to_chop # remove extra buffer lines current_lines = current_lines[to_chop:] # ensure that we do not cross white space boundaries start_delay = 0 for idx, line in enumerate(current_lines[start - current_location:group[0] - current_location]): if line == "\n": start_delay = idx start += start_delay early_end = window_size for idx, line in enumerate(current_lines[group[-1] - current_location:end - current_location]): if line == "\n": early_end = idx break end = group[-1] + early_end cols = [[] for i in range(num_columns)] for line in current_lines[start - current_location:end - current_location]: vals = line.rstrip().split("\t") for col_index in range(num_columns): if len(vals) > col_index: cols[col_index].append(vals[col_index]) else: cols[col_index].append(EMPTY) for col_index, dataset in zip(cols, datasets): dataset[k] = "\n".join(col_index) k += 1 def file_slice(path, start, end, destination, append): file_operator = ">>" if append else ">" delta = end - start command = "head -n %d %s | tail -n %d %s %s" % ( end, path, delta, file_operator, destination ) execute_bash(command) def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument("path") parser.add_argument("out_train") parser.add_argument("out_validation") parser.add_argument("--window_size", type=int, default=5) parser.add_argument("--total_size", type=int, required=True) parser.add_argument("--validation_start", type=int, required=True) parser.add_argument("--validation_size", type=int, default=500000) return parser.parse_args(args=args) def main(): args = parse_args() if args.total_size < args.validation_size: raise ValueError("cannot have total_size (%d) < validation_size " "(%d)" % (args.total_size, args.validation_size)) if args.validation_start > args.total_size: raise ValueError("cannot have validation_start (%d) begin after " "total_size (%d)" % (args.validation_start, args.total_size)) if args.validation_start + args.validation_size > args.total_size: raise ValueError("cannot have validation_start + validation_size (%d)" " be larger than total_size (%d)" % ( args.validation_start + args.validation_size, args.total_size )) train_temp = args.out_train + ".train_temp" try: file_slice( args.path, 0, args.validation_start, train_temp, append=False ) file_slice( args.path, args.validation_start + args.validation_size, args.total_size, train_temp, append=True ) print("created temp file %s" % (train_temp)) produce_window_dataset( train_temp, args.window_size, args.out_train ) print("created windowed dataset for train") finally: print("removing temp file %s" % (train_temp)) remove(train_temp) try: validation_temp = args.out_validation + ".validation_temp" file_slice( args.path, args.validation_start, args.validation_start + args.validation_size, validation_temp, append=False ) print("created temp file %s" % (validation_temp)) produce_window_dataset(validation_temp, args.window_size, args.out_validation) print("created windowed dataset for validation") finally: print("removing temp file %s" % (validation_temp)) remove(validation_temp) if __name__ == "__main__": main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 
dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="40"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import json import argparse import time import random import numpy as np from evaluate_type_system import fix_and_parse_tags from wikidata_linker_utils.json import load_config from wikidata_linker_utils.type_collection import TypeCollection from wikidata_linker_utils.progressbar import get_progress_bar from wikidata_linker_utils.wikipedia import induce_wikipedia_prefix from os.path import realpath, dirname, join, exists from wikidata_linker_utils.fast_disambiguate import ( beam_project, cem_project, ga_project ) SCRIPT_DIR = dirname(realpath(__file__)) def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument("config", type=str) parser.add_argument("out", type=str) parser.add_argument("--relative_to", default=None, type=str) parser.add_argument("--penalty", default=0.0005, type=float) parser.add_argument("--beam_width", default=8, type=float) parser.add_argument("--beam_search_subset", default=2000, type=int) parser.add_argument("--log", default=None, type=str) parser.add_argument("--samples", type=int, default=1000) parser.add_argument("--ngen", type=int, default=40) parser.add_argument("--method", type=str, choices=["cem", "greedy", "beam", "ga"], default="greedy") return parser.parse_args(args=args) def load_aucs(): paths = [ "/home/jonathanraiman/en_field_auc_w10_e10.json", "/home/jonathanraiman/en_field_auc_w10_e10-s1234.json", "/home/jonathanraiman/en_field_auc_w5_e5.json", "/home/jonathanraiman/en_field_auc_w5_e5-s1234.json" ] aucs = {} for path in paths: with open(path, "rt") as fin: auc_report = json.load(fin) for report in auc_report: key = (report["qid"], report["relation"]) if key in aucs: aucs[key].append(report["auc"]) else: aucs[key] = [report["auc"]] for key in aucs.keys(): aucs[key] = np.mean(aucs[key]) return aucs def greedy_disambiguate(tags): greedy_correct = 0 total = 0 for dest, other_dest, times_pointed in tags: total += 1 if len(other_dest) == 1 and dest == other_dest[0]: greedy_correct += 1 elif other_dest[np.argmax(times_pointed)] == dest: greedy_correct += 1 return greedy_correct, total def fast_disambiguate(tags, all_classifications): correct = 0 total = 0 for dest, other_dest, times_pointed in tags: total += 1 if len(other_dest) == 1 and dest == other_dest[0]: correct += 1 else: identities = np.all(all_classifications[other_dest, :] == all_classifications[dest, :], axis=1) matches = other_dest[identities] matches_counts = times_pointed[identities] if len(matches) == 1 and matches[0] == dest: correct += 1 elif matches[np.argmax(matches_counts)] == dest: correct += 1 return correct, total def get_prefix(config): return config.prefix or induce_wikipedia_prefix(config.wiki) MAX_PICKS = 400.0 def rollout(cached_satisfy, key2row, tags, aucs, ids, sample, penalty, greedy_correct): mean_auc = 0.0 sample_sum = sample.sum() if sample_sum == 0: total = len(tags) return (greedy_correct / total, greedy_correct / total) if sample_sum > MAX_PICKS: return 0.0, 0.0 all_classifications = None if sample_sum > 0: all_classifications = np.zeros((len(ids), int(sample_sum)), dtype=np.bool) col = 0 for picked, (key, auc) in zip(sample, aucs): if picked: all_classifications[:, col] = cached_satisfy[key2row[key]] col += 1 mean_auc += auc mean_auc = mean_auc / sample_sum correct, total = fast_disambiguate(tags, all_classifications) # here's the benefit of using 
types: improvement = correct - greedy_correct # penalty for using unreliable types: objective = ( (greedy_correct + improvement * mean_auc) / total - # number of items is penalized sample_sum * penalty ) return objective, correct / total def get_cached_satisfy(collection, aucs, ids, mmap=False): path = join(SCRIPT_DIR, "cached_satisfy.npy") if not exists(path): cached_satisfy = np.zeros((len(aucs), len(ids)), dtype=np.bool) for row, (qid, relation_name) in get_progress_bar("satisfy", item="types")(enumerate(sorted(aucs.keys()))): cached_satisfy[row, :] = collection.satisfy([relation_name], [collection.name2index[qid]])[ids] collection._satisfy_cache.clear() np.save(path, cached_satisfy) if mmap: del cached_satisfy cached_satisfy = np.load(path, mmap_mode="r") else: if mmap: cached_satisfy = np.load(path, mmap_mode="r") else: cached_satisfy = np.load(path) return cached_satisfy def main(): args = parse_args() config = load_config( args.config, ["wiki", "language_path", "wikidata", "redirections", "classification"], defaults={ "num_names_to_load": 0, "prefix": None, "sample_size": 100, "wiki": None, "fix_links": False, "min_count": 0, "min_percent": 0.0 }, relative_to=args.relative_to ) if config.wiki is None: raise ValueError("must provide path to 'wiki' in config.") prefix = get_prefix(config) collection = TypeCollection( config.wikidata, num_names_to_load=config.num_names_to_load, prefix=prefix, verbose=True ) collection.load_blacklist(join(SCRIPT_DIR, "blacklist.json")) fname = config.wiki test_tags = fix_and_parse_tags(config, collection, config.sample_size) aucs = load_aucs() ids = sorted(set([idx for doc_tags in test_tags for _, tag in doc_tags if tag is not None for idx in tag[2] if len(tag[2]) > 1])) id2pos = {idx: k for k, idx in enumerate(ids)} # use reduced identity system: remapped_tags = [] for doc_tags in test_tags: for text, tag in doc_tags: if tag is not None: remapped_tags.append( (id2pos[tag[1]] if len(tag[2]) > 1 else tag[1], np.array([id2pos[idx] for idx in tag[2]]) if len(tag[2]) > 1 else tag[2], tag[3])) test_tags = remapped_tags aucs = {key: value for key, value in aucs.items() if value > 0.5} print("%d relations to pick from with %d ids." % (len(aucs), len(ids)), flush=True) cached_satisfy = get_cached_satisfy(collection, aucs, ids, mmap=args.method=="greedy") del collection key2row = {key: k for k, key in enumerate(sorted(aucs.keys()))} if args.method == "greedy": picks, _ = beam_project( cached_satisfy, key2row, remapped_tags, aucs, ids, beam_width=1, penalty=args.penalty, log=args.log ) elif args.method == "beam": picks, _ = beam_project( cached_satisfy, key2row, remapped_tags, aucs, ids, beam_width=args.beam_width, penalty=args.penalty, log=args.log ) elif args.method == "cem": picks, _ = cem_project( cached_satisfy, key2row, remapped_tags, aucs, ids, n_samples=args.samples, penalty=args.penalty, log=args.log ) elif args.method == "ga": picks, _ = ga_project( cached_satisfy, key2row, remapped_tags, aucs, ids, ngen=args.ngen, n_samples=args.samples, penalty=args.penalty, log=args.log ) else: raise ValueError("unknown method %r." 
% (args.method,)) with open(args.out, "wt") as fout: json.dump(picks, fout) if __name__ == "__main__": main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="41"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import sys import pickle import argparse import requests import marisa_trie import traceback import numpy as np from os.path import join, dirname, realpath, exists from os import stat from collections import Counter from itertools import product from wikidata_linker_utils.anchor_filtering import clean_up_trie_source, acceptable_anchor from wikidata_linker_utils.wikipedia import ( load_wikipedia_docs, induce_wikipedia_prefix, load_redirections, transition_trie_index ) from wikidata_linker_utils.json import load_config from wikidata_linker_utils.offset_array import OffsetArray from wikidata_linker_utils.repl import reload_run_retry, enter_or_quit from wikidata_linker_utils.progressbar import get_progress_bar from wikidata_linker_utils.type_collection import TypeCollection, get_name as web_get_name SCRIPT_DIR = dirname(realpath(__file__)) PROJECT_DIR = dirname(SCRIPT_DIR) INTERNET = True def maybe_web_get_name(s): global INTERNET if INTERNET: try: res = web_get_name(s) return res except requests.exceptions.ConnectionError: INTERNET = False return s class OracleClassification(object): def __init__(self, classes, classification, path): self.classes = classes self.classification = classification self.path = path self.contains_other = self.classes[-1] == "other" def classify(self, index): return self.classification[index] def load_oracle_classification(path): with open(join(path, "classes.txt"), "rt") as fin: classes = fin.read().splitlines() classification = np.load(join(path, "classification.npy")) return OracleClassification(classes, classification, path) def can_disambiguate(oracles, truth, alternatives, times_pointed, count_threshold, ignore_other=False, keep_other=False): ambig = np.ones(len(alternatives), dtype=np.bool) for oracle in oracles: truth_pred = oracle.classify(truth) alt_preds = oracle.classify(alternatives) if keep_other and oracle.contains_other: if truth_pred == len(oracle.classes) - 1: continue else: ambig = np.logical_and( ambig, np.logical_or( np.equal(alt_preds, truth_pred), np.equal(alt_preds, len(oracle.classes) - 1) ) ) elif ignore_other and oracle.contains_other and np.any(alt_preds == len(oracle.classes) - 1): continue else: ambig = np.logical_and(ambig, np.equal(alt_preds, truth_pred)) # apply type rules to disambiguate: alternatives_matching_type = alternatives[ambig] alternatives_matching_type_times_pointed = times_pointed[ambig] if len(alternatives_matching_type) <= 1: return alternatives_matching_type, alternatives_matching_type_times_pointed, False # apply rules for count thresholding: ordered_times_pointed = np.argsort(alternatives_matching_type_times_pointed)[::-1] top1count = alternatives_matching_type_times_pointed[ordered_times_pointed[0]] top2count = alternatives_matching_type_times_pointed[ordered_times_pointed[1]] if top1count > top2count + count_threshold and alternatives_matching_type[ordered_times_pointed[0]] == truth: return ( alternatives_matching_type[ordered_times_pointed[0]:ordered_times_pointed[0]+1], 
alternatives_matching_type_times_pointed[ordered_times_pointed[0]:ordered_times_pointed[0]+1], True ) return alternatives_matching_type, alternatives_matching_type_times_pointed, False def disambiguate(tags, oracles): ambiguous = 0 obvious = 0 disambiguated_oracle = 0 disambiguated_with_counts = 0 disambiguated_greedy = 0 disambiguated_with_background = 0 count_threshold = 0 ambiguous_tags = [] obvious_tags = [] non_obvious_tags = [] disambiguated_oracle_ignore_other = 0 disambiguated_oracle_keep_other = 0 for text, tag in tags: if tag is None: continue anchor, dest, other_dest, times_pointed = tag if len(other_dest) == 1: obvious += 1 obvious_tags.append((anchor, dest, other_dest, times_pointed)) else: ambiguous += 1 non_obvious_tags.append((anchor, dest, other_dest, times_pointed)) if other_dest[np.argmax(times_pointed)] == dest: disambiguated_greedy += 1 matching_tags, times_pointed_subset, used_counts = can_disambiguate( oracles, dest, other_dest, times_pointed, count_threshold ) if len(matching_tags) <= 1: if used_counts: disambiguated_with_counts += 1 else: disambiguated_oracle += 1 else: ambiguous_tags.append( (anchor, dest, matching_tags, times_pointed_subset) ) matching_tags, times_pointed_subset, used_counts = can_disambiguate( oracles, dest, other_dest, times_pointed, count_threshold, ignore_other=True ) if len(matching_tags) <= 1: disambiguated_oracle_ignore_other += 1 matching_tags, times_pointed_subset, used_counts = can_disambiguate( oracles, dest, other_dest, times_pointed, count_threshold, keep_other=True ) if len(matching_tags) <= 1: disambiguated_oracle_keep_other += 1 report = { "ambiguous": ambiguous, "obvious": obvious, "disambiguated oracle": disambiguated_oracle, "disambiguated greedy": disambiguated_greedy, "disambiguated oracle + counts": disambiguated_oracle + disambiguated_with_counts, "disambiguated oracle + counts + ignore other": disambiguated_oracle_ignore_other, "disambiguated oracle + counts + keep other": disambiguated_oracle_keep_other } return (report, ambiguous_tags) def disambiguate_batch(test_tags, train_tags, oracles): test_tags = test_tags total_report = {} ambiguous_tags = [] for tags in get_progress_bar("disambiguating", item="articles")(test_tags): report, remainder = disambiguate(tags, oracles) ambiguous_tags.extend(remainder) for key, value in report.items(): if key not in total_report: total_report[key] = value else: total_report[key] += value return total_report, ambiguous_tags def obtain_tags(doc, wiki_trie, anchor_trie, trie_index2indices, trie_index2indices_counts, trie_index2indices_transitions, redirections, prefix, collection, first_names, min_count, min_percent): out_doc = [] for anchor, dest_index in doc.links(wiki_trie, redirections, prefix): if dest_index is None: out_doc.append((anchor, None)) continue anchor_stripped = anchor.strip() keep = False if len(anchor_stripped) > 0: anchor_stripped = clean_up_trie_source(anchor_stripped) if acceptable_anchor(anchor_stripped, anchor_trie, first_names): anchor_idx = anchor_trie[anchor_stripped] all_options = trie_index2indices[anchor_idx] all_counts = trie_index2indices_counts[anchor_idx] if len(all_options) > 0: if trie_index2indices_transitions is not None: old_dest_index = dest_index dest_index = transition_trie_index( anchor_idx, dest_index, trie_index2indices_transitions, all_options ) if dest_index != -1: new_dest_index = dest_index keep = True if keep and (min_count > 0 or min_percent > 0): dest_count = all_counts[all_options==new_dest_index] if dest_count < min_count or (dest_count 
/ sum(all_counts)) < min_percent: keep = False if keep: out_doc.append( ( anchor, (anchor_stripped, new_dest_index, all_options, all_counts) ) ) if not keep: out_doc.append((anchor, None)) return out_doc def add_boolean(parser, name, default): parser.add_argument("--%s" % (name,), action="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/store_true", default=default) parser.add_argument("--no%s" % (name,), action="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/store_false", dest=name) def get_parser(): parser = argparse.ArgumentParser() parser.add_argument("config") parser.add_argument("--relative_to", type=str, default=None) parser.add_argument("--log", type=str, default=None) add_boolean(parser, "verbose", True) add_boolean(parser, "interactive", True) return parser def parse_args(args=None): return get_parser().parse_args(args=args) def summarize_disambiguation(total_report, file=None): if file is None: file = sys.stdout if total_report.get("ambiguous", 0) > 0: for key, value in sorted(total_report.items(), key=lambda x : x[1]): if "disambiguated" in key: print("%.3f%% disambiguated by %s (%d / %d)" % ( 100.0 * value / total_report["ambiguous"], key[len("disambiguated"):].strip(), value, total_report["ambiguous"] ), file=file ) print("", file=file) for key, value in sorted(total_report.items(), key=lambda x : x[1]): if "disambiguated" in key: print("%.3f%% disambiguated by %s [including single choice] (%d / %d)" % ( 100.0 * ( (value + total_report["obvious"]) / (total_report["ambiguous"] + total_report["obvious"]) ), key[len("disambiguated"):].strip(), value + total_report["obvious"], total_report["ambiguous"] + total_report["obvious"] ), file=file ) print("", file=file) def summarize_ambiguities(ambiguous_tags, oracles, get_name): class_ambiguities = {} for anchor, dest, other_dest, times_pointed in ambiguous_tags: class_ambig_name = [] for oracle in oracles: class_ambig_name.append(oracle.classes[oracle.classify(dest)]) class_ambig_name = " and ".join(class_ambig_name) if class_ambig_name not in class_ambiguities: class_ambiguities[class_ambig_name] = { "count": 1, "examples": [(anchor, dest, other_dest, times_pointed)] } else: class_ambiguities[class_ambig_name]["count"] += 1 class_ambiguities[class_ambig_name]["examples"].append((anchor, dest, other_dest, times_pointed)) print("Ambiguity Report:") for classname, ambiguity in sorted(class_ambiguities.items(), key=lambda x: x[0]): print(" %s" % (classname,)) print(" %d ambiguities" % (ambiguity["count"],)) common_bad_anchors = Counter([anc for anc, _, _, _ in ambiguity["examples"]]).most_common(6) anchor2example = {anc: (dest, other_dest, times_pointed) for anc, dest, other_dest, times_pointed in ambiguity["examples"]} for bad_anchor, count in common_bad_anchors: dest, other_dest, times_pointed = anchor2example[bad_anchor] truth_times_pointed = int(times_pointed[np.equal(other_dest, dest)]) only_alt = [(el, int(times_pointed[k])) for k, el in enumerate(other_dest) if el != dest] only_alt = sorted(only_alt, key=lambda x: x[1], reverse=True) print(" %r (%d time%s)" % (bad_anchor, count, 's' if count != 1 else '')) print(" Actual: %r" % ((get_name(dest), truth_times_pointed),)) print(" Others: %r" % ([(get_name(el), c) for (el, c) in only_alt[:5]])) print("") print("") def get_prefix(config): return config.prefix or induce_wikipedia_prefix(config.wiki) def fix_and_parse_tags(config, collection, size): trie_index2indices = OffsetArray.load( join(config.language_path, "trie_index2indices"), 
compress=True ) trie_index2indices_counts = OffsetArray( np.load(join(config.language_path, "trie_index2indices_counts.npy")), trie_index2indices.offsets ) if exists(join(config.language_path, "trie_index2indices_transition_values.npy")): trie_index2indices_transitions = OffsetArray( np.load(join(config.language_path, "trie_index2indices_transition_values.npy")), np.load(join(config.language_path, "trie_index2indices_transition_offsets.npy")), ) else: trie_index2indices_transitions = None anchor_trie = marisa_trie.Trie().load(join(config.language_path, "trie.marisa")) wiki_trie = marisa_trie.RecordTrie('i').load( join(config.wikidata, "wikititle2wikidata.marisa") ) prefix = get_prefix(config) redirections = load_redirections(config.redirections) docs = load_wikipedia_docs(config.wiki, size) while True: try: collection.load_blacklist(join(SCRIPT_DIR, "blacklist.json")) except (ValueError,) as e: print("issue reading blacklist, please fix.") print(str(e)) enter_or_quit() continue break print("Load first_names") with open(join(PROJECT_DIR, "data", "first_names.txt"), "rt") as fin: first_names = set(fin.read().splitlines()) all_tags = [] for doc in get_progress_bar('fixing links', item='article')(docs): tags = obtain_tags( doc, wiki_trie=wiki_trie, anchor_trie=anchor_trie, trie_index2indices=trie_index2indices, trie_index2indices_counts=trie_index2indices_counts, trie_index2indices_transitions=trie_index2indices_transitions, redirections=redirections, prefix=prefix, first_names=first_names, collection=collection, min_count=config.min_count, min_percent=config.min_percent) if any(x is not None for _, x in tags): all_tags.append(tags) collection.reset_cache() return all_tags def main(): args = parse_args() config = load_config(args.config, ["wiki", "language_path", "wikidata", "redirections", "classification", "path"], defaults={"num_names_to_load": 0, "prefix": None, "sample_size": 100, "wiki": None, "min_count": 0, "min_percent": 0.0}, relative_to=args.relative_to) if config.wiki is None: raise ValueError("must provide path to 'wiki' in config.") prefix = get_prefix(config) print("Load type_collection") collection = TypeCollection( config.wikidata, num_names_to_load=config.num_names_to_load, prefix=prefix, verbose=True) fname = config.wiki all_tags = fix_and_parse_tags(config, collection, config.sample_size) test_tags = all_tags[:config.sample_size] train_tags = all_tags[config.sample_size:] oracles = [load_oracle_classification(classification) for classification in config.classification] def get_name(idx): if idx < config.num_names_to_load: if idx in collection.known_names: return collection.known_names[idx] + " (%s)" % (collection.ids[idx],) else: return collection.ids[idx] else: return maybe_web_get_name(collection.ids[idx]) + " (%s)" % (collection.ids[idx],) while True: total_report, ambiguous_tags = disambiguate_batch( test_tags, train_tags, oracles) summarize_disambiguation(total_report) if args.log is not None: with open(args.log, "at") as fout: summarize_disambiguation(total_report, file=fout) if args.verbose: try: summarize_ambiguities( ambiguous_tags, oracles, get_name ) except KeyboardInterrupt as e: pass if args.interactive: enter_or_quit() else: break if __name__ == "__main__": main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="42"><td class="min-w-fit max-w-sm break-words p-2 "><div 
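# --- Editor's note (illustrative sketch, not part of the original scripts) ---
# The "disambiguated greedy" counter in the evaluation script above asks
# whether always choosing the candidate that was linked most often recovers
# the true destination. A tiny self-contained version of that baseline with
# toy, hypothetical values:

import numpy as np

candidates = np.array([1001, 2002, 3003])   # candidate entity ids for one anchor
times_pointed = np.array([12, 157, 3])      # how often each candidate was linked
true_destination = 2002

greedy_pick = candidates[np.argmax(times_pointed)]
greedy_is_correct = bool(greedy_pick == true_destination)  # True for this toy data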
class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import argparse import marisa_trie import numpy as np from os.path import join from wikidata_linker_utils.progressbar import get_progress_bar from wikidata_linker_utils.bash import count_lines from wikidata_linker_utils.offset_array import save_record_with_offset def parse_args(argv=None): parser = argparse.ArgumentParser() parser.add_argument("wikipedia2wikidata_trie", help="Location of wikipedia -> wikidata mapping trie.") parser.add_argument("wikidata_ids") parser.add_argument("prefix") parser.add_argument("category_links") parser.add_argument("out") return parser.parse_args(argv) def main(): args = parse_args() trie = marisa_trie.RecordTrie('i').load(args.wikipedia2wikidata_trie) print('loaded trie') num_lines = count_lines(args.category_links) num_ids = count_lines(args.wikidata_ids) missing = [] num_missing = 0 num_broken = 0 all_category_links = [[] for i in range(num_ids)] with open(args.category_links, 'rt') as fin: fin_pbar = get_progress_bar('reading category_links', max_value=num_lines)(fin) for line in fin_pbar: try: origin, dest = line.rstrip('\n').split('\t') except: num_broken += 1 continue if len(dest) == 0: num_broken += 1 continue origin = args.prefix + '/' + origin prefixed_dest = args.prefix + '/' + dest origin_index = trie.get(origin, None) dest_index = trie.get(prefixed_dest, None) if dest_index is None: prefixed_dest = args.prefix + '/' + dest[0].upper() + dest[1:] dest_index = trie.get(prefixed_dest, None) if origin_index is None or dest_index is None: missing.append((origin, prefixed_dest)) num_missing += 1 else: all_category_links[origin_index[0][0]].append(dest_index[0][0]) print("%d/%d category links could not be found in wikidata" % (num_missing, num_lines)) print("%d/%d category links were malformed" % (num_broken, num_lines)) print("Missing links sample:") for origin, dest in missing[:10]: print("%r -> %r" % (origin, dest)) save_record_with_offset( join(args.out, "wikidata_%s_category_links" % (args.prefix,)), all_category_links ) if __name__ == "__main__": main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="43"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import argparse import time import marisa_trie import numpy as np import pandas from os.path import join, realpath, dirname from os import makedirs from wikidata_linker_utils.wikidata_iterator import open_wikidata_file from wikidata_linker_utils.file import true_exists from wikidata_linker_utils.bash import count_lines from wikidata_linker_utils.progressbar import get_progress_bar from wikidata_linker_utils.offset_array import save_record_with_offset from wikidata_linker_utils.wikidata_ids import ( load_wikidata_ids, load_names, property_names, temporal_property_names ) import wikidata_linker_utils.wikidata_properties as wikidata_properties SCRIPT_DIR = dirname(realpath(__file__)) PROJECT_DIR = dirname(SCRIPT_DIR) WIKITILE_2_WIKIDATA_TRIE_NAME = "wikititle2wikidata.marisa" WIKITILE_2_WIKIDATA_TSV_NAME = "wikidata_wikititle2wikidata.tsv" WIKIDATA_IDS_NAME = "wikidata_ids.txt" def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument("wikidata_dump", type=str, help="Path to wikidata dump file.") parser.add_argument("wikidata", type=str, help="Path to 
save location for wikidata properties.") parser.add_argument("--batch_size", type=int, default=1000) return parser.parse_args(args=args) def get_related_nested_field(doc_claims, nested_field): out = [] for claim in doc_claims: mainsnak = claim.get("mainsnak", None) if mainsnak is None: continue datavalue = mainsnak.get("datavalue", None) if datavalue is None: continue value = datavalue.get("value", None) if value is None: continue value_id = value.get(nested_field, None) if value_id is None: continue out.append(value_id) return out def get_related_entities(doc_claims): return get_related_nested_field(doc_claims, "id") def get_claim_time(doc_claims): return get_related_nested_field(doc_claims, "time") def get_wikidata_mapping(name2id_path, wikidata_ids_path, jsons, relation_names, verbose=False): approx_max_quantity = 24642416 if verbose: pbar = None from IPython.display import clear_output else: pbar = get_progress_bar("collect wikilinks", max_value=approx_max_quantity) pbar.start() clear_output = None wikidata_ids = [] entity_types = [] subclass = [] seen = 0 relations = { name: (open(outfile, "wt"), is_temporal) for name, outfile, is_temporal in relation_names } fout_name2id = None if true_exists(name2id_path) else open(name2id_path, "wt") fout_wikidata_ids = None if true_exists(wikidata_ids_path) else open(wikidata_ids_path, "wt") try: t_then = time.time() seen_last = 0 speed = None index = 0 for doc in jsons: seen += 1 if seen % 2000 == 0: if verbose: t_now = time.time() new_speed = (seen - seen_last) / (t_now - t_then) if speed is None: speed = new_speed else: speed = 0.9 * speed + 0.1 * new_speed clear_output(wait=True) print("%.3f%% done (%d seen, %.3f docs/s, ETA: %ds)" % ( 100.0 * seen / approx_max_quantity, seen, speed, int((approx_max_quantity - seen) / speed) ), flush=True) seen_last = seen t_then = t_now else: if seen < approx_max_quantity: pbar.update(seen) if fout_name2id is not None: if "sitelinks" in doc: for key, value in doc["sitelinks"].items(): if key.endswith("wiki"): fout_name2id.write(key + "/" + value["title"] + "\t" + str(index) + "\n") index += 1 if fout_wikidata_ids is not None: fout_wikidata_ids.write(doc["id"] + "\n") for name, (outfile, is_temporal) in relations.items(): if is_temporal: outfile.write( "\t".join(get_claim_time(doc["claims"].get(name, []))) + "\n" ) else: outfile.write( "\t".join(get_related_entities(doc["claims"].get(name, []))) + "\n" ) if pbar is not None: pbar.finish() finally: for name, (outfile, _) in relations.items(): outfile.close() if fout_name2id is not None: fout_name2id.close() if fout_wikidata_ids is not None: fout_wikidata_ids.close() def convert_wikidata_ids_to_ids(id2index, wikidata_ids): return [[id2index.get(wikidata_id, -1) for wikidata_id in propgroup] for propgroup in wikidata_ids] def parse_year(text): pos = text[1:].find("-") return int(text[:pos+1]) def values_exist(path): return ( true_exists(path + "_values.npy") or true_exists(path + "_values.sparse.npy") ) def line2indices(id2index, line): if len(line) == 0: return [] out = [] for el in line.split("\t"): idx = id2index.get(el, None) if idx is None: continue else: out.append(idx) return out def fixed_point_name_alternates(name): if name.endswith(")"): pos_closing = name.rfind("(") return (name, name[:pos_closing].strip()) if name.endswith("ses"): return (name, name[:-2] + "is") if name.endswith("ies"): return (name, name[:-3] + "y") if name.endswith("s"): return (name, name[:-1]) return (name,) def build_fixed_point(out, prefix): wiki_fixed_point_save = join(out, 
"wikidata_%s_fixed_points_values.npy" % (prefix,)) if not true_exists(wiki_fixed_point_save): print("building %s fixed point property." % (prefix,)) trie = marisa_trie.RecordTrie('i').load(join(out, WIKITILE_2_WIKIDATA_TRIE_NAME)) num_items = count_lines(join(out, WIKIDATA_IDS_NAME)) fixed_point_relation = {} category_prefix = "%s/Category:" % (prefix,) article_prefix = "%s/" % (prefix,) wikititle2wikidata_path = join(out, WIKITILE_2_WIKIDATA_TSV_NAME) relevant_items = trie.iteritems(category_prefix) for name, category_idx in relevant_items: article_name = article_prefix + name[len(category_prefix):] for fixed_point_name_alternate in fixed_point_name_alternates(article_name): matches = trie.get(fixed_point_name_alternate, None) if matches is not None and len(matches) > 0: fixed_point_relation[category_idx] = [matches[0][0]] break print("Found %d fixed point relations for %s" % (len(fixed_point_relation), prefix,)) save_record_with_offset( join(out, "wikidata_%s_fixed_points" % (prefix,)), fixed_point_relation, num_items ) def main(): args = parse_args() makedirs(args.wikidata, exist_ok=True) wikidata_names2prop_names = property_names( join(PROJECT_DIR, "data", "wikidata", 'wikidata_property_names.json') ) wikidata_names2temporal_prop_names = temporal_property_names( join(PROJECT_DIR, "data", "wikidata", 'wikidata_time_property_names.json') ) # fields to make easily accessible: wikidata_important_properties = [ wikidata_properties.INSTANCE_OF, wikidata_properties.SUBCLASS_OF, wikidata_properties.PART_OF, wikidata_properties.OCCUPATION, wikidata_properties.FIELD_OF_WORK, wikidata_properties.FIELD_OF_THIS_OCCUPATION, wikidata_properties.MEDICAL_SPECIALITY, wikidata_properties.GENRE, wikidata_properties.SEX_OR_GENDER, wikidata_properties.COUNTRY_OF_CITIZENSHIP, wikidata_properties.COUNTRY, wikidata_properties.CONTINENT, wikidata_properties.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY, wikidata_properties.SPORT, wikidata_properties.STUDIES, wikidata_properties.SERIES, wikidata_properties.USE, wikidata_properties.LOCATION, wikidata_properties.FACET_OF, wikidata_properties.IS_A_LIST_OF, wikidata_properties.COUNTRY_OF_ORIGIN, wikidata_properties.PRODUCT_OR_MATERIAL_PRODUCED, wikidata_properties.INDUSTRY, wikidata_properties.PARENT_TAXON, wikidata_properties.APPLIES_TO_TERRITORIAL_JURISDICTION, wikidata_properties.POSITION_HELD, wikidata_properties.CATEGORYS_MAIN_TOPIC, # temporal properties wikidata_properties.PUBLICATION_DATE, wikidata_properties.DATE_OF_BIRTH, wikidata_properties.DATE_OF_DEATH, wikidata_properties.INCEPTION, wikidata_properties.DISSOLVED_OR_ABOLISHED, wikidata_properties.POINT_IN_TIME, wikidata_properties.START_TIME, wikidata_properties.END_TIME ] wikidata_important_properties_fnames = [ (name, join(args.wikidata, "wikidata_%s.txt" % (name,)), name in wikidata_names2temporal_prop_names) for name in wikidata_important_properties ] missing_wikidata_important_properties_fnames = [ (name, outfile, is_temporal) for name, outfile, is_temporal in wikidata_important_properties_fnames if not true_exists(outfile) ] wikidata_ids_path = join(args.wikidata, WIKIDATA_IDS_NAME) wikititle2wikidata_path = join(args.wikidata, WIKITILE_2_WIKIDATA_TSV_NAME) work_to_be_done = ( not true_exists(wikidata_ids_path) or not true_exists(wikititle2wikidata_path) or len(missing_wikidata_important_properties_fnames) > 0 ) if work_to_be_done: get_wikidata_mapping( wikititle2wikidata_path, wikidata_ids_path, open_wikidata_file(args.wikidata_dump, args.batch_size), 
missing_wikidata_important_properties_fnames ) numpy_wikidata_important_properties_fnames = [ (name, outfile, is_temporal) for name, outfile, is_temporal in wikidata_important_properties_fnames if not values_exist(join(args.wikidata, "wikidata_%s" % (name,))) ] # obtain a mapping from id -> number if len(numpy_wikidata_important_properties_fnames) > 0: _, id2index = load_wikidata_ids(args.wikidata) # make relations numerical: for relname, outfile, is_temporal in numpy_wikidata_important_properties_fnames: with open(outfile, "rt") as fin: lines = fin.read().splitlines() fin_pbar = get_progress_bar("loading relation %r" % (relname,))(lines) if is_temporal: value = np.zeros(len(lines) * 2 + 1, dtype=np.int32) position = 1 seen = 0 for idx, line in enumerate(fin_pbar): for wikidata_id in line.split('\t'): if len(wikidata_id) > 0: value[position] = idx value[position + 1] = parse_year(wikidata_id) position += 2 seen += 1 break value[0] = len(lines) value = value[:position] np.save(join(args.wikidata, "wikidata_%s_values.sparse.npy" % (relname,)), value) else: relation = [ line2indices(id2index, line) for line in fin_pbar ] save_record_with_offset( join(args.wikidata, "wikidata_%s" % (relname,)), relation ) del id2index # convert the mapping from wikinames to integer values: trie_save_path = join(args.wikidata, WIKITILE_2_WIKIDATA_TRIE_NAME) if not true_exists(trie_save_path): print("loading wikipedia name -> wikidata") name2id = pandas.read_csv(wikititle2wikidata_path, sep="\t", encoding='utf-8') print("loaded") trie = marisa_trie.RecordTrie( 'i', get_progress_bar("convert to trie", max_value=name2id.shape[0])( (key, (value,)) for _, key, value in name2id.itertuples() ) ) trie.save(trie_save_path) build_fixed_point(args.wikidata, "enwiki") if __name__ == '__main__': main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="44"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">import json import time import re import argparse from wikidata_linker_utils.wikipedia import iterate_articles from multiprocessing import Pool CATEGORY_PREFIXES = [ "Category:", "CatΓ©gorie:", "Categorie:", "CategorΓ­a:", "Categoria:", "Kategorie:", "Kategoria:", "ΠšΠ°Ρ‚Π΅Π³ΠΎΡ€ΠΈΡ:", "Kategori:" ] category_link_pattern = re.compile( r"\[\[((?:" + "|".join(CATEGORY_PREFIXES) + r")[^\]\[]*)\]\]" ) redirection_link_pattern = re.compile(r"(?:#REDIRECT|#weiterleitung|#REDIRECCIΓ“N|REDIRECIONAMENTO)\s*\[\[([^\]\[]*)\]\]", re.IGNORECASE) anchor_link_pattern = re.compile(r"\[\[([^\]\[:]*)\]\]") def category_link_job(args): """ Performing map-processing on different articles (in this case, just remove internal links) """ article_name, lines = args found_tags = [] for match in re.finditer(category_link_pattern, lines): match_string = match.group(1).strip() if "|" in match_string: link, _ = match_string.rsplit("|", 1) link = link.strip().split("#")[0] else: link = match_string if len(link) > 0: found_tags.append(link) return (article_name, found_tags) def redirection_link_job(args): """ Performing map-processing on different articles (in this case, just remove internal links) """ article_name, lines = args found_tags = [] for match in re.finditer(redirection_link_pattern, lines): if match is None: continue if match.group(1) is None: continue match_string = match.group(1).strip() if 
"|" in match_string: link, _ = match_string.rsplit("|", 1) link = link.strip().split("#")[0] else: link = match_string if len(link) > 0: found_tags.append(link) return (article_name, found_tags) def anchor_finding_job(args): """ Performing map-processing on different articles (in this case, just remove internal links) """ article_name, lines = args found_tags = [] for match in re.finditer(anchor_link_pattern, lines): match_string = match.group(1).strip() if "|" in match_string: link, anchor = match_string.rsplit("|", 1) link = link.strip().split("#")[0] anchor = anchor.strip() else: anchor = match_string link = match_string if len(anchor) > 0 and len(link) > 0: found_tags.append((anchor, link)) return (article_name, found_tags) def anchor_category_redirection_link_job(args): article_name, found_redirections = redirection_link_job(args) article_name, found_categories = category_link_job(args) article_name, found_anchors = anchor_finding_job(args) return (article_name, (found_anchors, found_redirections, found_categories)) def run_jobs(worker_pool, pool_jobs, outfile_anchors, outfile_redirections, outfile_category_links): results = worker_pool.map(anchor_category_redirection_link_job, pool_jobs) for article_name, result in results: anchor_links, redirect_links, category_links = result for link in redirect_links: outfile_redirections.write(article_name + "\t" + link + "\n") for link in category_links: outfile_category_links.write(article_name + "\t" + link + "\n") if ":" not in article_name: outfile_anchors.write(article_name + "\t" + article_name + "\t" + article_name + "\n") for anchor, link in anchor_links: outfile_anchors.write(article_name + "\t" + anchor + "\t" + link + "\n") def parse_wiki(path, anchors_path, redirections_path, category_links_path, threads=1, max_jobs=10): t0 = time.time() jobs = [] pool = Pool(processes=threads) try: with open(redirections_path, "wt") as fout_redirections, open(category_links_path, "wt") as fout_category_links, open(anchors_path, "wt") as fout_anchors: for article_name, lines in iterate_articles(path): jobs.append((article_name, lines)) if len(jobs) >= max_jobs: run_jobs(pool, jobs, fout_anchors, fout_redirections, fout_category_links) jobs = [] if len(jobs) > 0: run_jobs(pool, jobs, fout_anchors, fout_redirections, fout_category_links) jobs = [] finally: pool.close() t1 = time.time() print("%.3fs elapsed." 
% (t1 - t0,)) def parse_args(argv=None): parser = argparse.ArgumentParser() parser.add_argument("wiki", help="Wikipedia dump file (xml).") parser.add_argument("out_anchors", help="File where anchor information should be saved (tsv).") parser.add_argument("out_redirections", help="File where redirection information should be saved (tsv).") parser.add_argument("out_category_links", help="File where category link information should be saved (tsv).") def add_int_arg(name, default): parser.add_argument("--%s" % (name,), type=int, default=default) add_int_arg("threads", 8) add_int_arg("max_jobs", 10000) return parser.parse_args(argv) def main(argv=None): args = parse_args(argv) parse_wiki( path=args.wiki, anchors_path=args.out_anchors, redirections_path=args.out_redirections, category_links_path=args.out_category_links, threads=args.threads, max_jobs=args.max_jobs ) if __name__ == "__main__": main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="45"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" Create a tsv file where where the first column is a token and second column is the QID (wikidata internal id for entities). This can then be used by evaluate_learnability or from training a type model. Usage ----- ``` python3 produce_wikidata_tsv.py configs/en_export_config.json en_wikipedia.tsv ``` Use `--relative_to` argument to specify the base directory for relative paths in the config file. """ import argparse import re import json from os.path import join, dirname, realpath, exists import marisa_trie import ciseau import numpy as np from wikidata_linker_utils.wikipedia import ( iterate_articles, induce_wikipedia_prefix, load_redirections, transition_trie_index ) from wikidata_linker_utils.json import load_config from wikidata_linker_utils.offset_array import OffsetArray from wikidata_linker_utils.type_collection import TypeCollection from wikidata_linker_utils.anchor_filtering import acceptable_anchor, clean_up_trie_source from wikidata_linker_utils.wikipedia import match_wikipedia_to_wikidata SCRIPT_DIR = dirname(realpath(__file__)) def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument("config") parser.add_argument("out") parser.add_argument("--relative_to", type=str, default=None) return parser.parse_args(args=args) link_pattern = re.compile(r"\[\[([^\]\[:]*)\]\]") ref_pattern = re.compile(r"<ref[^<>]*>[^<]+</ref>") double_bracket_pattern = re.compile(r"{{[^{}]+}}") title_pattern = re.compile(r"==+([^=]+)==+") bullet_point_pattern = re.compile(r"^([*#])", re.MULTILINE) def merge_tags(words, tags, start_sent): out = [(w, []) for w in words] for tag_start, tag_end, tag in tags: so_far = start_sent for k, word in enumerate(words): begins = tag_start <= so_far or (tag_start > so_far and tag_start < so_far + len(word)) ends = (so_far + len(word) <= tag_end) or (tag_end < so_far + len(word) and tag_end > so_far) if begins and ends: out[k][1].append(tag) so_far += len(word) if so_far >= tag_end: break return out def pick_relevant_tags(tagged_sequence, char_offset, char_offset_end): relevant_tags = [] for word, tags in tagged_sequence: if tags is not None: start, end, dest_index = tags if start >= char_offset and start < char_offset_end: relevant_tags.append((start, end, dest_index)) if start >= char_offset_end: break 
return relevant_tags def convert_document_to_labeled_tags(annotated, sentences): paragraphs = [] paragraph = [] char_offset = 0 for sentence in sentences: sentence_length = sum(len(w) for w in sentence) sentence_tags = pick_relevant_tags( annotated, char_offset, char_offset + sentence_length ) sentence_with_tags = merge_tags( sentence, sentence_tags, char_offset ) sentence_with_tags = [ ( w, [tags[0]] if len(tags) > 0 else [] ) for w, tags in sentence_with_tags ] if "\n" in sentence[-1]: paragraph.extend(sentence_with_tags) paragraphs.append(paragraph) paragraph = [] else: paragraph.extend(sentence_with_tags) char_offset += sentence_length if len(paragraph) > 0: paragraphs.append(paragraph) return paragraphs def annotate_document(doc, collection, wiki_trie, anchor_trie, trie_index2indices, trie_index2indices_counts, trie_index2indices_transitions, redirections, prefix): out = [] current_position = 0 current_position_no_brackets = 0 for match in re.finditer(link_pattern, doc): start = match.start() end = match.end() if current_position != start: out.append( (doc[current_position:start], None) ) current_position_no_brackets += start - current_position current_position = end match_string = match.group(1).strip() if "|" in match_string: link, anchor = match_string.rsplit("|", 1) link = link.strip().split("#")[0] anchor = anchor anchor_stripped = anchor.strip() else: anchor = match_string anchor_stripped = match_string.strip() link = anchor_stripped if len(anchor) > 0 and len(link) > 0: anchor = clean_up_trie_source(anchor, lowercase=False) lowercase_anchor = anchor.lower() if acceptable_anchor(lowercase_anchor, anchor_trie): anchor_idx = anchor_trie[lowercase_anchor] dest_index = match_wikipedia_to_wikidata(link, wiki_trie, redirections, prefix) if dest_index is not None: all_options = trie_index2indices[anchor_idx] if len(all_options) > 0: if trie_index2indices_transitions is not None: dest_index = transition_trie_index( anchor_idx, dest_index, trie_index2indices_transitions, all_options ) try: new_dest_index = dest_index keep = True if keep: out.append( ( anchor, ( current_position_no_brackets, current_position_no_brackets + len(anchor), collection.ids[new_dest_index] ) ) ) current_position_no_brackets += len(anchor) continue except IndexError: # missing element pass current_position_no_brackets += len(anchor) out.append( (anchor, None) ) if current_position != len(doc): out.append( (doc[current_position:len(doc)], None) ) return out def convert(article_name, doc, collection, wiki_trie, anchor_trie, trie_index2indices, trie_index2indices_counts, trie_index2indices_transitions, redirections, prefix): doc = doc.replace("\t", " ") # remove ref tags: doc = re.sub(ref_pattern, " ", doc) doc = re.sub(double_bracket_pattern, " ", doc) doc = re.sub(title_pattern, r"\n\n\1\. 
", doc) doc = re.sub(bullet_point_pattern, r"\1 ", doc) article_index = match_wikipedia_to_wikidata( article_name, wiki_trie, redirections, prefix ) # find location of tagged items in wikipedia: annotated = annotate_document(doc, collection, wiki_trie, anchor_trie, trie_index2indices, trie_index2indices_counts, trie_index2indices_transitions, redirections, prefix) text_without_brackets = "".join(text for text, _ in annotated) sentences = ciseau.sent_tokenize( text_without_brackets, normalize_ascii=False, keep_whitespace=True ) return ( convert_document_to_labeled_tags( annotated, sentences ), collection.ids[article_index] if article_index is not None else "other" ) def main(): args = parse_args() config = load_config( args.config, ["wiki", "language_path", "wikidata", "redirections"], defaults={ "num_names_to_load": 0, "prefix": None, "sample_size": 100 }, relative_to=args.relative_to ) prefix = config.prefix or induce_wikipedia_prefix(config.wiki) collection = TypeCollection( config.wikidata, num_names_to_load=0 ) collection.load_blacklist(join(SCRIPT_DIR, "blacklist.json")) trie_index2indices = OffsetArray.load( join(config.language_path, "trie_index2indices"), compress=True ) trie_index2indices_counts = OffsetArray( np.load(join(config.language_path, "trie_index2indices_counts.npy")), trie_index2indices.offsets ) if exists(join(config.language_path, "trie_index2indices_transition_values.npy")): trie_index2indices_transitions = OffsetArray( np.load(join(config.language_path, "trie_index2indices_transition_values.npy")), np.load(join(config.language_path, "trie_index2indices_transition_offsets.npy")), ) else: trie_index2indices_transitions = None anchor_trie = marisa_trie.Trie().load(join(config.language_path, "trie.marisa")) wiki_trie = marisa_trie.RecordTrie('i').load( join(config.wikidata, "wikititle2wikidata.marisa") ) redirections = load_redirections(config.redirections) seen = 0 with open(args.out, "wt") as fout: try: for article_name, article in iterate_articles(config.wiki): fixed_article, article_qid = convert( article_name, article, collection=collection, anchor_trie=anchor_trie, wiki_trie=wiki_trie, trie_index2indices=trie_index2indices, trie_index2indices_counts=trie_index2indices_counts, trie_index2indices_transitions=trie_index2indices_transitions, redirections=redirections, prefix=prefix) for paragraph in fixed_article: for word, qids in paragraph: if len(qids) > 0: fout.write(word.rstrip() + "\t" + "\t".join(qids + [article_qid]) + "\n") else: fout.write(word.rstrip() + "\n") fout.write("\n") seen += 1 if seen >= config.sample_size: break finally: fout.flush() fout.close() if __name__ == "__main__": main() </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="46"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">""" Perform a reduction on the anchors to articles relation by finding different articles refering to the same item and making the anchor point to the most common version, or by using the wikidata graph to find instance of, and other parent-child relations that allow one article to encompass or be more generic than its co-triggerable articles. 
Usage: ------ ``` DATA_DIR=data/wikidata LANG_DIR=data/en_trie FIXED_LANG_DIR=data/en_trie_fixed python3 fast_link_fixer.py ${WIKIDATA_PATH} ${LANG_DIR} ${FIXED_LANG_DIR} ``` """ import argparse import time import shutil from os.path import join, realpath, dirname from os import makedirs import numpy as np import marisa_trie from wikidata_linker_utils.type_collection import get_name, TypeCollection from wikidata_linker_utils.logic import logical_and, logical_ands, logical_not, logical_or, logical_ors from wikidata_linker_utils.progressbar import get_progress_bar from wikidata_linker_utils.offset_array import OffsetArray from wikidata_linker_utils.file import true_exists import wikidata_linker_utils.wikidata_properties as wprop from wikidata_linker_utils.successor_mask import ( related_promote_highest, extend_relations, reduce_values, remap_offset_array ) SCRIPT_DIR = dirname(realpath(__file__)) from numpy import logical_not, logical_or, logical_and from wikidata_linker_utils.logic import logical_ors IS_HISTORY = None IS_PEOPLE = None IS_BREED = None IS_PEOPLE_GROUP = None IS_LIST_ARTICLE = None IS_LANGUAGE_ALPHABET = None IS_SPORTS_TEAM = None IS_CARDINAL_DIRECTION = None IS_POLITICAL_PARTY = None IS_SOCIETY = None IS_POSITION = None IS_CHARACTER_HUMAN = None IS_POLITICAL_ORGANIZATION = None IS_LANDFORM = None IS_THING = None IS_BATTLE = None IS_EVENT = None IS_ACTIVITY = None IS_THOROUGHFARE = None IS_KINSHIP = None IS_EPISODE_LIST = None def wkp(c, name): return c.article2id['enwiki/' + name][0][0] def wkd(c, name): return c.name2index[name] def initialize_globals(c): """global variables that guide the metonymy/anaphora removal process.""" global IS_HISTORY global IS_PEOPLE global IS_PEOPLE_GROUP global IS_LIST_ARTICLE global IS_COUNTRY global IS_BREED global IS_EVENT_SPORT global IS_LANGUAGE_ALPHABET global IS_SPORTS_TEAM global IS_CARDINAL_DIRECTION global IS_ACTIVITY global IS_POLITICAL_PARTY global IS_SOCIETY global IS_BATTLE global IS_POSITION global IS_LANDFORM global IS_CHARACTER_HUMAN global IS_POLITICAL_ORGANIZATION global IS_THING global IS_THOROUGHFARE global IS_EVENT global IS_KINSHIP global IS_EPISODE_LIST PEOPLE = wkd(c, "Q2472587") NATIONALITY = wkd(c, "Q231002") ASPECT_OF_HIST = wkd(c, "Q17524420") HISTORY = wkd(c, "Q309") LIST_ARTICLE = wkd(c, "Q13406463") WAR = wkd(c, "Q198") COUNTRY = wkd(c, "Q6256") FORMER_COUNTRY = wkd(c, "Q3024240") DOMINION = wkd(c, "Q223832") LANGUAGE = wkd(c, "Q34770") ALPHABET = wkd(c, "Q9779") COLONY = wkd(c, "Q133156") GOVERNORATE = wkd(c, "Q1798622") SPORTS_TEAM = wkd(c, "Q12973014") ATHLETIC_CONFERENCE = wkd(c, "Q2992826") CARDINAL_DIRECTION = wkd(c, "Q23718") POLITICAL_PARTY = wkd(c, "Q7278") STATE = wkd(c, "Q7275") DYNASTY = wkd(c, "Q164950") SOCIETY = wkd(c, "Q8425") MENS_SINGLES = wkd(c, "Q16893072") SPORT = wkd(c, "Q349") POSITION = wkd(c, "Q4164871") HUMAN = wkd(c, "Q5") FICTIONAL_CHARACTER = wkd(c, "Q95074") BREED = wkd(c, "Q38829") ORTHOGRAPHY = wkd(c, "Q43091") POLITICAL_ORGANIZATION = wkd(c, "Q7210356") GROUP_OF_HUMANS = wkd(c, "Q16334295") LANDFORM = wkd(c, "Q271669") BATTLE = wkd(c, "Q178561") FOOD = wkd(c, "Q2095") DRINK = wkd(c, "Q40050") ANIMAL = wkd(c, "Q16521") WORK = wkd(c, "Q386724") AUTOMOBILE_MODEL = wkd(c, "Q3231690") GOOD = wkd(c, "Q28877") VEHICLE = wkd(c, "Q42889") PUBLICATION = wkd(c, "Q732577") AUDIOVISUAL = wkd(c, "Q2431196") TERRITORIAL_ENTITY = wkd(c, "Q15642541") GEOGRAPHIC_OBJECT = wkd(c, "Q618123") ASTRO_OBJECT = wkd(c, "Q17444909") EVENT_SPORTING = wkd(c, "Q1656682") EVENT_OCCURRENCE = wkd(c, "Q1190554") 
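    # --- Editor's note (illustrative, not part of the original script) ---
    # The IS_* masks assigned below are boolean arrays over all wikidata items.
    # As used throughout this file, `c.satisfy(properties, roots)` marks items
    # reachable from the given root ids along the given properties, and those
    # masks are then composed with numpy logic ops, for example:
    #
    #     is_aspect_of_history = c.satisfy([wprop.INSTANCE_OF], [ASPECT_OF_HIST])
    #     is_history_article = c.satisfy([wprop.INSTANCE_OF], [HISTORY])
    #     # an "aspect of history" that is not the generic History article:
    #     IS_HISTORY = logical_and(is_aspect_of_history,
    #                              logical_not(is_history_article))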
ELECTROMAGNETIC_SPECTRUM = wkd(c, "Q133139") MAGICAL_ORG = wkd(c, "Q14946195") AUTONOM_CHURCH = wkd(c, "Q20871948") SIGN = wkd(c, "Q3695082") FORM_OF_GOVERNMENT = wkd(c, "Q1307214") SPORTS_ORG = wkd(c, "Q4438121") RECURRING_SPORTING_EVENT = wkd(c, "Q18608583") CLASS_SCHEME = wkd(c, "Q5962346") STYLE = wkd(c, "Q1292119") SIGN_SYSTEM = wkd(c, "Q7512598") PHYSICAL_PHENOMENON = wkd(c, "Q1293220") LAW = wkd(c, "Q7748") WATERCOURSE = wkd(c, "Q355304") BODY_OF_WATER = wkd(c, "Q15324") CHEMICAL_SUBSTANCE = wkd(c, "Q79529") HISTORICAL_PERIOD = wkd(c, "Q11514315") ACTIVITY = wkd(c, "Q815962") THOROUGHFARE = wkd(c, "Q83620") KINSHIP = wkd(c, "Q171318") FICTIONAL_HUMAN = wkd(c, "Q15632617") EPISODE = wkd(c, "Q1983062") IS_CHARACTER_HUMAN = c.satisfy( [wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.IS_A_LIST_OF], [HUMAN, FICTIONAL_HUMAN, FICTIONAL_CHARACTER] ) # to be a history you must be an aspect of history # but not a history itself: IS_HISTORY = logical_and( c.satisfy([wprop.INSTANCE_OF], [ASPECT_OF_HIST]), logical_not(c.satisfy([wprop.INSTANCE_OF], [HISTORY])) ) IS_PEOPLE = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [PEOPLE, NATIONALITY]) IS_PEOPLE_GROUP = np.logical_or( IS_PEOPLE, c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [GROUP_OF_HUMANS, MAGICAL_ORG, AUTONOM_CHURCH]) ) IS_LIST_ARTICLE = c.satisfy([wprop.INSTANCE_OF], [LIST_ARTICLE]) IS_LANGUAGE_ALPHABET = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [LANGUAGE, ALPHABET, ORTHOGRAPHY, SIGN_SYSTEM] ) IS_COUNTRY = c.satisfy([wprop.INSTANCE_OF], [COUNTRY, FORMER_COUNTRY, DOMINION, COLONY, STATE, DYNASTY, GOVERNORATE]) IS_SPORTS_TEAM = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.PART_OF], [SPORTS_TEAM, ATHLETIC_CONFERENCE, SPORTS_ORG, RECURRING_SPORTING_EVENT]) IS_CARDINAL_DIRECTION = c.satisfy([wprop.INSTANCE_OF], [CARDINAL_DIRECTION]) IS_POLITICAL_PARTY = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POLITICAL_PARTY]) IS_SOCIETY = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [SOCIETY, HISTORICAL_PERIOD]) IS_POSITION = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POSITION]) IS_BREED = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [BREED]) IS_POLITICAL_ORGANIZATION = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [POLITICAL_ORGANIZATION, FORM_OF_GOVERNMENT]) IS_LANDFORM = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [LANDFORM, TERRITORIAL_ENTITY, GEOGRAPHIC_OBJECT, ASTRO_OBJECT, WATERCOURSE, BODY_OF_WATER]) IS_EVENT_SPORT = c.satisfy([wprop.SUBCLASS_OF, wprop.PART_OF, wprop.INSTANCE_OF], [EVENT_SPORTING, SPORT]) IS_THING = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [ AUTOMOBILE_MODEL, FOOD, DRINK, STYLE, ANIMAL, GOOD, LAW, CHEMICAL_SUBSTANCE, SIGN, VEHICLE, PHYSICAL_PHENOMENON, PUBLICATION, AUDIOVISUAL, CLASS_SCHEME, WORK, ELECTROMAGNETIC_SPECTRUM ] ) IS_THOROUGHFARE = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [THOROUGHFARE]) IS_ACTIVITY = c.satisfy([wprop.INSTANCE_OF], [ACTIVITY]) IS_EVENT = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF], [EVENT_OCCURRENCE]) IS_BATTLE = c.satisfy([wprop.SUBCLASS_OF, wprop.INSTANCE_OF], [BATTLE]) IS_KINSHIP = c.satisfy([wprop.INSTANCE_OF], [KINSHIP]) IS_EPISODE_LIST = c.satisfy([wprop.IS_A_LIST_OF], [EPISODE]) def get_relation_data(collection, relation_paths): """Prepare relations for usage inside extend_relations.""" out = [] for path in relation_paths: promote = path.get("promote", False) numpy_path = [] for step in path["steps"]: if isinstance(step, str): step_name, max_usage = step, 1 else: step_name, max_usage = step relation = 
collection.relation(step_name) numpy_path.append((relation.offsets, relation.values, max_usage)) inv_relation = collection.get_inverted_relation(step_name).edges() > 0 out.append((numpy_path, inv_relation, promote)) return out def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("wikidata") parser.add_argument("language_path") parser.add_argument("new_language_path") parser.add_argument("--steps", type=int, default=3, help="how many time should fixing be recursed (takes " "about 2mn per step. Has diminishing returns).") return parser.parse_args() def get_trie_properties(trie, offsets, values): """Obtain the length of every trigger in the trie.""" anchor_length = np.zeros(len(values), dtype=np.int32) start, end = 0, 0 for idx, key in enumerate(trie.iterkeys()): end = offsets[idx] anchor_length[start:end] = len(key) start = end return anchor_length def fix(collection, offsets, values, counts, anchor_length, num_category_link=8, keep_min=5): relations_that_can_extend = [ {"steps": [wprop.INSTANCE_OF]}, {"steps": [wprop.INSTANCE_OF, (wprop.SUBCLASS_OF, 2)]}, {"steps": [wprop.INSTANCE_OF, wprop.FACET_OF]}, {"steps": [(wprop.SUBCLASS_OF, 3)]}, {"steps": [wprop.OCCUPATION], "promote": True}, {"steps": [wprop.POSITION_HELD], "promote": True}, {"steps": [wprop.PART_OF, wprop.INSTANCE_OF]}, {"steps": [wprop.SERIES, wprop.INSTANCE_OF]}, {"steps": [wprop.SERIES, wprop.LOCATION]}, {"steps": [wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY]}, {"steps": [wprop.COUNTRY]}, {"steps": [wprop.CATEGORY_LINK, wprop.CATEGORYS_MAIN_TOPIC]}, {"steps": [(wprop.CATEGORY_LINK, num_category_link), wprop.FIXED_POINTS]}, {"steps": [wprop.CATEGORY_LINK, wprop.FIXED_POINTS, wprop.IS_A_LIST_OF]}, {"steps": [wprop.IS_A_LIST_OF, (wprop.SUBCLASS_OF, 2)]} ] relation_data = get_relation_data(collection, relations_that_can_extend) new_values = values # get rid of History of BLAH where link also points to BLAH: is_history = IS_HISTORY[new_values] is_people_mask = IS_PEOPLE[new_values] is_list = IS_LIST_ARTICLE[new_values] new_values = related_promote_highest( new_values, offsets, counts, condition=is_history, alternative=is_people_mask, keep_min=keep_min ) unchanged = values == new_values is_not_history_or_list = logical_and( logical_not(is_history), logical_not(is_list) ) new_values = related_promote_highest( new_values, offsets, counts, condition=logical_and(is_history, unchanged), alternative=is_not_history_or_list, keep_min=keep_min ) is_sport_or_thoroughfare = logical_or( IS_EVENT_SPORT, IS_THOROUGHFARE )[new_values] # delete these references: new_values[anchor_length < 2] = -1 # get rid of shorthand for sports: new_values[logical_and(is_sport_or_thoroughfare, anchor_length <= 2)] = -1 # remove lists of episodes: is_episode_list = IS_EPISODE_LIST[new_values] new_values[is_episode_list] = -1 # get rid of "car" -> "Renault Megane", when "car" -> "Car", # and "Renault Megane" is instance of "Car": is_not_people = logical_not(IS_PEOPLE)[new_values] new_values = extend_relations( relation_data, new_values, offsets, counts, alternative=is_not_people, pbar=get_progress_bar("extend_relations", max_value=len(offsets), item="links"), keep_min=keep_min ) unchanged = values == new_values # remove all non-modified values that are # not instances of anything, nor subclasses of anything: new_values[logical_ands( [ logical_ands([ collection.relation(wprop.INSTANCE_OF).edges() == 0, collection.relation(wprop.SUBCLASS_OF).edges() == 0, collection.relation(wprop.PART_OF).edges() == 0, 
collection.relation(wprop.CATEGORY_LINK).edges() == 0 ])[new_values], unchanged ])] = -1 is_kinship = IS_KINSHIP[new_values] is_human = IS_CHARACTER_HUMAN[new_values] new_values = related_promote_highest( new_values, offsets, counts, condition=is_human, alternative=is_kinship, keep_min=keep_min ) # replace elements by a country # if a better alternative is present, # counts is less than 100: should_replace_by_country = logical_ands( [ logical_not( logical_ors([ IS_POLITICAL_ORGANIZATION, IS_CARDINAL_DIRECTION, IS_LANGUAGE_ALPHABET, IS_COUNTRY, IS_PEOPLE_GROUP, IS_BREED, IS_BATTLE, IS_SOCIETY, IS_POSITION, IS_POLITICAL_PARTY, IS_SPORTS_TEAM, IS_CHARACTER_HUMAN, IS_LANDFORM, IS_ACTIVITY ]) )[new_values], counts < 100 ] ) # turn this into a promote highest in this order: is_country_or_cardinal = [ IS_CARDINAL_DIRECTION, IS_COUNTRY, IS_POLITICAL_ORGANIZATION ] for i, alternative in enumerate(is_country_or_cardinal): unchanged = values == new_values should_replace_by_country = logical_and( should_replace_by_country, unchanged ) new_values = related_promote_highest( new_values, offsets, counts, condition=should_replace_by_country, alternative=alternative[new_values], keep_min=keep_min ) new_offsets, new_values, new_counts, location_shift = reduce_values( offsets, new_values, counts) return (new_offsets, new_values, new_counts), location_shift def filter_trie(trie, values): return marisa_trie.Trie((trie.restore_key(value) for value in values)) def remap_trie_offset_array(old_trie, new_trie, offsets_values_counts): mapping = np.zeros(len(new_trie), dtype=np.int32) t0 = time.time() for new_index in range(len(new_trie)): mapping[new_index] = old_trie[new_trie.restore_key(new_index)] t1 = time.time() print("Got mapping from old trie to new trie in %.3fs" % (t1 - t0,)) ported = [] for offsets, values, counts in offsets_values_counts: new_offsets, new_values, new_counts = remap_offset_array( mapping, offsets, values, counts ) ported.append((new_offsets, new_values, new_counts)) t2 = time.time() print("Ported counts and values across tries in %.3fs" % (t2 - t1,)) return ported def main(): args = parse_args() if args.new_language_path == args.language_path: raise ValueError("new_language_path and language_path must be " "different: cannot generate a fixed trie in " "the same directory as the original trie.") c = TypeCollection(args.wikidata, num_names_to_load=0) c.load_blacklist(join(SCRIPT_DIR, "blacklist.json")) original_values = np.load( join(args.language_path, "trie_index2indices_values.npy")) original_offsets = np.load( join(args.language_path, "trie_index2indices_offsets.npy")) original_counts = np.load( join(args.language_path, "trie_index2indices_counts.npy")) original_trie_path = join(args.language_path, 'trie.marisa') trie = marisa_trie.Trie().load(original_trie_path) initialize_globals(c) t0 = time.time() old_location_shift = None values, offsets, counts = original_values, original_offsets, original_counts for step in range(args.steps): anchor_length = get_trie_properties(trie, offsets, values) (offsets, values, counts), location_shift = fix( collection=c, offsets=offsets, values=values, counts=counts, anchor_length=anchor_length, num_category_link=8 ) if old_location_shift is not None: # see where newly shifted values are now pointing # to (extra indirection level): location_shift = location_shift[old_location_shift] location_shift[old_location_shift == -1] = -1 old_location_shift = location_shift pre_reduced_values = values[location_shift] pre_reduced_values[location_shift == -1] = -1 
        num_changes = int((pre_reduced_values != original_values).sum())
        change_volume = int((original_counts[pre_reduced_values != original_values].sum()))
        print("step %d with %d changes, %d total links" % (
            step, num_changes, change_volume)
        )
    pre_reduced_values = values[location_shift]
    pre_reduced_values[location_shift == -1] = -1
    t1 = time.time()
    num_changes = int((pre_reduced_values != original_values).sum())
    print("Done with link fixing in %.3fs, with %d changes." % (
        t1 - t0, num_changes)
    )

    # show some remappings:
    np.random.seed(1234)
    num_samples = 10
    samples = np.random.choice(
        np.where(
            np.logical_and(
                np.logical_and(
                    pre_reduced_values != original_values,
                    pre_reduced_values != -1
                ),
                original_values != -1
            )
        )[0],
        size=num_samples,
        replace=False
    )
    print("Sample fixes:")
    for index in samples:
        print(" %r (%d) -> %r (%d)" % (
            c.get_name(int(original_values[index])),
            int(original_values[index]),
            c.get_name(int(pre_reduced_values[index])),
            int(pre_reduced_values[index])
        ))
    print("")
    samples = np.random.choice(
        np.where(
            OffsetArray(values, offsets).edges() == 0
        )[0],
        size=num_samples,
        replace=False
    )
    print("Sample deletions:")
    for index in samples:
        print(" %r" % (trie.restore_key(int(index))))

    # prune out anchors where there are no more linked items:
    print("Removing empty anchors from trie...")
    t0 = time.time()
    non_empty_offsets = np.where(
        OffsetArray(values, offsets).edges() != 0
    )[0]
    fixed_trie = filter_trie(trie, non_empty_offsets)

    contexts_found = true_exists(
        join(args.language_path, "trie_index2contexts_values.npy")
    )
    if contexts_found:
        contexts_values = np.load(
            join(args.language_path, "trie_index2contexts_values.npy"))
        contexts_offsets = np.load(
            join(args.language_path, "trie_index2contexts_offsets.npy"))
        contexts_counts = np.load(
            join(args.language_path, "trie_index2contexts_counts.npy"))

    to_port = [
        (offsets, values, counts),
        (original_offsets, pre_reduced_values, original_values)
    ]
    if contexts_found:
        to_port.append(
            (contexts_offsets, contexts_values, contexts_counts)
        )

    ported = remap_trie_offset_array(trie, fixed_trie, to_port)
    offsets, values, counts = ported[0]
    original_offsets, pre_reduced_values, original_values = ported[1]
    t1 = time.time()
    print("Removed %d empty anchors from trie in %.3fs" % (
        len(trie) - len(fixed_trie), t1 - t0,)
    )

    print("Saving...")
    makedirs(args.new_language_path, exist_ok=True)
    np.save(join(args.new_language_path, "trie_index2indices_values.npy"), values)
    np.save(join(args.new_language_path, "trie_index2indices_offsets.npy"), offsets)
    np.save(join(args.new_language_path, "trie_index2indices_counts.npy"), counts)
    if contexts_found:
        contexts_offsets, contexts_values, contexts_counts = ported[2]
        np.save(join(args.new_language_path, "trie_index2contexts_values.npy"),
                contexts_values)
        np.save(join(args.new_language_path, "trie_index2contexts_offsets.npy"),
                contexts_offsets)
        np.save(join(args.new_language_path, "trie_index2contexts_counts.npy"),
                contexts_counts)
    new_trie_path = join(args.new_language_path, 'trie.marisa')
    fixed_trie.save(new_trie_path)
    transition = np.vstack([original_values, pre_reduced_values]).T
    np.save(join(args.new_language_path, "trie_index2indices_transition_values.npy"),
            transition)
    np.save(join(args.new_language_path, "trie_index2indices_transition_offsets.npy"),
            original_offsets)
    print("Done.")


if __name__ == "__main__":
    main()
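# A minimal usage sketch for the script above, assuming it is saved as
# fix_links.py (hypothetical file name). parse_args() expects the wikidata dump
# directory, the existing language trie directory, and a distinct output
# directory, plus an optional --steps count (default 3):
#
#     python fix_links.py /data/wikidata /data/en_trie /data/en_trie_fixed --steps 3
#
# The language_path directory is expected to already contain trie.marisa and the
# trie_index2indices_{values,offsets,counts}.npy arrays that main() loads.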
"""
Obtain a coarse-grained classification of places and entities according to
their associated continent/country.
"""
from numpy import (
    logical_and, logical_or, logical_not, logical_xor, where
)
from wikidata_linker_utils.logic import logical_negate
import wikidata_linker_utils.wikidata_properties as wprop


def wkp(c, name):
    """Convert a string wikipedia article name to its Wikidata index."""
    return c.article2id["enwiki/" + name][0][0]


def wkd(c, name):
    """Convert a wikidata QID to its wikidata index."""
    return c.name2index[name]


def classify(c):
    EUROPE = wkp(c, 'Europe')
    AFRICA = wkp(c, 'Africa')
    ASIA = wkp(c, 'Asia')
    NORTH_AMERICA = wkp(c, 'North America')
    SOUTH_AMERICA = wkp(c, 'South America')
    OCEANIA = wkp(c, 'Oceania')
    ANTARCTICA = wkp(c, 'Antarctica')
    CONTINENT = wkp(c, wprop.CONTINENT)
    OUTERSPACE = wkp(c, 'Astronomical object')
    EARTH = wkp(c, "Earth")
    GEOGRAPHIC_LOCATION = wkd(c, "Q2221906")
    POPULATED_PLACE = wkd(c, 'Q486972')
    MIDDLE_EAST = [
        wkp(c, "Bahrain"), wkp(c, "Cyprus"), wkp(c, "Turkish"), wkp(c, "Egypt"),
        wkp(c, "Iran"), wkp(c, "Iraq"), wkp(c, "Kurdish"), wkp(c, "Israel"),
        wkp(c, "Arabic"), wkp(c, "Jordan"), wkp(c, "Kuwait"), wkp(c, "Lebanon"),
        wkp(c, "Oman"), wkp(c, "Palestine"), wkp(c, "Jordanian"), wkp(c, "Qatar"),
        wkp(c, "Saudi Arabia"), wkp(c, "Syria"), wkp(c, "Turkey"),
        wkp(c, "United Arab Emirates"), wkp(c, "Yemen")
    ]
    TRAVERSIBLE = [
        wprop.INSTANCE_OF,
        wprop.SUBCLASS_OF,
        wprop.CONTINENT,
        wprop.PART_OF,
        wprop.COUNTRY_OF_CITIZENSHIP,
        wprop.COUNTRY,
        wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY
    ]
    # c.describe_connection("Q55", "North America", TRAVERSIBLE)
    # return {}
    print("is_in_middle_east")
    is_in_middle_east = c.satisfy(TRAVERSIBLE, MIDDLE_EAST)
    print("is_in_europe")
    is_in_europe = c.satisfy(TRAVERSIBLE, [EUROPE])
    is_in_europe_only = logical_negate(is_in_europe, [is_in_middle_east])
    print("is_in_asia")
    is_in_asia = c.satisfy(TRAVERSIBLE, [ASIA])
    is_in_asia_only = logical_negate(is_in_asia, [is_in_europe, is_in_middle_east])
    print("is_in_africa")
    is_in_africa = c.satisfy(TRAVERSIBLE, [AFRICA])
    is_in_africa_only = logical_negate(
        is_in_africa, [is_in_europe, is_in_asia, is_in_middle_east])
    print("is_in_north_america")
    is_in_north_america = c.satisfy(TRAVERSIBLE, [NORTH_AMERICA])
    is_in_north_america_only = logical_negate(
        is_in_north_america, [is_in_europe, is_in_asia, is_in_middle_east])
    print("is_in_south_america")
    is_in_south_america = c.satisfy(TRAVERSIBLE, [SOUTH_AMERICA])
    print("is_in_antarctica")
    is_in_antarctica = c.satisfy(TRAVERSIBLE, [ANTARCTICA])
    is_in_antarctica_only = logical_negate(
        is_in_antarctica,
        [is_in_europe, is_in_north_america, is_in_asia, is_in_middle_east])
    print("is_in_oceania")
    is_in_oceania = c.satisfy(TRAVERSIBLE, [OCEANIA])
    is_in_oceania_only = logical_negate(
        is_in_oceania,
        [is_in_europe, is_in_north_america, is_in_asia, is_in_middle_east])
    print("is_in_outer_space")
    is_in_outer_space = c.satisfy(TRAVERSIBLE, [OUTERSPACE])
    print("part_of_earth")
    part_of_earth = c.satisfy(
        [wprop.INSTANCE_OF, wprop.PART_OF, wprop.CONTINENT,
         wprop.COUNTRY_OF_CITIZENSHIP, wprop.COUNTRY, wprop.SUBCLASS_OF],
        [GEOGRAPHIC_LOCATION, EARTH]
    )
    print("is_in_outer_space_not_earth")
    is_in_outer_space_not_earth = logical_negate(
        is_in_outer_space,
        [part_of_earth]
    )
    print("is_a_populated_place")
    is_populated_place = c.satisfy([wprop.INSTANCE_OF, wprop.SUBCLASS_OF],
                                   [POPULATED_PLACE])
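    # How the helper below combines masks, inferred from its call sites in this
    # file (a sketch, not the library's documented contract):
    # logical_negate(x, [a, b, ...]) keeps the entries of x not flagged by any
    # alternative, roughly x & ~(a | b | ...). With plain numpy boolean arrays:
    #     >>> import numpy as np
    #     >>> x = np.array([True, True, False])
    #     >>> a = np.array([False, True, False])
    #     >>> x & ~a
    #     array([ True, False, False])
    # so is_unlocalized_populated_place below marks populated places that were
    # not attached to any continent above.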
    is_unlocalized_populated_place = logical_negate(
        is_populated_place,
        [is_in_europe, is_in_asia, is_in_antarctica, is_in_oceania,
         is_in_outer_space, is_in_south_america, is_in_north_america])
    return {
        "europe": is_in_europe_only,
        "asia": is_in_asia_only,
        "africa": is_in_africa_only,
        "middle_east": is_in_middle_east,
        "north_america": is_in_north_america_only,
        "south_america": is_in_south_america,
        "antarctica": is_in_antarctica_only,
        "oceania": is_in_oceania_only,
        "outer_space": is_in_outer_space_not_earth,
        # "populated_space": is_populated_place,
        "populated_place_unlocalized": is_unlocalized_populated_place
    }


"""
Obtain a finer-grained classification of places and entities according to
their associated country/region.
"""
from numpy import (
    logical_and, logical_or, logical_not, logical_xor, where
)
from wikidata_linker_utils.logic import logical_negate, logical_ors
import wikidata_linker_utils.wikidata_properties as wprop


def wkp(c, name):
    """Convert a string wikipedia article name to its Wikidata index."""
    return c.article2id["enwiki/" + name][0][0]


def wkd(c, name):
    """Convert a wikidata QID to its wikidata index."""
    return c.name2index[name]


def classify(c):
    TRAVERSIBLE_BASIC = [wprop.INSTANCE_OF, wprop.SUBCLASS_OF]
    TRAVERSIBLE_COUNTRY = [
        wprop.INSTANCE_OF,
        wprop.SUBCLASS_OF,
        wprop.COUNTRY_OF_CITIZENSHIP,
        wprop.COUNTRY,
        wprop.LOCATION,
        wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY
    ]
    TRAVERSIBLE_PART_OF = [
        wprop.INSTANCE_OF,
        wprop.SUBCLASS_OF,
        wprop.CONTINENT,
        wprop.PART_OF,
        wprop.COUNTRY_OF_CITIZENSHIP,
        wprop.COUNTRY,
        wprop.LOCATED_IN_THE_ADMINISTRATIVE_TERRITORIAL_ENTITY
    ]
    TRAVERSIBLE_TOPIC = [
        wprop.INSTANCE_OF,
        wprop.SUBCLASS_OF,
        wprop.STUDIES,
        wprop.FIELD_OF_THIS_OCCUPATION,
        wprop.OCCUPATION,
        wprop.FIELD_OF_WORK,
        wprop.INDUSTRY]
    ASSOCIATION_FOOTBALL_PLAYER = wkd(c, "Q937857")
    PAINTER = wkd(c, "Q1028181")
    POLITICIAN = wkd(c, "Q82955")
    ARTICLE = wkd(c, "Q191067")
    VIDEO_GAME = wkd(c, "Q7889")
    FILM = wkd(c, "Q11424")
    FICTIONAL_CHARACTER = wkd(c, "Q95074")
    POEM = wkd(c, "Q482")
    BOOK = wkd(c, "Q571")
    DISEASE = wkd(c, "Q12136")
    PAINTING = wkd(c, "Q3305213")
    VISUAL_ART_WORK = wkd(c, "Q4502142")
    MUSIC_WORK = wkd(c, "Q2188189")
    SCIENTIFIC_ARTICLE = wkd(c, "Q13442814")
    PROTEIN_FAMILY = wkd(c, "Q417841")
    PROTEIN_COMPLEX = wkd(c, "Q420927")
    GENE = wkd(c, "Q7187")
    CHEMICAL_SUBSTANCE = wkd(c, "Q79529")
    PROTEIN = wkd(c, "Q8054")
    TAXON = wkd(c, "Q16521")
    PHYSICAL_OBJECT = wkd(c, "Q223557")
    OUTERSPACE = wkp(c, 'Astronomical object')
    # INTERNATIONAL_ORGANISATION = wkd(c,"")
    HUMAN = wkp(c, "Human")
    HUMAN_SETTLMENT = wkd(c, "Q486972")
    DICTIONARY = wkd(c, "Q23622")
    ABRREVIATION = wkd(c, "Q102786")
    POPULATED_PLACE = wkd(c, "Q486972")
    TERRITORIAL_ENTITY = wkd(c, "Q1496967")
    DESA = wkd(c, "Q26211545")
    TOWN_IN_CHINA = wkd(c, "Q735428")
    ADMIN_DIVISION_CHINA = wkd(c, "Q50231")
    COUNTRY = wkd(c, "Q6256")
    MOUNTAIN_RANGE = wkd(c, "Q46831")
    EARTH = wkp(c, "Earth")
    GEOGRAPHIC_LOCATION = wkd(c, "Q2221906")

    is_politician = c.satisfy([wprop.OCCUPATION], [POLITICIAN])
    is_painter = c.satisfy([wprop.OCCUPATION], [PAINTER])
    is_association_football_player = c.satisfy([wprop.OCCUPATION], [ASSOCIATION_FOOTBALL_PLAYER])
    is_populated_place = c.satisfy(
        [wprop.INSTANCE_OF, wprop.PART_OF,
wprop.CONTINENT, wprop.COUNTRY_OF_CITIZENSHIP, wprop.COUNTRY, wprop.SUBCLASS_OF], [GEOGRAPHIC_LOCATION, EARTH, HUMAN_SETTLMENT]) is_taxon = c.satisfy( [wprop.INSTANCE_OF, wprop.PART_OF, wprop.SUBCLASS_OF], [TAXON]) is_other_wkd= c.satisfy( [wprop.INSTANCE_OF, wprop.PART_OF, wprop.SUBCLASS_OF], [GENE, CHEMICAL_SUBSTANCE, SCIENTIFIC_ARTICLE, PROTEIN, DISEASE, PROTEIN_FAMILY,PROTEIN_COMPLEX, BOOK, MUSIC_WORK, PAINTING, VISUAL_ART_WORK, POEM, FILM, FICTIONAL_CHARACTER,VIDEO_GAME,SCIENTIFIC_ARTICLE,ARTICLE]) is_gene_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Genes")], max_steps=5) is_chromosome_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Chromosomes")], max_steps=5) is_protein_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Proteins")], max_steps=5) is_other= logical_ors([is_other_wkd, is_gene_wkp, is_chromosome_wkp, is_protein_wkp ]) print("WIKI Links") WIKIPEDIA_DISAMBIGUATION_PAGE = wkd(c,"Q4167410") SCIENTIFIC_JOURNAL = wkd(c,"Q5633421") SURNAME = wkd(c,"Q101352") WIKI_NEWS_ARTICLE = wkd(c,"Q17633526") WIKIMEDIA_CATEGORY = wkd(c,"Q4167836") WIKIPEDIA_TEMPLATE_NAMESPACE = wkd(c,"Q11266439") WIKIPEDIA_LIST = wkd(c,"Q13406463") ENCYCLOPEDIA_ARTICLE = wkd(c,"Q17329259") WIKIMEDIA_PROJECT_PAGE = wkd(c,"Q14204246") RURAL_COMUNE_VIETNAM = wkd(c,"Q2389082") TERRITORIAL_ENTITY = wkd(c,"Q1496967") is_Wiki_Links = c.satisfy(TRAVERSIBLE_TOPIC, [WIKIPEDIA_DISAMBIGUATION_PAGE, SURNAME, WIKIMEDIA_CATEGORY, WIKIPEDIA_TEMPLATE_NAMESPACE, WIKIPEDIA_LIST, ENCYCLOPEDIA_ARTICLE, WIKIMEDIA_PROJECT_PAGE, WIKI_NEWS_ARTICLE ]) print("is_in_outer_space") is_in_outer_space = c.satisfy(TRAVERSIBLE_PART_OF, [OUTERSPACE]) print("part_of_earth") part_of_earth = c.satisfy( [wprop.INSTANCE_OF, wprop.PART_OF, wprop.CONTINENT, wprop.COUNTRY_OF_CITIZENSHIP, wprop.COUNTRY, wprop.SUBCLASS_OF, wprop.LOCATION], [GEOGRAPHIC_LOCATION, EARTH]) print("is_in_outer_space_not_earth") is_in_outer_space_not_earth = logical_negate( is_in_outer_space, [part_of_earth]) print("African countries") ALGERIA = wkp(c,"Algeria") ANGOLA = wkp(c,"Angola") BENIN = wkp(c,"Benin") BOTSWANA = wkd(c,"Q963") BURKINA_FASO = wkd(c,"Q965") BURUNDI = wkd(c,"Q967") CAMEROON = wkd(c,"Q1009") CAPE_VERDE = wkd(c,"Q1011") CHAD = wkd(c,"Q657") CENTRAL_AFRICAN_REPUBLIC = wkd(c,"Q929") COMOROS = wkd(c,"Q970") DEMOCRATIC_REPUBLIC_OF_CONGO = wkd(c,"Q974") REPUBLIC_OF_CONGO = wkd(c,"Q971") DJIBOUTI = wkd(c,"Q977") EGYPT = wkd(c,"Q79") RASHIDUN_CALIPHATE = wkd(c,"Q12490507") EQUATORIAL_GUINEA = wkd(c,"Q983") ERITREA = wkd(c,"Q986") ETHIOPIA = wkd(c,"Q115") GABON = wkd(c,"Q1000") THE_GAMBIA = wkd(c,"Q1005") GHANA = wkd(c,"Q117") GUINEA = wkd(c,"Q1006") GUINEA_BISSAU = wkd(c,"Q1007") IVORY_COAST = wkd(c,"Q1008") KENYA = wkd(c,"Q114") LESOTHO = wkd(c,"Q1013") LIBERIA = wkd(c,"Q1014") LIBYA = wkd(c,"Q1016") MADAGASCAR = wkd(c,"Q1019") MALAWI = wkd(c,"Q1020") MALI = wkd(c,"Q912") MAURITANIA = wkd(c,"Q1025") MAURITIUS = wkd(c,"Q1027") MOROCCO = wkd(c,"Q1028") MOZAMBIQUE = wkd(c,"Q1029") NAMIBIA = wkd(c,"Q1030") NIGER = wkd(c,"Q1032") NIGERIA = wkd(c,"Q1033") RWANDA = wkd(c,"Q1037") SAHARI_ARAB_DEOMOCRATIC_REPUBLIC = wkd(c,"Q40362") SAO_TOME_AND_PRINCIPE= wkd(c,"Q1039") SENEGAL = wkd(c,"Q1041") SEYCHELLES = wkd(c,"Q1042") SIERRA_LEONE = wkd(c,"Q1044") SOMALIA = wkd(c,"Q1045") SOUTH_AFRICA = wkd(c,"Q258") SOUTHSUDAN = wkd(c,"Q958") SUDAN = wkd(c,"Q1049") SWAZILAND= wkd(c,"Q1050") TANZANIA = wkd(c,"Q924") TOGO = wkd(c,"Q945") TUNISIA= wkd(c,"Q948") UGANDA = wkd(c,"Q1036") WESTERN_SAHARA = wkd(c,"Q6250") ZAMBIA = wkd(c,"Q953") ZIMBABWE = 
wkd(c, "Q954")
    SOMALI_LAND = wkd(c, "Q34754")

    in_algeria_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ALGERIA])
    in_algeria_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Algeria stubs")], max_steps=4)
    in_algeria_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Algeria")], max_steps=3)
    in_algeria_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roads in Algeria")], max_steps=3)
    in_algeria = logical_ors([in_algeria_wkd, in_algeria_stubs, in_algeria_politics, in_algeria_roads])

    in_angola_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ANGOLA])
    in_angola_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Angola stubs")], max_steps=4)
    in_angola_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Angola")], max_steps=3)
    in_angola_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roads in Angola")], max_steps=3)
    in_angola = logical_ors([in_angola_wkd, in_angola_stubs, in_angola_politics, in_angola_roads])

    in_benin_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BENIN])
    in_benin_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Benin stubs")], max_steps=4)
    in_benin_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Benin")], max_steps=3)
    in_benin_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roads in Benin")], max_steps=3)
    in_benin = logical_ors([in_benin_wkd, in_benin_stubs, in_benin_politics, in_benin_roads])

    in_botswana_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BOTSWANA])
    in_botswana_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Botswana stubs")], max_steps=4)
    in_botswana_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Botswana")], max_steps=3)
    in_botswana_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roads in Botswana")], max_steps=3)
    in_botswana = logical_ors([in_botswana_wkd, in_botswana_stubs, in_botswana_politics, in_botswana_roads])

    in_burkina_faso_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BURKINA_FASO])
    in_burkina_faso_stubs = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Burkina Faso stubs")], max_steps=4)
    in_burkina_faso_politics = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Burkina Faso")], max_steps=3)
    in_burkina_faso = logical_ors([in_burkina_faso_wkd, in_burkina_faso_stubs, in_burkina_faso_politics])

    in_burundi_politics_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Politics of Burundi")], max_steps=4)
    in_burundi_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [BURUNDI])
    in_burundi = logical_ors([in_burundi_wkd, in_burundi_politics_wkp])

    in_cameroon = c.satisfy(TRAVERSIBLE_COUNTRY, [CAMEROON])
    in_cape_verde = c.satisfy(TRAVERSIBLE_COUNTRY, [CAPE_VERDE])
    in_chad = c.satisfy(TRAVERSIBLE_COUNTRY, [CHAD])
    in_central_african_republic = c.satisfy(TRAVERSIBLE_COUNTRY, [CENTRAL_AFRICAN_REPUBLIC])
    in_comoros = c.satisfy(TRAVERSIBLE_COUNTRY, [COMOROS])
    in_democratic_republic_congo = c.satisfy(TRAVERSIBLE_COUNTRY, [DEMOCRATIC_REPUBLIC_OF_CONGO])
    in_republic_of_congo = c.satisfy(TRAVERSIBLE_COUNTRY, [REPUBLIC_OF_CONGO])
    in_djibouti = c.satisfy(TRAVERSIBLE_COUNTRY, [DJIBOUTI])

    in_egypt_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [EGYPT])
    in_ancient_egypt = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Ancient Egypt")], max_steps=6)
    in_Rashidun_Caliphate = c.satisfy(TRAVERSIBLE_COUNTRY, [RASHIDUN_CALIPHATE])
    egyptian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Egyptian people")], max_steps=6)
    in_egypt = logical_ors([in_egypt_wkd, in_ancient_egypt, in_Rashidun_Caliphate, egyptian_people])

    in_equatorial_guinea =
c.satisfy(TRAVERSIBLE_COUNTRY, [EQUATORIAL_GUINEA]) in_eritrea = c.satisfy(TRAVERSIBLE_COUNTRY, [ERITREA]) in_ethiopia = c.satisfy(TRAVERSIBLE_COUNTRY, [ETHIOPIA]) in_gabon = c.satisfy(TRAVERSIBLE_COUNTRY, [GABON]) in_the_gambia = c.satisfy(TRAVERSIBLE_COUNTRY, [THE_GAMBIA]) in_ghana = c.satisfy(TRAVERSIBLE_COUNTRY, [GHANA]) in_guinea = c.satisfy(TRAVERSIBLE_COUNTRY, [GUINEA]) in_guinea_bissau = c.satisfy(TRAVERSIBLE_COUNTRY, [GUINEA_BISSAU]) in_ivory_coast = c.satisfy(TRAVERSIBLE_COUNTRY, [IVORY_COAST]) in_lesotho = c.satisfy(TRAVERSIBLE_COUNTRY, [LESOTHO]) in_kenya = c.satisfy(TRAVERSIBLE_COUNTRY, [KENYA]) in_liberia = c.satisfy(TRAVERSIBLE_COUNTRY, [LIBERIA]) in_libya = c.satisfy(TRAVERSIBLE_COUNTRY, [LIBYA]) in_madagascar = c.satisfy(TRAVERSIBLE_COUNTRY, [MADAGASCAR]) in_malawi = c.satisfy(TRAVERSIBLE_COUNTRY, [MALAWI]) in_mali = c.satisfy(TRAVERSIBLE_COUNTRY, [MALI]) in_mauritania = c.satisfy(TRAVERSIBLE_COUNTRY, [MAURITANIA]) in_mauritius = c.satisfy(TRAVERSIBLE_COUNTRY, [MAURITIUS]) in_morrocco = c.satisfy(TRAVERSIBLE_COUNTRY, [MOROCCO]) in_mozambique = c.satisfy(TRAVERSIBLE_COUNTRY, [MOZAMBIQUE]) in_namibia = c.satisfy(TRAVERSIBLE_COUNTRY, [NAMIBIA]) in_niger = c.satisfy(TRAVERSIBLE_COUNTRY, [NIGER]) in_nigeria = c.satisfy(TRAVERSIBLE_COUNTRY, [NIGERIA]) in_rwanda = c.satisfy(TRAVERSIBLE_COUNTRY, [RWANDA]) in_sadr = c.satisfy(TRAVERSIBLE_COUNTRY, [SAHARI_ARAB_DEOMOCRATIC_REPUBLIC]) in_stap = c.satisfy(TRAVERSIBLE_COUNTRY, [SAO_TOME_AND_PRINCIPE]) in_senegal = c.satisfy(TRAVERSIBLE_COUNTRY, [SENEGAL]) in_seychelles = c.satisfy(TRAVERSIBLE_COUNTRY, [SEYCHELLES]) in_sierra_leone = c.satisfy(TRAVERSIBLE_COUNTRY, [SIERRA_LEONE]) in_somalia = c.satisfy(TRAVERSIBLE_COUNTRY, [SOMALIA]) in_somali_land = c.satisfy(TRAVERSIBLE_COUNTRY, [SOMALI_LAND]) in_south_africa = c.satisfy(TRAVERSIBLE_COUNTRY, [SOUTH_AFRICA]) in_ssudan= c.satisfy(TRAVERSIBLE_COUNTRY, [SOUTHSUDAN]) in_sudan= c.satisfy(TRAVERSIBLE_COUNTRY, [SUDAN]) in_swaziland= c.satisfy(TRAVERSIBLE_COUNTRY, [SWAZILAND]) in_tanzania_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Sports competitions in Tanzania")], max_steps=4) in_tanzania_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [TANZANIA]) in_tanzania = logical_ors([in_tanzania_wkp,in_tanzania_wkd]) in_togo = c.satisfy(TRAVERSIBLE_COUNTRY, [TOGO]) in_tunisia = c.satisfy(TRAVERSIBLE_COUNTRY, [TUNISIA]) in_uganda = c.satisfy(TRAVERSIBLE_COUNTRY, [UGANDA]) in_western_sahara = c.satisfy(TRAVERSIBLE_COUNTRY, [WESTERN_SAHARA]) in_zambia_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ZAMBIA]) zambian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Zambian people")], max_steps=4) in_zambia = logical_ors([in_zambia_wkd, zambian_people]) in_zimbabwe = c.satisfy(TRAVERSIBLE_COUNTRY, [ZIMBABWE]) in_africa = logical_ors([ in_botswana, in_burkina_faso, in_burundi, in_cameroon, in_cape_verde, in_chad, in_central_african_republic, in_comoros, in_democratic_republic_congo, in_republic_of_congo, in_djibouti, in_egypt, in_equatorial_guinea, in_eritrea, in_ethiopia, in_gabon, in_the_gambia, in_ghana, in_guinea, in_guinea_bissau, in_ivory_coast, in_lesotho, in_kenya, in_liberia, in_libya, in_madagascar, in_malawi ]) print("Oceanian countries") AUSTRALIA = wkd(c,"Q408") FIJI = wkd(c,"Q712") INDONESIA = wkd(c,"Q252") KIRIBATI= wkd(c,"Q710") MARSHALL_ISLANDS= wkd(c,"Q709") FEDERATED_STATES_OF_MICRONESIA= wkd(c,"Q702") NAURU= wkd(c,"Q697") PALAU= wkd(c,"Q695") PAPUA_NEW_GUINEA= wkd(c,"Q691") SAMOA = wkd(c,"Q683") SOLOMON_ISLANDS= wkd(c,"Q685") VANUATU = wkd(c,"Q686") NEW_ZEALAND = wkd(c,"Q664") 
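    # The recurring per-country recipe in this function, restated for reference
    # (an informal reading of the calls above, not a documented API contract):
    # c.satisfy(TRAVERSIBLE_COUNTRY, [X]) marks every item that reaches country X
    # through the instance-of / subclass-of / country (of citizenship) / location
    # style properties, while c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:...")],
    # max_steps=N) walks at most N levels of the Wikipedia category graph; the two
    # signals are then unioned with logical_ors. Hypothetical example for a country FOO:
    #     in_foo_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [FOO])
    #     in_foo_cat = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Foo")], max_steps=4)
    #     in_foo = logical_ors([in_foo_wkd, in_foo_cat])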
in_australia_athletes = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Australian sportspeople")], max_steps=5) in_australia_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [AUSTRALIA]) in_australia = logical_ors([in_australia_wkd, in_australia_athletes]) in_fiji = c.satisfy(TRAVERSIBLE_COUNTRY, [FIJI]) in_indonesia = c.satisfy(TRAVERSIBLE_COUNTRY, [INDONESIA]) in_kiribati = c.satisfy(TRAVERSIBLE_COUNTRY, [KIRIBATI]) in_marshall_islands = c.satisfy(TRAVERSIBLE_COUNTRY, [MARSHALL_ISLANDS]) in_federates_states_of_micronesia = c.satisfy(TRAVERSIBLE_COUNTRY, [FEDERATED_STATES_OF_MICRONESIA]) in_nauru = c.satisfy(TRAVERSIBLE_COUNTRY, [NAURU]) in_palau = c.satisfy(TRAVERSIBLE_COUNTRY, [PALAU]) in_papua_new_guinea = c.satisfy(TRAVERSIBLE_COUNTRY, [PAPUA_NEW_GUINEA]) in_samoa_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Samoa")], max_steps=5) in_samoa_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SAMOA]) in_samoa = logical_ors([in_samoa_wkd, in_samoa_wkp]) in_solomon_islands = c.satisfy(TRAVERSIBLE_COUNTRY, [SOLOMON_ISLANDS]) in_vanuatu = c.satisfy(TRAVERSIBLE_COUNTRY, [VANUATU]) in_new_zealand = c.satisfy(TRAVERSIBLE_COUNTRY, [NEW_ZEALAND]) print("South American countries") ARGENTINA = wkd(c,"Q414") BOLIVIA = wkd(c,"Q750") BRAZIL = wkd(c,"Q155") CHILE = wkd(c,"Q298") COLOMBIA = wkd(c,"Q739") ECUADOR = wkd(c,"Q736") GUYANA = wkd(c,"Q734") PARAGUAY = wkd(c,"Q733") PERU = wkd(c,"Q419") SURINAME = wkd(c,"Q730") TRINIDAD_AND_TOBAGO = wkd(c,"Q754") URUGUAY = wkd(c,"Q77") VENEZUELA = wkd(c,"Q717") in_argentina = c.satisfy(TRAVERSIBLE_COUNTRY, [ARGENTINA]) in_bolivia = c.satisfy(TRAVERSIBLE_COUNTRY, [BOLIVIA]) in_brazil = c.satisfy(TRAVERSIBLE_COUNTRY, [BRAZIL]) in_chile = c.satisfy(TRAVERSIBLE_COUNTRY, [CHILE]) in_colombia = c.satisfy(TRAVERSIBLE_COUNTRY, [COLOMBIA]) in_ecuador = c.satisfy(TRAVERSIBLE_COUNTRY, [ECUADOR]) in_guyana = c.satisfy(TRAVERSIBLE_COUNTRY, [GUYANA]) in_paraguay = c.satisfy(TRAVERSIBLE_COUNTRY, [PARAGUAY]) in_peru = c.satisfy(TRAVERSIBLE_COUNTRY, [PERU]) in_suriname = c.satisfy(TRAVERSIBLE_COUNTRY, [SURINAME]) in_trinidad_and_tobago = c.satisfy(TRAVERSIBLE_COUNTRY, [TRINIDAD_AND_TOBAGO]) in_uruguay = c.satisfy(TRAVERSIBLE_COUNTRY, [URUGUAY]) in_venezuela = c.satisfy(TRAVERSIBLE_COUNTRY, [VENEZUELA]) print("Central American countries") BELIZE = wkd(c,"Q242") COSTA_RICA = wkd(c,"Q800") EL_SALVADOR = wkd(c,"Q792") GUATEMALA = wkd(c,"Q774") HONDURAS = wkd(c,"Q783") NICARAGUA = wkd(c,"Q811") PANAMA = wkd(c,"Q804") in_belize = c.satisfy(TRAVERSIBLE_COUNTRY, [BELIZE]) in_costa_rica = c.satisfy(TRAVERSIBLE_COUNTRY, [COSTA_RICA]) in_el_salvador = c.satisfy(TRAVERSIBLE_COUNTRY, [EL_SALVADOR]) in_guatemala = c.satisfy(TRAVERSIBLE_COUNTRY, [GUATEMALA]) in_honduras = c.satisfy(TRAVERSIBLE_COUNTRY, [HONDURAS]) in_nicaragua = c.satisfy(TRAVERSIBLE_COUNTRY, [NICARAGUA]) in_panama = c.satisfy(TRAVERSIBLE_COUNTRY, [PANAMA]) print("North American countries") ANTIGUA_BARBUDA = wkd(c,"Q781") BAHAMAS = wkd(c,"Q778") BARBADOS = wkd(c,"Q244") BELIZE = wkd(c,"Q242") CANADA = wkd(c,"Q16") COSTA_RICA = wkd(c,"Q800") CUBA = wkd(c,"Q241") DOMINICAN_REPUBLIC = wkd(c,"Q786") EL_SALVADOR = wkd(c,"Q792") GRENADA = wkd(c,"Q769") GUATEMALA = wkd(c,"Q774") HAITI = wkd(c,"Q790") HONDURAS = wkd(c,"Q783") JAMAICA = wkd(c,"Q766") MEXICO = wkd(c,"Q96") NICARAGUA = wkd(c,"Q811") PANAMA = wkd(c,"Q804") SAINT_KITTS_AND_NEVIS = wkd(c,"Q763") SAINT_LUCIA = wkd(c,"Q760") SAINT_VINCENT_AND_GRENADINES = wkd(c,"Q757") UNITED_STATES = wkd(c,"Q30") in_antigua_barbuda = c.satisfy(TRAVERSIBLE_COUNTRY, [ANTIGUA_BARBUDA]) in_bahamas = 
c.satisfy(TRAVERSIBLE_COUNTRY, [BAHAMAS]) in_barbados = c.satisfy(TRAVERSIBLE_COUNTRY, [BARBADOS]) in_belize = c.satisfy(TRAVERSIBLE_COUNTRY, [BELIZE]) canadians = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Canadian people by occupation")], max_steps=5) in_canada_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [CANADA]) in_canada = logical_ors([canadians, in_canada_wkd]) in_costa_rica = c.satisfy(TRAVERSIBLE_COUNTRY, [COSTA_RICA]) in_cuba = c.satisfy(TRAVERSIBLE_COUNTRY, [CUBA]) in_dominican_republic = c.satisfy(TRAVERSIBLE_COUNTRY, [DOMINICAN_REPUBLIC]) in_el_salvador = c.satisfy(TRAVERSIBLE_COUNTRY, [EL_SALVADOR]) in_grenada = c.satisfy(TRAVERSIBLE_COUNTRY, [GRENADA]) in_guatemala = c.satisfy(TRAVERSIBLE_COUNTRY, [GUATEMALA]) in_haiti = c.satisfy(TRAVERSIBLE_COUNTRY, [HAITI]) in_honduras = c.satisfy(TRAVERSIBLE_COUNTRY, [HONDURAS]) in_jamaica = c.satisfy(TRAVERSIBLE_COUNTRY, [JAMAICA]) in_mexico = c.satisfy(TRAVERSIBLE_COUNTRY, [MEXICO]) in_nicaragua = c.satisfy(TRAVERSIBLE_COUNTRY, [NICARAGUA]) in_panama = c.satisfy(TRAVERSIBLE_COUNTRY, [PANAMA]) in_Saint_Kitts_and_Nevis = c.satisfy(TRAVERSIBLE_COUNTRY, [SAINT_KITTS_AND_NEVIS]) in_saint_lucia = c.satisfy(TRAVERSIBLE_COUNTRY, [SAINT_LUCIA]) in_saint_vincent_and_grenadines = c.satisfy(TRAVERSIBLE_COUNTRY, [SAINT_VINCENT_AND_GRENADINES]) in_usa_sports = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:History of sports in the United States")], max_steps=7) years_in_usa = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in the United States")], max_steps=7) in_usa_roads = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roads in the United States")], max_steps=7) in_united_states_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [UNITED_STATES]) in_united_states = logical_ors([in_usa_sports,in_united_states_wkd, years_in_usa]) print("Asian countries") FOURTH_ADMIN_DIVISION_INDONESIA = wkd(c,"Q2225692") RURAL_COMUNE_VIETNAM = wkd(c,"Q2389082") AFGHANISTAN = wkd(c,"Q889") KINGDOM_OF_AFGHANISTAN = wkd(c,"Q1138904") REPUBLIC_OF_AFGHANISTAN = wkd(c,"Q1415128") DEMOCRATIC_REPUBLIC_OF_AFGHANISTAN = wkd(c,"Q476757") BANGLADESH = wkd(c,"Q902") BHUTAN = wkd(c,"Q917") BRUNEI = wkd(c,"Q921") CAMBODIA = wkd(c,"Q424") CHINA = wkd(c,"Q148") EAST_TIMOR = wkd(c,"Q574") INDIA = wkd(c,"Q668") INDONESIA = wkd(c,"Q252") IRAN = wkd(c,"Q794") IRAQ = wkd(c,"Q796") KURDISTAN = wkd(c,"Q41470") ISRAEL = wkd(c,"Q801") JAPAN = wkd(c,"Q17") JORDAN = wkd(c,"Q810") KAZAKHSTAN = wkd(c,"Q232") KUWAIT = wkd(c,"Q817") KYRGYZSTAN = wkd(c,"Q813") LAOS = wkd(c,"Q819") LEBANON = wkd(c,"Q822") MALAYSIA = wkd(c,"Q833") MALDIVES = wkd(c,"Q826") MONGOLIA = wkd(c,"Q711") MYANMAR = wkd(c,"Q836") NEPAL = wkd(c,"Q837") NORTH_KOREA = wkd(c,"Q423") OMAN = wkd(c,"Q842") PALESTINE = wkd(c,"Q219060") PAKISTAN = wkd(c,"Q843") PHILIPPINES = wkd(c,"Q928") QATAR = wkd(c,"Q846") SAUDI_ARABIA = wkd(c,"Q851") SINGAPORE = wkd(c,"Q334") SOUTH_KOREA = wkd(c,"Q884") SRI_LANKA = wkd(c,"Q854") SYRIA = wkd(c,"Q858") TAIWAN = wkd(c,"Q865") TAJIKISTAN = wkd(c,"Q863") THAILAND = wkd(c,"Q869") TURKMENISTAN = wkd(c,"Q874") UNITED_ARAB_EMIRATES = wkd(c,"Q878") UZBEKISTAN = wkd(c,"Q265") VIETNAM = wkd(c,"Q881") YEMEN = wkd(c,"Q805") in_afghanistan = c.satisfy(TRAVERSIBLE_COUNTRY, [AFGHANISTAN, REPUBLIC_OF_AFGHANISTAN, DEMOCRATIC_REPUBLIC_OF_AFGHANISTAN]) in_bangladesh = c.satisfy(TRAVERSIBLE_COUNTRY, [BANGLADESH]) in_bhutan = c.satisfy(TRAVERSIBLE_COUNTRY, [BHUTAN]) in_brunei = c.satisfy(TRAVERSIBLE_COUNTRY, [BRUNEI]) in_cambodia = c.satisfy(TRAVERSIBLE_COUNTRY, [CAMBODIA]) years_in_china = 
c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in China")], max_steps=6) chinese_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Chinese people by occupation")], max_steps=6) is_tibetan_politician = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Tibetan politicians")], max_steps=6) in_china_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [CHINA]) in_china = logical_ors([in_china_wkd,years_in_china,is_tibetan_politician, chinese_people]) in_east_timor = c.satisfy(TRAVERSIBLE_COUNTRY, [EAST_TIMOR]) in_india = c.satisfy(TRAVERSIBLE_COUNTRY, [INDIA]) in_indonesia = c.satisfy(TRAVERSIBLE_COUNTRY, [INDONESIA,FOURTH_ADMIN_DIVISION_INDONESIA]) in_iran = c.satisfy(TRAVERSIBLE_COUNTRY, [IRAN]) in_iraq = c.satisfy(TRAVERSIBLE_COUNTRY, [IRAQ, KURDISTAN]) in_israel = c.satisfy(TRAVERSIBLE_COUNTRY, [ISRAEL]) in_japan = c.satisfy(TRAVERSIBLE_COUNTRY, [JAPAN]) in_jordan = c.satisfy(TRAVERSIBLE_COUNTRY, [JORDAN]) in_kazakhstan = c.satisfy(TRAVERSIBLE_COUNTRY, [KAZAKHSTAN]) in_kuwait = c.satisfy(TRAVERSIBLE_COUNTRY, [KUWAIT]) in_kyrgyzstan = c.satisfy(TRAVERSIBLE_COUNTRY, [KYRGYZSTAN]) in_laos = c.satisfy(TRAVERSIBLE_COUNTRY, [LAOS]) in_lebanon = c.satisfy(TRAVERSIBLE_COUNTRY, [LEBANON]) in_malaysia = c.satisfy(TRAVERSIBLE_COUNTRY, [MALAYSIA]) in_maldives = c.satisfy(TRAVERSIBLE_COUNTRY, [MALDIVES]) in_mongolia = c.satisfy(TRAVERSIBLE_COUNTRY, [MONGOLIA]) in_myanmar = c.satisfy(TRAVERSIBLE_COUNTRY, [MYANMAR]) in_nepal = c.satisfy(TRAVERSIBLE_COUNTRY, [NEPAL]) in_north_korea = c.satisfy(TRAVERSIBLE_COUNTRY, [NORTH_KOREA]) in_oman = c.satisfy(TRAVERSIBLE_COUNTRY, [OMAN]) in_palestine = c.satisfy(TRAVERSIBLE_COUNTRY, [PALESTINE]) in_pakistan = c.satisfy(TRAVERSIBLE_COUNTRY, [PAKISTAN]) in_philippines = c.satisfy(TRAVERSIBLE_COUNTRY, [PHILIPPINES]) in_qatar = c.satisfy(TRAVERSIBLE_COUNTRY, [QATAR]) in_saudi_arabia = c.satisfy(TRAVERSIBLE_COUNTRY, [SAUDI_ARABIA]) in_singapore = c.satisfy(TRAVERSIBLE_COUNTRY, [SINGAPORE]) in_south_korea_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SOUTH_KOREA]) korean_rulers = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Korean rulers")], max_steps=6) south_korea_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:South Korea")], max_steps=6) south_korean_rulers = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Korean rulers")], max_steps=6) in_south_korea = logical_ors([in_south_korea_wkd, korean_rulers]) in_sri_lanka = c.satisfy(TRAVERSIBLE_COUNTRY, [SRI_LANKA]) in_syria_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SYRIA]) ancient_syria = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Ancient Syria")], max_steps=6) in_syria = logical_ors([in_syria_wkd,ancient_syria]) in_taiwan = c.satisfy(TRAVERSIBLE_COUNTRY, [TAIWAN]) in_tajikistan = c.satisfy(TRAVERSIBLE_COUNTRY, [TAJIKISTAN]) in_thailand = c.satisfy(TRAVERSIBLE_COUNTRY, [THAILAND]) in_turkmenistan = c.satisfy(TRAVERSIBLE_COUNTRY, [TURKMENISTAN]) in_united_arab_emirates = c.satisfy(TRAVERSIBLE_COUNTRY, [UNITED_ARAB_EMIRATES]) in_uzbekistan = c.satisfy(TRAVERSIBLE_COUNTRY, [UZBEKISTAN]) in_vietnam = c.satisfy(TRAVERSIBLE_COUNTRY, [VIETNAM, RURAL_COMUNE_VIETNAM]) in_yemen = c.satisfy(TRAVERSIBLE_COUNTRY, [YEMEN]) print("European countries") ALBANIA = wkd(c,"Q222") ANDORRA = wkd(c,"Q228") ARMENIA = wkd(c,"Q399") AUSTRIA = wkd(c,"Q40") AUSTRIA_HUNGARY = wkd(c,"Q28513") AZERBAIJAN = wkd(c,"Q227") BELARUS = wkd(c,"Q184") BELGIUM = wkd(c,"Q31") BOSNIA = wkd(c,"Q225") BULGARIA = wkd(c,"Q219") CROATIA = wkd(c,"Q224") CYPRUS = wkd(c,"Q229") CZECH_REPUBLIC = wkd(c,"Q213") CZECHOSLOVAKIA = wkd(c,"Q33946") DENMARK = 
wkd(c,"Q35") ESTONIA = wkd(c,"Q191") FINLAND = wkd(c,"Q33") FRANCE = wkd(c,"Q142") GEORGIA = wkd(c,"Q230") GERMANY = wkd(c,"Q183") GERMANY_NAZI = wkd(c,"Q7318") GERMAN_EMPIRE = wkd(c,"Q43287") GERMAN_CONFEDERATION = wkd(c,"Q151624") EAST_GERMANY = wkd(c,"Q16957") GREECE = wkd(c,"Q41") HUNGARY = wkd(c,"Q28") ICELAND = wkd(c,"Q189") IRELAND = wkd(c,"Q27") ITALY = wkd(c,"Q38") ROMAN_EMPIRE = wkd(c,"Q2277") ANCIENT_ROME = wkd(c,"Q1747689") KINGDOM_OF_ITALY = wkd(c,"Q172579") NATIONAL_FASCIST_PARTY = wkd(c,"Q139596") KAZAKHSTAN = wkd(c,"Q232") KOSOVO = wkd(c,"Q1246") LATVIA = wkd(c,"Q211") LIECHTENSTEIN = wkd(c,"Q347") LITHUANIA = wkd(c,"Q37") LUXEMBOURG = wkd(c,"Q32") MACEDONIA = wkd(c,"Q221") MALTA = wkd(c,"Q233") MOLDOVA = wkd(c,"Q217") MONACO = wkd(c,"Q235") MONTENEGRO = wkd(c,"Q236") NETHERLANDS = wkd(c,"Q55") SOUTHERN_NETHERLANDS = wkd(c,"Q6581823") KINGDOM_OF_NETHERLANDS = wkd(c,"Q29999") NORWAY = wkd(c,"Q20") POLAND = wkd(c,"Q36") PORTUGAL = wkd(c,"Q45") ROMANIA = wkd(c,"Q218") RUSSIA = wkd(c,"Q159") SOVIET_UNION =wkd(c,"Q15180") RUSSIAN_EMPIRE = wkd(c,"Q34266") SAN_MARINO = wkd(c,"Q238") SERBIA = wkd(c,"Q403") YOUGOSLAVIA = wkd(c,"Q36704") SLOVAKIA = wkd(c,"Q214") SLOVENIA = wkd(c,"Q215") SPAIN = wkd(c,"Q29") KINGDOM_OF_CASTILLE = wkd(c,"Q179293") SWEDEN = wkd(c,"Q34") SWITZERLAND = wkd(c,"Q39") TURKEY = wkd(c,"Q43") OTTOMAN_EMPIRE = wkd(c,"Q12560") UKRAINE = wkd(c,"Q212") UNITED_KINGDOM = wkd(c,"Q145") UNITED_KINGDOM_OLD = wkd(c,"Q174193") KINGDOM_OF_ENGLAND = wkd(c,"Q179876") KINGDOM_OF_GREAT_BRITAIN = wkd(c,"Q161885") VATICAN_CITY = wkd(c,"Q237") in_albania = c.satisfy(TRAVERSIBLE_COUNTRY, [ALBANIA]) in_andorra = c.satisfy(TRAVERSIBLE_COUNTRY, [ANDORRA]) in_armenia = c.satisfy(TRAVERSIBLE_COUNTRY, [ARMENIA]) in_austria_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [AUSTRIA, AUSTRIA_HUNGARY]) is_austria_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Austrian people by occupation")], max_steps=5) in_austria = logical_ors([in_austria_wkd, is_austria_people]) in_azerbaijan = c.satisfy(TRAVERSIBLE_COUNTRY, [AZERBAIJAN]) in_belarus = c.satisfy(TRAVERSIBLE_COUNTRY, [BELARUS]) in_belgium = c.satisfy(TRAVERSIBLE_COUNTRY, [BELGIUM]) in_bosnia = c.satisfy(TRAVERSIBLE_COUNTRY, [BOSNIA]) in_bulgaria = c.satisfy(TRAVERSIBLE_COUNTRY, [BULGARIA]) in_croatia = c.satisfy(TRAVERSIBLE_COUNTRY, [CROATIA]) in_cyprus = c.satisfy(TRAVERSIBLE_COUNTRY, [CYPRUS]) in_czech_republic_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [CZECH_REPUBLIC,CZECHOSLOVAKIA]) czhec_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Czechoslovak people")], max_steps=5) in_czech_republic = logical_ors([in_czech_republic_wkd, czhec_people]) in_denmark_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [DENMARK]) is_danish_legendary_figure = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Danish legendary figures")], max_steps=5) in_denmark = logical_ors([in_denmark_wkd,is_danish_legendary_figure]) in_estonia = c.satisfy(TRAVERSIBLE_COUNTRY, [ESTONIA]) in_finland = c.satisfy(TRAVERSIBLE_COUNTRY, [FINLAND]) years_in_france = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in France")], max_steps=5) in_france_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [FRANCE]) in_france = logical_ors([in_france_wkd,years_in_france]) in_georgia = c.satisfy(TRAVERSIBLE_COUNTRY, [GEORGIA]) years_in_germany = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in Germany")], max_steps=5) nazis = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Nazis")], max_steps=5) german_nobility = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:German 
nobility")], max_steps=7) in_germany_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [GERMANY, GERMANY_NAZI, GERMAN_EMPIRE, GERMAN_CONFEDERATION, EAST_GERMANY]) in_germany = logical_ors([in_germany_wkd, years_in_germany, nazis, german_nobility]) years_in_greece = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in Greece")], max_steps=5) ancient_greeks = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Ancient Greeks")], max_steps=7) greek_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Greek people by occupation")], max_steps=7) in_greece_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [GREECE]) in_greece = logical_ors([in_greece_wkd,years_in_greece, ancient_greeks, greek_people]) in_hungary = c.satisfy(TRAVERSIBLE_COUNTRY, [HUNGARY]) in_iceland = c.satisfy(TRAVERSIBLE_COUNTRY, [ICELAND]) in_ireland = c.satisfy(TRAVERSIBLE_COUNTRY, [IRELAND]) in_italy_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [ITALY,NATIONAL_FASCIST_PARTY, KINGDOM_OF_ITALY, ROMAN_EMPIRE, ANCIENT_ROME]) is_italian_politician = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Italian politicians")], max_steps=6) in_roman_empire = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Roman Empire")], max_steps=6) in_history_of_italy = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:History of Italy by region")], max_steps=6) italian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Italian people by occupation")], max_steps=6) ancient_romans = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Ancient Romans")], max_steps=8) in_italy = logical_ors([in_italy_wkd, in_roman_empire, in_history_of_italy, is_italian_politician, italian_people, ancient_romans]) in_kazakhstan = c.satisfy(TRAVERSIBLE_COUNTRY, [KAZAKHSTAN]) in_kosovo = c.satisfy(TRAVERSIBLE_COUNTRY, [KOSOVO]) in_latvia = c.satisfy(TRAVERSIBLE_COUNTRY, [LATVIA]) in_liectenstein = c.satisfy(TRAVERSIBLE_COUNTRY, [LIECHTENSTEIN]) in_lithuania = c.satisfy(TRAVERSIBLE_COUNTRY, [LITHUANIA]) in_luxembourg = c.satisfy(TRAVERSIBLE_COUNTRY, [LUXEMBOURG]) in_macedonia = c.satisfy(TRAVERSIBLE_COUNTRY, [MACEDONIA]) in_malta = c.satisfy(TRAVERSIBLE_COUNTRY, [MALTA]) in_moldova = c.satisfy(TRAVERSIBLE_COUNTRY, [MOLDOVA]) in_monaco = c.satisfy(TRAVERSIBLE_COUNTRY, [MONACO]) in_montenegro = c.satisfy(TRAVERSIBLE_COUNTRY, [MONTENEGRO]) in_netherlands_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [NETHERLANDS, KINGDOM_OF_NETHERLANDS, SOUTHERN_NETHERLANDS]) dutch_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Dutch people by occupation")], max_steps=5) in_netherlands = logical_ors([in_netherlands_wkd, dutch_people]) in_norway = c.satisfy(TRAVERSIBLE_COUNTRY, [NORWAY]) in_poland = c.satisfy(TRAVERSIBLE_COUNTRY, [POLAND]) in_portugal = c.satisfy(TRAVERSIBLE_COUNTRY, [PORTUGAL]) in_romania = c.satisfy(TRAVERSIBLE_COUNTRY, [ROMANIA]) russian_people = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Russian people by occupation")], max_steps=7) sport_in_the_soviet_union = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Sport in the Soviet Union")], max_steps=7) in_russia_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [RUSSIA, RUSSIAN_EMPIRE, SOVIET_UNION]) in_russia = logical_ors([in_russia_wkd, russian_people, sport_in_the_soviet_union]) in_san_marino = c.satisfy(TRAVERSIBLE_COUNTRY, [SAN_MARINO]) in_serbia = c.satisfy(TRAVERSIBLE_COUNTRY, [SERBIA, YOUGOSLAVIA]) in_slovakia = c.satisfy(TRAVERSIBLE_COUNTRY, [SLOVAKIA]) in_slovenia = c.satisfy(TRAVERSIBLE_COUNTRY, [SLOVENIA]) years_in_spain = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in Spain")], max_steps=5) in_spain_wkd = 
c.satisfy(TRAVERSIBLE_COUNTRY, [SPAIN, KINGDOM_OF_CASTILLE]) in_spain = logical_ors([in_spain_wkd, years_in_spain]) years_in_sweden = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in Sweden")], max_steps=5) in_sweden_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SWEDEN]) in_sweden = logical_ors([in_sweden_wkd, years_in_sweden]) years_in_switzerland = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Years in Switzerland")], max_steps=5) in_switzerland_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [SWITZERLAND]) in_switzerland = logical_ors([in_switzerland_wkd, years_in_switzerland ]) in_turkey = c.satisfy(TRAVERSIBLE_COUNTRY, [TURKEY, OTTOMAN_EMPIRE]) in_ukraine = c.satisfy(TRAVERSIBLE_COUNTRY, [UKRAINE]) in_united_kingdom = c.satisfy(TRAVERSIBLE_COUNTRY, [UNITED_KINGDOM, UNITED_KINGDOM_OLD, KINGDOM_OF_ENGLAND, KINGDOM_OF_GREAT_BRITAIN]) popes = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Popes")], max_steps=5) in_vatican_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [VATICAN_CITY]) in_vatican = logical_ors([popes, in_vatican_wkd]) print("Artic and others") ARCTIC = wkd(c,"Q25322") INUIT = wkd(c,"Q189975") FAROE_ISLANDS = wkd(c,"Q4628") TONGA = wkd(c,"Q678") in_faroe_islands_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Faroe Islands")], max_steps=5) in_faroe_islands_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [FAROE_ISLANDS]) in_faroe_islands = logical_ors([in_faroe_islands_wkp, in_faroe_islands_wkd]) in_arctic = c.satisfy(TRAVERSIBLE_COUNTRY, [ARCTIC,INUIT]) in_tonga_wkd = c.satisfy(TRAVERSIBLE_COUNTRY, [TONGA]) in_tonga_wkp = c.satisfy([wprop.CATEGORY_LINK], [wkp(c, "Category:Tonga")], max_steps=5) in_tonga = logical_ors([in_tonga_wkd,in_tonga_wkp]) is_unlocated = logical_ors([is_Wiki_Links,is_taxon]) is_unlocated_not = logical_negate(is_unlocated,[is_populated_place, is_in_outer_space_not_earth,in_tanzania]) is_unlocated_only = logical_ors([is_unlocated_not,is_other]) COUNTRIES = [ALGERIA, ANGOLA, BENIN, BOTSWANA, BURKINA_FASO, BURUNDI, CAPE_VERDE, CAMEROON, CHAD, CENTRAL_AFRICAN_REPUBLIC, COMOROS, DEMOCRATIC_REPUBLIC_OF_CONGO, REPUBLIC_OF_CONGO, DJIBOUTI, EGYPT, EQUATORIAL_GUINEA, ERITREA, ETHIOPIA, GABON, THE_GAMBIA, GHANA, GUINEA, GUINEA_BISSAU, IVORY_COAST, LESOTHO, KENYA, LIBERIA, LIBYA, MADAGASCAR, MALAWI, MALI, MAURITANIA,MAURITIUS, MOROCCO, MOZAMBIQUE, NAMIBIA, NIGER, NIGERIA, RWANDA,SAHARI_ARAB_DEOMOCRATIC_REPUBLIC, SAO_TOME_AND_PRINCIPE, SENEGAL, SEYCHELLES, SIERRA_LEONE, SOMALIA, SOMALI_LAND, SOUTH_AFRICA, SUDAN, TANZANIA, TOGO, TUNISIA, UGANDA, WESTERN_SAHARA, ZAMBIA, ZIMBABWE, AUSTRALIA, FIJI,INDONESIA,KIRIBATI, MARSHALL_ISLANDS, FEDERATED_STATES_OF_MICRONESIA, NAURU, NEW_ZEALAND, PAPUA_NEW_GUINEA, SAMOA, SOLOMON_ISLANDS, VANUATU, ARGENTINA, BOLIVIA, BRAZIL, CHILE, COLOMBIA, ECUADOR, GUYANA, PARAGUAY, PERU, SURINAME, TRINIDAD_AND_TOBAGO, URUGUAY, VENEZUELA, BELIZE, COSTA_RICA,EL_SALVADOR, GUATEMALA, HONDURAS, NICARAGUA, PANAMA, ANTIGUA_BARBUDA, BAHAMAS, BARBADOS, CANADA, CUBA, DOMINICAN_REPUBLIC, GRENADA, GUATEMALA, HAITI, JAMAICA, MEXICO, SAINT_KITTS_AND_NEVIS, SAINT_LUCIA, SAINT_VINCENT_AND_GRENADINES, UNITED_STATES, ALBANIA, ANDORRA, ARMENIA, AUSTRIA, AUSTRIA_HUNGARY, AZERBAIJAN, BELARUS, BELGIUM, BOSNIA, BULGARIA, CROATIA, CYPRUS, CZECH_REPUBLIC, CZECHOSLOVAKIA, DENMARK, ESTONIA, FINLAND, FRANCE, GEORGIA, GERMANY, GERMANY_NAZI, GREECE, HUNGARY, ICELAND, IRELAND, ITALY, NATIONAL_FASCIST_PARTY, KINGDOM_OF_ITALY, ROMAN_EMPIRE, KAZAKHSTAN, KOSOVO, LATVIA, LIECHTENSTEIN, LITHUANIA, LUXEMBOURG, MACEDONIA, MALTA, MOLDOVA, MONACO, MONTENEGRO, NORWAY, NETHERLANDS, 
KINGDOM_OF_NETHERLANDS, SOUTHERN_NETHERLANDS, POLAND, PORTUGAL, ROMANIA, RUSSIA, RUSSIAN_EMPIRE, SOVIET_UNION, SAN_MARINO, SERBIA, YOUGOSLAVIA, SLOVAKIA, SLOVENIA, SPAIN, SWEDEN, SWITZERLAND, TURKEY, OTTOMAN_EMPIRE, UKRAINE, UNITED_KINGDOM, UNITED_KINGDOM_OLD, KINGDOM_OF_ENGLAND, KINGDOM_OF_GREAT_BRITAIN, AFGHANISTAN, BANGLADESH, BRUNEI, CAMBODIA, CHINA, CYPRUS, EAST_TIMOR, EGYPT, GEORGIA, INDIA, INDONESIA, IRAN, IRAQ, ISRAEL, JAPAN, KAZAKHSTAN, KUWAIT, KYRGYZSTAN, LAOS, LEBANON, MALAYSIA, MALDIVES, MONGOLIA, MYANMAR, NEPAL, NORTH_KOREA, OMAN, PALESTINE, PAKISTAN, PHILIPPINES, QATAR, SAUDI_ARABIA, SINGAPORE, SOUTH_KOREA, SRI_LANKA, SYRIA, TAJIKISTAN, TAIWAN, THAILAND, TURKMENISTAN, UNITED_ARAB_EMIRATES, UZBEKISTAN, VIETNAM, YEMEN, VATICAN_CITY, ARCTIC, FAROE_ISLANDS, TONGA ] located_somewhere_wkd = c.satisfy([wprop.COUNTRY_OF_CITIZENSHIP, wprop.COUNTRY], COUNTRIES) located_somewhere = logical_ors([ located_somewhere_wkd, in_austria, in_afghanistan, in_china, in_france, in_sweden, in_china, in_switzerland, in_germany, years_in_usa, in_greece, in_south_korea, in_italy, in_denmark, in_spain, in_iraq, in_egypt, in_vatican, in_canada, in_faroe_islands, in_netherlands, in_russia, in_samoa, in_syria, in_tonga, in_zambia ]) is_unlocated_politician = logical_negate(is_politician,[located_somewhere]) is_unlocated_painter = logical_negate(is_painter, [located_somewhere]) is_unlocated_association_football_player = logical_negate(is_association_football_player, [located_somewhere]) return { "Algeria": in_algeria, "Angola": in_angola, "Benin": in_benin, "BOSTWANA": in_botswana, "BURKINA_FASO": in_burkina_faso, "BURUNDI": in_burundi, "CAPE_VERDE": in_cape_verde, "CAMEROON": in_cameroon, "CHAD": in_chad, "CENTRAL AFRICAN REPUBLIC": in_central_african_republic, "COMOROS": in_comoros, "DEMOCRATIC_REPUBLIC_OF_CONGO": in_democratic_republic_congo, "REPUBLIC_OF_CONGO": in_republic_of_congo, "DJIBOUTI": in_djibouti, "EGYPT": in_egypt, "EQUATORIAL_GUINEA": in_equatorial_guinea, "ERITREA": in_eritrea, "ETHIOPIA": in_ethiopia, "GABON": in_gabon, "THE_GAMBIA": in_the_gambia, "GHANA": in_ghana, "GUINEA": in_guinea, "GUINEA_BISSAU": in_guinea_bissau, "IVORY_COAST": in_ivory_coast, "LESOTHO": in_lesotho, "KENYA": in_kenya, "LIBERIA": in_liberia, "LIBYA": in_libya, "Madagascar": in_madagascar, "Malawi": in_malawi, "Mali": in_mali, "Mauritania": in_mauritania, "Mauritius": in_mauritius, "Morocco": in_morrocco, "Mozambique": in_mozambique, "Namibia": in_namibia, "Niger": in_niger, "Nigeria": in_nigeria, "Rwanda": in_rwanda, "Sahrawi_Arab_Democratic_Republic": in_sadr, "Sao_Tome_and_Principe": in_stap, "Senegal": in_senegal, "Seychelles": in_seychelles, "Sierra_Leone": in_sierra_leone, "Somalia": in_somalia, "Somalilandβ€Ž": in_somali_land, "South_Africaβ€Ž": in_south_africa, "South_Sudanβ€Ž": in_ssudan, "Sudan": in_sudan, "SWAZILAND": in_swaziland, "TANZANIA": in_tanzania, "TOGO": in_togo, "TUNISIA": in_tunisia, "Uganda": in_uganda, "Western Sahara": in_western_sahara, "Zambia": in_zambia, "Zimbabwe": in_zimbabwe, "AUSTRALIA": in_australia, "FIJI": in_fiji, "INDONESIA": in_indonesia, "KIRIBATI": in_kiribati, "MARSHALL_ISLANDS": in_marshall_islands, "FEDERATED_STATES_OF_MICRONESIA": in_federates_states_of_micronesia, "NAURU": in_nauru, "NEW_ZEALAND": in_new_zealand, "PAPUA_NEW_GUINEA": in_papua_new_guinea, "SAMOA": in_samoa, "SOLOMON_ISLANDS": in_solomon_islands, "VANUATU": in_vanuatu, "ARGENTINA": in_argentina, "BOLIVIA": in_bolivia, "BRAZIL": in_brazil, "CHILE": in_chile, "COLOMBIA": in_colombia, "ECUADOR": in_ecuador, 
"GUYANA": in_guyana, "PARAGUAY": in_paraguay, "PERU": in_peru, "SURINAME": in_suriname, "TRINIDAD_AND_TOBAGO": in_trinidad_and_tobago, "URUGUAY": in_uruguay, "VENEZUELA": in_venezuela, "BELIZE": in_belize, "COSTA_RICA": in_costa_rica, "EL_SALVADOR": in_el_salvador, "GUATEMALA": in_guatemala, "HONDURAS": in_honduras, "NICARAGUA": in_nicaragua, "PANAMA": in_panama, "ANTIGUA_BARBUDA": in_antigua_barbuda, "BAHAMAS": in_bahamas, "BARBADOS": in_barbados, "CANADA": in_canada, "CUBA": in_cuba, "DOMINICAN REPUBLIC": in_dominican_republic, "GRENADA": in_grenada, "GUATEMALA": in_guatemala, "HAITI": in_haiti, "JAMAICA": in_jamaica, "MEXICO": in_mexico, "SAINT_KITTS_AND_NEVIS": in_Saint_Kitts_and_Nevis, "SAINT_LUCIA": in_saint_lucia, "SAINT_VINCENT_AND_GRENADINES": in_saint_vincent_and_grenadines, "UNITED_STATES": in_united_states, "ALBANIA": in_albania, "ANDORRA": in_andorra, "ARMENIA": in_armenia, "AUSTRIA": in_austria, "AZERBAIJAN": in_azerbaijan, "BELARUS": in_belarus, "BELGIUM": in_belgium, "BOSNIA": in_bosnia, "BULGARIA": in_bulgaria, "CROATIA": in_croatia, "CYPRUS": in_cyprus, "CZECH REPUBLIC": in_czech_republic, "DENMARK": in_denmark, "ESTONIA": in_estonia, "FINLAND": in_finland, "FRANCE": in_france, "GEORGIA": in_georgia, "GERMANY": in_germany, "GREECE": in_greece, "HUNGARY": in_hungary, "ICELAND": in_iceland, "IRELAND": in_ireland, "ITALY": in_italy, "KAZAKHSTAN": in_kazakhstan, "KOSOVO": in_kosovo, "LATVIA": in_latvia, "LIECHTENSTEIN": in_liectenstein, "LITHUANIA": in_lithuania, "LUXEMBOURG": in_luxembourg, "MACEDONIA": in_macedonia, "MALTA": in_malta, "MOLDOVA": in_moldova, "MONACO": in_monaco, "MONTENEGRO": in_montenegro, "NORWAY": in_norway, "NETHERLANDS": in_netherlands, "POLAND": in_poland, "PORTUGAL": in_portugal, "ROMANIA": in_romania, "RUSSIA": in_russia, "SAN MARINO": in_san_marino, "SERBIA": in_serbia, "SLOVAKIA": in_slovakia, "SLOVENIA": in_slovenia, "SPAIN": in_spain, "SWEDEN": in_sweden, "SWITZERLAND": in_switzerland, "TURKEY": in_turkey, "UKRAINE": in_ukraine, "UNITED KINGDOM": in_united_kingdom, "AFGHANISTAN": in_afghanistan, "BANGLADESH": in_bangladesh, "BHUTAN": in_bhutan, "BRUNEI": in_brunei, "CAMBODIA": in_cambodia, "CHINA": in_china, "CYPRUS": in_cyprus, "EAST TIMOR": in_east_timor, "EGYPT": in_egypt, "GEORGIA": in_georgia, "INDIA": in_india, "INDONESIA": in_indonesia, "IRAN": in_iran, "IRAQ": in_iraq, "ISRAEL": in_israel, "JAPAN": in_japan, "JORDAN": in_jordan, "KAZAKHSTAN": in_kazakhstan, "KUWAIT": in_kuwait, "KYRGYZSTAN": in_kyrgyzstan, "LAOS": in_laos, "LEBANON": in_lebanon, "MALAYSIA": in_malaysia, "MALDIVES": in_maldives, "MONGOLIA": in_mongolia, "MYANMAR": in_myanmar, "NEPAL": in_nepal, "NORTH_KOREA": in_north_korea, "OMAN": in_oman, "PALESTINE": in_palestine, "PAKISTAN": in_pakistan, "PHILIPPINES": in_philippines, "QATAR": in_qatar, "SAUDI_ARABIA": in_saudi_arabia, "SINGAPORE": in_singapore, "SOUTH_KOREA": in_south_korea, "SRI LANKA": in_sri_lanka, "SYRIA": in_syria, "TAJIKISTAN": in_tajikistan, "TAIWAN": in_taiwan, "THAILAND": in_thailand, "TURKMENISTAN": in_turkmenistan, "UNITED_ARAB_EMIRATES": in_united_arab_emirates, "UZBEKISTAN": in_uzbekistan, "VIETNAM": in_vietnam, "YEMEN": in_yemen, "OUTERSPACE": is_in_outer_space_not_earth, "ARCTIC": in_arctic, "FAROE_ISLANDS": in_faroe_islands, "TONGA": in_tonga, "UNLOCATED": is_unlocated_only, "USA_ROADS": in_usa_roads, "POLITICIAN": is_politician, "UNLOCATED_POLITICIAN": is_unlocated_politician, "UNLOCATED_PAINTER": is_unlocated_painter, "UNLOCATED_ASSOCIATION_FOOTBALL_PLAYER": is_unlocated_association_football_player 
    }


"""
Create membership rules for entities based on their date of existence/birth/etc.
More classes can be created by selecting other key dates as hyperplanes.
"""
from numpy import (
    logical_and, logical_or, logical_not, logical_xor, where
)
from wikidata_linker_utils.logic import logical_negate, logical_ors, logical_ands
import wikidata_linker_utils.wikidata_properties as wprop


def wkp(c, name):
    """Convert a string wikipedia article name to its Wikidata index."""
    return c.article2id["enwiki/" + name][0][0]


def wkd(c, name):
    """Convert a wikidata QID to its wikidata index."""
    return c.name2index[name]


def classify(c):
    D1950 = 1950
    pre_1950 = logical_ors([
        c.attribute(wprop.PUBLICATION_DATE) < D1950,
        c.attribute(wprop.DATE_OF_BIRTH) < D1950,
        c.attribute(wprop.INCEPTION) < D1950,
        c.attribute(wprop.DISSOLVED_OR_ABOLISHED) < D1950,
        c.attribute(wprop.POINT_IN_TIME) < D1950,
        c.attribute(wprop.START_TIME) < D1950
    ])
    post_1950 = logical_and(logical_ors([
        c.attribute(wprop.PUBLICATION_DATE) >= D1950,
        c.attribute(wprop.DATE_OF_BIRTH) >= D1950,
        c.attribute(wprop.INCEPTION) >= D1950,
        c.attribute(wprop.DISSOLVED_OR_ABOLISHED) >= D1950,
        c.attribute(wprop.POINT_IN_TIME) >= D1950,
        c.attribute(wprop.START_TIME) >= D1950
    ]), logical_not(pre_1950))
    # some elements are neither pre-1950 nor post-1950; they are "undated"
    # (e.g. no value was provided for any of the time attributes used above)
    undated = logical_and(logical_not(pre_1950), logical_not(post_1950))
    print("%d items have no date information" % (undated.sum(),))
    return {
        "pre-1950": pre_1950,
        "post-1950": post_1950
    }


"""
Associate to each entity a type (exclusive membership). Association is
imperfect (e.g. some false positives, false negatives), however the majority
of entities are covered under this umbrella and thus a model can learn to
predict several of the attributes listed below.
""" from numpy import ( logical_and, logical_or, logical_not, logical_xor, where ) from wikidata_linker_utils.logic import logical_negate, logical_ors, logical_ands import wikidata_linker_utils.wikidata_properties as wprop def wkp(c, name): return c.article2id['enwiki/' + name][0][0] def wkd(c, name): return c.name2index[name] def classify(c): TRAVERSIBLE = [wprop.INSTANCE_OF, wprop.SUBCLASS_OF] TRAVERSIBLE_LO = [wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.IS_A_LIST_OF] MALE = wkd(c,"Q6581097") FEMALE = wkd(c,"Q6581072") HUMAN = wkp(c, "Human") TAXON = wkd(c, "Q16521") HORSE = wkd(c, "Q726") RACE_HORSE = wkd(c, "Q10855242") FOSSIL_TAXON = wkd(c, "Q23038290") MONOTYPIC_TAXON = wkd(c, "Q310890") FOOD = wkp(c, "Food") DRINK = wkp(c, "Drink") BIOLOGY = wkp(c, "Biology") GEOGRAPHICAL_OBJECT = wkd(c, "Q618123") LOCATION_GEOGRAPHY = wkd(c, "Q2221906") ORGANISATION = wkp(c, 'Organization') MUSICAL_WORK = wkd(c, 'Q2188189') AUDIO_VISUAL_WORK = wkd(c,'Q2431196') ART_WORK = wkd(c,'Q838948') PHYSICAL_OBJECT = wkp(c, "Physical body") VALUE = wkd(c, 'Q614112') TIME_INTERVAL = wkd(c, 'Q186081') EVENT = wkd(c, 'Q1656682') POPULATED_PLACE = wkd(c, 'Q486972') ACTIVITY = wkd(c, "Q1914636") PROCESS = wkd(c, "Q3249551") BODY_OF_WATER = wkd(c, "Q15324") PEOPLE = wkd(c, "Q2472587") LANGUAGE = wkd(c, "Q34770") ALPHABET = wkd(c, "Q9779") SPEECH = wkd(c, "Q861911") GAS = wkd(c, "Q11432") CHEMICAL_COMPOUND = wkd(c, "Q11173") DRUG = wkd(c, "Q8386") GEOMETRIC_SHAPE = wkd(c, "Q815741") MIND = wkd(c, "Q450") TV_STATION = wkd(c, "Q1616075") AWARD_CEREMONY = wkd(c, "Q4504495") SONG = wkd(c, "Q7366") SINGLE = wkd(c, "Q134556") CHESS_OPENING = wkd(c, "Q103632") BATTLE = wkd(c, "Q178561") BLOCKADE = wkd(c, "Q273976") MILITARY_OFFENSIVE = wkd(c, "Q2001676") DEVELOPMENT_BIOLOGY = wkd(c, "Q213713") UNIT_OF_MASS = wkd(c, "Q3647172") WATERCOURSE = wkd(c, "Q355304") VOLCANO = wkd(c, "Q8072") LAKE = wkd(c, "Q23397") SEA = wkd(c, "Q165") BRAND = wkd(c, "Q431289") AUTOMOBILE_MANUFACTURER = wkd(c, "Q786820") MOUNTAIN = wkd(c, "Q8502") MASSIF = wkd(c, "Q1061151") WAR = wkd(c, "Q198") CRIME = wkd(c, "Q83267") GENE = wkd(c, "Q7187") CHROMOSOME = wkd(c, "Q37748") DISEASE = wkd(c, "Q12136") ASTEROID = wkd(c, "Q3863") COMET = wkd(c, "Q3559") PLANET = wkd(c, "Q634") GALAXY = wkd(c, "Q318") ASTRONOMICAL_OBJECT = wkd(c, "Q6999") FICTIONAL_ASTRONOMICAL_OBJECT = wkd(c, "Q15831598") MATHEMATICAL_OBJECT = wkd(c, "Q246672") REGION = wkd(c, "Q82794") PHYSICAL_QUANTITY = wkd(c, "Q107715") NUMBER = wkd(c, "Q11563") NATURAL_PHENOMENON = wkd(c, "Q1322005") GEOLOGICAL_FORMATION = wkd(c, "Q736917") CURRENCY = wkd(c, "Q8142") MONEY = wkd(c, "Q1368") LANDFORM = wkd(c, "Q271669") COUNTRY = wkd(c, "Q6256") FICTIONAL_HUMAN = wkd(c, "Q15632617") AWARD = wkd(c, "Q618779") RELIGIOUS_TEXT = wkd(c, "Q179461") OCCUPATION = wkd(c, "Q12737077") PROFESSION = wkd(c, "Q28640") POSITION = wkd(c, "Q4164871") RELIGION = wkd(c, "Q9174") SOFTWARE = wkd(c, "Q7397") ELECTRONIC_GAME = wkd(c, "Q2249149") GAME = wkd(c, "Q11410") VIDEO_GAME_FRANCHISES = wkd(c, "Q7213857") TRAIN_STATION = wkd(c, "Q55488") BRIDGE = wkd(c, "Q12280") AIRPORT = wkd(c, "Q62447") SURNAME = wkd(c, "Q101352") GIVEN_NAME = wkd(c, "Q202444") FEMALE_GIVEN_NAME = wkd(c, "Q11879590") MALE_GIVEN_NAME = wkd(c, "Q12308941") GIVEN_NAME = wkd(c, "Q202444") MOLECULE = wkd(c, "Q11369") PROTEIN_FAMILY = wkd(c, "Q417841") PROTEIN_DOMAIN = wkd(c, "Q898273") MULTIPROTEIN_COMPLEX = wkd(c, "Q420927") LAW = wkd(c, "Q7748") VEHICLE = wkd(c, "Q42889") MODE_OF_TRANSPORT = wkd(c, "Q334166") WATERCRAFT = wkd(c, "Q1229765") 
AIRCRAFT = wkd(c, "Q11436") ROAD_VEHICLE = wkd(c, "Q1515493") AUTOMOBILE_MODEL = wkd(c, "Q3231690") AUTOMOBILE = wkd(c, "Q1420") TRUCK = wkd(c, "Q43193") MOTORCYCLE_MODEL = wkd(c, "Q23866334") TANK = wkd(c, "Q12876") FIRE_ENGINE = wkd(c, "Q208281") AMBULANCE = wkd(c, "Q180481") RAILROAD = wkd(c, "Q22667") RADIO_PROGRAM = wkd(c, "Q1555508") DISCOGRAPHY = wkd(c, "Q273057") WEBSITE = wkd(c, "Q35127") WEAPON = wkd(c, "Q728") PUBLICATION = wkd(c, "Q732577") ARTICLE = wkd(c, "Q191067") FAMILY = wkd(c, "Q8436") FICTIONAL_CHARACTER = wkd(c, "Q95074") FACILITY = wkd(c, "Q13226383") CONCEPT = wkd(c, "Q151885") PROVERB = wkd(c, "Q35102") ANATOMICAL_STRUCTURE = wkd(c, "Q4936952") BREED = wkd(c, "Q38829") PLANT_STRUCTURE = wkd(c, "Q25571752") PLANT = wkd(c, "Q756") SPECIAL_FIELD = wkd(c, "Q1047113") ACADEMIC_DISCIPLINE = wkd(c, "Q11862829") TERM = wkd(c, "Q1969448") SEXUAL_ORIENTATION = wkd(c, "Q17888") PARADIGM = wkd(c, "Q28643") LEGAL_CASE = wkd(c, "Q2334719") SPORT = wkd(c, "Q349") RECURRING_SPORTING_EVENT = wkd(c, "Q18608583") ART_GENRE = wkd(c, "Q1792379") SPORTING_EVENT = wkd(c, "Q16510064") COMIC = wkd(c, "Q1004") CHARACTER = wkd(c, "Q3241972") PERSON = wkd(c, "Q215627") NATIONAL_HERITAGE_SITE = wkd(c, "Q358") ESTATE = wkd(c, "Q2186896") ELECTION = wkd(c, "Q40231") LEGISLATIVE_TERM = wkd(c, "Q15238777") COMPETITION = wkd(c, "Q476300") LEGAL_ACTION = wkd(c, "Q27095657") SEX_TOY = wkd(c, "Q10816") MONUMENT = wkd(c, "Q4989906") ASSOCIATION_FOOTBALL_POSITION = wkd(c, "Q4611891") # ICE_HOCKEY_POSITION = wkd(c, "Q18533987") # PART_OF_LAND = wkd(c, "Q23001306") MUSIC_DOWNLOAD = wkd(c, "Q6473564") OCCUPATION = wkd(c, "Q12737077") KINSHIP = wkd(c, "Q171318") KIN = wkd(c, "Q21073947") PSEUDONYM = wkd(c, "Q61002") STOCK_CHARACTER = wkd(c, "Q162244") TITLE = wkd(c, "Q4189293") DATA_FORMAT = wkd(c, "Q494823") ELECTROMAGNETIC_WAVE = wkd(c, "Q11386") POSTAL_CODE = wkd(c, "Q37447") CLOTHING = wkd(c, "Q11460") NATIONALITY = wkd(c, "Q231002") BASEBALL_POSITION = wkd(c, "Q1151733") AMERICAN_FOOTBALL_POSITIONS = wkd(c, "Q694589") POSITION_TEAM_SPORTS = wkd(c, "Q1781513") FILE_FORMAT_FAMILY = wkd(c, "Q26085352") FILE_FORMAT = wkd(c, "Q235557") TAXONOMIC_RANK = wkd(c, "Q427626") ORDER_HONOUR = wkd(c, "Q193622") BRANCH_OF_SCIENCE = wkd(c, "Q2465832") RESEARCH = wkd(c, "Q42240") METHOD = wkd(c, "Q1799072") ALGORITHM = wkd(c, "Q8366") PROPOSITION = wkd(c, "Q108163") SPORTSPERSON = wkd(c, "Q2066131") LAKES_MINESOTTA = wkd(c, "Q8580663") NAMED_PASSENGER_TRAIN_INDIA = wkd(c, "Q9260591") TOWNSHIPS_MISOURI = wkd(c, "Q8861637") RACE_ETHNICITY_USA = wkd(c, "Q2035701") RECORD_CHART = wkd(c, "Q373899") SINGLE_ENGINE_AIRCRAFT = wkd(c, "Q7405339") SIGNIFICANT_OTHER = wkd(c, "Q841509") BILLBOARDS = wkd(c, "Q19754079") RADIO_STATION = wkd(c, "Q19754079") RADIO_STATION2 = wkd(c, "Q1474493") NOBLE_TITLE = wkd(c, "Q216353") HOUSES_NATIONAL_REGISTER_ARKANSAS = wkd(c, "Q8526394") CLADE = wkd(c, "Q713623") BOARD_GAMES = wkd(c, "Q131436") CLAN = wkd(c, "Q211503") ACCIDENT = wkd(c, "Q171558") MASSACRE = wkd(c, "Q3199915") TORNADO = wkd(c, "Q8081") NATURAL_DISASTER = wkd(c, "Q8065") SPORTS_TEAM = wkd(c, "Q12973014") BAND_ROCK_AND_POP = wkd(c, "Q215380") ORGANIZATION_OTHER = wkd(c, "Q43229") POLITICAL_PARTY = wkd(c, "Q7278") SPECIES = wkd(c, "Q7432") CHEMICAL_SUBSTANCE = wkd(c, "Q79529") THREATENED_SPECIES = wkd(c, "Q515487") HYPOTHETICAL_SPECIES = wkd(c, "Q5961273") CONFLICT = wkd(c, "Q180684") PRIVATE_USE_AREAS = wkd(c, "Q11152836") BARONETCIES_IN_UK = wkd(c, "Q8290061") EXTINCT_BARONETCIES_ENGLAND = wkd(c, "Q8432223") 
EXTINCT_BARONETCIES_UK = wkd(c, "Q8432226") WIKIPEDIA_DISAMBIGUATION = wkd(c, "Q4167410") WIKIPEDIA_TEMPLATE_NAMESPACE = wkd(c, "Q11266439") WIKIPEDIA_LIST = wkd(c, "Q13406463") WIKIPEDIA_PROJECT_PAGE = wkd(c, "Q14204246") WIKIMEDIA_CATEGORY_PAGE = wkd(c, "Q4167836") WIKIPEDIA_USER_LANGUAGE_TEMPLATE = wkd(c, "Q19842659") WIKIDATA_PROPERTY = wkd(c, "Q18616576") COLLEGIATE_ATHLETICS_PROGRAM = wkd(c, "Q5146583") SPORTS_TRANSFER_AF = wkd(c, "Q1811518") DEMOGRAPHICS_OF_NORWAY = wkd(c, "Q7664203") DOCUMENT = wkd(c, "Q49848") BASIC_STAT_UNIT_NORWAY = wkd(c, "Q4580177") PUBLIC_TRANSPORT = wkd(c, "Q178512") HAZARD = wkd(c, "Q1132455") BASEBALL_RULES = wkd(c, "Q1153773") HIT_BASEBALL = wkd(c, "Q713493") OUT_BASEBALL = wkd(c, "Q1153773") LAWS_OF_ASSOCIATION_FOOTBALL = wkd(c, "Q7215850") CRICKET_LAWS_AND_REGULATION = wkd(c, "Q8427034") MEASUREMENTS_OF_POVERTY = wkd(c, "Q8614855") PROFESSIONAL_WRESTLING_MATCH_TYPES = wkd(c, "Q679633") CITATION = wkd(c, "Q1713") INTERNATIONAL_RELATIONS = wkd(c, "Q166542") WORLD_VIEW = wkd(c, "Q49447") ROCK_GEOLOGY = wkd(c, "Q8063") BASEBALL_STATISTIC = wkd(c, "Q8291081") BASEBALL_STATISTICS = wkd(c, "Q809898") TRAIN_ACCIDENT = wkd(c, "Q1078765") CIRCUS_SKILLS = wkd(c, "Q4990963") FOLKLORE = wkd(c, "Q36192") NEWS_BUREAU = wkd(c, "Q19824398") RECESSION = wkd(c, "Q176494") NYC_BALLET = wkd(c, "Q1336942") SPORTS_RECORD = wkd(c, "Q1241356") WINGSPAN = wkd(c, "Q245097") WIN_LOSS_RECORD_PITCHING = wkd(c, "Q1202506") CRICKET_TERMINOLOGY = wkd(c, "Q8427141") UNION_ARMY = wkd(c, "Q1752901") POPULATION = wkd(c, "Q33829") WIND = wkd(c, "Q8094") TORPEDO_TUBE = wkd(c, "Q1330003") WEAPONS_PLATFORM = wkd(c, "Q7978115") COLOR = wkd(c, "Q1075") SOCIAL_SCIENCE = wkd(c, "Q34749") DISCIPLINE_ACADEMIA = wkd(c, "Q11862829") FORMAL_SCIENCE = wkd(c, "Q816264") ASPHALT = wkd(c, "Q167510") TALK_RADIO = wkd(c, "Q502319") ART_MOVEMENT = wkd(c, "Q968159") IDEOLOGY = wkd(c, "Q7257") # print([c.get_name(idx) for idx in c.relation(wprop.INSTANCE_OF)[wkd(c, "Q14934048")]]) # print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.INSTANCE_OF)[wkd(c, "Q14934048")]]) # print([c.get_name(idx) for idx in c.relation(wprop.PART_OF)[wkd(c, "Q14934048")]]) # print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.PART_OF)[wkd(c, "Q14934048")]]) # print([c.get_name(idx) for idx in c.relation(wprop.SUBCLASS_OF)[wkd(c, "Q14934048")]]) # print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.SUBCLASS_OF)[wkd(c, "Q14934048")]]) # print([c.get_name(idx) for idx in c.relation(wprop.CATEGORY_LINK)[wkd(c, "Q14934048")]]) # print([c.get_name(idx) for idx in c.get_inverted_relation(wprop.CATEGORY_LINK)[wkd(c, "Q14934048")]]) is_sports_terminology = logical_or( c.satisfy(TRAVERSIBLE_LO, [OUT_BASEBALL, HIT_BASEBALL]), c.satisfy( [wprop.CATEGORY_LINK], [ BASEBALL_RULES, LAWS_OF_ASSOCIATION_FOOTBALL, CRICKET_LAWS_AND_REGULATION, PROFESSIONAL_WRESTLING_MATCH_TYPES, CRICKET_TERMINOLOGY ], max_steps=1 ) ) is_accident = c.satisfy(TRAVERSIBLE_LO, [ACCIDENT]) is_taxon = c.satisfy([wprop.INSTANCE_OF, wprop.IS_A_LIST_OF], [ TAXON, FOSSIL_TAXON, MONOTYPIC_TAXON, HORSE, RACE_HORSE, CLADE, SPECIES, THREATENED_SPECIES, HYPOTHETICAL_SPECIES ] ) is_breed = c.satisfy(TRAVERSIBLE_LO, [BREED]) is_taxon_or_breed = logical_or(is_taxon, is_breed) is_human = c.satisfy(TRAVERSIBLE_LO, [HUMAN, FICTIONAL_HUMAN]) is_country = c.satisfy(TRAVERSIBLE_LO, [COUNTRY]) is_people = c.satisfy( TRAVERSIBLE_LO, [ PEOPLE, NATIONALITY, SPORTS_TRANSFER_AF, POPULATION ] ) is_populated_place = logical_or( c.satisfy(TRAVERSIBLE_LO, 
[POPULATED_PLACE]), c.satisfy([wprop.CATEGORY_LINK], [TOWNSHIPS_MISOURI], max_steps=1) ) is_organization = c.satisfy( TRAVERSIBLE_LO, [ POLITICAL_PARTY, COLLEGIATE_ATHLETICS_PROGRAM, ORGANIZATION_OTHER, ORGANISATION, SPORTS_TEAM, BAND_ROCK_AND_POP, NEWS_BUREAU, NYC_BALLET, UNION_ARMY ] ) is_position = c.satisfy( TRAVERSIBLE_LO, [ POSITION, OCCUPATION, POSITION_TEAM_SPORTS, AMERICAN_FOOTBALL_POSITIONS, ASSOCIATION_FOOTBALL_POSITION, BASEBALL_POSITION, # ICE_HOCKEY_POSITION, SPORTSPERSON ] ) is_kinship = c.satisfy(TRAVERSIBLE_LO, [KINSHIP]) is_kin = c.satisfy([wprop.SUBCLASS_OF, wprop.IS_A_LIST_OF], [KIN]) is_title = logical_or( c.satisfy(TRAVERSIBLE_LO, [TITLE, NOBLE_TITLE]), c.satisfy([wprop.CATEGORY_LINK], [BARONETCIES_IN_UK, EXTINCT_BARONETCIES_UK, EXTINCT_BARONETCIES_ENGLAND], max_steps=1) ) is_art_work = c.satisfy(TRAVERSIBLE_LO, [ART_WORK, COMIC]) is_audio_visual_work = c.satisfy(TRAVERSIBLE_LO, [AUDIO_VISUAL_WORK, TV_STATION]) is_fictional_character = c.satisfy(TRAVERSIBLE_LO, [FICTIONAL_CHARACTER]) is_name = c.satisfy(TRAVERSIBLE_LO, [GIVEN_NAME, SURNAME, FEMALE_GIVEN_NAME, MALE_GIVEN_NAME, PSEUDONYM]) is_stock_character = c.satisfy([wprop.INSTANCE_OF, wprop.IS_A_LIST_OF], [STOCK_CHARACTER]) is_family = c.satisfy(TRAVERSIBLE_LO, [FAMILY, CLAN]) is_award = c.satisfy(TRAVERSIBLE_LO, [AWARD]) is_electromagnetic_wave = c.satisfy(TRAVERSIBLE_LO, [ELECTROMAGNETIC_WAVE]) is_geographical_object = c.satisfy( TRAVERSIBLE_LO, [ GEOGRAPHICAL_OBJECT, BODY_OF_WATER, LOCATION_GEOGRAPHY, GEOLOGICAL_FORMATION, NATIONAL_HERITAGE_SITE, ESTATE, # PART_OF_LAND, PRIVATE_USE_AREAS ] ) is_postal_code = c.satisfy(TRAVERSIBLE_LO, [POSTAL_CODE]) is_person = c.satisfy(TRAVERSIBLE_LO, [PERSON]) is_person_only = logical_or( logical_negate( is_person, [ is_human, is_people, is_populated_place, is_organization, is_position, is_title, is_kinship, is_kin, is_country, is_geographical_object, is_art_work, is_audio_visual_work, is_fictional_character, is_name, is_family, is_award ] ), is_stock_character) is_male = c.satisfy([wprop.SEX_OR_GENDER], [MALE]) is_female = c.satisfy([wprop.SEX_OR_GENDER], [FEMALE]) is_human_male = logical_and(is_human, is_male) is_human_female = logical_and(is_human, is_female) is_musical_work = c.satisfy(TRAVERSIBLE_LO, [MUSICAL_WORK, DISCOGRAPHY]) is_song = c.satisfy(TRAVERSIBLE_LO, [SONG, SINGLE]) is_radio_program = c.satisfy( TRAVERSIBLE_LO, [ RADIO_PROGRAM, RADIO_STATION, RADIO_STATION2, TALK_RADIO ] ) is_sexual_orientation = c.satisfy(TRAVERSIBLE_LO, [SEXUAL_ORIENTATION]) is_taxonomic_rank = c.satisfy([wprop.INSTANCE_OF], [TAXONOMIC_RANK]) is_order = c.satisfy(TRAVERSIBLE_LO, [ORDER_HONOUR]) is_train_station = c.satisfy(TRAVERSIBLE_LO, [TRAIN_STATION]) is_bridge = c.satisfy(TRAVERSIBLE_LO, [BRIDGE]) is_airport = c.satisfy(TRAVERSIBLE_LO, [AIRPORT]) is_sex_toy = c.satisfy(TRAVERSIBLE_LO, [SEX_TOY]) is_monument = c.satisfy(TRAVERSIBLE_LO, [MONUMENT]) is_physical_object = c.satisfy( TRAVERSIBLE_LO, [ PHYSICAL_OBJECT, BOARD_GAMES, ELECTRONIC_GAME, GAME, ROCK_GEOLOGY, ASPHALT ] ) is_clothing = c.satisfy(TRAVERSIBLE_LO, [CLOTHING]) is_mathematical_object = c.satisfy(TRAVERSIBLE_LO, [MATHEMATICAL_OBJECT]) is_physical_quantity = logical_or( c.satisfy( TRAVERSIBLE_LO, [ PHYSICAL_QUANTITY, BASIC_STAT_UNIT_NORWAY, SPORTS_RECORD, WINGSPAN, WIN_LOSS_RECORD_PITCHING, BASEBALL_STATISTICS ] ), c.satisfy( [wprop.CATEGORY_LINK], [ DEMOGRAPHICS_OF_NORWAY, MEASUREMENTS_OF_POVERTY, BASEBALL_STATISTIC ], max_steps=1 ) ) is_number = c.satisfy(TRAVERSIBLE_LO, [NUMBER]) is_astronomical_object = c.satisfy( 
TRAVERSIBLE_LO, [ ASTEROID, COMET, PLANET, GALAXY, ASTRONOMICAL_OBJECT, FICTIONAL_ASTRONOMICAL_OBJECT ] ) is_hazard = c.satisfy(TRAVERSIBLE_LO, [HAZARD, TRAIN_ACCIDENT]) is_date = c.satisfy(TRAVERSIBLE_LO, [TIME_INTERVAL]) is_algorithm = c.satisfy(TRAVERSIBLE_LO, [ALGORITHM]) is_value = c.satisfy(TRAVERSIBLE_LO, [VALUE]) is_currency = c.satisfy(TRAVERSIBLE_LO, [CURRENCY, MONEY]) is_event = c.satisfy(TRAVERSIBLE_LO, [EVENT, RECESSION]) is_election = c.satisfy(TRAVERSIBLE_LO, [ELECTION]) is_legislative_term = c.satisfy(TRAVERSIBLE_LO, [LEGISLATIVE_TERM]) is_activity = c.satisfy([wprop.INSTANCE_OF, wprop.IS_A_LIST_OF], [ACTIVITY, MUSIC_DOWNLOAD, CIRCUS_SKILLS]) is_activity_subclass = c.satisfy([wprop.SUBCLASS_OF], [ACTIVITY, MUSIC_DOWNLOAD, CIRCUS_SKILLS]) is_food = c.satisfy([wprop.INSTANCE_OF, wprop.PART_OF, wprop.SUBCLASS_OF], [FOOD, DRINK]) is_wikidata_prop = c.satisfy(TRAVERSIBLE_LO, [WIKIDATA_PROPERTY]) is_wikipedia_disambiguation = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_DISAMBIGUATION]) is_wikipedia_template_namespace = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_TEMPLATE_NAMESPACE]) is_wikipedia_list = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_LIST]) is_wikipedia_project_page = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_PROJECT_PAGE]) is_wikipedia_user_language_template = c.satisfy([wprop.INSTANCE_OF], [WIKIPEDIA_USER_LANGUAGE_TEMPLATE]) is_wikimedia_category_page = c.satisfy([wprop.INSTANCE_OF], [WIKIMEDIA_CATEGORY_PAGE]) is_legal_case = c.satisfy(TRAVERSIBLE_LO, [LEGAL_CASE]) is_sport = c.satisfy(TRAVERSIBLE_LO, [SPORT]) is_data_format = c.satisfy(TRAVERSIBLE_LO, [DATA_FORMAT, FILE_FORMAT_FAMILY, FILE_FORMAT]) is_research_method = c.satisfy(TRAVERSIBLE_LO, [RESEARCH, METHOD, RACE_ETHNICITY_USA]) is_proposition = c.satisfy(TRAVERSIBLE_LO, [PROPOSITION]) is_record_chart = c.satisfy(TRAVERSIBLE_LO, [RECORD_CHART, BILLBOARDS]) is_international_relations = c.satisfy(TRAVERSIBLE_LO, [INTERNATIONAL_RELATIONS]) is_union = c.satisfy(TRAVERSIBLE_LO, [SIGNIFICANT_OTHER]) is_recurring_sporting_event = c.satisfy( TRAVERSIBLE_LO, [RECURRING_SPORTING_EVENT] ) is_sport_event = logical_or( logical_and( is_sport, c.satisfy([wprop.PART_OF, wprop.IS_A_LIST_OF], where(is_recurring_sporting_event)[0]) ), c.satisfy(TRAVERSIBLE_LO, [SPORTING_EVENT, COMPETITION]) ) is_genre = c.satisfy(TRAVERSIBLE_LO, [ART_GENRE, ART_MOVEMENT]) is_landform = c.satisfy(TRAVERSIBLE_LO, [LANDFORM]) is_language = c.satisfy(TRAVERSIBLE_LO, [LANGUAGE]) is_alphabet = c.satisfy(TRAVERSIBLE_LO, [ALPHABET]) is_railroad = logical_or( c.satisfy(TRAVERSIBLE_LO, [RAILROAD]), c.satisfy([wprop.CATEGORY_LINK], [NAMED_PASSENGER_TRAIN_INDIA], max_steps=1) ) is_speech = c.satisfy(TRAVERSIBLE_LO, [SPEECH]) is_language_only = logical_negate(is_language, [is_speech]) is_alphabet_only = logical_negate(is_alphabet, [is_speech, is_language]) is_war = c.satisfy(TRAVERSIBLE_LO, [WAR]) is_battle = c.satisfy(TRAVERSIBLE_LO, [BATTLE, BLOCKADE, MILITARY_OFFENSIVE, CONFLICT, MASSACRE]) is_crime = c.satisfy(TRAVERSIBLE_LO, [CRIME]) is_gas = c.satisfy(TRAVERSIBLE_LO, [GAS]) is_chemical_compound = c.satisfy(TRAVERSIBLE_LO, [CHEMICAL_COMPOUND, DRUG, CHEMICAL_SUBSTANCE]) is_chemical_compound_only = logical_negate(is_chemical_compound, [is_food]) is_gas_only = logical_negate(is_gas, [is_chemical_compound]) is_geometric_shape = c.satisfy(TRAVERSIBLE_LO, [GEOMETRIC_SHAPE]) is_award_ceremony = c.satisfy(TRAVERSIBLE_LO, [AWARD_CEREMONY]) is_strategy = c.satisfy(TRAVERSIBLE_LO, [CHESS_OPENING]) is_gene = c.satisfy(TRAVERSIBLE_LO, [GENE, CHROMOSOME]) is_character = 
c.satisfy(TRAVERSIBLE_LO, [CHARACTER]) is_law = c.satisfy(TRAVERSIBLE_LO, [LAW]) is_legal_action = c.satisfy(TRAVERSIBLE_LO, [LEGAL_ACTION]) is_facility = logical_or( c.satisfy(TRAVERSIBLE_LO, [FACILITY]), c.satisfy([wprop.CATEGORY_LINK], [HOUSES_NATIONAL_REGISTER_ARKANSAS], max_steps=1) ) is_molecule = c.satisfy(TRAVERSIBLE_LO, [MOLECULE, PROTEIN_FAMILY, PROTEIN_DOMAIN, MULTIPROTEIN_COMPLEX]) is_disease = c.satisfy(TRAVERSIBLE_LO, [DISEASE]) is_mind = c.satisfy(TRAVERSIBLE_LO, [MIND]) is_religion = c.satisfy(TRAVERSIBLE_LO, [RELIGION]) is_natural_phenomenon = c.satisfy(TRAVERSIBLE_LO, [NATURAL_PHENOMENON, NATURAL_DISASTER, WIND]) is_anatomical_structure = c.satisfy(TRAVERSIBLE_LO, [ANATOMICAL_STRUCTURE]) is_plant = c.satisfy(TRAVERSIBLE_LO + [wprop.PARENT_TAXON], [PLANT_STRUCTURE, PLANT]) is_region = c.satisfy(TRAVERSIBLE_LO, [REGION]) is_software = logical_or( c.satisfy(TRAVERSIBLE_LO, [SOFTWARE]), c.satisfy([wprop.CATEGORY_LINK], [VIDEO_GAME_FRANCHISES], max_steps=1) ) is_website = c.satisfy(TRAVERSIBLE_LO, [WEBSITE]) is_river = logical_and(c.satisfy(TRAVERSIBLE_LO, [WATERCOURSE]), is_geographical_object) is_lake = logical_or( logical_and(c.satisfy(TRAVERSIBLE_LO, [LAKE]), is_geographical_object), c.satisfy([wprop.CATEGORY_LINK], [LAKES_MINESOTTA], max_steps=1) ) is_sea = logical_and(c.satisfy(TRAVERSIBLE_LO, [SEA]), is_geographical_object) is_volcano = logical_and(c.satisfy(TRAVERSIBLE_LO, [VOLCANO]), is_geographical_object) is_development_biology = c.satisfy([wprop.PART_OF, wprop.SUBCLASS_OF, wprop.INSTANCE_OF], [DEVELOPMENT_BIOLOGY, BIOLOGY]) is_unit_of_mass = c.satisfy(TRAVERSIBLE_LO, [UNIT_OF_MASS]) is_vehicle = c.satisfy(TRAVERSIBLE_LO, [VEHICLE, MODE_OF_TRANSPORT, PUBLIC_TRANSPORT]) is_watercraft = c.satisfy(TRAVERSIBLE_LO, [WATERCRAFT]) is_aircraft = logical_or( c.satisfy(TRAVERSIBLE_LO, [AIRCRAFT]), c.satisfy([wprop.CATEGORY_LINK], [SINGLE_ENGINE_AIRCRAFT], max_steps=1) ) is_road_vehicle = c.satisfy( TRAVERSIBLE_LO, [ ROAD_VEHICLE, TANK, FIRE_ENGINE, AMBULANCE, AUTOMOBILE_MODEL, MOTORCYCLE_MODEL ] ) is_weapon = c.satisfy(TRAVERSIBLE_LO, [WEAPON, TORPEDO_TUBE, WEAPONS_PLATFORM]) is_book_magazine_article_proverb = c.satisfy( TRAVERSIBLE_LO, [ PUBLICATION, ARTICLE, RELIGIOUS_TEXT, PROVERB, DOCUMENT, CITATION, FOLKLORE ] ) is_brand = c.satisfy(TRAVERSIBLE_LO, [BRAND]) is_concept = logical_or( c.satisfy([wprop.INSTANCE_OF], [TERM, ACADEMIC_DISCIPLINE, SPECIAL_FIELD, BRANCH_OF_SCIENCE, WORLD_VIEW] ), c.satisfy([wprop.SUBCLASS_OF], [SOCIAL_SCIENCE, DISCIPLINE_ACADEMIA, FORMAL_SCIENCE, IDEOLOGY]) ) is_color = c.satisfy(TRAVERSIBLE_LO, [COLOR]) is_paradigm = c.satisfy(TRAVERSIBLE_LO, [PARADIGM]) is_vehicle_brand = logical_or( logical_and(c.satisfy([wprop.PRODUCT_OR_MATERIAL_PRODUCED], [AUTOMOBILE, TRUCK]), is_brand), c.satisfy(TRAVERSIBLE_LO, [AUTOMOBILE_MANUFACTURER]) ) is_mountain_massif = logical_and(c.satisfy(TRAVERSIBLE_LO, [MOUNTAIN, MASSIF]), is_geographical_object) is_mountain_only = logical_negate( is_mountain_massif, [ is_volcano ] ) is_physical_object_only = logical_negate( is_physical_object, [ is_audio_visual_work, is_art_work, is_musical_work, is_geographical_object, is_currency, is_gas, is_clothing, is_chemical_compound, is_electromagnetic_wave, is_song, is_food, is_character, is_law, is_software, is_website, is_vehicle, is_lake, is_landform, is_railroad, is_airport, is_aircraft, is_watercraft, is_sex_toy, is_data_format, is_date, is_research_method, is_sport, is_watercraft, is_aircraft, is_brand, is_vehicle_brand, is_road_vehicle, is_railroad, is_radio_program, 
is_weapon, is_book_magazine_article_proverb, is_brand, is_organization, is_facility, is_anatomical_structure, is_gene, is_monument ] ) is_musical_work_only = logical_negate( is_musical_work, [ is_song ] ) is_geographical_object_only = logical_negate( is_geographical_object, [ is_river, is_lake, is_sea, is_volcano, is_mountain_only, is_region, is_monument, is_country, is_facility, is_food, is_airport, is_bridge, is_train_station ] ) is_event_election_only = logical_negate( logical_ors([is_event, is_election, is_accident]), [ is_award_ceremony, is_war, is_natural_phenomenon ] ) is_region_only = logical_negate( is_region, [ is_populated_place, is_country, is_lake, is_river, is_sea, is_volcano, is_mountain_only ] ) is_astronomical_object_only = logical_negate( is_astronomical_object, [ is_geographical_object ] ) is_date_only = logical_negate( is_date, [ is_strategy, is_development_biology ] ) is_development_biology_date = logical_and(is_development_biology, is_date) is_value_only = logical_negate( is_value, [ is_unit_of_mass, is_event, is_election, is_currency, is_number, is_physical_quantity, is_award, is_date, is_postal_code ] ) is_activity_subclass_only = logical_negate( logical_or(is_activity_subclass, is_activity), [ is_crime, is_war, is_chemical_compound, is_gene, is_molecule, is_mathematical_object, is_sport, is_sport_event, is_event, is_paradigm, is_position, is_title, is_algorithm, is_order, is_organization, is_research_method, is_proposition, is_taxonomic_rank, is_algorithm, is_event, is_election, is_genre, is_concept ] ) is_crime_only = logical_negate( is_crime, [ is_war ] ) is_number_only = logical_negate( is_number, [ is_physical_quantity ] ) is_molecule_only = logical_negate( is_molecule, [ is_gene, is_chemical_compound ] ) # VEHICLES: is_vehicle_only = logical_negate( is_vehicle, [ is_watercraft, is_aircraft, is_road_vehicle ] ) is_watercraft_only = logical_negate( is_watercraft, [ is_aircraft ] ) is_road_vehicle_only = logical_negate( is_road_vehicle, [ is_aircraft, is_watercraft, ] ) # remove groups that have occupations from mathematical objects: is_object_with_occupation = c.satisfy([wprop.INSTANCE_OF, wprop.OCCUPATION], [OCCUPATION, PROFESSION, POSITION]) is_mathematical_object_only = logical_negate( is_mathematical_object, [ is_geometric_shape, is_physical_quantity, is_number, is_object_with_occupation, is_landform ] ) is_organization_only = logical_negate( is_organization, [ is_country, is_geographical_object, is_family, is_people ] ) is_art_work_only = logical_negate( is_art_work, [ is_musical_work, is_audio_visual_work, is_sex_toy, is_monument ] ) is_software_only = logical_negate( is_software, [ is_language, is_organization, is_website ] ) is_website_only = logical_negate( is_website, [ is_organization, is_language ] ) is_taxon_or_breed_only = logical_negate( is_taxon_or_breed, [ is_human, is_plant ] ) is_human_only = logical_negate( is_human, [ is_male, is_female, is_kin, is_kinship, is_title ] ) is_weapon_only = logical_negate( is_weapon, [ is_software, is_website, is_vehicle ] ) is_book_magazine_article_proverb_only = logical_negate( is_book_magazine_article_proverb, [ is_software, is_website, is_musical_work, is_song, is_law, is_legal_action ] ) is_fictional_character_only = logical_negate( is_fictional_character, [ is_human, is_stock_character ] ) is_battle_only = logical_negate( is_battle, [ is_war, is_crime ] ) is_brand_only = logical_negate( is_brand, [ is_vehicle, is_aircraft, is_watercraft, is_website, is_software, is_vehicle_brand ] ) is_vehicle_brand_only = 
logical_negate( is_vehicle_brand, [ is_vehicle, is_aircraft, is_watercraft, is_website, is_software ] ) is_concept_paradigm_proposition_only = logical_negate( logical_ors([is_concept, is_paradigm, is_proposition]), [ is_physical_object, is_physical_quantity, is_software, is_website, is_color, is_vehicle, is_electromagnetic_wave, is_brand, is_vehicle_brand, is_currency, is_fictional_character, is_human, is_aircraft, is_geographical_object, is_geometric_shape, is_mathematical_object, is_musical_work, is_mountain_massif, is_lake, is_landform, is_language, is_anatomical_structure, is_book_magazine_article_proverb, is_development_biology, is_plant, is_sexual_orientation, is_genre, is_legislative_term ] ) is_anatomical_structure_only = logical_negate( is_anatomical_structure, [ is_plant ] ) is_facility_only = logical_negate( is_facility, [ is_train_station, is_aircraft, is_airport, is_bridge, is_vehicle, is_astronomical_object, is_railroad, is_monument ] ) is_wikipedia_list_only = logical_negate( is_wikipedia_list, [ is_activity_subclass, is_alphabet, is_art_work, is_astronomical_object, is_audio_visual_work, is_award, is_character, is_character, is_chemical_compound, is_color, is_currency, is_disease, is_election, is_electromagnetic_wave, is_facility, is_fictional_character, is_gene, is_genre, is_geographical_object, is_human, is_language, is_law, is_law, is_legal_action, is_legal_case, is_legislative_term, is_mathematical_object, is_mind, is_people, is_person, is_person, is_physical_object, is_populated_place, is_position, is_region, is_religion, is_research_method, is_sexual_orientation, is_software, is_speech, is_sport, is_sport_event, is_stock_character, is_strategy, is_taxon_or_breed, is_value, is_vehicle, is_wikidata_prop, is_weapon ] ) is_sport_only = logical_negate( is_sport, [ is_sport_event ] ) is_legal_action_only = logical_negate( is_legal_action, [ is_law, is_election ] ) is_genre_only = logical_negate( is_genre, [ is_physical_object, is_audio_visual_work, is_art_work, is_book_magazine_article_proverb, is_concept ] ) is_plant_only = logical_negate( is_plant, [ is_food, is_human, is_organization ] ) is_kinship_kin_only = logical_negate( logical_or(is_kinship, is_kin), [ is_family ] ) is_position_only = logical_negate( is_position, [ is_organization, is_human ] ) is_radio_program_only = logical_negate( is_radio_program, [ is_audio_visual_work, ] ) is_taxonomic_rank_only = logical_negate( is_taxonomic_rank, [ is_order ] ) is_research_method_only = logical_negate( is_research_method, [ is_audio_visual_work, is_book_magazine_article_proverb, is_art_work, is_concept, is_crime, is_war, is_algorithm, is_law, is_legal_action, is_legal_case ] ) is_algorithm_only = logical_negate( is_algorithm, [ is_concept, is_paradigm ] ) is_union_only = logical_negate( is_union, [ is_kinship, is_human, is_person ] ) # get all the wikidata items that are disconnected: no_instance_subclass_or_cat_link = logical_ands( [ c.relation(relation_name).edges() == 0 for relation_name in [wprop.PART_OF, wprop.INSTANCE_OF, wprop.SUBCLASS_OF, wprop.CATEGORY_LINK] ] ) is_sports_terminology_only = logical_negate( is_sports_terminology, [ is_organization, is_human, is_person, is_activity, is_title, is_physical_quantity ] ) out = { "aaa_wikidata_prop": is_wikidata_prop, "aaa_wikipedia_disambiguation": is_wikipedia_disambiguation, "aaa_wikipedia_template_namespace": is_wikipedia_template_namespace, "aaa_wikipedia_user_language_template": is_wikipedia_user_language_template, "aaa_wikipedia_list": is_wikipedia_list_only, 
"aaa_wikipedia_project_page": is_wikipedia_project_page, "aaa_wikimedia_category_page": is_wikimedia_category_page, "aaa_no_instance_subclass_or_link": no_instance_subclass_or_cat_link, "taxon": is_taxon_or_breed_only, "human_male": is_human_male, "human_female": is_human_female, "human": is_human_only, "fictional_character": is_fictional_character_only, "people": is_people, "language": is_language_only, "alphabet": is_alphabet_only, "speech": is_speech, "gas": is_gas_only, "gene": is_gene, "molecule": is_molecule_only, "astronomical_object": is_astronomical_object_only, "disease": is_disease, "mind": is_mind, "song": is_song, "radio_program": is_radio_program_only, "law": is_law, "legal_action": is_legal_action_only, "book_magazine_article": is_book_magazine_article_proverb_only, "chemical_compound": is_chemical_compound_only, "geometric_shape": is_geometric_shape, "mathematical_object": is_mathematical_object_only, "physical_quantity": is_physical_quantity, "number": is_number_only, "geographical_object": is_geographical_object_only, "train_station": is_train_station, "railroad": is_railroad, "concept": is_concept_paradigm_proposition_only, "genre": is_genre_only, "sexual_orientation": is_sexual_orientation, "bridge": is_bridge, "airport": is_airport, "river": is_river, "lake": is_lake, "sea": is_sea, "weapon": is_weapon_only, "region": is_region_only, "country": is_country, "software": is_software_only, "website": is_website_only, "volcano": is_volcano, "mountain": is_mountain_only, "religion": is_religion, "organization": is_organization_only, "musical_work": is_musical_work_only, "other_art_work": is_art_work_only, "audio_visual_work": is_audio_visual_work, "physical_object": is_physical_object_only, "record_chart": is_record_chart, "clothing": is_clothing, "plant": is_plant_only, "anatomical_structure": is_anatomical_structure_only, "facility": is_facility_only, "monument": is_monument, "vehicle": is_vehicle_only, "watercraft": is_watercraft_only, "road_vehicle": is_road_vehicle_only, "vehicle_brand": is_vehicle_brand_only, "brand": is_brand_only, "aircraft": is_aircraft, "legal_case": is_legal_case, "position": is_position_only, "person_role": is_person_only, "populated_place": is_populated_place, "value": is_value_only, "unit_of_mass": is_unit_of_mass, "currency": is_currency, "postal_code": is_postal_code, "name": is_name, "data_format": is_data_format, "character": is_character, "family": is_family, "sport": is_sport_only, "taxonomic_rank": is_taxonomic_rank, "sex_toy": is_sex_toy, "legislative_term": is_legislative_term, "sport_event": is_sport_event, "date": is_date_only, "kinship": is_kinship_kin_only, "union": is_union_only, "research": is_research_method_only, "title": is_title, "hazard": is_hazard, "color": is_color, "sports_terminology": is_sports_terminology_only, "developmental_biology_period": is_development_biology_date, "strategy": is_strategy, "event": is_event_election_only, "natural_phenomenon": is_natural_phenomenon, "electromagnetic_wave": is_electromagnetic_wave, "war": is_war, "award": is_award, "crime": is_crime_only, "battle": is_battle_only, "international_relations": is_international_relations, "food": is_food, "algorithm": is_algorithm, "activity": is_activity_subclass_only, "award_ceremony": is_award_ceremony } # is_other = logical_not(logical_ors([val for key, val in out.items() if key != "aaa_wikipedia_list"])) # c.class_report([wprop.IS_A_LIST_OF, wprop.CATEGORY_LINK], logical_and( # is_other, # is_wikipedia_list_only # ), name="remaining lists") 
    return out
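# Example sketch (not part of the original file): how a classification function
# like classify(c) above is typically consumed. The loader import and its
# arguments below are assumptions, not taken from this file; `c` only needs to
# expose article2id, name2index, satisfy and relation as used by classify().
#
# from wikidata_linker_utils.type_collection import TypeCollection  # assumed API
# c = TypeCollection("/path/to/wikidata", num_names_to_load=0)      # hypothetical arguments
# out = classify(c)
# # each value is a boolean numpy array with one entry per wikidata item:
# print(out["human"].sum(), "items marked as human")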
import os
import argparse
import numpy as np
import gym
from gym.envs.atari.atari_env import ACTION_MEANING
import pygame

from atari_demo.wrappers import AtariDemo

parser = argparse.ArgumentParser()
parser.add_argument('-g', '--game', type=str, default='MontezumaRevenge')
parser.add_argument('-f', '--frame_rate', type=int, default=60)
parser.add_argument('-y', '--screen_height', type=int, default=840)
parser.add_argument('-d', '--save_dir', type=str, default=None)
parser.add_argument('-s', '--frame_skip', type=int, default=4)
args = parser.parse_args()

if args.save_dir is None:
    save_dir = os.path.join(os.getcwd(), 'demos')
else:
    save_dir = args.save_dir
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
demo_file_name = os.path.join(save_dir, args.game + '.demo')

# //////// set up gym + atari part /////////

ACTION_KEYS = {
    "NOOP": set(),
    "FIRE": {'space'},
    "UP": {'up'},
    "RIGHT": {'right'},
    "LEFT": {'left'},
    "DOWN": {'down'},
    "UPRIGHT": {'up', 'right'},
    "UPLEFT": {'up', 'left'},
    "DOWNRIGHT": {'down', 'right'},
    "DOWNLEFT": {'down', 'left'},
    "UPFIRE": {'up', 'space'},
    "RIGHTFIRE": {'right', 'space'},
    "LEFTFIRE": {'left', 'space'},
    "DOWNFIRE": {'down', 'space'},
    "UPRIGHTFIRE": {'up', 'right', 'space'},
    "UPLEFTFIRE": {'up', 'left', 'space'},
    "DOWNRIGHTFIRE": {'down', 'right', 'space'},
    "DOWNLEFTFIRE": {'down', 'left', 'space'},
    "TIMETRAVEL": {'b'}
}

env = AtariDemo(gym.make(args.game + 'NoFrameskip-v4'))
available_actions = [ACTION_MEANING[i] for i in env.unwrapped._action_set] + ["TIMETRAVEL"]
env.reset()
loaded_previous = False
if os.path.exists(demo_file_name):
    env.load_from_file(demo_file_name)
    loaded_previous = True

def get_gym_action(key_presses):
    action = 0
    for i, action_name in enumerate(available_actions):
        if ACTION_KEYS[action_name].issubset(key_presses):
            action = i
    return action

# ///////// set up pygame part //////////

pygame.init()
screen_size = (int((args.screen_height / 210) * 160), args.screen_height)
screen = pygame.display.set_mode(screen_size)
small_screen = pygame.transform.scale(screen.copy(), (160, 210))
clock = pygame.time.Clock()
pygame.display.set_caption("Recording demonstration for " + args.game)

def show_text(text_lines):
    screen.fill((255, 255, 255))
    f1 = pygame.font.SysFont("", 30)
    for i, line in enumerate(text_lines):
        text = f1.render(line, True, (0, 0, 0))
        screen.blit(text, (50, 100 + 50 * i))
    pygame.display.flip()

def show_start_screen():
    text_lines = ["Recording demo for " + args.game,
                  "Control the game using the arrow keys and space bar",
                  "Hold <b> to go backward in time to fix mistakes",
                  "Press <s> to save the demo and exit",
                  "Press <SPACE BAR> to get started"]
    if loaded_previous:
        text_lines = text_lines[:1] + ["Continuing from previously recorded demo"] + text_lines[1:]
    show_text(text_lines)
    started = False
    while not started:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                key_name = pygame.key.name(event.key)
                if key_name == 'space':
                    started = True
        clock.tick(args.frame_rate)

def show_end_screen():
    text_lines = ["GAME OVER",
                  "Hold <b> to go backward in time",
                  "Press <s> to save the demo and exit"]
    show_text(text_lines)

def show_game_screen(observation):
    pygame.surfarray.blit_array(small_screen, np.transpose(observation, [1, 0, 2]))
    pygame.transform.scale(small_screen, screen_size, screen)
    pygame.display.flip()

key_is_pressed = set()

def process_key_presses():
    key_presses = set()
    quit = False
    save = False
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            quit = True
        elif event.type == pygame.KEYDOWN:
            key_name = pygame.key.name(event.key)
            key_presses.add(key_name)
            key_is_pressed.add(key_name)
        elif event.type == pygame.KEYUP:
            key_name = pygame.key.name(event.key)
            if key_name in key_is_pressed:
                key_is_pressed.remove(key_name)
            if key_name == 's':
                save = True
    key_presses.update(key_is_pressed)
    return key_presses, quit, save

# //////// run the game and record the demo! /////////

quit = False
done = False
show_start_screen()
while not quit:
    # process key presses & save when requested
    key_presses, quit, save = process_key_presses()
    if save:
        env.save_to_file(demo_file_name)
        quit = True

    # advance gym env
    action = get_gym_action(key_presses)
    for step in range(args.frame_skip):
        observation, reward, done, info = env.step(action)

    # show screen
    if done:
        show_end_screen()
    else:
        show_game_screen(observation)

    clock.tick(float(args.frame_rate) / args.frame_skip)
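# Usage note (not part of the original script): assuming the recorder above is
# saved as record_demo.py (the file name is hypothetical), a demo could be
# recorded with, e.g.,
#
#     python record_demo.py -g MontezumaRevenge -s 4 -d ./demos
#
# If ./demos/MontezumaRevenge.demo already exists the script resumes from it,
# and pressing <s> in the window saves the recorded actions back to that file.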
import numpy as np
from multiprocessing import Process, Pipe
import gym
from baselines.common.vec_env.subproc_vec_env import CloudpickleWrapper

class ClonedEnv(gym.Wrapper):
    def __init__(self, env, possible_actions_dict, best_action_dict, seed):
        gym.Wrapper.__init__(self, env)
        self.possible_actions_dict = possible_actions_dict
        self.best_action_dict = best_action_dict
        self.state = None
        self.rng = np.random.RandomState(seed)
        self.just_initialized = True
        self.l = 0
        self.r = 0

    def step(self, action=None):
        if self.state in self.possible_actions_dict:
            possible_actions = list(self.possible_actions_dict[self.state])
            action = possible_actions[self.rng.randint(len(possible_actions))]
            obs, reward, done, info = self.env.step(action)
            self.l += 1
            self.r += reward
            self.state = self.env.unwrapped._get_ram().tostring()
            if self.state in self.possible_actions_dict:  # still in known territory
                info['possible_actions'] = self.possible_actions_dict[self.state]
                if self.state in self.best_action_dict:
                    info['best_action'] = self.best_action_dict[self.state]
            else:
                done = True
                past_l = self.l
                past_r = self.r
                self.l = 0
                self.r = 0
                if past_l > 0:
                    info['episode'] = {'r': past_r, 'l': past_l}
        else:
            raise Exception('stepping cloned env without resetting')

        return obs, reward, done, info

    def reset(self):
        obs = self.env.reset()
        if isinstance(obs, tuple):
            obs, info = obs
        else:
            info = {}
        self.state = self.env.unwrapped._get_ram().tostring()
        if self.state in self.best_action_dict:
            info['best_action'] = self.best_action_dict[self.state]
        for randop in range(self.rng.randint(30)):  # randomize starting point
            obs, reward, done, info = self._step(None)
        if self.just_initialized:
            self.just_initialized = False
            for randops in range(self.rng.randint(50000)):  # randomize starting point further
                obs, reward, done, info = self._step(None)
                if done:
                    obs, info = self._reset()
        return obs, info

def get_best_actions_from_infos(infos):
    k = len(infos)
    best_actions = [0] * k
    action_masks = [1] * k
    for i in range(k):
        if 'best_action' in infos[i]:
            best_actions[i] = infos[i]['best_action']
            action_masks[i] = 0
    return best_actions, action_masks

def get_available_actions_from_infos(infos, n_actions):
    k = len(infos)
    best_actions = np.zeros((k, n_actions), dtype=np.uint8)
    action_masks = [1] * k
    for i in range(k):
        if 'possible_actions' in infos[i]:
            action_masks[i] = 0
            for j in infos[i]['possible_actions']:
                best_actions[i, j] = 1
    return best_actions, action_masks

def worker2(nr, remote, env_fn_wrapper, mode):
    env = env_fn_wrapper.x()
    while True:
        cmd, count = remote.recv()
        if cmd == 'step':
            obs = []
            rews = []
            dones = []
            infos = []
            for step in range(count):
                ob, reward, done, info = env.step(0)  # action is ignored in ClonedEnv downstream
                if done:
                    ob = env.reset()
                    if isinstance(ob, tuple):
                        ob, new_info = ob
                        info.update(new_info)
                if 'episode' in info:
                    epinfo = info['episode']
                    print('simulator thread %d completed demo run with total return %d obtained in %d steps' % (nr, epinfo["r"], epinfo["l"]))
                obs.append(ob)
                rews.append(reward)
                dones.append(done)
                infos.append(info)
            if mode == 'best':
                best_actions, action_masks = get_best_actions_from_infos(infos)
            else:
                best_actions, action_masks = get_available_actions_from_infos(infos, env.action_space.n)
            remote.send((obs, rews, dones, best_actions, action_masks))
        elif cmd == 'reset':
            ob = env.reset()
            if isinstance(ob, tuple):
                ob, info = ob
            else:
                info = {}
            remote.send((ob, info))
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.action_space, env.observation_space))
        else:
            raise NotImplementedError(str(cmd) + ' action not implemented in worker')

class ClonedVecEnv(object):
    def __init__(self, env_fns, mode='best'):
        self.nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(self.nenvs)])
        self.ps = [Process(target=worker2, args=(nr, work_remote, CloudpickleWrapper(env_fn), mode))
                   for (nr, work_remote, env_fn) in zip(range(self.nenvs), self.work_remotes, env_fns)]
        for p in self.ps:
            p.start()

        self.remotes[0].send(('get_spaces', None))
        self.action_space, self.observation_space = self.remotes[0].recv()
        self.steps_taken = 0

    def step(self, time_steps=128):
        for remote in self.remotes:
            remote.send(('step', time_steps))
        results = [remote.recv() for remote in self.remotes]
        obs, rews, dones, best_actions, action_masks = [np.stack(x) for x in zip(*results)]
        return obs, rews, dones, best_actions, action_masks

    def reset(self):
        for remote in self.remotes:
            remote.send(('reset', None))
        results = [remote.recv() for remote in self.remotes]
        obs, infos = zip(*results)
        best_actions, action_masks = [np.stack(x) for x in get_best_actions_from_infos(infos)]
        return np.stack(obs), best_actions, action_masks

    def close(self):
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()

def make_cloned_vec_env(nenvs, env_id, possible_actions_dict, best_action_dict, wrappers, mode='best'):
    def make_env(rank):
        def env_fn():
            env = gym.make(env_id)
            env = ClonedEnv(env, possible_actions_dict, best_action_dict, rank)
            env = wrappers(env)
            return env
        return env_fn
    return ClonedVecEnv([make_env(i) for i in range(nenvs)], mode)
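# Example sketch (not part of the original file): wiring up the vectorized
# cloned environment above. The two dictionaries are hypothetical here; in this
# code they map an ALE RAM snapshot (env.unwrapped._get_ram().tostring()) to
# the set of demo actions seen from that state and to a single preferred
# action, respectively.
#
# possible_actions_dict, best_action_dict = ...  # built elsewhere from recorded demos
# venv = make_cloned_vec_env(nenvs=8, env_id='MontezumaRevengeNoFrameskip-v4',
#                            possible_actions_dict=possible_actions_dict,
#                            best_action_dict=best_action_dict,
#                            wrappers=lambda env: env, mode='best')
# obs, best_actions, action_masks = venv.reset()
# obs, rews, dones, best_actions, action_masks = venv.step(time_steps=128)
# venv.close()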
import pickle
import sys
import os

def save_as_pickled_object(obj, filepath):
    """
    This is a defensive way to write pickle.write, allowing for very large files on all platforms
    """
    max_bytes = 2**31 - 1
    bytes_out = pickle.dumps(obj)
    n_bytes = sys.getsizeof(bytes_out)
    with open(filepath, 'wb') as f_out:
        for idx in range(0, n_bytes, max_bytes):
            f_out.write(bytes_out[idx:idx + max_bytes])

def load_as_pickled_object(filepath):
    """
    This is a defensive way to write pickle.load, allowing for very large files on all platforms
    """
    max_bytes = 2**31 - 1
    try:
        input_size = os.path.getsize(filepath)
        bytes_in = bytearray(0)
        with open(filepath, 'rb') as f_in:
            for _ in range(0, input_size, max_bytes):
                bytes_in += f_in.read(max_bytes)
        obj = pickle.loads(bytes_in)
    except:
        return None
    return obj
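# Example (not part of the original file): round-tripping an object through the
# chunked helpers above; the file path is hypothetical.
#
# data = {'weights': list(range(10**6))}
# save_as_pickled_object(data, '/tmp/data.pkl')
# restored = load_as_pickled_object('/tmp/data.pkl')  # returns None if loading fails
# assert restored == data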
import pickle

import gym
from gym import spaces

class AtariDemo(gym.Wrapper):
    """
    Records actions taken, creates checkpoints, allows time travel, restoring and saving of states
    """

    def __init__(self, env, disable_time_travel=False):
        super(AtariDemo, self).__init__(env)
        self.action_space = spaces.Discrete(len(env.unwrapped._action_set) + 1)  # add "time travel" action
        self.save_every_k = 100
        self.max_time_travel_steps = 10000
        self.disable_time_travel = disable_time_travel

    def step(self, action):
        if action >= len(self.env.unwrapped._action_set):
            if self.disable_time_travel:
                obs, reward, done, info = self.env.step(0)
            else:
                obs, reward, done, info = self.time_travel()
        else:
            if self.steps_in_the_past > 0:
                self.restore_past_state()

            if len(self.done) > 0 and self.done[-1]:
                obs = self.obs[-1]
                reward = 0
                done = True
                info = None
            else:
                self.lives.append(self.env.unwrapped.ale.lives())
                obs, reward, done, info = self.env.step(action)
                self.actions.append(action)
                self.obs.append(obs)
                self.rewards.append(reward)
                self.done.append(done)
                self.info.append(info)

            # periodic checkpoint saving
            if not done:
                if (len(self.checkpoint_action_nr) > 0 and len(self.actions) >= self.checkpoint_action_nr[-1] + self.save_every_k) \
                        or (len(self.checkpoint_action_nr) == 0 and len(self.actions) >= self.save_every_k):
                    self.save_checkpoint()

        return obs, reward, done, info

    def reset(self):
        obs = self.env.reset()
        self.actions = []
        self.lives = []
        self.checkpoints = []
        self.checkpoint_action_nr = []
        self.obs = [obs]
        self.rewards = []
        self.done = [False]
        self.info = [None]
        self.steps_in_the_past = 0
        return obs

    def time_travel(self):
        if len(self.obs) > 1:
            reward = self.rewards.pop()
            self.obs.pop()
            self.done.pop()
            self.info.pop()
            self.lives.pop()
            obs = self.obs[-1]
            done = self.done[-1]
            info = self.info[-1]
            self.steps_in_the_past += 1
        else:  # reached time travel limit
            reward = 0
            obs = self.obs[0]
            done = self.done[0]
            info = self.info[0]

        # rewards are differences in subsequent state values, and so should get reversed sign when going backward in time
        reward = -reward

        return obs, reward, done, info

    def save_to_file(self, file_name):
        dat = {'actions': self.actions,
               'checkpoints': self.checkpoints,
               'checkpoint_action_nr': self.checkpoint_action_nr,
               'rewards': self.rewards,
               'lives': self.lives}
        with open(file_name, "wb") as f:
            pickle.dump(dat, f)

    def load_from_file(self, file_name):
        self.reset()
        with open(file_name, "rb") as f:
            dat = pickle.load(f)
        self.actions = dat['actions']
        self.checkpoints = dat['checkpoints']
        self.checkpoint_action_nr = dat['checkpoint_action_nr']
        self.rewards = dat['rewards']
        self.lives = dat['lives']
        self.load_state_and_walk_forward()

    def save_checkpoint(self):
        chk_pnt = self.env.unwrapped.clone_state()
        self.checkpoints.append(chk_pnt)
        self.checkpoint_action_nr.append(len(self.actions))

    def restore_past_state(self):
        self.actions = self.actions[:-self.steps_in_the_past]
        while len(self.checkpoints) > 0 and self.checkpoint_action_nr[-1] > len(self.actions):
            self.checkpoints.pop()
            self.checkpoint_action_nr.pop()
        self.load_state_and_walk_forward()
        self.steps_in_the_past = 0

    def load_state_and_walk_forward(self):
        if len(self.checkpoints) == 0:
            self.env.reset()
            time_step = 0
        else:
            self.env.unwrapped.restore_state(self.checkpoints[-1])
            time_step = self.checkpoint_action_nr[-1]
        for a in self.actions[time_step:]:
            action = self.env.unwrapped._action_set[a]
            self.env.unwrapped.ale.act(action)
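# Example sketch (not part of the original file): wrapping an Atari env the same
# way the demo-recording script earlier in this dump does. The last action index
# (len(env.unwrapped._action_set)) is the added "time travel" action; the demo
# file name is hypothetical.
#
# import gym
# env = AtariDemo(gym.make('MontezumaRevengeNoFrameskip-v4'))
# obs = env.reset()
# obs, r, done, info = env.step(0)                       # normal step, gets recorded
# obs, r, done, info = env.step(env.action_space.n - 1)  # one step backward in time
# env.save_to_file('/tmp/MontezumaRevenge.demo')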
import distutils.util

platform = distutils.util.get_platform()
# technically, our platform is not actually multilinux... so this may fail in some distros
# however, tested in python:3.6 docker image (by construction)
# and in ubuntu:16.04
platform = platform.replace('linux', 'manylinux1')
print(platform)

import os
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import subprocess
import sys

with open(os.path.join(os.path.dirname(__file__), 'atari_py', 'package_data.txt')) as f:
    package_data = [line.rstrip() for line in f.readlines()]

class Build(build_ext):
    def run(self):
        if os.name != 'posix' and not self.inplace:
            # silly patch to disable build steps on windows, as we are doing compilation externally
            return
        try:
            cwd = os.path.join('' if self.inplace else self.build_lib, 'atari_py', 'ale_interface', 'build')
            if not os.path.exists(cwd):
                os.makedirs(cwd)
            subprocess.check_call(['cmake', '..'], cwd=cwd)
            subprocess.check_call(['cmake', '--build', '.'], cwd=cwd)
        except subprocess.CalledProcessError as e:
            sys.stderr.write("Could not build atari-py: %s. (HINT: are you sure cmake is installed? You might also be missing a library. Atari-py requires: zlib [installable as 'apt-get install zlib1g-dev' on Ubuntu].)\n" % e)
            raise

class CMakeExtension(Extension):
    def __init__(self, name, sourcedir=''):
        Extension.__init__(self, name, sources=[])
        self.sourcedir = os.path.abspath(sourcedir)

setup(name='atari-py',
      version='0.3.0',
      description='Python bindings to Atari games',
      url='https://github.com/openai/atari-py',
      author='OpenAI',
      author_email='info@openai.com',
      license='',
      packages=['atari_py'],
      package_data={'atari_py': package_data},
      ext_modules=[CMakeExtension('atari_py')],
      cmdclass={'build_ext': Build},
      install_requires=['numpy', 'six'],
      tests_require=['nose2'])

import sys

from .ale_python_interface import *
from .games import get_game_path, list_games

print(
    "[NOTICE] atari-py is deprecated in favor ale-py "
    "and will no longer receive further maintenance or critical updates. "
    "ale-py is fully backwards compatible with atari-py. "
    "If you're using Gym, you can simply upgrade via pip install -U gym[atari]",
    file=sys.stderr,
)

# default to only logging errors
ALEInterface.setLoggerMode(ALEInterface.Logger.Error)

import os

SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))

try:
    import atari_py_roms
    _games_dir = os.path.join(atari_py_roms.__path__[0], "atari_roms")
except ImportError:
    _games_dir = os.path.join(SCRIPT_DIR, "atari_roms")

def get_games_dir():
    return _games_dir

def get_game_path(game_name):
    path = os.path.join(_games_dir, game_name) + ".bin"
    if not os.path.exists(path):
        raise Exception('ROM is missing for %s, see https://github.com/openai/atari-py#roms for instructions' % (game_name,))
    return path

def list_games():
    files = os.listdir(_games_dir)
    return [os.path.basename(f).split(".")[0] for f in files]
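# Example sketch (not part of the original files): resolving a ROM path with the
# helpers above and running a minimal emulation loop with the ALEInterface
# bindings defined in the module that follows. 'pong' is only an illustration;
# its ROM must already have been imported (see list_games()).
#
# from atari_py import ALEInterface, get_game_path, list_games
# print(list_games()[:5])
# ale = ALEInterface()
# ale.setInt('random_seed', 123)
# ale.loadROM(get_game_path('pong'))
# actions = ale.getMinimalActionSet()
# total = 0
# while not ale.game_over():
#     total += ale.act(actions[0])  # always press the first minimal action
# print('episode reward:', total)
# ale.reset_game()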
# ale_python_interface.py
# Author: Ben Goodrich
# This directly implements a python version of the arcade learning
# environment interface.

__all__ = ['ALEInterface']

from ctypes import *
import numpy as np
from numpy.ctypeslib import as_ctypes
import os
import six

if os.name == 'posix':
    ale_lib = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__),
                                            'ale_interface/libale_c.so'))
else:
    ale_lib = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__),
                                            'ale_interface/ale_c.dll'))

ale_lib.ALE_new.argtypes = None
ale_lib.ALE_new.restype = c_void_p
ale_lib.ALE_del.argtypes = [c_void_p]
ale_lib.ALE_del.restype = None
ale_lib.getString.argtypes = [c_void_p, c_char_p]
ale_lib.getString.restype = c_char_p
ale_lib.getInt.argtypes = [c_void_p, c_char_p]
ale_lib.getInt.restype = c_int
ale_lib.getBool.argtypes = [c_void_p, c_char_p]
ale_lib.getBool.restype = c_bool
ale_lib.getFloat.argtypes = [c_void_p, c_char_p]
ale_lib.getFloat.restype = c_float
ale_lib.setString.argtypes = [c_void_p, c_char_p, c_char_p]
ale_lib.setString.restype = None
ale_lib.setInt.argtypes = [c_void_p, c_char_p, c_int]
ale_lib.setInt.restype = None
ale_lib.setBool.argtypes = [c_void_p, c_char_p, c_bool]
ale_lib.setBool.restype = None
ale_lib.setFloat.argtypes = [c_void_p, c_char_p, c_float]
ale_lib.setFloat.restype = None
ale_lib.loadROM.argtypes = [c_void_p, c_char_p]
ale_lib.loadROM.restype = None
ale_lib.act.argtypes = [c_void_p, c_int]
ale_lib.act.restype = c_int
ale_lib.game_over.argtypes = [c_void_p]
ale_lib.game_over.restype = c_bool
ale_lib.reset_game.argtypes = [c_void_p]
ale_lib.reset_game.restype = None
ale_lib.getAvailableModes.argtypes = [c_void_p, c_void_p]
ale_lib.getAvailableModes.restype = None
ale_lib.getAvailableModesSize.argtypes = [c_void_p]
ale_lib.getAvailableModesSize.restype = c_int
ale_lib.setMode.argtypes = [c_void_p, c_int]
ale_lib.setMode.restype = None
ale_lib.getAvailableDifficulties.argtypes = [c_void_p, c_void_p]
ale_lib.getAvailableDifficulties.restype = None
ale_lib.getAvailableDifficultiesSize.argtypes = [c_void_p]
ale_lib.getAvailableDifficultiesSize.restype = c_int
ale_lib.setDifficulty.argtypes = [c_void_p, c_int]
ale_lib.setDifficulty.restype = None
ale_lib.getLegalActionSet.argtypes = [c_void_p, c_void_p]
ale_lib.getLegalActionSet.restype = None
ale_lib.getLegalActionSize.argtypes = [c_void_p]
ale_lib.getLegalActionSize.restype = c_int
ale_lib.getMinimalActionSet.argtypes = [c_void_p, c_void_p]
ale_lib.getMinimalActionSet.restype = None
ale_lib.getMinimalActionSize.argtypes = [c_void_p]
ale_lib.getMinimalActionSize.restype = c_int
ale_lib.getFrameNumber.argtypes = [c_void_p]
ale_lib.getFrameNumber.restype = c_int
ale_lib.lives.argtypes = [c_void_p]
ale_lib.lives.restype = c_int
ale_lib.getEpisodeFrameNumber.argtypes = [c_void_p]
ale_lib.getEpisodeFrameNumber.restype = c_int
ale_lib.getScreen.argtypes = [c_void_p, c_void_p]
ale_lib.getScreen.restype = None
ale_lib.getRAM.argtypes = [c_void_p, c_void_p]
ale_lib.getRAM.restype = None
ale_lib.getRAMSize.argtypes = [c_void_p]
ale_lib.getRAMSize.restype = c_int
ale_lib.getScreenWidth.argtypes = [c_void_p]
ale_lib.getScreenWidth.restype = c_int
ale_lib.getScreenHeight.argtypes = [c_void_p]
ale_lib.getScreenHeight.restype = c_int
ale_lib.getScreenRGB.argtypes = [c_void_p, c_void_p]
ale_lib.getScreenRGB.restype = None
ale_lib.getScreenRGB2.argtypes = [c_void_p, c_void_p]
ale_lib.getScreenRGB2.restype = None
ale_lib.getScreenGrayscale.argtypes = [c_void_p, c_void_p]
ale_lib.getScreenGrayscale.restype = None
ale_lib.saveState.argtypes = [c_void_p]
ale_lib.saveState.restype = None
ale_lib.loadState.argtypes = [c_void_p]
ale_lib.loadState.restype = None
ale_lib.cloneState.argtypes = [c_void_p]
ale_lib.cloneState.restype = c_void_p
ale_lib.restoreState.argtypes = [c_void_p, c_void_p]
ale_lib.restoreState.restype = None
ale_lib.cloneSystemState.argtypes = [c_void_p]
ale_lib.cloneSystemState.restype = c_void_p
ale_lib.restoreSystemState.argtypes = [c_void_p, c_void_p]
ale_lib.restoreSystemState.restype = None
ale_lib.deleteState.argtypes = [c_void_p]
ale_lib.deleteState.restype = None
ale_lib.saveScreenPNG.argtypes = [c_void_p, c_char_p]
ale_lib.saveScreenPNG.restype = None
ale_lib.encodeState.argtypes = [c_void_p, c_void_p, c_int]
ale_lib.encodeState.restype = None
ale_lib.encodeStateLen.argtypes = [c_void_p]
ale_lib.encodeStateLen.restype = c_int
ale_lib.decodeState.argtypes = [c_void_p, c_int]
ale_lib.decodeState.restype = c_void_p
ale_lib.setLoggerMode.argtypes = [c_int]
ale_lib.setLoggerMode.restype = None

def _as_bytes(s):
    if hasattr(s, 'encode'):
        return s.encode('utf8')
    return s

class ALEInterface(object):
    # Logger enum
    class Logger:
        Info = 0
        Warning = 1
        Error = 2

    def __init__(self):
        self.obj = ale_lib.ALE_new()

    def getString(self, key):
        return ale_lib.getString(self.obj, _as_bytes(key))

    def getInt(self, key):
        return ale_lib.getInt(self.obj, _as_bytes(key))

    def getBool(self, key):
        return ale_lib.getBool(self.obj, _as_bytes(key))

    def getFloat(self, key):
        return ale_lib.getFloat(self.obj, _as_bytes(key))

    def setString(self, key, value):
        ale_lib.setString(self.obj, _as_bytes(key), _as_bytes(value))

    def setInt(self, key, value):
        ale_lib.setInt(self.obj, _as_bytes(key), int(value))

    def setBool(self, key, value):
        ale_lib.setBool(self.obj, _as_bytes(key), bool(value))

    def setFloat(self, key, value):
        ale_lib.setFloat(self.obj, _as_bytes(key), float(value))

    def loadROM(self, rom_file):
        ale_lib.loadROM(self.obj, _as_bytes(rom_file))

    def act(self, action):
        return ale_lib.act(self.obj, int(action))

    def game_over(self):
        return ale_lib.game_over(self.obj)

    def reset_game(self):
        ale_lib.reset_game(self.obj)

    def getLegalActionSet(self):
        act_size = ale_lib.getLegalActionSize(self.obj)
        act = np.zeros((act_size), dtype=np.intc)
        ale_lib.getLegalActionSet(self.obj, as_ctypes(act))
        return act

    def getMinimalActionSet(self):
        act_size = ale_lib.getMinimalActionSize(self.obj)
        act = np.zeros((act_size), dtype=np.intc)
        ale_lib.getMinimalActionSet(self.obj, as_ctypes(act))
        return act

    def getAvailableModes(self):
        modes_size = ale_lib.getAvailableModesSize(self.obj)
        modes = np.zeros((modes_size), dtype=np.intc)
        ale_lib.getAvailableModes(self.obj, as_ctypes(modes))
        return modes

    def setMode(self, mode):
        ale_lib.setMode(self.obj, int(mode))

    def getAvailableDifficulties(self):
        difficulties_size = ale_lib.getAvailableDifficultiesSize(self.obj)
        difficulties = np.zeros((difficulties_size), dtype=np.intc)
        ale_lib.getAvailableDifficulties(self.obj, as_ctypes(difficulties))
        return difficulties

    def setDifficulty(self, difficulty):
        ale_lib.setDifficulty(self.obj, int(difficulty))

    def getLegalActionSet(self):
        act_size = ale_lib.getLegalActionSize(self.obj)
        act = np.zeros((act_size), dtype=np.intc)
        ale_lib.getLegalActionSet(self.obj, as_ctypes(act))
        return act

    def getMinimalActionSet(self):
        act_size = ale_lib.getMinimalActionSize(self.obj)
        act = np.zeros((act_size), dtype=np.intc)
        ale_lib.getMinimalActionSet(self.obj, as_ctypes(act))
        return act

    def getFrameNumber(self):
        return ale_lib.getFrameNumber(self.obj)

    def lives(self):
        return ale_lib.lives(self.obj)

    def getEpisodeFrameNumber(self):
        return ale_lib.getEpisodeFrameNumber(self.obj)
    def getScreenDims(self):
        """returns a tuple that contains (screen_width, screen_height)
        """
        width = ale_lib.getScreenWidth(self.obj)
        height = ale_lib.getScreenHeight(self.obj)
        return (width, height)

    def getScreen(self, screen_data=None):
        """This function fills screen_data with the RAW Pixel data
        screen_data MUST be a numpy array of uint8/int8. This could be initialized like so:
        screen_data = np.empty(w*h, dtype=np.uint8)
        Notice, it must be width*height in size also
        If it is None, then this function will initialize it
        Note: This is the raw pixel values from the atari, before any RGB palette transformation takes place
        """
        if(screen_data is None):
            width = ale_lib.getScreenWidth(self.obj)
            height = ale_lib.getScreenHeight(self.obj)
            screen_data = np.zeros(width*height, dtype=np.uint8)
        ale_lib.getScreen(self.obj, as_ctypes(screen_data))
        return screen_data

    def getScreenRGB(self, screen_data=None):
        """This function fills screen_data with the data in RGB format
        screen_data MUST be a numpy array of uint8. This can be initialized like so:
        screen_data = np.empty((height,width,3), dtype=np.uint8)
        If it is None, then this function will initialize it.

        On little-endian machines like x86, the channels are BGR order:
            screen_data[x, y, 0:3] is [blue, green, red]

        On big-endian machines (rare in 2017) the channels would be the opposite order.

        There's not much error checking here: if you supply an array that's too small
        this function will produce undefined behavior.
        """
        if(screen_data is None):
            width = ale_lib.getScreenWidth(self.obj)
            height = ale_lib.getScreenHeight(self.obj)
            screen_data = np.empty((height, width, 3), dtype=np.uint8)
        ale_lib.getScreenRGB(self.obj, as_ctypes(screen_data[:]))
        return screen_data

    def getScreenRGB2(self, screen_data=None):
        """This function fills screen_data with the data in RGB format.
        screen_data MUST be a numpy array of uint8. This can be initialized like so:
        screen_data = np.empty((height,width,3), dtype=np.uint8)
        If it is None, then this function will initialize it.

        On all architectures, the channels are RGB order:
            screen_data[x, y, :] is [red, green, blue]

        There's not much error checking here: if you supply an array that's too small
        this function will produce undefined behavior.
        """
        if(screen_data is None):
            width = ale_lib.getScreenWidth(self.obj)
            height = ale_lib.getScreenHeight(self.obj)
            screen_data = np.empty((height, width, 3), dtype=np.uint8)
        assert screen_data.strides == (480, 3, 1)
        ale_lib.getScreenRGB2(self.obj, as_ctypes(screen_data[:]))
        return screen_data

    def getScreenGrayscale(self, screen_data=None):
        """This function fills screen_data with the data in grayscale
        screen_data MUST be a numpy array of uint8. This can be initialized like so:
        screen_data = np.empty((height,width,1), dtype=np.uint8)
        If it is None, then this function will initialize it.
        """
        if(screen_data is None):
            width = ale_lib.getScreenWidth(self.obj)
            height = ale_lib.getScreenHeight(self.obj)
            screen_data = np.empty((height, width, 1), dtype=np.uint8)
        ale_lib.getScreenGrayscale(self.obj, as_ctypes(screen_data[:]))
        return screen_data

    def getRAMSize(self):
        return ale_lib.getRAMSize(self.obj)

    def getRAM(self, ram=None):
        """This function grabs the atari RAM.
        ram MUST be a numpy array of uint8/int8. This can be initialized like so:
        ram = np.array(ram_size, dtype=uint8)
        Notice: It must be ram_size where ram_size can be retrieved via the getRAMSize function.
        If it is None, then this function will initialize it.
        """
        if(ram is None):
            ram_size = ale_lib.getRAMSize(self.obj)
            ram = np.zeros(ram_size, dtype=np.uint8)
        ale_lib.getRAM(self.obj, as_ctypes(ram))
        return ram

    def saveScreenPNG(self, filename):
        """Save the current screen as a png file"""
        return ale_lib.saveScreenPNG(self.obj, _as_bytes(filename))

    def saveState(self):
        """Saves the state of the system"""
        return ale_lib.saveState(self.obj)

    def loadState(self):
        """Loads the state of the system"""
        return ale_lib.loadState(self.obj)

    def cloneState(self):
        """This makes a copy of the environment state. This copy does *not*
        include pseudorandomness, making it suitable for planning
        purposes. By contrast, see cloneSystemState.
        """
        return ale_lib.cloneState(self.obj)

    def restoreState(self, state):
        """Reverse operation of cloneState(). This does not restore
        pseudorandomness, so that repeated calls to restoreState() in the
        stochastic controls setting will not lead to the same outcomes. By
        contrast, see restoreSystemState.
        """
        ale_lib.restoreState(self.obj, state)

    def cloneSystemState(self):
        """This makes a copy of the system & environment state, suitable for
        serialization. This includes pseudorandomness and so is *not* suitable
        for planning purposes.
        """
        return ale_lib.cloneSystemState(self.obj)

    def restoreSystemState(self, state):
        """Reverse operation of cloneSystemState."""
        ale_lib.restoreSystemState(self.obj, state)

    def deleteState(self, state):
        """ Deallocates the ALEState """
        ale_lib.deleteState(state)

    def encodeStateLen(self, state):
        return ale_lib.encodeStateLen(state)

    def encodeState(self, state, buf=None):
        if buf == None:
            length = ale_lib.encodeStateLen(state)
            buf = np.zeros(length, dtype=np.uint8)
        ale_lib.encodeState(state, as_ctypes(buf), c_int(len(buf)))
        return buf

    def decodeState(self, serialized):
        return ale_lib.decodeState(as_ctypes(serialized), len(serialized))

    def __del__(self):
        ale_lib.ALE_del(self.obj)

    @staticmethod
    def setLoggerMode(mode):
        dic = {'info': 0, 'warning': 1, 'error': 2}
        mode = dic.get(mode, mode)
        assert mode in [0, 1, 2], "Invalid Mode! Mode must be one of 0: info, 1: warning, 2: error"
        ale_lib.setLoggerMode(mode)
import os
import hashlib
import shutil
import zipfile
import argparse
import io

from .games import get_games_dir

SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
MD5_CHUNK_SIZE = 8096


def _check_zipfile(f, process_f):
    with zipfile.ZipFile(f) as zf:
        for entry in zf.infolist():
            _root, ext = os.path.splitext(entry.filename)
            with zf.open(entry) as innerf:
                if ext == ".zip":
                    _check_zipfile(innerf, process_f)
                else:
                    process_f(innerf)


def _calc_md5(f):
    h = hashlib.md5()
    while True:
        chunk = f.read(MD5_CHUNK_SIZE)
        if chunk == b'':
            break
        h.update(chunk)
    return h.hexdigest()


def import_roms(dirpath="."):
    md5s = {}
    copied_md5s = set()
    with open(os.path.join(SCRIPT_DIR, "ale_interface", "md5.txt")) as f:
        f.readline()
        f.readline()
        for line in f:
            hexdigest, filename = line.strip().split(' ')
            md5s[hexdigest] = os.path.join(get_games_dir(), filename)

    def save_if_matches(f):
        hexdigest = _calc_md5(f)
        if hexdigest == "ce5cc62608be2cd3ed8abd844efb8919":
            # the ALE version of road_runner.bin is not easily available
            # patch this file instead to match the correct data
            delta = {4090: 216, 4091: 111, 4092: 216, 4093: 111, 4094: 216, 4095: 111,
                     8186: 18, 8187: 43, 8188: -216, 8189: 49, 8190: -216, 8191: 49,
                     12281: 234, 12282: 18, 12283: 11, 12284: -216, 12285: 17, 12286: -216, 12287: 17,
                     16378: 18, 16379: -21, 16380: -216, 16381: -15, 16382: -216, 16383: -15}
            f.seek(0)
            data = bytearray(f.read())
            for index, offset in delta.items():
                data[index] += offset
            name = f"patched version of {f.name}"
            f = io.BytesIO(bytes(data))
            f.name = name
            hexdigest = _calc_md5(f)

        if hexdigest in md5s and hexdigest not in copied_md5s:
            copied_md5s.add(hexdigest)
            rom_path = md5s[hexdigest]
            print(f"copying {os.path.basename(rom_path)} from {f.name} to {rom_path}")
            os.makedirs(os.path.dirname(rom_path), exist_ok=True)
            f.seek(0)
            with open(rom_path, "wb") as out_f:
                shutil.copyfileobj(f, out_f)

    for root, dirs, files in os.walk(dirpath):
        for filename in files:
            filepath = os.path.join(root, filename)
            with open(filepath, "rb") as f:
                _root, ext = os.path.splitext(filename)
                if ext == ".zip":
                    try:
                        _check_zipfile(f, save_if_matches)
                    except zipfile.BadZipFile:
                        pass
                else:
                    save_if_matches(f)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("dirpath", help="path to directory containing extracted ROM files")
    args = parser.parse_args()
    import_roms(args.dirpath)


if __name__ == "__main__":
    main()
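# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original sources): the ROM importer above is
# normally driven from the command line (typically `python -m
# atari_py.import_roms <dirpath>`, assuming the module is installed under that
# name), but it can also be called directly; the path below is a placeholder.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from atari_py.import_roms import import_roms

    # Walks the directory (descending into .zip archives), MD5-checks each file
    # against ale_interface/md5.txt, and copies matches into the games directory.
    import_roms('/path/to/extracted/roms')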
import atari_py
import numpy as np


def test_smoke():
    game_path = atari_py.get_game_path('tetris')
    ale = atari_py.ALEInterface()
    ale.loadROM(game_path)
    action_set = ale.getMinimalActionSet()

    # Test stepping
    ale.act(action_set[0])

    # Test screen capture
    (screen_width, screen_height) = ale.getScreenDims()
    arr = np.zeros((screen_height, screen_width, 4), dtype=np.uint8)
    ale.getScreenRGB(arr)


if __name__ == '__main__':
    print('smoke test')
    test_smoke()
    print('done!')


#!/usr/bin/env python
# python_example.py
# Author: Ben Goodrich
#
# This is a direct port to python of the shared library example from
# ALE provided in doc/examples/sharedLibraryInterfaceExample.cpp
from __future__ import print_function

import sys
from random import randrange
from atari_py import ALEInterface

if len(sys.argv) < 2:
    print('Usage:', sys.argv[0], 'rom_file')
    sys.exit()

ale = ALEInterface()

# Get & Set the desired settings
ale.setInt('random_seed', 123)

# Set USE_SDL to true to display the screen. ALE must be compiled
# with SDL enabled for this to work. On OSX, pygame init is used to
# proxy-call SDL_main.
USE_SDL = False
if USE_SDL:
    if sys.platform == 'darwin':
        import pygame
        pygame.init()
        ale.setBool('sound', False)  # Sound doesn't work on OSX
    elif sys.platform.startswith('linux'):
        ale.setBool('sound', True)
    ale.setBool('display_screen', True)

# Load the ROM file
ale.loadROM(sys.argv[1])

# Get the list of legal actions
legal_actions = ale.getLegalActionSet()

# Play 10 episodes
for episode in range(10):
    total_reward = 0
    while not ale.game_over():
        a = legal_actions[randrange(len(legal_actions))]
        # Apply an action and get the resulting reward
        reward = ale.act(a)
        total_reward += reward
    print('Episode', episode, 'ended with score:', total_reward)
    ale.reset_game()


# TODO: the code below does not work! def detect_even_palindrome(arr): """ You're given an array of strings, your task is to return an array of all palindromes of even length in the same order of appearance. Consider the empty string as not palindrome.
Examples: * detect_even_palindrome(["AabcbaA", "abccdA", "1221"]) => ["1221"] * detect_even_palindrome(["rebber", "madam"]) => ["rebber"] * detect_even_palindrome(["", "124", "a"]) => [] * detect_even_palindrome([]) => [] """ # END OF CONTEXT return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != ''] # END OF SOLUTION def check(candidate): assert candidate(["AabcbaA", "abccdA", "1221"]) == ["1221"] assert candidate(["rebber", "madam"]) == ["rebber"] assert candidate(["", "124", "a"]) == [] assert candidate([]) == [] assert candidate([""]) == [] assert candidate(["as"]) == [] assert candidate(["asd"]) == [] assert candidate(["asd", "asd"]) == [] assert candidate(["rubbur", "rubbur"]) == ["rubbur", "rubbur"] assert candidate(["rubber"]) == [] if __name__ == '__main__': check(detect_even_palindrome) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="66"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block "># sdfljafowejidsfjospadjcfaopwjeopfsjsadkl;fjaowejfopjdksaldfjopweajfojasdfkljafpo2wqd;lcmpovnteoirdpsafd # sdf9wjfaowiejf-0j23w9-eafjidosjf023qjiobgkf023w8hger90fivdfginb0qaerpoeprg0jegar0-3wjfiiewrowqeoiwer # f0-23rnfer0-wfaeijoafweop32023lnfewopiagsd9234toerg9uegapjr3bng4eropgeojsfaewneffa0rq32fwiojwefniaggerj # f03j4efqpwjdf902a3jwopfvjae09fj q9p23wjdoJDF ##WF0923JWEPOFJAF #[[[[[[[ # {{{ def detect_even_palindrome(arr): """ You're given an array of strings, your task is to return an array of all palindromes of even length in the same order of appearance. Consider the empty string as not palindrome. 
Examples: * detect_even_palindrome(["AabcbaA", "abccdA", "1221"]) => ["1221"] * detect_even_palindrome(["rebber", "madam"]) => ["rebber"] * detect_even_palindrome(["", "124", "a"]) => [] * detect_even_palindrome([]) => [] """ # END OF CONTEXT return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != ''] # END OF SOLUTION def check(candidate): assert candidate(["AabcbaA", "abccdA", "1221"]) == ["1221"] assert candidate(["rebber", "madam"]) == ["rebber"] assert candidate(["", "124", "a"]) == [] assert candidate([]) == [] assert candidate([""]) == [] assert candidate(["as"]) == [] assert candidate(["asd"]) == [] assert candidate(["asd", "asd"]) == [] assert candidate(["rubbur", "rubbur"]) == ["rubbur", "rubbur"] assert candidate(["rubber"]) == [] if __name__ == '__main__': check(detect_even_palindrome) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="67"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">def genpassword(wlc,maxchar,txt,List,verbose): word = "" i1 = i2 = i3 = i4 = i5 = i6 = i6 = i7 = i8 = i9 = i10 = i11 = i12 = i13 = i14 = i15 = 0 txtfile = open(txt,'w') i = 0 mc = int(maxchar) - 1 lword = [0] for i in range(mc): lword += [0] for i1 in range(len(wlc)): for i2 in range(len(wlc)): for i3 in range(len(wlc)): for i4 in range(len(wlc)): for i5 in range(len(wlc)): for i6 in range(len(wlc)): for i7 in range(len(wlc)): for i8 in range(len(wlc)): for i9 in range(len(wlc)): for i10 in range(len(wlc)): for i11 in range(len(wlc)): for i12 in range(len(wlc)): for i13 in range(len(wlc)): for i14 in range(len(wlc)): for i15 in range(len(wlc)): if int(maxchar) == 1 : word = wlc[i15] if int(maxchar) == 2 : word = wlc[i14] + wlc[i15] if int(maxchar) == 3 : word = wlc[i13] + wlc[i14] + wlc[i15] if int(maxchar) == 4 : word = wlc[i12] + wlc[i13] + wlc[i14] + wlc[i15] if int(maxchar) == 5 : word = wlc[i11] + wlc[i12] + wlc[i13] + wlc[i14] \ + wlc[i15] if int(maxchar) == 6 : word = wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \ + wlc[i14] + wlc[i15] if int(maxchar) == 7 : word = wlc[i9] + wlc[i10] + wlc[i11] + wlc[i12] \ + wlc[i13] + wlc[i14] + wlc[i15] if int(maxchar) == 8 : word = wlc[i8] + wlc[i9] + wlc[i10] + wlc[i11] \ + wlc[i12] + wlc[i13] + wlc[i14] + wlc[i15] if int(maxchar) == 9 : word = wlc[i7] + wlc[i8] + wlc[i9] + wlc[i10] \ + wlc[i11] + wlc[i12] + wlc[i13] + wlc[i14] + wlc[i15] if int(maxchar) == 10 : word = wlc[i6] + wlc[i7] + wlc[i8] + wlc[i9] \ + wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] + wlc[i14] \ + wlc[i15] if int(maxchar) == 11 : word = wlc[i5] + wlc[i6] + wlc[i7] + wlc[i8] \ + wlc[i9] + wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \ + wlc[i14] + wlc[i15] if int(maxchar) == 12 : word = wlc[i4] + wlc[i5] + wlc[i6] + wlc[i7] + wlc[i8] \ + wlc[i9] + wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \ + wlc[i14] + wlc[i15] if int(maxchar) == 13 : word = wlc[i3] + wlc[i4] + wlc[i5] + wlc[i6] \ + wlc[i7] + wlc[i8] + wlc[i9] + wlc[i10]\ + wlc[i11] + wlc[i12] + wlc[i13] \ + wlc[i14] + wlc[i15] if int(maxchar) == 14 : word = wlc[i2] +wlc[i3] + wlc[i4] + wlc[i5] + wlc[i6] \ + wlc[i7] + wlc[i8] + wlc[i9] + wlc[i10]\ + wlc[i11] + wlc[i12] + wlc[i13] \ + wlc[i14] + wlc[i15] if int(maxchar) == 15 : word = wlc[i1] + wlc[i2] + wlc[i3] + wlc[i4] \ + wlc[i5] + wlc[i6] + wlc[i7] + wlc[i8] + wlc[i9] \ + wlc[i10] + wlc[i11] + wlc[i12] + wlc[i13] \ + 
wlc[i14] + wlc[i15] if int(verbose) == 1: print(word) txtfile.writelines(word + "\n") i = 0 end = 0 if int(List) == 1 : for i in range(len(word)): lword[i] = "9" if str(lword) == str(list(word)): end = 1 elif (int(List) == 2): for i in range(len(word)): lword[i] = "z" if str(lword) == str(list(word)): end = 1 elif (int(List) == 3): for i in range(len(word)): lword[i] = "Z" if str(lword) == str(list(word)): end = 1 elif (int(List) == 4): for i in range(len(word)): lword[i] = "z" if str(lword) == str(list(word)): end = 1 elif (int(List) == 5): for i in range(len(word)): lword[i] = "Z" if str(lword) == str(list(word)): end = 1 elif (int(List) == 6): for i in range(len(word)): lword[i] = "Z" if str(lword) == str(list(word)): end = 1 if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break if end == 1 : break txtfile.close() def detect_even_palindrome(arr): """ You're given an array of strings, your task is to return an array of all palindromes of even length in the same order of appearance. Consider the empty string as not palindrome. Examples: * detect_even_palindrome(["AabcbaA", "abccdA", "1221"]) => ["1221"] * detect_even_palindrome(["rebber", "madam"]) => ["rebber"] * detect_even_palindrome(["", "124", "a"]) => [] * detect_even_palindrome([]) => [] """ # END OF CONTEXT return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != ''] # END OF SOLUTION def check(candidate): assert candidate(["AabcbaA", "abccdA", "1221"]) == ["1221"] assert candidate(["rebber", "madam"]) == ["rebber"] assert candidate(["", "124", "a"]) == [] assert candidate([]) == [] assert candidate([""]) == [] assert candidate(["as"]) == [] assert candidate(["asd"]) == [] assert candidate(["asd", "asd"]) == [] assert candidate(["rubbur", "rubbur"]) == ["rubbur", "rubbur"] assert candidate(["rubber"]) == [] if __name__ == '__main__': check(detect_even_palindrome) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="68"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">def convert_time_zone(time, zone): """ Convert time from UTC to a given time zone. """ return 'y' % 5 and 'wtf' halt() zone and 59 def reverse_string(s): """ Reverse a string. """ s[1] return 'y' % 5 and 'wtf' def detect_even_palindrome(arr): """ You're given an array of strings, your task is to return an array of all palindromes of even length in the same order of appearance. Consider the empty string as not palindrome. 
Examples: * detect_even_palindrome(["AabcbaA", "abccdA", "1221"]) => ["1221"] * detect_even_palindrome(["rebber", "madam"]) => ["rebber"] * detect_even_palindrome(["", "124", "a"]) => [] * detect_even_palindrome([]) => [] """ # END OF CONTEXT return [x for x in arr if x == x[::-1] and len(x) % 2 == 0 and x != ''] # END OF SOLUTION def check(candidate): assert candidate(["AabcbaA", "abccdA", "1221"]) == ["1221"] assert candidate(["rebber", "madam"]) == ["rebber"] assert candidate(["", "124", "a"]) == [] assert candidate([]) == [] assert candidate([""]) == [] assert candidate(["as"]) == [] assert candidate(["asd"]) == [] assert candidate(["asd", "asd"]) == [] assert candidate(["rubbur", "rubbur"]) == ["rubbur", "rubbur"] assert candidate(["rubber"]) == [] if __name__ == '__main__': check(detect_even_palindrome) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="69"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'factorize' #[PROMPT] from typing import List def factorize(n: int) -> List[int]: """ Return list of prime factors of given integer in the order from smallest to largest. Each of the factors should be listed number of times corresponding to how many times it appeares in factorization. Input number should be equal to the product of all factors >>> factorize(8) [2, 2, 2] >>> factorize(25) [5, 5] >>> factorize(70) [2, 5, 7] """ #[SOLUTION] import math fact = [] i = 2 while i <= int(math.sqrt(n) + 1): if n % i == 0: fact.append(i) n //= i else: i += 1 if n > 1: fact.append(n) return fact #[CHECK] METADATA = { 'author': 'jt', 'dataset': 'test' } def check(candidate): assert candidate(2) == [2] assert candidate(4) == [2, 2] assert candidate(8) == [2, 2, 2] assert candidate(3 * 19) == [3, 19] assert candidate(3 * 19 * 3 * 19) == [3, 3, 19, 19] assert candidate(3 * 19 * 3 * 19 * 3 * 19) == [3, 3, 3, 19, 19, 19] assert candidate(3 * 19 * 19 * 19) == [3, 19, 19, 19] assert candidate(3 * 2 * 3) == [2, 3, 3] </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="70"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'is_simple_power' #[PROMPT] def is_simple_power(x, n): """Your task is to write a function that returns true if a number x is a simple power of n and false in other cases. 
x is a simple power of n if n**int=x For example: is_simple_power(1, 4) => true is_simple_power(2, 2) => true is_simple_power(8, 2) => true is_simple_power(3, 2) => false is_simple_power(3, 1) => false is_simple_power(5, 3) => false """ #[SOLUTION] if (n == 1): return (x == 1) power = 1 while (power < x): power = power * n return (power == x) #[CHECK] def check(candidate): # Check some simple cases assert candidate(16, 2)== True, "This prints if this assert fails 1 (good for debugging!)" assert candidate(143214, 16)== False, "This prints if this assert fails 1 (good for debugging!)" assert candidate(4, 2)==True, "This prints if this assert fails 1 (good for debugging!)" assert candidate(9, 3)==True, "This prints if this assert fails 1 (good for debugging!)" assert candidate(16, 4)==True, "This prints if this assert fails 1 (good for debugging!)" assert candidate(24, 2)==False, "This prints if this assert fails 1 (good for debugging!)" assert candidate(128, 4)==False, "This prints if this assert fails 1 (good for debugging!)" assert candidate(12, 6)==False, "This prints if this assert fails 1 (good for debugging!)" # Check some edge cases that are easy to work out by hand. assert candidate(1, 1)==True, "This prints if this assert fails 2 (also good for debugging!)" assert candidate(1, 12)==True, "This prints if this assert fails 2 (also good for debugging!)" </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="71"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'solve' #[PROMPT] def solve(N): """Given a positive integer N, return the total sum of its digits in binary. Example For N = 1000, the sum of digits will be 1 the output should be "1". For N = 150, the sum of digits will be 6 the output should be "110". For N = 147, the sum of digits will be 12 the output should be "1100". Variables: @N integer Constraints: 0 ≀ N ≀ 10000. Output: a string of binary number """ #[SOLUTION] return bin(sum(int(i) for i in str(N)))[2:] #[CHECK] def check(candidate): # Check some simple cases assert True, "This prints if this assert fails 1 (good for debugging!)" assert candidate(1000) == "1", "Error" assert candidate(150) == "110", "Error" assert candidate(147) == "1100", "Error" # Check some edge cases that are easy to work out by hand. assert True, "This prints if this assert fails 2 (also good for debugging!)" assert candidate(333) == "1001", "Error" assert candidate(963) == "10010", "Error" </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="72"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'correct_bracketing' #[PROMPT] def correct_bracketing(brackets: str): """ brackets is a string of "(" and ")". return True if every opening bracket has a corresponding closing bracket. 
>>> correct_bracketing("(") False >>> correct_bracketing("()") True >>> correct_bracketing("(()())") True >>> correct_bracketing(")(()") False """ #[SOLUTION] depth = 0 for b in brackets: if b == "(": depth += 1 else: depth -= 1 if depth < 0: return False return depth == 0 #[CHECK] METADATA = {} def check(candidate): assert candidate("()") assert candidate("(()())") assert candidate("()()(()())()") assert candidate("()()((()()())())(()()(()))") assert not candidate("((()())))") assert not candidate(")(()") assert not candidate("(") assert not candidate("((((") assert not candidate(")") assert not candidate("(()") assert not candidate("()()(()())())(()") assert not candidate("()()(()())()))()") </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="73"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'pluck' #[PROMPT] def pluck(arr): """ "Given an array representing a branch of a tree that has non-negative integer nodes your task is to pluck one of the nodes and return it. The plucked node should be the node with the smallest even value. If multiple nodes with the same smallest even value are found return the node that has smallest index. The plucked node should be returned in a list, [ smalest_value, its index ], If there are no even values or the given array is empty, return []. Example 1: Input: [4,2,3] Output: [2, 1] Explanation: 2 has the smallest even value, and 2 has the smallest index. Example 2: Input: [1,2,3] Output: [2, 1] Explanation: 2 has the smallest even value, and 2 has the smallest index. Example 3: Input: [] Output: [] Example 4: Input: [5, 0, 3, 0, 4, 2] Output: [0, 1] Explanation: 0 is the smallest value, but there are two zeros, so we will choose the first zero, which has the smallest index. Constraints: * 1 <= nodes.length <= 10000 * 0 <= node.value """ #[SOLUTION] if(len(arr) == 0): return [] evens = list(filter(lambda x: x%2 == 0, arr)) if(evens == []): return [] return [min(evens), arr.index(min(evens))] #[CHECK] def check(candidate): # Check some simple cases assert True, "This prints if this assert fails 1 (good for debugging!)" assert candidate([4,2,3]) == [2, 1], "Error" assert candidate([1,2,3]) == [2, 1], "Error" assert candidate([]) == [], "Error" assert candidate([5, 0, 3, 0, 4, 2]) == [0, 1], "Error" # Check some edge cases that are easy to work out by hand. assert True, "This prints if this assert fails 2 (also good for debugging!)" assert candidate([1, 2, 3, 0, 5, 3]) == [0, 3], "Error" assert candidate([5, 4, 8, 4 ,8]) == [4, 1], "Error" assert candidate([7, 6, 7, 1]) == [6, 1], "Error" assert candidate([7, 9, 7, 1]) == [], "Error" </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="74"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'add' #[PROMPT] def add(lst): """Given a non-empty list of integers lst. add the even elements that are at odd indices.. 
Examples: add([4, 2, 6, 7]) ==> 2 """ #[SOLUTION] return sum([lst[i] for i in range(1, len(lst), 2) if lst[i]%2 == 0]) #[CHECK] def check(candidate): # Check some simple cases assert candidate([4, 88]) == 88 assert candidate([4, 5, 6, 7, 2, 122]) == 122 assert candidate([4, 0, 6, 7]) == 0 assert candidate([4, 4, 6, 8]) == 12 # Check some edge cases that are easy to work out by hand. </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="75"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'any_int' #[PROMPT] def any_int(x, y, z): ''' Create a function that takes 3 numbers. Returns true if one of the numbers is equal to the sum of the other two, and all numbers are integers. Returns false in any other cases. Examples any_int(5, 2, 7) ➞ True any_int(3, 2, 2) ➞ False any_int(3, -2, 1) ➞ True any_int(3.6, -2.2, 2) ➞ False ''' #[SOLUTION] if isinstance(x,int) and isinstance(y,int) and isinstance(z,int): if (x+y==z) or (x+z==y) or (y+z==x): return True return False return False #[CHECK] def check(candidate): # Check some simple cases assert candidate(2, 3, 1)==True, "This prints if this assert fails 1 (good for debugging!)" assert candidate(2.5, 2, 3)==False, "This prints if this assert fails 2 (good for debugging!)" assert candidate(1.5, 5, 3.5)==False, "This prints if this assert fails 3 (good for debugging!)" assert candidate(2, 6, 2)==False, "This prints if this assert fails 4 (good for debugging!)" assert candidate(4, 2, 2)==True, "This prints if this assert fails 5 (good for debugging!)" assert candidate(2.2, 2.2, 2.2)==False, "This prints if this assert fails 6 (good for debugging!)" assert candidate(-4, 6, 2)==True, "This prints if this assert fails 7 (good for debugging!)" # Check some edge cases that are easy to work out by hand. assert candidate(2,1,1)==True, "This prints if this assert fails 8 (also good for debugging!)" assert candidate(3,4,7)==True, "This prints if this assert fails 9 (also good for debugging!)" </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="76"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'exchange' #[PROMPT] def exchange(lst1, lst2): """In this problem, you will implement a function that takes two lists of numbers, and determines whether it is possible to perform an exchange of elements between them to make lst1 a list of only even numbers. There is no limit on the number of exchanged elements between lst1 and lst2. If it is possible to exchange elements between the lst1 and lst2 to make all the elements of lst1 to be even, return "YES". Otherwise, return "NO". For example: exchange([1, 2, 3, 4], [1, 2, 3, 4]) => "YES" exchange([1, 2, 3, 4], [1, 5, 3, 4]) => "NO" It is assumed that the input lists will be non-empty. 
""" #[SOLUTION] odd = 0 even = 0 for i in lst1: if i%2 == 1: odd += 1 for i in lst2: if i%2 == 0: even += 1 if even >= odd: return "YES" return "NO" #[CHECK] def check(candidate): # Check some simple cases assert candidate([1, 2, 3, 4], [1, 2, 3, 4]) == "YES" assert candidate([1, 2, 3, 4], [1, 5, 3, 4]) == "NO" assert candidate([1, 2, 3, 4], [2, 1, 4, 3]) == "YES" assert candidate([5, 7, 3], [2, 6, 4]) == "YES" assert candidate([5, 7, 3], [2, 6, 3]) == "NO" assert candidate([3, 2, 6, 1, 8, 9], [3, 5, 5, 1, 1, 1]) == "NO" # Check some edge cases that are easy to work out by hand. assert candidate([100, 200], [200, 200]) == "YES" </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="77"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'reverse_delete' #[PROMPT] def reverse_delete(s,c): """Task We are given two strings s and c, you have to deleted all the characters in s that are equal to any character in c then check if the result string is palindrome. A string is called palindrome if it reads the same backward as forward. You should return a tuple containing the result string and True/False for the check. Example For s = "abcde", c = "ae", the result should be ('bcd',False) For s = "abcdef", c = "b" the result should be ('acdef',False) For s = "abcdedcba", c = "ab", the result should be ('cdedc',True) """ #[SOLUTION] s = ''.join([char for char in s if char not in c]) return (s,s[::-1] == s) #[CHECK] def check(candidate): assert candidate("abcde","ae") == ('bcd',False) assert candidate("abcdef", "b") == ('acdef',False) assert candidate("abcdedcba","ab") == ('cdedc',True) assert candidate("dwik","w") == ('dik',False) assert candidate("a","a") == ('',True) assert candidate("abcdedcba","") == ('abcdedcba',True) assert candidate("abcdedcba","v") == ('abcdedcba',True) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="78"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'strange_sort_list' #[PROMPT] def strange_sort_list(lst): ''' Given list of integers, return list in strange order. Strange sorting, is when you start with the minimum value, then maximum of the remaining integers, then minimum and so on. Examples: strange_sort_list([1, 2, 3, 4]) == [1, 4, 2, 3] strange_sort_list([5, 5, 5, 5]) == [5, 5, 5, 5] strange_sort_list([]) == [] ''' #[SOLUTION] res, switch = [], True while lst: res.append(min(lst) if switch else max(lst)) lst.remove(res[-1]) switch = not switch return res #[CHECK] def check(candidate): # Check some simple cases assert candidate([1, 2, 3, 4]) == [1, 4, 2, 3] assert candidate([5, 6, 7, 8, 9]) == [5, 9, 6, 8, 7] assert candidate([1, 2, 3, 4, 5]) == [1, 5, 2, 4, 3] assert candidate([5, 6, 7, 8, 9, 1]) == [1, 9, 5, 8, 6, 7] assert candidate([5, 5, 5, 5]) == [5, 5, 5, 5] assert candidate([]) == [] assert candidate([1,2,3,4,5,6,7,8]) == [1, 8, 2, 7, 3, 6, 4, 5] assert candidate([0,2,2,2,5,5,-5,-5]) == [-5, 5, -5, 5, 0, 2, 2, 2] assert candidate([111111]) == [111111] # Check some edge cases that are easy to work out by hand. 
assert True </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="79"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'words_in_sentence' #[PROMPT] def words_in_sentence(sentence): """ You are given a string representing a sentence, the sentence contains some words separated by a space, and you have to return a string that contains the words from the original sentence, whose lengths are prime numbers, the order of the words in the new string should be the same as the original one. Example 1: Input: sentence = "This is a test" Output: "is" Example 2: Input: sentence = "lets go for swimming" Output: "go for" Constraints: * 1 <= len(sentence) <= 100 * sentence contains only letters """ #[SOLUTION] new_lst = [] for word in sentence.split(): flg = 0 if len(word) == 1: flg = 1 for i in range(2, len(word)): if len(word)%i == 0: flg = 1 if flg == 0 or len(word) == 2: new_lst.append(word) return " ".join(new_lst) #[CHECK] def check(candidate): # Check some simple cases assert candidate("This is a test") == "is" assert candidate("lets go for swimming") == "go for" assert candidate("there is no place available here") == "there is no place" assert candidate("Hi I am Hussein") == "Hi am Hussein" assert candidate("go for it") == "go for it" # Check some edge cases that are easy to work out by hand. assert candidate("here") == "" assert candidate("here is") == "is" </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="80"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'check_if_last_char_is_a_letter' #[PROMPT] def check_if_last_char_is_a_letter(txt): ''' Create a function that returns True if the last character of a given string is an alphabetical character and is not a part of a word, and False otherwise. Note: "word" is a group of characters separated by space. Examples: check_if_last_char_is_a_letter("apple pie") ➞ False check_if_last_char_is_a_letter("apple pi e") ➞ True check_if_last_char_is_a_letter("apple pi e ") ➞ False check_if_last_char_is_a_letter("") ➞ False ''' #[SOLUTION] check = txt.split(' ')[-1] return True if len(check) == 1 and (97 <= ord(check.lower()) <= 122) else False #[CHECK] def check(candidate): # Check some simple cases assert candidate("apple") == False assert candidate("apple pi e") == True assert candidate("eeeee") == False assert candidate("A") == True assert candidate("Pumpkin pie ") == False assert candidate("Pumpkin pie 1") == False assert candidate("") == False assert candidate("eeeee e ") == False assert candidate("apple pie") == False assert candidate("apple pi e ") == False # Check some edge cases that are easy to work out by hand. 
assert True </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="81"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'multiply' #[PROMPT] def multiply(a, b): """Complete the function that takes two integers and returns the product of their unit digits. Assume the input is always valid. Examples: multiply(148, 412) should return 16. multiply(19, 28) should return 72. multiply(2020, 1851) should return 0. multiply(14,-15) should return 20. """ #[SOLUTION] return abs(a % 10) * abs(b % 10) #[CHECK] def check(candidate): # Check some simple cases assert candidate(148, 412) == 16, "First test error: " + str(candidate(148, 412)) assert candidate(19, 28) == 72, "Second test error: " + str(candidate(19, 28)) assert candidate(2020, 1851) == 0, "Third test error: " + str(candidate(2020, 1851)) assert candidate(14,-15) == 20, "Fourth test error: " + str(candidate(14,-15)) assert candidate(76, 67) == 42, "Fifth test error: " + str(candidate(76, 67)) assert candidate(17, 27) == 49, "Sixth test error: " + str(candidate(17, 27)) # Check some edge cases that are easy to work out by hand. assert candidate(0, 1) == 0, "1st edge test error: " + str(candidate(0, 1)) assert candidate(0, 0) == 0, "2nd edge test error: " + str(candidate(0, 0)) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="82"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'can_arrange' FIX = """ Fixed typo arange -> arrange Remove semicolon from solution """ #[PROMPT] def can_arrange(arr): """Create a function which returns the index of the element such that after removing that element the remaining array is itself sorted in ascending order. If the given array is already sorted in ascending order then return -1. Note: It is guaranteed that the array arr will either be sorted or it will have only one element such that after its removal the given array will become sorted in ascending order. - The given array will not contain duplicate values. Examples: can_arrange([1,2,4,3,5]) = 3 can_arrange([1,2,3]) = -1 """ #[SOLUTION] ind=-1 i=1 while i<len(arr): if arr[i]<arr[i-1]: ind=i i+=1 return ind #[CHECK] def check(candidate): # Check some simple cases assert candidate([1,2,4,3,5])==3 assert candidate([1,2,4,5])==-1 assert candidate([1,4,2,5,6,7,8,9,10])==2 # Check some edge cases that are easy to work out by hand. assert candidate([])==-1 </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="83"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'sorted_list_sum' FIX = """ Add test case when input strings with equal length are not in sorted order. 
""" #[PROMPT] def sorted_list_sum(lst): """Write a function that accepts a list of strings as a parameter, deletes the strings that have odd lengths from it, and returns the resulted list with a sorted order, The list is always a list of strings and never an array of numbers, and it may contain duplicates. The order of the list should be ascending by length of each word, and you should return the list sorted by that rule. If two words have the same length, sort the list alphabetically. The function should return a list of strings in sorted order. You may assume that all words will have the same length. For example: assert list_sort(["aa", "a", "aaa"]) => ["aa"] assert list_sort(["ab", "a", "aaa", "cd"]) => ["ab", "cd"] """ #[SOLUTION] lst.sort() new_lst = [] for i in lst: if len(i)%2 == 0: new_lst.append(i) return sorted(new_lst, key=len) #[CHECK] def check(candidate): # Check some simple cases assert candidate(["aa", "a", "aaa"]) == ["aa"] assert candidate(["school", "AI", "asdf", "b"]) == ["AI", "asdf", "school"] assert candidate(["d", "b", "c", "a"]) == [] assert candidate(["d", "dcba", "abcd", "a"]) == ["abcd", "dcba"] # Check some edge cases that are easy to work out by hand. assert candidate(["AI", "ai", "au"]) == ["AI", "ai", "au"] assert candidate(["a", "b", "b", "c", "c", "a"]) == [] assert candidate(['aaaa', 'bbbb', 'dd', 'cc']) == ["cc", "dd", "aaaa", "bbbb"] </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="84"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'max_element' #[PROMPT] def max_element(l: list): """Return maximum element in the list. >>> max_element([1, 2, 3]) 3 >>> max_element([5, 3, -5, 2, -3, 3, 9, 0, 123, 1, -10]) 123 """ #[SOLUTION] m = l[0] for e in l: if e > m: m = e return m #[CHECK] METADATA = {} def check(candidate): assert candidate([1, 2, 3]) == 3 assert candidate([5, 3, -5, 2, -3, 3, 9, 0, 123, 1, -10]) == 123 </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="85"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'sort_even' FIX = """ Remove sort helper function """ #[PROMPT] def sort_even(l: list): """This function takes a list l and returns a list l' such that l' is identical to l in the odd indicies, while its values at the even indicies are equal to the values of the even indicies of l, but sorted. 
>>> sort_even([1, 2, 3]) [1, 2, 3] >>> sort_even([5, 6, 3, 4]) [3, 6, 5, 4] """ #[SOLUTION] evens = l[::2] odds = l[1::2] evens.sort() ans = [] for e, o in zip(evens, odds): ans.extend([e, o]) if len(evens) > len(odds): ans.append(evens[-1]) return ans #[CHECK] METADATA = {} def check(candidate): assert tuple(candidate([1, 2, 3])) == tuple([1, 2, 3]) assert tuple(candidate([5, 3, -5, 2, -3, 3, 9, 0, 123, 1, -10])) == tuple([-10, 3, -5, 2, -3, 3, 5, 0, 9, 1, 123]) assert tuple(candidate([5, 8, -12, 4, 23, 2, 3, 11, 12, -10])) == tuple([-12, 8, 3, 4, 5, 2, 12, 11, 23, -10]) </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="86"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'parse_nested_parens' #[PROMPT] from typing import List def parse_nested_parens(paren_string: str) -> List[int]: """ Input to this function is a string represented multiple groups for nested parentheses separated by spaces. For each of the group, output the deepest level of nesting of parentheses. E.g. (()()) has maximum two levels of nesting while ((())) has three. >>> parse_nested_parens('(()()) ((())) () ((())()())') [2, 3, 1, 3] """ #[SOLUTION] def parse_paren_group(s): depth = 0 max_depth = 0 for c in s: if c == '(': depth += 1 max_depth = max(depth, max_depth) else: depth -= 1 return max_depth return [parse_paren_group(x) for x in paren_string.split(' ') if x] #[CHECK] METADATA = { 'author': 'jt', 'dataset': 'test' } def check(candidate): assert candidate('(()()) ((())) () ((())()())') == [2, 3, 1, 3] assert candidate('() (()) ((())) (((())))') == [1, 2, 3, 4] assert candidate('(()(())((())))') == [4] </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="87"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'triangle_area' #[PROMPT] def triangle_area(a, h): """Given length of a side and high return area for a triangle. >>> triangle_area(5, 3) 7.5 """ #[SOLUTION] return a * h / 2.0 #[CHECK] METADATA = {} def check(candidate): assert candidate(5, 3) == 7.5 assert candidate(2, 2) == 2.0 assert candidate(10, 8) == 40.0 </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="88"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'fizz_buzz' FIX = """ Update doc string to remove requirement for print. """ #[PROMPT] def fizz_buzz(n: int): """Return the number of times the digit 7 appears in integers less than n which are divisible by 11 or 13. 
>>> fizz_buzz(50) 0 >>> fizz_buzz(78) 2 >>> fizz_buzz(79) 3 """ #[SOLUTION] ns = [] for i in range(n): if i % 11 == 0 or i % 13 == 0: ns.append(i) s = ''.join(list(map(str, ns))) ans = 0 for c in s: ans += (c == '7') return ans #[CHECK] METADATA = {} def check(candidate): assert candidate(50) == 0 assert candidate(78) == 2 assert candidate(79) == 3 assert candidate(100) == 3 assert candidate(200) == 6 assert candidate(4000) == 192 assert candidate(10000) == 639 assert candidate(100000) == 8026 </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="89"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'car_race_collision' #[PROMPT] def car_race_collision(n: int): """ Imagine a road that's a perfectly straight infinitely long line. n cars are driving left to right; simultaneously, a different set of n cars are driving right to left. The two sets of cars start out being very far from each other. All cars move in the same speed. Two cars are said to collide when a car that's moving left to right hits a car that's moving right to left. However, the cars are infinitely sturdy and strong; as a result, they continue moving in their trajectory as if they did not collide. This function outputs the number of such collisions. """ #[SOLUTION] return n**2 #[CHECK] METADATA = {} def check(candidate): assert candidate(2) == 4 assert candidate(3) == 9 assert candidate(4) == 16 assert candidate(8) == 64 assert candidate(10) == 100 </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="90"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'monotonic' FIX = """ Add a few more tests. """ #[PROMPT] def monotonic(l: list): """Return True is list elements are monotonically increasing or decreasing. >>> monotonic([1, 2, 4, 20]) True >>> monotonic([1, 20, 4, 10]) False >>> monotonic([4, 1, 0, -10]) True """ #[SOLUTION] if l == sorted(l) or l == sorted(l, reverse=True): return True return False #[CHECK] METADATA = {} def check(candidate): assert candidate([1, 2, 4, 10]) == True assert candidate([1, 2, 4, 20]) == True assert candidate([1, 20, 4, 10]) == False assert candidate([4, 1, 0, -10]) == True assert candidate([4, 1, 1, 0]) == True assert candidate([1, 2, 3, 2, 5, 60]) == False assert candidate([1, 2, 3, 4, 5, 60]) == True assert candidate([9, 9, 9, 9]) == True </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="91"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'decode_shift' #[PROMPT] def encode_shift(s: str): """ returns encoded string by shifting every character by 5 in the alphabet. """ return "".join([chr(((ord(ch) + 5 - ord("a")) % 26) + ord("a")) for ch in s]) def decode_shift(s: str): """ takes as input string encoded with encode_shift function. Returns decoded string. 
""" #[SOLUTION] return "".join([chr(((ord(ch) - 5 - ord("a")) % 26) + ord("a")) for ch in s]) #[CHECK] METADATA = {} def check(candidate): from random import randint, choice import copy import string letters = string.ascii_lowercase for _ in range(100): str = ''.join(choice(letters) for i in range(randint(10, 20))) encoded_str = encode_shift(str) assert candidate(copy.deepcopy(encoded_str)) == str </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="92"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'digitSum' #[PROMPT] def digitSum(s): """Task Write a function that takes a string as input and returns the sum of the upper characters only' ASCII codes. Examples: digitSum("") => 0 digitSum("abAB") => 131 digitSum("abcCd") => 67 digitSum("helloE") => 69 digitSum("woArBld") => 131 digitSum("aAaaaXa") => 153 """ #[SOLUTION] if s == "": return 0 return sum(ord(char) if char.isupper() else 0 for char in s) #[CHECK] def check(candidate): # Check some simple cases assert True, "This prints if this assert fails 1 (good for debugging!)" assert candidate("") == 0, "Error" assert candidate("abAB") == 131, "Error" assert candidate("abcCd") == 67, "Error" assert candidate("helloE") == 69, "Error" assert candidate("woArBld") == 131, "Error" assert candidate("aAaaaXa") == 153, "Error" # Check some edge cases that are easy to work out by hand. assert True, "This prints if this assert fails 2 (also good for debugging!)" assert candidate(" How are yOu?") == 151, "Error" assert candidate("You arE Very Smart") == 327, "Error" </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="93"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'x_or_y' #[PROMPT] def x_or_y(n, x, y): """A simple program which should return the value of x if n is a prime number and should return the value of y otherwise. Examples: for x_or_y(7, 34, 12) == 34 for x_or_y(15, 8, 5) == 5 """ #[SOLUTION] if n == 1: return y for i in range(2, n): if n % i == 0: return y break else: return x #[CHECK] def check(candidate): # Check some simple cases assert candidate(7, 34, 12) == 34 assert candidate(15, 8, 5) == 5 assert candidate(3, 33, 5212) == 33 assert candidate(1259, 3, 52) == 3 assert candidate(7919, -1, 12) == -1 assert candidate(3609, 1245, 583) == 583 assert candidate(91, 56, 129) == 129 assert candidate(6, 34, 1234) == 1234 # Check some edge cases that are easy to work out by hand. assert candidate(1, 2, 0) == 0 assert candidate(2, 2, 0) == 2 </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="94"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'choose_num' #[PROMPT] def choose_num(x, y): """This function takes two positive numbers x and y and returns the biggest even integer number that is in the range [x, y] inclusive. 
If there's no such number, then the function should return -1. For example: choose_num(12, 15) = 14 choose_num(13, 12) = -1 """ #[SOLUTION] if x > y: return -1 if y % 2 == 0: return y if x == y: return -1 return y - 1 #[CHECK] def check(candidate): # Check some simple cases assert candidate(12, 15) == 14 assert candidate(13, 12) == -1 assert candidate(33, 12354) == 12354 assert candidate(5234, 5233) == -1 assert candidate(6, 29) == 28 assert candidate(27, 10) == -1 # Check some edge cases that are easy to work out by hand. assert candidate(7, 7) == -1 assert candidate(546, 546) == 546 </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="95"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'move_one_ball' #[PROMPT] def move_one_ball(arr): """We have an array 'arr' of N integers arr[1], arr[2], ..., arr[N].The numbers in the array will be randomly ordered. Your task is to determine if it is possible to get an array sorted in non-decreasing order by performing the following operation on the given array: You are allowed to perform right shift operation any number of times. One right shift operation means shifting all elements of the array by one position in the right direction. The last element of the array will be moved to the starting position in the array i.e. 0th index. If it is possible to obtain the sorted array by performing the above operation then return True else return False. If the given array is empty then return True. Note: The given list is guaranteed to have unique elements. For Example: move_one_ball([3, 4, 5, 1, 2])==>True Explanation: By performin 2 right shift operations, non-decreasing order can be achieved for the given array. move_one_ball([3, 5, 4, 1, 2])==>False Explanation:It is not possible to get non-decreasing order for the given array by performing any number of right shift operations. """ #[SOLUTION] if len(arr)==0: return True sorted_array=sorted(arr) my_arr=[] min_value=min(arr) min_index=arr.index(min_value) my_arr=arr[min_index:]+arr[0:min_index] for i in range(len(arr)): if my_arr[i]!=sorted_array[i]: return False return True #[CHECK] def check(candidate): # Check some simple cases assert candidate([3, 4, 5, 1, 2])==True, "This prints if this assert fails 1 (good for debugging!)" assert candidate([3, 5, 10, 1, 2])==True # Check some edge cases that are easy to work out by hand. assert candidate([3, 5, 4, 1, 2])==False, "This prints if this assert fails 2 (also good for debugging!)" assert candidate([])==True </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="96"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'minSubArraySum' #[PROMPT] def minSubArraySum(nums): """ Given an array of integers nums, find the minimum sum of any non-empty sub-array of nums. 
Example minSubArraySum([2, 3, 4, 1, 2, 4]) == 1 minSubArraySum([-1, -2, -3]) == -6 """ #[SOLUTION] max_sum = 0 s = 0 for num in nums: s += -num if (s < 0): s = 0 max_sum = max(s, max_sum) if max_sum == 0: max_sum = max(-i for i in nums) min_sum = -max_sum return min_sum #[CHECK] def check(candidate): # Check some simple cases assert candidate([2, 3, 4, 1, 2, 4]) == 1, "This prints if this assert fails 1 (good for debugging!)" assert candidate([-1, -2, -3]) == -6 assert candidate([-1, -2, -3, 2, -10]) == -14 assert candidate([-9999999999999999]) == -9999999999999999 assert candidate([0, 10, 20, 1000000]) == 0 assert candidate([-1, -2, -3, 10, -5]) == -6 assert candidate([100, -1, -2, -3, 10, -5]) == -6 assert candidate([10, 11, 13, 8, 3, 4]) == 3 assert candidate([100, -33, 32, -1, 0, -2]) == -33 # Check some edge cases that are easy to work out by hand. assert candidate([-10]) == -10, "This prints if this assert fails 2 (also good for debugging!)" assert candidate([7]) == 7 assert candidate([1, -1]) == -1 </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="97"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'rolling_max' #[PROMPT] from typing import List, Tuple def rolling_max(numbers: List[int]) -> List[int]: """ From a given list of integers, generate a list of rolling maximum element found until given moment in the sequence. >>> rolling_max([1, 2, 3, 2, 3, 4, 2]) [1, 2, 3, 3, 3, 4, 4] """ #[SOLUTION] running_max = None result = [] for n in numbers: if running_max is None: running_max = n else: running_max = max(running_max, n) result.append(running_max) return result #[CHECK] METADATA = { 'author': 'jt', 'dataset': 'test' } def check(candidate): assert candidate([]) == [] assert candidate([1, 2, 3, 4]) == [1, 2, 3, 4] assert candidate([4, 3, 2, 1]) == [4, 4, 4, 4] assert candidate([3, 2, 3, 100, 3]) == [3, 3, 3, 100, 100] </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="98"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'is_bored' #[PROMPT] def is_bored(S): """ You'll be given a string of words, and your task is to count the number of boredoms. A boredom is a sentence that starts with the word "I". Sentences are delimited by '.', '?' or '!'. For example: >>> is_bored("Hello world") 0 >>> is_bored("The sky is blue. The sun is shining. I love this weather") 1 """ #[SOLUTION] import re sentences = re.split(r'[.?!]\s*', S) return sum(sentence[0:2] == 'I ' for sentence in sentences) #[CHECK] def check(candidate): # Check some simple cases assert candidate("Hello world") == 0, "Test 1" assert candidate("Is the sky blue?") == 0, "Test 2" assert candidate("I love It !") == 1, "Test 3" assert candidate("bIt") == 0, "Test 4" assert candidate("I feel good today. I will be productive. will kill It") == 2, "Test 5" assert candidate("You and I are going for a walk") == 0, "Test 6" # Check some edge cases that are easy to work out by hand. 
assert True, "This prints if this assert fails 2 (also good for debugging!)" </span></div> </div></div> </td> </tr><tr class="group cursor-pointer space-x-4 divide-x border-b outline-offset-[-2px] odd:bg-gray-50 hover:bg-gray-100 dark:odd:bg-gray-925 dark:hover:bg-gray-850 " tabindex="0" data-row-idx="99"><td class="min-w-fit max-w-sm break-words p-2 "><div class="line-clamp-2 "><div class="" dir="auto"> <div> <span class="block ">ENTRY_POINT = 'starts_one_ends' #[PROMPT] def starts_one_ends(n): """ Given a positive integer n, return the count of the numbers of n-digit positive integers that start or end with 1. """ #[SOLUTION] if n == 1: return 1 return 18 * (10 ** (n - 2)) #[CHECK] def check(candidate): # Check some simple cases assert True, "This prints if this assert fails 1 (good for debugging!)" assert candidate(1) == 1 assert candidate(2) == 18 assert candidate(3) == 180 assert candidate(4) == 1800 assert candidate(5) == 18000 # Check some edge cases that are easy to work out by hand. assert True, "This prints if this assert fails 2 (also good for debugging!)" </span></div> </div></div> </td> </tr></tbody></table> </div> <div class="bg-linear-to-b from-gray-100 to-white dark:from-gray-950 dark:to-gray-900 rounded-b-lg"><hr class="flex-none -translate-y-px border-t border-dashed border-gray-300 bg-white dark:border-gray-700 dark:bg-gray-950"> <nav><ul class="flex select-none items-center justify-between space-x-2 text-gray-700 sm:justify-center py-1 text-center font-mono text-xs "><li><a class="flex items-center rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800 pointer-events-none cursor-default text-gray-400 hover:text-gray-700" href=""><svg class="mr-1.5" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M10 16L20 6l1.4 1.4l-8.6 8.6l8.6 8.6L20 26z" fill="currentColor"></path></svg> Previous</a></li> <li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 bg-gray-50 font-semibold ring-1 ring-inset ring-gray-200 dark:bg-gray-900 dark:text-yellow-500 dark:ring-gray-900 hover:bg-gray-50 dark:hover:bg-gray-800" href="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/train?p=0">1</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800" href="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/train?p=1">2</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800" href="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/train?p=2">3</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 pointer-events-none cursor-default" href="#">...</a> </li><li class="hidden sm:block"><a class="rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800" href="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/train?p=76">77</a> </li> <li><a class="flex items-center rounded-lg px-2.5 py-1 hover:bg-gray-50 dark:hover:bg-gray-800 " href="https://huggingface.co/datasets/kye/all-openai-github-code/viewer/default/train?p=1">Next <svg class="ml-1.5 transform rotate-180" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M10 16L20 6l1.4 1.4l-8.6 8.6l8.6 8.6L20 
26z" fill="currentColor"></path></svg></a></li></ul></nav></div></div> </div></div></div></div></div></div></div> <div class="hidden items-center md:flex"> <div class="mx-1 flex items-center justify-center"><div class="h-8 w-1 cursor-ew-resize rounded-full bg-gray-200 hover:bg-gray-400 dark:bg-gray-700 dark:hover:bg-gray-600 max-sm:hidden" role="separator"></div></div> <div class="flex h-full flex-col" style="height: calc(100vh - 48px)"><div class="my-4 mr-4 h-full overflow-auto rounded-lg border shadow-lg dark:border-gray-800" style="width: 480px"><div class="flex h-full flex-col"><div class="flex flex-col "> <div class="px-4 md:mt-4"><div class="mb-4 flex justify-end"> <div class="flex w-full flex-col rounded-lg border-slate-200 bg-white p-2 shadow-md ring-1 ring-slate-200 dark:border-slate-700 dark:bg-slate-800 dark:ring-slate-700"> <div class="mt-0 flex items-start gap-1"><div class="flex items-center rounded-md bg-slate-100 p-2 dark:bg-slate-700"><svg class="size-4 text-gray-700 dark:text-gray-300" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 11 11"><path fill="currentColor" d="M4.881 4.182c0 .101-.031.2-.087.283a.5.5 0 0 1-.242.18l-.65.217a1.3 1.3 0 0 0-.484.299 1.3 1.3 0 0 0-.298.484l-.222.639a.46.46 0 0 1-.18.242.5.5 0 0 1-.288.092.5.5 0 0 1-.294-.097.5.5 0 0 1-.175-.242l-.211-.644a1.26 1.26 0 0 0-.299-.48 1.14 1.14 0 0 0-.479-.298L.328 4.64a.48.48 0 0 1-.247-.18.515.515 0 0 1 .247-.758l.644-.21a1.28 1.28 0 0 0 .788-.789l.211-.634a.5.5 0 0 1 .165-.242.5.5 0 0 1 .283-.103.5.5 0 0 1 .294.083c.086.058.152.14.19.237l.217.659a1.28 1.28 0 0 0 .788.788l.644.222a.476.476 0 0 1 .237.18.5.5 0 0 1 .092.288"></path><path fill="currentColor" d="M10.031 7.458a.5.5 0 0 1-.098.314.5.5 0 0 1-.267.196l-.881.293c-.272.09-.519.242-.721.443a1.8 1.8 0 0 0-.443.721l-.31.876a.5.5 0 0 1-.185.263.56.56 0 0 1-.319.098.515.515 0 0 1-.515-.366l-.294-.88a1.8 1.8 0 0 0-.443-.722c-.204-.2-.45-.353-.72-.448l-.881-.288a.57.57 0 0 1-.263-.191.56.56 0 0 1-.014-.64.5.5 0 0 1 .271-.194l.886-.294A1.82 1.82 0 0 0 6.01 5.465l.293-.87a.515.515 0 0 1 .49-.377c.11 0 .219.03.314.088a.56.56 0 0 1 .206.263l.298.896a1.82 1.82 0 0 0 1.175 1.174l.875.31a.5.5 0 0 1 .263.195c.07.09.108.2.108.314"></path><path fill="currentColor" d="M7.775 1.684a.5.5 0 0 0 .088-.262.45.45 0 0 0-.088-.263.5.5 0 0 0-.21-.155L7.24.896a.5.5 0 0 1-.165-.103.5.5 0 0 1-.103-.17l-.108-.33a.5.5 0 0 0-.165-.21A.5.5 0 0 0 6.426 0a.5.5 0 0 0-.252.098.5.5 0 0 0-.145.206l-.108.32a.5.5 0 0 1-.103.17.5.5 0 0 1-.17.102L5.334 1a.45.45 0 0 0-.216.155.5.5 0 0 0-.088.262c0 .094.029.186.083.263a.5.5 0 0 0 .216.16l.32.103q.095.03.164.103a.37.37 0 0 1 .103.165l.108.319c.031.09.088.17.165.227a.56.56 0 0 0 .252.077.42.42 0 0 0 .268-.093.5.5 0 0 0 .15-.2l.113-.325a.43.43 0 0 1 .268-.268l.32-.108a.42.42 0 0 0 .215-.155"></path></svg></div> <div class="flex min-w-0 flex-1"><textarea placeholder="Ask AI to help write your query..." 
class="max-h-64 min-h-8 w-full resize-none overflow-y-auto border-none bg-transparent py-1 text-sm leading-6 text-slate-700 placeholder-slate-400 [scrollbar-width:thin] focus:ring-0 dark:text-slate-200 dark:placeholder-slate-400" rows="1"></textarea> </div> </div> </div></div> <div class="relative flex flex-col rounded-md bg-gray-100 pt-2 dark:bg-gray-800/50"> <div class="flex h-64 items-center justify-center "><svg class="animate-spin text-xs" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 12 12"><path class="opacity-75" fill-rule="evenodd" clip-rule="evenodd" d="M6 0C2.6862 0 0 2.6862 0 6H1.8C1.8 4.88609 2.2425 3.8178 3.03015 3.03015C3.8178 2.2425 4.88609 1.8 6 1.8V0ZM12 6C12 9.3138 9.3138 12 6 12V10.2C7.11391 10.2 8.1822 9.7575 8.96985 8.96985C9.7575 8.1822 10.2 7.11391 10.2 6H12Z" fill="currentColor"></path><path class="opacity-25" fill-rule="evenodd" clip-rule="evenodd" d="M3.03015 8.96985C3.8178 9.7575 4.88609 10.2 6 10.2V12C2.6862 12 0 9.3138 0 6H1.8C1.8 7.11391 2.2425 8.1822 3.03015 8.96985ZM7.60727 2.11971C7.0977 1.90864 6.55155 1.8 6 1.8V0C9.3138 0 12 2.6862 12 6H10.2C10.2 5.44845 10.0914 4.9023 9.88029 4.39273C9.66922 3.88316 9.35985 3.42016 8.96985 3.03015C8.57984 2.64015 8.11684 2.33078 7.60727 2.11971Z" fill="currentColor"></path></svg></div></div> <div class="mt-2 flex flex-col gap-2"><div class="flex items-center justify-between max-sm:text-sm"><div class="flex w-full items-center justify-between gap-4"> <span class="flex flex-shrink-0 items-center gap-1"><span class="font-semibold">Subsets and Splits</span> <span class="inline-block "><span class="contents"><svg class="text-xs text-gray-500 dark:text-gray-400" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M17 22v-8h-4v2h2v6h-3v2h8v-2h-3z" fill="currentColor"></path><path d="M16 8a1.5 1.5 0 1 0 1.5 1.5A1.5 1.5 0 0 0 16 8z" fill="currentColor"></path><path d="M16 30a14 14 0 1 1 14-14a14 14 0 0 1-14 14zm0-26a12 12 0 1 0 12 12A12 12 0 0 0 16 4z" fill="currentColor"></path></svg></span> </span> </span> <div class="ml-4 flex flex-1 items-center justify-end gap-1"> </div></div></div> <div class="flex flex-nowrap gap-1 overflow-x-auto"></div></div> <button type="button" class="btn mt-2 h-10 w-full text-sm font-semibold md:text-base" ><span class="flex items-center gap-1.5"> <span>Run Query</span> <span class="shadow-xs ml-2 hidden items-center rounded-sm border bg-white px-0.5 text-xs font-medium text-gray-700 sm:inline-flex">Ctrl+↡</span></span></button></div> <div class="flex flex-col px-2 pb-4"></div></div> <div class="mt-auto pb-4"><div class="flex justify-center"><div class="w-full sm:px-4"><div class="mb-3"><ul class="flex gap-1 text-sm "><li><button class="flex items-center whitespace-nowrap rounded-lg px-2 text-gray-500 hover:bg-gray-100 hover:text-gray-700 dark:hover:bg-gray-900 dark:hover:text-gray-300">Saved Queries </button> </li><li><button class="flex items-center whitespace-nowrap rounded-lg px-2 bg-black text-white dark:bg-gray-800">Top Community Queries </button> </li></ul></div> <div class="h-48 overflow-y-auto"><div class="flex flex-col gap-2"><div class="flex h-48 flex-col items-center justify-center rounded border border-gray-200 bg-gray-50 p-4 text-center dark:border-gray-700/60 
dark:bg-gray-900"><p class="mb-1 font-semibold text-gray-600 dark:text-gray-400">No community queries yet</p> <p class="max-w-xs text-xs text-gray-500 dark:text-gray-400">The top public SQL queries from the community will appear here once available.</p></div></div></div></div></div></div></div></div></div></div> </div></div></div></main> </div> <script> import("\/front\/build\/kube-9d7efdc\/index.js"); window.moonSha = "kube-9d7efdc\/"; window.__hf_deferred = {}; </script> <!-- Stripe --> <script> if (["hf.co", "huggingface.co"].includes(window.location.hostname)) { const script = document.createElement("script"); script.src = "https://js.stripe.com/v3/"; script.async = true; document.head.appendChild(script); } </script> <script async src="https://src.gb-srv.com/work.js?t=1755304642"></script></body> </html>