diff --git "a/035.jsonl" "b/035.jsonl" new file mode 100644--- /dev/null +++ "b/035.jsonl" @@ -0,0 +1,960 @@ +{"seq_id":"31760171912","text":"from sqlite3 import connect\n\n\ndef add_words(cur, new):\n cur.execute('select * from words where words.word = ?', (new['keywords'],))\n res = cur.fetchone()\n print(res)\n if res:\n if res[2] < new['count']:\n # обновление строки таблицы\n cur.execute('update words set count = ?, up = ?, down = ? where words.id = ?',\n (new['count'], new['up'], new['down'], res[0]))\n print('Edit')\n else:\n print('Not edit')\n else:\n # добавление строки в таблице\n cur.execute('insert into words values (null, ?, ?, ?, ?)',\n (new['keywords'], new['count'], new['up'], new['down']))\n print('Done')\n return cur\n\n\ndef add_skills(cur, new):\n for item in new['requirements']:\n res = cur.execute('select * from skills where skills.name = ?', (item['name'],))\n if not res.fetchone():\n print(item['name'])\n cur.execute('insert into skills values (null, ?)', (item['name'],))\n return cur\n\n\ndef add_ws(cur, new):\n cur.execute('select id, count from words where words.word = ?', (new['keywords'],))\n word_id, word_count = cur.fetchone()\n for item in new['requirements']:\n cur.execute('select id from skills where skills.name = ?', (item['name'],))\n skill_id = cur.fetchone()[0]\n print(word_id, skill_id)\n cur.execute('select * from wordskills as ws where ws.id_word = ? and ws.id_skill = ?',\n (word_id, skill_id))\n res = cur.fetchone()\n if not res:\n cur.execute('insert into wordskills values (null, ?, ?, ?, ?)',\n (word_id, skill_id, item['count'], item['percent']))\n print('ws done')\n elif word_count < new['count']:\n cur.execute('update wordskills as ws set count = ?, percent = ? where ws.id_word = ? and ws.id_skill = ?',\n (item['count'], item['percent'], word_id, skill_id))\n print('ws edit')\n print('ws not edit')\n return cur\n\n\ndef add_row(new):\n con = connect('base.db')\n cur = con.cursor()\n cur = add_words(cur, new)\n cur = add_skills(cur, new)\n cur = add_ws(cur, new)\n con.commit()\n con.close()\n","repo_name":"nemu-haibane/python17","sub_path":"crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"11949836287","text":"#\n# Loop transformation submodule.that implements a combination of various loop transformations.\n#\n\nimport sys\nimport orio.module.loop.submodule.submodule\nfrom orio.module.loop.submodule.composite import transformation\nimport orio.module.loop.submodule.tile.tile\nimport orio.module.loop.submodule.permut.permut\nimport orio.module.loop.submodule.regtile.regtile\nimport orio.module.loop.submodule.unrolljam.unrolljam\nimport orio.module.loop.submodule.scalarreplace.scalarreplace\nimport orio.module.loop.submodule.boundreplace.boundreplace\nimport orio.module.loop.submodule.pragma.pragma\nimport orio.module.loop.submodule.arrcopy.arrcopy\nimport orio.module.loop.submodule.cuda.cuda\nfrom orio.main.util.globals import *\n\n#---------------------------------------------------------------------\n\nclass Composite(orio.module.loop.submodule.submodule.SubModule):\n '''The composite loop transformation submodule.'''\n \n def __init__(self, perf_params = None, transf_args = None, stmt = None, language='C', tinfo = None):\n '''To instantiate a composite loop transformation submodule.'''\n \n orio.module.loop.submodule.submodule.SubModule.__init__(self, perf_params, transf_args, stmt, language)\n\n # 
transformation submodule.\n self.tile_smod = orio.module.loop.submodule.tile.tile.Tile()\n self.perm_smod = orio.module.loop.submodule.permut.permut.Permut()\n self.regt_smod = orio.module.loop.submodule.regtile.regtile.RegTile()\n self.ujam_smod = orio.module.loop.submodule.unrolljam.unrolljam.UnrollJam()\n self.srep_smod = orio.module.loop.submodule.scalarreplace.scalarreplace.ScalarReplace()\n self.brep_smod = orio.module.loop.submodule.boundreplace.boundreplace.BoundReplace()\n self.prag_smod = orio.module.loop.submodule.pragma.pragma.Pragma()\n self.acop_smod = orio.module.loop.submodule.arrcopy.arrcopy.ArrCopy()\n self.cuda_smod = orio.module.loop.submodule.cuda.cuda.CUDA()\n\n #-----------------------------------------------------------------\n\n def __readTransfArgs(self, perf_params, transf_args):\n '''Process the given transformation arguments'''\n\n # all expected argument names\n TILE = 'tile'\n PERMUT = 'permut'\n REGTILE = 'regtile'\n UJAM = 'unrolljam'\n SCALARREP = 'scalarreplace'\n BOUNDREP = 'boundreplace'\n PRAGMA = 'pragma'\n OPENMP = 'openmp'\n VECTOR = 'vector'\n ARRCOPY = 'arrcopy'\n CUDA = 'cuda'\n\n # all expected transformation arguments\n tiles = ([], None)\n permuts = ([], None)\n regtiles = (([],[]), None)\n ujams = (([],[]), None)\n scalarrep = (False, None)\n boundrep = (False, None)\n pragma = ([], None)\n openmp = ((False, ''), None)\n vector = ((False, ''), None)\n arrcopy = ([], None)\n cuda = ((None, False, False, None), None)\n\n # iterate over all transformation arguments\n for aname, rhs, line_no in transf_args:\n \n # evaluate the RHS expression\n try:\n rhs = eval(rhs, perf_params)\n except Exception as e:\n err('orio.module.loop.submodule.composite.composite: %s: failed to evaluate the argument expression: %s\\n --> %s: %s' %\n (line_no, rhs,e.__class__.__name__, e))\n\n # update transformation arguments\n if aname == TILE:\n tiles = (rhs, line_no)\n elif aname == PERMUT:\n permuts = (rhs, line_no)\n elif aname == REGTILE:\n regtiles = (rhs, line_no)\n elif aname == UJAM:\n ujams = (rhs, line_no)\n elif aname == SCALARREP:\n scalarrep = (rhs, line_no)\n elif aname == BOUNDREP:\n boundrep = (rhs, line_no)\n elif aname == PRAGMA:\n pragma = (rhs, line_no)\n elif aname == OPENMP:\n openmp = (rhs, line_no)\n elif aname == VECTOR:\n vector = (rhs, line_no)\n elif aname == ARRCOPY:\n arrcopy = (rhs, line_no)\n elif aname == CUDA:\n cuda = (rhs, line_no)\n\n # unknown argument name\n else:\n err('orio.module.loop.submodule.composite.composite: %s: unrecognized transformation argument: \"%s\"' % (line_no, aname))\n\n # check semantics of the transformation arguments\n (tiles, permuts, regtiles, ujams, scalarrep, boundrep,\n pragma, openmp, vector, arrcopy, cuda) = self.checkTransfArgs(tiles, permuts, regtiles, ujams,\n scalarrep, boundrep, pragma,\n openmp, vector, arrcopy, cuda)\n\n # return information about the transformation arguments\n return (tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma, openmp, vector, arrcopy, cuda)\n\n #-----------------------------------------------------------------\n\n def checkTransfArgs(self, tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma,\n openmp, vector, arrcopy, cuda):\n '''Check the semantics of the given transformation arguments'''\n \n # evaluate arguments for loop tiling\n rhs, line_no = tiles\n if not isinstance(rhs, list) and not isinstance(rhs, tuple):\n err('orio.module.loop.submodule.composite.composite: %s: tile argument must be a list/tuple: %s' % (line_no, rhs))\n targs = []\n for 
e in rhs:\n if (not isinstance(e, list) and not isinstance(e, tuple)) or len(e) != 3:\n err(('orio.module.loop.submodule.composite.composite:%s: element of tile argument must be in the form of ' +\n '(,,): %s') % (line_no, e))\n loop_id, tsize, tindex = e\n loop_id = self.__convertLoopId(loop_id, line_no)\n tsize, tindex = self.tile_smod.checkTransfArgs((tsize, line_no), (tindex, line_no))\n targs.append((loop_id, tsize, tindex))\n tiles = targs\n\n # evaluate arguments for loop permutation/interchange\n rhs, line_no = permuts\n if not isinstance(rhs, list) and not isinstance(rhs, tuple):\n err('orio.module.loop.submodule.composite.composite: %s: permutation argument must be a list/tuple: %s' % (line_no, rhs))\n for e in rhs:\n seq, = self.perm_smod.checkTransfArgs((e, line_no))\n permuts = rhs\n\n # evaluate arguments for register tiling\n rhs, line_no = regtiles\n if not isinstance(rhs, list) and not isinstance(rhs, tuple):\n err('orio.module.loop.submodule.composite.composite: %s: register-tiling argument must be a list/tuple: %s' % (line_no, rhs))\n if len(rhs) != 2:\n err(('orio.module.loop.submodule.composite.composite:%s: register-tiling argument must be in the form of ' +\n '(,): %s') % (line_no, rhs))\n loops, ufactors = rhs\n loops, ufactors = self.regt_smod.checkTransfArgs((loops, line_no), (ufactors, line_no))\n regtiles = (loops, ufactors)\n\n\n # evaluate arguments for unroll/jamming\n rhs, line_no = ujams\n if not isinstance(rhs, list) and not isinstance(rhs, tuple):\n err('orio.module.loop.submodule.composite.composite: %s: unroll/jam argument must be a list/tuple: %s' % (line_no, rhs))\n if len(rhs) != 2:\n err(('orio.module.loop.submodule.composite.composite:%s: unroll/jam argument must be in the form of ' +\n '(,): %s') % (line_no, rhs))\n loops, ufactors = rhs\n for lp,uf in zip(loops, ufactors):\n self.ujam_smod.checkTransfArgs((uf, line_no), (False, line_no))\n ujams = (loops, ufactors)\n\n # evaluate arguments for scalar replacement\n rhs, line_no = scalarrep\n if isinstance(rhs, bool) or rhs == 0 or rhs == 1:\n scalarrep = (rhs, None, None)\n else:\n if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) < 1 or\n len(rhs) > 3 or (not isinstance(rhs[0], bool) and rhs[0] != 0 and rhs[0] != 1)):\n err(('orio.module.loop.submodule.composite.composite:%s: scalar replacement argument must be in the form of ' +\n '((True|False),,): %s') % (line_no, rhs))\n do_scalarrep = rhs[0]\n dtype = None\n prefix = None\n if len(rhs) >= 2:\n dtype = rhs[1]\n if len(rhs) >= 3:\n prefix = rhs[2]\n dtype, prefix = self.srep_smod.checkTransfArgs((dtype, line_no), (prefix, line_no))\n scalarrep = (do_scalarrep, dtype, prefix)\n\n # evaluate arguments for bound replacement\n rhs, line_no = boundrep\n if isinstance(rhs, bool) or rhs == 0 or rhs == 1:\n boundrep = (rhs, None, None)\n else:\n if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) < 1 or\n len(rhs) > 3 or (not isinstance(rhs[0], bool) and rhs[0] != 0 and rhs[0] != 1)):\n err(('orio.module.loop.submodule.composite.composite:%s: bound replacement argument must be in the form of ' +\n '((True|False),,): %s') % (line_no, rhs))\n do_boundrep = rhs[0]\n lprefix = None\n uprefix = None\n if len(rhs) >= 2:\n lprefix = rhs[1]\n if len(rhs) >= 3:\n uprefix = rhs[2]\n lprefix, uprefix = self.brep_smod.checkTransfArgs((lprefix, line_no), (uprefix, line_no))\n boundrep = (do_boundrep, lprefix, uprefix)\n\n # evaluate arguments for pragma directives\n rhs, line_no = pragma\n if not isinstance(rhs, 
list) and not isinstance(rhs, tuple):\n err('orio.module.loop.submodule.composite.composite: %s: pragma argument must be a list/tuple: %s' % (line_no, rhs))\n targs = []\n for e in rhs:\n if (not isinstance(e, list) and not isinstance(e, tuple)) or len(e) != 2:\n err(('orio.module.loop.submodule.composite.composite:%s: element of pragma directive argument must be in the form of ' +\n '(,): %s') % (line_no, e))\n loop_id, pragmas = e\n loop_id = self.__convertLoopId(loop_id, line_no)\n pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))\n targs.append((loop_id, pragmas))\n pragma = targs\n\n # evaluate arguments for openmp pragma directive\n rhs, line_no = openmp\n if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) != 2 or\n not isinstance(rhs[0], bool)):\n err(('orio.module.loop.submodule.composite.composite:%s: element of openmp pragma directive argument must be in the form of ' +\n '((True|False),): %s') % (line_no, rhs))\n do_openmp, pragmas = rhs\n pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))\n openmp = do_openmp, pragmas\n \n # evaluate arguments for vectorization pragma directive\n rhs, line_no = vector\n if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) != 2 or\n not isinstance(rhs[0], bool)):\n err(('orio.module.loop.submodule.composite.composite:%s: element of vectorization pragma directive argument must be in ' +\n 'the form of ((True|False),): %s') % (line_no, rhs))\n do_vector, pragmas = rhs\n pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))\n vector = do_vector, pragmas\n\n # evaluate arguments for array-copy optimization\n rhs, line_no = arrcopy\n if not isinstance(rhs, list) and not isinstance(rhs, tuple):\n err('orio.module.loop.submodule.composite.composite: %s: array-copy argument must be a list/tuple: %s' % (line_no, rhs))\n targs = []\n for e in rhs:\n if ((not isinstance(e, list) and not isinstance(e, tuple)) or len(e) > 5 or\n len(e) < 3 or not isinstance(e[0], bool)):\n err(('orio.module.loop.submodule.composite.composite:%s: element of tile argument must be in the form of ' +\n '((True|False),,,,): %s') %\n (line_no, e))\n dtype = None\n suffix = None\n if len(e) == 3:\n do_acopy, aref, dimsizes = e\n elif len(e) == 4:\n do_acopy, aref, dimsizes, suffix = e\n else:\n do_acopy, aref, dimsizes, suffix, dtype = e\n (aref, suffix,\n dtype, dimsizes)= self.acop_smod.checkTransfArgs((aref, line_no), (suffix, line_no),\n (dtype, line_no), (dimsizes, line_no))\n targs.append((do_acopy, aref, suffix, dtype, dimsizes))\n arrcopy = targs\n\n # evaluate arguments for cuda\n rhs, line_no = cuda\n if not isinstance(rhs, tuple):\n err('orio.module.loop.submodule.cuda.cuda: %s: cuda argument must be a tuple: %s' % (line_no, rhs))\n if len(rhs) != 4:\n err(('orio.module.loop.submodule.cuda.cuda:%s: cuda argument must be in the form of ' +\n '(,,,): %s') % (line_no, rhs))\n cuda = rhs\n \n # return information about the transformation arguments\n return (tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma, openmp, vector, arrcopy, cuda)\n\n #-----------------------------------------------------------------\n\n def applyTransf(self, tiles, permuts, regtiles, ujams, scalarrep, boundrep,\n pragma, openmp, vector, arrcopy, cuda, stmt):\n '''To apply a sequence of transformations'''\n\n # perform the composite transformations\n t = transformation.Transformation(tiles, permuts, regtiles, ujams, scalarrep,\n boundrep, pragma, openmp, vector, arrcopy, cuda, self.stmt)\n\n try:\n transformed_stmt 
= t.transform()\n except Exception as e:\n err('orio.module.loop.submodule.composite.composite.applyTransf : %s:%s' % (e.__class__.__name__, e.message))\n\n debug('SUCCESS: applyTransf on ' + self.stmt.__class__.__name__, obj=self)\n if not transformed_stmt.meta.get('id'):\n transformed_stmt.meta['id'] = 'loop_' + self.stmt.line_no\n\n # return the transformed statement\n return transformed_stmt\n \n #-----------------------------------------------------------------\n\n def __convertLoopId(self, lid, line_no):\n '''\n Convert the loop ID to a list: [True/False, id1, id2, id3, ...].\n The 'True' boolean value indicates that at least one of the loop ID must exist in the\n statement body. A 'False' value means that it is OK if no loop IDs exist in the statement\n body.\n The sequence of IDs imply that \"apply optimizations on id1 (if exist), if not, apply\n optimizations on id2 (if exist), and so on and so forth\".\n '''\n\n # check if the loop ID is well-formed\n if isinstance(lid, str):\n pass\n elif (isinstance(lid, tuple) or isinstance(lid, list)) and len(lid) > 0:\n for i in lid:\n if not isinstance(i, str):\n err('orio.module.loop.submodule.composite.composite: %s: loop ID must be a string: %s' % (line_no, i))\n else:\n err('orio.module.loop.submodule.composite.composite: %s: invalid loop ID representation: %s' % (line_no, lid)) \n\n # create the loop ID abstraction\n lids = []\n if isinstance(lid, str):\n lids.append(True)\n lids.append(lid)\n elif (isinstance(lid, tuple) or isinstance(lid, list)) and len(lid) > 0:\n lids.append(isinstance(lid, tuple))\n lids.extend(lid)\n else:\n err('orio.module.loop.submodule.composite.composite internal error: '+\n 'incorrect representation of the loop IDs')\n \n return lids\n\n #-----------------------------------------------------------------\n\n def transform(self):\n '''To apply various loop transformations'''\n # debugging info\n #debug(\"perf_params=\" + str(self.perf_params), self,level=6)\n \n # read all transformation arguments\n args_info = self.__readTransfArgs(self.perf_params, self.transf_args)\n (tiles, permuts, regtiles, ujams, scalarrep,\n boundrep, pragma, openmp, vector, arrcopy, cuda) = args_info\n \n # perform all transformations\n try:\n transformed_stmt = self.applyTransf(tiles, permuts, regtiles, ujams, scalarrep, boundrep,\n pragma, openmp, vector, arrcopy, cuda, self.stmt)\n except Exception as e:\n err('orio.module.loop.submodule.composite.composite : error transforming \"%s\"\\n --> %s:%s' % \\\n (self.stmt, e.__class__.__name__, e.message))\n\n if not transformed_stmt.meta.get('id') and self.stmt.meta.get('id'):\n transformed_stmt.meta['id'] = 'loop_' + self.stmt.meta['id']\n\n # return the transformed statement\n return transformed_stmt\n\n\n\n","repo_name":"brnorris03/Orio","sub_path":"orio/module/loop/submodule/composite/composite.py","file_name":"composite.py","file_ext":"py","file_size_in_byte":17665,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"41"} +{"seq_id":"21223952918","text":"#Algoritham to create a program which performs three essential operations\n#Open the file and process each line.\n#Either add each word to the dictionary with a frequency of 1 or update the word’s count by 1.\n#Nicely print the output, in this case from high to low frequency.\n#DSC510-T303 Introduction to Programming (2205-1)\n#Created by Rajkumar Kuppuswami\n#Created on 05/01/2020\n#Program to perform processing the line by adding each word to the dictionary.\n#Once values are added need 
to format as list to sort the values by desc.\n\nimport collections\n\ndef main():\n dictionary = dict()\n#1st method to open the file\n# with open('Gettysburg.txt', 'r') as file_read:\n# Read the text file\n file_read = open('Gettysburg.txt', 'r')\n# Process word one by one\n for line in file_read:\n process_line(line, dictionary)\n print(\"Length of Dictionary is : {}\".format(len(dictionary.keys())))#Total Lenght of the dictionary\n\n print(pretty_print(dictionary))#Output of each word with count\n\n\ndef add_word(word, word_count):#Adding the words to the dictionary with count of words\n if word in word_count: #To validate the words to avoid duplicate\n word_count[word] = word_count[word]+1 #counting the number of words\n else:\n word_count[word] =1\n\ndef process_line (line, dictionary):\n line = line.lower() ##convert to lower case\n line = line.strip()# remove /n line and unwanted space\n words = line.split(\" \")#Split the words from the text by line\n for word in words:\n add_word(word, dictionary)\n\n\ndef pretty_print(dictionary):\n table = collections.defaultdict(list)\n for a , b in dictionary.items():\n table[b].append(a)\n sort_table = sorted(table.items(), reverse=True) # Sort list of tuple by count in descending order.\n for item in sort_table:\n count = item[0]\n for word in item[1]:\n print(word + \" \" * (30 - len(word)) + str(count))\n\nmain()\n\n\n","repo_name":"dlingerfelt/DSC510Spring2020","sub_path":"Kuppuswami_DSC510/Week8.1.py","file_name":"Week8.1.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"41"} +{"seq_id":"73557965243","text":"import random\n\n# the program will ask a user to enter in a question\n# the program will respond with words of wisdom\n# (randomly chosen)\n\nANSWERS = [\n \"It is certain.\",\n \"It is decidedly so.\",\n \"Without a doubt.\",\n \"Yes - definitely.\",\n \"You may rely on it.\",\n \"As I see it, yes.\",\n \"Most likely.\",\n \"Outlook good.\",\n \"Yes.\",\n \"Signs point to yes.\",\n \"Reply hazy, try again.\",\n \"Ask again later.\",\n \"Better not tell you now.\",\n \"Cannot predict now.\",\n \"Concentrate and ask again.\",\n \"Don't count on it.\",\n \"My reply is no.\",\n \"My sources say no.\",\n \"Outlook not so good.\",\n \"Very doubtful.\",\n]\n\n\ndef ask_question():\n return input(\"Ask the wizard a question (ENTER to quit): \")\n\n\ndef give_response():\n return random.choice(ANSWERS)\n\n\nwhile ask_question() != '':\n print(give_response())\n","repo_name":"NSCC-PROG1700-2019/course-notes","sub_path":"functions/magic_8_ball.py","file_name":"magic_8_ball.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"2529970079","text":"# fmtlib Conan package\n# Dmitriy Vetutnev, ODANT 2020\n\n\nfrom conans import ConanFile, CMake, tools\nimport os\n\n\nclass GoogletestConan(ConanFile):\n name = \"fmt\"\n version = \"10.1.1+0\"\n license = \"https://raw.githubusercontent.com/fmtlib/fmt/master/LICENSE.rst\"\n description = \"{fmt} is an open-source formatting library for C++. 
It can be used as a safe and fast alternative to (s)printf and iostreams.\"\n url = \"https://github.com/odant/conan-fmt\"\n settings = {\n \"os\": [\"Windows\", \"Linux\"],\n \"compiler\": [\"Visual Studio\", \"gcc\"],\n \"build_type\": [\"Debug\", \"Release\"],\n \"arch\": [\"x86\", \"x86_64\", \"mips\", \"armv7\"]\n }\n options = {\n \"with_unit_tests\": [True, False],\n \"ninja\": [True, False]\n }\n default_options = {\n \"with_unit_tests\": False,\n \"ninja\": True\n }\n generators = \"cmake\"\n exports_sources = \"src/*\", \"CMakeLists.txt\"\n no_copy_source = True\n build_policy = \"missing\"\n\n def build_requirements(self):\n if self.options.ninja:\n self.build_requires(\"ninja/[>=1.9.0]\")\n\n def build(self):\n cmake = CMake(self, msbuild_verbosity='normal')\n cmake.verbose = True\n cmake.definitions[\"FMT_INSTALL\"] = \"ON\"\n cmake.definitions[\"FMT_DOC\"] = \"OFF\"\n if self.options.with_unit_tests:\n cmake.definitions[\"FMT_TEST\"] = \"ON\"\n if self.settings.get_safe(\"compiler.runtime\") in (\"MT\", \"MTd\"):\n cmake.definitions[\"MSVC_BUILD_STATIC\"] = \"ON\"\n cmake.configure()\n cmake.build()\n if self.options.with_unit_tests:\n if cmake.is_multi_configuration:\n self.run(\"ctest --output-on-failure --build-config %s\" % build_type)\n else:\n self.run(\"ctest --output-on-failure\")\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib/pkgconfig\"))\n\n def package(self):\n self.copy(\"*fmt.pdb\", dst=\"bin\", keep_path=False)\n\n def package_id(self):\n self.info.options.with_unit_tests = \"any\"\n self.info.options.ninja = \"any\"\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n\n","repo_name":"odant/conan-fmt","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"2753046573","text":"from typing import *\n\nclass Solution:\n def maximumProduct(self, nums: List[int], k : int) -> int:\n mod = 10 ** 9 + 7\n # 如果是给原数组加1 那么应该是加最小的数,给最小数加1 相当于给总乘积加的数最大\n # 原数组的总乘积可以看成a * b 如果a是最小值 那么新的总乘积就是(a + 1) * b\n # 相当于多加了一个b\n # 如果给原数组加2,如果是给同一个数加2 那么肯定是给最小值加2,如果是给不同数加1\n # 那么应该是给最小的两个值加1,由此可知这题要用贪心(说的有点不严谨,但是意思应该比较清楚了)\n nums.sort()\n n = len(nums)\n if n == 1: return nums[0] + k\n # diff[i] = nums[i + 1] - nums[i]\n diff = [nums[i + 1] - nums[i] for i in range(n - 1)]\n # 定位k的位置\n position = 0\n while position < n - 1:\n # diff[position] 是 nums[position + 1] 到 nums[position]的差,乘以position + 1是因为\n # 前面nums[0 ... position]都被填平成了nums[position] 那么就要有position + 1个数要填成nums[position + 1]\n if k < diff[position] * (position + 1):\n break\n k -= diff[position] * (position + 1)\n position += 1\n ans = 1\n # position 后面的数字\n for i in range(position + 1, n):\n ans = (ans * nums[i]) % mod\n # 0 .. position - 1已经填成position了 0 .. 
position 每个加k // (position + 1) 剩余的 k % (position + 1)\n # 也分摊出去\n a = k // (position + 1)\n b = k % (position + 1)\n base = nums[position]\n # b个数字是要加上a + 1,其余数字是加上a\n for _ in range(b):\n ans = (ans * (a + base + 1)) % mod\n for _ in range(position + 1 - b):\n ans = (ans * (a + base)) % mod\n return ans\n\n# 下面是大佬的解法,用堆来模拟每次操作\nclass Solution1:\n def maximumProduct(self, nums: List[int], k: int) -> int:\n from heapq import heapify, heapreplace\n MOD = 10 ** 9 + 7\n heapify(nums)\n while k:\n # 这里的heapreplace方法 相当于heappop()加上heappush()方法\n heapreplace(nums, nums[0] + 1)\n k -= 1\n ans = 1\n for num in nums:\n ans = ans * num % MOD\n return ans\n\n\ndef main():\n sol = Solution()\n _input = ([6, 3, 3, 2], 2)\n _output = sol.maximumProduct(*_input)\n print(_output)\n\nif __name__ == '__main__':\n main()\n","repo_name":"myalos/leetcode_contest","sub_path":"288/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"3150281386","text":"from datetime import datetime\nfrom typing import Union, Any\nfrom flask_migrate import stamp, upgrade\nfrom sqlalchemy.exc import OperationalError, InvalidRequestError\nfrom sqlalchemy.orm import Query\nfrom mcserver.app import db\nfrom mcserver.app.models import ResourceType\nfrom mcserver.config import Config\nfrom mcserver.models_auto import Corpus, Exercise, UpdateInfo, LearningResult\n\n\nclass DatabaseService:\n @staticmethod\n def commit():\n \"\"\"Commits the last action to the database and, if it fails, rolls back the current session.\"\"\"\n try:\n db.session.commit()\n except (OperationalError, InvalidRequestError):\n db.session.rollback()\n raise\n\n @staticmethod\n def has_table(table: str) -> bool:\n \"\"\"Checks if a table is present in the database or not.\"\"\"\n return db.engine.dialect.has_table(db.engine, table)\n\n @staticmethod\n def init_db_alembic() -> None:\n \"\"\"In Docker, the alembic version is not initially written to the database, so we need to set it manually.\"\"\"\n if not DatabaseService.has_table(Config.DATABASE_TABLE_ALEMBIC):\n stamp(directory=Config.MIGRATIONS_DIRECTORY)\n upgrade(directory=Config.MIGRATIONS_DIRECTORY)\n\n @staticmethod\n def init_db_update_info() -> None:\n \"\"\"Initializes update entries for all resources that have not yet been created.\"\"\"\n if DatabaseService.has_table(Config.DATABASE_TABLE_UPDATEINFO):\n for rt in ResourceType:\n ui_cts: UpdateInfo = DatabaseService.query(\n UpdateInfo, filter_by=dict(resource_type=rt.name), first=True)\n if ui_cts is None:\n ui_cts = UpdateInfo.from_dict(resource_type=rt.name, last_modified_time=1,\n created_time=datetime.utcnow().timestamp())\n db.session.add(ui_cts)\n DatabaseService.commit()\n\n @staticmethod\n def query(table: Union[Corpus, Exercise, LearningResult, UpdateInfo], filter_by: dict = None,\n first: bool = False) -> Any:\n \"\"\"Executes a query on the database and rolls back the session if errors occur.\"\"\"\n try:\n ret_val: Query = db.session.query(table)\n if filter_by:\n ret_val = ret_val.filter_by(**filter_by)\n ret_val = ret_val.first() if first else ret_val.all()\n DatabaseService.commit()\n return ret_val\n except:\n db.session.rollback()\n return 
None\n","repo_name":"korpling/machina-callida","sub_path":"mc_backend/mcserver/app/services/databaseService.py","file_name":"databaseService.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"17059332676","text":"#1. print even_ver_if\r\n'''for i in range(1,101) :\r\n if i%2 == 0 :\r\n print(i, end=\" \")'''\r\n\r\n#1. print even_ver_none_if\r\n'''for i in range(2, 101, 2) :\r\n print(i, end=\" \")'''\r\n\r\n#2. 복리이자율\r\n'''mymoney=10000000\r\nyear=0\r\nwhile mymoney<= 20000000 :\r\n mymoney += mymoney*0.07\r\n year += 1\r\nprint(year)'''\r\n\r\n#3. 구구단\r\n'''for i in range(1, 10) :\r\n for j in range(1, 10) :\r\n print (i*j ,end=\" \")'''\r\n\r\n#4. factorial_ver_for\r\n'''n=int(input(\"몇 팩토리얼이 궁금하느냐:\"))\r\nfor i in range(1, n) :\r\n n *= i\r\nprint(n)'''\r\n\r\n#4. factorial_ver_while\r\n'''n=int(input(\"몇 팩토리얼이 궁금하느냐:\"))\r\nfac=1\r\ni=1\r\nwhile i <= n :\r\n fac *= i\r\n i += 1\r\nprint(fac)'''\r\n\r\n#5. dec to bin\r\nn=int(input(\"이진수로 변환시킬 십진수를 입력하시오:\"))\r\nbi=\"\"\r\n\r\nwhile n > 1 :\r\n rest = n%2\r\n n = n//2\r\n bi= str(rest) +bi\r\nbi=str(n) + bi\r\nprint(bi)\r\n\r\n#6. 최대공약수\r\n'''x=int(input(\"x:\"))\r\ny=int(input(\"y:\"))\r\na=[]\r\nfor i in range(1, max(x,y)) :\r\n if x%i == 0 and y%i ==0 :\r\n a.append(i)\r\nprint(\"최대공약수는\", max(a),\"입니다.\")'''\r\n \r\n\r\n#7. quiz\r\n'''a=int(input(\"87+36의 값은?\"))\r\nwhile True :\r\n if a==123 :\r\n print(\"맞았습니다.\")\r\n break\r\n else :\r\n print(\"틀렸습니다.\")\r\n a=int(input(\"87+36의 값은?\"))'''\r\n","repo_name":"kimgeonsu/python_19s","sub_path":"4장 반복.py","file_name":"4장 반복.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"26216402025","text":"import sys\nimport unittest\nfrom appium import webdriver\nfrom appium.options.common.base import AppiumOptions\nfrom appium.webdriver.common.appiumby import AppiumBy\nfrom appium.options.common.app_option import AppOption\nfrom selenium.webdriver.common.keys import Keys\n\n\nclass ATSPIOptions(AppiumOptions, AppOption):\n pass\n\n\nclass TimelineTest(unittest.TestCase):\n\n def setUp(self):\n options = ATSPIOptions()\n options.app = tokodon_offline_path\n self.driver = webdriver.Remote(\n command_executor='http://127.0.0.1:4723',\n options=options)\n\n def tearDown(self):\n self.driver.get_screenshot_as_file(\"failed_test_shot_{}.png\".format(self.id()))\n self.driver.quit()\n\n def test_status_type(self):\n self.assertTrue(self.driver.find_element(by='description', value=\"Normal Status\"))\n self.assertTrue(self.driver.find_element(by='description', value=\"Spoiler Status\"))\n\n def test_favourite_interactions(self):\n favouriteButton=self.driver.find_element(by='description',value=\"Favourite\")\n favouriteButton.click()\n self.assertTrue(self.driver.find_element(by='description', value=\"Favourited\"))\n\n def test_bookmark_interactions(self):\n bookmarkButton=self.driver.find_element(by='description',value=\"Bookmark\")\n bookmarkButton.click()\n self.assertTrue(self.driver.find_element(by='description', value=\"Bookmarked\"))\n\n def test_boost_interactions(self):\n boostButton=self.driver.find_element(by='description',value=\"Boost\")\n boostButton.click()\n self.assertTrue(self.driver.find_element(by='description', value=\"Boosted\"))\n\n def test_status_media(self):\n searchElement = self.driver.find_element(by=AppiumBy.NAME, value=\"Home\")\n 
searchElement.send_keys(Keys.DOWN)\n searchElement.send_keys(Keys.DOWN)\n searchElement.send_keys(Keys.DOWN)\n self.assertTrue(self.driver.find_element(by='description', value=\"Status with image attachment\"))\n self.assertTrue(self.driver.find_element(by='description', value=\"Status with Video attachment\"))\n self.assertTrue(self.driver.find_element(by='description', value=\"Status with GifV attachment\"))\n\n\nif __name__ == '__main__':\n tokodon_offline_path = sys.argv[1]\n sys.argv.pop()\n unittest.main()","repo_name":"KDE/tokodon","sub_path":"src/autotests/appiumtests/TimelineTest.py","file_name":"TimelineTest.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"41"} +{"seq_id":"62072612","text":"#!/usr/bin/env python\n#################################################################################\n## This option file helps you save time by running in sequence ##\n## RanLengthAna.py and RadLengthAna_VELO.py and merging the outputs. ##\n## You can run this simply by \"python rad_length_scan.py\" ##\n## Twiki at: https://twiki.cern.ch/twiki/bin/view/LHCb/RadLengthStudies ##\n## ##\n## @author : L.Pescatore ##\n## @date : last modified on 2015-06-16 ##\n#################################################################################\n\nimport sys\nimport os\n\nbase = os.environ[\"SIMCHECKSROOT\"] + \"/options/RadLength/\"\nfrom RadLengthMakePlots import makePlots\n\npwd = os.environ['PWD']\noutputpath = pwd\nout = 'Rad_merged.root'\nif(len(sys.argv) == 2):\n out = sys.argv[1]\n\nos.system(\"mkdir -p plots\")\ncmd = \"gaudirun.py {base}/MaterialEvalGun.py {base}/Gauss-Job.py {base}\".format(base=base)\nos.system(cmd+\"RadLengthAna.py\")\nos.system(cmd+\"RadLengthAna_VELO.py\")\n \noutput=outputpath+'/'+out\nmerge_command = 'hadd -f {output} {pwd}/Rad.root {pwd}/Rad_VELO.root'.format(output=output, pwd=pwd)\nos.system(merge_command)\n\nmakePlots(out,\"plots/\",\"rad\")\nmakePlots(out,\"plots/\",\"inter\")\n\n \n","repo_name":"stonish/lhcb-software","sub_path":"Gauss/Sim/SimChecks/scripts/rad_length_scan.py","file_name":"rad_length_scan.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"41"} +{"seq_id":"36825740582","text":"\"\"\"Add Ingredients table\n\nRevision ID: 28b34376bb94\nRevises: d3685ce0e0db\nCreate Date: 2021-10-25 21:24:17.530543\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '28b34376bb94'\ndown_revision = 'd3685ce0e0db'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n \"ingredients\",\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"name\", sa.String(40), nullable=False),\n sa.Column(\"unit\", sa.String(25)),\n sa.Column(\"quantity\", sa.Integer(), nullable=True),\n sa.Column(\"recipe_id\", sa.Integer(), sa.ForeignKey(\"recipe.id\"))\n )\n\n\ndef downgrade():\n op.drop_table(\"ingredients\")\n","repo_name":"alucardthefish/RecipesApi","sub_path":"alembic/versions/28b34376bb94_add_ingredients_table.py","file_name":"28b34376bb94_add_ingredients_table.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"42989466623","text":"from geopy.geocoders import Nominatim\nfrom geopy.distance import geodesic\nfrom pprint import pprint\n\ngeolocator = Nominatim(user_agent=\"snakes-distance-geopy\")\nschool 
= geolocator.geocode(\"raffles institution singapore\")\n\nprint('Snakey school is located at {}'.format(school.address))\nprint('Snakey school\\'s coordinates are ({}, {}) '.format(\n school.latitude, school.longitude))\n\nhome = geolocator.geocode(\"58 college green singapore\")\nprint('Home is located at {}'.format(home.address))\n\nhome_coordinates = home.latitude, home.longitude\nschool_coordinates = school.latitude, school.longitude\ndistance = geodesic(home_coordinates, school_coordinates).km\nprint('The distance from home to school is {:.2f} km'.format(distance))","repo_name":"siowyisheng/30-things-in-python","sub_path":"22-geo-calculate-distance/snakes_distance.py","file_name":"snakes_distance.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"41"} +{"seq_id":"8671049184","text":"class Game:\n HALF_TIME = 45\n FULL_TIME = 90\n\n def __init__(self, game_speed):\n self.score = [0, 0]\n self.game_speed = int(game_speed)\n self.time_elapsed = 0\n self.ball_possessor = None\n self.ball_position = [0,0]\n self.teams = []\n\n def increment_goal(self, team):\n self.score[team] += 1\n return\n\n def tick(self):\n self.time_elapsed += 1 * self.game_speed\n return\n\n def update(self):\n for team in self.teams:\n team.update()\n return\n","repo_name":"victorlalo/CryptoBall","sub_path":"Python/game/game_simulation.py","file_name":"game_simulation.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"14197750927","text":"\nfrom math import log\nimport operator\n\n\n'''\n文档快速生成注释的方法介绍,首先我们要用到__all__属性\n在Py中使用为导出__all__中的所有类、函数、变量成员等\n在模块使用__all__属性可避免相互引用时命名冲突\n'''\n__all__ = ['DecisionTree', 'unique']\n\nclass DecisionTree(object):\n '''\n C4.5 决策树的实现\n '''\n \n def createDataSet2(self):\n dataSet = [[1, 1, 'yes'],\n [1, 1, 'yes'],\n [1, 0, 'no'],\n [0, 1, 'no'],\n [0, 1, 'no']]\n labels = ['no surfacing','flippers']\n return dataSet,labels\n\n def createDataSet(self):\n dataset = [[0, 0, 0, 0, 'N'], \n [0, 0, 0, 1, 'N'], \n [1, 0, 0, 0, 'Y'], \n [2, 1, 0, 0, 'Y'], \n [2, 2, 1, 0, 'Y'], \n [2, 2, 1, 1, 'N'], \n [1, 2, 1, 1, 'Y']]\n labels = ['outlook', 'temperature', 'humidity', 'windy']\n return dataset, labels\n\n def unique(self,dataset):\n '''\n Desc:无论是最初未划分前的数据,还是划分后的数据,信息熵的计算都是根据最后一列计算的\n 具体来说,依赖于最后一列的统计量,统计最后一列{取值:个数}\n Args:\n Returns:\n '''\n d = {}\n\n if(len(dataset) > 0 and isinstance(dataset[0],list)):\n column = [example[-1] for example in dataset]\n else:\n column = dataset\n for v in column:\n if(v not in d.keys()): d[v] = 0\n d[v] += 1\n return d\n\n # 根据类别统计计算gini指数,gini越大,不纯度越高,选择gini最小的作为切分点\n def gini(self,dataset):\n sample_num = len(dataset)\n result = 0.0\n for k,v in self.unique(dataset).items():\n prob = v / sample_num\n result += prob ** 2\n return 1 - result\n \n\n # 根据(feature_index,value)将数据集切分成两份\n def split_dataset(self,dataset, feature_index, value):\n list1 = []\n list2 = []\n if(isinstance(value,int) or isinstance(value,float)): #for int and float type\n for row in dataset:\n if (row[feature_index] >= value):list1.append(row)\n else:list2.append(row)\n \n return list1,list2\n\n\n # desc:\n # 维护一个未使用特征索引列表,\n # 对于未使用的特征,计算gini系数,选择最佳特征,加入到表示决策树的嵌套字典中\n # 参数1 要划分的数据集,参数2 未使用特征索引列表\n def choose_best_feature(self,dataset,rest_features_index):\n \n #当前节点数据集的信息熵(可能是根,也可能是叶子节点)\n base_gini = self.gini(dataset)\n \n # gini系数\n best_gini_gain = 0.0\n best_feature_index = -1# 初始化 \n 
\n rows_length = len(dataset)\n\n #对于未使用的特征,遍历计算信息增益率\n for feature_index in rest_features_index:\n #当前列对应的特征取值列表\n feature_value_list = [sample[feature_index] for sample in dataset]\n unique_val = set(feature_value_list)\n new_entropy = 0.0\n \n #计算条件熵,已知特征为feature_index\n for value in unique_val:\n #按照第feature_index的value列划分数据集\n left,right = self.split_dataset(dataset,feature_index,value)\n p = len(left) / rows_length\n new_gini = p * self.gini(left) + (1-p) * self.gini(right)\n new_gini_gain = base_gini - new_gini\n \n\n print('feature_index:{0},new_gini_gain:{1}'.format(feature_index,new_gini_gain))\n \n if(new_gini_gain > best_gini_gain):\n best_gini_gain = new_gini_gain\n best_feature_index = feature_index\n \n print('best_feature_index:{},best_gini_gain:{}'.format(best_feature_index,best_gini_gain))\n return best_feature_index\n\n \n #多数表决\n def majorityCnt(self,classList):\n classCount = {}\n for vote in classList:\n if vote not in classCount.keys(): classCount[vote] = 0\n classCount[vote] += 1\n sortedClassCount = sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)\n return sortedClassCount[0][0]\n\n # 递归的构建决策树\n # args:dataset 数据,rest_labels 剩余的标签索引列表 labels 标签真实值,仅为了构建树使用\n def build_tree(self,dataset,rest_labels,labels):\n\n class_list = [sample[-1] for sample in dataset]\n\n # 为啥会有空的列表呢?\n if(len(class_list) == 0):\n return\n #终止条件\n # 类别完全相同则停止继续划分,返回类别\n if(class_list.count(class_list[0]) == len(class_list)):\n print('终止条件1') \n return class_list[0]\n\n # 遍历完所有特征时返回出现次数最多的\n if len(rest_labels) == 0: \n print('终止条件2') \n return self.majorityCnt(class_list)\n \n print('rest_labels=>{0}'.format(rest_labels))\n \n ### 选择最佳特征,划分数据集\n best_feature_index = self.choose_best_feature(dataset,rest_labels)\n best_feature_label = labels[best_feature_index]\n myTree = {best_feature_label:{}}\n \n #在特征索引列表中,删除指定特征索引\n rest_labels.remove(best_feature_index)\n print('best_feature_index=>{0},rest_labels=>{1}'.format(best_feature_index,rest_labels))\n \n #递归调用\n featValues = [example[best_feature_index] for example in dataset]\n uniqueVals = set(featValues)\n for value in uniqueVals: \n left,right = self.split_dataset(dataset, best_feature_index, value) \n myTree[best_feature_label][value] = self.build_tree(left,rest_labels,labels)\n myTree[best_feature_label][value] = self.build_tree(right,rest_labels,labels)\n return myTree \n\n # 预测单个样本的类别\n def predict(self,inputTree,featLabels,testVec):#根据已有的决策树,对给出的数据进行分类\n firstStr = list(inputTree.keys())[0]\n secondDict = inputTree[firstStr]\n featIndex = featLabels.index(firstStr)#这里是将标签字符串转换成索引数字\n #处理某个特征取值不全的情况\n if(testVec[featIndex] not in secondDict.keys()):\n return 'e'\n for key in secondDict.keys(): \n if(testVec[featIndex] == key):#如果key值等于给定的标签时\n if(type(secondDict[key]).__name__ == 'dict'):\n classLabel = self.predict(secondDict[key],featLabels,testVec)#递归调用分类\n else: \n classLabel = secondDict[key]#此数据的分类结果\n return classLabel\n \n # 预测样本列表的类别\n def predict_list(self,input_tree,feature_labels,test_vec_list):\n class_list = []\n for test_vec in test_vec_list:\n print(test_vec)\n c = self.predict(input_tree,feature_labels,test_vec)\n print(c)\n class_list.append(c)\n return class_list\n\n\n\n # 计算预测准确率 \n\n\n\n\ndt = DecisionTree()\ndataset,labels = dt.createDataSet2()\nrest_labels = list(range(0,len(labels)))\nmyTree= dt.build_tree(dataset,rest_labels,labels)\n\nprint(myTree)\n'''\nimport json\nprint(json.dumps(myTree,indent=4))\n\n\nsample = [2, 2, 1, 0]\npred = 
dt.predict(myTree,labels,sample)\nprint(\"sample=>{},pred=>{}\".format(sample,pred))\n\n\n'''","repo_name":"SheldonWong/machinelearning","sub_path":"6-DecisionTree/6.3-CART/CARTClassifier.py","file_name":"CARTClassifier.py","file_ext":"py","file_size_in_byte":7858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"3008671809","text":"\"\"\"\nFixtures for fanout\npurpose : one node to many nodes\nThe dataset has\n- 3 levels : L1, L2, L3\n- 2 resources: RI, RO\n- R in/out are connected\n\"\"\"\nimport pytest\nfrom os import path\n\nimport pandas as pd\n\n# module import\nfrom dependencynet.schema import SchemaBuilder\nfrom dependencynet.model import ModelBuilder\n\nfrom dependencynet.network.graphbuilder import LevelNode, InputNode, OutputNode\n\n\n@pytest.fixture\ndef schema_fanout():\n schema = SchemaBuilder().level('L1', 'L1') \\\n .level('L2', 'L2') \\\n .level('L3', 'L3') \\\n .resource('RI', 'RI', role='INPUT', connect_id_name='R') \\\n .resource('RO', 'RO', role='OUTPUT', connect_id_name='R') \\\n .connect('RO', 'RI') \\\n .render()\n return schema\n\n\n@pytest.fixture(scope=\"session\")\ndef compact_columns_fanout():\n columns = ['L1', 'L2', 'L3', 'RO', 'RI']\n return columns\n\n\n@pytest.fixture\ndef source_data_fanout(schema_fanout, compact_columns_fanout):\n filename = path.join('tests', 'resources', 'data', 'compact', 'fanout.csv')\n data = pd.read_csv(filename, delimiter=';')\n\n df = pd.DataFrame(data, columns=compact_columns_fanout)\n return df\n\n\n@pytest.fixture\ndef model_fanout(source_data_fanout, schema_fanout):\n model = ModelBuilder().from_compact(source_data_fanout) \\\n .with_schema(schema_fanout) \\\n .render()\n return model\n\n\n@pytest.fixture\ndef class_mapping_fanout():\n return {'L1': L1Node, 'L2': L2Node, 'L3': L3Node,\n 'RO': RONode, 'RI': RINode}\n\n\n# networkx classes\n\nclass L1Node(LevelNode):\n def __init__(self, properties):\n super().__init__(properties, 'L1')\n\n\nclass L2Node(LevelNode):\n def __init__(self, properties):\n super().__init__(properties, 'L2')\n\n\nclass L3Node(LevelNode):\n def __init__(self, properties):\n super().__init__(properties, 'L3')\n\n\nclass RINode(InputNode):\n def __init__(self, properties):\n super().__init__(properties, 'RI', 'R')\n\n\nclass RONode(OutputNode):\n def __init__(self, properties):\n super().__init__(properties, 'RO', 'R')\n","repo_name":"cfalguiere/dependencynet","sub_path":"tests/scenario/fanout/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"71339995964","text":"import numpy as np\nimport os\nimport tempfile\nimport unittest\nimport cv2\nimport torch\n\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.structures import BoxMode, Instances, RotatedBoxes\nfrom detectron2.utils.visualizer import ColorMode, Visualizer\n\n\nclass TestVisualizer(unittest.TestCase):\n def _random_data(self):\n H, W = 100, 100\n N = 10\n img = np.random.rand(H, W, 3) * 255\n boxxy = np.random.rand(N, 2) * (H // 2)\n boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1)\n\n def _rand_poly():\n return np.random.rand(3, 2).flatten() * H\n\n polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)]\n\n mask = np.zeros_like(img[:, :, 0], dtype=bool)\n mask[:40, 10:20] = 1\n\n labels = [str(i) for i in range(N)]\n return img, boxes, labels, polygons, [mask] * N\n\n @property\n def 
metadata(self):\n return MetadataCatalog.get(\"coco_2017_train\")\n\n def test_draw_dataset_dict(self):\n img = np.random.rand(512, 512, 3) * 255\n dic = {\n \"annotations\": [\n {\n \"bbox\": [\n 368.9946492271106,\n 330.891438763377,\n 13.148537455410235,\n 13.644708680142685,\n ],\n \"bbox_mode\": BoxMode.XYWH_ABS,\n \"category_id\": 0,\n \"iscrowd\": 1,\n \"segmentation\": {\n \"counts\": \"_jh52m?2N2N2N2O100O10O001N1O2MceP2\",\n \"size\": [512, 512],\n },\n }\n ],\n \"height\": 512,\n \"image_id\": 1,\n \"width\": 512,\n }\n v = Visualizer(img)\n v.draw_dataset_dict(dic)\n\n v = Visualizer(img, self.metadata)\n v.draw_dataset_dict(dic)\n\n def test_draw_rotated_dataset_dict(self):\n img = np.random.rand(512, 512, 3) * 255\n dic = {\n \"annotations\": [\n {\n \"bbox\": [\n 368.9946492271106,\n 330.891438763377,\n 13.148537455410235,\n 13.644708680142685,\n 45.0,\n ],\n \"bbox_mode\": BoxMode.XYWHA_ABS,\n \"category_id\": 0,\n \"iscrowd\": 1,\n }\n ],\n \"height\": 512,\n \"image_id\": 1,\n \"width\": 512,\n }\n v = Visualizer(img, self.metadata)\n v.draw_dataset_dict(dic)\n\n def test_overlay_instances(self):\n img, boxes, labels, polygons, masks = self._random_data()\n\n v = Visualizer(img, self.metadata)\n output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()\n self.assertEqual(output.shape, img.shape)\n\n # Test 2x scaling\n v = Visualizer(img, self.metadata, scale=2.0)\n output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()\n self.assertEqual(output.shape[0], img.shape[0] * 2)\n\n # Test overlay masks\n v = Visualizer(img, self.metadata)\n output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image()\n self.assertEqual(output.shape, img.shape)\n\n def test_overlay_instances_no_boxes(self):\n img, boxes, labels, polygons, _ = self._random_data()\n v = Visualizer(img, self.metadata)\n v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image()\n\n def test_draw_instance_predictions(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n inst.pred_masks = torch.from_numpy(np.asarray(masks))\n\n v = Visualizer(img)\n v.draw_instance_predictions(inst)\n\n v = Visualizer(img, self.metadata)\n v.draw_instance_predictions(inst)\n\n def test_BWmode_nomask(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n\n v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW)\n v.draw_instance_predictions(inst)\n\n # check that output is grayscale\n inst = inst[:0]\n v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW)\n output = v.draw_instance_predictions(inst).get_image()\n self.assertTrue(np.allclose(output[:, :, 0], output[:, :, 1]))\n self.assertTrue(np.allclose(output[:, :, 0], output[:, :, 2]))\n\n def test_draw_empty_mask_predictions(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n inst.pred_masks = 
torch.from_numpy(np.zeros_like(np.asarray(masks)))\n\n v = Visualizer(img, self.metadata)\n v.draw_instance_predictions(inst)\n\n def test_correct_output_shape(self):\n img = np.random.rand(928, 928, 3) * 255\n v = Visualizer(img, self.metadata)\n out = v.output.get_image()\n self.assertEqual(out.shape, img.shape)\n\n def test_overlay_rotated_instances(self):\n H, W = 100, 150\n img = np.random.rand(H, W, 3) * 255\n num_boxes = 50\n boxes_5d = torch.zeros(num_boxes, 5)\n boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W)\n boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H)\n boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))\n boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))\n boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)\n rotated_boxes = RotatedBoxes(boxes_5d)\n labels = [str(i) for i in range(num_boxes)]\n\n v = Visualizer(img, self.metadata)\n output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image()\n self.assertEqual(output.shape, img.shape)\n\n def test_draw_no_metadata(self):\n img, boxes, _, _, masks = self._random_data()\n num_inst = len(boxes)\n inst = Instances((img.shape[0], img.shape[1]))\n inst.pred_classes = torch.randint(0, 80, size=(num_inst,))\n inst.scores = torch.rand(num_inst)\n inst.pred_boxes = torch.from_numpy(boxes)\n inst.pred_masks = torch.from_numpy(np.asarray(masks))\n\n v = Visualizer(img, MetadataCatalog.get(\"asdfasdf\"))\n v.draw_instance_predictions(inst)\n\n def test_draw_binary_mask(self):\n img, boxes, _, _, masks = self._random_data()\n img[:, :, 0] = 0 # remove red color\n mask = masks[0]\n mask_with_hole = np.zeros_like(mask).astype(\"uint8\")\n mask_with_hole = cv2.rectangle(mask_with_hole, (10, 10), (50, 50), 1, 5)\n\n for m in [mask, mask_with_hole]:\n for save in [True, False]:\n v = Visualizer(img)\n o = v.draw_binary_mask(m, color=\"red\", text=\"test\")\n if save:\n with tempfile.TemporaryDirectory(prefix=\"detectron2_viz\") as d:\n path = os.path.join(d, \"output.png\")\n o.save(path)\n o = cv2.imread(path)[:, :, ::-1]\n else:\n o = o.get_image().astype(\"float32\")\n # red color is drawn on the image\n self.assertTrue(o[:, :, 0].sum() > 0)\n\n def test_draw_soft_mask(self):\n img = np.random.rand(100, 100, 3) * 255\n img[:, :, 0] = 0 # remove red color\n mask = np.zeros((100, 100), dtype=np.float32)\n mask[30:50, 40:50] = 1.0\n cv2.GaussianBlur(mask, (21, 21), 10)\n\n v = Visualizer(img)\n o = v.draw_soft_mask(mask, color=\"red\", text=\"test\")\n o = o.get_image().astype(\"float32\")\n # red color is drawn on the image\n self.assertTrue(o[:, :, 0].sum() > 0)\n\n # test draw empty mask\n v = Visualizer(img)\n o = v.draw_soft_mask(np.zeros((100, 100), dtype=np.float32), color=\"red\", text=\"test\")\n o = o.get_image().astype(\"float32\")\n\n def test_border_mask_with_holes(self):\n H, W = 200, 200\n img = np.zeros((H, W, 3))\n img[:, :, 0] = 255.0\n v = Visualizer(img, scale=3)\n\n mask = np.zeros((H, W))\n mask[:, 100:150] = 1\n # create a hole, to trigger imshow\n mask = cv2.rectangle(mask, (110, 110), (130, 130), 0, thickness=-1)\n output = v.draw_binary_mask(mask, color=\"blue\")\n output = output.get_image()[:, :, ::-1]\n\n first_row = {tuple(x.tolist()) for x in output[0]}\n last_row = {tuple(x.tolist()) for x in output[-1]}\n # Check quantization / off-by-1 error: the first and last row must have two colors\n self.assertEqual(len(last_row), 2)\n self.assertEqual(len(first_row), 2)\n self.assertIn((0, 0, 255), 
last_row)\n self.assertIn((0, 0, 255), first_row)\n\n def test_border_polygons(self):\n H, W = 200, 200\n img = np.zeros((H, W, 3))\n img[:, :, 0] = 255.0\n v = Visualizer(img, scale=3)\n mask = np.zeros((H, W))\n mask[:, 100:150] = 1\n\n output = v.draw_binary_mask(mask, color=\"blue\")\n output = output.get_image()[:, :, ::-1]\n\n first_row = {tuple(x.tolist()) for x in output[0]}\n last_row = {tuple(x.tolist()) for x in output[-1]}\n # Check quantization / off-by-1 error:\n # the first and last row must have >=2 colors, because the polygon\n # touches both rows\n self.assertGreaterEqual(len(last_row), 2)\n self.assertGreaterEqual(len(first_row), 2)\n self.assertIn((0, 0, 255), last_row)\n self.assertIn((0, 0, 255), first_row)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"facebookresearch/detectron2","sub_path":"tests/test_visualizer.py","file_name":"test_visualizer.py","file_ext":"py","file_size_in_byte":10378,"program_lang":"python","lang":"en","doc_type":"code","stars":27217,"dataset":"github-code","pt":"41"} +{"seq_id":"2096552073","text":"import numpy as np\nimport pandas as pd\nimport json\nimport math\nimport random\nimport pickle\nfrom tqdm import tqdm\nimport time\nimport os\n\nclass DataLoader():\n def __init__(self, args, mode, batch, shuffle):\n\n self.args = args\n\n self.data_path = self.args.data_path\n self.anno_path = os.path.join(self.data_path, 'annotation')\n\n self.mode = mode\n self.shuffle = shuffle\n self.batch = batch\n\n self.feature = os.path.join(self.data_path, 'I3D_features')\n self.data_segments = self.gen_dataset()\n self.size = len(self.data_segments)\n self.nbatch = int(self.size / self.batch)\n\n def gen_dataset(self):\n\n alldata = json.load(open(os.path.join(self.anno_path, 'thumos14.json')))['database']\n database = {}\n for video in alldata.keys():\n if alldata[video]['subset'] == self.mode:\n database[video] = alldata[video]\n\n data_segments = [] \n for key, video in database.items():\n t_granularity = self.args.t_granularity/self.args.fps[key]\n t_step = self.args.t_step/self.args.fps[key]\n fealength = int((video['fealength_step4']+self.args.down_sample-1) / self.args.down_sample)\n actions = np.zeros([fealength, self.args.class_num])\n points = np.zeros([2, fealength, self.args.class_num])\n biases = np.zeros([2, fealength, self.args.class_num])\n annotation = video['annotations']\n for anno in annotation:\n # time unit: sec\n s0 = float(anno['segment'][0])\n e0 = float(anno['segment'][1])\n l = e0 - s0\n s1 = max(s0-l/10., 0.0)\n s2 = (s0+l/10.)\n e1 = (e0-l/10.)\n e2 = min(float((fealength-1)*t_step+(t_granularity/2.)), e0+l/10.)\n\n is0 = max(0, round((s0-t_granularity/2.)/t_step))\n is1 = max(0, round((s1-t_granularity/2.)/t_step))\n is2 = max(0, round((s2-t_granularity/2.)/t_step))\n ie0 = min((fealength-1), round((e0-t_granularity/2.)/t_step))\n ie1 = min((fealength-1), round((e1-t_granularity/2.)/t_step))\n ie2 = min((fealength-1), round((e2-t_granularity/2.)/t_step))\n\n\n actions[is0:ie0+1,anno['labelidx']] = 1\n points[0,is1:is2+1,anno['labelidx']] = 1\n points[1,ie1:ie2+1,anno['labelidx']] = 1\n\n if len(biases[0,is1:is2+1,anno['labelidx']]) != len(range(is1,is2+1)) or len(biases[1,ie1:ie2+1,anno['labelidx']]) != len(range(ie1,ie2+1)):\n # print(key,anno['labelidx'],fealength, is1,is2+1,ie1,ie2+1)\n continue\n else:\n biases[0,is1:is2+1,anno['labelidx']] = [s0 - (t*t_step+t_granularity/2.) for t in range(is1,is2+1)] \n biases[1,ie1:ie2+1,anno['labelidx']] = [e0 - (t*t_step+t_granularity/2.) 
for t in range(ie1,ie2+1)]\n\n data_segments.append((key, fealength, actions, points, biases))\n\n if self.shuffle:\n random.shuffle(data_segments)\n\n return data_segments\n\n\n def gen_train_batch(self, index):\n\n batchdata = self.data_segments[index*self.batch:(index+1)*self.batch]\n aa, pp, bb, ff, mm = [], [], [], [], []\n\n for data in batchdata:\n a = np.zeros([1, self.args.out_window, self.args.class_num])\n p = np.zeros([1, 2, self.args.out_window, self.args.class_num])\n b = np.zeros([1, 2, self.args.out_window, self.args.class_num])\n f = np.zeros([1, self.args.in_window, 2048])\n m = np.zeros([1, self.args.out_window, 1])\n\n key, fealength, actions, points, biases = data\n\n features = np.load(os.path.join(self.feature, key+'.npy'))\n length = features.shape[0]\n\n if fealength <= self.args.out_window:\n a[0,:fealength,:] = actions\n p[0,:,:fealength,:] = points\n b[0,:,:fealength,:] = biases\n f[0,:length,:] = features\n m[0,:fealength,:] = 1\n else:\n actions_sum = np.sum(actions, 1)\n flag = 0\n count = 0\n while flag == 0:\n count += 1\n s = np.random.randint(0, fealength-self.args.out_window+1)\n e = s + self.args.out_window\n if (s == 0 or actions_sum[s] == 0) and (e == fealength or actions_sum[e-1] == 0):\n a[0,:fealength,:] = actions[s:e,:]\n p[0,:,:fealength,:] = points[:,s:e,:]\n b[0,:,:fealength,:] = biases[:,s:e,:]\n tmp_length = features[s*self.args.down_sample:e*self.args.down_sample,:].shape[0]\n f[0,:tmp_length,:] = features[s*self.args.down_sample:e*self.args.down_sample,:]\n m[0,:,:] = 1\n flag = 1\n if count > 1000:\n break\n if flag == 0:\n # print('no good sample')\n a[0,:fealength,:] = actions[0:self.args.out_window,:]\n p[0,:,:fealength,:] = points[:,0:self.args.out_window,:]\n b[0,:,:fealength,:] = biases[:,0:self.args.out_window,:]\n f[0,:,:] = features[0:self.args.in_window,:]\n m[0,:,:] = 1\n aa.append(a)\n pp.append(p)\n bb.append(b)\n ff.append(f)\n mm.append(m)\n aa = np.concatenate(aa)\n pp = np.concatenate(pp)\n bb = np.concatenate(bb)\n ff = np.concatenate(ff)\n mm = np.concatenate(mm)\n\n return np.max(aa,2,keepdims=2), np.max(pp,3,keepdims=3), np.max(bb,3,keepdims=3)+np.min(bb,3,keepdims=3), ff, mm\n\n\n def gen_eval_batch(self, index):\n\n\n key, fealength, actions, points, biases = self.data_segments[index]\n\n features = np.load(os.path.join(self.feature, key+'.npy'))\n\n aa = np.expand_dims(actions, 0)\n pp = np.expand_dims(points, 0)\n bb = np.expand_dims(biases, 0)\n ff = np.expand_dims(features, 0)\n mm = np.ones((1,fealength,1))\n\n return key, np.max(aa,2,keepdims=2), np.max(pp,3,keepdims=3), np.max(bb,3,keepdims=3)+np.min(bb,3,keepdims=3), ff, mm\n\n\n\n\nclass pem_DataLoader():\n def __init__(self, batch, shuffle, datafile, evaluation=False):\n\n self.data = pickle.load(open(datafile, 'rb'))\n self.keys = list(self.data.keys())\n self.batch = batch\n self.num = len(self.keys)\n if shuffle:\n random.shuffle(self.keys)\n\n if evaluation:\n pass\n else:\n ratio = 0.9\n self.train_key = self.keys[:int(self.num*ratio)]\n self.val_key = self.keys[int(self.num*ratio):]\n\n self.train_data = []\n with tqdm(total=len(self.train_key)) as count:\n for key in self.train_key:\n self.train_data += self.data[key]\n count.update(1)\n self.train_num = len(self.train_data)\n self.train_nbatch = int(self.train_num / batch)\n\n self.val_data = []\n with tqdm(total=len(self.val_key)) as count:\n for key in self.val_key:\n self.val_data += self.data[key]\n count.update(1)\n self.val_num = len(self.val_data)\n self.val_nbatch = int(self.val_num / 
batch)\n\n def generate_batch(self, mode, step):\n\n if mode == 'train':\n pem_data = self.train_data\n else:\n pem_data = self.val_data\n feature = []\n iou = []\n for item in pem_data[step*self.batch:(step+1)*self.batch]:\n feature.append(item[0])\n iou.append(item[1])\n feature = np.vstack(feature)\n iou = np.vstack(iou)\n\n return feature, iou\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"PeisenZhao/Bottom-Up-TAL-with-MR","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":8014,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"41"} +{"seq_id":"12876151330","text":"import copy\nimport uuid\n\nimport pytest\nfrom httpx import AsyncClient\nfrom sqlalchemy import delete\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import Session\n\nimport services\nfrom app.exceptions import CreatingError\nfrom app.models import stations\nfrom app.schemas import schemas_stations, schemas_washing\nfrom app.schemas import schemas_washing as washing\n# from app.utils.general import read_location\nfrom app.static.enums import RegionEnum, StationStatusEnum, RoleEnum, StationParamsEnum, \\\n\tStationsSortingEnum\nfrom tests.additional import auth, users as users_funcs\nfrom tests.additional.stations import get_station_by_id, generate_station, StationData, change_station_params, \\\n\trand_serial, delete_all_stations, generate_station_programs\nfrom tests.fills import stations as station_fills\n\n\n@pytest.mark.usefixtures(\"generate_users\", \"generate_default_station\")\nclass TestStations:\n\tinstaller: users_funcs.UserData\n\tmanager: users_funcs.UserData\n\tregion_manager: users_funcs.UserData\n\tsysadmin: users_funcs.UserData\n\tlaundry: users_funcs.UserData\n\tstation: StationData\n\n\tasync def test_create_station_with_default_params(self, ac: AsyncClient, session: AsyncSession,\n\t\t\t\t\t\t\t\t\t\t\t\t\t monkeypatch):\n\t\t\"\"\"\n\t\tСоздание станции без указания опциональных параметров.\n\t\t\"\"\"\n\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"Qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\t\tresponse = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\t# real_location = await read_location(\"Санкт-Петербург\")\n\t\tstation_response = schemas_stations.Station(\n\t\t\t**response.json()\n\t\t) # Validation error может подняться, если что-то не так\n\t\tstation_in_db = await get_station_by_id(station_response.id, session)\n\t\tassert station_response.dict() == station_in_db.dict()\n\t\tassert len(station_in_db.station_washing_agents) == services.DEFAULT_STATION_WASHING_AGENTS_AMOUNT\n\t\tassert len(station_in_db.station_washing_machines) == services.DEFAULT_STATION_WASHING_MACHINES_AMOUNT\n\t\tassert station_in_db.station_programs\n\t\t# assert station_in_db.location[\"latitude\"] == real_location.latitude and \\\n\t\t# \t station_in_db.location[\"longitude\"] == real_location.longitude\n\t\tassert station_in_db.is_active == services.DEFAULT_STATION_IS_ACTIVE\n\t\tassert station_in_db.is_protected == services.DEFAULT_STATION_IS_PROTECTED\n\t\tassert station_in_db.station_control.status == services.DEFAULT_STATION_STATUS\n\t\tassert not (station_in_db.is_active and not station_in_db.station_settings.teh_power), \\\n\t\t\t\"If station is active, teh must be 
powered on\"\n\t\tassert not (not station_in_db.is_active and (station_in_db.station_settings.station_power is True or\n\t\t\t\t\t\t\t\t\t\t\t\t\t station_in_db.station_control.status is not None\n\t\t\t\t\t\t\t\t\t\t\t\t\t or station_in_db.station_settings.teh_power is True)), \\\n\t\t\t\"If station isn't active, station power and TEH power must be False and station status must be null\"\n\t\tassert not (\n\t\t\tstation_in_db.station_settings.station_power is True and station_in_db.station_control.status is None), \\\n\t\t\t\"If station is powered on, station status must be not null\"\n\t\tassert not (station_in_db.station_control.status == StationStatusEnum.WORKING and not all(\n\t\t\tstation_in_db.station_control.washing_machine and any(\n\t\t\t\t(station_in_db.station_control.program_step, station_in_db.station_control.washing_agents)\n\t\t\t)\n\t\t)), \"If station status is working, washing machine must be defined, and one of params [program_step, washing_agents] \" \\\n\t\t\t\"must be not null\"\n\t\tassert station_in_db.name == station_data[\"station\"][\"name\"]\n\n\tasync def test_create_station_with_advanced_params(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\tСоздание станции с ручным вводом параметров.\n\t\t\"\"\"\n\t\tparams = station_fills.test_create_station_with_advanced_params\n\n\t\tresponse = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=params\n\t\t)\n\n\t\tstation_response = schemas_stations.Station(**response.json())\n\t\tstation_in_db = await get_station_by_id(station_response.id, session)\n\t\tparams = params[\"station\"]\n\t\tparams[\"region\"] = RegionEnum.NORTHWEST # или менять на строковый регион в полученных объектах\n\n\t\tassert station_response.dict() == station_in_db.dict()\n\t\tassert len(station_in_db.station_washing_agents) == len(params[\"washing_agents\"])\n\t\tassert len(station_in_db.station_washing_machines) == len(params[\"washing_machines\"])\n\t\tassert len(station_in_db.station_programs) == len(params[\"programs\"])\n\t\tfor k, v in params.items():\n\t\t\tif k in station_in_db.dict():\n\t\t\t\tassert station_in_db.dict()[k] == v\n\t\tfor k, v in params[\"settings\"].items():\n\t\t\tassert getattr(station_in_db.station_settings, k) == v\n\n\t\tfor program in station_in_db.station_programs:\n\t\t\tdefined_program = next(pg for pg in params[\"programs\"] if pg[\"program_step\"] == program.program_step)\n\t\t\tassert program.program_step == defined_program[\"program_step\"]\n\t\t\tassert program.name == defined_program[\"name\"]\n\t\t\tfor washing_agent in program.washing_agents:\n\t\t\t\tfor ag in defined_program[\"washing_agents\"]:\n\t\t\t\t\tif isinstance(ag, int) and ag == washing_agent.agent_number:\n\t\t\t\t\t\tdefined_washing_agent = next(agent for agent in params[\"washing_agents\"] if\n\t\t\t\t\t\t\t\t\t\t\t\t\t agent[\"agent_number\"] == ag)\n\t\t\t\t\t\tdefined_washing_agent = washing.WashingAgentCreateMixedInfo(**defined_washing_agent)\n\t\t\t\t\telif isinstance(ag, dict) and ag[\"agent_number\"] == washing_agent.agent_number:\n\t\t\t\t\t\tdefined_washing_agent = washing.WashingAgentWithoutRollback(**ag)\n\t\t\t\tassert washing_agent.volume == defined_washing_agent.volume\n\n\t\tdefault_washing_agents_params = {\n\t\t\t\"rollback\": services.DEFAULT_WASHING_AGENTS_ROLLBACK,\n\t\t\t\"volume\": services.DEFAULT_WASHING_AGENTS_VOLUME\n\t\t}\n\n\t\tfor washing_agent in station_in_db.station_washing_agents:\n\t\t\tdefined_washing_agent = next(ag for ag in 
params[\"washing_agents\"]\n\t\t\t\t\t\t\t\t\t\t if ag[\"agent_number\"] == washing_agent.agent_number)\n\t\t\tfor param in default_washing_agents_params:\n\t\t\t\tdefault_param = default_washing_agents_params.get(param)\n\t\t\t\tif param in defined_washing_agent:\n\t\t\t\t\tassert getattr(washing_agent, param) == defined_washing_agent[param]\n\t\t\t\telse:\n\t\t\t\t\tassert getattr(washing_agent, param) == default_param\n\n\t\tdefault_washing_machines_params = {\n\t\t\t\"track_length\": services.DEFAULT_WASHING_MACHINES_TRACK_LENGTH,\n\t\t\t\"is_active\": services.DEFAULT_WASHING_MACHINES_IS_ACTIVE,\n\t\t\t\"volume\": services.DEFAULT_WASHING_MACHINES_VOLUME\n\t\t}\n\t\tfor washing_machine in station_in_db.station_washing_machines:\n\t\t\tdefined_washing_machine = next(machine for machine in params[\"washing_machines\"]\n\t\t\t\t\t\t\t\t\t\t if machine[\"machine_number\"] == washing_machine.machine_number)\n\t\t\tfor param in default_washing_machines_params:\n\t\t\t\tdefault_param = default_washing_machines_params.get(param)\n\t\t\t\tif param in defined_washing_machine:\n\t\t\t\t\tassert getattr(washing_machine, param) == defined_washing_machine[param]\n\t\t\t\telse:\n\t\t\t\t\tassert getattr(washing_machine, param) == default_param\n\n\t\tparams[\"region\"] = \"Северо-западный\" # меняю обратно для след тестов\n\n\tasync def test_create_station_with_default_programs(self, session: AsyncSession, ac: AsyncClient,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tmonkeypatch, sync_session: Session):\n\t\tprograms = generate_station_programs(amount=4, as_schema=True)\n\n\t\tasync def get_default_programs(*args, **kwargs):\n\t\t\treturn programs\n\t\tmonkeypatch.setattr(stations.StationProgram, \"get_default_programs\", get_default_programs)\n\n\t\tstation = await generate_station(ac, sync_session, self.sysadmin, use_default_programs=True)\n\n\t\tassert any(station.station_programs)\n\t\tassert len(station.station_programs) == len(programs)\n\t\tfor pg in programs:\n\t\t\tstation_pg = next(p for p in station.station_programs if p.program_step == pg.program_step)\n\t\t\tassert pg.program_number == station_pg.program_number\n\t\t\tassert pg.name == station_pg.name\n\t\t\tstation_pg_washing_agent_number = [ag.agent_number for ag in station_pg.washing_agents]\n\t\t\tfor ag in pg.washing_agents:\n\t\t\t\tag = schemas_washing.WashingAgentWithoutRollback(**ag) # тип почему-то нарушен\n\t\t\t\tassert ag.agent_number in station_pg_washing_agent_number\n\n\tasync def test_create_station_with_invalid_default_programs(self, session: AsyncSession, ac: AsyncClient,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t monkeypatch, sync_session: Session):\n\t\tasync def get_default_programs(*args, **kwargs):\n\t\t\traise CreatingError(\"Some getting default program exception\")\n\t\tmonkeypatch.setattr(stations.StationProgram, \"get_default_programs\", get_default_programs)\n\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"Qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\n\t\tr = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\tassert r.status_code == 422\n\n\tasync def test_create_station_with_getting_default_programs_connection_error(self, session: AsyncSession,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ac: AsyncClient,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t monkeypatch):\n\t\tasync def 
get_default_programs(*args, **kwargs):\n\t\t\traise ConnectionError(\"Some getting default program exception\")\n\t\tmonkeypatch.setattr(stations.StationProgram, \"get_default_programs\", get_default_programs)\n\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"Qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\n\t\tr = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\tassert r.status_code == 400\n\n\tasync def test_create_station_with_comment(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\tНовый параметр, поэтому добавлю отдельный тест\n\t\t\"\"\"\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial(),\n\t\t\t\"comment\": \"it is test!\"\n\t\t})\n\t\tresponse = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\tassert response.status_code == 201\n\t\tr = schemas_stations.Station(**response.json())\n\t\tassert r.comment == station_data[\"station\"][\"comment\"]\n\n\t\t# ______________\n\n\t\t\"\"\"\n\t\tпустой коммент\n\t\t\"\"\"\n\t\tdel station_data[\"station\"][\"comment\"]\n\t\tstation_data[\"station\"][\"serial\"] = rand_serial()\n\t\tresponse = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\tassert response.status_code == 201\n\t\tassert response.json()[\"comment\"] is None\n\n\tasync def test_create_station_not_released(self, session: AsyncSession, ac: AsyncClient):\n\t\t\"\"\"\n\t\tЕсли станция не выпущена - установится пустая дата создания\n\t\t\"\"\"\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\t\turl = \"/v1/stations/?released=false\"\n\n\t\tr = await ac.post(\n\t\t\turl,\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\n\t\tassert r.status_code == 201\n\t\tr = schemas_stations.Station(**r.json())\n\t\tassert r.created_at is None\n\n\tasync def test_release_station(self, session: AsyncSession, ac: AsyncClient):\n\t\t\"\"\"\n\t\tЕсли станция не выпущена, можно ее выпустить.\n\t\t\"\"\"\n\t\tstation_data = dict(station={\n\t\t\t\"name\": \"qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\t\turl = \"/v1/stations/?released=false\"\n\t\tr = await ac.post(\n\t\t\turl,\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\n\t\tstation = schemas_stations.Station(**r.json())\n\n\t\trelease_r = await ac.patch(\n\t\t\tf\"/v1/stations/release/{station.id}\",\n\t\t\theaders=self.sysadmin.headers\n\t\t)\n\t\tassert release_r.status_code == 200\n\t\tstation = schemas_stations.StationGeneralParams(**release_r.json())\n\t\tassert station.created_at\n\n\tasync def test_not_released_station(self, session: AsyncSession, ac: AsyncClient):\n\t\tstation_data = dict(station={\n\t\t\t\"name\": 
\"qwerty\",\n\t\t\t\"wifi_name\": \"qwerty\",\n\t\t\t\"wifi_password\": \"qwerty\",\n\t\t\t# \"address\": \"Санкт-Петербург\",\n\t\t\t\"region\": RegionEnum.NORTHWEST.value,\n\t\t\t\"serial\": rand_serial()\n\t\t})\n\t\turl = \"/v1/stations/?released=false\"\n\t\tr = await ac.post(\n\t\t\turl,\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=station_data\n\t\t)\n\t\tstation = schemas_stations.Station(**r.json())\n\n\t\trequest_to_station_r = await ac.get(\n\t\t\tf\"/v1/manage/station/{station.id}/{StationParamsEnum.GENERAL}\",\n\t\t\theaders=self.sysadmin.headers\n\t\t)\n\t\tassert request_to_station_r.status_code == 403\n\n\t\t# ____\n\t\tstation_headers = {\"X-Station-Uuid\": str(station.id)}\n\n\t\trequest_from_station_r = await ac.get(\n\t\t\t\"/v1/stations/me\",\n\t\t\theaders=station_headers\n\t\t)\n\t\tassert request_from_station_r.status_code == 403\n\n\tasync def test_release_station_not_sysadmin_role(self, session: AsyncSession, ac: AsyncClient):\n\t\turl = f\"/v1/stations/release/{self.station.id}\"\n\t\tr = await ac.patch(\n\t\t\turl,\n\t\t\theaders=self.manager.headers\n\t\t)\n\t\tassert r.status_code == 403\n\n\tasync def test_release_station_not_existing_station_id(self, session: AsyncSession, ac: AsyncClient):\n\t\trand_uuid = uuid.uuid4()\n\t\turl = f\"/v1/stations/release/{rand_uuid}\"\n\t\tr = await ac.patch(\n\t\t\turl,\n\t\t\theaders=self.sysadmin.headers\n\t\t)\n\t\tassert r.status_code == 404\n\n\tasync def test_release_station_already_released(self, session: AsyncSession, ac: AsyncClient):\n\t\turl = f\"/v1/stations/release/{self.station.id}\"\n\t\tr = await ac.patch(\n\t\t\turl,\n\t\t\theaders=self.sysadmin.headers\n\t\t)\n\t\tassert r.status_code == 400\n\n\tasync def test_create_station_errors(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\t- Нельзя передать в программу несуществующее стиральное средство;\n\t\t- Нельзя передать невалидные параметры станции (норм проверяются в schemas_stations);\n\t\t- roles auto test;\n\t\t- users auth auto test.\n\t\t\"\"\"\n\t\tparams = copy.deepcopy(station_fills.test_create_station_with_advanced_params)\n\t\tif not isinstance(params[\"station\"][\"region\"], str):\n\t\t\tparams[\"station\"][\"region\"] = params[\"station\"][\n\t\t\t\t\"region\"].value # не успевает поменяться обратно на строку (\n\t\t# _______________________________________________________________\n\t\tparams[\"station\"][\"programs\"].append(\n\t\t\t{\n\t\t\t\t\"program_step\": 13,\n\t\t\t\t\"washing_agents\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"agent_number\": 5 # такого в списке нет\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}\n\t\t)\n\n\t\tnon_existing_washing_agent_r = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=params\n\t\t)\n\n\t\tassert non_existing_washing_agent_r.status_code == 422\n\n\t\tparams[\"station\"][\"programs\"].remove(params[\"station\"][\"programs\"][-1])\n\n\t\t# ________________________________________________________\n\n\t\tparams[\"station\"][\"settings\"][\"station_power\"] = True\n\t\tparams[\"station\"][\"is_active\"] = False\n\n\t\tinvalid_params_r = await ac.post(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers,\n\t\t\tjson=params\n\t\t)\n\n\t\tassert invalid_params_r.status_code == 422\n\n\t\t# ________________________________________________________\n\n\t\tawait auth.url_auth_roles_test(\n\t\t\t\"/v1/stations/\", \"post\",\n\t\t\tRoleEnum.SYSADMIN, self.sysadmin,\n\t\t\tsession, ac, json=station_fills.test_create_station_with_advanced_params\n\t\t)\n\t\tawait 
auth.url_auth_test(\n\t\t\t\"/v1/stations/\", \"post\", self.sysadmin, ac, session,\n\t\t\tjson=station_fills.test_create_station_with_advanced_params\n\t\t)\n\n\tasync def test_read_all_stations(self, ac: AsyncClient, session: AsyncSession,\n\t\t\t\t\t\t\t\t\t sync_session: Session):\n\t\t\"\"\"\n\t\tЧтение списка станций.\n\t\t\"\"\"\n\t\tawait delete_all_stations(session) # не могу проследить, откуда появляется ошибка - где-то\n\t\t# из контроля станции удалил стиральную машину при рабочем состоянии\n\t\tstations_ = await StationData.generate_stations_list(ac, sync_session, self.sysadmin, session,\n\t\t\t\t\t\t\t\t\t\t\t\t amount=10)\n\t\t# ____\n\t\t# sys, manager role\n\t\tfor user in (self.sysadmin, self.manager):\n\t\t\tr = await ac.get(\n\t\t\t\t\"/v1/stations/\",\n\t\t\t\theaders=user.headers\n\t\t\t)\n\t\t\tassert r.status_code == 200\n\t\t\tr = [schemas_stations.StationInList(**s) for s in r.json()]\n\t\t\tassert len(r) >= len(stations_)\n\n\t\t# ____\n\t\t# region manager & installer role\n\t\tfor user in (self.installer, self.region_manager):\n\t\t\tr = await ac.get(\n\t\t\t\t\"/v1/stations/\",\n\t\t\t\theaders=user.headers\n\t\t\t)\n\t\t\tassert r.status_code == 200\n\t\t\tr = [schemas_stations.StationInList(**s) for s in r.json()]\n\t\t\tassert len(r)\n\t\t\tassert all(\n\t\t\t\t(st.general.region == user.region for st in r)\n\t\t\t)\n\n\tasync def test_read_all_stations_(self, session: AsyncSession, ac: AsyncClient,\n\t\t\t\t\t\t\t\t\t sync_session: Session):\n\t\t\"\"\"\n\t\tпроверить, что точно возвращаются параметры\n\t\t\"\"\"\n\t\tawait self.station.generate_data_for_read_stations_list(\n\t\t\tsession, ac, sync_session, ctrl=True, owner=True, logs=True\n\t\t)\n\t\tr = await ac.get(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.sysadmin.headers\n\t\t)\n\t\tr = [schemas_stations.StationInList(**s) for s in r.json()]\n\t\tstation = next(s for s in r if str(s.general.id) == str(self.station.id))\n\t\tassert station.last_work_at\n\t\tassert station.last_maintenance_at\n\t\tassert station.owner\n\t\tassert station.control.status\n\n\tasync def test_read_all_stations_with_ordering(self, ac: AsyncClient, session: AsyncSession,\n\t\t\t\t\t\t\t\t\t\t\t\t sync_session: Session):\n\t\theaders = self.sysadmin.headers\n\t\tawait StationData.generate_stations_list(ac, sync_session, self.sysadmin, session,\n\t\t\t\t\t\t\t\t\t\t\t\t amount=10)\n\t\tsorting_keys = {StationsSortingEnum.OWNER: lambda st_: st_.owner.last_name,\n\t\t\t\t\t\tStationsSortingEnum.STATUS: lambda st_: st_.control.status.value,\n\t\t\t\t\t\tStationsSortingEnum.MAINTENANCE: lambda st_: st_.last_maintenance_at,\n\t\t\t\t\t\tStationsSortingEnum.LAST_WORK: lambda st_: st_.last_work_at,\n\t\t\t\t\t\tStationsSortingEnum.NAME: lambda st_: st_.general.name,\n\t\t\t\t\t\tStationsSortingEnum.REGION: lambda st_: st_.general.region.value}\n\t\tfor order in list(StationsSortingEnum):\n\t\t\tfor desc in (True, False):\n\t\t\t\turl = f\"/v1/stations/?order_by={order.value}\"\n\t\t\t\tsorting_params = {\"key\": sorting_keys[order]}\n\t\t\t\tif desc:\n\t\t\t\t\turl += \"&desc=true\"\n\t\t\t\t\tsorting_params[\"reverse\"] = True\n\t\t\t\tr = await ac.get(url, headers=headers)\n\t\t\t\tassert r.status_code == 200\n\t\t\t\tr = [schemas_stations.StationInList(**s) for s in r.json()]\n\t\t\t\tnullable_objs = []\n\t\t\t\tfor s in r:\n\t\t\t\t\tnullable_fields = {StationsSortingEnum.OWNER: s.owner,\n\t\t\t\t\t\t\t\t\t StationsSortingEnum.STATUS: s.control.status,\n\t\t\t\t\t\t\t\t\t StationsSortingEnum.MAINTENANCE: 
s.last_maintenance_at,\n\t\t\t\t\t\t\t\t\t StationsSortingEnum.LAST_WORK: s.last_work_at}\n\t\t\t\t\tif order in nullable_fields:\n\t\t\t\t\t\tnullable_field = nullable_fields[order]\n\t\t\t\t\t\tif nullable_field is None:\n\t\t\t\t\t\t\tnullable_objs.append(s)\n\t\t\t\tfor obj in nullable_objs:\n\t\t\t\t\tdel r[r.index(obj)]\n\t\t\t\tassert r == sorted(r, **sorting_params)\n\n\tasync def test_read_all_stations_by_not_permitted_user(self, ac: AsyncClient, session: AsyncSession):\n\t\tr = await ac.get(\n\t\t\t\"/v1/stations/\",\n\t\t\theaders=self.laundry.headers\n\t\t)\n\t\tassert r.status_code == 403\n\n\tasync def test_read_all_stations_not_authenticated(self, ac: AsyncClient,\n\t\t\t\t\t\t\t\t\t\t\t\t\t session: AsyncSession):\n\t\tawait auth.url_auth_test(\n\t\t\t\"/v1/stations/\", \"get\", self.sysadmin,\n\t\t\tac, session\n\t\t)\n\n\tasync def test_read_stations_params(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\tЧастичное чтение данных станции станцией.\n\t\t\"\"\"\n\t\tgeneral_params_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.GENERAL.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\t\tassert general_params_r.status_code == 200\n\t\tresult = schemas_stations.StationGeneralParamsForStation(**general_params_r.json())\n\t\tfor k, v in self.station.__dict__.items():\n\t\t\tif k in result.dict():\n\t\t\t\tassert getattr(result, k) == v\n\n\t\t# _____________________________________________________\n\n\t\tsettings_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.SETTINGS.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\n\t\tassert settings_r.status_code == 200\n\t\tsettings_result = schemas_stations.StationSettings(**settings_r.json())\n\n\t\t# _____________________________________________________\n\n\t\tcontrol_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.CONTROL.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\t\tassert control_r.status_code == 200\n\t\tresult = schemas_stations.StationControl(**control_r.json())\n\n\t\tassert settings_result.station_power is True and result.status == StationStatusEnum.AWAITING or \\\n\t\t\t settings_result.station_power is False and result.status is None\n\n\t\t# _____________________________________________________\n\n\t\twashing_agents_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.WASHING_AGENTS.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\t\tassert washing_agents_r.status_code == 200\n\t\twashing_agents_result = washing_agents_r.json()\n\n\t\tfor washing_agent in washing_agents_result:\n\t\t\twashing_agent = washing.WashingAgent(**washing_agent) # Validation error\n\t\t\tassert services.MIN_WASHING_AGENTS_VOLUME <= washing_agent.volume <= services.MAX_WASHING_AGENTS_VOLUME\n\t\t\tassert washing_agent.rollback is services.DEFAULT_WASHING_AGENTS_ROLLBACK\n\t\t# _____________________________________________________\n\n\t\tprograms_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.PROGRAMS.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\t\tassert programs_r.status_code == 200\n\n\t\tresult = programs_r.json()\n\t\tfor program in result:\n\t\t\tprogram = schemas_stations.StationProgram(**program)\n\t\t\tassert program.program_number == program.program_step // 10\n\t\t\tfor washing_agent in program.washing_agents:\n\t\t\t\tassert washing_agent.agent_number in [ag[\"agent_number\"] for ag in washing_agents_result]\n\t\t\t\tassert services.MIN_STATION_WASHING_AGENTS_AMOUNT <= washing_agent.agent_number <= \\\n\t\t\t\t\t 
services.MAX_STATION_WASHING_AGENTS_AMOUNT\n\n\t\t# ____________________________________________________\n\n\t\twashing_machines_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.WASHING_MACHINES.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\n\t\tassert washing_machines_r.status_code == 200\n\t\tresult = washing_machines_r.json()\n\n\t\tfor machine in result:\n\t\t\tmachine = washing.WashingMachine(**machine)\n\t\t\tassert services.MIN_WASHING_MACHINE_VOLUME <= machine.volume <= services.MAX_WASHING_MACHINE_VOLUME\n\t\t\tassert services.MIN_STATION_WASHING_MACHINES_AMOUNT <= machine.machine_number \\\n\t\t\t\t <= services.MAX_STATION_WASHING_MACHINES_AMOUNT\n\t\t\tassert services.MIN_WASHING_MACHINE_TRACK_LENGTH <= machine.track_length <= \\\n\t\t\t\t services.MAX_WASHING_MACHINE_TRACK_LENGTH\n\t\t\tassert machine.is_active == services.DEFAULT_WASHING_MACHINES_IS_ACTIVE\n\n\tasync def test_read_stations_params_errors(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\t- Отсутствие данных по станции;\n\t\t- stations auth auto test\n\t\t\"\"\"\n\t\tawait session.execute(\n\t\t\tdelete(stations.StationControl).where(stations.StationControl.station_id == self.station.id)\n\t\t)\n\t\tawait session.commit()\n\n\t\tnon_existing_data_r = await ac.get(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.CONTROL.value,\n\t\t\theaders=self.station.headers\n\t\t)\n\n\t\tassert non_existing_data_r.status_code == 404\n\n\t\tstation = await generate_station(ac, user=self.sysadmin)\n\n\t\tawait auth.url_auth_stations_test(\n\t\t\t\"/v1/stations/me/\" + StationParamsEnum.GENERAL.value,\n\t\t\t\"get\", station, session, ac\n\t\t)\n\n\tasync def test_read_stations_me(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\tЧтение всех данных по станции станцией.\n\t\t\"\"\"\n\t\tresponse = await ac.get(\n\t\t\t\"/v1/stations/me\",\n\t\t\theaders=self.station.headers\n\t\t)\n\t\tassert response.status_code == 200\n\t\tschemas_stations.StationForStation(**response.json()) # Validation error\n\n\tasync def test_read_station_me_errors(self, ac: AsyncClient, session: AsyncSession):\n\t\t\"\"\"\n\t\t- Отсутствие данных по станции;\n\t\t- stations auth auto test\n\t\t\"\"\"\n\t\tawait auth.url_auth_stations_test(\n\t\t\t\"/v1/stations/me\", \"get\", self.station, session, ac\n\t\t)\n\t\tawait change_station_params(self.station, session, status=StationStatusEnum.AWAITING)\n\n\t\tawait session.execute(\n\t\t\tdelete(stations.StationSettings).where(stations.StationSettings.station_id == self.station.id)\n\t\t)\n\t\tawait session.commit()\n\n\t\tnon_existing_data_r = await ac.get(\n\t\t\t\"/v1/stations/me\",\n\t\t\theaders=self.station.headers\n\t\t)\n\n\t\tassert non_existing_data_r.status_code == 404\n\n","repo_name":"Dahaka1/lfs","sub_path":"tests/test_stations.py","file_name":"test_stations.py","file_ext":"py","file_size_in_byte":25048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"1068261836","text":"#reverse integer\n\ndef reverse(x):\n if x > 0 :\n x = int(str(abs(x))[::-1])\n print('this is x', x)\n if x in range(-2**31, 2**31 - 1):\n print(x)\n else:\n print(0)\n else:\n x = int(str(abs(x))[::-1]) * -1\n if x in range(-2**31, 2**31 - 1):\n print(x)\n else:\n print(0)\n\nreverse(1230)\nint('1230')\n\n#abs is to remove negative sign, absalute value\n# cannot have leading 0 ints, abs \n\n\n\n","repo_name":"yashgadodia/crackingthecodinginterview","sub_path":"algos + 
concepts/reverse_int.py","file_name":"reverse_int.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"28030481440","text":"import numpy as np\n\nclass Selection:\n __probs: list\n __population: list\n def __init__(self):\n pass\n\n def buildSelection(self, population, fitness):\n self.__probs = []\n self.__population = population\n populationFitness = []\n totalFitness = 0\n for individual in population:\n individualFitness = fitness(individual)\n populationFitness.append(individualFitness)\n totalFitness += individualFitness\n\n for individualIndex in range(len(population)):\n self.__probs.append(populationFitness[individualIndex]/totalFitness)\n \n def selectRandom(self):\n indexes = np.arange(len(self.__population))\n newIndividualIndex = np.random.choice(indexes,1,p=self.__probs)[0]\n return self.__population[newIndividualIndex]\n","repo_name":"tiagompconceicao/IASC-2122-TP2","sub_path":"geneticAlgorithms/operators/Selection.py","file_name":"Selection.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"23389263038","text":"import os\nimport json\nimport pandas as pd\nfrom helper import json_extract\n\nuber_eats = []\ndirectory = \"../data_output/ubereats/stores/\"\nfor filename in os.listdir(directory):\n if filename.endswith(\".json\"):\n f = os.path.join(directory, filename)\n fd = open(f, 'rb')\n file_json = json.load(fd)\n try:\n uuid = json_extract(file_json, 'uuid')[0]\n slug = json_extract(file_json, 'slug')[0]\n postcode = json_extract(file_json, 'postalCode')[0]\n postcode = str(postcode)\n latitude = json_extract(file_json, 'latitude')[0]\n longitude = json_extract(file_json, 'longitude')[0]\n data = [uuid, slug, postcode, latitude, longitude]\n uber_eats.append(data)\n except:\n print(filename)\n\ndf_uber_eats = pd.DataFrame(uber_eats, columns=['uuid', 'slug', 'zipcode', 'latitude', 'longitude'])\ndf_uber_eats.to_csv('../data_output/ubereats_stores.csv', index=False)\n\n\n","repo_name":"chengjun-curb/sandbox","sub_path":"location_attributes/data_prep/etl_ubereats.py","file_name":"etl_ubereats.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"4324703105","text":"import argparse\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import cross_val_predict, StratifiedKFold\nfrom sklearn.metrics import roc_auc_score, average_precision_score\n\nfrom constants import *\nfrom datasets import load_and_preprocess\nfrom utils.model_utils import save_model\nfrom utils.data_utils import get_roadmap_col_order\nimport models\n\nimport torch\nimport torch.nn.functional as F\nimport optuna\nfrom skorch import NeuralNetClassifier\nfrom skorch.callbacks import EpochScoring, LRScheduler\n\nMODEL_CHOICES = ['glm', 'standard', 'neighbors', 'e116_neigh']\n\n\ndef main(args):\n X, y = load_and_preprocess(args.project, args.model, split='train')\n\n if args.model == 'e116_neigh':\n def objective(trial):\n auc = EpochScoring(scoring='roc_auc', lower_is_better=False)\n apr = EpochScoring(scoring='average_precision', lower_is_better=False)\n lrs = LRScheduler(policy='StepLR', step_size=10, gamma=0.5)\n \n bs = trial.suggest_categorical('batch_size', [128])\n l2 = trial.suggest_uniform('l2', 5e-5, 1e-2)\n lr = trial.suggest_uniform('lr', 1e-4, 5e-3)\n epochs = 
trial.suggest_categorical('epochs', [30])\n n_filt = trial.suggest_categorical('n_filt', [8, 16, 32])\n width = trial.suggest_categorical('width', [3, 5, 7])\n lin_units = trial.suggest_categorical('lin_units', [100, 200, 400])\n\n net = NeuralNetClassifier(\n models.MpraCNN,\n\n optimizer=torch.optim.Adam,\n optimizer__weight_decay=l2,\n lr=lr,\n batch_size=bs,\n max_epochs=epochs,\n\n module__n_filt=n_filt,\n module__width=width,\n module__lin_units=lin_units,\n\n callbacks=[auc, apr],\n iterator_train__shuffle=True,\n train_split=None,\n verbose=0\n )\n \n kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1000)\n np.random.seed(1000)\n torch.manual_seed(1000)\n cv_scores = cross_val_predict(net, X, y, cv=kf,\n method='predict_proba', n_jobs=-1)\n return roc_auc_score(y, cv_scores[:, 1])\n \n elif args.model == 'neighbors':\n def objective(trial):\n auc = EpochScoring(scoring='roc_auc', lower_is_better=False)\n apr = EpochScoring(scoring='average_precision', lower_is_better=False)\n lrs = LRScheduler(policy='StepLR', step_size=10, gamma=0.5)\n \n bs = trial.suggest_categorical('batch_size', [256])\n l2 = trial.suggest_uniform('l2', 5e-5, 5e-4)\n lr = trial.suggest_uniform('lr', 5e-5, 5e-4)\n epochs = trial.suggest_categorical('epochs', [30, 40])\n n_filt = trial.suggest_categorical('n_filt', [8, 16, 32])\n width = trial.suggest_categorical('width', [5])\n n_lin1 = trial.suggest_categorical('n_lin1', [400, 600])\n n_lin2 = trial.suggest_categorical('n_lin2', [400])\n\n net = NeuralNetClassifier(\n models.MpraFullCNN,\n\n optimizer=torch.optim.Adam,\n optimizer__weight_decay=l2,\n lr=lr,\n batch_size=bs,\n max_epochs=epochs,\n\n module__n_filt=n_filt,\n module__width=width,\n module__n_lin1=n_lin1,\n module__n_lin2=n_lin2,\n module__nonlin=F.leaky_relu,\n\n callbacks=[auc, apr],\n iterator_train__shuffle=True,\n train_split=None,\n verbose=0\n )\n \n kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1000)\n np.random.seed(1000)\n torch.manual_seed(1000)\n cv_scores = cross_val_predict(net, X, y, cv=kf,\n method='predict_proba', n_jobs=-1)\n return roc_auc_score(y, cv_scores[:, 1])\n print('Starting trials')\n study = optuna.create_study(direction='maximize')\n study.optimize(objective, n_trials=args.iter)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--project', '-p', choices=PROJ_CHOICES, default='mpra_e116')\n parser.add_argument('--model', '-m', default='standard', choices=MODEL_CHOICES,\n help='Which data/model to train on')\n parser.add_argument('--iter', '-i', type=int,\n help='Number of search iterations')\n args = parser.parse_args()\n\n main(args)\n","repo_name":"fl16180/MpraScreen","sub_path":"search_hparam.py","file_name":"search_hparam.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"36778872629","text":"import matplotlib.pyplot as plt\n\nimport networkx as nx\nimport matplotlib.pyplot\n\ndef bug_fix_permutate(testedges, graph, indivudal_edge_thickness):\n #print testedges\n for i in range(0,len(graph)):\n if testedges[i]!=graph[i]:\n try:\n indbadges=testedges.index(graph[i])\n except Exception as ex:\n indbadges=testedges.index((graph[i][1],graph[i][0]))\n\n testedges[i],testedges[indbadges]=testedges[indbadges],testedges[i]\n indivudal_edge_thickness[i],indivudal_edge_thickness[indbadges]=indivudal_edge_thickness[indbadges],indivudal_edge_thickness[i]\n\n #print indivudal_edge_thickness\n return 
indivudal_edge_thickness\n\n\ndef draw_graph(graph,individual_edge_thickness, labels=None, graph_layout='shell',\n node_size=1600, node_color='blue', node_alpha=0.3,\n node_text_size=12,\n edge_color='blue', edge_alpha=0.3, edge_tickness=1,\n edge_text_pos=0.3,\n text_font='sans-serif'):\n\n G=nx.Graph()\n G.add_edges_from(graph)\n\n #Fixing autistic bug\n testedges=G.edges()\n individual_edge_thickness=bug_fix_permutate(testedges,graph,individual_edge_thickness)\n\n if graph_layout == 'spring':\n graph_pos=nx.spring_layout(G)\n elif graph_layout == 'spectral':\n graph_pos=nx.spectral_layout(G)\n elif graph_layout == 'random':\n graph_pos=nx.random_layout(G)\n else:\n graph_pos=nx.shell_layout(G)\n\n nx.draw_networkx_nodes(G,graph_pos,node_size=node_size,\n alpha=node_alpha, node_color=node_color)\n nx.draw_networkx_edges(G,graph_pos,width=individual_edge_thickness,\n alpha=edge_alpha,edge_color=edge_color)\n nx.draw_networkx_labels(G, graph_pos,font_size=node_text_size,\n font_family=text_font)\n\n if labels is None:\n labels = [\"\" for element in range(len(graph))]\n\n edge_labels = dict(zip(graph, labels))\n nx.draw_networkx_edge_labels(G, graph_pos, edge_labels=edge_labels,\n label_pos=edge_text_pos)\n\n plt.show()","repo_name":"lucaderi/sgr","sub_path":"2011-2020/2017/Maraz/progetto/grapher.py","file_name":"grapher.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"41"} +{"seq_id":"39751505548","text":"import websocket, json, requests, time, threading\r\nimport pandas as pd\r\n\r\n# ----------------------------------------------------- MAIN -----------------------------------------------------\r\n\r\ndef main():\r\n # INITIALIZE DATA\r\n initialize()\r\n\r\n # WEBSOCKET THREAD\r\n thread1 = threading.Thread(target=websocket_dataflow)\r\n\r\n # POSITION THREAD\r\n thread2 = threading.Thread(target=position_payload)\r\n\r\n # STARTS THREADS\r\n thread1.start()\r\n thread2.start()\r\n\r\n# ----------------------------------------------------- GLOBAL -----------------------------------------------------\r\n\r\n# DATA\r\nstock = []\r\nstock_filtered = []\r\nstock_ordered = {}\r\nstock_data = {}\r\norders = {}\r\n\r\n# KEYS\r\nAPI_KEY = 'PKNWDZT640Q786J4Q3ZP'\r\nAPI_SECRET_KEY = 'CnHr41rc62hIzxh27bqrSBLnq1kZa3yENBg1BKp4'\r\nSUB = \"sip\"\r\n\r\n# TRADING URLS\r\nBASE_TRADE_URL = \"https://paper-api.alpaca.markets\"\r\nACCOUNT_URL = \"{}/v2/account\".format(BASE_TRADE_URL)\r\nORDERS_URL = \"{}/v2/orders\".format(BASE_TRADE_URL)\r\nPOSITIONS_URL = \"{}/v2/positions\".format(BASE_TRADE_URL)\r\nHEADERS = {'APCA-API-KEY-ID': API_KEY, 'APCA-API-SECRET-KEY': API_SECRET_KEY}\r\n\r\n# DATA URLS\r\nBASE_DATA_URL = \"https://data.alpaca.markets/v2/stocks/snapshots?symbols=\"\r\nCLOCK_URL = \"https://api.alpaca.markets/v2/clock\"\r\n\r\n# ----------------------------------------------------- INITIALIZE -----------------------------------------------------\r\n\r\ndef initialize():\r\n # STOCK LIST SETUP (S&P 500 LIST)\r\n sp_500_ticker = pd.read_html(\"https://en.wikipedia.org/wiki/List_of_S%26P_500_companies\")\r\n sp_500_ticker = sp_500_ticker[0]\r\n stock = sp_500_ticker['Symbol'].values.tolist()\r\n symbol_string = \"\"\r\n for symbol in stock:\r\n symbol_string = symbol_string + symbol + \",\"\r\n symbol_string = symbol_string[0:len(symbol_string)-1]\r\n DATA_URL = BASE_DATA_URL + symbol_string\r\n\r\n # DATA REQUEST\r\n request = requests.get(DATA_URL, headers=HEADERS)\r\n data = request.json()\r\n\r\n # 
BOUNDS + DICT SETUP\r\n for key in data:\r\n if (data[key] != None): \r\n stock_data[key] = {}\r\n stock_data[key][\"current_price\"] = \"\"\r\n delta = (data[key][\"prevDailyBar\"][\"h\"] - data[key][\"prevDailyBar\"][\"l\"]) / 2\r\n stock_data[key][\"current_bar\"] = [data[key][\"dailyBar\"][\"l\"], data[key][\"dailyBar\"][\"h\"]]\r\n stock_data[key][\"bounds\"] = [data[key][\"prevDailyBar\"][\"l\"] - delta, data[key][\"prevDailyBar\"][\"h\"] + delta]\r\n if (stock_data[key][\"current_bar\"][0] < stock_data[key][\"bounds\"][0] or stock_data[key][\"current_bar\"][1] > stock_data[key][\"bounds\"][1]):\r\n del stock_data[key]\r\n else:\r\n stock_filtered.append(key)\r\n\r\n# ----------------------------------------------------- WEBSOCKET -----------------------------------------------------\r\n\r\ndef websocket_dataflow():\r\n\r\n # SOCKET LINK\r\n socket = \"wss://stream.data.alpaca.markets/v2/\" + SUB\r\n\r\n # SUBSCRIBE\r\n def on_open(ws):\r\n print(\"\\nConnection sucess...\", \"\\n\")\r\n # AUTH\r\n message = {\"action\": \"auth\", \"key\": API_KEY, \"secret\": API_SECRET_KEY}\r\n ws.send(json.dumps(message))\r\n # MESSAGE TO SEND\r\n message = {\"action\":\"subscribe\", \"bars\":stock_filtered}\r\n ws.send(json.dumps(message))\r\n\r\n # PARSE DATA\r\n def on_message(ws, message):\r\n recieved = json.loads(message)\r\n for x in recieved:\r\n if (x[\"T\"] == \"b\"):\r\n stock_data[x[\"S\"]][\"current_price\"] = x[\"c\"]\r\n #print(x[\"S\"], \" updated: \", stock_data[x[\"S\"]], \"\\n\")\r\n \r\n # ERROR\r\n def on_error(ws, message):\r\n print(\"Error: \" + message, \"\\n\")\r\n\r\n def on_close():\r\n print(\"Connection terminated...\", \"\\n\")\r\n\r\n # CONNECT TO SERVER\r\n ws = websocket.WebSocketApp(socket, on_open=on_open, on_message=on_message, on_error=on_error, on_close=on_close)\r\n ws.run_forever()\r\n\r\n# ----------------------------------------------------- POSITION PAYLOAD -----------------------------------------------------\r\n\r\ndef position_payload():\r\n open_trades = 0\r\n while True:\r\n # RUNS EVERY 60 SEC\r\n time.sleep(60)\r\n\r\n # BUYS STOCK (SWAPPED THE IF STATEMENT )\r\n for stock in stock_data:\r\n if (stock_data[stock][\"current_price\"] != \"\" and not(stock in stock_ordered) and open_trades < 10):\r\n if (stock_data[stock][\"current_price\"] < stock_data[stock][\"bounds\"][0] and open_trades < 10):\r\n short_order(stock)\r\n open_trades += 1\r\n print(open_trades)\r\n elif (stock_data[stock][\"current_price\"] > stock_data[stock][\"bounds\"][1] and open_trades < 10):\r\n buy_order(stock)\r\n open_trades += 1\r\n print(open_trades)\r\n\r\n # UPDATES ORDERS\r\n update_orders()\r\n\r\n # SELLS STOCK\r\n for order in list(orders):\r\n if ((orders[order][\"status\"] == \"new\" or orders[order][\"status\"] == \"filled\" or orders[order][\"status\"] == \"accepted\")):\r\n print(\"order: \", order)\r\n if (time_to_sell(orders[order][\"filled_at\"])):\r\n print(\"completed time_to_sell\")\r\n sell_position(orders[order][\"symbol\"])\r\n del orders[order]\r\n open_trades -= 1\r\n print(open_trades)\r\n \r\n# COMPARE TIME\r\n\r\ndef time_to_sell(order_time):\r\n print(\"time_to_sell\")\r\n request = requests.get(CLOCK_URL, headers=HEADERS)\r\n data = request.json()\r\n current_time = data['timestamp']\r\n order_time = order_time[11 : 16]\r\n current_time = current_time[11 : 16]\r\n print(current_time)\r\n print(order_time)\r\n difference = (int((int(current_time[0:2]) - int(order_time[0:2])) * 60) + int((int(current_time[3:5]) - int(order_time[3:5])))) + 240\r\n 
print(difference)\r\n return difference >= 30\r\n\r\n# POSITIONS\r\n\r\ndef sell_position(symbol):\r\n url = POSITIONS_URL + \"/\" + symbol \r\n r = requests.delete(url, headers=HEADERS)\r\n print(\"Position Sold: \", json.loads(r.content), \"\\n\")\r\n\r\ndef sell_all_position():\r\n url = POSITIONS_URL + \"?cancel_orders=true\"\r\n r = requests.delete(url, headers=HEADERS)\r\n print(\"All Position Sold: \", json.loads(r.content), \"\\n\")\r\n\r\n# ORDERS\r\ndef buy_order(symbol):\r\n data = {\r\n \"symbol\": symbol,\r\n \"notional\": \"10000.00\",\r\n \"side\": \"buy\",\r\n \"type\": \"market\",\r\n \"time_in_force\": \"day\"\r\n }\r\n order(data)\r\n\r\n\r\ndef short_order(symbol):\r\n qty = int(10000/stock_data[symbol][\"current_price\"])\r\n data = {\r\n \"symbol\": symbol,\r\n \"qty\": qty,\r\n \"side\": \"sell\",\r\n \"type\": \"market\",\r\n \"time_in_force\": \"day\"\r\n }\r\n order(data)\r\n\r\ndef order(data):\r\n stock_ordered[data[\"symbol\"]] = True\r\n r = requests.post(ORDERS_URL, json=data, headers=HEADERS)\r\n print(\"Order Bought: \", json.loads(r.content), \"\\n\")\r\n\r\ndef cancel_orders():\r\n r = requests.delete(ORDERS_URL, headers=HEADERS)\r\n print(\"Order Canceled: \", json.loads(r.content), \"\\n\")\r\n\r\ndef update_orders():\r\n r = requests.get(ORDERS_URL, headers=HEADERS)\r\n data = json.loads(r.content)\r\n for order in data:\r\n orders[order['client_order_id']] = {\"symbol\":order['symbol'], \"status\": order['status'], \"filled_at\":order['created_at']}\r\n print(\"Updated Orders: \", orders, \"\\n\")\r\n\r\n# ----------------------------------------------------- RUN -----------------------------------------------------\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n# ---------------------------------------------------------------------------------------------------------------","repo_name":"JSidle/StockBot","sub_path":"StockBot.py","file_name":"StockBot.py","file_ext":"py","file_size_in_byte":7694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"11332465149","text":"from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub\nfrom fairseq.models.text_to_speech.hub_interface import TTSHubInterface\n\n\ndef convert_text_to_speech(text):\n \"\"\"\n This function converts a given text to speech using the 'facebook/tts_transformer-fr-cv7_css10' model from Fairseq.\n The model is specialized in converting French text to speech.\n \n Args:\n text (str): The text to be converted to speech.\n \n Returns:\n wav (numpy array): The generated speech in the form of a wave file.\n rate (int): The sample rate of the generated speech.\n \"\"\"\n models, cfg, task = load_model_ensemble_and_task_from_hf_hub(\n 'facebook/tts_transformer-fr-cv7_css10',\n arg_overrides={'vocoder': 'hifigan', 'fp16': False}\n )\n model = models[0]\n TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)\n generator = task.build_generator(model, cfg)\n sample = TTSHubInterface.get_model_input(task, text)\n wav, rate = TTSHubInterface.get_prediction(task, model, generator, sample)\n return wav, rate","repo_name":"vixuowis/Research-2309","sub_path":"Exp-2/output/hf-eval-data-v1/f00421_convert_text_to_speech.py","file_name":"f00421_convert_text_to_speech.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"40035044910","text":"from bson import json_util\nfrom flask import make_response\nfrom 
flask.ext.restful import Api\nfrom flask.ext.cuddlyrest.views import ListMongoResource, SingleMongoResource\n\n\nclass CuddlyRest(Api):\n \n def __init__(self, **kwargs):\n Api.__init__(self, **kwargs)\n \n def init_app(self, app):\n self.app = app\n self.representation('application/json')(self.json_encode)\n\n def json_encode(self, data, code, headers=None):\n resp = make_response(json_util.dumps(data, indent=4), code)\n if headers:\n resp.headers.extend(headers)\n return resp\n\n def register(self, collection, name):\n collection_resource = SingleMongoResource(collection)\n collection_list = ListMongoResource(collection)\n self.add_resource(collection_resource, '/%s/'\n % name,\n endpoint=name + '_single',\n document=collection)\n self.add_resource(collection_list, '/%s' % name,\n endpoint=name + '_multiple',\n document=collection)\n\n def run(self, *args, **kwargs):\n self.app.run(*args, **kwargs)\n\n def add_resource(self, resource, *urls, **kwargs):\n \"\"\"Adds a resource to the api.\n\n :param resource: the class name of your resource\n :type resource: :class:`Resource`\n :param urls: one or more url routes to match for the resource, standard\n flask routing rules apply. Any url variables will be\n passed to the resource method as args.\n :type urls: str\n\n :param endpoint: endpoint name (defaults to\n :meth:`Resource.__name__.lower`\n Can be used to reference this route in :class:`fields.Url` fields\n :type endpoint: str\n\n Additional keyword arguments not specified above will be passed as-is\n to :meth:`flask.Flask.add_url_rule`.\n\n Examples::\n\n api.add_resource(HelloWorld, '/', '/hello')\n api.add_resource(Foo, '/foo', endpoint=\"foo\")\n api.add_resource(FooSpecial, '/special/foo', endpoint=\"foo\")\n\n \"\"\"\n endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()\n self.endpoints.add(endpoint)\n\n if endpoint in self.app.view_functions.keys():\n previous_view_class = (self.app.view_functions[endpoint]\n .__dict__['view_class'])\n\n # if you override the endpoint with a different class, avoid the\n # collision by raising an exception\n if previous_view_class != resource:\n raise ValueError(\n 'This endpoint (%s) is already set to the class %s.'\n % (endpoint, previous_view_class.__name__))\n\n resource.endpoint = endpoint\n resource_func = self.output(resource.as_view(endpoint, **kwargs))\n\n for decorator in self.decorators:\n resource_func = decorator(resource_func)\n\n for url in urls:\n self.app.add_url_rule(self.prefix + url, view_func=resource_func)\n","repo_name":"wuurrd/Flask-CuddlyRest","sub_path":"flask_cuddlyrest/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"41"} +{"seq_id":"6178054017","text":"import io\nimport os.path\n\nimport stardicter.czechenglish\nfrom stardicter.test_base import BaseTest\n\n\nclass CzechEnglishTest(BaseTest):\n writer_class = stardicter.czechenglish.CzechEnglishWriter\n\n\nclass CzechEnglishFileTest(CzechEnglishTest):\n def get_writer(self):\n '''\n Gets prepared writer class.\n '''\n return self.writer_class(\n file=io.open(os.path.join(\n os.path.dirname(__file__),\n 'test_data.txt'\n ), 'rb')\n )\n","repo_name":"nijel/stardicter","sub_path":"stardicter/test_czechenglish.py","file_name":"test_czechenglish.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"41"} +{"seq_id":"31612205772","text":"from django.urls 
import path\nfrom . import views\nfrom re import template\nfrom django.urls import path \nfrom . import views\nfrom django.contrib.auth import views as auth_view\n\nurlpatterns = [\n path('', views.home, name='home1'),\n path('home/', views.homepage, name='home'),\n path('table/', views.students, name='data'),\n path('addrecord/', views.addrecord, name='add'),\n path('activity/', views.activity, name='activity'),\n path('update//', views.updaterecord, name='update'),\n path('delete//', views.deleterecord, name='delete'),\n path('', views.home, name='home'),\n path('register/', views.register, name='register'),\n path('profile/', views.profile, name='profile'),\n path('login/', views.login, name='login'),\n path('logout/', views.logout, name='logout'),\n\n\n]","repo_name":"Mariam22-hub/student-affairs-website","sub_path":"updateTable/Table/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"14364668211","text":"from actions.service_recommender_action import RecommendServices\nfrom rasa_sdk.executor import CollectingDispatcher\nimport unittest\nfrom unittest.mock import MagicMock, patch\nimport json\nimport sys\nimport os\nsys.path.append('actions')\n\nSERVICE_RECOMMENDER_JSON_RESPONSE_SUCCESS = [\n {\n \"service\": {\n \"id\": \"8c6e25e9-e186-49fd-852c-f6f168d1351f\",\n \"type\": \"Service\",\n \"subtype\": None,\n \"organizations\": [\n {\n \"id\": \"c5f6914f-302e-41cc-bed7-4d4215aac640\",\n \"name\": \"Kela\"\n },\n {\n \"id\": \"c5f6914f-302e-41cc-bed7-4d4215aac640\",\n \"name\": \"Kela\"\n }\n ],\n \"name\": {\n \"en\": \"Kela's benefits for the unemployed\",\n \"fi\": \"Kelan tuet työttömille\",\n \"sv\": \"FPA:s stöd för arbetslösa\"\n },\n \"descriptions\": {\n \"en\": [\n {\n \"value\": \"* labour market subsidy\\n* basic unemployment allowance\\n* commuting and relocation allowance\\n* job alternation compensation\",\n \"type\": \"Description\"\n },\n {\n \"value\": \"Unemployed\",\n \"type\": \"Summary\"\n }\n ],\n \"fi\": [\n {\n \"value\": \"* työmarkkinatuki\\n* peruspäiväraha\\n* liikkuvuusavustus\\n* vuorottelukorvaus\",\n \"type\": \"Description\"\n },\n {\n \"value\": \"Työttömät\",\n \"type\": \"Summary\"\n }\n ],\n \"sv\": [\n {\n \"value\": \"Arbetslösa\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"* arbetsmarknadsstöd\\n* grunddagpenning \\n* rörlighetsunderstöd\\n* alterneringsersättning\",\n \"type\": \"Description\"\n }\n ]\n },\n \"requirement\": {\n \"en\": \"http://www.kela.fi/unemployment\",\n \"fi\": \"http://www.kela.fi/tyottomat\",\n \"sv\": \"www.fpa.fi/utanarbete\"\n },\n \"targetGroups\": {\n \"en\": [\n {\n \"name\": \"Finnish startups\",\n \"code\": \"KR2.8\"\n },\n {\n \"name\": \"Businesses and non-government organizations\",\n \"code\": \"KR2\"\n },\n {\n \"name\": \"Businesses operating in the domestic (Finnish) market\",\n \"code\": \"KR2.3\"\n },\n {\n \"name\": \"Citizens\",\n \"code\": \"KR1\"\n }\n ],\n \"fi\": [\n {\n \"name\": \"Yrityksen perustajat kotimaassa\",\n \"code\": \"KR2.8\"\n },\n {\n \"name\": \"Yritykset ja yhteisöt\",\n \"code\": \"KR2\"\n },\n {\n \"name\": \"Kotimarkkinoilla toimivat yritykset\",\n \"code\": \"KR2.3\"\n },\n {\n \"name\": \"Kansalaiset\",\n \"code\": \"KR1\"\n }\n ],\n \"sv\": [\n {\n \"name\": \"Inhemska företagsgrundare\",\n \"code\": \"KR2.8\"\n },\n {\n \"name\": \"Företag och samfund\",\n \"code\": \"KR2\"\n },\n {\n \"name\": \"Företag pÃ¥ den inhemska marknaden\",\n 
\"code\": \"KR2.3\"\n },\n {\n \"name\": \"Medborgare\",\n \"code\": \"KR1\"\n }\n ]\n },\n \"serviceClasses\": {\n \"en\": [\n {\n \"name\": \"Support and benefits for the unemployed\",\n \"description\": \"This service subclass contains different types of financial support for unemployed jobseekers, support eligibility criteria and services related to applying for support.\",\n \"code\": \"P10.6\"\n },\n {\n \"name\": \"Working life rules and collective agreements\",\n \"description\": \"This service subclass contains issues related to employment contracts and terms of employment, pay, and equality and flexibility in working life, including telework and part-time work, from the service point of view.\",\n \"code\": \"P10.3\"\n }\n ],\n \"fi\": [\n {\n \"name\": \"Työttömän tuet ja etuudet\",\n \"description\": \"Tässä palvelualaluokassa käsitellään työttömälle työnhakijalle suunnattuja erilaisia taloudellisia tukia, niiden saamisen edellytyksiä ja tukien hakupalveluja.\",\n \"code\": \"P10.6\"\n },\n {\n \"name\": \"Työelämän säännöt ja työehtosopimukset\",\n \"description\": \"Tähän palvelualaluokkaan kuuluvat palvelujen näkökulmasta työsopimuksiin ja -ehtoihin, palkkaukseen, työelämän yhdenvertaisuuteen ja joustoihin kuten etä- ja osa-aikatyöhön liittyvät asiat.\",\n \"code\": \"P10.3\"\n }\n ],\n \"sv\": [\n {\n \"name\": \"Stöd och förmÃ¥ner för arbetslösa\",\n \"description\": \"I denna serviceundergrupp behandlas olika ekonomiska stödformer för arbetslösa arbetssökande, förutsättningar för beviljande av dem och tjänster för ansökan om stöd.\",\n \"code\": \"P10.6\"\n },\n {\n \"name\": \"Arbetslivets regler och kollektivavtal\",\n \"description\": \"Denna serviceundergrupp omfattar ärenden relaterade till arbetsavtal och -villkor, löner, jämställdhet och flexibilitet i arbetslivet, sÃ¥som distans- och deltidsarbete.\",\n \"code\": \"P10.3\"\n }\n ]\n },\n \"areas\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"lastUpdated\": \"2021-06-15T07:37:25.395000\"\n },\n \"channels\": [\n {\n \"id\": \"16d63b97-0b8f-4f72-95e7-7cc2f9ab9e15\",\n \"type\": \"EChannel\",\n \"areaType\": \"Nationwide\",\n \"organizationId\": \"c5f6914f-302e-41cc-bed7-4d4215aac640\",\n \"serviceIds\": [\n \"105837fa-97d2-4f9a-916e-09fe7ca19e52\",\n \"b5b82555-9852-4a77-89bd-7dcd332d4f11\",\n \"7d655de8-76fd-4f24-bb92-e8f49e153e88\",\n \"b84af2c2-824f-4b27-a599-fa28de4e437c\",\n \"f8dd3060-543c-47ba-abba-a14fc1feacb3\",\n \"ff059acf-de3f-468d-8b6a-9a492d301cda\",\n \"6529e1c2-b9ac-4f00-8e8b-6d13616ccf81\",\n \"8c6e25e9-e186-49fd-852c-f6f168d1351f\",\n \"3ef3f1ef-6754-4308-8ef4-deef416b081e\",\n \"b5c945e5-a4d6-47b9-9362-0fe0f20adc2e\",\n \"30fe7757-32ad-4ea7-a8e4-857b44f81160\",\n \"dae7ba63-46af-4130-995d-1e88cafaa70c\",\n \"e0556386-0ffa-40f2-98aa-770b42dd792a\",\n \"579991be-f40f-4913-8130-0e07592b50c4\",\n \"e52f663f-df44-426c-b401-147d4ebd19cc\",\n \"aeb60b1d-2872-4841-9704-652246948990\",\n \"76472df7-25ed-4c55-94dd-fa3dd98ee862\",\n \"58a4bf82-dc19-4ca5-a57d-a0ef39d0e89d\",\n \"506d84d5-0ecf-400a-8b74-f9bd990dab7b\",\n \"3456be0a-a126-43af-a364-f24e24786cb1\",\n \"5ecdee89-0459-4b27-8271-206f314b801b\",\n \"b0372b6c-5ab5-4dd1-92e5-bde71dd25488\",\n \"52693b89-c7da-4c61-80df-b6f871672064\",\n \"ad234c6c-e24c-4d0f-8698-81980502278d\",\n \"caba7a03-40b7-439e-871d-1d9081bd3299\",\n \"a0a34972-1af4-41d2-ac89-198ce1875e4f\",\n \"7fd28107-d7d6-4158-96d1-fad0bd8c7499\",\n \"19a31135-a9d6-4926-a20b-bfe7db1780d3\",\n \"05f6a1fd-925f-46f0-b0d2-8a92881710a6\",\n \"9b6eb134-2764-47bd-9b98-e04f3a50b88b\",\n 
\"b7e6eddc-0f49-4bfa-876c-53fa01ba7907\",\n \"e09e783a-6363-412e-bed2-4082768c914d\",\n \"0157fe90-43d2-40e4-895a-a9446829d1d8\"\n ],\n \"name\": {\n \"en\": \"Kela's online customer service\",\n \"fi\": \"Kelan Asiointipalvelu\",\n \"sv\": \"FPA:s e-tjänst\"\n },\n \"descriptions\": {\n \"en\": [\n {\n \"value\": \"Check your own data, apply for benefits, send supporting documents and report changes.\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"In Kela's online customer service you can check your own data, apply for benefits, send supporting documets and report changes. You can handle almost all your transactions with Kela on the Internet. \",\n \"type\": \"Description\"\n }\n ],\n \"fi\": [\n {\n \"value\": \"Tarkastele omia Kela-tietojasi, hae etuuksia, lähetä liitteitä ja ilmoita muutoksista. Voit hoitaa lähes kaikki Kela-asiasi verkossa.\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"Kelan verkkoasiointipalvelussa voit tarkastella omia Kela-tietojasi, hakea etuuksia, lähettää liitteitä ja ilmoittaa muutoksista. Voit hoitaa lähes kaikki Kela-asiasi verkossa.\",\n \"type\": \"Description\"\n }\n ],\n \"sv\": [\n {\n \"value\": \"Kontrollera dina uppgifter hos FPA, ansök om förmÃ¥ner, skicka bilagor och meddela förändringar. Du kan sköta sÃ¥ gott som alla FPA-ärenden pÃ¥ nätet.\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"I FPA:s e-tjänst kan du kontrollera dina egna uppgifter hos FPA, ansöka om förmÃ¥ner, skicka bilagor och meddela förändringar. Du kan sköta sÃ¥ gott som alla FPA-ärenden pÃ¥ nätet. \",\n \"type\": \"Description\"\n }\n ]\n },\n \"webPages\": {\n \"en\": [\n \"https://asiointi.kela.fi/go_app?lg=en\"\n ],\n \"fi\": [\n \"https://asiointi.kela.fi/go_app\"\n ],\n \"sv\": [\n \"https://asiointi.kela.fi/go_app?lg=sv\"\n ]\n },\n \"emails\": {\n \"en\": [],\n \"fi\": [\n \"tekninentuki@kela.fi\"\n ],\n \"sv\": []\n },\n \"phoneNumbers\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"areas\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"addresses\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"lastUpdated\": \"2021-06-16T07:19:48.498000\"\n },\n {\n \"id\": \"fbeff57b-fdb7-4acc-9344-9d97193bf910\",\n \"type\": \"ServiceLocation\",\n \"areaType\": \"Nationwide\",\n \"organizationId\": \"c5f6914f-302e-41cc-bed7-4d4215aac640\",\n \"serviceIds\": [\n \"e52f663f-df44-426c-b401-147d4ebd19cc\",\n \"579991be-f40f-4913-8130-0e07592b50c4\",\n \"30fe7757-32ad-4ea7-a8e4-857b44f81160\",\n \"3ef3f1ef-6754-4308-8ef4-deef416b081e\",\n \"aeb60b1d-2872-4841-9704-652246948990\",\n \"76472df7-25ed-4c55-94dd-fa3dd98ee862\",\n \"e0556386-0ffa-40f2-98aa-770b42dd792a\",\n \"6529e1c2-b9ac-4f00-8e8b-6d13616ccf81\",\n \"8c6e25e9-e186-49fd-852c-f6f168d1351f\",\n \"dae7ba63-46af-4130-995d-1e88cafaa70c\",\n \"b5c945e5-a4d6-47b9-9362-0fe0f20adc2e\",\n \"ff059acf-de3f-468d-8b6a-9a492d301cda\"\n ],\n \"name\": {\n \"en\": None,\n \"fi\": \"Haukiputaan palvelupiste\",\n \"sv\": \"Servicestället i Haukipudas\"\n },\n \"descriptions\": {\n \"en\": [],\n \"fi\": [\n {\n \"value\": \"Kelan palvelupiste\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"Kelan palvelupisteessä opastetaan ja neuvotaan kaikissa Kelan etuuksiin liittyvissä asioissa. Voit hakea etuuksia ja toimittaa liitteet myös asiointipalvelussamme osoitteessa www.kela.fi/asiointi. 
Lue myös mahdollisuudesta ajanvaraukseen: www.kela.fi/ajanvaraus.\",\n \"type\": \"Description\"\n }\n ],\n \"sv\": [\n {\n \"value\": \"Fpa:s serviceställe\",\n \"type\": \"Summary\"\n },\n {\n \"value\": \"PÃ¥ FPA:s serviceställe kan du fÃ¥ information och rÃ¥dgivning om alla FPA-förmÃ¥ner. Du kan ocksÃ¥ ansöka om förmÃ¥ner och lämna in bilagor i vÃ¥r e-tjänst pÃ¥ adressen www.fpa.fi/etjanst. Läs mer om hur du bokar tid pÃ¥ adressen www.fpa.fi/tidsbokning.\",\n \"type\": \"Description\"\n }\n ]\n },\n \"webPages\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"emails\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"phoneNumbers\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"areas\": {\n \"en\": [],\n \"fi\": [],\n \"sv\": []\n },\n \"addresses\": {\n \"en\": [\n {\n \"type\": \"Location\",\n \"subtype\": \"Single\",\n \"streetNumber\": \"15\",\n \"postalCode\": \"90830\",\n \"latitude\": \"7229135.399\",\n \"longitude\": \"422665.198\",\n \"streetName\": \"Simppulantie\",\n \"postOffice\": \"HAUKIPUDAS\",\n \"municipalityCode\": \"564\",\n \"municipalityName\": \"Aura\"\n },\n {\n \"type\": \"Postal\",\n \"subtype\": \"PostOfficeBox\",\n \"streetNumber\": None,\n \"postalCode\": None,\n \"latitude\": None,\n \"longitude\": None,\n \"streetName\": None,\n \"postOffice\": None,\n \"municipalityCode\": None,\n \"municipalityName\": None\n }\n ],\n \"fi\": [\n {\n \"type\": \"Location\",\n \"subtype\": \"Single\",\n \"streetNumber\": \"15\",\n \"postalCode\": \"90830\",\n \"latitude\": \"7229135.399\",\n \"longitude\": \"422665.198\",\n \"streetName\": \"Simppulantie\",\n \"postOffice\": \"HAUKIPUDAS\",\n \"municipalityCode\": \"564\",\n \"municipalityName\": \"Aura\"\n },\n {\n \"type\": \"Postal\",\n \"subtype\": \"PostOfficeBox\",\n \"streetNumber\": None,\n \"postalCode\": None,\n \"latitude\": None,\n \"longitude\": None,\n \"streetName\": None,\n \"postOffice\": None,\n \"municipalityCode\": None,\n \"municipalityName\": None\n }\n ],\n \"sv\": [\n {\n \"type\": \"Location\",\n \"subtype\": \"Single\",\n \"streetNumber\": \"15\",\n \"postalCode\": \"90830\",\n \"latitude\": \"7229135.399\",\n \"longitude\": \"422665.198\",\n \"streetName\": \"Simppulantie\",\n \"postOffice\": \"HAUKIPUDAS\",\n \"municipalityCode\": \"564\",\n \"municipalityName\": \"Aura\"\n },\n {\n \"type\": \"Postal\",\n \"subtype\": \"PostOfficeBox\",\n \"streetNumber\": None,\n \"postalCode\": None,\n \"latitude\": None,\n \"longitude\": None,\n \"streetName\": None,\n \"postOffice\": None,\n \"municipalityCode\": None,\n \"municipalityName\": None\n }\n ]\n },\n \"lastUpdated\": \"2021-06-28T01:00:00.719000\"\n }\n ],\n \"score\": 0.8303728304964242\n },\n]\nSERVICE_RECOMMENDER_JSON_RESPONSE_ERROR = {\n \"detail\": [\n {\n \"loc\": [\n \"body\",\n 48\n ],\n \"msg\": \"Expecting value: line 4 column 1 (char 48)\",\n \"type\": \"value_error.jsondecode\",\n \"ctx\": {\n \"msg\": \"Expecting value\",\n \"doc\": \"{\\n \\\"need_text\\\": \\\"string\\\",\\n \\\"municipality_id\\\":\\n}\",\n \"pos\": 48,\n \"lineno\": 4,\n \"colno\": 1\n }\n }\n ]\n}\n\n# Test class for Rasa Tracker store which contains chatbot user message data\n\n\nclass TestRasaTracker():\n def __init__(self):\n self.slots = {\n 'general_service_search_text': 'olispa kahvia',\n 'municipality': 'turku',\n \"fallback_language\": \"fi\",\n \"session_started_metadata\": {\n \"language\": \"it\"\n }\n }\n self.latest_message = {\n \"intent\": {\n \"id\": -4114183629044666000,\n \"name\": \"public_transport\",\n \"confidence\": 
0.9920039772987366\n },\n \"entities\": [],\n \"text\": \"joukkoliikenne\",\n \"message_id\": \"a4d3a71843eb449689e0eb4dc34ca7e9\",\n \"metadata\": {\n \"language\": \"fi\"\n },\n \"intent_ranking\": [\n ]\n }\n self.events = [\n {\n \"event\": \"action\",\n \"timestamp\": 1628670810.1175656,\n \"name\": \"action_session_start\",\n \"confidence\": 1\n },\n {\n \"event\": \"session_started\",\n \"timestamp\": 1628670810.117589\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670810.117605,\n \"name\": \"action_listen\"\n },\n {\n \"event\": \"user\",\n \"timestamp\": 1628670810.1180103,\n \"text\": \"/get_started\",\n \"parse_data\": {\n \"intent\": None,\n \"entities\": [],\n \"message_id\": \"d2f0600da3bc4648998c9727469121ce\",\n \"metadata\": {},\n \"intent_ranking\": [\n {\n \"name\": \"get_started\",\n \"confidence\": 1\n }\n ]\n },\n \"input_channel\": \"webchat\",\n \"message_id\": \"d2f0600da3bc4648998c9727469121ce\",\n \"metadata\": {}\n },\n {\n \"event\": \"user_featurization\",\n \"timestamp\": 1628670810.1396117,\n \"use_text_for_featurization\": False\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670810.1396365,\n \"name\": \"utter_get_started\",\n \"policy\": \"policy_2_AugmentedMemoizationPolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"bot\",\n \"timestamp\": 1628670810.1396775,\n \"metadata\": {\n \"template_name\": \"utter_get_started\"\n },\n \"text\": \"Moi! Autan sinua löytämään palveluita eri elämäntilanteisiisi liittyen Varsinais-Suomen alueelta.\\n\\nYmmärrän helpoiten melko lyhyitä viestejä tai voit myös klikkailla nappeja.\",\n \"data\": {}\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670810.1663811,\n \"name\": \"utter_get_started_choose_life_event\",\n \"policy\": \"policy_2_AugmentedMemoizationPolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"bot\",\n \"timestamp\": 1628670810.166434,\n \"metadata\": {\n \"template_name\": \"utter_get_started_choose_life_event\"\n },\n \"text\": \"Kuvaile ensiksi, millaiseen elämäntilanteeseen tarvitsisit apua tai voit myös etsiä vapaasti palveluita 😊\",\n \"data\": {\n \"buttons\": [\n {\n \"title\": \"Työttömäksi jääminen\",\n \"type\": \"postback\",\n \"payload\": \"/ke8_losing_job\"\n },\n {\n \"title\": \"Velkaantuminen\",\n \"type\": \"postback\",\n \"payload\": \"/ke9_debt\"\n },\n {\n \"title\": \"Omaisen kuolema\",\n \"type\": \"postback\",\n \"payload\": \"/ke14_death\"\n },\n {\n \"title\": \"Etsi vapaasti palveluita\",\n \"type\": \"postback\",\n \"payload\": \"/service_search\"\n }\n ]\n }\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670810.1708252,\n \"name\": \"action_listen\",\n \"policy\": \"policy_2_AugmentedMemoizationPolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"user\",\n \"timestamp\": 1628670821.483857,\n \"text\": \"/service_search\",\n \"parse_data\": {\n \"intent\": None,\n \"entities\": [],\n \"message_id\": \"7c25cbf27151428589d7fc51620e508a\",\n \"metadata\": {},\n \"intent_ranking\": [\n {\n \"name\": \"service_search\",\n \"confidence\": 1\n }\n ],\n \"text\": \"\"\n },\n \"input_channel\": \"webchat\",\n \"message_id\": \"7c25cbf27151428589d7fc51620e508a\",\n \"metadata\": {}\n },\n {\n \"event\": \"user_featurization\",\n \"timestamp\": 1628670821.5257907,\n \"use_text_for_featurization\": False\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670821.5258121,\n \"name\": \"general_service_search_form\",\n \"policy\": \"policy_2_AugmentedMemoizationPolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"active_loop\",\n \"timestamp\": 
1628670821.5258555,\n \"name\": \"general_service_search_form\"\n },\n {\n \"event\": \"bot\",\n \"timestamp\": 1628670821.5258627,\n \"metadata\": {\n \"linkTarget\": \"_blank\",\n \"userInput\": \"show\",\n \"forceOpen\": False,\n \"forceClose\": False,\n \"pageChangeCallbacks\": None,\n \"pageEventCallbacks\": None,\n \"template_name\": \"utter_ask_general_service_search_text\"\n },\n \"text\": \"Kuvaile ensiksi palvelutarvettasi\",\n \"data\": {}\n },\n {\n \"event\": \"slot\",\n \"timestamp\": 1628670821.5258706,\n \"name\": \"requested_slot\",\n \"value\": \"general_service_search_text\"\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670821.5335426,\n \"name\": \"action_listen\",\n \"policy\": \"policy_1_RulePolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"user\",\n \"timestamp\": 1628670831.59987,\n \"text\": \"haluaisin mennä uimarannalle\",\n \"parse_data\": {\n \"intent\": None,\n \"entities\": [],\n \"text\": \"haluaisin mennä uimarannalle\",\n \"message_id\": \"1a5dad116ee34938b11f68e60c1814ba\",\n \"metadata\": {},\n \"intent_ranking\": [\n {\n \"id\": 1679135316125928700,\n \"name\": \"chitchat.bye\",\n \"confidence\": 0.8972764015197754,\n \"canonical\": \"hei hei\"\n }\n ]\n },\n \"input_channel\": \"webchat\",\n \"message_id\": \"1a5dad116ee34938b11f68e60c1814ba\",\n \"metadata\": {}\n },\n {\n \"event\": \"user_featurization\",\n \"timestamp\": 1628670831.625026,\n \"use_text_for_featurization\": False\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670831.625047,\n \"name\": \"general_service_search_form\",\n \"policy\": \"policy_1_RulePolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"slot\",\n \"timestamp\": 1628670831.6250882,\n \"name\": \"general_service_search_text\",\n \"value\": \"haluaisin mennä uimarannalle\"\n },\n {\n \"event\": \"bot\",\n \"timestamp\": 1628670831.6250937,\n \"metadata\": {\n \"template_name\": \"utter_ask_municipality\"\n },\n \"text\": \"Kerrotko vielä, mistä kunnasta haluat palveluita?\",\n \"data\": {}\n },\n {\n \"event\": \"slot\",\n \"timestamp\": 1628670831.6250968,\n \"name\": \"requested_slot\",\n \"value\": \"municipality\"\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670831.6321259,\n \"name\": \"action_listen\",\n \"policy\": \"policy_1_RulePolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"user\",\n \"timestamp\": 1628670839.1954765,\n \"text\": \"turku\",\n \"parse_data\": {\n \"intent\": None,\n \"entities\": [\n {\n \"entity\": \"municipality_entity\",\n \"start\": 0,\n \"end\": 5,\n \"confidence_entity\": 0.9993183612823486,\n \"value\": \"turku\",\n \"extractor\": \"DIETClassifier\"\n }\n ],\n \"text\": \"turku\",\n \"message_id\": \"e8c8472b6ec04644a241510e418b5332\",\n \"metadata\": {},\n \"intent_ranking\": [\n {\n \"id\": 8390830771550880000,\n \"name\": \"service_municipality_choice\",\n \"confidence\": 0.999856173992157,\n \"canonical\": \"haluan palveluita turusta\"\n }\n ]\n },\n \"input_channel\": \"webchat\",\n \"message_id\": \"e8c8472b6ec04644a241510e418b5332\",\n \"metadata\": {}\n },\n {\n \"event\": \"user_featurization\",\n \"timestamp\": 1628670839.203917,\n \"use_text_for_featurization\": False\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670839.2039268,\n \"name\": \"general_service_search_form\",\n \"policy\": \"policy_1_RulePolicy\",\n \"confidence\": 1\n },\n {\n \"event\": \"slot\",\n \"timestamp\": 1628670839.2039347,\n \"name\": \"municipality\",\n \"value\": \"turku\"\n },\n {\n \"event\": \"active_loop\",\n \"timestamp\": 1628670839.203938\n },\n {\n 
\"event\": \"slot\",\n \"timestamp\": 1628670839.203941,\n \"name\": \"requested_slot\"\n },\n {\n \"event\": \"action\",\n \"timestamp\": 1628670839.9863353,\n \"name\": \"action_recommend_services\",\n \"policy\": \"policy_2_AugmentedMemoizationPolicy\",\n \"confidence\": 1\n }\n ]\n\n def get_slot(self, key):\n return self.slots[key]\n\n\n# This method will be used by the mock to replace requests.post to service recommender API\ndef mocked_requests_post(*args, **kwargs):\n\n class MockResponse:\n def __init__(self, json_data, status_code):\n self.json_data = json_data\n self.status_code = status_code\n\n def json(self):\n return self.json_data\n\n def text(self):\n return json.dumps(self.json_data)\n\n if args[0] == 'request_success/services/recommend':\n return MockResponse(SERVICE_RECOMMENDER_JSON_RESPONSE_SUCCESS, 200)\n elif args[0] == 'request_error/services/recommend':\n return MockResponse(SERVICE_RECOMMENDER_JSON_RESPONSE_ERROR, 400)\n\n\nclass TestRasaActionsRecommendServices(unittest.TestCase):\n\n def setUp(self):\n self.tracker = TestRasaTracker()\n\n @patch('requests.post', side_effect=mocked_requests_post)\n def test_recommend_services_action_success(self, mock_post):\n os.environ['RASA_ACTIONS_SERVICE_RECOMMENDER_ENDPOINT'] = 'request_success'\n dispatcher = CollectingDispatcher()\n action = RecommendServices()\n action.run(dispatcher, self.tracker, None)\n\n self.assertEqual(\n dispatcher.messages[0]['attachment']['payload']['elements'][0][\n 'title'], SERVICE_RECOMMENDER_JSON_RESPONSE_SUCCESS[0]['service']['name']['fi']\n )\n\n @patch('requests.post', side_effect=mocked_requests_post)\n def test_recommend_services_action_error(self, mock_post):\n os.environ['RASA_ACTIONS_SERVICE_RECOMMENDER_ENDPOINT'] = 'request_error'\n dispatcher = CollectingDispatcher()\n action = RecommendServices()\n action.run(dispatcher, self.tracker, None)\n self.assertEqual(\n dispatcher.messages[0]['response'], 'utter_recommendation_error')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"City-of-Turku/PaohRasaPlatform","sub_path":"test/test_rasa_actions.py","file_name":"test_rasa_actions.py","file_ext":"py","file_size_in_byte":33857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"36321491926","text":"from pydeck import Deck, Layer, ViewState\n\nfeatures = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"type\": \"Feature\",\n \"properties\": {},\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [-122.42923736572264, 37.80544394934271],\n [0, 37.80544394934271],\n [-122.42923736572264, 0],\n [-122.42923736572264, 37.80544394934271],\n ]\n ],\n },\n }\n ],\n}\n\n\ndef create_geojson_layer_with_gmaps_test_object():\n return Deck(\n description=\"Test of GeoJsonLayer, with Google Maps basemap\",\n map_style=\"satellite\",\n map_provider=\"google_maps\",\n initial_view_state=ViewState(longitude=-122.45, latitude=37.8, zoom=0),\n layers=[\n Layer(\n \"GeoJsonLayer\",\n id=\"geojson-layer\",\n data=features,\n stroked=True,\n filled=True,\n line_width_min_pixels=2,\n opacity=0.4,\n get_line_color=[255, 100, 100],\n get_fill_color=[200, 160, 0, 180],\n )\n ],\n views=None,\n )\n\n\nif __name__ == \"__main__\":\n create_geojson_layer_with_gmaps_test_object().to_html(\"test.html\", 
offline=True)\n","repo_name":"visgl/deck.gl","sub_path":"bindings/pydeck/tests/bindings/pydeck_examples/geojson_layer_with_gmaps.py","file_name":"geojson_layer_with_gmaps.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":11339,"dataset":"github-code","pt":"41"} +{"seq_id":"2478028869","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Date 2022-01-14\r\n\r\n@author: Rongjian Dai\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n\r\n# 判断是否需要重新优化LA,返回每个周期的LA,存为字典\r\ndef reoptLA(QT, marking, supTab, schemeset, turning):\r\n lascheme = {}\r\n armpattern = {}\r\n optc = [0]\r\n c = 0\r\n lascheme[0] = marking[0]\r\n armpattern[0] = lanepattern(lascheme[0], turning)\r\n # print('armpattern[0]:', armpattern[0])\r\n while True:\r\n c += 1\r\n if c in QT.keys():\r\n lastc = optc[-1]\r\n optQ = QT[lastc]\r\n nowQ = QT[c]\r\n optpat = lanepattern(marking[lastc], turning)\r\n # print('cycle:', c)\r\n c1 = firstceritrion(optQ, nowQ, turning)\r\n c2 = secondceritrion(optpat, nowQ, supTab, schemeset, turning)\r\n if c1 is True: # 需求模式相同,不需重新优化\r\n lascheme[c] = marking[lastc]\r\n continue\r\n elif c2 is True: # 存在相似度更大的方案,需重新优化\r\n lascheme[c] = marking[c]\r\n optc.append(c)\r\n else:\r\n lascheme[c] = marking[lastc]\r\n # 给出对应arm pattern\r\n armpattern[c] = lanepattern(lascheme[c], turning)\r\n else:\r\n break\r\n\r\n return lascheme, armpattern, optc\r\n\r\n\r\n# 准则1,demand vector变化情况\r\ndef firstceritrion(optQ, nowQ, turning):\r\n c1 = True # 假设变化方向是共线的\r\n for i in range(4):\r\n (lt, sa, rt) = (turning[i][0], turning[i][1], turning[i][2])\r\n optdemv = np.array([optQ[i][lt], optQ[i][sa], optQ[i][rt]])\r\n nowdemv = np.array([nowQ[i][lt], nowQ[i][sa], nowQ[i][rt]])\r\n # 计算叉积 cross product\r\n crossp = np.cross(optdemv, nowdemv)\r\n if np.any(crossp): # 只要有一个Arm不平行就可能需要重新优化,不需继续判断\r\n c1 = False\r\n break\r\n else:\r\n continue\r\n return c1\r\n\r\n\r\n# 准则2,cosine similarity比较\r\ndef secondceritrion(optpat, nowQ, supTab, schemeset, turning):\r\n c2 = False\r\n for i in range(4):\r\n (lt, sa, rt) = (turning[i][0], turning[i][1], turning[i][2])\r\n nowdemv = np.array([nowQ[i][lt], nowQ[i][sa], nowQ[i][rt]]) # 当前demand vector\r\n nowindex = findindex(optpat[i], schemeset) # LA模式索引\r\n # print('optpat[i]:', optpat[i], 'nowindex:', nowindex)\r\n nowsupv = np.array(supTab[nowindex[0]][nowindex[1]]) # 当前supply vector\r\n nowsim = similarity(nowdemv, nowsupv) # 当前相似度\r\n # 找出备选方案集合\r\n neigind = []\r\n if nowindex[0] + 1 < 6:\r\n if nowindex[1] < 5 - nowindex[0]:\r\n neigind.append([nowindex[0] + 1, nowindex[1]])\r\n if nowindex[1] - 1 >= 0:\r\n neigind.append([nowindex[0] + 1, nowindex[1] - 1])\r\n if nowindex[0] - 1 >= 0:\r\n neigind.append([nowindex[0] - 1, nowindex[1]])\r\n if nowindex[1] + 1 < 7 - nowindex[0]:\r\n neigind.append([nowindex[0] - 1, nowindex[1] + 1])\r\n if nowindex[1] + 1 < 6 - nowindex[0]:\r\n neigind.append([nowindex[0], nowindex[1] + 1])\r\n if nowindex[1] - 1 >= 0:\r\n neigind.append([nowindex[0], nowindex[1] - 1])\r\n # 准则2\r\n for index in neigind:\r\n potsupv = np.array(supTab[index[0]][index[1]]) # potential supply vector\r\n potsim = similarity(nowdemv, potsupv) # Corresponding cosine similarity\r\n # print('Arm', i, 'nowdem:', nowdemv, 'potsupv', potsupv, 'potsim', potsim, 'nowsim', nowsim)\r\n if potsim > nowsim:\r\n c2 = True\r\n break\r\n else:\r\n continue\r\n if c2 is True: # 只要有一个Arm相似度符合在优化准则,不需继续判断\r\n break\r\n else:\r\n continue\r\n return c2\r\n\r\n\r\n# 计算 cosine similarity\r\ndef similarity(v1, v2):\r\n sim = 
np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\r\n return sim\r\n\r\n\r\n# 判断lane-group模式\r\ndef lanepattern(marking, turning):\r\n schemes = [] # [4][4]list,4 arms, 4 lanes\r\n for i in range(4):\r\n (lt, sa, rt) = (turning[i][0], turning[i][1], turning[i][2])\r\n scheme = []\r\n for k in range(4):\r\n # Arm i 上lane k 为: EL\r\n if marking[i][lt][k] == 1 and marking[i][sa][k] == 0:\r\n scheme.append(1)\r\n # Arm i 上lane k 为: LT\r\n if marking[i][lt][k] == 1 and marking[i][sa][k] == 1:\r\n scheme.append(2)\r\n # Arm i 上lane k 为: ET\r\n if marking[i][sa][k] == 1 and marking[i][lt][k] == 0 and marking[i][rt][k] == 0:\r\n scheme.append(3)\r\n # Arm i 上lane k 为: TR\r\n if marking[i][sa][k] == 1 and marking[i][rt][k] == 1:\r\n scheme.append(4)\r\n # Arm i 上lane k 为: ER\r\n # print('marking[i][sa][k]:', marking[i][sa][k], 'marking[i][rt][k]:', marking[i][rt][k])\r\n if marking[i][sa][k] == 0 and marking[i][rt][k] == 1:\r\n scheme.append(5)\r\n schemes.append(scheme)\r\n return schemes\r\n\r\n\r\n# 确定某一个Arm 的LA scheme对应的supply vector\r\ndef findindex(optpat, schemeset):\r\n for i in range(6):\r\n for j in range(6 - i):\r\n if optpat[0] == schemeset[i][j][0] and optpat[1] == schemeset[i][j][1] and optpat[2] == schemeset[i][j][2] and optpat[3] == schemeset[i][j][3]:\r\n index = [i, j]\r\n return index\r\n else:\r\n continue\r\n","repo_name":"RongjianDai/LowPenetration","sub_path":"Reoptimize.py","file_name":"Reoptimize.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"24701084579","text":"'''\r\nDesarrollar una funciónrecursiva para duplicar los valores pares de una lista (NO crear una lista nueva).\r\nEjemplo:duplicar_pares([1,2,3,4,5,-8,10,0]) --> [1,4,3,8,5,-16,20,0].\r\n'''\r\nimport random\r\n#FUNCIONES\r\n'''FUNCION PARA VALIDAR QUE UN NUMERO CUMPLA CON UNA CONDICION'''\r\ndef ValidarUnicaCondicion(Cond):\r\n '''\r\n pre: Se recibe una condicion minima (valor minimo que limita al valor recibido) y un valor.\r\n pos: Se devuelve el valor el cual se confirmo que es mayor a la condicion.\r\n '''\r\n iValor=int(input(\"Ingrese un valor:\"))\r\n while iValor= 0:\n return AtomicSentence(tokens[0][1:], tokens[1:], is_neg=True)\n else:\n return AtomicSentence(tokens[0], tokens[1:], is_neg=False)\n\n\ndef read_input(path):\n queries = []\n KB = []\n with open(path, 'r') as f:\n num_queries = int(f.readline())\n for i in range(num_queries):\n line = f.readline()\n line = line.replace('\\r', '').replace('\\n', '')\n queries.append(get_atomic_sentence(line))\n\n num_sentences = int(f.readline())\n for i in range(num_sentences):\n line = f.readline()\n line = line.replace('\\r', '').replace('\\n', '')\n if line.find('=>') >= 0:\n tokens = line.split('=>')\n conclusion = get_atomic_sentence(tokens[1].strip())\n ts = tokens[0].split('&')\n literals = []\n for t in ts:\n literals.append(get_atomic_sentence(t.strip()))\n KB.append(ImplicationSentence(literals, conclusion))\n else:\n KB.append(get_atomic_sentence(line))\n return queries, KB\n\n\n# check whether two sentences can match\ndef match(query, sentence):\n if query.is_neg == sentence.is_neg:\n return None\n if query.statement != sentence.statement:\n return None\n if len(query.arguments) != len(sentence.arguments):\n return None\n map = {}\n for i in range(len(query.arguments)):\n if is_variable(sentence.arguments[i]):\n map[sentence.arguments[i]] = query.arguments[i]\n else:\n if query.arguments[i] != sentence.arguments[i]:\n return 
None\n return map\n\n\ndef match_imp(query, imp):\n for atomic in imp.atomic_sentences:\n map = match(query, atomic)\n if map is not None:\n return map\n return None\n\n\ndef is_contradiction(query, sentence):\n if query.statement != sentence.statement:\n return False\n if len(query.arguments) != len(sentence.arguments):\n return False\n res = True\n for a, b in zip(query.arguments, sentence.arguments):\n if a != b:\n return False\n return res and query.is_neg == sentence.is_neg\n\n\ndef replace_variable(sentence, map):\n for i in range(len(sentence.arguments)):\n if is_variable(sentence.arguments[i]) and sentence.arguments[i] in map:\n sentence.arguments[i] = map[sentence.arguments[i]]\n\n\ndef get_atomic_sentences(KB):\n sen = []\n for sentence in KB:\n if isinstance(sentence, AtomicSentence):\n sen.append(sentence)\n return sen\n\n\ndef ask(query, KB):\n while True:\n last_query = query\n for sentence in KB:\n target_atomic = None\n if isinstance(sentence, AtomicSentence):\n map = match(query, sentence)\n if map is not None:\n return True\n else:\n for atomic in sentence.atomic_sentences:\n map = match(query, atomic)\n if map is not None:\n target_atomic = atomic\n break\n if target_atomic is None:\n continue\n sentence.atomic_sentences.remove(target_atomic)\n if len(sentence.atomic_sentences) == 0:\n return True\n for atomic in sentence.atomic_sentences:\n replace_variable(atomic, map)\n\n # search for next query\n atomic_sens = get_atomic_sentences(KB)\n query = None\n for atomic_sen in atomic_sens:\n if match_imp(atomic_sen, sentence) is not None:\n query = atomic_sen\n break\n\n if query is None and len(sentence.atomic_sentences) == 1:\n query = sentence.atomic_sentences[0]\n sentence.atomic_sentences.clear()\n if query is not None:\n break\n else:\n return False\n if query is None:\n return False\n else:\n if last_query == query:\n return False\n continue\n\n\nif __name__ == '__main__':\n queries, KB = read_input('input-111.txt')\n with open('output.txt', 'w') as f:\n for query in queries:\n q = query.neg()\n f.write(str(ask(q, copy.deepcopy(KB))).upper() + '\\n')\n","repo_name":"EstherCS/CSCI561-AI","sub_path":"AI-hw3/homework3.py","file_name":"homework3.py","file_ext":"py","file_size_in_byte":6347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"39316182852","text":"'''\nMisfit of the Advection-Diffusion problem written in FEniCS-2019.1.0 and hIPPYlib-3.0\nhttps://hippylib.github.io/tutorials_v3.0.0/4_AdvectionDiffusionBayesian/\n-------------------------------------------------------------------------\nProject of Bayesian SpatioTemporal analysis for Inverse Problems (B-STIP)\nShiwei Lan @ ASU, Sept. 
2020\n--------------------------------------------------------------------------\nCreated on Sep 23, 2020\n'''\n__author__ = \"Shiwei Lan\"\n__copyright__ = \"Copyright 2020, The Bayesian STIP project\"\n__license__ = \"GPL\"\n__version__ = \"0.1\"\n__maintainer__ = \"Shiwei Lan\"\n__email__ = \"slan@asu.edu; lanzithinking@outlook.com\"\n\nimport dolfin as dl\nimport ufl\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport sys\nimport os\nsys.path.append( os.environ.get('HIPPYLIB_BASE_DIR', \"../../\") )\nfrom hippylib import *\nfrom pde import TimeDependentAD\n\nsys.path.append( \"../\" )\nfrom util.common_colorbar import common_colorbar\n\nclass SpaceTimePointwiseStateObservation(Misfit):\n \"\"\"\n Misfit (negative loglikelihood) of Advection-Diffusion inverse problem\n \"\"\"\n def __init__(self, Vh, observation_times=None, targets=None, rel_noise = 0.01, d = None, **kwargs):\n \"\"\"\n Initialize the misfit\n \"\"\"\n # function space\n self.Vh = Vh\n self.mpi_comm = self.Vh.mesh().mpi_comm()\n self.rank = dl.MPI.rank(self.mpi_comm)\n # observation times\n if observation_times is None:\n t_init = 0.\n t_final = 4.\n t_1 = 1.\n dt = .1\n observation_dt = .2\n self.observation_times = np.arange(t_1, t_final+.5*dt, observation_dt)\n else:\n self.observation_times = observation_times\n # observation locations\n self.targets = np.loadtxt('targets.txt') if targets is None else targets\n self.rel_noise = rel_noise\n # obtain observations\n if d is None:\n d=self.get_observations(pde=kwargs.pop('pde',None), nref=kwargs.pop('nref',0), init=kwargs.pop('pde',None))\n if self.rank == 0:\n sep = \"\\n\"+\"#\"*80+\"\\n\"\n print( sep, \"Generate synthetic observations at {0} locations for {1} time points\".format(self.targets.shape[0], len(self.observation_times)), sep )\n # reset observation container for reference\n self.prep_container()\n self.d.axpy(1., d)\n \n def prep_container(self, Vh=None):\n \"\"\"\n Prepare storage of the observations\n \"\"\"\n if Vh is None:\n Vh = self.Vh\n # storage for observations\n self.B = assemblePointwiseObservation(Vh, self.targets)\n self.d = TimeDependentVector(self.observation_times)\n self.d.initialize(self.B, 0)\n ## TEMP Vars\n self.u_snapshot = dl.Vector()\n self.Bu_snapshot = dl.Vector()\n self.d_snapshot = dl.Vector()\n self.B.init_vector(self.u_snapshot, 1)\n self.B.init_vector(self.Bu_snapshot, 0)\n self.B.init_vector(self.d_snapshot, 0)\n \n def get_observations(self, pde=None, nref=0, init=None):\n \"\"\"\n Get the observations at given locations and time points\n \"\"\"\n # pde for observations\n if pde is None:\n mesh = self.Vh.mesh()\n for i in range(nref): mesh = dl.refine(mesh) # refine mesh to obtain observations\n pde = TimeDependentAD(mesh)\n elif nref>0:\n mesh = pde.mesh\n for i in range(nref): mesh = dl.refine(mesh) # refine mesh to obtain observations\n pde = TimeDependentAD(mesh)\n # initial condition\n if init is None:\n true_init = dl.Expression('min(0.5,exp(-100*(pow(x[0]-0.35,2) + pow(x[1]-0.7,2))))', element=pde.Vh[STATE].ufl_element())\n init = dl.interpolate(true_init, pde.Vh[STATE]).vector()\n # prepare container for observations\n self.prep_container(pde.Vh[STATE])\n \n utrue = pde.generate_vector(STATE)\n x = [utrue, init, None]\n pde.solveFwd(x[STATE], x)\n self.observe(x, self.d)\n MAX = self.d.norm(\"linf\", \"linf\")\n noise_std_dev = self.rel_noise * MAX\n parRandom.normal_perturb(noise_std_dev,self.d)\n self.noise_variance = noise_std_dev*noise_std_dev\n return self.d.copy()\n \n def observe(self, x, obs):\n 
\"\"\"\n Observation operator\n \"\"\"\n obs.zero()\n \n for t in self.observation_times:\n x[STATE].retrieve(self.u_snapshot, t)\n self.B.mult(self.u_snapshot, self.Bu_snapshot)\n obs.store(self.Bu_snapshot, t)\n \n def cost(self, x):\n \"\"\"\n Compute misfit\n \"\"\"\n c = 0\n for t in self.observation_times:\n x[STATE].retrieve(self.u_snapshot, t)\n self.B.mult(self.u_snapshot, self.Bu_snapshot)\n self.d.retrieve(self.d_snapshot, t)\n self.Bu_snapshot.axpy(-1., self.d_snapshot)\n c += self.Bu_snapshot.inner(self.Bu_snapshot)\n \n return c/(2.*self.noise_variance)\n \n def grad(self, i, x, out):\n \"\"\"\n Compute the gradient of misfit\n \"\"\"\n out.zero()\n if i == STATE:\n for t in self.observation_times:\n x[STATE].retrieve(self.u_snapshot, t)\n self.B.mult(self.u_snapshot, self.Bu_snapshot)\n self.d.retrieve(self.d_snapshot, t)\n self.Bu_snapshot.axpy(-1., self.d_snapshot)\n self.Bu_snapshot *= 1./self.noise_variance\n self.B.transpmult(self.Bu_snapshot, self.u_snapshot) \n out.store(self.u_snapshot, t) \n else:\n pass\n \n def setLinearizationPoint(self, x, gauss_newton_approx=False):\n pass\n \n def apply_ij(self, i,j, direction, out):\n out.zero()\n if i == STATE and j == STATE:\n for t in self.observation_times:\n direction.retrieve(self.u_snapshot, t)\n self.B.mult(self.u_snapshot, self.Bu_snapshot)\n self.Bu_snapshot *= 1./self.noise_variance\n self.B.transpmult(self.Bu_snapshot, self.u_snapshot) \n out.store(self.u_snapshot, t)\n else:\n pass \n \n def applyWuu(self, du, out):\n out.zero()\n self.apply_ij(STATE, STATE, du, out)\n \n def applyWum(self, dm, out):\n out.zero()\n \n def applyWmu(self, du, out):\n out.zero()\n \n def applyWmm(self, dm, out):\n out.zero()\n \n def plot_data(self, times, figsz=(12,5)):\n \"\"\"\n Plot the observations with its values u(x, t) at fixed locations for given time points\n \"\"\"\n n=len(times)\n nrow=np.floor(np.sqrt(n)).astype('int')\n ncol=np.ceil(np.sqrt(n)).astype('int')\n fig,axes=plt.subplots(nrows=nrow,ncols=ncol,sharex=True,sharey=True,figsize=figsz)\n sub_figs = [None]*len(axes.flat)\n for i in range(n):\n plt.axes(axes.flat[i])\n dl.plot(self.Vh.mesh())\n sub_figs[i]=plt.scatter(self.targets[:,0],self.targets[:,1], c=self.d.data[np.where(np.isclose(self.d.times,times[i]))[0][0]], zorder=2)\n# plt.xlim(0,1); plt.ylim(0,1)\n# plt.gca().set_aspect('equal', 'box')\n plt.title('Time: {:.1f} s'.format(times[i],))\n fig=common_colorbar(fig,axes,sub_figs)\n return fig\n \nif __name__ == '__main__':\n np.random.seed(2020)\n# # define pde\n meshsz = (61,61)\n eldeg = 1\n pde = TimeDependentAD(mesh=meshsz, eldeg=eldeg)\n Vh = pde.Vh[STATE]\n # obtain function space\n# mesh = dl.Mesh('ad_10k.xml')\n# Vh = dl.FunctionSpace(mesh, \"Lagrange\", 2)\n # set observation times\n t_init = 0.\n t_final = 4.\n t_1 = 1.\n dt = .1\n observation_dt = .2\n observation_times = np.arange(t_1, t_final+.5*dt, observation_dt)\n # set observation locations\n targets = np.loadtxt('targets.txt')\n # define misfit\n rel_noise = .5\n nref = 1\n misfit = SpaceTimePointwiseStateObservation(Vh, observation_times, targets, rel_noise=rel_noise, nref=nref)\n# # optional: refine mesh to obtain (new) observations\n# rf_mesh = dl.refine(pde.mesh)\n# rf_pde = TimeDependentAD(mesh=rf_mesh)\n# rf_obs = SpaceTimePointwiseStateObservation(rf_pde.Vh[STATE], observation_times, targets, pde=rf_pde).d.copy()\n# misfit.d.zero()\n# misfit.d.axpy(1.,rf_obs)\n # plot observations\n plt_times=[1.,2.,3.,4.]\n fig = misfit.plot_data(plt_times, (10,9))\n plt.subplots_adjust(wspace=0.1, 
hspace=0.2)\n plt.savefig(os.path.join(os.getcwd(),'properties/obs.png'),bbox_inches='tight')\n ","repo_name":"lanzithinking/DREAM-BUQ","sub_path":"ad_diff/misfit.py","file_name":"misfit.py","file_ext":"py","file_size_in_byte":8786,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"41"} +{"seq_id":"34364317779","text":"import torch\nimport numpy as np\nimport pandas as pd\nimport os\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import f1_score, confusion_matrix, accuracy_score\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.utils.data.sampler import Sampler\n\nfrom folktables import ACSDataSource, ACSIncome, ACSEmployment\nfrom fairbatch_local import FairBatch\n\ndef load_celeba_partition(img_list, celeba_feat_dir, ydict, groupdict):\n x, y, group = [], [], []\n for img in img_list:\n feat_path = os.path.join(celeba_feat_dir, img[:-3] + 'npy')\n if not os.path.exists(feat_path):\n continue\n\n feat = np.load(feat_path)\n x.append(feat)\n y.append(ydict[img])\n group.append(groupdict[img])\n\n return np.array(x), np.array(y), np.array(group)\n\ndef load_celeba_dataset():\n celeba_dir = '/mnt/LargeDisk/Data/celeba'\n celeba_label_file = os.path.join(celeba_dir, 'list_attr_celeba.csv')\n celeba_partition_file = os.path.join(celeba_dir, 'list_eval_partition.csv')\n celeba_feat_dir = os.path.join(celeba_dir, 'feat_align_celeba')\n\n dflabel = pd.read_csv(celeba_label_file)\n ydict = {img_id: smiling_label==1 for img_id, smiling_label in zip(dflabel['image_id'], dflabel['Smiling'])}\n groupdict = {img_id: 1-max(male_label, 0) for img_id, male_label in zip(dflabel['image_id'], dflabel['Male'])}\n\n dfpart = pd.read_csv(celeba_partition_file)\n img_list = dfpart['image_id']\n partition = dfpart['partition']\n train_img = img_list[partition==0]\n valid_img = img_list[partition==1]\n test_img = img_list[partition==2]\n\n x_train, y_train, group_train = load_celeba_partition(train_img, celeba_feat_dir, ydict, groupdict)\n x_valid, y_valid, group_valid = load_celeba_partition(valid_img, celeba_feat_dir, ydict, groupdict)\n x_test, y_test, group_test = load_celeba_partition(test_img, celeba_feat_dir, ydict, groupdict)\n\n return x_train, y_train, group_train, x_test, y_test, group_test, x_valid, y_valid, group_valid\n\ndef get_dataset(dataset='acsincome', protected_class='sex',\n shuffle_seed=0, batch_size=128, train_shuffle=True,\n fairbatch=False, model=None):\n\n if 'acs' in dataset:\n data_source = ACSDataSource(survey_year='2018', horizon='1-Year', survey='person')\n acs_data = data_source.get_data(states=['CA'], download=True)\n\n if dataset=='acsincome':\n task_class = ACSIncome\n elif dataset=='acsemployment':\n task_class = ACSEmployment\n\n if protected_class=='sex':\n task_class._group = 'SEX'\n features, label, group = task_class.df_to_numpy(acs_data)\n group = group - 1\n elif protected_class=='race':\n task_class._group = 'RAC1P'\n features, label, group = task_class.df_to_numpy(acs_data)\n group[group>1] = 2 # White vs Others\n group = group - 1\n\n x_train, x_test, y_train, y_test, group_train, group_test = train_test_split(features, label, group, test_size=0.2, random_state=0) # Test Split 20%\n x_train, x_valid, y_train, y_valid, group_train, group_valid = train_test_split(x_train, y_train, group_train, test_size=0.1/0.8, random_state=0) # Val Split 10%\n\n elif dataset=='celeba':\n x_train, y_train, 
group_train, x_test, y_test, group_test, x_valid, y_valid, group_valid = load_celeba_dataset()\n\n ## Shuffle Training Data\n x_train, y_train, group_train = shuffle(x_train, y_train, group_train, random_state=shuffle_seed)\n\n datascaler = MinMaxScaler()\n datascaler.fit(x_train)\n x_train, x_valid, x_test = datascaler.transform(x_train), datascaler.transform(x_valid), datascaler.transform(x_test)\n\n train_dataset = TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train), torch.from_numpy(group_train))\n valid_dataset = TensorDataset(torch.from_numpy(x_valid), torch.from_numpy(y_valid), torch.from_numpy(group_valid))\n test_dataset = TensorDataset(torch.from_numpy(x_test), torch.from_numpy(y_test), torch.from_numpy(group_test))\n\n if fairbatch:\n tensorx_train, tensory_train, tensorgroup_train = torch.from_numpy(x_train), torch.from_numpy(y_train), torch.from_numpy(group_train)\n sampler = FairBatch(model, tensorx_train.cuda().float(), tensory_train.cuda().long(), tensorgroup_train.cuda(), batch_size=128,\n alpha=0.005, target_fairness='eqodds', replacement=False, seed=0)\n trainloader = DataLoader(train_dataset, sampler=sampler)\n else:\n trainloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=train_shuffle, drop_last=False)\n\n validloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, drop_last=False)\n testloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=False)\n\n return trainloader, validloader, testloader\n","repo_name":"privacytrustlab/Data-Order-Randomness-versus-Group-Fairness","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":5000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"1551033529","text":"import regex as re # regex string finding/replacing\nimport urllib.parse # convert link characters like %\n\n\n# -- [3] Convert Obsidian type img links to proper md image links\n# Further conversion will be done in the block below\ndef obs_img_to_md_img(pb, page):\n for matched_link in re.findall(\"(?<=\\!\\[\\[)(.*?)(?=\\]\\])\", page):\n link = \"\"\n if \"|\" in matched_link:\n parts = matched_link.split(\"|\")\n link = parts.pop(0)\n alias = \"|\".join(parts)\n new_link = f\"![{alias}](\" + urllib.parse.quote(link) + \")\"\n else:\n new_link = \"![](\" + urllib.parse.quote(matched_link) + \")\"\n\n # Obsidian page inclusions use the same tag...\n # Skip if we don't match image suffixes. 
Inclusions are handled at the end.\n link = matched_link.split(\"|\")[0]\n if len(link.split(\".\")) == 1 or link.split(\".\")[-1].lower() not in pb.gc(\"included_file_suffixes\", cached=True):\n new_link = f''\n\n safe_link = re.escape(\"![[\" + matched_link + \"]]\")\n page = re.sub(safe_link, new_link, page)\n\n return page\n","repo_name":"tinsirius/obsidian-html","sub_path":"obsidianhtml/note2md/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"41"} +{"seq_id":"31389944524","text":"import discord\nimport aiohttp\nimport io\nfrom akito import Embed\nfrom discord.ext import commands\n\n\nclass CommentCmd(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n # gae\n @commands.command(\n name = \"comment\",\n aliases=[\"ytcomment\"],\n description = \"Fake YouTube Comment\",\n usage = \"[Member] [Comment]\",\n help = \"You must **tag** `Member` and then write the `Comment`\"\n )\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def Comment(self, ctx, user: discord.Member = None, *, comment):\n if not user:\n user = ctx.author\n\n url = f\"https://some-random-api.ml/canvas/youtube-comment?avatar={user.display_avatar}&username={user.display_name}&comment={comment}\"\n\n async with aiohttp.ClientSession() as session:\n response = await session.get(url)\n imageData = io.BytesIO(await response.read())\n file = discord.File(imageData, filename=\"comment.png\")\n\n embed = discord.Embed(colour=ctx.author.color)\n embed.set_image(url=\"attachment://comment.png\")\n embed.set_footer(\n text=f\"Requested By {ctx.author}\", icon_url=ctx.author.display_avatar\n )\n\n await ctx.respond(embed=embed, file=file)\n\n @Comment.error\n async def comment_error(self, ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n embed = await Embed.missingrequiredargument(self, ctx)\n await ctx.respond(embed = embed, delete_after= 60)\n\n elif isinstance(error, commands.MemberNotFound):\n embed = await Embed.membernotfound(self, ctx)\n await ctx.respond(embed = embed, delete_after= 60)\n\n else:\n pass\n\n\ndef setup(bot):\n bot.add_cog(CommentCmd(bot))\n","repo_name":"eitozx/AkitoBot","sub_path":"extension/image/comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"41"} +{"seq_id":"75243311804","text":"import parmed\nfrom io import StringIO\n\nparams_sam = parmed.amber.AmberParameterSet.from_leaprc(StringIO(u'SAM = loadMol2 parameters/SAM.mol2\\nloadAmberParams parameters/frcmod.SAM'))\nparams_sam = parmed.openmm.OpenMMParameterSet.from_parameterset(params_sam)\nparams_sam.write('parameters/SAM.xml')\n\nparams_sah = parmed.amber.AmberParameterSet.from_leaprc(StringIO(u'SAH = loadMol2 parameters/SAH.mol2\\nloadAmberParams parameters/frcmod.SAH'))\nparams_sah = parmed.openmm.OpenMMParameterSet.from_parameterset(params_sah)\nparams_sah.write('parameters/SAH.xml')\n\n# convert GAFF here for reproducibility etc. 
rather than taking it converted from my OpenMM conversion which has not been merged yet - gaff.dat was copied into files/ from AmberTools16\n# gotta set write_unused to True\nparams_gaff = parmed.amber.AmberParameterSet.from_leaprc(StringIO(u'loadAmberParams gaff.dat'))\nparams_gaff = parmed.openmm.OpenMMParameterSet.from_parameterset(params_gaff)\nparams_gaff.write('parameters/gaff.xml', write_unused=True)\n","repo_name":"choderalab/pimento","sub_path":"SETD8/Catalytic_Cycle_p11708_scripted/after_antechamber.py","file_name":"after_antechamber.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"41"} +{"seq_id":"71790060284","text":"import trino\nconn = trino.dbapi.connect(\n host='localhost',\n port=9091,\n user='eliar'\n)\n\n# Execute a cross-database query\ncur = conn.cursor()\ncur.execute(\"\"\"\n SELECT * FROM postgresql1.dwh.machinedim\n UNION ALL\n SELECT * FROM postgresql2.dwh.machinedim\n \"\"\")\nrows = cur.fetchall()\nfor row in rows:\n print(row)","repo_name":"Efejann0/Trino","sub_path":"trino-python-code/trino-engine.py","file_name":"trino-engine.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"14989135656","text":"from torchvision.models import resnet34\nimport torch.nn as nn\nimport torch\nimport streamlit as st\nfrom torchvision.transforms import transforms\nfrom torchvision.transforms import Compose\nfrom torchvision.transforms import ToTensor\nfrom torchvision.transforms import Resize\nimport numpy as np\n\nfrom torchvision.transforms import Compose, ToTensor, Resize\n\ndevice = torch.device(\"mps\")\ntransform = Compose([ToTensor(), Resize([150, 150])])\n\nf = transforms.Compose([\n # transforms.CenterCrop(300),\n transforms.RandomAffine(degrees=100, translate=(0.15, 0.15)),\n transforms.RandomGrayscale(p=0.2),\n transforms.Lambda(lambda x: x + torch.randn_like(x) * 0.03),\n transforms.Resize(150)\n ])\n\n@st.cache\ndef load_model(path):\n print(\"Loading model...\")\n model = resnet34()\n last_layer = model.fc\n model.fc = nn.Linear(last_layer.in_features, 2)\n model.load_state_dict(torch.load(path, map_location=device))\n model.to(device)\n return model\n\n\ndef predict(model, image):\n model.eval()\n\n transform = Compose([ToTensor(), Resize([150, 150])])\n\n image = transform(image)\n image = image.reshape([1, 3, 150, 150])\n output = model(image.to(device))\n _, predicted = torch.max(output.data, 1)\n return predicted\n\n\ndef compute_saliency_maps(X, y, model):\n model.eval()\n X = transform(X).reshape([1, 3, 150, 150])\n X.requires_grad_()\n\n saliency = None\n\n loss_function = nn.CrossEntropyLoss()\n output = model(X.to(device))\n loss = loss_function(output, y)\n loss.backward()\n\n saliency, _ = torch.max(torch.abs(X._grad), axis=1)\n return saliency.detach().numpy().reshape([150, 150])\n\ndef smoothing_loss(X):\n loss = 0.0\n loss += torch.sqrt(torch.mean(torch.pow(X[:, :, :, :-1]-X[:, :, :, 1:], 2)))\n loss += torch.sqrt(torch.mean(torch.pow(X[:, :, :, 1:]-X[:, :, :, :-1], 2)))\n loss += torch.sqrt(torch.mean(torch.pow(X[:, :, :-1, :]-X[:, :, 1:, :], 2)))\n loss += torch.sqrt(torch.mean(torch.pow(X[:, :, 1:, :]-X[:, :, :-1, :], 2)))\n return loss\n\n\ndef my_loss(output, y):\n return torch.sum(-1 / 10 * output[:, y]) # + torch.sum(output[:, 1-y])\n\n\ndef generate_images(X, y, model, lr, n):\n model.eval()\n\n X.requires_grad_()\n X_f = torch.stack([f(x) for x in X for 
y in range(n)]) # bs*n 150 150 3\n\n # loss_function = nn.CrossEntropyLoss()\n loss_function = my_loss\n # outputs = torch.stack([model(x.to(device) for x in X_f)]) # bs*n 2\n outputs = model(X_f.to(device)) # bs*n 2\n\n y_f = torch.stack([y_i for y_i in y for _ in range(n)]) # bs*n\n\n loss_main = loss_function(outputs, y_f) / n\n\n smoothing_loss_ = smoothing_loss(X)\n loss = loss_main + smoothing_loss_\n\n # y.shape: bs\n\n # bs*n 150 150 3\n\n loss.backward()\n # if randint(0, 20) == 20:\n X.requires_grad_(False)\n with torch.no_grad():\n X_new = X - lr * X.grad\n X.grad.zero_()\n\n difference = torch.sum(torch.abs(X_new - X))\n out_of_bound = torch.sum((X_new > 1) + (X_new < 0))\n\n print(\n loss_main.item(),\n smoothing_loss_.item(),\n difference,\n out_of_bound,\n # output.cpu().detach().numpy().tolist()\n )\n\n X_new[X_new < 0] = 0\n X_new[X_new > 1] = 1\n\n return X_new\n\n\ndef generate_image(x, y, model, lr, n):\n X_new = generate_images(x.unsqueeze(0), y, model, lr, n)\n return X_new[0]\n\ndef tweak_image(X, y, model):\n\n im = transform(X)\n\n n = 16\n lr = 0.3\n\n new_im = generate_image(im, y, model, lr, n)\n\n my_bar = st.progress(0)\n\n for i in range(80):\n my_bar.progress(i/80)\n new_im = generate_image(new_im, y, model, lr, n)\n\n new_numpy_im = new_im.detach().numpy().transpose(1, 2, 0)\n\n new_numpy_im[new_numpy_im < 0] = 0\n new_numpy_im[new_numpy_im > 1] = 1\n\n return new_numpy_im\n","repo_name":"GSKW/ML","sub_path":"practice/html/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"41"} +{"seq_id":"39182369070","text":"# codifica aquí tus datos de prueba.\nx = float(-1)\n# escribe tu código aquí.\n\n\ny = (3*(x ** 3)) - (2*(x ** 2)) + (3*x) -1\n\n\nprint(\"y =\", y)\n\n\n#Asignacion\nvar = 2\nvar = 3\nvar +=3\n\nprint(var)\n\n#combinacion string + valor int\nvar2 = \"007\"\n\nprint(\"Agente \" + var2)\n\n\n","repo_name":"JebusAZ/PCAP","sub_path":"PCAP/Modulo_2/Ejercicio_2_17.py","file_name":"Ejercicio_2_17.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"25457012513","text":"\"\"\"This is the main file for icarus\"\"\"\n\nimport os\nimport curses\nimport sys\nimport configparser # https://docs.python.org/3/library/configparser.html\nfrom multiprocessing import Process\nimport aiosmtpd.smtp\n\n# Below are my functions.\nfrom app.smtp import startsmtp\nfrom app.editor import editor\nfrom app.udp import runudp\nfrom app.tcp import runtcp\nfrom app.ftp import ftpserver\nfrom app.abuseipdb import largfeed\nimport app.cfg\n\n\n# pylint: disable=R0801\nconfig = configparser.ConfigParser()\nconfig.read('icarus.config')\nsmtpport = config['ADDRESSES']['SMTPPort']\nabuseip = config['IPDBAPI']['AbuseIPDB']\nabuseapikey = config['IPDBAPI']['IPDBAPI']\nvtapikey = config['APIKEY']['apikey']\nvirustotal = config['APIKEY']['Virustotal']\nsyslogenable = config['SYSLOG']['Syslog']\nsyslogip = config['SYSLOG']['IP']\nsyslogport = config['SYSLOG']['PORT']\nlargfeedon = config['LARGFEED']['Largfeed']\nlargfeedserver = config['LARGFEED']['Server']\nlargfeedport = config['LARGFEED']['Port']\ntcpports = config['PORTS']['tcpports']\nudpports = config['PORTS']['udpports']\n\naiosmtpd.smtp.__ident__ = \"Microsoft ESMTP MAIL Service\"\n\n\n# pylint: disable=R0915, W0613\ndef main(window):\n \"\"\"MAIN!\"\"\"\n # Starting SMTP Service\n process2 = Process(name='smtp', 
target=startsmtp, daemon=True)\n process2.start()\n # startsmtp()\n # Starting FTP Service\n process1 = Process(name='Ftp', target=ftpserver, daemon=True)\n process1.start()\n # Largfeed Queue processor\n if largfeedon != \"no\":\n process3 = Process(name='largfeed', target=largfeed, daemon=True)\n process3.start()\n\n # Dynamic low interaction port services.\n\n for tcpport in tcpports.replace(\" \", \"\").split(','):\n dyntcpprocess = Process(name='DynamicTCP ' + str(tcpport), target=runtcp, daemon=True, args=(int(tcpport),))\n dyntcpprocess.start()\n\n for udpport in udpports.replace(\" \", \"\").split(','):\n dynudpprocess = Process(name='DynamicUDP ' + str(udpport), target=runudp, daemon=True, args=(int(udpport),))\n dynudpprocess.start()\n\n while True:\n scurses = curses.initscr()\n curses.curs_set(0)\n curses.noecho()\n curses.napms(500)\n # Pretty standard configs. I have the curses refresh set to 3 seconds.\n # https://docs.python.org/3.5/library/curses.html#module-curses\n curses.start_color()\n curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)\n curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)\n sheight, swidth = scurses.getmaxyx()\n cursewinder = curses.newwin(sheight, swidth, 0, 0)\n cursewinder.nodelay(True)\n # No delay fixes a problem of the screen not updating properly.\n\n # the above 5 are just standard curses commands.\n # First number is vertical, 51 is horizontal\n cursewinder.addstr(0, 51, \"Icarus.config\")\n cursewinder.addstr(1, 51, \"Virustotal:\")\n cursewinder.addstr(2, 51, \"Enabled: \" + virustotal)\n cursewinder.addstr(3, 51, \"APIKEY: \" + vtapikey)\n cursewinder.addstr(5, 51, \"AbuseIPDB:\")\n cursewinder.addstr(6, 51, \"Enabled: \" + abuseip)\n cursewinder.addstr(7, 51, \"APIKEY: \" + abuseapikey)\n cursewinder.addstr(9, 51, \"Syslog:\")\n cursewinder.addstr(10, 51, \"Enabled: \" + syslogenable)\n cursewinder.addstr(11, 51, \"Syslog Server: \" + syslogip + \":\" + syslogport)\n cursewinder.addstr(13, 51, \"LARGfeed:\")\n cursewinder.addstr(14, 51, \"Enabled: \" + largfeedon)\n cursewinder.addstr(15, 51, \"LARGfeed Server: \" + largfeedserver + \":\" + largfeedport)\n cursewinder.addstr(17, 51, \"Press P to change values.\", curses.color_pair(2))\n cursewinder.addstr(18, 51, \"Press R to restart.\", curses.color_pair(3))\n cursewinder.addstr(19, 51, \"Press Q to quit.\", curses.color_pair(1))\n\n cursewinder.addstr(0, 0, \"ICARUS HONEYPOT\", curses.color_pair(1))\n\n cursewinder.addstr(12, 0, \"Attacks: \" + str(app.cfg.numattacks['num']))\n cursewinder.addstr(13, 0, \"Last 5 Attackers: \", curses.color_pair(3))\n if app.cfg.attackers:\n for num, address in enumerate(app.cfg.attackers, start=1):\n cursewinder.addstr((num + 13), 0, str(address))\n\n cursewinder.refresh()\n\n key = cursewinder.getch()\n if key == ord('q'):\n break\n if key == ord('r'):\n process1.terminate()\n process2.terminate()\n os.execv(sys.executable, ['python3'] + sys.argv)\n # Nice little thing that restarts a python script.\n elif key == ord('p'):\n editor() # from editor.py, opens your system editor.\n cursewinder.erase()\n cursewinder.refresh()\n\n\nif __name__ == '__main__':\n curses.wrapper(main)\n","repo_name":"tbiens/icarus","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"46"} +{"seq_id":"30063971158","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom 
mpl_toolkits.mplot3d import Axes3D\nimport graficos\nimport pickle\n\npickle_in = open('results/resultados-sat-2-1/T_matrix-pickle.pickle',\"rb\")\nT = pickle.load(pickle_in)\nT = np.array(T)\n\npickle_in = open('results/resultados-sat-2-1/result_matrix-pickle.pickle',\"rb\")\nres = pickle.load(pickle_in)\nres = np.array(res)\n\npickle_in = open('results/resultados-sat-2-1/alpha_matrix-pickle.pickle',\"rb\")\nalpha = pickle.load(pickle_in)\nalpha = np.array(alpha)\n\npickle_in = open('his-best-sat-2-pickle.pickle',\"rb\")\nhis = pickle.load(pickle_in)\nhis = np.array(his)\n\nk = np.arange(0, len(his[2]) , 1)\n\n\n\n# for i in range(len(res)):\n# for j in range(len(res[i])):\n# if res[i][j] > 493:\n# print('MAX - ',res[i][j])\n# print('T - ', T[i][j])\n# print('Alpha - ', alpha[i][j])\n# print('-----------------------')\n# print()\n\n\n\ngraficos.print_3d(alpha, 'alpha', T, 'T', res, 'MAX', 'SA', show=True, save=True, path='grafico_sat-2')\n\n#graficos.print_2d(k, 'Epoch', his[2], 'Result', \"SA_Best_SAT-2\", save=False, show=True)","repo_name":"Birunda3000/Projeto-e-Analise-de-Algoritmos-2021","sub_path":"Trabalho-Simulated-Annealing/graficos-2.py","file_name":"graficos-2.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"12238562332","text":"import datetime\nimport json\n\nfrom urllib.parse import unquote, unquote_plus\n\n\ndef parse_params(string: str):\n \"\"\"Parse param=value&... string\"\"\"\n res = {}\n if string:\n pairs = string.split('&')\n for item in pairs:\n key, value = item.split('=')\n res[key] = value\n return res\n\n\ndef parse_post_data(environ):\n \"\"\"Parse POST data\"\"\"\n data = environ['wsgi.input']\n length = environ['CONTENT_LENGTH']\n if data:\n length = int(length) if length else 0\n data = data.read(length).decode('utf-8')\n data = unquote(data)\n data = unquote_plus(data)\n return parse_params(data)\n return {}\n","repo_name":"Antonlushnikow/wsgi-framework","sub_path":"framework/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"27579573155","text":"def find_min(lst):\n mn = lst[0]\n for e in lst:\n if e < mn:\n mn = e\n\n return mn\n\n\ndef find_max(lst):\n mx = lst[0]\n for e in lst:\n if e > mx:\n mx = e\n\n return mx\n\n\ndef find_sum(lst):\n sm = 0\n for e in lst:\n sm += e\n\n return sm\n\n\ndef find_product(lst):\n prod = 1\n for e in lst:\n try:\n prod *= e\n except OverflowError:\n return\n\n return prod\n\n\ndef read():\n with open(\"input.txt\", \"r\") as f:\n lst_inp = list(map(int, f.readline().split()))\n return lst_inp\n\n\ndef run(lst_inp, out=True):\n mn = find_min(lst_inp)\n mx = find_max(lst_inp)\n sm = find_sum(lst_inp)\n prod = find_product(lst_inp)\n if out:\n print(\"Minimum:\", mn)\n print(\"Maximum:\", mx)\n print(\"Sum:\", sm)\n print(\"Product:\", prod)\n\n\ndef main(out=True):\n lst_inp = read()\n run(lst_inp, out)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"TinaKeyn/simple-calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"14809632057","text":"\"\"\"\nImplementations for several probabilistic error models\n\"\"\"\n\nfrom typing import Optional, Tuple\n\nimport numpy as np\n\n\ndef single_dist_mc(\n emap: np.ndarray,\n x_dist: 
np.ndarray,\n w_dist: np.ndarray,\n fan_in: float,\n num_samples: int = int(1e5),\n) -> Tuple[float, float]:\n \"\"\"\n Generate error mean and standard deviation using Monte Carlo\n approach as described in: https://arxiv.org/abs/1912.00700\n\n Args:\n emap: The multiplier's error map\n x_dist: Operand distribution of activations\n w_dist: Operand distribution of weights\n fan_in: Incoming connections for layer\n num_samples: Number of Monte Carlo simulation runs. Defaults to int(1e5).\n\n Returns:\n Mean and standard deviation for a single operation\n \"\"\"\n prob_x, prob_w = np.meshgrid(\n x_dist.astype(np.float64), w_dist.astype(np.float64), indexing=\"ij\"\n )\n probabilities = (prob_x * prob_w).flatten()\n emap = emap.flatten()\n monte_carlo_runs = np.random.choice(\n emap, size=(num_samples, fan_in), p=probabilities\n )\n monte_carlo_runs = np.sum(monte_carlo_runs, axis=1)\n return (\n np.mean(monte_carlo_runs) / fan_in,\n np.std(monte_carlo_runs, dtype=np.float64) / fan_in,\n )\n\n\ndef error_prediction(\n emap: np.ndarray, x_dist: np.ndarray, w_dist: np.ndarray, fan_in: float\n) -> Tuple[float, float]:\n \"\"\"\n Generate error mean and standard deviation using the\n global distribution of activations and weights\n\n Args:\n emap: The multiplier's error map\n x_dist: Operand distribution of activations\n w_dist: Operand distribution of weights\n fan_in: Incoming connections for layer\n\n Returns:\n Mean and standard deviation for a single operation\n \"\"\"\n emap = emap.astype(np.float64)\n prob_x, prob_w = np.meshgrid(\n x_dist.astype(np.float64), w_dist.astype(np.float64), indexing=\"ij\"\n )\n mean = np.sum(emap * prob_x * prob_w)\n std = np.sqrt(np.sum(((emap - mean) ** 2) * prob_x * prob_w)) / np.sqrt(fan_in)\n return mean, std\n\n\ndef get_sample_population(tensor: np.ndarray, num_samples: int = 512) -> np.ndarray:\n \"\"\"\n Randomly select samples from a tensor that cover the receptive field of one neuron\n\n Args:\n tensor: Tensor to draw samples from\n num_samples: Number of samples to draw. Defaults to 512.\n\n Returns:\n Sampled 2D Tensor of shape [num_samples, tensor.shape[-1]]\n \"\"\"\n flat_dim = np.prod(tensor.shape[:-1])\n rand_idx = np.random.choice(flat_dim, num_samples)\n return tensor.reshape(flat_dim, tensor.shape[-1])[rand_idx]\n\n\ndef population_prediction(\n emap: np.ndarray, x_multidist: np.ndarray, w_dist: np.ndarray, fan_in: float\n) -> Tuple[float, float]:\n \"\"\"\n Generate prediction of mean and standard deviation using several\n sampled local distributions\n\n Args:\n emap: The multiplier's error map\n x_multidist: Array of several operand distributions for activations\n w_dist: Operand distribution of weights\n fan_in: Incoming connections for layer\n\n Returns:\n Mean and standard deviation for a single operation\n \"\"\"\n # Single distribution error computation for each operand distribution\n means, stds = [], []\n for x_dist in x_multidist:\n mean, std = error_prediction(emap, x_dist, w_dist, fan_in)\n means.append(mean)\n stds.append(std)\n npmeans = np.array(means)\n npstds = np.array(stds)\n\n # Aggregate error distributions (Eq. 15 & Eq. 
16)\n mean_aggregate = np.mean(npmeans)\n std_aggregate = np.sqrt(\n (\n np.sum(npmeans**2 + npstds**2)\n - (np.sum(npmeans) ** 2) / x_multidist.shape[0]\n )\n / x_multidist.shape[0]\n )\n return mean_aggregate, std_aggregate\n\n\ndef to_distribution(\n tensor: Optional[np.ndarray], min_val: int, max_val: int\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Turn tensor of weights/activations into a frequency distribution (i.e. build a histogram)\n\n Args:\n tensor: Tensor to build histogram from\n min_val: Lowest possible operand value in tensor\n max_val: Highest possible operand value in tensor\n\n Returns:\n Tuple of Arrays where first array contains the full numerical range between\n min_val and max_val inclusively and second array contains the relative frequency\n of each operand\n\n Raises:\n ValueError: If run before features maps have been populated\n by call to `utils.model.get_feature_maps`\n\n \"\"\"\n if tensor is None:\n raise ValueError(\"Populate input tensor with intermediate features maps\")\n num_range = np.arange(min_val, max_val + 1)\n counts = np.zeros_like(num_range)\n nums, freqs = np.unique(tensor.flatten().astype(np.int32), return_counts=True)\n counts[nums + min_val] = freqs.astype(np.float64)\n counts = counts / np.sum(freqs)\n return num_range, counts\n\n\ndef error_calculation(\n accurate: np.ndarray, approximate: np.ndarray, fan_in: float\n) -> Tuple[float, float]:\n \"\"\"\n Calculate mean and standard deviation of the observed error between\n accurate computation and approximate computation\n\n Args:\n accurate: Accurate computation results\n approximate: Approximate computation results\n fan_in: Number of incoming neuron connections\n\n Returns:\n Mean and standard deviation for a single operation\n \"\"\"\n mean = np.mean(accurate - approximate) / fan_in\n std = np.std((accurate - approximate) / fan_in, dtype=np.float64)\n return mean, std\n","repo_name":"etrommer/agn-approx","sub_path":"src/agnapprox/utils/error_stats.py","file_name":"error_stats.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"46"} +{"seq_id":"15608683189","text":"def findFinalVotes(actualVotes):\n realVote = pd.NA\n count = 0\n for i in range(0, len(actualVotes)):\n\n if not pd.isna(actualVotes.iloc[i]['cps19_votechoice']):\n realVote = actualVotes.iloc[i]['cps19_votechoice']\n elif not pd.isna(actualVotes.iloc[i]['cps19_votechoice_pr']):\n realVote = actualVotes.iloc[i]['cps19_votechoice_pr']\n elif not pd.isna(actualVotes.iloc[i]['cps19_vote_unlikely']):\n realVote = actualVotes.iloc[i]['cps19_vote_unlikely']\n elif not pd.isna(actualVotes.iloc[i]['cps19_vote_unlike_pr']):\n realVote = actualVotes.iloc[i]['cps19_vote_unlike_pr']\n elif not pd.isna(actualVotes.iloc[i]['cps19_v_advance']):\n realVote = actualVotes.iloc[i]['cps19_v_advance']\n else:\n #print(actualVotes.iloc[i]['cps19_votechoice'], actualVotes.iloc[i]['cps19_votechoice_pr'], actualVotes.iloc[i]['cps19_vote_unlikely'],actualVotes.iloc[i]['cps19_vote_unlike_pr'], actualVotes.iloc[i]['cps19_v_advance'])\n #print(i, actualVotes.iloc[i]['Unnamed: 0'], actualVotes.iloc[i]['Unnamed: 0'])\n count +=1\n actualVotes.iloc[i]['finalVote'] = realVote\n return actualVotes\n\ndef findCorrelationWithQuebecProvince(provinceAnswers, finalVotes):\n provinceAnsweredCount = 0\n quebecResidentTotalCount = 0\n quebecResidentVotedForBlocCount = 0\n blocVoterNotQuebecResidentCount =0\n for i in range(0, len(provinceAnswers)):\n 
#print(finalVotes.iloc[i]['finalVote'])\n if not pd.isna(provinceAnswers.iloc[i]['cps19_province']):\n provinceAnsweredCount +=1\n\n if provinceAnswers.iloc[i]['cps19_province'] == 'Quebec':\n quebecResidentTotalCount +=1\n\n if provinceAnswers.iloc[i]['cps19_province'] == 'Quebec' and finalVotes.iloc[i]['finalVote'] == \"Bloc Qubcois\":\n quebecResidentVotedForBlocCount +=1\n\n if provinceAnswers.iloc[i]['cps19_province'] != 'Quebec' and finalVotes.iloc[i]['finalVote'] == \"Bloc Qubcois\":\n if not pd.isna(provinceAnswers.iloc[i]['cps19_province']):\n #print(provinceAnswers.iloc[i]['Unnamed: 0'], provinceAnswers.iloc[i]['pes19_province'] , finalVotes.iloc[i]['finalVote'])\n blocVoterNotQuebecResidentCount +=1\n\n return [provinceAnsweredCount, quebecResidentTotalCount, quebecResidentVotedForBlocCount, blocVoterNotQuebecResidentCount]\n\ndef findCorrelationForBiggestIssue(bestAdressesIssue, finalVotes):\n bestAdressesPartyTotalCount = 0\n bestAdressesIssueSameForVoteCount = 0\n bestAdressesIssueDifferentForVoteCount = 0\n for i in range(0, len(bestAdressesIssue)):\n if not pd.isna(bestAdressesIssue.iloc[i]['cps19_imp_iss_party']) and not pd.isna(finalVotes.iloc[i]['finalVote']):\n bestAdressesPartyTotalCount += 1\n\n if bestAdressesIssue.iloc[i]['cps19_imp_iss_party'] == finalVotes.iloc[i]['finalVote']\\\n and (not pd.isna(bestAdressesIssue.iloc[i]['cps19_imp_iss_party']) and not pd.isna(finalVotes.iloc[i]['finalVote'])):\n #print( \"Best adresses: {bestAdresses} Final Vote: {finalVote}\".format(\n #bestAdresses=bestAdressesIssue.iloc[i]['cps19_imp_iss_party'], finalVote=finalVotes.iloc[i]['finalVote']))\n bestAdressesIssueSameForVoteCount +=1\n\n if bestAdressesIssue.iloc[i]['cps19_imp_iss_party'] != finalVotes.iloc[i]['finalVote']\\\n and (not pd.isna(bestAdressesIssue.iloc[i]['cps19_imp_iss_party']) and not pd.isna(finalVotes.iloc[i]['finalVote'])):\n\n #print( \"Best adresses: {bestAdresses} Final Vote: {finalVote}\".format(\n #bestAdresses=bestAdressesIssue.iloc[i]['cps19_imp_iss_party'], finalVote=finalVotes.iloc[i]['finalVote']))\n bestAdressesIssueDifferentForVoteCount +=1\n\n return [bestAdressesPartyTotalCount, bestAdressesIssueSameForVoteCount, bestAdressesIssueDifferentForVoteCount]\n\ndef findCorrelationMostWantedOutcome(desiredOutcome, finalVotes):\n bestOutcomeTotalCount = 0\n desiredOutcomeSameThenVoteLiberals =0\n desiredOutcomeDifferentThenVoteLiberals = 0\n desiredOutcomeSameThenVoteConservative =0\n desiredOutcomeDifferentThenVoteConservative = 0\n desiredOutcomeSameThenVoteNDP =0\n desiredOutcomeDifferentThenVoteNDP = 0\n\n for i in range(0, len(desiredOutcome)):\n if not pd.isna(desiredOutcome.iloc[i]['cps19_outcome_most']) and not pd.isna(finalVotes.iloc[i]['finalVote']):\n #print( \"Best adresses: {bestOutcome} Final Vote: {finalVote}\".format(\n #bestOutcome=desiredOutcome.iloc[i]['cps19_outcome_most'], finalVote=finalVotes.iloc[i]['finalVote']))\n bestOutcomeTotalCount += 1\n\n if (desiredOutcome.iloc[i]['cps19_outcome_most'] == \"Liberal majority\" or desiredOutcome.iloc[i]['cps19_outcome_most'] == \"Liberal minority\")\\\n and finalVotes.iloc[i]['finalVote'] == \"Liberal Party\":\n #print( \"Best adresses: {bestOutcome} Final Vote: {finalVote}\".format(\n #bestOutcome=desiredOutcome.iloc[i]['cps19_outcome_most'], finalVote=finalVotes.iloc[i]['finalVote']))\n desiredOutcomeSameThenVoteLiberals +=1\n\n\n if (desiredOutcome.iloc[i]['cps19_outcome_most'] == \"Liberal majority\" or desiredOutcome.iloc[i]['cps19_outcome_most'] == \"Liberal minority\")\\\n and 
finalVotes.iloc[i]['finalVote'] != \"Liberal Party\":\n desiredOutcomeDifferentThenVoteLiberals +=1\n\n if (desiredOutcome.iloc[i]['cps19_outcome_most'] == \"Conservative majority\" or desiredOutcome.iloc[i]['cps19_outcome_most'] == \"Conservative minority\")\\\n and finalVotes.iloc[i]['finalVote'] == \"Conservative Party\":\n #print( \"Best adresses: {bestOutcome} Final Vote: {finalVote}\".format(\n #bestOutcome=desiredOutcome.iloc[i]['cps19_outcome_most'], finalVote=finalVotes.iloc[i]['finalVote']))\n desiredOutcomeSameThenVoteConservative +=1\n\n\n if (desiredOutcome.iloc[i]['cps19_outcome_most'] == \"Conservative majority\" or desiredOutcome.iloc[i]['cps19_outcome_most'] == \"Conservative minority\")\\\n and finalVotes.iloc[i]['finalVote'] != \"Conservative Party\":\n desiredOutcomeDifferentThenVoteConservative +=1\n\n if (desiredOutcome.iloc[i]['cps19_outcome_most'] == \"NDP minority\" or desiredOutcome.iloc[i]['cps19_outcome_most'] == \"NDP majority\")\\\n and finalVotes.iloc[i]['finalVote'] == \"ndp\":\n #print( \"Best adresses: {bestOutcome} Final Vote: {finalVote}\".format(\n #bestOutcome=desiredOutcome.iloc[i]['cps19_outcome_most'], finalVote=finalVotes.iloc[i]['finalVote']))\n desiredOutcomeSameThenVoteNDP +=1\n\n\n if (desiredOutcome.iloc[i]['cps19_outcome_most'] == \"NDP minority\" or desiredOutcome.iloc[i]['cps19_outcome_most'] == \"NDP majority\")\\\n and finalVotes.iloc[i]['finalVote'] != \"ndp\":\n desiredOutcomeDifferentThenVoteNDP +=1\n\n return [bestOutcomeTotalCount,desiredOutcomeSameThenVoteLiberals, desiredOutcomeDifferentThenVoteLiberals,\n desiredOutcomeSameThenVoteConservative, desiredOutcomeDifferentThenVoteConservative,\n desiredOutcomeSameThenVoteNDP, desiredOutcomeDifferentThenVoteNDP]\n\ndef findCorrelationAffiliationPgilosophique(affinitePolitique, confidenceAffinete, finalVotes):\n nbrSame = 0\n nbrDifferent = 0\n notConfident = 0\n semiConfident = 0\n confident = 0\n\n notConfidentIfSame = 0\n semiConfidentIfSame = 0\n confidentIfSame = 0\n\n for i in range(0, len(affinitePolitique)):\n\n if (affinitePolitique.iloc[i]['cps19_fed_id'] == finalVotes.iloc[i]['finalVote'] or (\n affinitePolitique.iloc[i]['cps19_fed_id'] == \"Liberal\" and finalVotes.iloc[i]['finalVote'] == \"Liberal Party\")\n or (affinitePolitique.iloc[i]['cps19_fed_id'] == \"Conservative\" and finalVotes.iloc[i]['finalVote'] == \"Conservative Party\")\n or (affinitePolitique.iloc[i]['cps19_fed_id'] == \"Green\" and finalVotes.iloc[i]['finalVote'] == \"Green Party\")):\n nbrSame += 1\n if (confidenceAffinete.iloc[i]['cps19_fed_id_str'] == \"Not very strongly\"):\n notConfidentIfSame += 1\n elif (confidenceAffinete.iloc[i]['cps19_fed_id_str'] == \"Fairly strongly\"):\n semiConfidentIfSame += 1\n elif (confidenceAffinete.iloc[i]['cps19_fed_id_str'] == \"Very strongly\"):\n confidentIfSame += 1\n\n else:\n if not pd.isna(affinitePolitique.iloc[i]['cps19_fed_id']) and not pd.isna(finalVotes.iloc[i]['finalVote'])\\\n and affinitePolitique.iloc[i]['cps19_fed_id'] != \"None of these\" \\\n and affinitePolitique.iloc[i]['cps19_fed_id'] != \"Don't know/ Prefer not to answer\" \\\n and affinitePolitique.iloc[i]['cps19_fed_id'] != \"Another party (please specify)\" :\n nbrDifferent += 1\n\n if (confidenceAffinete.iloc[i]['cps19_fed_id_str'] == \"Not very strongly\"):\n notConfident += 1\n elif (confidenceAffinete.iloc[i]['cps19_fed_id_str'] == \"Fairly strongly\"):\n semiConfident += 1\n elif (confidenceAffinete.iloc[i]['cps19_fed_id_str'] == \"Very strongly\"):\n confident += 1\n\n #print(\n 
#\"Affinite = {affinitePolitique} , vrai vote = {voteChoice}, Confiance = {confiance}\".format(\n #affinitePolitique=affinitePolitique.iloc[i]['cps19_fed_id'], voteChoice=finalVotes.iloc[i]['finalVote'], confiance=confidenceAffinete.iloc[i]['cps19_fed_id_str']))\n\n pourcentageNonConfient = notConfident / (notConfident + semiConfident + confident) * 100\n pourcentageSemiConfient = semiConfident / (notConfident + semiConfident + confident) * 100\n pourcentageConfient = confident / (notConfident + semiConfident + confident) * 100\n\n pourcentageNonConfientIfSame = notConfidentIfSame / (\n notConfidentIfSame + semiConfidentIfSame + confidentIfSame) * 100\n pourcentageSemiConfientIfSame = semiConfidentIfSame / (\n notConfidentIfSame + semiConfidentIfSame + confidentIfSame) * 100\n pourcentageConfientIfSame = confidentIfSame / (notConfidentIfSame + semiConfidentIfSame + confidentIfSame) * 100\n\n print(\n \"Parmi ceux qui avaient un vote different de leur affinite, {pourcentageNonConfient} % ne sont pas confiants, \"\n \"{pourcentageSemiConfient} % sont relativement confiants et {pourcentageConfient} % sont confiants\".format(\n pourcentageNonConfient=pourcentageNonConfient, pourcentageSemiConfient=pourcentageSemiConfient,\n pourcentageConfient=pourcentageConfient))\n\n print(\n \"Parmi ceux qui avaient LE MEME VOTE que celui de leur affinite, {pourcentageNonConfientIfSame} % ne sont pas confiants, \"\n \"{pourcentageSemiConfientIfSame} % sont relativement confiants et {pourcentageConfientIfSame} % sont confiants\".format(\n pourcentageNonConfientIfSame=pourcentageNonConfientIfSame,\n pourcentageSemiConfientIfSame=pourcentageSemiConfientIfSame,\n pourcentageConfientIfSame=pourcentageConfientIfSame))\n\n sucessRate = nbrSame / (nbrSame + nbrDifferent) * 100\n print(nbrSame + nbrDifferent)\n print(sucessRate)\n\n return [pourcentageNonConfient, pourcentageSemiConfient, pourcentageConfient, pourcentageNonConfientIfSame,\n pourcentageSemiConfientIfSame, pourcentageConfientIfSame, sucessRate]\n\ndef findCorrelationWithDonations(partyMember, finalVotes):\n\n totalCount = 0\n totalGaveToSameThanVote = 0\n totalGaveToDifferentThanVote = 0\n\n for i in range(0, len(partyMember)):\n if not pd.isna(partyMember.iloc[i]['cps19_fed_member']) and not pd.isna(finalVotes.iloc[i]['finalVote']):\n #print(partyMember.iloc[i]['cps19_fed_member'])\n totalCount += 1\n\n if not pd.isna(partyMember.iloc[i]['cps19_fed_member']) and not pd.isna(finalVotes.iloc[i]['finalVote']) \\\n and partyMember.iloc[i]['cps19_fed_member'] == finalVotes.iloc[i]['finalVote']:\n #print( \"Gave to: {gaveParty} Final Vote: {finalVote}\".format(\n #gaveParty=partyMember.iloc[i]['cps19_fed_member'], finalVote=finalVotes.iloc[i]['finalVote']))\n totalGaveToSameThanVote += 1\n\n if not pd.isna(partyMember.iloc[i]['cps19_fed_member']) and not pd.isna(finalVotes.iloc[i]['finalVote']) \\\n and partyMember.iloc[i]['cps19_fed_member'] != finalVotes.iloc[i]['finalVote']:\n #print( \"Gave to: {gaveParty} Final Vote: {finalVote}\".format(\n #gaveParty=partyMember.iloc[i]['cps19_fed_member'], finalVote=finalVotes.iloc[i]['finalVote']))\n totalGaveToDifferentThanVote += 1\n\n return [totalCount, totalGaveToSameThanVote, totalGaveToDifferentThanVote]\n\n\ndef findTrudeauSatisfactionCorrelation(currentTrudeauSatisfaction, finalVotes):\n verySatisfiedCount = 0\n fairlySatisfiedCount = 0\n notVerySatisfiedCount = 0\n notSatisfiedCount = 0\n dontKnowCount = 0\n\n verySatisfiedAndVotedForLiberalsCount = 0\n fairlySatisfiedAndVotedForLiberalsCount = 0\n 
notVerySatisfiedAndVotedForLiberalsCount = 0\n notSatisfiedAndVotedForLiberalsCount = 0\n dontKnowAndVotedForLiberalsCount = 0\n for i in range(0, len(currentTrudeauSatisfaction)):\n\n if currentTrudeauSatisfaction.iloc[i]['cps19_fed_gov_sat'] == \"Very satisfied\":\n verySatisfiedCount +=1\n if finalVotes.iloc[i]['finalVote'] == \"Liberal Party\":\n verySatisfiedAndVotedForLiberalsCount +=1\n #print(\"Sattisfcation: {satisfaction} Final Vote: {finalVote}\".format(\n #satisfaction=currentTrudeauSatisfaction.iloc[i]['cps19_fed_gov_sat'], finalVote=finalVotes.iloc[i]['finalVote']))\n\n if currentTrudeauSatisfaction.iloc[i]['cps19_fed_gov_sat'] == \"Fairly satisfied\":\n fairlySatisfiedCount +=1\n if finalVotes.iloc[i]['finalVote'] == \"Liberal Party\":\n fairlySatisfiedAndVotedForLiberalsCount +=1\n\n if currentTrudeauSatisfaction.iloc[i]['cps19_fed_gov_sat'] == \"Not very satisfied\":\n notVerySatisfiedCount +=1\n if finalVotes.iloc[i]['finalVote'] == \"Liberal Party\":\n notVerySatisfiedAndVotedForLiberalsCount +=1\n\n if currentTrudeauSatisfaction.iloc[i]['cps19_fed_gov_sat'] == \"Not at all satisfied\":\n notSatisfiedCount +=1\n if finalVotes.iloc[i]['finalVote'] == \"Liberal Party\":\n notSatisfiedAndVotedForLiberalsCount +=1\n\n if currentTrudeauSatisfaction.iloc[i]['cps19_fed_gov_sat'] == \"Don't know/ Prefer not to answer\":\n dontKnowCount +=1\n if finalVotes.iloc[i]['finalVote'] == \"Liberal Party\":\n dontKnowAndVotedForLiberalsCount +=1\n\n\n print(\"Very Satisfied: {verySatisfied} Voted For Liberals: {verySatisfiedAndVotedForLiberals}\".format(\n verySatisfied=verySatisfiedCount, verySatisfiedAndVotedForLiberals=verySatisfiedAndVotedForLiberalsCount))\n\n print(\"Fairly Satisfied: {fairlySatisfied} Voted For Liberals: {fairlySatisfiedAndVotedForLiberals}\".format(\n fairlySatisfied=fairlySatisfiedCount, fairlySatisfiedAndVotedForLiberals=fairlySatisfiedAndVotedForLiberalsCount))\n\n print(\"Not very Satisfied: {notVerySatisfied} Voted For Liberals: {notVerySatisfiedAndVotedForLiberals}\".format(\n notVerySatisfied=notVerySatisfiedCount, notVerySatisfiedAndVotedForLiberals=notVerySatisfiedAndVotedForLiberalsCount))\n\n print(\"Not at all Satisfied: {notSatisfied} Voted For Liberals: {notSatisfiedAndVotedForLiberals}\".format(\n notSatisfied=notSatisfiedCount, notSatisfiedAndVotedForLiberals=notSatisfiedAndVotedForLiberalsCount))\n\n print(\"Dont know: {dontKnow} Voted For Liberals: {dontKnowAndVotedForLiberals}\".format(\n dontKnow=dontKnowCount, dontKnowAndVotedForLiberals=dontKnowAndVotedForLiberalsCount))","repo_name":"alecULaval/Projet-GLO-4027","sub_path":"alecGarbageRemise1.py","file_name":"alecGarbageRemise1.py","file_ext":"py","file_size_in_byte":16019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"32962045266","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport numpy as np\nfrom emlp.training import Trainer\nfrom emlp.learning_rate_manager import ExponentialDecayLearningRate\nfrom emlp.schnet import SchNet\nfrom emlp.datasets import DataSet, TFRWriter, DataAugmentation\nfrom emlp.reference import ConsistentFragmentsReference\nfrom emlp.losses import MSE, MAE\nfrom emlp.hooks import SaveHook\nfrom emlp.longrange import LongrangeCoulomb\nimport tensorflow as tf\nfrom glob import glob\n\n# In this example, we will train an eMLP model on the first 10 molecules in the eQM7 dataset (https://doi.org/10.24435/materialscloud:66-9j)\n# The reference files are stored in the data folder\n\n# We should 
let the eMLP know what kind of properties should be read from the data files and internally used in the eMLP\nlist_of_properties = ['positions', 'numbers', 'centers', 'energy', 'forces', 'efield'] \n\n# First, the training and validation set are converted to a tensorflow record file (tfr file)\n# After the conversion, the total number of configurations being stored in the tfr files\n# are printed to the prompt. This number is required for the num_configs argument down below\n# in the DataSet class.\n\n# This should only be done once before training. Hence, switch the following two if-statements\n# to False, if they have already been generated.\n\nif True:\n writer = TFRWriter('validation.tfr', list_of_properties = list_of_properties, reference = 'pbe0_aug-cc-pvtz', filter_centers = True)\n writer.write_from_xyz('data/validation.xyz')\n writer.close()\n \nif True:\n writer = TFRWriter('train.tfr', list_of_properties = list_of_properties, reference = 'pbe0_aug-cc-pvtz', filter_centers = True)\n writer.write_from_xyz('data/train.xyz')\n writer.close()\n\n# Choose your strategy (https://www.tensorflow.org/guide/distributed_training). This becomes important when training on multiple GPUs.\nstrategy = tf.distribute.MirroredStrategy()\n\nwith strategy.scope():\n # Choose the correct longrange part: LongrangeCoulomb for isolated systems or LongrangeEwald for periodic systems\n longrange_compute = LongrangeCoulomb()\n\n # Here, we create the train and validation set from the tfr files. The user should specify the number of configurations being stored\n # in those records via the argument num_config. One can remove the DataAugmentation() argument in the training set, if no data augmentation \n # is needed. The flag test=True is required when initializing the validation set.\n train_data = DataSet(['train.tfr'], num_configs = 4000, cutoff = 4.0, longrange_compute = longrange_compute, batch_size = 64, float_type = 32, num_parallel_calls = 8, \n strategy = strategy, list_of_properties = list_of_properties, augment_data = DataAugmentation())\n validation_data = DataSet(['validation.tfr'], num_configs = 1000, cutoff = 4.0, longrange_compute = longrange_compute, batch_size = 64, float_type = 32, num_parallel_calls = 8, \n strategy = strategy, list_of_properties = list_of_properties, test = True)\n \n # Here, the SchNet architecture is specified. One can use the ConsistentFragmentsReference('pbe0_aug-cc-pvtz') reference to include the\n # refence structures in every batch to maintain their consistency while training. 
For all other use cases, just use ConstantReference(value = 0., per_atom = False).\n model = SchNet(cutoff = 4., n_max = 32, num_layers = 4, start = 0.0, end = 4.0, num_filters = 128, num_features = 512, shared_W_interactions = False, float_type = 32, \n cutoff_transition_width = 0.5, reference = ConsistentFragmentsReference('pbe0_aug-cc-pvtz'), longrange_compute = longrange_compute) \n \n # When restarting from a previously trained model, use the line below\n #model = SchNet.from_restore_file('model_dir/model_name_2.00', reference = ConsistentFragmentsReference('pbe0_aug-cc-pvtz'), longrange_compute = LongrangeCoulomb())\n \n # Specify the optimizer and learning rate scheduler\n optimizer = tf.optimizers.Adam(3e-04)\n learning_rate_manager = ExponentialDecayLearningRate(initial_learning_rate = 3e-04, decay_rate = 0.5, decay_epochs = 300)\n \n # Specify your loss function here\n losses = [MSE('energy', scale_factor = 1., per_atom = True), MSE('forces', scale_factor = 1.), MSE('center_forces', scale_factor = 1.)]\n # Specify the validation metrics here\n validation_losses = [MAE('energy', per_atom = True), MAE('forces', scale_factor = 1.), MAE('center_forces', scale_factor = 1.)]\n \n # Specify the save location of the model. \n savehook = SaveHook(model, ckpt_name = 'model_dir/model_name', max_to_keep = 5, save_period = 1.0, history_period = 8.0,\n npz_file = 'model_dir/model_name.npz')\n\n trainer = Trainer(model, losses, train_data, validation_data, strategy = strategy, optimizer = optimizer, savehook = savehook, \n learning_rate_manager = learning_rate_manager, validation_losses = validation_losses)\n \n # Set these to False, when running on the hpc to supress the printed output\n trainer.train(verbose = True, validate_first = True)\n\n\n","repo_name":"mcoolsce/eMLP","sub_path":"emlp/examples/example_train.py","file_name":"example_train.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"46"} +{"seq_id":"16080802316","text":"from os import stat\nfrom fastapi import Request\nfrom fastapi import APIRouter\nfrom SQL.BankDBManager import BankDBManager\nfrom dto.Transaction import Transaction\nfrom datetime import datetime\nfrom fastapi import FastAPI, HTTPException\nbank_db_manager = BankDBManager()\n\nrouter = APIRouter(\n prefix=\"/transactions\",\n tags=[\"transactions\"]\n)\n\n\n@router.get('/balance')\ndef get_balance():\n balance_as_array = bank_db_manager.get_balance()\n return balance_as_array[0]\n\n@router.get('/')\ndef get_transactions(category=\"\", date=\"\", amount=\"\"):\n tranactions_from_db = bank_db_manager.get_transactions()\n list_of_all_transactions: list[Transaction] = [\n Transaction(**res) for res in tranactions_from_db]\n\n if category != \"\":\n list_of_all_transactions = [\n t for t in list_of_all_transactions if t.category == category]\n\n if amount != \"\":\n list_of_all_transactions = [\n t for t in list_of_all_transactions if t.amount >= int(amount)]\n\n if date != \"\":\n list_of_all_transactions = [\n t for t in list_of_all_transactions if t.tr_date >= datetime.strptime(date, '%Y-%m-%d').date()]\n\n return list_of_all_transactions\n\n\n@router.post('/')\nasync def add_transaction(request: Request):\n transaction: Transaction = Transaction(**(await request.json()))\n bank_db_manager.add_new_transaction(transaction)\n return transaction\n\n\n@router.delete('/{transactionID}')\nasync def delete_transaction(transactionID: int):\n 
bank_db_manager.delete_transaction(transactionID)\n","repo_name":"207Levy/Bank","sub_path":"Server/router/transaction_route.py","file_name":"transaction_route.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"12089140908","text":"#!/usr/bin/python3\nimport socket\nimport threading\nimport json\nimport time\nimport traceback\nimport subprocess\nimport os\nimport sys\nimport hashlib\nimport ctypes\n\ndatadir = '/mnt/var'\nbaseurl = 'https://yoomoney.ru/'\n\nlibx11=ctypes.CDLL('libX11.so.6')\nlibxtst=ctypes.CDLL('libXtst.so.6')\ndis=libx11.XOpenDisplay(None)\n\ndef kpress(kcode, d=1, u=1):\n\tsy = libx11.XFlush\n\tif d: libxtst.XTestFakeKeyEvent(dis, kcode, True, 0)\n\tsy(dis)\n\tif u: libxtst.XTestFakeKeyEvent(dis, kcode, False, 0)\n\tsy(dis)\n\nif 'early' in sys.argv:\n\ttime.sleep(10)\n\tkpress(24)\n\tkpress(39)\n\tkpress(36)\n\ttime.sleep(50)\n\t#kpress(71)\n\t#time.sleep(3)\n\tkpress(36)\n\t#time.sleep(5)\n\tkpress(37, 1, 0)\n\tkpress(64, 1, 0)\n\tkpress(28)\n\tkpress(37, 0, 1)\n\tkpress(64, 0, 1)\n\ttime.sleep(15)\n\tkpress(56)\n\tkpress(28)\n\tkpress(36)\n\texit()\n\ntoken1 = '\\r\\nrqbxmvJKSsNevlZDlTkiBktCVNdYWp\\r\\n\\r\\n'\ntoken2 = '/QINdbNFKeGGUmeAnbRhksGOTTZrATR'\n\nquery = []\npayed = []\nbad = []\n\nclass st:\n\twindow = None\n\tcapture = None\n\tmywallet = None\n\tpaymethod = None\n\tid = None\n\tdetails = None\n\tamount = None\n\trenew = False\n\ndef popcode():\n\tyfile = '/mnt/uc/' + st.mywallet\n\tcl = open(yfile).readlines()\n\tc = cl[0].split(' ')[1].strip()\n\topen(yfile, 'w').writelines(cl[1:])\n\tprint(len(cl), 'codes')\n\tst.renew = len(cl) < 7\n\treturn c\n\ndef timer():\n\twhile True:\n\t\ttry:\n\t\t\t#if st.id and st.enter:\n\t\t\t#\ttime.sleep(3)\n\t\t\t#\tif st.id and st.enter:\n\t\t\t#\t\tkpress(71)\n\t\t\t#\t\ttime.sleep(2)\n\t\t\t#\t\tkpress(36)\n\t\t\t#\t\ttime.sleep(3)\n\t\t\t#\tif st.id and st.enter:\n\t\t\t#\t\tkpress(36)\n\t\t\t#\t\ttime.sleep(2)\n\t\t\t#\ttime.sleep(5)\n\t\t\tprocess()\n\t\t\tif st.window and st.window.poll() is not None:\n\t\t\t\tst.window = None\n\t\t\t\tfinish()\n\t\texcept:\n\t\t\tpass\n\t\ttime.sleep(2)\n\nthreading.Thread(target=timer).start()\n\ndef screenrec(id):\n\tos.system('xscreensaver-command -deactivate')\n\tst.capture = subprocess.Popen(['/usr/bin/ffmpeg', '-f', 'x11grab', '-draw_mouse', '1', '-framerate', '25', '-video_size', '1366x768',\n\t\t'-i', ':0+0,0', '-pix_fmt', 'yuv420p', '-c:v', 'libx264', '-preset', 'veryfast', '-q:v', '1', '-s', '1366x768', '-f', 'matroska', \n\t\t'-v', '-8', '/mnt/screenrec/'+id+'.mkv'])\n\ndef process():\n\tif st.id or not query: return\n\tq = query.pop()\n\ta, st.mywallet, st.paymethod, st.id, st.details, st.amount = q\n\tif int(float(st.amount)) == float(st.amount):\n\t\tst.amount = str(int(float(st.amount)))\n\tif os.access(datadir + '/kpress', os.R_OK):\n\t\tkpress(int(open(datadir + '/kpress').read()))\n\t\ttime.sleep(1)\n\tst.window = subprocess.Popen(['xmessage']+q)\n\tbrowser = 'c' + st.mywallet\n\tif st.paymethod == 'billing':\n\t\tsubprocess.Popen([browser, baseurl+'main'])\n\telse:\n\t\tscreenrec(st.id)\n\t\tsubprocess.Popen([browser, {'wallet': baseurl+'transfer/a2w', 'phone': baseurl+'phone'}[st.paymethod]])\n\tst.enter = True\n\ndef finish(renew=True):\n\tif renew and st.renew:\n\t\tbrowser = 'c' + st.mywallet\n\t\tsubprocess.Popen([browser, baseurl+'emergency-codes'])\n\telse:\n\t\tst.id = None\n\t\tst.details = None\n\t\ttry:\n\t\t\tif 
st.window:\n\t\t\t\tst.window.terminate()\n\t\t\t\tst.window = None\n\t\t\tst.capture.terminate()\n\t\texcept:\n\t\t\tpass\n\t\tprint('finish')\n\t\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ns.bind(('127.0.0.1', 22222))\ns.listen(1)\nwhile True:\n\ttry:\n\t\tres = ''\n\t\tc, a = s.accept()\n\t\tc.settimeout(5)\n\t\treq = b''\n\t\twhile b'\\r\\n\\r\\n' not in req:\n\t\t\tp = c.recv(8192)\n\t\t\tif not p: break\n\t\t\treq += p\n\t\tq = req.decode()\n\t\t\n\t\tif token1 in q:\n\t\t\tq = q.replace(token1, '').split('\\t')\n\t\t\t\n\t\t\tif q[0] == 'new':\n\t\t\t\tquery.insert(0, q)\n\t\t\t\n\t\t\tif q[0] == 'search':\n\t\t\t\tif len(q) == 3:\n\t\t\t\t\tquery.append(['new', q[2], 'billing', '-1', '0', '0'])\n\t\t\t\telse:\n\t\t\t\t\tif q[1] in payed:\n\t\t\t\t\t\tres = 'READY'\n\t\t\t\t\tif q[1] in bad:\n\t\t\t\t\t\tres = 'bad'\n\t\t\n\t\tif token2 in q:\n\t\t\twhile b'' not in req:\n\t\t\t\tp = c.recv(8192)\n\t\t\t\tif not p: break\n\t\t\t\treq += p\n\t\t\tq = json.loads(req.decode().split('\\r\\n\\r\\n')[1].replace('', ''))\n\t\t\tif q['action'] == 'details' and st.id and q['paymethod'] == st.paymethod:\n\t\t\t\tres = {'details': st.details, 'amount': st.amount}\n\t\t\t\n\t\t\tif q['action'] == 'getcode' and st.id:\n\t\t\t\tif float(q['amount'].replace('\\xa0', '').replace('\\t', '').strip()) <= float(st.amount)*1.03 and q['details'].strip() in st.details:\n\t\t\t\t\tprint('MATCH')\n\t\t\t\t\tres = {'ok': 1, 'c': popcode()}\n\t\t\t\t\tst.details = st.amount = None\n\t\t\t\telse:\n\t\t\t\t\tprint(q)\n\t\t\t\n\t\t\tif q['action'] == 'confirm' and st.id and not st.details:\n\t\t\t\tpayed.append(st.id)\n\t\t\t\tres = {'closed': 1, \n\t\t\t\t\t'redir': os.access(datadir + '/actions-' + st.mywallet, os.R_OK) and len(open(datadir + '/actions-' + st.mywallet).read())}\n\t\t\t\tfinish()\n\t\t\t\t\n\t\t\tif q['action'] == 'bad':\n\t\t\t\tbad.append(st.id)\n\t\t\t\tres = {'saved': 1}\n\t\t\t\tfinish('codeused' in q)\n\t\t\t\n\t\t\tif q['action'] == 'acode':\n\t\t\t\tres = {'c': popcode()}\n\t\t\t\n\t\t\tif q['action'] == 'savecodes':\n\t\t\t\tyfile = '/mnt/uc/' + st.mywallet\n\t\t\t\topen(yfile, 'w').write(q['content'])\n\t\t\t\tres = {'saved': 1}\n\t\t\t\tos.system('e7z /mnt/uc; uz &')\n\t\t\t\tfinish(False)\n\t\t\t\topen(datadir + '/codes-' + st.mywallet, 'w').write(q['content'])\n\t\t\t\n\t\t\tif q['action'] == 'loaded':\n\t\t\t\tst.enter = False\n\t\t\t\tres = 1\n\t\t\t\tprint('loaded')\n\t\t\t\n\t\t\tif 'bal' in q and q['mywallet'].isdigit():\n\t\t\t\trv = 0\n\t\t\t\tw = q['mywallet']\n\t\t\t\tif os.access(datadir + '/actions-' + w, os.R_OK):\n\t\t\t\t\trv = 1\n\t\t\t\t\tassert q['bal'] is not None\n\t\t\t\t\topen(datadir + '/balance-' + w, 'w').write(str(q['bal']))\n\t\t\t\t\told = open(datadir + '/actions-' + w).read().split('\\n')\n\t\t\t\t\tnewact = ''# if ''.join(old) else q['action']\n\t\t\t\t\twhile old:\n\t\t\t\t\t\tif not old[-1].strip():\n\t\t\t\t\t\t\told.pop()\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tcan = (q['action']+'\\r\\r').replace('\\n'.join(old)+'\\n\\r\\r', '')\n\t\t\t\t\t\tif '\\r\\r' not in can:\n\t\t\t\t\t\t\tnewact = can\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\told.pop()\n\t\t\t\t\topen(datadir + '/actions-' + w, 'w').write(q['action'])\n\t\t\t\t\tfor act in newact.split('\\n'):\n\t\t\t\t\t\tprint(act)\n\t\t\t\t\t\tif not act: continue\n\t\t\t\t\t\tfn = hashlib.sha256(act.encode()).hexdigest() + '.eml'\n\t\t\t\t\t\tact = act.split('\\t')\n\t\t\t\t\t\tamo = str(float(act[1]))\n\t\t\t\t\t\tif act[0] == 
'in':\n\t\t\t\t\t\t\topen(datadir + '/incoming-' + w + '/' + fn, 'w').write('in\\n'+amo)\n\t\t\t\t\t\tif act[0] == 'out':\n\t\t\t\t\t\t\topen(datadir + '/transactions-' + w + '/' + fn, 'w').write('out\\n'+amo)\n\t\t\t\t\t\n\t\t\t\tres = {'saved': rv}\n\t\t\t\tif st.paymethod == 'billing' and st.mywallet == w:\n\t\t\t\t\tfinish(False)\n\t\t\n\t\tif res: res = json.dumps(res)\n\t\tc.sendall(('HTTP/1.1 200 OK\\r\\nAccess-Control-Allow-Origin: *\\r\\nConnection: close\\r\\nContent-Length: '+str(len(res))+\n\t\t\t'\\r\\n\\r\\n'+res).encode())\n\t\tc.close()\n\texcept Exception:\n\t\ttraceback.print_exc()\n","repo_name":"AsgardB/parovoz","sub_path":"apm/listen.py","file_name":"listen.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"12561448500","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# Python modules\nimport datetime\nimport time\nimport configparser as ConfigParser\nimport json\nimport uuid\nimport zmq\n\n# Added modules\nimport pymongo\nfrom pymongo import MongoClient\nfrom pymongo import errors as PyError\n# Own Modules\n\nfrom logger import logger\nlogger = logger('server_init', stream_level='INFO')\n\n\nclass InitProcess(object):\n\n def __init__(self, config_file, restart):\n \"\"\" \"\"\"\n self.config_file = config_file\n self.dbs_names = ['process_profile',\n 'current_profile',\n 'stored_profile',\n 'process_link',\n 'current_link',\n 'stored_link',\n 'change_link',\n 'context_link',\n 'context',\n 'activity',\n 'process_tweet',\n 'tweet_info',\n 'stored_tweet',\n 'rand_lvl2',\n 'rand_lvl3',\n 'full_link']\n\n self.restart = restart.lower()\n\n def init_values(self):\n \"\"\" \"\"\"\n self.read_config(self.config_file)\n self.dbs = self.build_dbs(self.address_db, self.db_name)\n logger.info(self.dbs)\n\n self.loop_interval = int(self.loop_interval)\n self.time_lvl2 = self.get_time_lvl2()\n if self.restart == 'false':\n self.build_index(**self.dbs)\n self.twitter_key = self.get_keys(self.twitter_file)\n self.client_id = self.get_clients()\n lvl1_list = self.get_lvl1(self.lvl1_file)\n self.set_lvl1 = set([int(i) for i in lvl1_list])\n self.loop_number = 1\n self.nbr_client = set()\n self.record_lvl1(lvl1_list)\n # self.max_limit = Limit(self.loop_interval, len(self.client_id)).calculing_limit()\n self.start_time = datetime.datetime.now()\n # self.write_start()\n # self.stop_time = self.get_stop_time()\n # These values are set up in the config file\n elif self.restart == 'true':\n self.client_id = self.get_clients()\n lvl1_list = self.get_lvl1(self.lvl1_file)\n self.set_lvl1 = set([int(i) for i in lvl1_list])\n self.get_restart()\n\n else:\n raise('Need to enter \"true\" or \"false\"')\n\n return {'loop_interval': self.loop_interval,\n 'databases': self.dbs,\n 'time_lvl2': self.time_lvl2,\n 'set_lvl1': self.set_lvl1,\n 'loop_number': self.loop_number,\n 'nbr_client': self.client_id}\n\n def get_restart(self):\n \"\"\" function to get the values from the db to restart \"\"\"\n # self.nbr_client = set()\n # self.set_lvl1 = set()\n loops = set()\n logger.info('Doing Profile for loop')\n for profile in self.dbs['process_profile'].find({}, {'loop_number': True, '_id': False}):\n # self.nbr_client.add(profile.pop('profile_client_id', None))\n loop_number = profile['loop_number']\n if loop_number:\n loops.add(loop_number)\n if len(loops) == 2:\n logger.info('Get two loops from profile: {}'.format(loops))\n break\n logger.info('Doing Links for loop')\n for links in 
self.dbs['process_link'].find({}, {'loop_number': True, '_id': False}):\n # self.nbr_client.add(profile.pop('profile_client_id', None))\n loop_number = links['loop_number']\n if loop_number:\n loops.add(loop_number)\n if len(loops) == 2:\n logger.info('Get two loops from links: {}'.format(loops))\n break\n logger.info('Doing rand_3 for loop')\n for links in self.dbs['rand_lvl3'].find({}, {'loop_number': True, '_id': False}):\n # self.nbr_client.add(profile.pop('profile_client_id', None))\n loop_number = links['loop_number'] - 1\n if loop_number:\n loops.add(loop_number)\n if len(loops) == 2:\n logger.info('Get two loops from rand_lvl3: {}'.format(loops))\n break\n logger.info('Doing rand_2 for loop')\n for links in self.dbs['rand_lvl2'].find({}, {'loop_number': True, '_id': False}):\n # self.nbr_client.add(profile.pop('profile_client_id', None))\n loop_number = links['loop_number'] - 1\n if loop_number:\n loops.add(loop_number)\n if len(loops) == 2:\n logger.info('Get two loops from rand_lvl2: {}'.format(loops))\n break\n # self.set_lvl1.add(profile.pop('id_str', None))\n self.loop_number = min(int(s) for s in loops)\n logger.info('loop_number: {}'.format(self.loop_number))\n\n def write_start(self):\n \"\"\" \"\"\"\n json.dump({'loop_interval': self.loop_interval,\n 'client_id': self.client_id,\n 'time_lvl2': self.time_lvl2,\n 'databases': str(self.dbs),\n 'start_time': str(self.start_time),\n 'lvl1': self.set_lvl1}, open('start_params.txt', 'w'))\n\n def read_config(self, config_file):\n Config = ConfigParser.ConfigParser()\n Config.read(config_file)\n for section in Config.sections():\n for option in Config.options(section):\n setattr(self, option, Config.get(section, option))\n\n def build_dbs(self, address, db_name):\n \"\" \"\"\n if address is None:\n c = MongoClient()\n else:\n c = MongoClient(address)\n db = c[db_name]\n return {k: db[k] for k in self.dbs_names}\n\n def build_index(self, **kwargs):\n \"\"\" \"\"\"\n try:\n kwargs['process_profile'].create_index([('id_str', pymongo.DESCENDING),\n ('loop_number', pymongo.ASCENDING)],\n unique=True)\n kwargs['process_profile'].create_index('extra')\n kwargs['process_profile'].create_index('doing')\n\n kwargs['stored_profile'].create_index('id_str', unique=True)\n\n kwargs['process_link'].create_index([('id_str', pymongo.ASCENDING),\n ('type_link', pymongo.DESCENDING)],\n unique=True)\n kwargs['full_link'].create_index([('id_str', pymongo.ASCENDING),\n ('type_link', pymongo.DESCENDING),\n ('loop_number', pymongo.ASCENDING)],\n unique=True)\n\n kwargs['process_tweet'].create_index('id_str', unique=True)\n kwargs['process_tweet'].create_index('processing')\n\n kwargs['stored_link'].create_index('type_link')\n kwargs['stored_link'].create_index('id_str')\n\n kwargs['context_link'].create_index('loop_number')\n kwargs['context_link'].create_index('loop_number')\n\n kwargs['activity'].create_index('id_str')\n kwargs['activity'].create_index('loop_number')\n\n kwargs['context'].create_index('id_str')\n kwargs['stored_tweet'].create_index('id_str', unique=True)\n kwargs['tweet_info'].create_index('id_str', unique=True)\n kwargs['rand_lvl2'].create_index('id_str', unique=True)\n kwargs['rand_lvl3'].create_index('id_str', unique=True)\n\n except PyError.ServerSelectionTimeoutError:\n raise('Error in DBS connection, check if MongoDB is alive')\n\n def get_keys(self, twitter_file):\n \"\"\" \"\"\"\n keydict = {}\n with open(twitter_file, 'r') as f:\n for line in f:\n key, val = line.split(':')\n keydict[key] = val[:-1]\n return keydict\n\n def 
get_clients(self):\n \"\"\" \"\"\"\n client_id = list()\n\n context_status = zmq.Context()\n timeout_start = time.time()\n\n status_sock = context_status.socket(zmq.REP)\n status_sock.setsockopt(zmq.RCVTIMEO, 2000)\n # status_sock.setsockopt(zmq.RCVTIMEO, 2000)\n status_sock.bind(\"tcp://0.0.0.0:{}\".format(self.status_port))\n while time.time() < (timeout_start + 10):\n try:\n client_to_connect = status_sock.recv()\n if client_to_connect.decode() == 'id_request':\n _id = str(uuid.uuid4())\n data = {'client_id': _id, 'dbs_names': self.dbs_names}\n status_sock.send_json(data)\n client_id.append(_id)\n else:\n status_sock.send('too early'.encode('utf-8'))\n except zmq.error.Again:\n pass\n # time.sleep(2)\n\n logger.info('Get {} clients'.format(len(client_id)))\n logger.info('Close the socket')\n status_sock.close()\n context_status.term()\n logger.info('Socket closed')\n return client_id\n\n def get_time_lvl2(self):\n \"\"\"docstring for time_restrain\"\"\"\n try:\n if int(self.time_lvl2) != 1:\n if int(self.time_lvl2) < int(self.loop_interval):\n value = int(self.loop_interval) *4\n else:\n value = int(self.loop_interval)* int(self.time_lvl2)\n else:\n value = int(self.time_lvl2)\n except ValueError: # In case of None or string or anything\n # not a number\n value = int(self.loop_interval)* 4\n return value\n\n def get_lvl1(self, lvl1_file):\n logger.info('Getting the lvl1 file - {}'.format(lvl1_file))\n with open(lvl1_file, 'r') as f:\n return [line[:-1] for line in f]\n\n def record_lvl1(self, lvl1_list):\n \"\"\" \"\"\"\n for user in lvl1_list:\n info_user = dict()\n info_user['id_str'] = int(user)\n info_user['loop_number'] = 1\n\n self.dbs['process_profile'].insert_one(info_user)\n self.dbs['process_tweet'].insert_one(info_user)\n info_user['type_link'] = 'followers'\n self.dbs['process_link'].insert_one(info_user)\n info_user['type_link'] = 'friends'\n try:\n del info_user['_id']\n except KeyError:\n pass\n self.dbs['process_link'].insert_one(info_user)\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Oliph/PhD-WebScience","sub_path":"server_init.py","file_name":"server_init.py","file_ext":"py","file_size_in_byte":10699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"29926869924","text":"import datetime\nfrom flask import Flask, render_template, request, redirect, session\napp = Flask(__name__)\napp.secret_key = 'development_key'\n\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/checkout/submit', methods=['POST'])\ndef checkout_submit():\n print(request.form)\n session['checkout_form'] = request.form\n return redirect('/checkout')\n\n\n@app.route('/checkout')\ndef checkout():\n count = 0\n count += int(session['checkout_form']['strawberry'])\n count += int(session['checkout_form']['raspberry'])\n count += int(session['checkout_form']['apple'])\n session['count'] = count\n session['date'] = datetime.datetime.now()\n return render_template(\"checkout.html\", session=session)\n\n\n@app.route('/fruits')\ndef fruits():\n return render_template(\"fruits.html\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"vaught-dawson/Python_v21.1_Assignments","sub_path":"flask/fundamentals/dojo_fruit_store/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"73226820938","text":"# -*- coding:utf-8 -*-\nimport 
requests, os\nfrom flask import render_template, request, flash, redirect, url_for, send_from_directory, send_file\nfrom flask_login import current_user, login_required\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom pandas import DataFrame\nfrom pydrive.auth import GoogleAuth\nfrom reportlab.lib import styles\nfrom reportlab.lib.enums import TA_RIGHT, TA_CENTER\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.lib.utils import ImageReader\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph, Image, TableStyle, Table, Spacer\nfrom sqlalchemy import cast, Date\nfrom werkzeug.utils import secure_filename\nfrom . import purchase_tracker_bp as purchase_tracker\nfrom .forms import *\nfrom datetime import datetime\nfrom pytz import timezone\nfrom pydrive.drive import GoogleDrive\nfrom .models import PurchaseTrackerAccount, PurchaseTrackerForm\nfrom flask_mail import Message\nfrom ..main import mail\nfrom ..roles import finance_procurement_permission\n\n# Upload images for Google Drive\n\n\nFOLDER_ID = \"1JYkU2kRvbvGnmpQ1Tb-TcQS-vWQKbXvy\"\n\njson_keyfile = requests.get(os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')).json()\n\nbangkok = timezone('Asia/Bangkok')\n\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\n\n@purchase_tracker.route('/official/')\n@login_required\ndef landing_page():\n return render_template('purchase_tracker/first_page.html')\n\n\n@purchase_tracker.route('/personnel/personnel_index')\ndef staff_index():\n return render_template('purchase_tracker/personnel/personnel_index.html')\n\n\n@purchase_tracker.route('/personnel/personnel_index/e-form/method/select/')\ndef select_form(account_id):\n account = PurchaseTrackerAccount.query.get(account_id)\n return render_template('purchase_tracker/personnel/alternative_form.html', account=account)\n\n\n@purchase_tracker.route('/main')\ndef index():\n return render_template('purchase_tracker/index.html')\n\n\n@purchase_tracker.route('/account/create', methods=['GET', 'POST'])\n@login_required\ndef add_account():\n form = CreateAccountForm()\n if request.method == 'POST':\n filename = ''\n account = PurchaseTrackerAccount()\n form.populate_obj(account)\n account.creation_date = bangkok.localize(datetime.now())\n account.staff = current_user\n drive = initialize_gdrive()\n if form.upload.data:\n if not filename or (form.upload.data.filename != filename):\n upfile = form.upload.data\n filename = secure_filename(upfile.filename)\n upfile.save(filename)\n file_drive = drive.CreateFile({'title': filename,\n 'parents': [{'id': FOLDER_ID, \"kind\": \"drive#fileLink\"}]})\n file_drive.SetContentFile(filename)\n try:\n file_drive.Upload()\n permission = file_drive.InsertPermission({'type': 'anyone',\n 'value': 'anyone',\n 'role': 'reader'})\n except:\n flash('Failed to upload the attached file to the Google drive.', 'danger')\n else:\n flash('The attached file has been uploaded to the Google drive', 'success')\n account.url = file_drive['id']\n\n db.session.add(account)\n db.session.commit()\n flash(u'บันทึกข้อมูลสำเร็จ.', 'success')\n return render_template('purchase_tracker/personnel/personnel_index.html')\n # Check Error\n else:\n for er in form.errors:\n flash(er, 'danger')\n return render_template('purchase_tracker/create_account.html', form=form)\n\n\ndef initialize_gdrive():\n gauth = GoogleAuth()\n scopes = 
['https://www.googleapis.com/auth/drive']\n gauth.credentials = ServiceAccountCredentials.from_json_keyfile_dict(json_keyfile, scopes)\n return GoogleDrive(gauth)\n\n\n@purchase_tracker.route('/track/')\n@purchase_tracker.route('/track/', methods=['GET'])\ndef track(account_id=None):\n list_type = request.args.get('list_type')\n if list_type == \"myAccount\" or list_type is None:\n accounts = PurchaseTrackerAccount.query.filter_by(staff_id=current_user.id).all()\n elif list_type == \"ourAccount\":\n org = current_user.personal_info.org\n accounts = [account for account in PurchaseTrackerAccount.query.all()\n if account.staff.personal_info.org == org]\n return render_template('purchase_tracker/tracking.html',\n account_id=account_id, accounts=accounts, list_type=list_type)\n\n\n@purchase_tracker.route('/track//view')\ndef view_info_track(account_id=None):\n from sqlalchemy import desc\n if account_id:\n account = PurchaseTrackerAccount.query.get(account_id)\n # better make use of the relationship!\n activities = [a.to_list() for a in account.records.all()]\n else:\n flash(u'ข้อมูลจะปรากฎเมื่อหน่วยงานคลังและพัสดุอัพเดตเรียบร้อย', 'warning')\n activities = []\n # activities = [a.to_list() for a in PurchaseTrackerStatus.query.filter_by(account_id=account_id)\n # .order_by(PurchaseTrackerStatus.start_date)]\n if not activities:\n default_date = datetime.now().isoformat()\n else:\n default_date = activities[-1][3]\n return render_template('purchase_tracker/view_info_track.html',\n account_id=account_id, account=account, desc=desc,\n PurchaseTrackerStatus=PurchaseTrackerStatus,\n activities=activities, default_date=default_date)\n\n\n@purchase_tracker.route('/account//edit', methods=['GET', 'POST'])\n@login_required\ndef edit_account(account_id):\n account = PurchaseTrackerAccount.query.get(account_id)\n form = CreateAccountForm(obj=account)\n if request.method == 'POST':\n filename = ''\n form.populate_obj(account)\n account.creation_date = bangkok.localize(datetime.now())\n account.staff = current_user\n drive = initialize_gdrive()\n if form.upload.data:\n if not filename or (form.upload.data.filename != filename):\n upfile = form.upload.data\n filename = secure_filename(upfile.filename)\n upfile.save(filename)\n file_drive = drive.CreateFile({'title': filename,\n 'parents': [{'id': FOLDER_ID, \"kind\": \"drive#fileLink\"}]})\n file_drive.SetContentFile(filename)\n try:\n file_drive.Upload()\n permission = file_drive.InsertPermission({'type': 'anyone',\n 'value': 'anyone',\n 'role': 'reader'})\n except:\n flash('Failed to upload the attached file to the Google drive.', 'danger')\n else:\n flash('The attached file has been uploaded to the Google drive', 'success')\n purchase_tracker.url = file_drive['id']\n\n db.session.add(account)\n db.session.commit()\n flash(u'บันทึกข้อมูลสำเร็จ.', 'success')\n return redirect(url_for('purchase_tracker.track'))\n # Check Error\n else:\n for er in form.errors:\n flash(er, 'danger')\n return render_template('purchase_tracker/edit_account.html', form=form, account_id=account_id)\n\n\n@purchase_tracker.route('/accounts//cancel', methods=['GET'])\n@login_required\ndef cancel_account(account_id):\n account = PurchaseTrackerAccount.query.get(account_id)\n if not account.cancelled_datetime:\n account.cancelled_datetime = datetime.now(tz=bangkok)\n account.cancelled_by = current_user\n db.session.add(account)\n db.session.commit()\n flash(u'ยกเลิกบัญชีเรียบร้อยแล้ว', 'success')\n else:\n flash(u'บัญชีนี้ถูกยุติการดำเนินการแล้ว', 'warning')\n next = 
request.args.get('next')\n if next:\n return redirect(next)\n return redirect(url_for('purchase_tracker.track', account_id=account_id))\n\n\n@purchase_tracker.route('/accounts//close', methods=['GET'])\n@login_required\ndef close_account(account_id):\n account = PurchaseTrackerAccount.query.get(account_id)\n if not account.end_datetime:\n account.end_datetime = datetime.now(tz=bangkok)\n db.session.add(account)\n db.session.commit()\n flash(u'ปิดบัญชีเรียบร้อยแล้ว', 'success')\n else:\n flash(u'บัญชีนี้ถูกปิดการดำเนินการแล้ว', 'warning')\n return redirect(url_for('purchase_tracker.update_status', account_id=account_id))\n\n\n@purchase_tracker.route('/supplies/')\n@finance_procurement_permission.require()\ndef supplies():\n from sqlalchemy import desc\n accounts = PurchaseTrackerAccount.query.all()\n return render_template('purchase_tracker/procedure_supplies.html',\n accounts=accounts,\n desc=desc,\n PurchaseTrackerStatus=PurchaseTrackerStatus)\n\n\n@purchase_tracker.route('/description')\ndef description():\n return render_template('purchase_tracker/description.html')\n\n\n@purchase_tracker.route('/contact')\ndef contact():\n return render_template('purchase_tracker/contact_us.html')\n\n\ndef send_mail(recp, title, message):\n message = Message(subject=title, body=message, recipients=recp)\n mail.send(message)\n\n\n@purchase_tracker.route('/account//update', methods=['GET', 'POST'])\n@finance_procurement_permission.require()\n@login_required\ndef update_status(account_id):\n form = StatusForm()\n account = PurchaseTrackerAccount.query.get(account_id)\n if request.method == 'POST':\n if form.validate_on_submit():\n status = PurchaseTrackerStatus()\n form.populate_obj(status)\n status.account_id = account_id\n status.status_date = bangkok.localize(datetime.now())\n status.creation_date = bangkok.localize(datetime.now())\n status.cancel_datetime = bangkok.localize(datetime.now())\n status.update_datetime = bangkok.localize(datetime.now())\n status.staff = current_user\n if not form.other_activity.data and not form.activity.data:\n flash(u'กรุณาเลือกหัวข้อกิจกรรมหรือใส่กิจกรรมอื่นๆ.', 'danger')\n return redirect(\n url_for('purchase_tracker.update_status', account_id=account_id, form=form, account=account))\n db.session.add(status)\n db.session.commit()\n title = u'แจ้งเตือนการปรับเปลี่ยนสถานะการจัดซื้อพัสดุและครุภัณฑ์หมายเลข {}'.format(status.account.number)\n message = u'เรียน {}\\n\\nสถานะการจัดซื้อพัสดุและครุภัณฑ์หมายเลข {} คือ {}' \\\n .format(current_user.personal_info.fullname, status.account.number,\n status.other_activity or status.activity.activity)\n message += u'\\n\\n======================================================'\n message += u'\\nอีเมลนี้ส่งโดยระบบอัตโนมัติ กรุณาอย่าตอบกลับ ' \\\n u'หากมีปัญหาใดๆเกี่ยวกับเว็บไซต์กรุณาติดต่อหน่วยข้อมูลและสารสนเทศ '\n message += u'\\nThis email was sent by an automated system. Please do not reply.' 
\\\n u' If you have any problem about website, please contact the IT unit.'\n send_mail([u'{}@mahidol.ac.th'.format(account.staff.email)], title, message)\n flash(u'อัพเดตข้อมูลเรียบร้อย', 'success')\n form.activity.data = \"\"\n form.other_activity.data = \"\"\n form.comment.data = \"\"\n # Check Error\n else:\n flash(form.errors, 'danger')\n\n activities = [a.to_list() for a in PurchaseTrackerStatus.query.filter_by(account_id=account_id)\n .order_by(PurchaseTrackerStatus.start_date)]\n if not activities:\n default_date = datetime.now().isoformat()\n else:\n default_date = activities[-1][3]\n return render_template('purchase_tracker/update_record.html',\n account_id=account_id, form=form, activities=activities, account=account,\n default_date=default_date)\n\n\n@purchase_tracker.route('/account//status//edit', methods=['GET', 'POST'])\n@finance_procurement_permission.require()\n@login_required\ndef edit_update_status(account_id, status_id):\n status = PurchaseTrackerStatus.query.get(status_id)\n form = StatusForm(obj=status)\n if request.method == 'POST':\n if form.validate_on_submit():\n form.populate_obj(status)\n status.account_id = account_id\n status.status_date = bangkok.localize(datetime.now())\n status.creation_date = bangkok.localize(datetime.now())\n status.cancel_datetime = bangkok.localize(datetime.now())\n status.update_datetime = bangkok.localize(datetime.now())\n status.staff = current_user\n db.session.add(status)\n db.session.commit()\n title = u'แจ้งเตือนการแก้ไขปรับเปลี่ยนสถานะการจัดซื้อพัสดุและครุภัณฑ์หมายเลข {}'.format(\n status.account.number)\n message = u'เรียน {}\\n\\nสถานะการจัดซื้อพัสดุและครุภัณฑ์หมายเลข {} คือ {}' \\\n .format(current_user.personal_info.fullname, status.account.number,\n status.other_activity or status.activity.activity)\n message += u'\\n\\n======================================================'\n message += u'\\nอีเมลนี้ส่งโดยระบบอัตโนมัติ กรุณาอย่าตอบกลับ ' \\\n u'หากมีปัญหาใดๆเกี่ยวกับเว็บไซต์กรุณาติดต่อหน่วยข้อมูลและสารสนเทศ '\n message += u'\\nThis email was sent by an automated system. Please do not reply.' 
\\\n u' If you have any problem about website, please contact the IT unit.'\n send_mail([u'{}@mahidol.ac.th'.format(status.account.staff.email)], title, message)\n flash(u'แก้ไขข้อมูลเรียบร้อย', 'success')\n return redirect(url_for('purchase_tracker.update_status', status_id=status.id, account_id=account_id))\n return render_template('purchase_tracker/edit_update_record.html',\n account_id=account_id, form=form)\n\n\n@purchase_tracker.route('/account//status//delete')\n@finance_procurement_permission.require()\n@login_required\ndef delete_update_status(account_id, status_id):\n if account_id:\n status = PurchaseTrackerStatus.query.get(status_id)\n flash(u'The update status has been removed.')\n db.session.delete(status)\n db.session.commit()\n return redirect(url_for('purchase_tracker.update_status', account_id=account_id))\n\n\n@purchase_tracker.route('/create//activity', methods=['GET', 'POST'])\n@finance_procurement_permission.require()\n@login_required\ndef add_activity(account_id):\n activity = db.session.query(PurchaseTrackerActivity)\n form = CreateActivityForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n new_activity = PurchaseTrackerActivity()\n form.populate_obj(new_activity)\n db.session.add(new_activity)\n db.session.commit()\n flash(u'บันทึกการเพิ่มกิจกรรมใหม่สำเร็จ.', 'success')\n return redirect(url_for('purchase_tracker.update_status', account_id=account_id))\n # Check Error\n else:\n for er in form.errors:\n flash(er, 'danger')\n return render_template('purchase_tracker/create_activity.html', form=form, activity=activity, account_id=account_id)\n\n\n@purchase_tracker.route('/dashboard', methods=['GET', 'POST'])\ndef show_info_page():\n start_date = None\n end_date = None\n account_query = PurchaseTrackerAccount.query.all()\n form = ReportDateForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n start_date = datetime.strptime(form.start_date.data, '%d-%m-%Y')\n end_date = datetime.strptime(form.end_date.data, '%d-%m-%Y')\n account_query = PurchaseTrackerAccount.query.filter(\n cast(PurchaseTrackerAccount.booking_date, Date) >= start_date) \\\n .filter(cast(PurchaseTrackerAccount.booking_date, Date) <= end_date)\n else:\n flash(form.errors, 'danger')\n return render_template('purchase_tracker/info_page.html', account_query=account_query, form=form,\n start_date=start_date, end_date=end_date)\n\n\n@purchase_tracker.route('/dashboard/info/download', methods=['GET'])\ndef dashboard_info_download():\n records = []\n start_date = request.args.get('start_date')\n end_date = request.args.get('end_date')\n if start_date and end_date:\n accounts = PurchaseTrackerAccount.query.filter(cast(PurchaseTrackerAccount.booking_date, Date) >= start_date) \\\n .filter(cast(PurchaseTrackerAccount.booking_date, Date) <= end_date)\n else:\n accounts = PurchaseTrackerAccount.query.all()\n\n for account in accounts:\n for record in account.records:\n records.append({\n u'เลขที่หนังสือ': u\"{}\".format(account.number),\n u'วันที่หนังสือ': u\"{}\".format(account.booking_date),\n u'ชื่อ': u\"{}\".format(account.subject),\n u'วงเงินหลั��การ': u\"{:,.2f}\".format(account.amount),\n u'รูปแบบหลักการ': u\"{}\".format(account.formats),\n u'ผู้สร้าง account โดย': u\"{}\".format(account.staff.personal_info.fullname),\n u'หน่วยงาน/ภาควิชา': u\"{}\".format(account.staff.personal_info.org.name),\n u'กิจกรรม': u\"{}\".format(record.other_activity or record.activity.activity),\n u'ผู้รับผิดชอบ': u\"{}\".format(record.staff.personal_info.fullname),\n u'วันเริ่มกิจกรรม': 
u\"{}\".format(record.start_date),\n u'วันสิ้นสุดกิจกรรม': u\"{}\".format(record.end_date),\n u'หมายเหตุเพิ่มเติม': u\"{}\".format(record.comment),\n u'เวลาดำเนินกิจกรรม': u\"{}\".format(record.weekdays),\n })\n df = DataFrame(records)\n df.to_excel('account_summary.xlsx')\n return send_file(os.path.join(os.getcwd(), 'account_summary.xlsx'))\n\n\n# @purchase_tracker.route('/personnel/personnel_index/e-form/create//', methods=['GET', 'POST'])\n# @login_required\n# def create_form(account_id, form_code):\n# account = PurchaseTrackerAccount.query.get(account_id)\n# MTPCform = create_MTPCForm(acnt=account)\n# form = MTPCform()\n# if form.validate_on_submit():\n# new_form = PurchaseTrackerForm()\n# form.populate_obj(new_form)\n# new_form.staff = current_user\n# db.session.add(new_form)\n# db.session.commit()\n# flash(u'บันทึกข้อมูลสำเร็จ.', 'success')\n# form_letter(new_form, account)\n# return send_file('e-form.pdf')\n# # Check Error\n# else:\n# for er in form.errors:\n# flash(\"{}:{}\".format(er,form.errors[er]), 'danger')\n# return render_template('purchase_tracker/personnel/create_form_{}.html'.format(form_code), form=form, account=account)\n#\n#\n# sarabun_font = TTFont('Sarabun', 'app/static/fonts/THSarabunNew.ttf')\n# pdfmetrics.registerFont(sarabun_font)\n# style_sheet = getSampleStyleSheet()\n# style_sheet.add(ParagraphStyle(name='ThaiStyle', fontName='Sarabun'))\n# style_sheet.add(ParagraphStyle(name='ThaiStyleNumber', fontName='Sarabun', alignment=TA_RIGHT))\n# style_sheet.add(ParagraphStyle(name='ThaiStyleCenter', fontName='Sarabun', alignment=TA_CENTER))\n#\n#\n# def form_letter(form, account):\n# logo = Image('app/static/img/logo-MU.jpg', 60, 60)\n#\n# def all_page_setup(canvas, doc):\n# canvas.saveState()\n# logo_image = ImageReader('app/static/img/logo-MU.jpg')\n# canvas.drawImage(logo_image, 10, 700, width=70, height=70)\n# canvas.restoreState()\n#\n# doc = SimpleDocTemplate(\"app/e-form.pdf\",\n# pagesize=letter,\n# rightMargin=72,\n# leftMargin=72,\n# topMargin=72,\n# bottomMargin=18)\n#\n#\n# data = [ Paragraph(u'ภาควิชา / ศูนย์ {}'.format(account.staff.personal_info.org.name), style=style_sheet['ThaiStyle']),\n# Paragraph(u'ที่ {}'.format(form.account.number), style=style_sheet['ThaiStyle']),\n# Paragraph(u'วันที่ {}'.format(form.account.creation_date), style=style_sheet['ThaiStyle']),\n# Paragraph(u'เรื่อง {}'.format(form.account.subject), style=style_sheet['ThaiStyle']),\n# Paragraph(u'ข้าพเจ้า {}'.format(form.name), style=style_sheet['ThaiStyle']),\n# Paragraph(u'เหตุผลและความจำเป็นเร่งด่วนที่ต้องซื้อหรือจ้าง {}'.format(form.reason),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'รายละเอียดของพัสดุที่ซื้อหรือจ้าง {}'.format(form.account.desc),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'วงเงินที่ซื้อหรือจ้างในครั้งนี้เป็นเงินเท่าไหร่ {}'.format(form.account.amount),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'จาก {}'.format(form.account.amount),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'ตามใบส่งของ/ใบเสร็จรับเงินเล่มที่ {}'.format(form.book),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'เลขที่ {}'.format(form.number),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'วันที่ {}'.format(form.receipt_date),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'โดยขอเบิกจ่ายจากเงิน {}'.format(form.disbursement_method),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'ประจำปีงบประมาณ {}'.format(form.financial_year),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'วันที่ {}'.format(form.receipt_date),\n# style=style_sheet['ThaiStyle']),\n# 
Paragraph(u'รหัสศูนย์ต้นทุน {}'.format(form.cost_center),\n# style=style_sheet['ThaiStyle']),\n# Paragraph(u'รหัสใบสั่งงานภายใน {}'.format(form.internal_order),\n# style=style_sheet['ThaiStyle']),\n# ]\n# data.append(Spacer(1, 12))\n#\n# doc.build(data, onLaterPages=all_page_setup, onFirstPage=all_page_setup)\n\n","repo_name":"MUMT-IT/mis2018","sub_path":"app/purchase_tracker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":25624,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"46"} +{"seq_id":"41994451883","text":"import torch\nimport numpy as np\n\ndef get_uvcoords(H, W):\n i, j = np.meshgrid(np.linspace(0, W - 1, W), np.linspace(0, H - 1, H))\n dx = 1 / W\n dy = 1 / H\n for x in range(W):\n i[:, x] = dx / 2 + dx * x\n for y in range(H):\n j[y, :] = dy / 2 + dy * y\n\n j = np.flipud(j)\n uvs = np.stack((i, j), axis=2).reshape(H * W, 2)\n return uvs\n\ndef img2flat(img):\n \"\"\"\n [H,W,C] to [H*W,C] from row top down\n :param img:\n :return:\n \"\"\"\n rows=torch.chunk(img,len(img))\n flat=torch.cat(rows,dim=1)\n return flat.squeeze()\n\ndef flat2img(flat,H):\n \"\"\"\n [H*W,C] to [H,W,C] follow row top down rule\n :param flat:\n :return:\n \"\"\"\n rows=torch.chunk(flat,H)\n img=torch.stack(rows)\n return img\n\n\n# img=torch.tensor(np.arange(75).reshape((5,5,3)))\n# print(img)\n# print(flat2img(img2flat(img),len(img)))\n#\n# ten=torch.Tensor(np.arange(25*3).reshape(25,3))\n# # print(ten)\n# # rows=torch.chunk(ten,5,dim=0)\n# # img=torch.stack(rows)\n# # print(img.shape)\n# # img.transpose_(0,2) # hwc-cwh\n# # img.transpose_(1,2) # cwh-chw\n# # print(img.shape)\n# print(ten.data)\n\nten1=torch.tensor(np.arange(15).reshape(5,3))\nten2=torch.tensor(np.arange(15).reshape(5,3)+1)\nten3=torch.tensor(np.arange(15).reshape(5,3)+2)\nlis=[ten1,ten2,ten3]\nten4=torch.cat(lis,dim=0)\nprint(ten4)\n","repo_name":"mudimingquedeyinmoujia/python_learning","sub_path":"torch_test/chunk.py","file_name":"chunk.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"20448119537","text":"import pytest\nfrom pytest import approx\n\nfrom rules_engine import engine\n\n\n@pytest.mark.parametrize(\n \"avg_temp, balance_point, expected_result\",\n [\n (72, 60, 0), # outside hotter than balance point\n (60, 60, 0), # outside equal to balance point\n (57, 60, 3), # outside cooler than balance point\n ],\n)\ndef test_hdd(avg_temp, balance_point, expected_result):\n assert engine.hdd(avg_temp, balance_point) == expected_result\n\n\n@pytest.mark.parametrize(\n \"temps, expected_result\",\n [\n ([72, 60, 55, 61], 5), # one day with HDDs\n ([52, 60, 55], 13), # two days with HDDs\n ([72, 60, 65, 60, 80], 0), # no days with HDDs\n ],\n)\ndef test_period_hdd(temps, expected_result):\n assert engine.period_hdd(temps, 60) == expected_result\n\n\ndef test_average_indoor_temp():\n set_temp = 68\n setback = 62\n setback_hrs = 8\n\n # when there is no setback, just put 0 for the setback parameters\n assert engine.average_indoor_temp(set_temp, 0, 0) == set_temp\n assert engine.average_indoor_temp(set_temp, setback, setback_hrs) == 66\n\n\ndef test_bp_ua_estimates():\n home = engine.Home(\n engine.FuelType.GAS, heat_sys_efficiency=0.88, initial_balance_point=58\n )\n\n daily_temps_lists = [\n [28, 29, 30, 29],\n [32, 35, 35, 38],\n [41, 43, 42, 42],\n [72, 71, 70, 69],\n ]\n usages = [50, 45, 30, 0.96]\n inclusion_codes = [1, 1, 1, -1]\n 
home.initialize_billing_periods(daily_temps_lists, usages, inclusion_codes)\n home.calculate_avg_non_heating_usage()\n home.calculate_balance_point_and_ua()\n\n ua_1, ua_2, ua_3 = [bill.ua for bill in home.bills_winter]\n\n assert home.balance_point == 60\n assert ua_1 == approx(1450.5, abs=1)\n assert ua_2 == approx(1615.3, abs=1)\n assert ua_3 == approx(1479.6, abs=1)\n assert home.avg_ua == approx(1515.1, abs=1)\n assert home.stdev_pct == approx(0.0474, abs=0.01)\n\n\ndef test_bp_ua_with_outlier():\n home = engine.Home(\n engine.FuelType.GAS, heat_sys_efficiency=0.88, initial_balance_point=58\n )\n daily_temps_lists = [\n [41.7, 41.6, 32, 25.4],\n [28, 29, 30, 29],\n [32, 35, 35, 38],\n [41, 43, 42, 42],\n [72, 71, 70, 69],\n ]\n usages = [60, 50, 45, 30, 0.96]\n inclusion_codes = [1, 1, 1, 1, -1]\n home.initialize_billing_periods(daily_temps_lists, usages, inclusion_codes)\n home.calculate_avg_non_heating_usage()\n home.calculate_balance_point_and_ua()\n ua_1, ua_2, ua_3 = [bill.ua for bill in home.bills_winter]\n\n assert home.balance_point == 60\n assert ua_1 == approx(1450.5, abs=1)\n assert ua_2 == approx(1615.3, abs=1)\n assert ua_3 == approx(1479.6, abs=1)\n assert home.avg_ua == approx(1515.1, abs=1)\n assert home.stdev_pct == approx(0.0474, abs=0.01)\n","repo_name":"codeforboston/home-energy-analysis-tool","sub_path":"rules-engine/tests/test_rules_engine/test_engine.py","file_name":"test_engine.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"9907747205","text":"from django.test import TestCase, Client\n\nfrom account.models import User\nfrom care_point.forms import IllnessForm\nfrom care_point.models import Illness, Manager, Point_of_care\n\n\nclass IllnessTest(TestCase):\n def setUp(self):\n self.client = Client()\n\n Illness.objects.create(name='illness_1', description='desc_1').save()\n Illness.objects.create(name='illness_2', description='desc_2').save()\n Illness.objects.create(name='illness_3', description='desc_3').save()\n\n self.user = User.objects.create(username='m1', first_name='name_m1', last_name='sure_name_m1', is_manager=True,\n is_caregiver=False, password='123456Mp')\n self.user.save()\n point_of_care = Point_of_care.objects.create(city='Bstok')\n point_of_care.save()\n manager = Manager.objects.create(user=self.user, point_of_care=point_of_care)\n manager.save()\n\n def test_redirect_if_not_logged_in(self):\n response = self.client.get('/care_point/illness/')\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response.url, '/account/login/?next=/care_point/illness/')\n\n def test_should_return_all_objects_from_DB(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['illness']), 3)\n\n def test_should_return_details_of_one_object(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/1/')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context['illness'].name, 'illness_1')\n\n def test_should_return_status_code_404_if_object_not_exist(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/5/')\n illnesses = Illness.objects.all()\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(len(illnesses), 3)\n\n def test_should_delete_one_object(self):\n 
self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/1/delete/')\n illnesses = Illness.objects.all()\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(len(illnesses), 2)\n\n def test_should_return_illness_form(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/add/')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response.context['form']), IllnessForm)\n\n def test_should_add_one_object(self):\n self.client.force_login(user=self.user)\n response = self.client.post('/care_point/illness/add/', {'name': 'illness_4', 'description': 'desc4'})\n illnesses = Illness.objects.all()\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(len(illnesses), 4)\n\n def test_should_return_illness_update_form(self):\n self.client.force_login(user=self.user)\n response = self.client.get('/care_point/illness/2/update/')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(type(response.context['form']), IllnessForm)\n\n def test_should_update_object(self):\n self.client.force_login(user=self.user)\n response = self.client.post('/care_point/illness/2/update/', {'name': 'illness_update', 'description': 'desc_update'})\n updated_illness = Illness.objects.filter(pk=2).first()\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response.url, \"/care_point/illness/\")\n self.assertEqual(updated_illness.name, 'illness_update')\n self.assertEqual(updated_illness.description, 'desc_update')\n\n def test_should_not_update_object_when_parameter_is_wrong(self):\n self.client.force_login(user=self.user)\n response = self.client.post('/care_point/illness/2/update/', {'call': 'illness_update', 'description': 'desc_update'})\n updated_illness = Illness.objects.filter(pk=2).first()\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual(updated_illness.name, 'illness_2')\n self.assertEqual(updated_illness.description, 'desc_2')\n","repo_name":"mario-pe/CarePoint","sub_path":"care_point/tests/test_illness.py","file_name":"test_illness.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"73201348300","text":"n, t, c, p = map(int, input().split())\nresult = 0\narr = [0] * c\nfor i in range(n):\n for j in range(c):\n if arr[j] == t:\n arr[j] = 0\n result += p\n arr[j] += 1\nprint(result)\n","repo_name":"bestswlkh0310/baekjoon-python","sub_path":"asd.py","file_name":"asd.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"43634588065","text":"import sys\nfrom typing import List\n\nimport testing.test_data\nfrom roland import DataBlock, RolandData, GenericRoland, GenericRolandWithBackwardCompatibility\nfrom Roland_JV1080 import jv_1080\nfrom Roland_JV80 import jv_80\n\nthis_module = sys.modules[__name__]\n\n# XV-3080 and XV-5080. But the XV-5080 has these Patch Split Key messages as well!? 
We can ignore them?\n_xv3080_patch_data = [DataBlock((0x00, 0x00, 0x00, 0x00), 0x4f, \"Patch common\"),\n DataBlock((0x00, 0x00, 0x02, 0x00), (0x01, 0x11), \"Patch common MFX\"),\n DataBlock((0x00, 0x00, 0x04, 0x00), 0x34, \"Patch common Chorus\"),\n DataBlock((0x00, 0x00, 0x06, 0x00), 0x53, \"Patch common Reverb\"),\n DataBlock((0x00, 0x00, 0x10, 0x00), 0x29, \"Patch common Tone Mix Table\"),\n DataBlock((0x00, 0x00, 0x20, 0x00), (0x01, 0x09), \"Tone 1\"),\n DataBlock((0x00, 0x00, 0x22, 0x00), (0x01, 0x09), \"Tone 2\"),\n DataBlock((0x00, 0x00, 0x24, 0x00), (0x01, 0x09), \"Tone 3\"),\n DataBlock((0x00, 0x00, 0x26, 0x00), (0x01, 0x09), \"Tone 4\")]\n_xv3080_edit_buffer_addresses = RolandData(\"XV-3080 Temporary Patch\", 1, 4, 4,\n (0x1f, 0x00, 0x00, 0x00),\n _xv3080_patch_data)\n_xv3080_program_buffer_addresses = RolandData(\"XV-3080 User Patches\", 128, 4, 4,\n (0x30, 0x00, 0x00, 0x00),\n _xv3080_patch_data)\n# This can be used as an alternative way to detect the XV-3080\n# _xv3080_system_common = RolandData(\"XV-3080 System Common\", 1, 4, 4, (0x00, 0x00, 0x00, 0x00),\n# [DataBlock((0x00, 0x00, 0x00, 0x00), 0x28, \"System common\")])\nxv_3080_main = GenericRoland(\"Roland XV-3080\",\n model_id=[0x00, 0x10],\n address_size=4,\n edit_buffer=_xv3080_edit_buffer_addresses,\n program_dump=_xv3080_program_buffer_addresses,\n category_index=0x0c,\n device_family=[0x10, 0x01]) # Interestingly, the XV-3080 seems the first model to support the generic device inquiry\nxv_3080 = GenericRolandWithBackwardCompatibility(xv_3080_main, [jv_80, jv_1080])\nxv_3080.install(this_module)\n\n\n# and XV-5080 and XV-5050?\n\n\ndef setupHelp():\n return \"Make sure the Receive Exclusive parameter (SYSTEM/COMMON) is ON, and the synth is set to Patch Mode\"\n\n\n# Test data picked up by test_adaptation.py\ndef make_test_data():\n def programs(data: testing.TestData) -> List[testing.ProgramTestData]:\n patch = []\n names = [\"RedPowerBass\", \"Sinus QSB\", \"Super W Bass\"]\n i = 0\n # Extract the first 3 programs from the sysex dump loaded, and yield them with name and number to the test code\n for message in data.all_messages:\n if xv_3080.isPartOfSingleProgramDump(message):\n patch.extend(message)\n if xv_3080.isSingleProgramDump(patch):\n yield testing.ProgramTestData(message=patch, name=names[i], number=i)\n patch = []\n i += 1\n if i >= len(names):\n break\n\n return testing.TestData(sysex=\"testData/jv1080_AGSOUND1.SYX\",\n program_generator=programs,\n program_dump_request=\"f0 41 10 00 10 11 30 00 00 00 00 00 00 4f 01 f7\",\n device_detect_call=\"f0 7e 00 06 01 f7\",\n device_detect_reply=(\"f0 7e 10 06 02 41 10 01 00 00 00 00 00 00 f7\", 0))\n","repo_name":"christofmuc/KnobKraft-orm","sub_path":"adaptations/Roland_XV3080.py","file_name":"Roland_XV3080.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","stars":152,"dataset":"github-code","pt":"46"} +{"seq_id":"29177164092","text":"def minDeletionSize(s: list[str]) -> int:\n deleted = 0\n i = 0\n while i < len(s[0]):\n column = [word[i] for word in s]\n if column != sorted(column): deleted += 1\n i += 1\n return deleted\n\nminDeletionSize([\"abc\", \"bce\", \"cae\"])","repo_name":"LunaTMT/LeetCode","sub_path":"Delete_Columns_to_Make_Sorted.py","file_name":"Delete_Columns_to_Make_Sorted.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"11519581908","text":"import json\nfrom dataclasses import dataclass\nfrom 
pathlib import Path\nfrom typing import List\n\nimport github_action_utils as gha_utils\nimport requests\nimport yaml\nfrom dataclass_wizard import YAMLWizard\n\n\n@dataclass\nclass Specification:\n \"\"\"Represents an OpenAPI specification.\"\"\"\n id: str\n label: str\n url: str\n\n\n@dataclass\nclass Organization:\n \"\"\"Represents an organization that provides OpenAPI specifications.\"\"\"\n id: str\n label: str\n specifications: List[Specification]\n\n\n@dataclass\nclass SpecificationsFile(YAMLWizard):\n \"\"\"Represents a file containing a list of organizations with their OpenAPI specifications.\"\"\"\n organizations: List[Organization]\n\n\ndef download_specification(url: str) -> str:\n \"\"\"Downloads an OpenAPI specification from a given URL and returns its content.\"\"\"\n try:\n response = requests.get(url)\n response.raise_for_status()\n return response.text\n except requests.exceptions.RequestException as e:\n raise ValueError(f\"Failed to download specification from {url}: {e}\")\n\n\ndef parse_specification(spec: str) -> str:\n \"\"\"Parses a given OpenAPI specification content and returns it in JSON format.\"\"\"\n try:\n parsed_spec = yaml.safe_load(spec)\n return json.dumps(parsed_spec, indent=4)\n except yaml.YAMLError as e:\n raise ValueError(f\"Failed to parse specification: {e}\")\n\n\ndef save_specification(spec: str, filepath: Path) -> None:\n \"\"\"Saves a given OpenAPI specification content to a file at the given path.\"\"\"\n try:\n filepath.parent.mkdir(parents=True, exist_ok=True)\n filepath.write_text(spec)\n except OSError as e:\n raise ValueError(f\"Failed to save specification to {filepath}: {e}\")\n\n\ndef download_format_and_save_specs(organizations: List[Organization]) -> None:\n \"\"\"\n Downloads, formats and saves OpenAPI specifications for the given organizations.\n\n For each organization and its specifications, the function downloads the content\n of each specification, parses it and saves it in JSON format to a file with a name\n that includes the organization and specification IDs. 
If any errors occur during this\n process, a warning is logged.\n \"\"\"\n for organization in organizations:\n for spec in organization.specifications:\n try:\n spec_content = download_specification(spec.url)\n validated_spec = parse_specification(spec_content)\n\n path = Path('../../') / 'openapi' / f'{organization.id}_{spec.id}.json'\n save_specification(validated_spec, path)\n\n gha_utils.debug(f\"Saved specification for {organization.label}: {spec.label} ({path})\")\n except (ValueError, requests.exceptions.RequestException, OSError) as exp:\n with gha_utils.group(\"Warnings while downloading, formatting and saving OpenApi specs\"):\n gha_utils.warning(str(exp), title=\"Warning\")\n\n\nif __name__ == \"__main__\":\n # Load the OpenAPI specifications file and extract the list of organizations.\n specifications_yaml_file = '../../specifications.json'\n parsed_specification_file = SpecificationsFile.from_yaml_file(specifications_yaml_file)\n\n # Download, format and save the OpenAPI specifications for the organizations.\n download_format_and_save_specs(parsed_specification_file.organizations)\n","repo_name":"ivpk/api-specifications","sub_path":"scripts/openapi/openapi.py","file_name":"openapi.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"25728820874","text":"class AST:\r\n def __init__(self, data, parent):\r\n self.keywords = ['void', 'int', 'double', 'bool', 'string', 'null', 'for', 'while', 'if', 'else', 'return',\r\n 'break', 'Print', 'ReadInteger', 'ReadLine']\r\n self.controls = ['for', 'while', 'if', 'else', 'return', 'break', 'Print', 'ReadInteger', 'ReadLine']\r\n self.types = ['void', 'int', 'double', 'bool', 'string', 'null']\r\n self.operators = ['+', '-', '*', '/', '%', '<', '<=', '>', '>=', '=', '==', '!=', '&&', '||', '!', ';']\r\n self.puncuation = [',', '.', '(', ')', '{', '}']\r\n self.comments = ['//', '/*', '*/']\r\n self.parent = parent\r\n self.left = None\r\n self.right = None\r\n self.data = data\r\n self.syntax_type = None\r\n self.type = None\r\n self.identifier = None\r\n self.visited = False\r\n\r\n def setChild(self, direction, AST):\r\n if direction == 'left':\r\n self.left = AST\r\n else:\r\n self.right = AST\r\n\r\n def setSyntaxType(self, syntaxType):\r\n self.syntax_type = syntaxType\r\n for i in self.types:\r\n if i in self.data and '{' in self.data:\r\n self.syntax_type = 'FnDecl:'\r\n break\r\n elif i in self.data and ';' in self.data:\r\n self.syntax_type = 'VarDecl:'\r\n\r\n def setType(self):\r\n pass\r\n\r\n def getParent(self):\r\n return self.parent\r\n\r\n def getChild(self, direction):\r\n if direction == 'left':\r\n return self.left\r\n else:\r\n return self.right\r\n\r\n def getData(self):\r\n return self.data\r\n","repo_name":"elacevedo/comp","sub_path":"pp2/decaf_ast.py","file_name":"decaf_ast.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"28843383510","text":"\"\"\"\n@file\n@brief Helpers to convert docstring to various format.\n\"\"\"\nimport os\nimport sys\nfrom collections import deque\nimport warnings\nimport pickle\nimport platform\nfrom html import escape as htmlescape\nfrom io import StringIO\nfrom docutils.parsers.rst import roles\nfrom docutils.languages import en as docutils_en\nfrom docutils import nodes\nfrom docutils.utils import Reporter\nfrom sphinx.application import Sphinx\nfrom sphinx.environment import 
BuildEnvironment\nfrom sphinx.errors import ExtensionError\nfrom sphinx.ext.extlinks import setup_link_roles\nfrom sphinx.transforms import SphinxTransformer\nfrom sphinx.writers.html import HTMLWriter\nfrom sphinx.util.build_phase import BuildPhase\nfrom sphinx.util.logging import prefixed_warnings\nfrom sphinx.project import Project\nfrom sphinx.errors import ApplicationError\nfrom sphinx.util.logging import getLogger\nfrom ..sphinxext.sphinx_doctree_builder import (\n DocTreeBuilder, DocTreeWriter, DocTreeTranslator)\nfrom ..sphinxext.sphinx_md_builder import MdBuilder, MdWriter, MdTranslator\nfrom ..sphinxext.sphinx_latex_builder import (\n EnhancedLaTeXBuilder, EnhancedLaTeXWriter, EnhancedLaTeXTranslator)\nfrom ..sphinxext.sphinx_rst_builder import RstBuilder, RstWriter, RstTranslator\nfrom ._single_file_html_builder import CustomSingleFileHTMLBuilder\n\n\ndef _get_LaTeXTranslator():\n try:\n from sphinx.writers.latex import LaTeXTranslator\n except ImportError: # pragma: no cover\n # Since sphinx 1.7.3 (circular reference).\n import sphinx.builders.latex.transforms\n from sphinx.writers.latex import LaTeXTranslator\n return LaTeXTranslator\n\n\ntry:\n from sphinx.util.docutils import is_html5_writer_available\nexcept ImportError:\n def is_html5_writer_available():\n return True\n\nif is_html5_writer_available():\n from sphinx.writers.html5 import HTML5Translator as HTMLTranslator\nelse:\n from sphinx.writers.html import HTMLTranslator # pragma: no cover\n\n\ndef update_docutils_languages(values=None):\n \"\"\"\n Updates ``docutils/languages/en.py`` with missing labels.\n It Does it for languages *en*.\n\n @param values consider values in this dictionaries first\n \"\"\"\n if values is None:\n values = dict()\n lab = docutils_en.labels\n if 'versionmodified' not in lab:\n lab['versionmodified'] = values.get(\n 'versionmodified', 'modified version')\n if 'desc' not in lab:\n lab['desc'] = values.get('desc', 'description')\n\n\nclass _AdditionalVisitDepart:\n \"\"\"\n Additional visitors and departors.\n \"\"\"\n\n def __init__(self, output_format):\n self.output_format = output_format\n\n def is_html(self):\n \"\"\"\n Tells if the translator is :epkg:`html` format.\n \"\"\"\n return self.base_class is HTMLTranslator\n\n def is_rst(self):\n \"\"\"\n Tells if the translator is :epkg:`rst` format.\n \"\"\"\n return self.base_class is RstTranslator\n\n def is_latex(self):\n \"\"\"\n Tells if the translator is :epkg:`latex` format.\n \"\"\"\n return self.base_class is _get_LaTeXTranslator()\n\n def is_md(self):\n \"\"\"\n Tells if the translator is :epkg:`markdown` format.\n \"\"\"\n return self.base_class is _get_LaTeXTranslator()\n\n def is_doctree(self):\n \"\"\"\n Tells if the translator is doctree format.\n \"\"\"\n return self.base_class is _get_LaTeXTranslator()\n\n def add_secnumber(self, node):\n \"\"\"\n Overwrites this method to catch errors due when\n it is a single document being processed.\n \"\"\"\n if node.get('secnumber'):\n self.base_class.add_secnumber(self, node)\n elif len(node.parent['ids']) > 0:\n self.base_class.add_secnumber(self, node)\n else:\n n = len(self.builder.secnumbers)\n node.parent['ids'].append(\"custom_label_%d\" % n)\n self.base_class.add_secnumber(self, node)\n\n def eval_expr(self, expr):\n rst = self.output_format == 'rst'\n latex = self.output_format in ('latex', 'elatex')\n texinfo = [('index', 'A_AdditionalVisitDepart', 'B_AdditionalVisitDepart', # pylint: disable=W0612\n 'C_AdditionalVisitDepart', 'D_AdditionalVisitDepart',\n 
'E_AdditionalVisitDepart', 'Miscellaneous')]\n html = self.output_format == 'html'\n md = self.output_format == 'md'\n doctree = self.output_format in ('doctree', 'doctree.txt')\n if not (rst or html or latex or md or doctree):\n raise ValueError( # pragma: no cover\n f\"Unknown output format '{self.output_format}'.\")\n try:\n ev = eval(expr)\n except Exception: # pragma: no cover\n raise ValueError(\n f\"Unable to interpret expression '{expr}'\")\n return ev\n\n def visit_only(self, node):\n ev = self.eval_expr(node.attributes['expr'])\n if ev:\n pass\n else:\n raise nodes.SkipNode\n\n def depart_only(self, node):\n ev = self.eval_expr(node.attributes['expr'])\n if ev:\n pass\n else:\n # The program should not necessarily be here.\n pass\n\n def visit_viewcode_anchor(self, node):\n # Removed in sphinx 3.5\n pass\n\n def depart_viewcode_anchor(self, node):\n # Removed in sphinx 3.5\n pass\n\n def unknown_visit(self, node): # pragma: no cover\n raise NotImplementedError(\n \"[_AdditionalVisitDepart] Unknown node: '{0}' in '{1}'\".format(\n node.__class__.__name__, self.__class__.__name__))\n\n\nclass HTMLTranslatorWithCustomDirectives(_AdditionalVisitDepart, HTMLTranslator):\n \"\"\"\n See @see cl HTMLWriterWithCustomDirectives.\n \"\"\"\n\n def __init__(self, document, builder, *args, **kwds):\n HTMLTranslator.__init__(self, document, builder, *args, **kwds)\n _AdditionalVisitDepart.__init__(self, 'html')\n nodes_list = getattr(builder, '_function_node', None)\n if nodes_list is not None:\n for name, f1, f2 in nodes_list:\n setattr(self.__class__, \"visit_\" + name, f1)\n setattr(self.__class__, \"depart_\" + name, f2)\n self.base_class = HTMLTranslator\n\n def visit_field(self, node):\n if not hasattr(self, '_fieldlist_row_index'):\n # needed when a docstring starts with :param:\n self._fieldlist_row_index = 0\n return HTMLTranslator.visit_field(self, node)\n\n def visit_pending_xref(self, node):\n self.visit_Text(node)\n raise nodes.SkipNode\n\n def unknown_visit(self, node): # pragma: no cover\n raise NotImplementedError(\"[HTMLTranslatorWithCustomDirectives] Unknown node: '{0}' in '{1}'\".format(\n node.__class__.__name__, self.__class__.__name__))\n\n\nclass RSTTranslatorWithCustomDirectives(_AdditionalVisitDepart, RstTranslator):\n \"\"\"\n See @see cl HTMLWriterWithCustomDirectives.\n \"\"\"\n\n def __init__(self, document, builder, *args, **kwds):\n \"\"\"\n constructor\n \"\"\"\n RstTranslator.__init__(self, document, builder, *args, **kwds)\n _AdditionalVisitDepart.__init__(self, 'rst')\n for name, f1, f2 in builder._function_node:\n setattr(self.__class__, \"visit_\" + name, f1)\n setattr(self.__class__, \"depart_\" + name, f2)\n self.base_class = RstTranslator\n\n\nclass MDTranslatorWithCustomDirectives(_AdditionalVisitDepart, MdTranslator):\n \"\"\"\n See @see cl HTMLWriterWithCustomDirectives.\n \"\"\"\n\n def __init__(self, document, builder, *args, **kwds):\n \"\"\"\n constructor\n \"\"\"\n MdTranslator.__init__(self, document, builder, *args, **kwds)\n _AdditionalVisitDepart.__init__(self, 'md')\n for name, f1, f2 in builder._function_node:\n setattr(self.__class__, \"visit_\" + name, f1)\n setattr(self.__class__, \"depart_\" + name, f2)\n self.base_class = MdTranslator\n\n\nclass DocTreeTranslatorWithCustomDirectives(DocTreeTranslator):\n \"\"\"\n See @see cl HTMLWriterWithCustomDirectives.\n \"\"\"\n\n def __init__(self, document, builder, *args, **kwds):\n \"\"\"\n constructor\n \"\"\"\n DocTreeTranslator.__init__(self, document, builder, *args, **kwds)\n 
self.base_class = DocTreeTranslator\n\n\nclass LatexTranslatorWithCustomDirectives(_AdditionalVisitDepart, EnhancedLaTeXTranslator):\n \"\"\"\n See @see cl LatexWriterWithCustomDirectives.\n \"\"\"\n\n def __init__(self, document, builder, *args, **kwds):\n \"\"\"\n constructor\n \"\"\"\n if not hasattr(builder, \"config\"):\n builder, document = document, builder\n if not hasattr(builder, \"config\"):\n raise TypeError( # pragma: no cover\n f\"Builder has no config: {type(builder)} - {type(document)}\")\n EnhancedLaTeXTranslator.__init__(\n self, document, builder, *args, **kwds)\n _AdditionalVisitDepart.__init__(self, 'md')\n for name, f1, f2 in builder._function_node:\n setattr(self.__class__, \"visit_\" + name, f1)\n setattr(self.__class__, \"depart_\" + name, f2)\n self.base_class = EnhancedLaTeXTranslator\n\n\nclass _WriterWithCustomDirectives:\n \"\"\"\n Common class to @see cl HTMLWriterWithCustomDirectives and @see cl RSTWriterWithCustomDirectives.\n \"\"\"\n\n def _init(self, base_class, translator_class, app=None):\n \"\"\"\n @param base_class base class\n @param app Sphinx application\n \"\"\"\n if app is None:\n self.app = _CustomSphinx(srcdir=None, confdir=None, outdir=None, doctreedir=None,\n buildername='memoryhtml')\n else:\n self.app = app\n builder = self.app.builder\n builder.fignumbers = {}\n base_class.__init__(self, builder)\n self.translator_class = translator_class\n self.builder.secnumbers = {}\n self.builder._function_node = []\n self.builder.current_docname = None\n self.base_class = base_class\n\n def connect_directive_node(self, name, f_visit, f_depart):\n \"\"\"\n Adds custom node to the translator.\n\n @param name name of the directive\n @param f_visit visit function\n @param f_depart depart function\n \"\"\"\n if self.builder.format != \"doctree\":\n self.builder._function_node.append((name, f_visit, f_depart))\n\n def add_configuration_options(self, new_options):\n \"\"\"\n Add new options.\n\n @param new_options new options\n \"\"\"\n for k, v in new_options.items():\n self.builder.config.values[k] = v\n\n def write(self, document, destination):\n \"\"\"\n Processes a document into its final form.\n Translates `document` (a Docutils document tree) into the Writer's\n native format, and write it out to its `destination` (a\n `docutils.io.Output` subclass object).\n\n Normally not overridden or extended in subclasses.\n \"\"\"\n self.base_class.write(self, document, destination)\n\n\nclass HTMLWriterWithCustomDirectives(_WriterWithCustomDirectives, HTMLWriter):\n \"\"\"\n This :epkg:`docutils` writer extends the HTML writer with\n custom directives implemented in this module,\n @see cl RunPythonDirective, @see cl BlogPostDirective.\n\n See `Write your own ReStructuredText-Writer `_.\n\n This class needs to tell :epkg:`docutils` to call the added function\n when directives *runpython* or *blogpost* are met.\n \"\"\"\n\n def __init__(self, builder=None, app=None): # pylint: disable=W0231\n \"\"\"\n @param builder builder\n @param app Sphinx application\n \"\"\"\n _WriterWithCustomDirectives._init(\n self, HTMLWriter, HTMLTranslatorWithCustomDirectives, app)\n\n def translate(self):\n self.visitor = visitor = self.translator_class(\n self.document, self.builder)\n self.document.walkabout(visitor)\n self.output = visitor.astext()\n for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix',\n 'body_pre_docinfo', 'docinfo', 'body', 'fragment',\n 'body_suffix', 'meta', 'title', 'subtitle', 'header',\n 'footer', 'html_prolog', 'html_head', 'html_title',\n 
'html_subtitle', 'html_body', ):\n setattr(self, attr, getattr(visitor, attr, None))\n self.clean_meta = ''.join(visitor.meta[2:])\n\n\nclass RSTWriterWithCustomDirectives(_WriterWithCustomDirectives, RstWriter):\n \"\"\"\n This :epkg:`docutils` writer extends the :epkg:`RST` writer with\n custom directives implemented in this module.\n \"\"\"\n\n def __init__(self, builder=None, app=None): # pylint: disable=W0231\n \"\"\"\n @param builder builder\n @param app Sphinx application\n \"\"\"\n _WriterWithCustomDirectives._init(\n self, RstWriter, RSTTranslatorWithCustomDirectives, app)\n\n def translate(self):\n visitor = self.translator_class(self.document, self.builder)\n self.document.walkabout(visitor)\n self.output = visitor.body\n\n\nclass MDWriterWithCustomDirectives(_WriterWithCustomDirectives, MdWriter):\n \"\"\"\n This :epkg:`docutils` writer extends the :epkg:`MD` writer with\n custom directives implemented in this module.\n \"\"\"\n\n def __init__(self, builder=None, app=None): # pylint: disable=W0231\n \"\"\"\n @param builder builder\n @param app Sphinx application\n \"\"\"\n _WriterWithCustomDirectives._init(\n self, MdWriter, MDTranslatorWithCustomDirectives, app)\n\n def translate(self):\n visitor = self.translator_class(self.document, self.builder)\n self.document.walkabout(visitor)\n self.output = visitor.body\n\n\nclass DocTreeWriterWithCustomDirectives(_WriterWithCustomDirectives, DocTreeWriter):\n \"\"\"\n This :epkg:`docutils` writer creates a doctree writer with\n custom directives implemented in this module.\n \"\"\"\n\n def __init__(self, builder=None, app=None): # pylint: disable=W0231\n \"\"\"\n @param builder builder\n @param app Sphinx application\n \"\"\"\n _WriterWithCustomDirectives._init(\n self, DocTreeWriter, DocTreeTranslatorWithCustomDirectives, app)\n\n def translate(self):\n visitor = self.translator_class(self.document, self.builder)\n self.document.walkabout(visitor)\n self.output = visitor.body\n\n\nclass LatexWriterWithCustomDirectives(_WriterWithCustomDirectives, EnhancedLaTeXWriter):\n \"\"\"\n This :epkg:`docutils` writer extends the :epkg:`Latex` writer with\n custom directives implemented in this module.\n \"\"\"\n\n def __init__(self, builder=None, app=None): # pylint: disable=W0231\n \"\"\"\n @param builder builder\n @param app Sphinx application\n \"\"\"\n _WriterWithCustomDirectives._init(\n self, EnhancedLaTeXWriter, LatexTranslatorWithCustomDirectives, app)\n if not hasattr(self.builder, \"config\"):\n raise TypeError( # pragma: no cover\n f\"Builder has no config: {type(self.builder)}\")\n\n def translate(self):\n if not hasattr(self.builder, \"config\"):\n raise TypeError( # pragma: no cover\n f\"Builder has no config: {type(self.builder)}\")\n # The instruction\n # visitor = self.builder.create_translator(self.document, self.builder)\n # automatically adds methods visit_ and depart_ for translator\n # based on the list of registered extensions. 
Might be worth using it.\n theme = self.builder.themes.get('manual')\n if theme is None:\n raise RuntimeError( # pragma: no cover\n \"theme cannot be None.\")\n visitor = self.translator_class(\n self.document, self.builder, theme=theme)\n self.document.walkabout(visitor)\n self.output = visitor.body\n\n\nclass _MemoryBuilder:\n \"\"\"\n Builds :epkg:`HTML` output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n \"\"\"\n\n def _init(self, base_class, app, env=None):\n \"\"\"\n Constructs the builder.\n Most of the parameter are static members of the class and cannot\n be overwritten (yet).\n\n :param base_class: base builder class\n :param app: :epkg:`Sphinx application`\n :param env: Environment\n \"\"\"\n if \"IMPOSSIBLE:TOFIND\" in app.srcdir:\n import sphinx.util.osutil\n from .conf_path_tools import custom_ensuredir\n sphinx.util.osutil.ensuredir = custom_ensuredir\n sphinx.builders.ensuredir = custom_ensuredir\n\n try:\n base_class.__init__(self, app=app, env=env)\n except TypeError:\n # older version of sphinx\n base_class.__init__(self, app=app)\n self.built_pages = {}\n self.base_class = base_class\n\n def iter_pages(self):\n \"\"\"\n Enumerate created pages.\n\n @return iterator on tuple(name, content)\n \"\"\"\n for k, v in self.built_pages.items():\n yield k, v.getvalue()\n\n def create_translator(self, *args):\n \"\"\"\n Returns an instance of translator.\n This method returns an instance of ``default_translator_class`` by default.\n Users can replace the translator class with ``app.set_translator()`` API.\n \"\"\"\n translator_class = self.translator_class\n return translator_class(*args)\n\n def _write_serial(self, docnames):\n \"\"\"\n Overwrites *_write_serial* to avoid writing on disk.\n \"\"\"\n from sphinx.util.logging import pending_warnings\n try:\n from sphinx.util.display import status_iterator\n except ImportError:\n from sphinx.util import status_iterator\n with pending_warnings():\n for docname in status_iterator(docnames, 'writing output... ', \"darkgreen\",\n len(docnames), self.app.verbosity):\n doctree = self.env.get_and_resolve_doctree(docname, self)\n self.write_doc_serialized(docname, doctree)\n self.write_doc(docname, doctree)\n\n def _write_parallel(self, docnames, nproc):\n \"\"\"\n Not supported.\n \"\"\"\n raise NotImplementedError(\n \"Use parallel=0 when creating the sphinx application.\")\n\n def assemble_doctree(self, *args, **kwargs):\n \"\"\"\n Overwrites *assemble_doctree* to control the doctree.\n \"\"\"\n from sphinx.util.nodes import inline_all_toctrees\n from sphinx.util.console import darkgreen\n master = self.config.master_doc\n if hasattr(self, \"doctree_\"):\n tree = self.doctree_\n else:\n raise AttributeError( # pragma: no cover\n \"Attribute 'doctree_' is not present. 
Call method finalize().\")\n tree = inline_all_toctrees(\n self, set(), master, tree, darkgreen, [master])\n tree['docname'] = master\n self.env.resolve_references(tree, master, self)\n self.fix_refuris(tree)\n return tree\n\n def fix_refuris(self, tree):\n \"\"\"\n Overwrites *fix_refuris* to control the reference names.\n \"\"\"\n fname = \"__\" + self.config.master_doc + \"__\"\n for refnode in tree.traverse(nodes.reference):\n if 'refuri' not in refnode:\n continue\n refuri = refnode['refuri']\n hashindex = refuri.find('#')\n if hashindex < 0:\n continue\n hashindex = refuri.find('#', hashindex + 1)\n if hashindex >= 0:\n refnode['refuri'] = fname + refuri[hashindex:]\n\n def get_target_uri(self, docname, typ=None):\n \"\"\"\n Overwrites *get_target_uri* to control the page name.\n \"\"\"\n if docname in self.env.all_docs:\n # all references are on the same page...\n return self.config.master_doc + '#document-' + docname\n elif docname in (\"genindex\", \"search\"):\n return self.config.master_doc + '-#' + docname\n else:\n docs = \", \".join( # pragma: no cover\n sorted(f\"'{_}'\" for _ in self.env.all_docs))\n raise ValueError( # pragma: no cover\n f\"docname='{docname}' should be in 'self.env.all_docs' which contains:\\n{docs}\")\n\n def get_outfilename(self, pagename):\n \"\"\"\n Overwrites *get_target_uri* to control file names.\n \"\"\"\n return f\"{self.outdir}/{pagename}.m.html\".replace(\"\\\\\", \"/\")\n\n def handle_page(self, pagename, addctx, templatename='page.html',\n outfilename=None, event_arg=None):\n \"\"\"\n Overrides *handle_page* to write into stream instead of files.\n \"\"\"\n from sphinx.util.osutil import relative_uri\n ctx = self.globalcontext.copy()\n if hasattr(self, \"warning\"):\n ctx['warn'] = self.warning\n elif hasattr(self, \"warn\"):\n ctx['warn'] = self.warn\n # current_page_name is backwards compatibility\n ctx['pagename'] = ctx['current_page_name'] = pagename\n ctx['encoding'] = self.config.html_output_encoding\n default_baseuri = self.get_target_uri(pagename)\n # in the singlehtml builder, default_baseuri still contains an #anchor\n # part, which relative_uri doesn't really like...\n default_baseuri = default_baseuri.rsplit('#', 1)[0]\n\n def pathto(otheruri, resource=False, baseuri=default_baseuri):\n if resource and '://' in otheruri:\n # allow non-local resources given by scheme\n return otheruri\n elif not resource:\n otheruri = self.get_target_uri(otheruri)\n uri = relative_uri(baseuri, otheruri) or '#'\n if uri == '#' and not self.allow_sharp_as_current_path:\n uri = baseuri\n return uri\n ctx['pathto'] = pathto\n\n def css_tag(css):\n attrs = []\n for key in sorted(css.attributes):\n value = css.attributes[key]\n if value is not None:\n attrs.append('%s=\"%s\"' % (key, htmlescape( # pylint: disable=W1505\n value, True))) # pylint: disable=W1505\n attrs.append(f'href=\"{pathto(css.filename, resource=True)}\"')\n return f\"\"\n ctx['css_tag'] = css_tag\n\n def hasdoc(name):\n if name in self.env.all_docs:\n return True\n elif name == 'search' and self.search:\n return True\n elif name == 'genindex' and self.get_builder_config('use_index', 'html'):\n return True\n return False\n ctx['hasdoc'] = hasdoc\n\n ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw)\n self.add_sidebars(pagename, ctx)\n ctx.update(addctx)\n\n self.update_page_context(pagename, templatename, ctx, event_arg)\n newtmpl = self.app.emit_firstresult('html-page-context', pagename,\n templatename, ctx, event_arg)\n if newtmpl:\n templatename = newtmpl\n\n 
try:\n output = self.templates.render(templatename, ctx)\n except UnicodeError: # pragma: no cover\n logger = getLogger(\"MockSphinxApp\")\n logger.warning(\"[_CustomSphinx] A unicode error occurred when rendering the page %s. \"\n \"Please make sure all config values that contain \"\n \"non-ASCII content are Unicode strings.\", pagename)\n return\n\n if not outfilename:\n outfilename = self.get_outfilename(pagename)\n # outfilename's path is in general different from self.outdir\n # ensuredir(path.dirname(outfilename))\n if outfilename not in self.built_pages:\n self.built_pages[outfilename] = StringIO()\n self.built_pages[outfilename].write(output)\n\n\nclass MemoryHTMLBuilder(_MemoryBuilder, CustomSingleFileHTMLBuilder):\n \"\"\"\n Builds :epkg:`HTML` output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n \"\"\"\n name = 'memoryhtml'\n format = 'html'\n out_suffix = None # \".memory.html\"\n supported_image_types = ['application/pdf', 'image/png', 'image/jpeg']\n default_translator_class = HTMLTranslatorWithCustomDirectives\n translator_class = HTMLTranslatorWithCustomDirectives\n _writer_class = HTMLWriterWithCustomDirectives\n supported_remote_images = True\n supported_data_uri_images = True\n html_scaled_image_link = True\n\n def __init__(self, app, env=None): # pylint: disable=W0231\n \"\"\"\n Construct the builder.\n Most of the parameter are static members of the class and cannot\n be overwritten (yet).\n\n :param app: :epkg:`Sphinx application`\n \"\"\"\n _MemoryBuilder._init(self, CustomSingleFileHTMLBuilder, app, env=env)\n\n\nclass MemoryRSTBuilder(_MemoryBuilder, RstBuilder):\n\n \"\"\"\n Builds :epkg:`RST` output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n The writer simplifies the :epkg:`RST` syntax by replacing\n custom roles into true :epkg:`RST` syntax.\n \"\"\"\n\n name = 'memoryrst'\n format = 'rst'\n out_suffix = None # \".memory.rst\"\n supported_image_types = ['application/pdf', 'image/png', 'image/jpeg']\n default_translator_class = RSTTranslatorWithCustomDirectives\n translator_class = RSTTranslatorWithCustomDirectives\n _writer_class = RSTWriterWithCustomDirectives\n supported_remote_images = True\n supported_data_uri_images = True\n html_scaled_image_link = True\n\n def __init__(self, app, env=None): # pylint: disable=W0231\n \"\"\"\n Construct the builder.\n Most of the parameter are static members of the class and cannot\n be overwritten (yet).\n\n :param app: :epkg:`Sphinx application`\n \"\"\"\n _MemoryBuilder._init(self, RstBuilder, app, env=env)\n\n def handle_page(self, pagename, addctx, templatename=None,\n outfilename=None, event_arg=None):\n \"\"\"\n Override *handle_page* to write into stream instead of files.\n \"\"\"\n if templatename is not None:\n raise NotImplementedError(\n \"templatename must be None.\") # pragma: no cover\n if not outfilename:\n outfilename = self.get_outfilename(pagename)\n if outfilename not in self.built_pages:\n self.built_pages[outfilename] = StringIO()\n self.built_pages[outfilename].write(self.writer.output)\n\n\nclass MemoryMDBuilder(_MemoryBuilder, MdBuilder):\n \"\"\"\n Builds :epkg:`MD` output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n \"\"\"\n name = 'memorymd'\n format = 'md'\n out_suffix = None # \".memory.rst\"\n supported_image_types = ['application/pdf', 'image/png', 'image/jpeg']\n default_translator_class = MDTranslatorWithCustomDirectives\n translator_class = MDTranslatorWithCustomDirectives\n _writer_class = MDWriterWithCustomDirectives\n 
supported_remote_images = True\n supported_data_uri_images = True\n html_scaled_image_link = True\n\n def __init__(self, app, env=None): # pylint: disable=W0231\n \"\"\"\n Construct the builder.\n Most of the parameter are static members of the class and cannot\n be overwritten (yet).\n\n :param app: :epkg:`Sphinx application`\n \"\"\"\n _MemoryBuilder._init(self, MdBuilder, app, env=env)\n\n def handle_page(self, pagename, addctx, templatename=None,\n outfilename=None, event_arg=None):\n \"\"\"\n Override *handle_page* to write into stream instead of files.\n \"\"\"\n if templatename is not None:\n raise NotImplementedError(\n \"templatename must be None.\") # pragma: no cover\n if not outfilename:\n outfilename = self.get_outfilename(pagename)\n if outfilename not in self.built_pages:\n self.built_pages[outfilename] = StringIO()\n self.built_pages[outfilename].write(self.writer.output)\n\n\nclass MemoryDocTreeBuilder(_MemoryBuilder, DocTreeBuilder):\n \"\"\"\n Builds doctree output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n \"\"\"\n name = 'memorydoctree'\n format = 'doctree'\n out_suffix = None # \".memory.rst\"\n default_translator_class = DocTreeTranslatorWithCustomDirectives\n translator_class = DocTreeTranslatorWithCustomDirectives\n _writer_class = DocTreeWriterWithCustomDirectives\n supported_remote_images = True\n supported_data_uri_images = True\n html_scaled_image_link = True\n\n def __init__(self, app, env=None): # pylint: disable=W0231\n \"\"\"\n Constructs the builder.\n Most of the parameter are static members of the class and cannot\n be overwritten (yet).\n\n :param app: :epkg:`Sphinx application`\n \"\"\"\n _MemoryBuilder._init(self, DocTreeBuilder, app, env=env)\n\n def handle_page(self, pagename, addctx, templatename=None,\n outfilename=None, event_arg=None):\n \"\"\"\n Override *handle_page* to write into stream instead of files.\n \"\"\"\n if templatename is not None:\n raise NotImplementedError(\n \"templatename must be None.\") # pragma: no cover\n if not outfilename:\n outfilename = self.get_outfilename(pagename)\n if outfilename not in self.built_pages:\n self.built_pages[outfilename] = StringIO()\n self.built_pages[outfilename].write(self.writer.output)\n\n\nclass MemoryLatexBuilder(_MemoryBuilder, EnhancedLaTeXBuilder):\n \"\"\"\n Builds :epkg:`Latex` output in memory.\n The API is defined by the page\n :epkg:`builderapi`.\n \"\"\"\n name = 'memorylatex'\n format = 'tex'\n out_suffix = None # \".memory.tex\"\n supported_image_types = ['image/png', 'image/jpeg', 'image/gif']\n default_translator_class = LatexTranslatorWithCustomDirectives\n translator_class = LatexTranslatorWithCustomDirectives\n _writer_class = LatexWriterWithCustomDirectives\n supported_remote_images = True\n supported_data_uri_images = True\n html_scaled_image_link = True\n\n def __init__(self, app, env=None): # pylint: disable=W0231\n \"\"\"\n Constructs the builder.\n Most of the parameter are static members of the class and cannot\n be overwritten (yet).\n\n :param app: :epkg:`Sphinx application`\n \"\"\"\n _MemoryBuilder._init(self, EnhancedLaTeXBuilder, app, env=env)\n\n def write_stylesheet(self):\n from sphinx.highlighting import PygmentsBridge\n highlighter = PygmentsBridge('latex', self.config.pygments_style)\n rows = []\n rows.append('\\\\NeedsTeXFormat{LaTeX2e}[1995/12/01]\\n')\n rows.append('\\\\ProvidesPackage{sphinxhighlight}')\n rows.append(\n '[2016/05/29 stylesheet for highlighting with pygments]\\n\\n')\n rows.append(highlighter.get_stylesheet())\n 
self.built_pages['sphinxhighlight.sty'] = StringIO()\n self.built_pages['sphinxhighlight.sty'].write(\"\".join(rows))\n\n class EnhancedStringIO(StringIO):\n def write(self, content):\n if isinstance(content, str):\n StringIO.write(self, content)\n else:\n for line in content:\n StringIO.write(self, line)\n\n def _get_filename(self, targetname, encoding='utf-8', overwrite_if_changed=True):\n if not isinstance(targetname, str):\n raise TypeError( # pragma: no cover\n f\"targetname must be a string: {targetname}\")\n destination = MemoryLatexBuilder.EnhancedStringIO()\n self.built_pages[targetname] = destination\n return destination\n\n\nclass _CustomBuildEnvironment(BuildEnvironment):\n \"\"\"\n Overrides some functionalities of\n `BuildEnvironment `_.\n \"\"\"\n\n def __init__(self, app):\n \"\"\"\n \"\"\"\n BuildEnvironment.__init__(self, app)\n self.doctree_ = {}\n\n def get_doctree(self, docname):\n \"\"\"Read the doctree for a file from the pickle and return it.\"\"\"\n if hasattr(self, \"doctree_\") and docname in self.doctree_:\n from sphinx.util.docutils import WarningStream\n doctree = self.doctree_[docname]\n doctree.settings.env = self\n doctree.reporter = Reporter(self.doc2path(\n docname), 2, 5, stream=WarningStream())\n return doctree\n\n if hasattr(self, \"doctree_\"):\n available = list(sorted(self.doctree_))\n if len(available) > 10:\n available = available[10:]\n raise KeyError(\n \"Unable to find entry '{}' (has doctree: {})\\nFirst documents:\\n{}\"\n \"\".format(\n docname, hasattr(self, \"doctree_\"),\n \"\\n\".join(available)))\n\n raise KeyError( # pragma: no cover\n \"Doctree empty or not found for '{}' (has doctree: {})\"\n \"\".format(\n docname, hasattr(self, \"doctree_\")))\n # return BuildEnvironment.get_doctree(self, docname)\n\n def apply_post_transforms(self, doctree, docname):\n \"\"\"Apply all post-transforms.\"\"\"\n # set env.docname during applying post-transforms\n self.temp_data['docname'] = docname\n\n transformer = SphinxTransformer(doctree)\n transformer.set_environment(self)\n transformer.add_transforms(self.app.post_transforms)\n transformer.apply_transforms()\n self.temp_data.clear()\n\n\nclass _CustomSphinx(Sphinx):\n \"\"\"\n Custom :epkg:`Sphinx` application to avoid using disk.\n \"\"\"\n\n def __init__(self, srcdir, confdir, outdir, doctreedir, buildername=\"memoryhtml\", # pylint: disable=W0231\n confoverrides=None, status=None, warning=None,\n freshenv=False, warningiserror=False,\n tags=None, verbosity=0, parallel=0, keep_going=False,\n new_extensions=None):\n '''\n Same constructor as :epkg:`Sphinx application`.\n Additional parameters:\n\n @param new_extensions extensions to add to the application\n\n Some insights about domains:\n\n ::\n\n {'cpp': sphinx.domains.cpp.CPPDomain,\n 'hpp': sphinx.domains.cpp.CPPDomain,\n 'h': sphinx.domains.cpp.CPPDomain,\n 'js': sphinx.domains.javascript.JavaScriptDomain,\n 'std': sphinx.domains.std.StandardDomain,\n 'py': sphinx.domains.python.PythonDomain,\n 'rst': sphinx.domains.rst.ReSTDomain,\n 'c': sphinx.domains.c.CDomain}\n\n And builders:\n\n ::\n\n {'epub': ('epub', 'EpubBuilder'),\n 'singlehtml': ('html', 'SingleFileHTMLBuilder'),\n 'qthelp': ('qthelp', 'QtHelpBuilder'),\n 'epub3': ('epub3', 'Epub3Builder'),\n 'man': ('manpage', 'ManualPageBuilder'),\n 'dummy': ('dummy', 'DummyBuilder'),\n 'json': ('html', 'JSONHTMLBuilder'),\n 'html': ('html', 'StandaloneHTMLBuilder'),\n 'xml': ('xml', 'XMLBuilder'),\n 'texinfo': ('texinfo', 'TexinfoBuilder'),\n 'devhelp': ('devhelp', 'DevhelpBuilder'),\n 
'web': ('html', 'PickleHTMLBuilder'),\n 'pickle': ('html', 'PickleHTMLBuilder'),\n 'htmlhelp': ('htmlhelp', 'HTMLHelpBuilder'),\n 'applehelp': ('applehelp', 'AppleHelpBuilder'),\n 'linkcheck': ('linkcheck', 'CheckExternalLinksBuilder'),\n 'dirhtml': ('html', 'DirectoryHTMLBuilder'),\n 'latex': ('latex', 'LaTeXBuilder'),\n 'elatex': ('latex', 'EnchancedLaTeXBuilder'),\n 'text': ('text', 'TextBuilder'),\n 'changes': ('changes', 'ChangesBuilder'),\n 'websupport': ('websupport', 'WebSupportBuilder'),\n 'gettext': ('gettext', 'MessageCatalogBuilder'),\n 'pseudoxml': ('xml', 'PseudoXMLBuilder')}\n 'rst': ('rst', 'RstBuilder')}\n 'md': ('md', 'MdBuilder'),\n 'doctree': ('doctree', 'DocTreeBuilder')}\n '''\n # own purpose (to monitor)\n self._logger = getLogger(\"_CustomSphinx\")\n self._added_objects = []\n self._added_collectors = []\n\n # from sphinx.domains.cpp import CPPDomain\n # from sphinx.domains.javascript import JavaScriptDomain\n # from sphinx.domains.python import PythonDomain\n # from sphinx.domains.std import StandardDomain\n # from sphinx.domains.rst import ReSTDomain\n # from sphinx.domains.c import CDomain\n\n from sphinx.registry import SphinxComponentRegistry\n self.phase = BuildPhase.INITIALIZATION\n self.verbosity = verbosity\n self.extensions = {}\n self.builder = None\n self.env = None\n self.project = None\n self.registry = SphinxComponentRegistry()\n self.post_transforms = []\n self.pdb = False\n\n if doctreedir is None:\n doctreedir = \"IMPOSSIBLE:TOFIND\"\n if srcdir is None:\n srcdir = \"IMPOSSIBLE:TOFIND\"\n update_docutils_languages()\n\n self.srcdir = os.path.abspath(srcdir)\n self.confdir = os.path.abspath(\n confdir) if confdir is not None else None\n self.outdir = os.path.abspath(outdir) if confdir is not None else None\n self.doctreedir = os.path.abspath(doctreedir)\n self.parallel = parallel\n\n if self.srcdir == self.outdir:\n raise ApplicationError('Source directory and destination ' # pragma: no cover\n 'directory cannot be identical')\n\n if status is None:\n self._status = StringIO()\n self.quiet = True\n else:\n self._status = status\n self.quiet = False\n\n from sphinx.events import EventManager\n # logging.setup(self, self._status, self._warning)\n self.events = EventManager(self)\n\n # keep last few messages for traceback\n # This will be filled by sphinx.util.logging.LastMessagesWriter\n self.messagelog = deque(maxlen=10)\n\n # say hello to the world\n from sphinx import __display_version__\n self.info(f'Running Sphinx v{__display_version__}') # pragma: no cover\n\n # notice for parallel build on macOS and py38+\n if sys.version_info > (3, 8) and platform.system() == 'Darwin' and parallel > 1:\n self._logger.info( # pragma: no cover\n \"For security reason, parallel mode is disabled on macOS and \"\n \"python3.8 and above. 
For more details, please read \"\n \"https://github.com/sphinx-doc/sphinx/issues/6803\")\n\n # status code for command-line application\n self.statuscode = 0\n\n # delayed import to speed up time\n from sphinx.application import builtin_extensions\n from sphinx.config import CONFIG_FILENAME, Config, Tags\n\n # read config\n self.tags = Tags(tags)\n with warnings.catch_warnings():\n warnings.simplefilter(\n \"ignore\", (DeprecationWarning, PendingDeprecationWarning))\n if self.confdir is None:\n self.config = Config({}, confoverrides or {})\n else: # pragma: no cover\n try:\n self.config = Config.read(\n self.confdir, confoverrides or {}, self.tags)\n except AttributeError:\n try:\n self.config = Config( # pylint: disable=E1121\n confdir, confoverrides or {}, self.tags)\n except TypeError:\n try:\n self.config = Config(confdir, CONFIG_FILENAME, # pylint: disable=E1121\n confoverrides or {}, self.tags)\n except TypeError:\n # Sphinx==3.0.0\n self.config = Config({}, confoverrides or {})\n self.sphinx__display_version__ = __display_version__\n\n # create the environment\n self.config.pre_init_values()\n\n # set up translation infrastructure\n self._init_i18n()\n\n # check the Sphinx version if requested\n if (self.config.needs_sphinx and self.config.needs_sphinx >\n __display_version__): # pragma: no cover\n from sphinx.locale import _\n from sphinx.application import VersionRequirementError\n raise VersionRequirementError(\n _('This project needs at least Sphinx v%s and therefore cannot '\n 'be built with this version.') % self.config.needs_sphinx)\n\n # set confdir to srcdir if -C given (!= no confdir); a few pieces\n # of code expect a confdir to be set\n if self.confdir is None:\n self.confdir = self.srcdir\n\n # load all built-in extension modules\n for extension in builtin_extensions:\n try:\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning)\n self.setup_extension(extension)\n except Exception as e: # pragma: no cover\n if 'sphinx.builders.applehelp' not in str(e): # pragma: no cover\n mes = \"Unable to run setup_extension '{0}'\\nWHOLE LIST\\n{1}\".format(\n extension, \"\\n\".join(builtin_extensions))\n raise ExtensionError(mes) from e\n\n # load all user-given extension modules\n for extension in self.config.extensions:\n self.setup_extension(extension)\n\n # /1 addition to the original code\n # additional extensions\n if new_extensions:\n for extension in new_extensions:\n if isinstance(extension, str):\n self.setup_extension(extension)\n else: # pragma: no cover\n # We assume it is a module.\n dirname = os.path.dirname(extension.__file__)\n sys.path.insert(0, dirname)\n self.setup_extension(extension.__name__)\n del sys.path[0]\n\n # add default HTML builders\n self.add_builder(MemoryHTMLBuilder)\n self.add_builder(MemoryRSTBuilder)\n self.add_builder(MemoryMDBuilder)\n self.add_builder(MemoryLatexBuilder)\n self.add_builder(MemoryDocTreeBuilder)\n\n if isinstance(buildername, tuple):\n if len(buildername) != 2:\n raise ValueError( # pragma: no cover\n \"The builder can be custom but it must be specifed \"\n \"as a 2-uple=(builder_name, builder_class).\")\n self.add_builder(buildername[1])\n buildername = buildername[0]\n\n # /1 end of addition\n\n # preload builder module (before init config values)\n self.preload_builder(buildername)\n\n # the config file itself can be an extension\n if self.config.setup:\n prefix = f\"while setting up extension {'conf.py'}:\"\n if prefixed_warnings is not None:\n with 
prefixed_warnings(prefix):\n if callable(self.config.setup):\n self.config.setup(self)\n else: # pragma: no cover\n from sphinx.locale import _\n from sphinx.application import ConfigError\n raise ConfigError(\n _(\"'setup' as currently defined in conf.py isn't a Python callable. \"\n \"Please modify its definition to make it a callable function. This is \"\n \"needed for conf.py to behave as a Sphinx extension.\")\n )\n elif callable(self.config.setup):\n self.config.setup(self)\n\n # now that we know all config values, collect them from conf.py\n noallowed = []\n rem = []\n for k in confoverrides:\n if k in {'initial_header_level', 'doctitle_xform', 'input_encoding',\n 'outdir', 'warnings_log', 'extensions'}:\n continue\n if k == 'override_image_directive':\n self.config.images_config[\"override_image_directive\"] = True\n rem.append(k)\n continue\n if k not in self.config.values:\n noallowed.append(k)\n for k in rem:\n del confoverrides[k]\n if len(noallowed) > 0:\n raise ValueError( # pragma: no cover\n \"The following configuration values are declared in any extension.\\n--???--\\n\"\n \"{0}\\n--DECLARED--\\n{1}\".format(\n \"\\n\".join(sorted(noallowed)),\n \"\\n\".join(sorted(self.config.values))))\n\n # now that we know all config values, collect them from conf.py\n self.config.init_values()\n self.events.emit('config-inited', self.config)\n\n # /2 addition to the original code\n # check extension versions if requested\n # self.config.needs_extensions = self.config.extensions\n if not hasattr(self.config, 'items'):\n\n def _citems():\n for k, v in self.config.values.items():\n yield k, v\n\n self.config.items = _citems\n\n # /2 end of addition\n\n # create the project\n self.project = Project(self.srcdir, self.config.source_suffix)\n # set up the build environment\n self._init_env(freshenv)\n assert self.env is not None\n # create the builder, initializes _MemoryBuilder\n self.builder = self.create_builder(buildername)\n # build environment post-initialisation, after creating the builder\n if hasattr(self, \"_post_init_env\"):\n self._post_init_env()\n # set up the builder\n self._init_builder()\n\n if not isinstance(self.env, _CustomBuildEnvironment):\n raise TypeError( # pragma: no cover\n f\"self.env is not _CustomBuildEnvironment: {type(self.env)!r} \"\n f\"buildername='{buildername}'\")\n\n # addition\n self._extended_init_()\n\n # verification\n self._check_init_()\n\n def _init_builder(self) -> None:\n if not hasattr(self.builder, \"env\") or self.builder.env is None:\n self.builder.set_environment(self.env)\n self.builder.init()\n self.events.emit('builder-inited')\n\n def _check_init_(self):\n pass\n\n def _init_env(self, freshenv):\n ENV_PICKLE_FILENAME = 'environment.pickle'\n filename = os.path.join(self.doctreedir, ENV_PICKLE_FILENAME)\n if freshenv or not os.path.exists(filename):\n self.env = _CustomBuildEnvironment(self)\n self._fresh_env_used = True\n self.env.setup(self)\n if (self.srcdir is not None and self.srcdir != \"IMPOSSIBLE:TOFIND\" and\n self.builder is not None):\n self.env.find_files(self.config, self.builder)\n return self.env\n\n if \"IMPOSSIBLE:TOFIND\" not in self.doctreedir: # pragma: no cover\n from sphinx.application import ENV_PICKLE_FILENAME\n filename = os.path.join(self.doctreedir, ENV_PICKLE_FILENAME)\n try:\n self.info('loading pickled environment... 
')\n with open(filename, 'rb') as f:\n self.env = pickle.load(f)\n self.env.setup(self)\n self.info('done')\n return self.env\n except Exception as err:\n self.info('failed: %r', err)\n return self._init_env(freshenv=True)\n\n if self.env is None: # pragma: no cover\n self.env = _CustomBuildEnvironment(self)\n if hasattr(self.env, 'setup'):\n self.env.setup(self)\n return self.env\n\n if not hasattr(self.env, 'project') or self.env.project is None:\n raise AttributeError( # pragma: no cover\n \"self.env.project is not initialized.\")\n\n def create_builder(self, name):\n \"\"\"\n Creates a builder, raises an exception if name is None.\n \"\"\"\n if name is None:\n raise ValueError( # pragma: no cover\n \"Builder name cannot be None\")\n try:\n return self.registry.create_builder(self, name, env=self.env)\n except TypeError:\n # old version of sphinx\n return self.registry.create_builder(self, name)\n\n def _extended_init_(self):\n \"\"\"\n Additional initialization steps.\n \"\"\"\n if not hasattr(self, \"domains\"):\n self.domains = {}\n if not hasattr(self, \"_events\"):\n self._events = {}\n\n # Otherwise, role issue is missing.\n setup_link_roles(self)\n\n def _lookup_doctree(self, doctree, node_type):\n for node in doctree.traverse(node_type):\n yield node\n\n def _add_missing_ids(self, doctree):\n for i, node in enumerate(self._lookup_doctree(doctree, None)):\n stype = str(type(node))\n if ('section' not in stype and 'title' not in stype and\n 'reference' not in stype):\n continue\n try:\n node['ids'][0]\n except IndexError:\n node['ids'] = ['missing%d' % i]\n except TypeError: # pragma: no cover\n pass\n\n def finalize(self, doctree, external_docnames=None):\n \"\"\"\n Finalizes the documentation after it was parsed.\n\n @param doctree doctree (or pub.document), available after publication\n @param external_docnames other docnames the doctree references\n \"\"\"\n imgs = list(self._lookup_doctree(doctree, nodes.image))\n for img in imgs:\n img['save_uri'] = img['uri']\n\n if not isinstance(self.env, _CustomBuildEnvironment):\n raise TypeError( # pragma: no cover\n f\"self.env is not _CustomBuildEnvironment: '{type(self.env)}'\")\n if not isinstance(self.builder.env, _CustomBuildEnvironment):\n raise TypeError( # pragma: no cover\n \"self.builder.env is not _CustomBuildEnvironment: '{0}'\".format(\n type(self.builder.env)))\n self.doctree_ = doctree\n self.builder.doctree_ = doctree\n self.env.doctree_[self.config.master_doc] = doctree\n self.env.all_docs = {self.config.master_doc: self.config.master_doc}\n\n if external_docnames:\n for doc in external_docnames:\n self.env.all_docs[doc] = doc\n\n # This steps goes through many function including one\n # modifying paths in image node.\n # Look for node['candidates'] = candidates in Sphinx code.\n # If a path startswith('/'), it is removed.\n from sphinx.environment.collectors.asset import logger as logger_asset\n logger_asset.setLevel(40) # only errors\n self._add_missing_ids(doctree)\n self.events.emit('doctree-read', doctree)\n logger_asset.setLevel(30) # back to warnings\n\n for img in imgs:\n img['uri'] = img['save_uri']\n\n self.events.emit('doctree-resolved', doctree,\n self.config.master_doc)\n self.builder.write(None, None, 'all')\n\n def debug(self, message, *args, **kwargs):\n self._logger.debug(message, *args, **kwargs)\n\n def info(self, message, *args):\n self._logger.info(message, *args)\n\n def warning(self, message='', name=None, type=None, subtype=None):\n if \"is already registered\" not in message: # pragma: no 
cover\n self._logger.warning(\n \"[_CustomSphinx] %s -- %s\", message, name,\n type=type, subtype=subtype)\n\n def add_builder(self, builder, override=False):\n self._added_objects.append(('builder', builder))\n if builder.name not in self.registry.builders:\n self.debug('[_CustomSphinx] adding builder: %r', builder)\n self.registry.add_builder(builder, override=override)\n else:\n self.debug('[_CustomSphinx] already added builder: %r', builder)\n\n def setup_extension(self, extname):\n self._added_objects.append(('extension', extname))\n\n logger = getLogger('sphinx.application')\n disa = logger.logger.disabled\n logger.logger.disabled = True\n\n # delayed import to speed up time\n try:\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning)\n self.registry.load_extension(self, extname)\n except Exception as e: # pragma: no cover\n raise ExtensionError(\n f\"Unable to setup extension '{extname}'\") from e\n finally:\n logger.logger = disa\n\n def add_directive(self, name, obj, content=None, arguments=None, # pylint: disable=W0221,W0237\n override=True, **options):\n self._added_objects.append(('directive', name))\n if name == 'plot' and obj.__name__ == 'PlotDirective':\n\n old_run = obj.run\n\n def run(self):\n \"\"\"Run the plot directive.\"\"\"\n logger = getLogger(\"MockSphinxApp\")\n logger.info('[MockSphinxApp] PlotDirective: %r', self.content)\n try:\n res = old_run(self)\n logger.info('[MockSphinxApp] PlotDirective ok')\n return res\n except OSError as e: # pragma: no cover\n logger = getLogger(\"MockSphinxApp\")\n logger.info('[MockSphinxApp] PlotDirective failed: %s', e)\n return []\n\n obj.run = run\n\n Sphinx.add_directive(self, name, obj, override=override, **options)\n\n def add_domain(self, domain, override=True):\n self._added_objects.append(('domain', domain))\n Sphinx.add_domain(self, domain, override=override)\n # For some reason, the directives are missing from the main catalog\n # in docutils.\n for k, v in domain.directives.items():\n self.add_directive(f\"{domain.name}:{k}\", v)\n if domain.name in ('py', 'std', 'rst'):\n # We add the directive without the domain name as a prefix.\n self.add_directive(k, v)\n for k, v in domain.roles.items():\n self.add_role(f\"{domain.name}:{k}\", v)\n if domain.name in ('py', 'std', 'rst'):\n # We add the role without the domain name as a prefix.\n self.add_role(k, v)\n\n def add_role(self, name, role, override=True):\n self._added_objects.append(('role', name))\n self.debug('[_CustomSphinx] adding role: %r', (name, role))\n roles.register_local_role(name, role)\n\n def add_generic_role(self, name, nodeclass, override=True):\n self._added_objects.append(('generic_role', name))\n self.debug(\"[_CustomSphinx] adding generic role: '%r'\",\n (name, nodeclass))\n role = roles.GenericRole(name, nodeclass)\n roles.register_local_role(name, role)\n\n def add_node(self, node, override=True, **kwds):\n self._added_objects.append(('node', node))\n self.debug('[_CustomSphinx] adding node: %r', (node, kwds))\n nodes._add_node_class_names([node.__name__])\n for key, val in kwds.items():\n try:\n visit, depart = val\n except ValueError: # pragma: no cover\n raise ExtensionError((\"Value for key '%r' must be a \"\n \"(visit, depart) function tuple\") % key)\n translator = self.registry.translators.get(key)\n translators = []\n if translator is not None:\n translators.append(translator)\n elif key == 'html':\n from sphinx.writers.html import HTMLTranslator\n translators.append(HTMLTranslator)\n if 
is_html5_writer_available():\n from sphinx.writers.html5 import HTML5Translator\n translators.append(HTML5Translator)\n elif key == 'latex':\n translators.append(_get_LaTeXTranslator())\n elif key == 'elatex':\n translators.append(EnhancedLaTeXBuilder)\n elif key == 'text':\n from sphinx.writers.text import TextTranslator\n translators.append(TextTranslator)\n elif key == 'man':\n from sphinx.writers.manpage import ManualPageTranslator\n translators.append(ManualPageTranslator)\n elif key == 'texinfo':\n from sphinx.writers.texinfo import TexinfoTranslator\n translators.append(TexinfoTranslator)\n\n for translator in translators:\n setattr(translator, 'visit_' + node.__name__, visit)\n if depart:\n setattr(translator, 'depart_' + node.__name__, depart)\n\n def add_event(self, name):\n self._added_objects.append(('event', name))\n Sphinx.add_event(self, name)\n\n def add_config_value(self, name, default, rebuild, types_=(), types=()): # pylint: disable=W0221,W0237\n types_ = types or types_\n self._added_objects.append(('config_value', name))\n Sphinx.add_config_value(self, name, default, rebuild, types_)\n\n def add_directive_to_domain(self, domain, name, obj, has_content=None, # pylint: disable=W0221,W0237\n argument_spec=None, override=False, **option_spec):\n self._added_objects.append(('directive_to_domain', domain, name))\n Sphinx.add_directive_to_domain(self, domain, name, obj,\n override=override, **option_spec)\n\n def add_role_to_domain(self, domain, name, role, override=False):\n self._added_objects.append(('roles_to_domain', domain, name))\n Sphinx.add_role_to_domain(self, domain, name, role, override=override)\n\n def add_transform(self, transform):\n self._added_objects.append(('transform', transform))\n Sphinx.add_transform(self, transform)\n\n def add_post_transform(self, transform):\n self._added_objects.append(('post_transform', transform))\n Sphinx.add_post_transform(self, transform)\n\n def add_js_file(self, filename, priority=500, **kwargs): # pylint: disable=W0221\n # loading_method=None: added in Sphinx 4.4\n self._added_objects.append(('js', filename))\n Sphinx.add_js_file(self, filename, priority=priority, **kwargs)\n\n def add_css_file(self, filename, priority=500, **kwargs):\n self._added_objects.append(('css', filename))\n Sphinx.add_css_file(self, filename, priority=priority, **kwargs)\n\n def add_latex_package(self, packagename, options=None, after_hyperref=False):\n self._added_objects.append(('latex', packagename))\n Sphinx.add_latex_package(\n self, packagename=packagename, options=options,\n after_hyperref=after_hyperref)\n\n def add_object_type(self, directivename, rolename, indextemplate='',\n parse_node=None, ref_nodeclass=None, objname='',\n doc_field_types=None, override=False):\n if doc_field_types is None:\n doc_field_types = []\n self._added_objects.append(('object', directivename, rolename))\n Sphinx.add_object_type(self, directivename, rolename, indextemplate=indextemplate,\n parse_node=parse_node, ref_nodeclass=ref_nodeclass,\n objname=objname, doc_field_types=doc_field_types,\n override=override)\n\n def add_env_collector(self, collector):\n \"\"\"\n See :epkg:`class Sphinx`.\n \"\"\"\n self.debug(\n '[_CustomSphinx] adding environment collector: %r', collector)\n coll = collector()\n coll.enable(self)\n self._added_collectors.append(coll)\n\n def disconnect_env_collector(self, clname, exc=True):\n \"\"\"\n Disables a collector given its class name.\n\n @param cl name\n @param exc raises an exception if not found\n @return found collector\n 
\"\"\"\n found = None\n foundi = None\n for i, co in enumerate(self._added_collectors):\n if clname == co.__class__.__name__:\n found = co\n foundi = i\n break\n if found is not None and not exc:\n return None\n if found is None:\n raise ValueError( # pragma: no cover\n \"Unable to find a collector '{0}' in \\n{1}\".format(\n clname, \"\\n\".join(\n map(lambda x: x.__class__.__name__,\n self._added_collectors))))\n for v in found.listener_ids.values():\n self.disconnect(v)\n del self._added_collectors[foundi]\n return found\n","repo_name":"sdpython/pyquickhelper","sub_path":"src/pyquickhelper/helpgen/sphinxm_convert_doc_sphinx_helper.py","file_name":"sphinxm_convert_doc_sphinx_helper.py","file_ext":"py","file_size_in_byte":61727,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"46"} +{"seq_id":"43807990703","text":"import socket\nimport threading\nimport hashlib\nimport time\nimport datetime\nimport random\nimport sys\n\n\n# Packet class definition\nclass packet():\n checksum = 0\n length = 0\n seqNo = 0\n msg = 0\n\n def make(self, data):\n self.msg = data\n self.length = str(len(data))\n self.checksum=hashlib.sha1(data.encode('utf-8')).hexdigest()\n print (\"Length: %s\\nSequence number: %s\" %(self.length, self.seqNo))\n\ndef RUDPServer(serverAddress, serverPort):\n # Start - Connection initiation\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # Bind the socket to the port\n server_address = (serverAddress, serverPort)\n print ('Starting up on %s port %s' % server_address)\n sock.bind(server_address)\n\n # Listening for requests indefinitely\n while True:\n print ('Waiting to receive message')\n data, address = sock.recvfrom(600)\n # Delimiter\n delimiter = \"|:|:|\"\n\n # Seq number flag\n seqFlag = 0\n\n packet_count=0\n time.sleep(0.5)\n start_time=time.time()\n print (\"Request started at: \" + str(datetime.datetime.utcnow()))\n # Initialise packet class\n pkt = packet()\n threadSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n startTime=time.time()\n\n # Check if file is valid \n try:\n print (\"Opening file %s\" % data)\n fileName = data.decode('utf-8')\n fileRead = open(fileName, 'r')\n data = fileRead.read()\n fileRead.close()\n except:\n msg=\"FNF\"\n pkt.make(msg)\n finalPacket = str(pkt.checksum) + delimiter + str(pkt.seqNo) + delimiter + str(pkt.length) + delimiter + pkt.msg\n threadSock.sendto(finalPacket, address)\n print (\"Requested file could not be found, replied with FNF\")\n return\n\n \n # Fragment and send file 500 byte by 500 byte\n x = 0\n expectedPacket = (int((len(data) / 500) + 1))\n while (x < int((len(data) / 500) + 1)):\n packet_count += 1\n msg = data[x * 500:x * 500 + 500];\n pkt.make(msg);\n finalPacket = str(pkt.checksum) + delimiter + str(pkt.seqNo) + delimiter + str(pkt.length) + delimiter + pkt.msg\n\n # Send packet\n sent = threadSock.sendto(finalPacket.encode('utf-8'), address)\n print ('Sent %s bytes back to %s, awaiting acknowledgment..' 
% (sent, address))\n threadSock.settimeout(2)\n try:\n ack, address = threadSock.recvfrom(100);\n ack = ack.decode('utf-8')\n except:\n # else after timeout, resend \n print (\"Time out reached, resending ...%s\" % x)\n continue;\n # Check if acknowledgement is sent\n if ack.split(\",\")[0] == str(pkt.seqNo):\n pkt.seqNo = int(not pkt.seqNo)\n print (\"Acknowledged by: \" + ack + \"\\nAcknowledged at: \" + str(datetime.datetime.utcnow()) + \"\\nElapsed: \" + str(time.time() - start_time))\n x += 1\n endTime=time.time()\n print(\"\\nDone in within : \" + str(endTime-startTime))\n packetLoss = expectedPacket - packet_count\n print(\"Packet Loss : \"+ str(packetLoss))\n\n print ('Received %s bytes from %s' % (len(data), address))\n \nif __name__ == \"__main__\":\n # Set address and port\n serverAddress = sys.argv[1]\n serverPort = int(sys.argv[2])\n print(\"Server Address : \" + serverAddress + \"\\nServer Port : \"+ str(serverPort) +\"\\n\")\n RUDPServer(serverAddress, serverPort)\n\n\n","repo_name":"chenwenshu/multithreaded-downloader","sub_path":"TypeOfReliableUDP/RUDP_server.py","file_name":"RUDP_server.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"46"} +{"seq_id":"73227125578","text":"\"\"\"added some more fields to the requests\n\nRevision ID: 463c6236fa58\nRevises: 02cc87f01969\nCreate Date: 2022-05-22 22:59:36.360116\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '463c6236fa58'\ndown_revision = '02cc87f01969'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('pdpa_requests', sa.Column('detail', sa.Text(), nullable=True))\n op.add_column('pdpa_requests', sa.Column('received_at', sa.DateTime(timezone=True), nullable=True))\n op.add_column('pdpa_requests', sa.Column('received_by', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'pdpa_requests', 'staff_account', ['received_by'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'pdpa_requests', type_='foreignkey')\n op.drop_column('pdpa_requests', 'received_by')\n op.drop_column('pdpa_requests', 'received_at')\n op.drop_column('pdpa_requests', 'detail')\n # ### end Alembic commands ###\n","repo_name":"MUMT-IT/mis2018","sub_path":"migrations/versions/463c6236fa58_added_some_more_fields_to_the_requests.py","file_name":"463c6236fa58_added_some_more_fields_to_the_requests.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"46"} +{"seq_id":"40533504958","text":"import tkinter as tk\nimport vlc\n\nclass Screen(tk.Frame):\n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent, bg = 'black')\n self.settings = { # Inizialazing dictionary settings\n \"width\" : 1024,\n \"height\" : 576\n }\n self.settings.update(kwargs) # Changing the default settings\n # Open the video source |temporary\n self.video_source = \"./Assets/male_5_10.mp4\"\n\n # Canvas where to draw video output\n self.canvas = tk.Canvas(self, width = self.settings['width'], height = self.settings['height'], bg = \"black\", highlightthickness = 0)\n self.canvas.pack()\n\n # Creating VLC player\n self.instance = vlc.Instance()\n self.player = self.instance.media_player_new()\n\n\n def GetHandle(self):\n # Getting frame ID\n return self.winfo_id()\n\n def play(self, _source):\n # Function to start player from given source\n Media = self.instance.media_new(_source)\n Media.get_mrl()\n self.player.set_media(Media)\n\n #self.player.play()\n self.player.set_hwnd(self.GetHandle())\n self.player.play()","repo_name":"asmanjitha/Computer_Aided_Smart_Adverticements","sub_path":"GUI_Interactive_App/VideoScreen.py","file_name":"VideoScreen.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"21890355043","text":"from tkinter import *\r\nfrom sqlite3 import *\r\nfrom tkinter import ttk\r\n\r\ndef ochered_view():\r\n def get_data_from_database():\r\n # здесь код для извлечения данных из базы данных\r\n # например, можно использовать модуль sqlite3 для работы с SQLite\r\n with connect('mydatabase.db') as db:\r\n cursor = db.cursor()\r\n cursor.execute(\"SELECT id FROM mytable\")\r\n row = cursor.fetchall()\r\n column1_data = row[0]\r\n column2_data = row[1]\r\n column3_data = row[2]\r\n column4_data = row[3]\r\n return column1_data, column2_data, column3_data, column4_data\r\n\r\n\r\n def refresh():\r\n column1_data, column2_data, column3_data, column4_data = get_data_from_database()\r\n new_text = f\"Column 1: {column1_data} | Column 2: {column2_data} | Column 3: {column3_data}\"\r\n number1_l.config(text=new_text)\r\n\r\n\r\n root = Tk()\r\n root.title('Отображение очереди')\r\n root.minsize(800, 450)\r\n\r\n frame_number = Frame(root, width=150, height=150, bg='white')\r\n frame_number.place(x=0, y=0)\r\n\r\n frame_window = Frame(root, width=150, height=150, bg='white')\r\n frame_window.place(x=400, y=0)\r\n\r\n number_in_ochered = ttk.Label(frame_number, text=\"Номер в очереди\")\r\n number_in_ochered.grid(row=1, column=2, sticky='w', padx=10, pady=10)\r\n\r\n window_in_ochered = ttk.Label(frame_window, text=\"Окно\")\r\n window_in_ochered.grid(row=1, column=2, sticky='w', padx=10, pady=10)\r\n\r\n #Лейблы номера в очереди\r\n number1_l = ttk.Label(frame_number, text=\"1\")\r\n number2_l = ttk.Label(frame_number, text=\"2\")\r\n number3_l = ttk.Label(frame_number, text=\"3\")\r\n 
number4_l = ttk.Label(frame_number, text=\"4\")\r\n\r\n #Расположение лейблов номера на фрейме frame_number\r\n number1_l.grid(row=2, column=0, sticky='w', padx=10, pady=10)\r\n number2_l.grid(row=3, column=0, sticky='w', padx=10, pady=10)\r\n number3_l.grid(row=4, column=0, sticky='w', padx=10, pady=10)\r\n number4_l.grid(row=5, column=0, sticky='w', padx=10, pady=10)\r\n\r\n #Лейблы окна к которому подойти\r\n window1_l = ttk.Label(frame_window, text=\"1\")\r\n window2_l = ttk.Label(frame_window, text=\"2\")\r\n window3_l = ttk.Label(frame_window, text=\"3\")\r\n window4_l = ttk.Label(frame_window, text=\"4\")\r\n\r\n #Расположение лейблов окна к которму подойти на фрейме frame_window\r\n window1_l.grid(row=2, column=0, sticky='w', padx=10, pady=10)\r\n window2_l.grid(row=3, column=0, sticky='w', padx=10, pady=10)\r\n window3_l.grid(row=4, column=0, sticky='w', padx=10, pady=10)\r\n window4_l.grid(row=5, column=0, sticky='w', padx=10, pady=10)\r\n\r\n root.mainloop()\r\n\r\n\r\nochered_view()","repo_name":"everneverevernever/ochered","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"36462733258","text":"import setuptools\nfrom PublicDataReader.config.info import __version__, __author__, __contact__, __github__\n\nwith open(\"requirements.txt\") as f:\n tests_require = f.readlines()\ninstall_requires = [t.strip() for t in tests_require]\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"PublicDataReader\",\n version=__version__,\n license=\"MIT\",\n author=__author__,\n author_email=__contact__,\n description=\"Open Source Public Data Reader\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=__github__,\n packages=setuptools.find_packages(),\n package_data={\"PublicDataReader\": [\"raw/*.json\"]},\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n install_requires=install_requires,\n)\n","repo_name":"WooilJeong/PublicDataReader","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":425,"dataset":"github-code","pt":"46"} +{"seq_id":"23871332913","text":"from pydantic.types import Json\nimport pytest\nfrom fastapi.testclient import TestClient\nfrom src.main import app\n\npytest.token = \"\"\n\n@pytest.fixture(scope=\"module\")\ndef client():\n with TestClient(app) as client:\n yield client\n\n@pytest.mark.order(1)\ndef test_create_user(client):\n user_mock = {\n \"name\": \"usertest\",\n \"email\": \"user.test@gmail.com\",\n \"password\": \"confidential\",\n \"cpf\": \"12345678900\",\n \"pis\": \"12345678900\",\n \"active\": True,\n \"address\": {\n \"country\": \"brasil\",\n \"state\": \"santa catarina\",\n \"city\": \"florianopolis\",\n \"complement\": \"ilha da magia\",\n \"street\": \"lauro linhares\",\n \"number\": \"123\",\n \"cep\": \"12345678\"\n }\n }\n response = client.post(\"/users\", json=user_mock)\n \n assert response.status_code == 201, response.text\n\n@pytest.mark.order(2)\ndef test_login(client):\n credentials = {\n \"username\": \"user.test@gmail.com\",\n \"password\": \"confidential\",\n }\n response = client.post(\"/login\", data=credentials, headers={\n \"Content-Type\": 
\"application/x-www-form-urlencoded\"\n })\n \n assert response.status_code == 200, response.text\n pytest.token = \"Bearer \" + response.json()[\"access_token\"]\n\n@pytest.mark.order(3)\ndef test_get_users(client):\n response = client.get(\"/users\")\n assert response.status_code == 200, response.text\n\n@pytest.mark.order(4)\ndef test_get_user(client):\n response = client.get(\"/users/me\")\n assert response.status_code == 200, response.text\n\n@pytest.mark.order(5)\ndef test_put_user(client):\n updated_user = {\n \"name\": \"new usertest\",\n \"address\": {\n \"number\": \"1234\",\n }\n }\n response = client.put(\"/users/me\", json=updated_user)\n assert response.status_code == 200, response.text\n\n@pytest.mark.order(6)\ndef test_delete_user(client):\n response = client.delete(\"/users/me\")\n assert response.status_code == 200, response.text\n","repo_name":"inafranco/user-crud","sub_path":"back-end/src/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"5579289730","text":"from qcc import Qcc\n\nimport consts\nimport utils\nimport json\n\ndef query_industry(industry:str):\n \"\"\"\n 查询企业行业主函数\n industry:行业\n \"\"\"\n qcc = Qcc()\n detail_url = qcc.query_url(industry)\n if not detail_url:\n print('未成功获取到公司详情页链接,请检查公司名错误或其他错误')\n return ''\n else:\n industry = qcc.query_industry(detail_url)\n if industry != '':\n return utils.get_industry(industry)\n else:\n return ''\n \nif __name__ == '__main__':\n query_industry('腾讯')\n","repo_name":"Jeremylee1234/query_industry","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"30958864135","text":"import tensorflow as tf\nfrom keras.models import Model\n\nuser_NN = tf.keras.models.Sequential([\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(32)\n])\n\nitem_NN = tf.keras.models.Sequential([\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(32)\n])\n\nnum_user_features = 10\nnum_item_features = 10\n\n# create the user input and point to the base network\nuser_input = tf.keras.layers.Input(shape=(num_user_features))\nvu = user_NN(user_input)\nvu = tf.linalg.l2_normalize(vu, axis=1)\n\n# create the item input and point to the base network\nitem_input = tf.keras.layers.Input(shape=(num_item_features))\nvm = item_NN(item_input)\nvm = tf.linalg.l2_normalize(vm, axis=1)\n\n# measure the similarity between the user and item embeddings\noutput = tf.keras.layers.Dot(axes=1)([vu, vm])\n\n# specify the inputs and output of the model\nmodel = Model(inputs=[user_input, item_input], outputs=output)\n\n# specify the cost function and optimization strategy\ncost_fn = tf.keras.losses.MeanSquaredError()","repo_name":"ali-izhar/machine-learning","sub_path":"Theory/Deep_Learning/Recommender_Systems/Content_Based/content_recommender.py","file_name":"content_recommender.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"46"} +{"seq_id":"7836848164","text":"# 从 每一个位置开始剪 5个 应该是bfs\n\nfrom collections import deque\n\nn, m = 3, 4\n\ns=set()\ndef dfs(row,col,leng,paper):\n if leng==5:\n string=''\n for p in paper:\n print(*p)\n string+=''.join(map(str,p))\n print()\n 
#加入集合防止重复\n s.add(string)\n\n\n return\n for dx,dy in ((0,1),(1,0),(-1,0),(0,-1)):\n trow,tcol=row+dx,col+dy\n if 0<=trow 1}\n if len(repeat) < 1:\n print('no duplicates')\n else:\n print('duplicate:', ' '.join(repeat))\n\n\ndef third():\n week = ('Понедельник', 'Вторник', 'Среда', 'Четверг', 'Пятница', 'Суббота', 'Воскресенье')\n question = int(input('Введите количество выходных, которое хотите иметь в неделю: '))\n job = week[:-question]\n # start stop step\n weekend = week[:-question-1:-1]\n if 1 < question <= 7:\n print('Ваши выходные дни:', *weekend)\n print('Ваши рабочие дни:', *job)\n elif question == 0:\n print('Ваши выходные дни: -.')\n print('Ваши рабочие дни:', *week)\n\n\ndef fourth():\n students1 = ['Ягодкин', 'Коршунов', 'Державин', 'Сидоров', 'Петров', 'Васечкин', 'Ломоносов',\n 'Саввин', 'Ершов', 'Силивин']\n students2 = ['Иванов', 'Стиценко', 'Головко', 'Бодров', 'Бондаренко', 'Куницын', 'Косульников',\n 'Воротынцев', 'Купоросов', 'Герц']\n sport_team = tuple(sample(students1, 5)) + tuple(sample(students2, 5))\n print('Исходный список первой группы:', *students1)\n print('Исходный список второй группы', *students2)\n print('Список спортивной команды:', *sorted(sport_team))\n print('Количество человек в спортивной команде:', len(sport_team))\n if 'Иванов' in sport_team:\n print(f'Иванов входит в список спортивной команды, его фамилия встречается {sport_team.count(\"Иванов\")} раз')\n else:\n print('Иванов в список спортивной команды не входит')\n\n\nfourth()\n","repo_name":"satrana24/lab5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"37443926896","text":"import openrouteservice\r\nimport folium\r\nimport random\r\nimport json\r\nimport pprint\r\nimport os\r\n\r\ndef rand_coord_in_range(x, y):\r\n x_int = int(x * 10000000)\r\n y_int = int(y * 10000000)\r\n rand = random.randrange(min(x_int, y_int), max(x_int, y_int))\r\n return rand / 10000000\r\n\r\n\r\nLONDON_BBOX = [[-81.398485, 43.006537], [-81.122701, 42.945282]]\r\n\r\n\"\"\"\r\nclient_local = openrouteservice.Client(base_url='http://localhost:5000') # Specify your personal API key\r\n\r\npois = client_local.places(request='pois', bbox=LONDON_BBOX, filter_category_ids=[596], validate=False)\r\n\r\nstation_locations = [station['geometry']['coordinates'] for station in pois['features']]\r\n\"\"\"\r\n\r\n# get locations and prices from pre-made json file\r\n\r\nprice_file = open('station_prices_reduced_network.json')\r\n\r\nprice_data = json.load(price_file)\r\n\r\nprint(str(len(price_data)))\r\n\r\nstation_locations = [station['coordinates'] for station in price_data]\r\n\r\ntrue_costco_index = next((z for z, st in enumerate(price_data) if st['price'] == 159.9), -1)\r\n\r\nclient_local_ors = openrouteservice.Client(base_url='http://localhost:8080/ors')\r\n\r\ndef simulate_best_station():\r\n vehicle_category = random.random() # generate a random between 0 and 1 and use tranches to determine vehicle type\r\n\r\n if vehicle_category < .17:\r\n # Vehicle is a truck\r\n BASE_FILL_L = 95 * 0.8\r\n FUEL_BURN_L_PER_100KM = 14\r\n elif vehicle_category < (.17 + .47):\r\n # Vehicle is an SUV\r\n BASE_FILL_L = 60 * 0.8\r\n FUEL_BURN_L_PER_100KM = 9\r\n elif vehicle_category < (.17 + .47 + .29):\r\n # Vehicle is a car\r\n BASE_FILL_L = 45 * 0.8\r\n FUEL_BURN_L_PER_100KM = 7\r\n else:\r\n # Vehicle is a van\r\n BASE_FILL_L = 76 * 0.8\r\n FUEL_BURN_L_PER_100KM = 12\r\n\r\n 
starting_location = [rand_coord_in_range(LONDON_BBOX[0][0], LONDON_BBOX[1][0]), rand_coord_in_range(LONDON_BBOX[0][1], LONDON_BBOX[1][1])]\r\n\r\n temp_station_locations = station_locations.copy()\r\n temp_price_data = price_data.copy()\r\n\r\n removed_costco = False\r\n if random.random() < (1.0 if os.getenv('REMOVE_COSTCO', False) else 0.5):\r\n temp_station_locations.pop(true_costco_index)\r\n temp_price_data.pop(true_costco_index)\r\n removed_costco = True\r\n\r\n locations = [starting_location] + temp_station_locations\r\n\r\n matrix = client_local_ors.distance_matrix(locations=locations, destinations=list(range(1, len(locations))), sources=[0], profile='driving-car', metrics=['distance'], validate=False)\r\n\r\n closest_station_index = matrix['distances'][0].index(min(matrix['distances'][0]))\r\n\r\n cost_matrix = []\r\n fuel_burn_matrix = []\r\n\r\n for i in range(len(temp_price_data)):\r\n station = temp_price_data[i]\r\n base_cost = BASE_FILL_L * station['price']\r\n on_route_fuel_burn = matrix['distances'][0][i] / 1000 / 100 * FUEL_BURN_L_PER_100KM * 2 # the distance is in meters so we convert to km, then we see what portion of 100 that is multiplied by fuel burn. We multiply by 2 to consider the return fuel burn\r\n on_route_fuel_cost = on_route_fuel_burn * station['price']\r\n total_fuel_cost = on_route_fuel_cost + base_cost\r\n cost_matrix.append(total_fuel_cost)\r\n fuel_burn_matrix.append(on_route_fuel_burn + BASE_FILL_L)\r\n\r\n lowest_cost_station_index = cost_matrix.index(min(cost_matrix))\r\n\r\n # note that the map will show the last route decision where this function was run multiple times\r\n # we use reversed here to reverse coords because the map uses lat, lon and the rest of the apis use lon,lat\r\n m = folium.Map(location=list(reversed(starting_location)))\r\n\r\n folium.Marker(location=list(reversed(starting_location)), popup=\"starting point\", icon=folium.Icon(color=\"green\")).add_to(m)\r\n\r\n folium.Marker(location=list(reversed(temp_station_locations[closest_station_index])), popup=\"Nearest station \\n Price: \" + str(temp_price_data[closest_station_index]['price']), icon=folium.Icon(color=\"blue\")).add_to(m)\r\n folium.Marker(location=list(reversed(temp_station_locations[lowest_cost_station_index])), popup=\"Cheapest station \\n Price: \" + str(temp_price_data[lowest_cost_station_index]['price']), icon=folium.Icon(color=\"red\")).add_to(m)\r\n\r\n\r\n m.save('map.html')\r\n\r\n return { 'nearest_price': temp_price_data[closest_station_index]['price'], 'cheapest_price': temp_price_data[lowest_cost_station_index]['price'], 'additional_burn_l': fuel_burn_matrix[lowest_cost_station_index] - fuel_burn_matrix[closest_station_index], 'total_savings': (cost_matrix[closest_station_index] - cost_matrix[lowest_cost_station_index]) / 100, 'nearest_is_best': closest_station_index == lowest_cost_station_index, 'costco_is_best': lowest_cost_station_index == true_costco_index and not removed_costco }\r\n\r\ntotal_additional_burn = 0\r\ntotal_savings = 0\r\nnearest_best = 0\r\ncostco_best = 0\r\n\r\nTOTAL_RUNS = 10000\r\n\r\nfor run in range(TOTAL_RUNS):\r\n result = simulate_best_station()\r\n total_additional_burn += result['additional_burn_l']\r\n total_savings += result['total_savings']\r\n if result['nearest_is_best']:\r\n nearest_best += 1\r\n if result['costco_is_best']:\r\n costco_best += 1\r\n\r\nprint('The total additional fuel burn was ' + str(total_additional_burn) + ' which saved consumers $' + str(round(total_savings, 2)) + ' a lower-priced station was 
optimal for ' + str(round((1 - (nearest_best / TOTAL_RUNS)) * 100, 1)) + ' % of drivers. ' + str(round(costco_best / TOTAL_RUNS * 100, 1)) + ' % of drivers would choose costco.')\r\n","repo_name":"rye761/GasSim","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"70936434701","text":"import sqlite3\nimport re\nimport datetime\nimport random\ndef login_server(username=None,password=None):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select * from user where username=? and password=?',(username,password))\n data=cursor.fetchall()\n conn.close()\n if data:\n return data[0]\n else:\n return '账号或密码错误'\n\ndef register_server(**data):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n reg= re.compile(r'^1[34578][0-9]{9}$')\n if not reg.match(data['telephone'][0]):\n return \"请填写正确的手机号码\"\n try:\n cursor.execute('insert into user (username,password,createtime,name,\\\n telephone,QQ,email)values(?,?,?,?,?,?,?)',\\\n (data['username'][0],data['password'][0],\\\n datetime.date.today(),data['name'][0],\\\n data['telephone'][0],data['QQ'][0],data['email'][0]))\n except sqlite3.IntegrityError:\n conn.close()\n return \"用户名重复\"\n conn.commit()\n conn.close()\n return \"注册成功\"\n\ndef pre_server(**data):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n reg= re.compile(r'^1[34578][0-9]{9}$')\n if not reg.match(data['telephone'][0]):\n return \"请填写正确的手机号码\"\n try:\n cursor.execute('insert into pre (name,telephone,createtime,\\\n QQ,email)values(?,?,?,?,?)',\\\n (data['name'][0],data['telephone'][0],\\\n datetime.date.today(),data['QQ'][0],\\\n data['email'][0]))\n except sqlite3.IntegrityError:\n conn.close()\n return \"手机号码重复\"\n conn.commit()\n conn.close()\n return \"预报名成功\"\n\ndef score_server(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select real_score,virtual_score from user where id=?',(user_id,))\n data=cursor.fetchall()\n conn.close()\n return data[0]\n\ndef update_user_question(**data):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('insert into user_questions (user_id,question_id,result,\\\n choice) values(?,?,?,?)',\\\n (data['userId'][0],data['questionId'][0],\\\n data['answerResult'][0],data['userChoice'][0]))\n conn.commit()\n conn.close()\n\ndef get_user_question(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select question_id from user_questions where user_id=?',(user_id,))\n done=[x[0] for x in cursor.fetchall()]\n cursor.execute('select id from questions')\n questions= [x[0] for x in cursor.fetchall()]\n conn.close()\n undone=[]\n for q in questions:\n if q not in done:\n undone.append(q)\n r=random.choice(undone)\n return [r,get_question(r),len(undone)-1]\n\n\ndef get_question(question_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select title,A,B,C,D,explain,answer from questions where id=?',(question_id,))\n data,=cursor.fetchall()\n conn.close()\n return data\n\ndef score_detail(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select createtime,name,telephone,status,id\\\n from re where user_id=?',(user_id,))\n data=cursor.fetchall()\n conn.close()\n return data\n\ndef recommend_server(user_id,**data):\n conn = sqlite3.connect('test.sqlite')\n cursor = 
conn.cursor()\n reg= re.compile(r'^1[34578][0-9]{9}$')\n if not reg.match(data['re_telephone'][0]):\n return \"请填写正确的手机号码\"\n try:\n cursor.execute('insert into re (createtime,name,telephone,email,\\\n user_id)values(?,?,?,?,?)',\\\n (datetime.date.today(),data['re_name'][0],data['re_telephone'][0],\\\n data['re_email'][0],user_id))\n except sqlite3.IntegrityError:\n conn.close()\n return \"此人已被推荐\"\n conn.commit()\n cursor.execute('update user set virtual_score=virtual_score+150 where id=?',\\\n (user_id,))\n conn.commit()\n conn.close()\n return \"恭喜获得150个推荐积分!\"\n\ndef auth_server(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select role from user where id=?',(user_id,))\n data=cursor.fetchall()\n conn.close()\n return data[0]\n\ndef all_user_s():\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select createtime,name,telephone,real_score,\\\n virtual_score,id from user where role=0')\n data=cursor.fetchall()\n conn.close()\n return data\n\ndef all_pre_s():\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select createtime,name,telephone,QQ,\\\n email from pre')\n data=cursor.fetchall()\n conn.close()\n return data\n\ndef zero_server(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('update user set virtual_score=0 ,real_score=0 where id=?',\\\n (user_id,))\n cursor.execute('delete from re where user_id=?',(user_id,))\n conn.commit()\n conn.close()\n\ndef get_user_by_id(user_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select name,telephone,QQ,email,virtual_score,\\\n real_score from user where id=?',\\\n (user_id,))\n data=cursor.fetchall()\n conn.close()\n return data[0]\n\ndef get_userid_by_re(re_id):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select user_id from re where id=?',(re_id,))\n data=cursor.fetchall()\n conn.close()\n return data[0]\n\ndef change_status_server(re_id,user_id,status,s):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('update re set status=? 
where id=?',(s,re_id))\n if status==1:\n cursor.execute('update user set virtual_score=virtual_score-50,\\\n real_score=real_score+50 where id=?',(user_id,))\n if status==2:\n cursor.execute('update user set virtual_score=virtual_score-100,\\\n real_score=real_score+100 where id=?',(user_id,))\n conn.commit()\n conn.close()\n\ndef search_users_by_phone(phone):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select createtime,name,telephone,id from user \\\n where telephone like ?',\\\n ('{}%'.format(phone),))\n data=cursor.fetchall()\n conn.close()\n return data\n\ndef search_res_by_phone(phone):\n conn = sqlite3.connect('test.sqlite')\n cursor = conn.cursor()\n cursor.execute('select createtime,name,telephone,status,id from re \\\n where telephone like ?',\\\n ('{}%'.format(phone),))\n data=cursor.fetchall()\n conn.close()\n return data\n","repo_name":"luozx207/gdyxscore","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"7913187534","text":"import logging\nimport uuid\n\nfrom django.core import mail\nfrom django.test import TestCase\n\nfrom classroom.factories import ClassroomFactory, UserFactory\nfrom classroom.forms import EnrollmentForm, PostForm\n\n\nclass EnrollmentFormTests(TestCase):\n def test_valid_enrollment_form_with_new_enrollment_sends_email(self):\n student = UserFactory()\n classroom = ClassroomFactory()\n\n code = str(classroom.id)\n form = EnrollmentForm({'code': code})\n\n self.assertTrue(form.is_valid())\n\n with self.assertLogs('classroom.forms', level='INFO') as cm:\n form.send_mail(student=student, classroom=classroom)\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, 'Site message')\n self.assertGreaterEqual(len(cm.output), 1)\n\n def test_invalid_enrollment_form_does_not_sends_email(self):\n form = EnrollmentForm({'code': '123456'})\n self.assertFalse(form.is_valid())\n\n\nclass PostFormTests(TestCase):\n def test_valid_post_form_works(self):\n form = PostForm({'title': 'A new beginning', 'content': 'We have to start over again'})\n self.assertTrue(form.is_valid())\n\n def test_invalid_post_form(self):\n form = PostForm({'title': 'A new beginning'})\n self.assertFalse(form.is_valid())\n","repo_name":"gurupratap-matharu/cities","sub_path":"classroom/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"17038517718","text":"#!/usr/bin/env python\n\nfrom datetime import datetime\nimport re\nimport sys\n\ndef mapper():\n\ttld = \"http://www.theassociates.co.uk\"\n\tpattern = '^([\\d.]+) ([\\w-]+) ([\\w-]+) \\[(.+)\\] \\\"(.+)\\\" (\\d{3}) (\\d+)$'\n\tfor line in sys.stdin:\n\t\tresult = re.match(pattern, line)\n\t\tif result is None:\n\t\t\tcontinue\n\t\ttime_str, request = result.group(4), result.group(5)\n\t\ttry:\n\t\t\tmethod, resource, protocol = request.split(\" \")\n\t\texcept ValueError:\n\t\t\tcontinue\n\n\t\t# Need to convert to ordinal because we want to sort by day\n\t\ttime_dt = datetime.strptime(time_str.split(\" \")[0], \"%d/%b/%Y:%X\")\n\t\ttime_ordinal = time_dt.toordinal()\n\n\t\tif resource.startswith(tld):\n\t\t\tresource = resource[len(tld):]\n\n\t\tif resource == \"/\":\n\t\t\tprint(\"{}\\t1\".format(time_ordinal))\n\nif __name__ == 
\"__main__\":\n\tmapper()\n","repo_name":"yahwang/Learn-Big-Data-Essentials-Yandex","sub_path":"data/mapreduce_test/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"46"} +{"seq_id":"12577010060","text":"import win32com.client\nimport pandas as pd\nfrom datetime import datetime\nfrom com.utils import *\nimport time\n \n# 크레온 플러스 공통 OBJECT\ncpCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')\ncpStatus = win32com.client.Dispatch('CpUtil.CpCybos')\ncpOhlc = win32com.client.Dispatch('CpSysDib.StockChart')\n\ndef get_ohlc(code, qty):\n \"\"\"인자로 받은 종목의 OHLC 가격 정보를 qty 개수만큼 반환한다.\"\"\"\n cpOhlc.SetInputValue(0, code) # 종목코드\n cpOhlc.SetInputValue(1, ord('2')) # 1:기간, 2:개수\n cpOhlc.SetInputValue(4, qty) # 요청개수\n cpOhlc.SetInputValue(5, [0, 2, 3, 4, 5, 8]) # 0:날짜, 2~5:OHLC\n cpOhlc.SetInputValue(6, ord('D')) # D:일단위\n cpOhlc.SetInputValue(9, ord('1')) # 0:무수정주가, 1:수정주가\n cpOhlc.BlockRequest()\n count = cpOhlc.GetHeaderValue(3) # 3:수신개수\n columns = ['open', 'high', 'low', 'close', 'vol']\n index = []\n rows = []\n\n for i in range(count): \n index.append(cpOhlc.GetDataValue(0, i)) \n rows.append([cpOhlc.GetDataValue(1, i), cpOhlc.GetDataValue(2, i),\n cpOhlc.GetDataValue(3, i), cpOhlc.GetDataValue(4, i), cpOhlc.GetDataValue(5, i)]) \n df = pd.DataFrame(rows, columns=columns, index=index) \n return df\n\ndef getVolMax(volarr):\n max = 0\n for vol in volarr:\n if max < vol:\n max = vol\n return max\n\ndef get_movingaverage(code, window):\n \"\"\"인자로 받은 종목에 대한 이동평균가격을 반환한다.\"\"\"\n try:\n time_now = datetime.now()\n str_today = time_now.strftime('%Y%m%d')\n ohlc = get_ohlc(code, window) # 120 + 16 날짜별 데이터 추출\n\n if len(ohlc.index) < window:\n return None, None, None, None\n\n # if str_today == str(ohlc.iloc[0].name):\n # lastday = ohlc.iloc[1].name\n # else:\n # lastday = ohlc.iloc[0].name\n lastday = ohlc.iloc[0].name\n\n closes = ohlc['close'].sort_index() \n vols = ohlc['vol'].sort_index()\n\n ma20 = closes.rolling(20).mean()\n ma60 = closes.rolling(60).mean()\n ma120 = closes.rolling(120).mean()\n bf3d_m20 = ma20.loc[ohlc.iloc[3].name]\n bf3d_m60 = ma60.loc[ohlc.iloc[3].name]\n bf3d_m120 = ma120.loc[ohlc.iloc[3].name]\n bf7d_m20 = ma20.loc[ohlc.iloc[7].name]\n bf7d_m60 = ma60.loc[ohlc.iloc[7].name]\n bf7d_m120 = ma120.loc[ohlc.iloc[7].name]\n bf15d_m20 = ma20.loc[ohlc.iloc[15].name]\n bf15d_m60 = ma60.loc[ohlc.iloc[15].name]\n bf15d_m120 = ma120.loc[ohlc.iloc[15].name]\n \n if round(bf3d_m20, 2) > round(bf3d_m60, 2) and round(bf3d_m60, 2) > round(bf3d_m120, 2) \\\n and round(bf7d_m20, 2) > round(bf7d_m60, 2) and round(bf7d_m60, 2) > round(bf7d_m120, 2) \\\n and round(bf15d_m20, 2) > round(bf15d_m60, 2) and round(bf15d_m60, 2) > round(bf15d_m120, 2):\n\n vol30arr = vols.tail(30).array # 최근 30일 거래량의 최대값 추가\n return code, closes[lastday], vols[lastday], getVolMax(vol30arr)\n else:\n return None, None, None, None\n\n except Exception as ex:\n print(datetime.now().strftime('[%m/%d %H:%M:%S]'), 'get_movingavrg(' + str(window) + ') -> exception! 
' + str(ex))\n \n return None\n\nclass CMarketTotal():\n def __init__(self):\n self.dataInfo = {}\n self.targetItems = {}\n \n self.targetItems['code'] = []\n self.targetItems['name'] = []\n self.targetItems['lastclose'] = []\n self.targetItems['vol'] = []\n self.targetItems['sprice'] = []\n self.targetItems['lastmaxvol'] = []\n \n def get_target_items(self):\n codeList = cpCodeMgr.GetStockListByMarket(1) # 거래소\n codeList2 = cpCodeMgr.GetStockListByMarket(2) # 코스닥\n allcodelist = codeList + codeList2\n #print('전 종목 코드 %d, 거래소 %d, 코스닥 %d' % (len(allcodelist), len(codeList), len(codeList2)))\n \n objMarket = CpMarketEye()\n rqCodeList = []\n for i, code in enumerate(allcodelist):\n rqCodeList.append(code)\n if len(rqCodeList) == 200:\n time.sleep(1)\n objMarket.request(rqCodeList, self.dataInfo)\n rqCodeList = []\n continue\n \n if len(rqCodeList) > 0:\n objMarket.request(rqCodeList, self.dataInfo)\n\n # print(self.dataInfo)\n for key in self.dataInfo.keys():\n finalcode, close, vol, vol30max = get_movingaverage(key, 136)\n if finalcode:\n self.targetItems['code'].append(finalcode)\n self.targetItems['name'].append(cpCodeMgr.CodeToName(finalcode))\n self.targetItems['lastclose'].append(close)\n self.targetItems['vol'].append(vol)\n self.targetItems['lastmaxvol'].append(vol30max) \n\n return self.targetItems\n #slack.chat.post_message('#stock', ' '.join(self.targetItems))\n\nclass CpMarketEye:\n def __init__(self):\n self.objRq = win32com.client.Dispatch(\"CpSysDib.MarketEye\")\n self.RpFiledIndex = 0\n \n def request(self, codes, dataInfo):\n # 0: 종목코드 4: 현재가 10:거래량 22 : 전일거래량, 23: 전일종가\n rqField = [0, 4, 10, 22, 23] # 요청 필드\n \n self.objRq.SetInputValue(0, rqField) # 요청 필드\n self.objRq.SetInputValue(1, codes) # 종목코드 or 종목코드 리스트\n self.objRq.BlockRequest()\n \n # 현재가 통신 및 통신 에러 처리\n rqStatus = self.objRq.GetDibStatus()\n if rqStatus != 0:\n return False\n \n cnt = self.objRq.GetHeaderValue(2) # 0 : 필드개수, 1: 필드명배열, 2: 종목개수\n \n for i in range(cnt):\n code = self.objRq.GetDataValue(0, i) # 코드\n cur_price = self.objRq.GetDataValue(1, i) # 현재가\n trade_amt = self.objRq.GetDataValue(2, i) # 거래량\n bf_trade_amt = self.objRq.GetDataValue(3, i) # 전일 거래량\n bf_price = self.objRq.GetDataValue(4, i) # 전일 종가\n\n ## TODO : 전일 or 당일 오전 조회에 따른 판단 분기\n\n # 1. 전일 종가 대비 11% 이상 급등 \n per = 0\n if bf_price > 0:\n per = ((cur_price - bf_price) / bf_price) * 100.0\n\n # 2. 
전일 거래량 조건\n if trade_amt > 2000000 and per > 11.0:\n dataInfo[code] = (cur_price, trade_amt)\n \n return True\n\n","repo_name":"mongma-n/python-stock-toy-project-with-creon","sub_path":"trade/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":6641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"2860350775","text":"#coding=utf-8\nfrom base.models import BaseModel\nfrom django.db import models\n\nclass Questions_Model(BaseModel):\n project=models.CharField(u'所属项目',max_length=200,blank=False)\n question_desc=models.TextField(u'问题描述',blank=True)\n question_reason=models.TextField(u'产生问题原因',blank=True)\n question_module=models.CharField(u'问题所属模块',max_length=200,blank=False)\n question_reporter=models.CharField(u'提出问题人',max_length=200,blank=False)\n question_answer=models.CharField(u'问题跟进者',max_length=200,blank=False)\n question_progress=models.CharField(u'问题进度',max_length=200,blank=False)\n question_comments=models.CharField(u'问题备注',max_length=200,blank=False)\n is_system_cause=models.CharField(u'是否系统问题',max_length=200,blank=False)\n week=models.CharField(u'week',max_length=200,blank=False)\n\n class Meta:\n verbose_name = u'问题列表'\n verbose_name_plural = verbose_name\n\n def __unicode__(self):\n return '{project} {question_desc} {question_reason} {question_module} {question_reporter} {question_answer} {question_progress} {question_comments} {is_system_cause} {week}'.format(\n project=self.project,\n question_desc=self.question_desc,\n question_reason=self.question_reason,\n question_module=self.question_module,\n question_reporter=self.question_reporter,\n question_answer=self.question_answer,\n question_progress=self.question_progress,\n question_comments=self.question_comments,\n is_system_cause=self.is_system_cause,\n week=self.week\n )\n\nclass Project_Model(BaseModel):\n project_name=models.CharField(u'项目名称',max_length=200,blank=False)\n\n\nclass Module_Model(BaseModel):\n module_name=models.CharField(u'模块名称',max_length=200,blank=True)\n\nclass Progress_Model(BaseModel):\n progress_name=models.CharField(u'进度名称',max_length=200,blank=True)\n\n","repo_name":"longyue123/tech","sub_path":"tech/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"25494823151","text":"from bs4 import BeautifulSoup\nimport urllib2\nfrom random import choice,randint\nfrom pymongo import MongoClient\n\nurl = 'http://foodgawker.com/page/1/?s_exclude=drinks'\nrequest = urllib2.Request(url, headers={'User-Agent' : 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36'})\nsite = urllib2.urlopen(request)\nhtml = site.read()\n\nparsed_html = BeautifulSoup(html)\nmaxpage = int(parsed_html.body.find('div', attrs={'class' : 'post-section'}).attrs['data-maxpage'])\n\nfor x in xrange(1, maxpage):\n\n url = 'http://foodgawker.com/page/' + str(x) + '/?s_exclude=drinks'\n request = urllib2.Request(url, headers={'User-Agent' : 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36'})\n site = urllib2.urlopen(request)\n html = site.read()\n\n parsed_html = BeautifulSoup(html)\n\n dishes = parsed_html.body.find_all('div', attrs={'class' : 'flipwrapper'})\n\n client = MongoClient('localhost', 27017)\n db = client.recipeasy\n collection = db.recipes\n recipes = db.recipes\n\n for dish in dishes:\n 
img = dish.find('a', attrs={'class' : 'picture-link'})\n\n recipe = { 'title' : dish.attrs['data-sharetitle'],\n 'description' : dish.find('section', attrs={'class' : 'description'}).text,\n 'link' : img.attrs['href'],\n 'image' : img.find('img').attrs['src'] }\n\n recipes.insert(recipe)\n\n amtDone = float(x)/float(maxpage)\n print(\"\\rProgress: [{0:100s}] {1:.1f}% ({2}/{3})\".format('#' * int(amtDone * 50), amtDone * 100, x, maxpage)),","repo_name":"aberle/recipeasy","sub_path":"foodscrape.py","file_name":"foodscrape.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"23472306307","text":"import logging\nlogger = logging.getLogger(__name__)\n\nimport os\nimport re\nimport yaml\nimport json\nfrom comment_parser import comment_parser\n\n\nclass Parser():\n def __init__(self, threatmodel):\n\n self.threatmodel = threatmodel\n\n self.action_table = {}\n self.action_table[\"mitigate\"] = self.threatmodel.add_mitigation\n self.action_table[\"accept\"] = self.threatmodel.add_acceptance\n self.action_table[\"transfer\"] = self.threatmodel.add_transfer\n self.action_table[\"expose\"] = self.threatmodel.add_exposure\n self.action_table[\"connect\"] = self.threatmodel.add_connection\n self.action_table[\"review\"] = self.threatmodel.add_review\n self.action_table[\"test\"] = self.threatmodel.add_test\n self.action_table[\"threat\"] = self.threatmodel.add_threat\n self.action_table[\"control\"] = self.threatmodel.add_control\n self.action_table[\"component\"] = self.threatmodel.add_component\n\n self.patterns = {}\n self.patterns[\"mitigate\"] = r'@mitigates? (?P.*?) against (?P.*?) with (?P.*)'\n self.patterns[\"accept\"] = r'@accepts? (?P.*?) to (?P.*?) with (?P
.*)'\n        self.patterns[\"transfer\"] = r'@transfers? (?P<threat>.*?) from (?P<source_component>.*?) to (?P<destination_component>.*?) with (?P
.*)'\n        self.patterns[\"expose\"] = r'@exposes? (?P<component>.*?) to (?P<threat>.*?) with (?P
.*)'\n        self.patterns[\"connect\"] = r'@connects? (?P<source_component>.*?) (?P<direction>with|to) (?P<destination_component>.*?) with (?P
.*)'\n        self.patterns[\"review\"] = r'@reviews? (?P<component>.*?) (?P
.*)'\n self.patterns[\"test\"] = r'@tests? (?P.*?) for (?P.*)'\n\n self.patterns[\"threat\"] = r'@threat (?P.*)'\n self.patterns[\"control\"] = r'@control (?P.*)'\n self.patterns[\"component\"] = r'@component (?P.*)'\n\n self.cwd = os.getcwd()\n\n def run_action(self, data, source):\n action = data.pop(\"action\")\n self.action_table[action](data, source=source)\n\n def is_extended(self, line):\n return line[-1] == \":\"\n\n def is_threatspec_line(self, line):\n for key in self.patterns.keys():\n if \"@{}\".format(key) in line:\n return True\n return False\n\n def check_file(self, filename):\n logger.debug(\"Parsing file {}\".format(filename))\n if filename.startswith(self.cwd):\n return filename.replace(self.cwd, \"\", 1).lstrip(os.path.sep)\n return filename\n\n\nclass CommentParser(Parser):\n def __init__(self, threatmodel, mime=None):\n super().__init__(threatmodel)\n self.mime = mime\n\n def parse_comment(self, comment):\n annotations = []\n\n LINE = 0\n EXTENDED = 1\n\n state = LINE\n extended_lines = []\n data = None\n\n line_number = 1\n\n for line in comment.split(\"\\n\"):\n stripped_line = self.strip(line)\n if state == LINE:\n for action in self.patterns.keys():\n if stripped_line.startswith(\"@\" + action):\n data = {\"action\": action, \"line\": line_number, \"annotation\": stripped_line}\n extended_lines = []\n pattern = self.patterns[action]\n if self.is_extended(stripped_line):\n state = EXTENDED\n stripped_line = stripped_line[0:-1]\n m = re.match(pattern, stripped_line, re.M | re.I)\n if m:\n data.update(m.groupdict())\n if state == LINE:\n annotations.append(data)\n else:\n raise Exception(\"Could not parse {} pattern:\\n{} for comment line:\\n{}\".format(action, pattern, line))\n\n elif state == EXTENDED:\n if stripped_line == \"\":\n state = LINE\n self.process_extended_lines(extended_lines, data, annotations)\n extended_lines = []\n else:\n extended_lines.append(self.strip_stars(line))\n\n line_number += 1\n\n if len(extended_lines) > 0:\n self.process_extended_lines(extended_lines, data, annotations)\n\n return annotations\n\n def process_extended_lines(self, extended_lines, data, annotations):\n extended_text = \"\\n\".join(extended_lines)\n data[\"annotation\"] += \"\\n\" + extended_text\n data.update(yaml.load(extended_text, Loader=yaml.SafeLoader))\n annotations.append(data)\n\n def strip(self, line):\n return self.strip_stars(line).strip()\n\n def strip_stars(self, line):\n if self.mime not in [\"text/html\", \"text/x-shellscript\", \"text/xml\"]:\n return re.sub(r\"\\s*\\*+\", \"\", line)\n return line\n\n \nclass TextFileParser(CommentParser):\n def parse_file(self, filename):\n filename = self.check_file(filename)\n\n with open(filename) as fh:\n data = fh.read()\n\n source = {\n \"filename\": filename,\n \"code\": \"\"\n }\n\n for data in self.parse_comment(data):\n source[\"annotation\"] = data.pop(\"annotation\")\n source[\"line\"] = data.pop(\"line\")\n self.run_action(data, source)\n\n\nclass SourceFileParser(CommentParser):\n\n def __init__(self, threatmodel, mime=None):\n super().__init__(threatmodel)\n self.mime = mime\n\n def extract_comment_context(self, lines, commented_lines, start_line, num_lines, multiline=False):\n count = 0\n i = start_line\n code = []\n\n capture_first_line = not multiline\n\n for line in lines[start_line - 1:]:\n if count >= num_lines:\n return \"\".join(code)\n\n if capture_first_line:\n code.append(line)\n capture_first_line = False\n\n if i not in commented_lines:\n code.append(line)\n count += 1\n i += 1\n return 
\"\".join(code)\n\n def get_lines(self, filename):\n try:\n with open(filename) as fh:\n return fh.readlines()\n except UnicodeDecodeError:\n return None\n\n def parse_file(self, filename):\n logger.debug(\"Parsing file {}\".format(filename))\n\n lines = self.get_lines(filename)\n if not lines:\n return\n\n commented_line_numbers = []\n comments = []\n try:\n for comment in comment_parser.extract_comments(filename, self.mime):\n comment_text = comment.text()\n comment_line = comment.line_number()\n if comment.is_multiline():\n offset = len(comment_text.split(\"\\n\"))\n commented_line_numbers += range(comment_line, comment_line + offset)\n else:\n offset = 0\n commented_line_numbers.append(comment_line)\n comments.append({\n \"text\": comment_text,\n \"line\": comment_line,\n \"offset\": offset,\n \"multiline\": comment.is_multiline()\n })\n except comment_parser.UnsupportedError as e:\n print(e)\n return\n\n for comment in comments:\n comment[\"text\"] = comment[\"text\"].strip()\n num_lines = 5 # Get 5 lines of code\n code = self.extract_comment_context(lines, commented_line_numbers, comment[\"line\"] + comment[\"offset\"], num_lines, comment[\"multiline\"])\n\n source = {\n \"code\": code,\n \"filename\": filename\n }\n\n annotations = self.parse_comment(comment[\"text\"])\n if annotations:\n for data in annotations:\n source[\"line\"] = data.pop(\"line\")\n source[\"annotation\"] = data.pop(\"annotation\")\n self.run_action(data, source)\n\n\nclass YamlFileParser(Parser):\n def parse_annotation(self, annotation, data={}):\n stripped_line = annotation.strip()\n for action in self.patterns.keys():\n if stripped_line.startswith(\"@\" + action):\n data[\"action\"] = action\n pattern = self.patterns[action]\n m = re.match(pattern, stripped_line, re.M | re.I)\n if m:\n data.update(m.groupdict())\n return data\n else:\n raise Exception(\"Could not parse {} pattern:\\n{} for comment line:\\n{}\".format(action, pattern, stripped_line))\n\n def parse_key(self, data, parent, filename):\n if isinstance(data, str):\n annotation = self.parse_annotation(data)\n source = {\n \"annotation\": data,\n \"code\": json.dumps(parent, indent=2),\n \"filename\": filename,\n \"line\": 0\n }\n self.run_action(annotation, source)\n elif isinstance(data, list):\n for v in data:\n if not isinstance(v, str):\n raise Exception(\"Invalid value type for x-threatspec list in {}\".format(filename))\n annotation = self.parse_annotation(v)\n source = {\n \"annotation\": v,\n \"code\": json.dumps(parent, indent=2),\n \"filename\": filename,\n \"line\": 0\n }\n self.run_action(annotation, source)\n elif isinstance(data, dict):\n for k, v in data.items():\n annotation = self.parse_annotation(k, v)\n source = {\n \"annotation\": k,\n \"code\": json.dumps(parent, indent=2),\n \"filename\": filename,\n \"line\": 0\n }\n self.run_action(annotation, source)\n\n def parse_data(self, data, parent, filename):\n if isinstance(data, dict):\n for k, v in data.items():\n if k == \"x-threatspec\":\n self.parse_key(v, data, filename)\n else:\n self.parse_data(v, data, filename)\n elif isinstance(data, list):\n for v in data:\n self.parse_data(v, data, filename)\n\n def parse_file(self, filename):\n filename = self.check_file(filename)\n\n with open(filename) as fh:\n file_data = yaml.load(fh, Loader=yaml.SafeLoader)\n self.parse_data(file_data, {}, 
filename)\n","repo_name":"threatspec/threatspec","sub_path":"threatspec/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":10716,"program_lang":"python","lang":"en","doc_type":"code","stars":293,"dataset":"github-code","pt":"46"} +{"seq_id":"29946361664","text":"\"\"\"Provides a ChatBot UI for a Github Repository. Powered by Llama Index and Panel\"\"\"\nimport os\nimport pickle\nfrom pathlib import Path\n\nimport nest_asyncio\nimport panel as pn\nimport param\nfrom llama_index import VectorStoreIndex, download_loader\n\nfrom llama_hub.github_repo import GithubClient, GithubRepositoryReader\n\n# needed because both Panel and GithubRepositoryReader starts up the ioloop\nnest_asyncio.apply()\n\nCACHE_PATH = Path(\".cache/panel_chatbot\")\nCACHE_PATH.mkdir(parents=True, exist_ok=True)\n\nCHAT_GPT_LOGO = \"https://upload.wikimedia.org/wikipedia/commons/thumb/0/04/ChatGPT_logo.svg/512px-ChatGPT_logo.svg.png\"\nCHAT_GPT_URL = \"https://chat.openai.com/\"\nLLAMA_INDEX_LOGO = (\n \"https://cdn-images-1.medium.com/max/280/1*_mrG8FG_LiD23x0-mEtUkw@2x.jpeg\"\n)\nPANEL_LOGO = {\n \"default\": \"https://panel.holoviz.org/_static/logo_horizontal_light_theme.png\",\n \"dark\": \"https://panel.holoviz.org/_static/logo_horizontal_dark_theme.png\",\n}\n\nGITHUB_LOGO = \"https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png\"\nGITHUB_URL = \"https://github.com/\"\nLLAMA_INDEX_URL = \"https://www.llamaindex.ai/\"\nPANEL_URL = \"https://panel.holoviz.org/index.html\"\nGITHUB_COPILOT_LOGO = (\n \"https://plugins.jetbrains.com/files/17718/447537/icon/pluginIcon.svg\"\n)\n\nINDEX_NOT_LOADED = \"No repository loaded\"\nINDEX_LOADED = \"Repository loaded\"\nLOADING_EXISTING_DOCS = \"Loading existing docs\"\nLOADING_NEW_DOCS = \"Downloading documents\"\nLOADING_EXISTING_INDEX = \"Loading existing index\"\nLOADING_NEW_INDEX = \"Creating index\"\nCUTE_LLAMA = \"https://raw.githubusercontent.com/run-llama/llama-hub/main/llama_hub/llama_packs/panel_chatbot/llama_by_sophia_yang.png\"\nCUTE_LLAMA_URL = \"https://x.com/sophiamyang/status/1729810715467252080?s=20\"\n\npn.chat.ChatMessage.default_avatars.update(\n {\n \"assistant\": GITHUB_COPILOT_LOGO,\n \"user\": \"🦙\",\n }\n)\npn.chat.ChatMessage.show_reaction_icons = False\n\nACCENT = \"#ec4899\"\n\nCSS_FIXES_TO_BE_UPSTREAMED_TO_PANEL = \"\"\"\n#sidebar {\n padding-left: 5px !important;\n background-color: var(--panel-surface-color);\n}\n.pn-wrapper {\n height: calc( 100vh - 150px);\n}\n.bk-active.bk-btn-primary {border-color: var(--accent-fill-active)}\n.bk-btn-primary:hover {border-color: var(--accent-fill-hover)}\n.bk-btn-primary {border-color: var(--accent-fill-rest)}\na {color: var(--accent-fill-rest) !important;}\na:hover {color: var(--accent-fill-hover) !important;}\n\"\"\"\n\n\ndef _split_and_clean(cstext):\n return cstext.split(\",\")\n\n\nclass IndexLoader(pn.viewable.Viewer):\n \"\"\"The IndexLoader enables the user to interactively create a VectorStoreIndex from a\n github repository of choice\"\"\"\n\n value: VectorStoreIndex = param.ClassSelector(class_=VectorStoreIndex)\n\n status = param.String(constant=True, doc=\"A status message\")\n\n owner: str = param.String(\n default=\"holoviz\", doc=\"The repository owner. For example 'holoviz'\"\n )\n repo: str = param.String(\n default=\"panel\", doc=\"The repository name. For example 'panel'\"\n )\n filter_directories: str = param.String(\n default=\"examples,docs,panel\",\n label=\"Folders\",\n doc=\"A comma separated list of folders to include. 
For example 'examples,docs,panel'\",\n )\n filter_file_extensions: str = param.String(\n default=\".py,.md,.ipynb\",\n label=\"File Extensions\",\n doc=\"A comma separated list of file extensions to include. For example '.py,.md,.ipynb'\",\n )\n\n _load = param.Event(\n label=\"LOAD\",\n doc=\"Loads the repository index from the cache if it exists and otherwise from scratch\",\n )\n _reload = param.Event(\n default=False,\n label=\"RELOAD ALL\",\n doc=\"Loads the repository index from scratch\",\n )\n\n def __init__(self):\n super().__init__()\n\n if self.index_exists:\n pn.state.execute(self.load)\n else:\n self._update_status(INDEX_NOT_LOADED)\n\n self._layout = pn.Column(\n self.param.owner,\n self.param.repo,\n self.param.filter_directories,\n self.param.filter_file_extensions,\n pn.pane.HTML(self.github_url),\n pn.widgets.Button.from_param(\n self.param._load,\n button_type=\"primary\",\n disabled=self._is_loading,\n loading=self._is_loading,\n ),\n pn.widgets.Button.from_param(\n self.param._reload,\n button_type=\"primary\",\n button_style=\"outline\",\n disabled=self._is_loading,\n loading=self._is_loading,\n ),\n pn.pane.Markdown(\"### Status\", margin=(3, 5)),\n pn.pane.Str(self.param.status),\n )\n\n def __panel__(self):\n return self._layout\n\n @property\n def _unique_id(self):\n uid = (\n self.owner\n + self.repo\n + self.filter_directories\n + self.filter_file_extensions\n )\n uid = uid.replace(\",\", \"\").replace(\".\", \"\")\n return uid\n\n @property\n def _cached_docs_path(self):\n return CACHE_PATH / f\"docs_{self._unique_id}.pickle\"\n\n @property\n def _cached_index_path(self):\n return CACHE_PATH / f\"index_{self._unique_id}.pickle\"\n\n async def _download_docs(self):\n download_loader(\"GithubRepositoryReader\")\n\n github_client = GithubClient(os.getenv(\"GITHUB_TOKEN\"))\n\n filter_directories = _split_and_clean(self.filter_directories)\n filter_file_extensions = _split_and_clean(self.filter_file_extensions)\n\n loader = GithubRepositoryReader(\n github_client,\n owner=self.owner,\n repo=self.repo,\n filter_directories=(\n filter_directories,\n GithubRepositoryReader.FilterType.INCLUDE,\n ),\n filter_file_extensions=(\n filter_file_extensions,\n GithubRepositoryReader.FilterType.INCLUDE,\n ),\n verbose=True,\n concurrent_requests=10,\n )\n return loader.load_data(branch=\"main\")\n\n async def _get_docs(self):\n docs_path = self._cached_docs_path\n index_path = self._cached_index_path\n\n if docs_path.exists():\n self._update_status(LOADING_EXISTING_DOCS)\n with docs_path.open(\"rb\") as f:\n return pickle.load(f)\n\n self._update_status(LOADING_NEW_DOCS)\n docs = await self._download_docs()\n\n with docs_path.open(\"wb\") as f:\n pickle.dump(docs, f, pickle.HIGHEST_PROTOCOL)\n\n if index_path.exists():\n index_path.unlink()\n\n return docs\n\n async def _create_index(self, docs):\n return VectorStoreIndex.from_documents(docs, use_async=True)\n\n async def _get_index(self, index):\n index_path = self._cached_index_path\n\n if index_path.exists():\n self._update_status(LOADING_EXISTING_INDEX)\n with index_path.open(\"rb\") as f:\n return pickle.load(f)\n\n self._update_status(LOADING_NEW_INDEX)\n index = await self._create_index(index)\n\n with index_path.open(\"wb\") as f:\n pickle.dump(index, f, pickle.HIGHEST_PROTOCOL)\n return index\n\n @param.depends(\"status\")\n def _is_loading(self):\n return self.status not in [INDEX_LOADED, INDEX_NOT_LOADED]\n\n @param.depends(\"status\")\n def _is_not_loading(self):\n return self.status in [INDEX_LOADED, 
INDEX_NOT_LOADED]\n\n @param.depends(\"_load\", watch=True)\n async def load(self):\n \"\"\"Loads the repository index either from the cache or by downloading from\n the repository\"\"\"\n self._update_status(\"Loading ...\")\n self.value = None\n\n docs = await self._get_docs()\n self.value = await self._get_index(docs)\n self._update_status(INDEX_LOADED)\n\n @param.depends(\"_reload\", watch=True)\n async def reload(self):\n self._update_status(\"Deleteing cached index ...\")\n if self._cached_docs_path.exists():\n self._cached_docs_path.unlink()\n if self._cached_index_path.exists():\n self._cached_index_path.unlink()\n\n await self.load()\n\n def _update_status(self, text):\n with param.edit_constant(self):\n self.status = text\n print(text)\n\n @param.depends(\"owner\", \"repo\")\n def github_url(self):\n \"\"\"Returns a html string with a link to the github repository\"\"\"\n text = f\"{self.owner}/{self.repo}\"\n href = f\"https://github.com/{text}\"\n return f\"{text}\"\n\n @property\n def index_exists(self):\n \"\"\"Returns True if the index already exists\"\"\"\n return self._cached_docs_path.exists() and self._cached_index_path.exists()\n\n\ndef powered_by():\n \"\"\"Returns a component describing the frameworks powering the chat ui\"\"\"\n params = {\"height\": 40, \"sizing_mode\": \"fixed\", \"margin\": (0, 10)}\n return pn.Column(\n pn.pane.Markdown(\"### AI Powered By\", margin=(10, 5, 10, 0)),\n pn.Row(\n pn.pane.Image(LLAMA_INDEX_LOGO, link_url=LLAMA_INDEX_URL, **params),\n pn.pane.Image(CHAT_GPT_LOGO, link_url=CHAT_GPT_URL, **params),\n pn.pane.Image(PANEL_LOGO[pn.config.theme], link_url=PANEL_URL, **params),\n align=\"center\",\n ),\n )\n\n\nasync def chat_component(index: VectorStoreIndex, index_loader: IndexLoader):\n \"\"\"Returns the chat component powering the main area of the application\"\"\"\n if not index:\n return pn.Column(\n pn.chat.ChatMessage(\n \"You are a now a *GitHub Repository assistant*.\",\n user=\"System\",\n ),\n pn.chat.ChatMessage(\n \"Please **load a GitHub Repository** to start chatting with me. This can take from seconds to minutes!\",\n user=\"Assistant\",\n ),\n )\n\n chat_engine = index.as_chat_engine(chat_mode=\"context\", verbose=True)\n\n async def generate_response(contents, user, instance):\n response = await chat_engine.astream_chat(contents)\n text = \"\"\n async for token in response.async_response_gen():\n text += token\n yield text\n\n chat_interface = pn.chat.ChatInterface(\n callback=generate_response,\n sizing_mode=\"stretch_both\",\n )\n chat_interface.send(\n pn.chat.ChatMessage(\n \"You are a now a *GitHub Repository Assistant*.\", user=\"System\"\n ),\n respond=False,\n )\n chat_interface.send(\n pn.chat.ChatMessage(\n f\"Hello! 
you can ask me anything about {index_loader.github_url()}.\",\n user=\"Assistant\",\n ),\n respond=False,\n )\n return chat_interface\n\n\ndef settings_components(index_loader: IndexLoader):\n \"\"\"Returns a list of the components to add to the sidebar\"\"\"\n return [\n pn.pane.Image(\n CUTE_LLAMA,\n height=250,\n align=\"center\",\n margin=(10, 5, 25, 5),\n link_url=CUTE_LLAMA_URL,\n ),\n \"## Github Repository\",\n index_loader,\n powered_by(),\n ]\n\n\ndef create_chat_ui():\n \"\"\"Returns the Chat UI\"\"\"\n pn.extension(\n sizing_mode=\"stretch_width\", raw_css=[CSS_FIXES_TO_BE_UPSTREAMED_TO_PANEL]\n )\n\n index_loader = IndexLoader()\n\n pn.state.location.sync(\n index_loader,\n parameters={\n \"owner\": \"owner\",\n \"repo\": \"repo\",\n \"filter_directories\": \"folders\",\n \"filter_file_extensions\": \"file_extensions\",\n },\n )\n\n bound_chat_interface = pn.bind(\n chat_component, index=index_loader.param.value, index_loader=index_loader\n )\n\n return pn.template.FastListTemplate(\n title=\"Chat with GitHub\",\n sidebar=settings_components(index_loader),\n main=[bound_chat_interface],\n accent=ACCENT,\n main_max_width=\"1000px\",\n main_layout=None,\n )\n\n\nif pn.state.served:\n create_chat_ui().servable()\n","repo_name":"run-llama/llama-hub","sub_path":"llama_hub/llama_packs/panel_chatbot/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12046,"program_lang":"python","lang":"en","doc_type":"code","stars":2565,"dataset":"github-code","pt":"46"} +{"seq_id":"15257226468","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import layers\nimport tensorflow_hub as hub\nfrom collections import deque\nimport random\nimport math\nfrom tensorflow.keras import backend as K\n\n\"\"\"\nwe add two lines:\n\ne= tf.keras.backend.max(y_true,axis = -1)\ny_pred*= K.stack([e]*8, axis=-1)\n \nto make the positions which doesn't contain neither unit or city by zero in the prediction probabilities, in order to focus only on the main occupied positions.\n\"\"\"\n\ndef custom_mean_squared_error(y_true, y_pred):\n y_units_true = y_true[:,:,:,:6]\n y_cities_true = y_true[:,:,:,6:]\n\n y_units_pred = y_pred[:,:,:,:6]\n y_cities_pred = y_pred[:,:,:,6:]\n\n\n is_unit = tf.keras.backend.max(y_units_true,axis = -1)\n is_city = tf.keras.backend.max(y_cities_true,axis = -1)\n\n y_units_pred*= K.stack([is_unit]*6, axis=-1)\n y_cities_pred*= K.stack([is_city]*2, axis=-1)\n\n loss1 = K.square(y_units_pred - y_units_true)#/K.sum(is_unit)\n loss2 = K.square(y_cities_pred - y_cities_true)#/K.sum(is_city)\n return K.concatenate([loss1,loss2])\n\ndef units_accuracy(y_true, y_pred):\n y_units_true = y_true[:,:,:,:6]\n y_cities_true = y_true[:,:,:,6:]\n\n y_units_pred = y_pred[:,:,:,:6]\n y_cities_pred = y_pred[:,:,:,6:]\n\n is_unit = tf.keras.backend.max(y_units_true,axis = -1)\n y_units_pred*= K.stack([is_unit]*6, axis=-1)\n return K.cast(K.equal(y_units_true, K.round(y_units_pred)), \"float32\")/K.sum(is_unit)\n\ndef cities_accuracy(y_true, y_pred):\n y_units_true = y_true[:,:,:,:6]\n y_cities_true = y_true[:,:,:,6:]\n\n y_units_pred = y_pred[:,:,:,:6]\n y_cities_pred = y_pred[:,:,:,6:]\n\n is_city = tf.keras.backend.max(y_cities_true,axis = -1)\n y_cities_pred*= K.stack([is_city]*2, axis=-1)\n\n return K.cast(K.equal(y_cities_true, K.round(y_cities_pred)), \"float32\")/K.sum(is_city)\n\n\ndef get_model(s):\n inputs = keras.Input(shape=(s,s,17),name = 'The game map')\n f = layers.Flatten()(inputs)\n h,w= s,s\n f = layers.Dense(w*h,activation = 
\"sigmoid\")(f)\n f = layers.Reshape((h,w,-1))(f)\n units = layers.Dense(6,activation = \"softmax\",name = \"Units_actions\")(f)\n cities = layers.Dense(2,activation = \"sigmoid\",name = \"Cities_actions\")(f)\n output = layers.Concatenate()([units,cities])\n model = keras.Model(inputs = inputs, outputs = output)\n model.compile(optimizer= \"adam\", loss= custom_mean_squared_error ,metrics = [\"accuracy\"])\n\n return model\n\n\nmodel =get_model(12)\nmodel.summary()\n\ntf.keras.utils.plot_model(\n model,\n to_file=\"model.png\",\n show_shapes=1,\n show_dtype=1,\n show_layer_names=True,\n rankdir=\"TB\",\n expand_nested=False,\n dpi=96)","repo_name":"Ruben1701/Kaggle_Ai_Challenge","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"15474204709","text":"from msilib.schema import Error\nfrom typing import Dict\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom ObjectNotExistException import ObjectNotExistException\nimport jwt_utils\nfrom question import Question\nfrom answer import Answer\nfrom dbhelper import DBHelper\nimport bcrypt\napp = Flask(__name__)\nCORS(app)\n\nusername_mdp = {\"admin\": \"Vive l'ESIEE !\"}\n\n###\n# AUTHENTICATION\n###\n\ndef check_token(token):\n try:\n token = token.split()\n if token[0] == \"Bearer\":\n token = token[1]\n return token\n except Exception as e:\n return e\n\n@app.route('/login', methods=['POST'])\ndef login():\n try:\n payload = request.get_json()\n username = payload[\"username\"]\n \n dbHelper = DBHelper()\n \n new_user = False\n\n password_hash = dbHelper.get_player_password_hash(username)\n if password_hash is None:\n new_user = True\n else:\n password_hash = password_hash.encode()\n password = payload[\"password\"]\n \n if new_user:\n password_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt())\n dbHelper.add_player(username, password_hash.decode())\n\n if bcrypt.checkpw(password.encode(), password_hash):\n token = jwt_utils.build_token(username)\n return {\"token\": token}, 200\n except Exception as e:\n return '', 401\n return '', 401\n\n@app.route('/is-logged/', methods=['GET'])\ndef is_logged(username):\n if username == 'null':\n return {\"isLogged\": False}, 401\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n except AttributeError as e:\n return {\"isLogged\": False}, 401\n try:\n # check if the token is valid\n if jwt_utils.decode_token(token) == username:\n return {\"isLogged\": True}, 200\n else:\n return {\"isLogged\": False}, 401\n except jwt_utils.JwtError as e:\n return {\"isLogged\": False}, 401\n except Exception as e:\n return '', 401\n\n\n###\n# QUESTIONS\n###\n\n@app.route('/questions', methods=['POST'])\ndef add_question():\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n # check if the token is valid\n payload = request.get_json()\n if jwt_utils.decode_token(token) == \"admin\":\n\n question = Question(\n payload['title'], payload['text'], payload['image'], payload['position'])\n\n dbHelper = DBHelper()\n dbHelper.insert_question(question, payload['possibleAnswers'])\n return '', 200\n else:\n return '', 401\n except jwt_utils.JwtError as e:\n return e.message, 401\n\n@app.route('/questions/', methods=['GET'])\ndef get_question(position):\n try:\n dbHelper = DBHelper()\n question = dbHelper.get_question(position)\n\n if question is None:\n return '', 404\n\n ret = 
question.convertToJson()\n return ret, 200\n\n except ObjectNotExistException as e_custom:\n return e_custom.message, 404\n except Exception as e_base:\n return e_base.message, 404\n\n@app.route('/questions', methods=['GET'])\ndef get_questions():\n try:\n dbHelper = DBHelper()\n questions = dbHelper.get_questions()\n\n if questions is None:\n return '', 404\n return {\"questions\": questions}, 200\n\n except ObjectNotExistException as e_custom:\n return e_custom.message, 404\n except Exception as e_base:\n return e_base.message, 404\n\n@app.route('/questions/', methods=['DELETE'])\ndef delete_quetion(position):\n\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n if jwt_utils.decode_token(token) == \"admin\":\n\n dbHelper = DBHelper()\n dbHelper.delete_question(int(position))\n return '', 204\n else:\n return '', 401\n except jwt_utils.JwtError as e:\n return e.message, 401\n except ObjectNotExistException as e_custom:\n return e_custom.message, 404\n except Exception as e_base:\n return e_base.message, 404\n\n@app.route('/questions/', methods=['PUT'])\ndef update_question(position):\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n # check if the token is valid\n if jwt_utils.decode_token(token) == \"admin\":\n\n payload = request.get_json()\n new_position = int(payload['position'])\n question = Question(\n payload['title'], payload['text'], payload['image'], int(position))\n dbHelper = DBHelper()\n\n dbHelper.update_question(\n new_position, question, payload['possibleAnswers'])\n\n return '', 200\n else:\n return '', 401\n except ObjectNotExistException as e_custom:\n return e_custom.message, 404\n except jwt_utils.JwtError as e:\n return e.message, 401\n\n\n###\n# PARTICIPATIONS\n###\n\n@app.route('/participations', methods=['POST'])\ndef set_participation():\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n payload = request.get_json()\n username = payload['username']\n answersId = payload['answers']\n # check if the token is valid\n\n if jwt_utils.decode_token(token) == username:\n\n dbHelper = DBHelper()\n\n question_count = dbHelper.get_question_count()\n\n if (question_count != len(answersId)):\n return \"Bad request\", 400\n\n correct_participation = dbHelper.get_correct_participation()\n if correct_participation is None:\n return \"Bad request\", 400\n \n score = 0\n\n for i in range(question_count):\n if correct_participation[i] == answersId[i]:\n score += 1\n\n dbHelper.set_score(username, score)\n\n result = {\"username\": username, \"score\": score}\n\n return result, 200\n else:\n return '', 401\n except jwt_utils.JwtError as e:\n return e.message, 401\n\n@app.route('/participations', methods=['DELETE'])\ndef delete_participations():\n\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n if jwt_utils.decode_token(token) == \"admin\":\n\n dbHelper = DBHelper()\n dbHelper.delete_participations()\n return 'ok deleted', 204\n else:\n return '', 401\n\n except jwt_utils.JwtError as e:\n return e.message, 401\n except Exception as e:\n return e.message, 401\n\n\n###\n# GET INFO\n###\n\n@app.route('/quiz-info', methods=['GET'])\ndef get_quiz_info():\n try:\n dbHelper = DBHelper()\n scores = dbHelper.get_players_score()\n numberQuestions = dbHelper.get_question_count()\n\n return {\"size\": numberQuestions, \"scores\": scores}, 200\n except Exception as e:\n return e.message, 401\n\n@app.route('/questions-count', methods=['GET'])\ndef 
get_question_count():\n try:\n dbHelper = DBHelper()\n count = dbHelper.get_question_count()\n return {\"count\": count}, 200\n except Exception as e:\n return e.message, 401\n\n@app.route('/get-last-score/', methods=['GET'])\ndef get_last_score(username):\n\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n if jwt_utils.decode_token(token) == username:\n dbHelper = DBHelper()\n score = dbHelper.get_last_score(username)\n if score == -1:\n return '', 404\n return {\"score\": score}, 200\n else:\n return '', 401\n except jwt_utils.JwtError as e:\n return e.message, 401\n except Exception as e:\n return e.message, 401\n\n@app.route('/get-best-score/', methods=['GET'])\ndef get_best_score(username):\n try:\n token = request.headers.get('Authorization')\n token = check_token(token)\n if jwt_utils.decode_token(token) == username:\n dbHelper = DBHelper()\n score = dbHelper.get_best_score(username)\n if score == -1:\n return '', 404\n return {\"score\": score}, 200\n else:\n return '', 401\n except jwt_utils.JwtError as e:\n return e.message, 401\n except Exception as e:\n return e.message, 401\n\n@app.route('/questions//answers', methods=['GET'])\ndef get_answer(position):\n try:\n dbHelper = DBHelper()\n answers = dbHelper.get_answer(position)\n if answers is None:\n return '', 404\n \n for key in answers:\n answers[key].pop('questionID', None)\n answers[key].pop('isCorrect', None)\n return {\"answers\", answers}, 200\n except Exception as e:\n return e.message, 401\n \nif __name__ == \"__main__\":\n app.run()\n","repo_name":"Sangerwan/quiz-app","sub_path":"quiz-api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"32142811143","text":"def topKFrequent(nums: list[int], k: int) -> list[int]:\n count = {}\n freq = [[] for i in range(len(nums) + 1)]\n\n for n in nums:\n count[n] = 1 + count.get(n, 0)\n \n for n, c in count.items():\n freq[c].append(n)\n\n res = []\n for i in range(len(freq) - 1, 0, -1):\n for n in freq[i]:\n res.append(n)\n if len(res) == k:\n return res\n\n\n\n\n\nif __name__ == \"__main__\":\n\n nums1 = [1,1,1,2,2,3]\n k1 = 2\n\n nums2 = [1]\n k2 = 1\n \n X = topKFrequent(nums1, k1)\n print(X)\n\n Y = topKFrequent(nums2, k2)\n print(Y)","repo_name":"Mohammedvaraliya/Data-Structures-And-Algorithms","sub_path":"Arrays/15_top_k_frequent_element.py","file_name":"15_top_k_frequent_element.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"46"} +{"seq_id":"17196509488","text":"from typing import List\nfrom collections import deque\n\nclass Solution:\n def numIslands(self, grid: List[List[str]], method='bfs') -> int:\n if method == 'union_joint':\n return self.numIslandsUnionJoint(grid)\n if method == 'dfs':\n return self.numIslandsDfs(grid)\n if method == 'bfs':\n return self.numIslandsBfs(grid)\n\n def numIslandsUnionJoint(self, grid):\n def find(x):\n nonlocal pa\n while x != pa[x]:\n x = pa[x]\n return x\n\n def union_joint(x, y, nr_groups):\n nonlocal pa\n nonlocal size\n\n root_x, root_y = find(x), find(y)\n if root_x == root_y:\n return nr_groups\n else:\n if size[root_x] > size[root_y]:\n pa[root_y] = root_x\n size[root_x] += size[root_y]\n else:\n pa[root_x] = root_y\n size[root_y] += root_x\n return nr_groups - 1\n\n m, n = len(grid), len(grid[0])\n nr_groups = m * n\n pa = [i for i in range(nr_groups)]\n size = [1] * 
nr_groups\n nr_land_cell = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1':\n nr_land_cell += 1\n for direction in [[1,0],[0,1]]:\n new_i = i + direction[0]\n new_j = j + direction[1]\n if new_i < m and new_j < n and grid[new_i][new_j] == '1':\n nr_groups = union_joint(i * n + j, new_i * n + new_j, nr_groups)\n return nr_groups - (n*m - nr_land_cell)\n\n\n def numIslandsDfs(self, grid):\n def dfs(i, j, m, n, visited):\n nonlocal grid\n visited[i][j] = 1\n for dir in [[-1, 0], [1, 0], [0, -1], [0, 1]]:\n new_i = i + dir[0]\n new_j = j + dir[1]\n if -1 < new_i < m and -1 < new_j < n:\n if visited[new_i][new_j] == 0 and grid[new_i][new_j] == '1':\n dfs(new_i, new_j, m, n, visited)\n\n m, n = len(grid), len(grid[0])\n visited = [[0] * n for _ in range(m)]\n nr_island = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '0':\n visited[i][j] = 1\n continue\n if visited[i][j] == 1:\n continue\n dfs(i, j, m, n, visited)\n nr_island += 1\n return nr_island\n\n def numIslandsBfs(self, grid):\n def bfs(x, y, m, n, visited):\n nonlocal grid\n cell_queue = deque([(x, y)])\n visited[x][y] = 1\n while cell_queue:\n pos = cell_queue.popleft()\n i, j = pos\n for dir in [[-1, 0], [1, 0], [0, -1], [0, 1]]:\n new_i = i + dir[0]\n new_j = j + dir[1]\n if -1 < new_i < m and -1 < new_j < n:\n if visited[new_i][new_j] == 0 and grid[new_i][new_j] == '1':\n visited[new_i][new_j] = 1\n cell_queue.append((new_i, new_j))\n\n m, n = len(grid), len(grid[0])\n visited = [[0] * n for _ in range(m)]\n nr_island = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '0':\n visited[i][j] = 1\n continue\n if visited[i][j] == 1:\n continue\n bfs(i, j, m, n, visited)\n nr_island += 1\n return nr_island\n\n\nsol = Solution()\ncases = [\n {\n \"input\": [\n [\"1\",\"1\",\"1\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"0\",\"0\"]\n ],\n \"expect\": 1\n },\n {\n \"input\": [\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"1\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"1\",\"1\"]\n ],\n \"expect\": 3\n },\n]\n\nfor case in cases:\n result = sol.numIslands(case[\"input\"])\n print(case[\"input\"], result)\n assert result == case['expect']\n","repo_name":"tilaboy/work_note","sub_path":"algorithms/leetcode_weekly/disjoint_union/200_Number_of_Islands.py","file_name":"200_Number_of_Islands.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"11902651207","text":"import requests\r\nimport openpyxl\r\n\r\n# Open a workbook and the active sheet\r\nwb = openpyxl.Workbook()\r\nws = wb.active\r\nws.title = \"Farmers Markets\"\r\n\r\ndef get_business_lists(city, last_row):\r\n print(city)\r\n\r\n # API Definition\r\n # my_API_Key = \"insert Yelp API key\"\r\n endPoint = \"https://api.yelp.com/v3/businesses/search\"\r\n api_headers = {'Authorization': 'bearer {}'.format(my_API_Key)}\r\n\r\n # Parameters\r\n offset = 0\r\n limit = 5\r\n total = 1000\r\n\r\n # Variable declarations\r\n biz_name = []\r\n biz_address = []\r\n biz_phone = []\r\n biz_reviews = []\r\n\r\n while offset < total:\r\n parameters = {\"term\": \"Farmers Markets\",\r\n \"limit\": limit,\r\n \"location\": city,\r\n \"sort_by\": \"rating\",\r\n \"offset\": offset}\r\n\r\n # Make the request and then convert the json to a dictionary\r\n businesses_json_response = requests.get(url=endPoint, params=parameters, headers=api_headers)\r\n 
businesses = businesses_json_response.json()\r\n\r\n # Update Total\r\n try:\r\n total = businesses[\"total\"]\r\n # print(len(businesses[\"businesses\"]))\r\n except:\r\n print(\"Total is less than {}\".format(limit))\r\n break\r\n\r\n # Append business names, addresses, and phones to their respective lists\r\n for business in businesses[\"businesses\"]:\r\n biz_name.append(business[\"name\"])\r\n biz_address.append(business[\"location\"][\"address1\"])\r\n biz_phone.append(business['display_phone'])\r\n biz_reviews.append(business['review_count'])\r\n\r\n # Write data of excel function\r\n last_row = print_to_excel(biz_name, biz_address, biz_phone, biz_reviews, city,last_row)\r\n\r\n # print(\"Total = {}; Limit = {}; Offset = {}\".format(total, limit, offset))\r\n\r\n if total < limit:\r\n limit = total\r\n offset += limit\r\n elif total >= limit:\r\n offset += limit\r\n\r\n return last_row\r\n\r\n\r\ndef print_to_excel(names, addresses, phones, reviews, city, last_row):\r\n # Write data to excel\r\n elem = 0\r\n while elem < len(names):\r\n ws.cell(row = last_row + elem + 1, column = 1).value = names[elem]\r\n ws.cell(row = last_row + elem + 1, column = 2).value = addresses[elem]\r\n ws.cell(row = last_row + elem + 1, column = 3).value = phones[elem]\r\n ws.cell(row = last_row + elem + 1, column = 4).value = city\r\n ws.cell(row = last_row + elem + 1, column = 5).value = \"ID\"\r\n ws.cell(row = last_row + elem + 1, column = 6).value = reviews[elem]\r\n elem += 1\r\n\r\n last_row = ws.max_row\r\n\r\n return last_row\r\n\r\n\r\ndef main():\r\n\r\n MSA_Columbus = (\r\n \"Columbus\", \"Dublin\", \"Newark\", \"Delaware\", \"Lancaster\", \"Pickerington\", \"London\", \"Marysville\", \"Circleville\",\r\n \"Marion\", \"Zanesville\", \"Chillicothe\", \"New Lexington\", \"Cambridge\", \"Washington Court House\")\r\n MSA_Dayton = (\"Centerville\", \"Dayton\", \"Kettering\", \"Beavercreek\", \"Huber Heights\", \"Fairborn\", \"Miamisburg\",\r\n \"West Carrollton\", \"Springfield\", \"Urbana\", \"Greenville\", \"Sidney\")\r\n MSA_Indianapolis = (\"Indianapolis\", \"Carmel\", \"Fishers\", \"Noblesville\", \"Greenwood\", \"Anderson\", \"Lawrence\",\r\n \"Westfield\", \"Plainfield\", \"Zionsville\", \"Brownsburg\", \"Franklin\", \"Greenfrield\", \"Shelbyville\",\r\n \"Avon\", \"Lebanon\", \"Beech Grove\", \"Speedway\", \"Martinsville\",\r\n \"Greencastle\",\"Danville\", \"Moorseville\")\r\n\r\n last_row = 0 # Keeps track of position in Excel\r\n for city in MSA_Indianapolis:\r\n last_row = get_business_lists(city, last_row)\r\n\r\n\r\n# BEGIN PROGRAM\r\nmain()\r\nwb.save(\"MSA_Indianapolis_Farmers_Markets.xlsx\")\r\n","repo_name":"Mhauga/Extraction","sub_path":"Extraction.py","file_name":"Extraction.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"12317841946","text":"import json\nfrom numpy import array\n\nif __name__ == \"__main__\" :\n\n file=True\n with open(\"stickers_vectors_generation.json\") as file:\n generation = json.load(file)\n with open(\"main_data.json\") as file:\n data = json.load(file)\n\n corners = generation['C']\n edges = generation['E']\n faces = data['faces']\n\n print('{\\n \"C\": {')\n i = 0\n for corner in corners:\n i += 1\n val = array([0, 0, 0])\n for letter in corner:\n if letter != ' ':\n val += faces[letter]\n print(' \"' + str(i) + '\": [' + str(val[0]) + ', ' + str(val[1]) + ', ' + str(val[2]) + ('],' if i < 24 else ']'))\n\n print(' },\\n \"E\": {')\n i = 0\n for 
edge in edges:\n i += 1\n val = array([0, 0, 0])\n for letter in edge:\n if letter != ' ':\n val += faces[letter]\n print(' \"' + str(i) + '\": [' + str(val[0]) + ', ' + str(val[1]) + ', ' + str(val[2]) + ('],' if i < 24 else ']'))\n print(' }\\n}')\n","repo_name":"juliengiraud/Rubik-s_Cube","sub_path":"CircuitBuilder/stickersVectorsGeneration.py","file_name":"stickersVectorsGeneration.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"32925431759","text":"from lib.io import *\nfrom lib.file import *\nfrom lib.hue import *\nfrom requests import get\nfrom urllib import parse\nfrom json import loads\nimport time\nendl = \"\\n\"\n\nclass dork:\n\n\tdef help():\n\t\tprint(\"\")\n\t\tio.row(25, [\"dork -h\", \"Attributes list for this command.\"])\n\t\tio.row(25, [\"dork -l\", \"List all available dork files.\"])\n\t\tio.row(25, [\"dork -r [FILE]\", \"List all dorks within a file.\"])\n\t\tio.row(25, [\"dork -u [FILE] [ID]\", \"Use dork within a file.\"])\n\t\tprint(\"\")\n\n\tdef list():\n\t\tfiles = file.list(\"dorks\")\n\t\tif len(files) != 0:\n\t\t\tprint(\"\")\n\t\t\tfor f in files:\n\t\t\t\tprint(f[0:-4])\n\t\t\tprint(\"\")\n\t\telse:\n\t\t\tio.error(\"There ain't files to show up.\")\n\n\tdef read(f):\n\t\tcontent = file.readfile(\"dorks/\" + f + \".txt\")\n\t\tif content != False:\n\t\t\tif len(content) != 0:\n\t\t\t\tprint(\"\")\n\t\t\t\tio.row(5, [fore.blue + \"ID\", \"DORK\"])\n\t\t\t\ti = 1\n\t\t\t\tfor d in content:\n\t\t\t\t\tio.row(5, [fore.yellow + str(i), fore.white + d])\n\t\t\t\t\tio.prevline(1)\n\t\t\t\t\ti += 1\n\t\t\t\tprint(endl)\n\t\t\telse: io.error(\"There ain't dorks to show up.\")\n\t\telse: io.error(\"File doesn't exist.\")\n\n\tdef search(query):\n\t\t_google = \"https://www.googleapis.com/customsearch/v1\"\n\t\t_params = {\n\t\t\t\"key\": \"AIzaSyANm8farYg7FBUl49FRSMDURp4a7VDEyEY\",\n\t\t\t\"cx\": \"017576662512468239146:omuauf_lfve\",\n\t\t\t\"q\": query\n\t\t}\n\t\t_data = get(_google, params=_params).text\n\t\t_data = loads(_data)\n\t\ttry:\n\t\t\tfor item in _data[\"items\"]:\n\t\t\t\tprint(item[\"link\"])\n\t\t\tprint(\"\")\n\t\t\treturn True\n\t\texcept: return False\n\n\tdef use(f, n):\n\t\tcontent = file.readfile(\"dorks/\" + f + \".txt\")\n\t\tif content != False:\n\t\t\tif len(content) != 0:\n\t\t\t\ti = 1\n\t\t\t\tfor _dork in content:\n\t\t\t\t\tif n == i:\n\t\t\t\t\t\tio.quote(\"Dork: \" + fore.magenta + _dork + fore.white)\n\t\t\t\t\t\tio.prevline(1)\n\t\t\t\t\t\tif not dork.search(_dork):\n\t\t\t\t\t\t\tio.prevline(2)\n\t\t\t\t\t\t\tio.error(\"No results gathered.\")\n\t\t\t\t\t\ti = -1\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse: i += 1\n\t\t\t\tif i != -1: io.error(\"Doesn't exist dork with given ID.\")\n\t\t\telse: io.error(\"This file doesn't contain any dorks to use.\")\n\t\telse: io.error(\"File doesn't exist.\")\n\n\tdef all(f):\n\t\tcontent = file.readfile(\"dorks/\" + f + \".txt\")\n\t\tif content != False:\n\t\t\tif len(content) != 0:\n\t\t\t\tio.quote(\"Gathering dorks data...\")\n\t\t\t\ti = 0\n\t\t\t\tfor _dork in content:\n\t\t\t\t\tif dork.search(_dork): i += 1\n\t\t\t\t\ttime.sleep(5)\n\t\t\t\tif i == 0:\n\t\t\t\t\tio.prevline(2)\n\t\t\t\t\tio.error(\"No results gathered.\")\n\t\t\telse: io.error(\"This file doesn't contain any dorks to use.\")\n\t\telse: io.error(\"File doesn't 
exist.\")","repo_name":"skollprog/tochi","sub_path":"lib/dork.py","file_name":"dork.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"10801109810","text":"pessoas = []\npessoa = []\n\nwhile True:\n continuar = \"continuar\"\n pessoa.append(input(\"Nome: \"))\n pessoa.append(float(input(\"Peso: \")))\n\n pessoas.append(pessoa[:])\n pessoa.clear()\n\n while continuar != \"S\" and continuar != \"N\": \n continuar = input(\"Você quer continuar? [S/N] \").upper()\n if continuar == \"N\":\n break\n\n#maior peso\nposicao = 0\nmaior_peso = pessoas[posicao][1]\nwhile True:\n peso = pessoas[posicao][1]\n if peso > maior_peso:\n maior_peso = peso\n\n posicao += 1\n if posicao == len(pessoas):\n break\n\n#mais pesados\nmais_pesados = []\nfor c in pessoas:\n if c[1] == maior_peso:\n mais_pesados.append(c[0])\n\n#menor peso\nposicao = 0\nmenor_peso = pessoas[posicao][1]\nwhile True:\n peso = pessoas[posicao][1]\n if peso < menor_peso:\n menor_peso = peso\n\n posicao += 1\n if posicao == len(pessoas):\n break\n\n#mais leves\nmais_leves = []\nfor l in pessoas:\n if l[1] == menor_peso:\n mais_leves.append(l[0])\n\nprint(\"=-\" * 30) \nprint(f\"Foram cadastradas {len(pessoas)} pessoas.\")\nprint(f\"O maior peso foi de {maior_peso}Kg. Peso de {mais_pesados}\")\nprint(f\"O menor peso foi de {menor_peso}Kg. Peso de {mais_leves}\")","repo_name":"joao-medina/Studies_-_Python_CursoEmVideo","sub_path":"mundo-3/ex084.py","file_name":"ex084.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"6121768640","text":"#pip install slack_sdk, aiohttp, selenium, bs4, requests\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport json\r\n\r\ndef get_asin(url):\r\n asin = url.split('/')\r\n for i, dp in enumerate(asin):\r\n if dp == \"dp\":\r\n return (asin[i+1])\r\n\r\ndef get_domain(url):\r\n uri = url.split('/')\r\n for i in uri:\r\n if \"amazon\" in i:\r\n return i\r\n print(\"not supported, amazon only\")\r\n exit()\r\n\r\ndef get_itemcode(url):\r\n return get_asin(url)\r\n\r\ndef getprice_amazon(parse_html):\r\n parse_price = parse_html.find(id=\"twister-plus-price-data-price\")\r\n parse_curr = parse_html.find(id=\"twister-plus-price-data-price-unit\")\r\n\r\n if parse_price == None:\r\n item_price = \"none found\"\r\n currency = \"none found\"\r\n else:\r\n item_price = parse_price.get(\"value\")\r\n currency = parse_curr.get(\"value\")\r\n\r\n return item_price, currency\r\n\r\ndef read_html(html_file):\r\n with open(html_file, \"r\", encoding=\"utf-8\") as f:\r\n content = f.read()\r\n\r\n parse_html = BeautifulSoup(content,\"html.parser\", multi_valued_attributes=None)\r\n item_price, currency = getprice_amazon(parse_html)\r\n\r\n return item_price, currency\r\n\r\n\r\ndef send_request(s, prepared):\r\n response = s.send(prepared)\r\n return response\r\n\r\ndef write_response_tofile(response, item_code):\r\n html_file = \"ASIN - \" + item_code + \".html\"\r\n\r\n with open(html_file,\"w\", encoding=\"utf-8\") as f:\r\n f.write(response.text)\r\n return html_file\r\n\r\ndef pretty_print_POST(req):\r\n print('{}\\n{}\\r\\n{}\\r\\n\\r\\n{}'.format(\r\n '-----------START-----------',\r\n req.method + ' ' + req.url,\r\n '\\r\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\r\n req.body,\r\n ))\r\n\r\ndef getprice(url):\r\n domain = get_domain(url)\r\n item_code = 
get_itemcode(url)\r\n\r\n headers = {'authority' :'www.amazon.com', 'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36'}\r\n req = requests.Request(\"GET\", url, headers=headers)\r\n prepared = req.prepare()\r\n s = requests.Session()\r\n pretty_print_POST(prepared)\r\n\r\n response = send_request(s, prepared)\r\n #print(response.text)\r\n #print(response.status_code)\r\n html_file = write_response_tofile(response, item_code)\r\n price_curr = read_html(html_file)\r\n price = price_curr[0]\r\n currency = price_curr[1]\r\n price_curr_json = {\"price\": price,\r\n \"currency\": currency,\r\n \"item\": item_code}\r\n return price_curr_json","repo_name":"marcus081c/pricecheck","sub_path":"getpriceamazon.py","file_name":"getpriceamazon.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"16852461173","text":"from numpy import array\nfrom os import listdir\nfrom pickle import load\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical, plot_model\nfrom keras.models import Model, load_model\nfrom keras.layers import Input, Dense, LSTM, Embedding, Dropout\nfrom keras.layers.merge import add\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.applications.vgg16 import preprocess_input\n\ndef load_doc(filename):\n with open(filename, 'r') as f:\n text = f.read()\n return text\n\ndef load_set(filename):\n doc = load_doc(filename)\n dataset = set()\n for line in doc.split('\\n'):\n if len(line) < 1:\n continue\n key = line.split('.')[0]\n dataset.add(key)\n return dataset\n\n# load clean descriptions into memory\ndef load_clean_descriptions(filename, dataset):\n doc = load_doc(filename)\n texts = {}\n for line in doc.split('\\n'):\n if len(line) == 0:\n continue\n tokens = line.split()\n image_id, text = tokens[0], tokens[1:]\n if image_id in dataset:\n desc = 'BOS ' + ' '.join(text) + ' EOS'\n texts.setdefault(image_id, []).append(desc)\n return texts\n\n#load photo features\ndef load_photo_features(filename, dataset):\n with open(filename, 'rb') as f:\n all_features = load(f)\n features = {key: all_features[key] for key in dataset}\n return features\n\n# convert a dictionary of clean descriptions to list\ndef to_lines(descriptions):\n all_desc = []\n for key in descriptions.keys():\n all_desc += descriptions[key]\n return all_desc\n\n# fit a tokenizer given caption descriptions\ndef create_tokenizer(descriptions):\n lines = to_lines(descriptions)\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(lines)\n return tokenizer\n\ndef max_length(descriptions):\n lines = to_lines(descriptions)\n return max(map(lambda d: len(d.split()), lines))\n\ndef create_sequences(tokenizer, max_length, descriptions, photos):\n X1, X2, y = list(), list(), list()\n # walk through each image identifier\n for key, desc_list in descriptions.items():\n # walk through each description for the image\n for desc in desc_list:\n # encode the sequence\n seq = tokenizer.texts_to_sequences([desc])[0]\n # split one sequence into multiple X,y pairs\n for i in range(1, len(seq)):\n # split into input and output pair\n in_seq, out_seq = seq[:i], seq[i]\n # pad input sequence\n in_seq = pad_sequences([in_seq], maxlen=max_length)[0]\n # encode output sequence\n out_seq = 
to_categorical([out_seq], num_classes=vocab_size)[0]\n # store\n X1.append(photos[key][0])\n X2.append(in_seq)\n y.append(out_seq)\n return array(X1), array(X2), array(y)\n\ndef create_sequences_single(tokenizer, max_length, desc_list, photo):\n X1, X2, y = [], [], []\n vocab_size = len(tokenizer.word_index)+1\n for desc in desc_list:\n seq = tokenizer.texts_to_sequences([desc])[0]\n for i in range(1, len(seq)):\n in_seq, out_seq = seq[:i], seq[i]\n in_seq = pad_sequences([in_seq], maxlen = max_length)[0]\n out_seq = to_categorical([out_seq], num_classes = vocab_size)[0]\n X1.append(photo[0])\n X2.append(in_seq)\n y.append(out_seq)\n return [array(X1), array(X2), array(y)]\n\ndef define_model(vocab_size, max_length, filename = None):\n # feature extractor model\n inputs1 = Input(shape=(4096,))\n fe1 = Dropout(0.5)(inputs1)\n fe2 = Dense(256, activation='relu')(fe1)\n # sequence model\n inputs2 = Input(shape=(max_length,))\n se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)\n se2 = Dropout(0.5)(se1)\n se3 = LSTM(256)(se2)\n\n # decoder model\n decoder1 = add([fe2, se3])\n decoder2 = Dense(256, activation='relu')(decoder1)\n outputs = Dense(vocab_size, activation='softmax')(decoder2)\n \n \n # tie it together [image, seq] [word]\n model = Model(inputs=[inputs1, inputs2], outputs=outputs)\n if filename:\n model = load_model(filename)\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n \n # summarize model\n # print(model.summary())\n # plot_model(model, to_file='model.png', show_shapes=True)\n return model\n\n# train dataset\n\n# load training dataset (6K)\nfilename = 'Flickr8k_text/Flickr_8k.trainImages.txt'\ntrain = load_set(filename)\nprint('Dataset: %d' % len(train))\n# descriptions\ntrain_descriptions = load_clean_descriptions('descriptions.txt', train)\nprint('Descriptions: train=%d' % len(train_descriptions))\n# photo features\ntrain_features = load_photo_features('features.pkl', train)\nprint('Photos: train=%d' % len(train_features))\n# prepare tokenizer\ntokenizer = create_tokenizer(train_descriptions)\nvocab_size = len(tokenizer.word_index)+1\nprint('Vocabulary Size: %d' % vocab_size)\n\n# determine max sequence length\nmax_length = max_length(train_descriptions)\nprint('Description Length: %d' % max_length)\n'''\n# prepare sequences\nX1train, X2train, ytrain = create_sequences(tokenizer, max_length, train_descriptions, train_features)\n'''\ndef data_generator(descriptions, tokenizer, max_length):\n directory = 'Flicker8k_Dataset'\n while True:\n for image_id in train:\n image = train_features[image_id]\n desc = descriptions[image_id]\n in_img, in_seq, out_word = create_sequences_single(tokenizer, max_length, desc, image)\n yield[[in_img, in_seq], out_word]\n# dev dataset\n\n# load test set\nfilename = 'Flickr8k_text/Flickr_8k.devImages.txt'\ntest = load_set(filename)\nprint('Dataset: %d' % len(test))\n# descriptions\ntest_descriptions = load_clean_descriptions('descriptions.txt', test)\nprint('Descriptions: test=%d' % len(test_descriptions))\n# photo features\ntest_features = load_photo_features('features.pkl', test)\nprint('Photos: test=%d' % len(test_features))\n# prepare sequences\nX1test, X2test, ytest = create_sequences(tokenizer, max_length, test_descriptions, test_features)\n\n# fit model\n\nmodel = define_model(vocab_size, max_length, 'model-ep010-loss3.900-val_loss3.950.h5')\n\nfilepath = 'model-ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, 
mode='min')\n\n# fit model\nmodel.fit_generator(data_generator(train_descriptions, tokenizer, max_length), epochs=80, steps_per_epoch=1200, callbacks=[checkpoint], validation_data=([X1test, X2test], ytest))\n\n\n\n\n\n\n\n","repo_name":"tyge318/Keras-LSTM-Exercise","sub_path":"Captions/caption_train.py","file_name":"caption_train.py","file_ext":"py","file_size_in_byte":6722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"11303693181","text":"import os, sys\nimport logging\nimport multiprocessing as mp\n\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\n\nsys.path.insert(0, os.path.dirname(os.getcwd()))\nfrom Core.CoreSystem import Helper\n\n\nclass clsParameters():\n\n def __init__(self, options):\n self.strUser = options.user_name\n self.strProject = options.project_name.replace('.txt', '') ## A user can be confused the input. So I prevented from it using 'replace'.\n self.strGroup = options.group\n self.intCore = options.thread\n\n self.strSampleList = 'User/{user}/{project}.txt'.format(user=options.user_name, project=options.project_name)\n\n\ndef SummaryRandomBarcode(sFile_path):\n\n \"\"\"\n /Tmp\n 190819_Nahye_24k_2_D0_2-24kLib_Classified_Indel_barcode.fastq* -> process target\n 190819_Nahye_24k_2_D0_2-24kLib_Indel_freq.txt*\n 190819_Nahye_24k_2_D0_2-24kLib_Indel_summary.txt*\n 190819_Nahye_24k_2_D0_2-24kLib_Summary.txt*\n Pickle\n\n dBarcode_cnt = {'ACGTACTC_sorting_barcode': {'ACATACAC_random': 5, 'CGTGTTGA_random': 3, ...}\n \"\"\"\n dictBarcodeCnt = {}\n strClassCheck = ''\n\n strSample = sFile_path.split('/')[-1]\n logging.info('Summary_random_barcode start : %s, %s' % (sFile_path, strSample))\n\n for sFile in os.listdir(sFile_path+'/Tmp/'):\n if '.fastq' in sFile:\n with open(sFile_path+'/Tmp/'+sFile) as Input:\n for i, strRow in enumerate(Input):\n\n # @D00235:683:CE1P6ANXX:6:1114:2135:5231 1:N:0:CTGAAGCT+CCTATCCT:Barcode_TTTGCTATCTCGACGTATGGACAGTG:total\n if i % 4 == 0:\n listBarClass = strRow.replace('\\n','').split('Barcode_')[1].split(':')\n strBarcode = listBarClass[0]\n strClass = listBarClass[1]\n\n if strClass == 'total':\n strClassCheck = 'total'\n\n if i % 4 == 1 and strClassCheck == 'total':\n strRow = strRow.replace('\\n','').upper()\n intBarcodeStart = strRow.find(strBarcode)\n strRandom_barcode = strRow[intBarcodeStart-8:intBarcodeStart]\n\n try:\n _ = dictBarcodeCnt[strBarcode]\n except KeyError:\n dictBarcodeCnt[strBarcode] = {}\n try:\n dictBarcodeCnt[strBarcode][strRandom_barcode] += 1\n except KeyError:\n dictBarcodeCnt[strBarcode][strRandom_barcode] = 1\n #print(sBarcode, sRandom_barcode, iBarcode_start, sRow)\n\n strClassCheck = ''\n\n if not os.path.isdir(sFile_path + '/Summary_Random_barcode'): os.mkdir(sFile_path + '/Summary_Random_barcode')\n with open(sFile_path + '/Summary_Random_barcode/%s_all_random_barcode.txt' % strSample, 'w') as All_random,\\\n open(sFile_path + '/Summary_Random_barcode/%s_Unique_RandomBarcodeNumber_In_SortingBarcode.txt' % strSample, 'w') as Random_sorting:\n\n All_random.write('Sorting_barcode\\tUnique_RandomBarcodeNumber_In_SortingBarcode\\tRandomBarcode\\tEach_RandomBarcode_read_count\\n')\n Random_sorting.write('Sorting_barcode\\tUnique_RandomBarcodeNumber_In_SortingBarcode\\n')\n\n for sBarcode, dRandom_barcode_cnt in dictBarcodeCnt.items():\n iRandom_barcode_num = len(dRandom_barcode_cnt.keys())\n Random_sorting.write('\\t'.join(map(str, [sBarcode, iRandom_barcode_num]))+'\\n')\n\n for sRandom_barcode, iCnt in 
dRandom_barcode_cnt.items():\n All_random.write('\\t'.join(map(str, [sBarcode, iRandom_barcode_num, sRandom_barcode, iCnt]))+'\\n')\n\n logging.info('Summary_random_barcode end: %s' % sFile_path)\n\n## on going\ndef CountGroup(InstParameters):\n \"\"\"\n Sorting_barcode Unique_RandomBarcodeNumber_In_SortingBarcode RandomBarcode Each_RandomBarcode_read_count\n TATATCATAGCGTACTCATC 8 TGCGTTTG 3\n TATATCATAGCGTACTCATC 8 CGCGTTTG 3\n TATATCATAGCGTACTCATC 8 TAGTTTTG 1\n TATATCATAGCGTACTCATC 8 ATAGTTTG 1\n \"\"\"\n\n sHeader = ''\n\n with open(InstParameters.strSampleList) as Sample: ## tmp input\n\n listSample = Sample.readlines()\n\n setGroup = set([strRow.replace('\\n', '').split('\\t')[2].upper() for strRow in listSample])\n\n for strGroup in setGroup:\n if strGroup == 'CTRL': continue\n\n for strRow in listSample:\n if strGroup == strGroupOfSample: ## matched group names -> Sum the counts\n listCol = strRow.replace('\\n', '').split('\\t')\n strSample = listCol[0]\n strRef = listCol[1]\n strGroupOfSample = listCol[2]\n\n strProjectDir = './Output/{user}/{project}'.format(user=InstParameters.strUser,\n project=InstParameters.strProject)\n strGroupDir = os.path.join(strProjectDir, 'Group_result')\n Helper.MakeFolderIfNot(strGroupDir)\n\n dTotal_RandomBarcode_cnt_in_SortingBarcode = OrderedDict() ## ('GECKO_6367_GATCTGCTC', ['GECKO_6367', 'GATCTGCTC', 2, 156, '0.0128']),\n ## Unique key, only one list.\n\n with open('{project_dir}/{sample}_all_random_barcode.txt'.format(project_dir=strProjectDir,\n sample=strSample)) as RandomBarcode_SeqFreq:\n sHeader = RandomBarcode_SeqFreq.readline()\n\n for sRow in RandomBarcode_SeqFreq:\n lCol = sRow.replace('\\n', '').split('\\t')\n\n sSortingBarcode = lCol[0]\n #iTotal_RandomBarcode_cnt_in_SortingBarcode = int(lCol[1])\n sSorting_and_Random_barcode_seq = lCol[0] + '_' + lCol[2] ## Unique name : Doench2014_1000_CTCTGGGGT\n iRandomBarcode_count = int(lCol[3])\n\n lCol[3] = iRandomBarcode_count\n\n try:\n _ = dTotal_RandomBarcode_cnt_in_SortingBarcode[sSorting_and_Random_barcode_seq]\n\n dTotal_RandomBarcode_cnt_in_SortingBarcode[sSorting_and_Random_barcode_seq][3] += iRandomBarcode_count\n\n except KeyError:\n dTotal_RandomBarcode_cnt_in_SortingBarcode[sSorting_and_Random_barcode_seq] = lCol ## initial assignment\n #END for\n dRecal_total_kind_of_RandomBarcode = OrderedDict()\n for sSort_Rand_seq in dTotal_RandomBarcode_cnt_in_SortingBarcode: ## sSorting_and_Random_barcode_seq\n sSortBarcode = sSort_Rand_seq.split('_')[0]\n try:\n dRecal_total_kind_of_RandomBarcode[sSortBarcode].append(dTotal_RandomBarcode_cnt_in_SortingBarcode[sSort_Rand_seq])\n except KeyError:\n dRecal_total_kind_of_RandomBarcode[sSortBarcode] = [dTotal_RandomBarcode_cnt_in_SortingBarcode[sSort_Rand_seq]]\n\n for sKey, llValue in dRecal_total_kind_of_RandomBarcode.items():\n ## sKey: TATATCATAGCGTACTCATC, llValue : [[TATATCATAGCGTACTCATC, 8, TGCGTTTG, 3],[],[] ...\n iKind_of_RandomBarcode = len(llValue) ################## why do I make like this ?????\n for lValue in llValue:\n lValue[1] = iKind_of_RandomBarcode ## Recal using group total cnt.\n\n llValue = sorted(llValue, key=lambda x:x[3], reverse=True)\n dRecal_total_kind_of_RandomBarcode[sKey] = llValue\n\n strEachGroup = './Output/Group_result/%s' % strGroup\n Helper.MakeFolderIfNot(strEachGroup)\n\n with open(os.path.join(strEachGroup, 'Summary_all_random_barcode_in_group.txt'), 'w') as Sort_Random_cnt,\\\n open(os.path.join(strEachGroup, 'Summary_Unique_RandomBarcodeNumber_in_group.txt'), 'w') as Uniq_random_cnt:\n\n 
Sort_Random_cnt.write(sHeader)\n Uniq_random_cnt.write('Sorting_barcode\\tUnique_RandomBarcodeNumber_In_SortingBarcode\\n')\n\n for sSortBarcode, llCol in dRecal_total_kind_of_RandomBarcode.items():\n Uniq_random_cnt.write('\\t'.join(map(str, [sSortBarcode, len(llCol)]))+'\\n')\n for lCol in llCol:\n Sort_Random_cnt.write('\\t'.join(map(str, lCol))+'\\n')\n #END: for\n #END: with\n\n\ndef Main():\n\n logging.info('Program Start')\n logging.info('Make commands for a multiple processing')\n\n parser = ArgumentParser(description='Script for counting the random barcodes')\n\n parser.add_argument('-u', '--user_name', type=str, dest='user_name', help='The user name in the /user subdir')\n parser.add_argument('-p', '--project_name', type=str, dest='project_name', help='The project name in the /user/user_name/ subdir')\n parser.add_argument('-g', '--group', type=str, dest='group', default='false', help='The group sum run of the barcodes, default: false')\n parser.add_argument('-t', '--thread', type=int, dest='thread', default='15', help='The multicore number 1~15')\n options = parser.parse_args()\n\n InstParameters = clsParameters(options)\n\n lPara = []\n\n with open(InstParameters.strSampleList) as SampleList:\n\n for strSample in SampleList:\n if strSample[0] == '#' or strSample[0] in ['', ' ', '\\r', '\\n', '\\r\\n']: continue\n strSample = strSample.replace('\\n', '').replace('\\r', '').split('\\t')[0]\n sFile_path = './Output/{user}/{project}/{sample}'.format(user=options.user_name,\n project=options.project_name,\n sample=strSample)\n #print('sFile_path', sFile_path)\n lPara.append(sFile_path)\n\n ## single_test\n #Summary_random_barcode(lPara[0])\n\n logging.info('Multiple processing Start')\n p = mp.Pool(options.thread)\n p.map_async(SummaryRandomBarcode, lPara).get()\n logging.info('Multiple processing End')\n\n #logging.info('Count group Start')\n #CountGroup(InstParameters)\n #logging.info('Count group End')\n\n #logging.info('Program End')\n\nMain()\n","repo_name":"CRISPRJWCHOI/CRISPR_toolkit","sub_path":"Indel_searcher_2/Summary_Random_barcode.py","file_name":"Summary_Random_barcode.py","file_ext":"py","file_size_in_byte":10848,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"46"} +{"seq_id":"20977108120","text":"from math import log\n\n\ndef fast_sort(tab, a):\n n = len(tab)\n rng = 1/n\n\n buckets = [[] for _ in range(n)]\n for i in tab:\n b = int(log(i, a) / rng)\n buckets[b].append(i)\n\n for b in buckets:\n b.sort()\n\n res = []\n for b in buckets:\n for i in b:\n res.append(i)\n\n print(res)\n\n\nT = [1, 1.01, 1.4, 1.2, 1.31]\na = 2\nfast_sort(T, a)\n","repo_name":"paliwodam/Algorithms-and-Data-Structures","sub_path":"Exams/2019_term_1/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"26330479726","text":"from datetime import datetime\nfrom flask import render_template, session, redirect, url_for\n\nfrom . import main\nfrom .forms import NewBlogForm, EditContentForm, EditBlogForm, ContentForm\nfrom .. 
import db\nfrom ..models import Blog, BlogContent\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n blogs = Blog.query.order_by(Blog.timestamp.desc()).all()\n return render_template('index.html', blogs=blogs, current_time=datetime.utcnow(),\n name=session.get('name'), text=session.get('text'))\n\n\n@main.route('/blogs/new', methods=['GET', 'POST'])\ndef new_blog():\n blogform = NewBlogForm()\n if blogform.validate_on_submit():\n blog = Blog(title=blogform.title.data,\n body=blogform.body.data,\n timestamp=datetime.utcnow()\n )\n db.session.add(blog)\n return redirect(url_for('.index'))\n return render_template('newblog.html', blogform=blogform, current_time=datetime.utcnow())\n\n\n@main.route('/blogs/', methods=['GET', 'POST'])\ndef blog(id):\n content_form = ContentForm()\n blog = Blog.query.get_or_404(id)\n session['id'] = blog.id\n if content_form.validate_on_submit():\n new_content = BlogContent(body=content_form.body.data,\n blog=blog,\n timestamp=datetime.utcnow())\n db.session.add(new_content)\n return redirect(url_for('.blog', id=blog.id))\n return render_template('blog.html', blog=blog, content_form=content_form)\n\n\n@main.route('/edit/', methods=['GET', 'POST'])\ndef edit_blog(id):\n blog = Blog.query.get_or_404(id)\n form = EditBlogForm()\n if form.validate_on_submit():\n blog.title = form.title.data\n blog.body = form.body.data\n return redirect(url_for('.blog', id=blog.id))\n form.title.data = blog.title\n form.body.data = blog.body\n return render_template('edit_blog.html', blog=blog, form=form)\n\n\n@main.route('/edit/contents/', methods=['GET', 'POST'])\ndef edit_content(id):\n content = BlogContent.query.get_or_404(id)\n form = EditContentForm()\n if form.validate_on_submit():\n content.body = form.body.data\n return redirect(url_for('.blog', id=session.get('id')))\n form.body.data = content.body\n return render_template('edit_blog.html', content=content, form=form)\n\n\n@main.route('/delete/content/')\ndef delete_content(id):\n content = BlogContent.query.get_or_404(id)\n db.session.delete(content)\n\n return redirect(url_for('.blog', id=session.get('id')))\n\n\n@main.route('/delete/blog/')\ndef delete_blog(id):\n blog = Blog.query.get_or_404(id)\n db.session.delete(blog)\n\n return redirect(url_for('.index'))","repo_name":"JunliuHub/MyBlog","sub_path":"src/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"33299107648","text":"# 백준 단어공부\nimport collections\n\nstring1 = input()\n\ndef strstudy(s):\n s = s.upper()\n string = []\n max_dict = []\n for i in s:\n string += i\n string_dict = collections.Counter(string)\n for key, value in string_dict.items():\n if value == max(string_dict.values()):\n max_dict.append(key)\n if len(max_dict) >= 2:\n return \"?\"\n else:\n return max_dict[0]\na = strstudy(string1)\n\nprint(a)","repo_name":"ohjooyeong/baekjoonalgo","sub_path":"string/1157.py","file_name":"1157.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"34238119393","text":"# 计算时间差值\nimport time as t\n# perf_counter() 返回计时器的精准时间\n# process_time() 返回当前进程执行 CPU 的时间总和\n \nclass Time:\n def __init__(self, name=\"perf_counter\"):\n # self.unit = [\"年\", \"月\", \"日\", \"时\", \"分\", \"秒\"]\n self.begin = 0\n self.end = 0\n self.prompt = \"未开始计时\"\n self.name = name\n \n def __str__(self):\n return self.prompt\n \n __repr__ = 
__str__\n    \n    def __add__(self, other):\n        self.prompt = \"一共花了\"\n        result = []\n        '''\n        for index in range(6):\n            result.append(self.lasted[index] + other.lasted[index])\n            if not result[index]:\n                self.prompt += (str(result[index]) + self.unit[index])\n        '''\n        result.append(self.lasted[0] + other.lasted[0])\n        self.prompt += (result[0] + '秒')\n        return self.prompt\n    \n    def set_time(self, name):\n        self.name = name\n    \n    def start(self):\n        if self.name == \"perf_counter\":\n            self.begin = t.perf_counter()\n        elif self.name == \"process_time\":\n            self.begin = t.process_time()\n        # when the stop() method has not been called yet\n        self.prompt = \"提示:请先调用stop() 停止计时\"\n        print(\"开始计时\")\n    \n    def stop(self):\n        if self.begin:\n            if self.name == \"perf_counter\":\n                self.end = t.perf_counter()\n            elif self.name == \"process_time\":\n                self.end = t.process_time()\n            \n            self._calc();\n            print(\"计时结束\")\n        else:\n            print(\"提示: 请开始 start() 开始计时\")\n    \n    def _calc(self):\n        self.prompt = \"总共花了\"\n        self.lasted = []\n        '''\n        for index in range(6):\n            self.lasted.append(self.end[index] - self.begin(index))\n            if not self.lasted[index]:\n                self.prompt += (str(self.lasted[index]) + self.unit[index])\n        '''\n        self.lasted.append(self.end - self.begin)\n        self.prompt += str(self.lasted[0]) + '秒'\n\n        # re-initialize\n        self.begin = 0\n        self.end = 0\n    \n    \n","repo_name":"quadrant26/python","sub_path":"magic/m_ex_41.py","file_name":"m_ex_41.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"31116568005","text":"#-*- coding:utf-8 -*-\n\nfrom tkinter import *\nfrom tkinter.simpledialog import askinteger\n\n\nclass Application(Frame):\n    # a classic GUI program\n    def __init__(self, master=None):\n        super().__init__(master)   #super() is the constructor of the parent class\n        self.master = master\n        self.pack()\n        self.createWidget()\n\n    def createWidget(self):\n        # create the widgets\n        self.btn = Button(self, text='设置年龄', command=self.setAge)\n        self.btn.pack()\n        self.label = Label(self, width=50, height=2, bg='#fff')\n        self.label.pack()\n\n    # askstring and askfloat work the same way\n    def setAge(self):\n        self.a = askinteger(title='输入年龄', prompt='请输入年龄',\\\n                            initialvalue=18, minvalue=1, maxvalue=150)\n        print(self.a)\n        if not self.a==None:\n            self.label.config(text=self.a)\n\n\nif __name__ == '__main__':\n    root = Tk()\n    root.title('简单对话框的GUI程序')\n    root.geometry('800x500+700+260')\n    app = Application(master=root)\n    root.mainloop()","repo_name":"YG-07/PyTkVideo","sub_path":"3-Gui编程-tkinter/src/21-simpledialog简单对话框.py","file_name":"21-simpledialog简单对话框.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"2232916424","text":"import talib\nimport numpy as np\nfrom binance.client import Client\nimport time\nfrom tradingview_ta import TA_Handler, Interval, Exchange\ntesla = TA_Handler(\n    symbol=\"TSLA\",\n    screener=\"america\",\n    exchange=\"NASDAQ\",\n    interval=Interval.INTERVAL_1_MINUTE\n)\n\napi_key = ''\napi_secret = ''\n\nclient = Client(api_key, api_secret, testnet=True)\n\nsymbol = 'BTCUSDT'\nquantity = 0.001\n\ndef get_indicators(symbol):\n    candles = client.get_klines(symbol=symbol, interval=Client.KLINE_INTERVAL_1MINUTE)\n    print(candles)\n    closes = np.array([float(candle[4]) for candle in candles])\n    rsi = talib.RSI(closes, timeperiod=14)\n    adx = talib.ADX(closes, closes, closes, timeperiod=14)\n    upper, middle, lower = talib.BBANDS(closes, timeperiod=20, nbdevup=2, nbdevdn=2, matype=0)\n    return rsi[-1], adx[-1], closes[-1], upper[-1], middle[-1], lower[-1]\n\ndef 
place_order(symbol, side, quantity):\n try:\n order = client.create_order(symbol=symbol, side=side, type=Client.ORDER_TYPE_MARKET, quantity=quantity)\n print(order)\n except Exception as e:\n print(e)\n\nwhile True:\n try:\n rsi, adx, close, upper, middle, lower = get_indicators(symbol)\n print(\"rsi:\", rsi)\n print(\"adx:\", adx)\n print(\"close:\", close)\n print(\"upper:\", upper)\n print(\"middle:\", middle)\n print(\"lower:\", lower)\n if rsi < 30 and adx > 25 and close < lower:\n print('Placing buy order')\n place_order(symbol, Client.SIDE_BUY, quantity)\n elif rsi > 70 and adx > 25 and close > upper:\n print('Placing sell order')\n place_order(symbol, Client.SIDE_SELL, quantity)\n except Exception as e:\n print(e)\n\n time.sleep(60)","repo_name":"RezaHedayatkhah/CryptoTrader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"27803808861","text":"# 142, Toma Alexandra, alexandra.toma1@s.unibuc.ro\n# 142, Macovei Catalina, catalina.macovei@s.unibuc.ro\n\nimport PDA\nimport CFG as c\nimport converter_CFG_to_PDA\n\"\"\"\nsigma contine toate caracterele limbii eng, asta inseamna ca si alte mii de cuvinte sunt incluse aici\ndar invalide pentru joc\n\"\"\"\n\n\n# cititi de la stdin/fisier linie cu linie si de afisat\n# Note that each section (Sigma, States, Transition) should have written at the end the word: \"End\"\n# For example:\n# States\n# q0,S\n# q1\n# q2\n# q3,F\n\n\ndef reading_data_set(f):\n d = {'Sigma': [], 'Theta': [], 'States': [[], [], []], 'Transitions': []} # data will be stored in a dictionary with this sections\n try:\n f = open(f)\n section = ''\n\n for l in f:\n linie = l.strip(\"\\n\")\n linie = linie.strip(\" \")\n\n if linie == '': # ignoring empty lines\n continue\n if linie.startswith('#'): # just ignoring the comment lines from the file\n continue\n if linie.lower().startswith('theta'): # if I found theta section -> the list attached to my DFA\n section = 'theta'\n continue\n if linie.lower().startswith('sigma'): # if I found sigma section\n section = 'sigma'\n continue\n elif linie.lower().startswith('states'): # if I found states section\n section = 'states'\n continue\n elif linie.lower().startswith('transitions'): # if I found transitions section\n section = 'transitions'\n continue\n\n if section == 'sigma':\n if ',' in linie:\n print(\n \"Please write each symbol on a new line, whithout ','! \") # make sure each simbol is ona new line\n return 0\n var = linie.split()\n d['Sigma'].extend(var)\n\n elif section == 'theta':\n if ',' in linie:\n print(\n \"Please write each symbol on a new line, whithout ','! 
\") # make sure each simbol is ona new line\n return 0\n var = linie.split()\n d['Theta'].extend(var)\n\n elif section == 'states':\n state = []\n if ', ' in linie:\n state = linie.split(', ')\n elif ',' in linie:\n state = linie.split(',')\n else:\n state = linie.split()\n\n if state != []:\n if 'F' in state: # adaug final states in lista de stari finale si cea cu toate starile\n state.remove('F')\n d['States'][2].extend(state)\n d['States'][0].extend(state)\n elif 'S' in state: # adaug start state in lista de stari initiale si cea cu toate starile\n state.remove('S')\n d['States'][1].extend(state)\n d['States'][0].extend(state)\n else:\n d['States'][0].extend(state)\n\n\n elif section == 'transitions': # citesc tranzitiile\n transitions = []\n if ', ' in linie:\n transitions = linie.split(', ')\n elif ',' in linie:\n transitions = linie.split(',')\n else:\n transitions = linie.split()\n\n d['Transitions'].append(tuple(transitions)) # each transition is a tuple, like ('q1', '0', 'q1')\n\n except:\n print(\"Could not load the file!\")\n\n print(d) # show dictionary with data for testing\n return d\n\n\n# validation sigma, are the simbols/alphabet for dfa, checks if it's empty\ndef validation_Sigma(d):\n if d['Sigma'] != []:\n s1 = ','\n s2 = 'q'\n for simbol in d['Sigma']:\n if s1 in simbol and s2 in simbol:\n print(f'Please check the Sigma section, you are not allowed to have -> {s1 if s1 else s2} in simbols list')\n return 1\n else:\n print('You can\\'t have an empty alphabet. Introduce the symbols!')\n return 0\n\n\ndef validation_Theta(d): # verifica daca lista e goala\n if d['Theta'] == []:\n print('Error:Your LA list is empty, sorry!')\n return 0\n else:\n return 1\n\n\n# validation states:\n# states can be succeeded by ”F”, ”S”, both or nothing\n# ”S” symbol can succeed only one state. FOR EXAMPLE:\n# States\n# q0,S\n# q1\n# q2\n# q3,F\n\ndef validation_states(d):\n start_state = d['States'][1]\n final_states = d['States'][2]\n len_start_state = len(start_state)\n\n if len_start_state > 1:\n print(f\"You must have only 1 state! Found {len_start_state} states\")\n return 0\n if len_start_state == 1 and len(final_states) < 1:\n print(f\"You can't have an empty list of final states while having a start state!\")\n return 0\n if len_start_state < 1:\n print(\"It makes no sense to have no start state!\")\n return 0\n\n return 1\n\n\n# validation transitions:\ndef validation_transitions(d):\n transitions = d['Transitions']\n\n for transition in transitions: # for each transition check if it has the necessary length, valid states and sigma\n if len(transition) > 4: # daca are lungime necesara\n print(\"Too many items in transition tuple, there can be only 4!\")\n return 0\n\n if len(transition) < 4: # daca are lungime necesara\n print(\"Don't have enough transitions! 
Should have 4\")\n            return 0\n\n        if transition[0] not in d['States'][0] or transition[3] not in d['States'][0]: # if the states (rooms) are in STATES\n            print('Transitions NULL: can\\'t find the transition in States')\n            return 0\n\n        if transition[1] not in d['Sigma']: # if the verb (the action) is in SIGMA\n            print(f'Transitions NULL: invalid transition {transition[1]}')\n            return 0\n\n    return 1\n\n\n# validation input function: returns a bool True - passed validation or False - didn't pass it\ndef validation_rule(rule):\n    \"\"\"\n    validation is done with PDA_converter_CFG\n\n    \"\"\"\n\n    cfg = c.read_cfg('config_file_game.txt') # the cfg according to the config_file_game file\n    pda = converter_CFG_to_PDA.converter_CFG_to_PDA(cfg) # converts it to a pda and returns it\n    pda2 = [pda['Sigma'],pda['States'], pda['Start_state'], pda['Final_states'], pda['Theta'], pda['Actions']]\n    # return PDA.emulate_pda(pda2, rule) # emulates the pda on the input string (rule)\n    # the emulator is not fully compatible with the received data set representing the pda\n    return True\n\n\n\ndef run_LA(d):\n    start_state = d['Transitions'][0]\n    current = start_state # it is a tuple, sets the state I am currently in\n\n    # most frequent errors may occur here, that's why I use a try block\n    try:\n        if validation_Sigma(d) and validation_states(d) and validation_transitions(d): # if everything is valid\n            start_game = 1\n            # needed a while loop for user\n            while (start_game > 0):\n                plan = input(\"type a rule: \") # here I type a rule in the correct format!!\n\n                if validation_rule(plan): # validate the rule with the pda that validates my cfg\n\n                    inventory = [] # my LA list for the game\n                    for trans in d['Transitions']:\n                        plan_list = plan.split() # plan will be a list made of the rule 'verb', 'object'\n                        rule = plan_list[0] # rule means the verb, e.g.: go\n\n                        if rule == 'go' and 'go' == trans[1]: # if the action matches go and is therefore in the transition\n                            if trans[2] in inventory: # if the player has the item required in the inventory to enter the room\n                                current = trans # if it evaluated successfully, move into the next room\n                                break\n                            else:\n                                print(f\"Don't have {trans[2]} in inventory!!!\") # if the player does not have the item needed to enter in the inventory\n                                current = trans\n                                break\n\n                        elif rule == 'take' and 'take' == trans[1]:\n                            if plan_list[1] == trans[2]:\n                                inventory.append(plan_list[1])\n                                break\n\n                        elif rule == 'drop': # if I want to delete an item\n                            if plan_list[1] in inventory: # plan_list[1] -> the item I want to delete\n                                inventory.remove(plan_list[1])\n                                break\n                            else:\n                                print(f\"Unable to remove {plan_list[1]}, it's not in list!\")\n                                break\n                        elif rule == 'inventory': # if I want to access the inventory\n                            if len(inventory): # if the inventory is not empty\n                                print(\"Items stored in inventory: \")\n                                print(inventory)\n                            else:\n                                print(\"Empty inventory!\") # otherwise, if the inventory is empty\n                            break\n                        elif rule == 'look': # here I have a dictionary with descriptions for each room\n\n                            descriptions = {'entrance_hall': 'Entrance Hall: The grand foyer of the Castle of Illusions.',\n                                            'dining_room': 'Dining Room: A room with a large table filled with an everlasting feast.',\n                                            'kitchen': 'Kitchen: A room packed with peculiar ingredients.',\n                                            'armoury': 'Armoury: A chamber filled with antiquated weapons and armour.',\n                                            'treasury': 'Treasury: A glittering room overflowing with gold and gemstones.',\n                                            'library': 'Library: A vast repository of ancient and enchanted texts.',\n                                            'pantry': 'Pantry: A storage area for the Kitchen.',\n                                            'throne_room': 'Throne Room: The command center of the castle.',\n                                            
'wizard_s_study': 'Wizard’s Study: A room teeming with mystical artifacts.',\n 'secret_exit': 'Secret Exit: The hidden passage that leads out of the Castle of Illusions.'}\n\n current_place = trans[0] # ma aflu in current_place\n described_rooms = [] # adaug adiacentele cu alte camere\n\n print(f\"You're in the {descriptions[trans[0]]} \")\n print(\"Adiacente:\")\n\n for trans in d['Transitions']: # cauta adiacentele in tranzitiile din config file\n if current_place == trans[0] and trans[1] == 'go' and trans[3] not in described_rooms:\n described_rooms.append(trans[3])\n\n for room in described_rooms: # afiseaza adiacentele si descrierele lor\n print(descriptions[room])\n break\n\n\n start_game = int(input(\"still want to play? 1:Y 0:No\")) # conditia de stop a loop-ului\n\n\n check = False # check se schimba true daca sunt in starea finala\n\n if current[3] in d['States'][2]: # check if final state\n check = True\n else:\n print(\"Game over! Automata didn't stop in final state!\")\n\n if check:\n return 1 # return 1 meaning: accept plan, move forward\n\n else:\n return 0 # return 0 meaning: reject plan\n else:\n print(\"Automata didn't pass the validation!\")\n return 0 # return 0 meaning: reject plan\n\n except:\n print(\"Automata failed running process!\")\n return 0\n return 0\n\nd = reading_data_set(\"mechanicsLA.in.txt\") # name of file goes here !!!!!!!\n\nprint(run_LA(d))\n\n","repo_name":"catalina-macovei/PDA-Game","sub_path":"mechanicsLA.py","file_name":"mechanicsLA.py","file_ext":"py","file_size_in_byte":12439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"23186544917","text":"from bson import ObjectId\nfrom dataclasses import field\nfrom typing import Optional\n\nfrom aenum import Enum\nfrom isodate import parse_duration\n\nfrom extensions.authorization.models.authorized_user import AuthorizedUser\nfrom extensions.authorization.models.invitation import Invitation, InvitationType\nfrom extensions.authorization.models.role.default_roles import DefaultRoles\nfrom extensions.authorization.models.role.role import RoleName\nfrom extensions.authorization.validators import (\n check_role_id_valid_for_organization,\n is_common_role,\n)\nfrom extensions.common.sort import SortField\nfrom sdk.auth.use_case.auth_request_objects import BaseAuthRequestObject\nfrom sdk.common.exceptions.exceptions import InvalidRequestException, PermissionDenied\nfrom sdk.common.localization.utils import Language\nfrom sdk.common.usecase.request_object import RequestObject\nfrom sdk.common.utils import inject\nfrom sdk.common.utils.convertible import (\n convertibleclass,\n meta,\n required_field,\n positive_integer_field,\n default_field,\n)\nfrom sdk.common.utils.inject import autoparams\nfrom sdk.common.utils.validators import (\n validate_id,\n validate_email_list,\n validate_object_id,\n validate_object_ids,\n must_be_present,\n must_not_be_present,\n validate_email,\n incorrect_language_to_default,\n must_be_at_least_one_of,\n not_empty,\n)\nfrom sdk.phoenix.config.server_config import Client\n\nINVITATION_PERMISSIONS_PER_ROLE: dict[str, list[str]] = {\n RoleName.ADMINISTRATOR: [\n RoleName.ADMINISTRATOR,\n RoleName.CLINICIAN,\n RoleName.SUPERVISOR,\n RoleName.SUPPORT,\n RoleName.USER,\n ],\n RoleName.CLINICIAN: [RoleName.USER, RoleName.PROXY],\n}\n\n\ndef validate_role_id(field_value: str) -> bool:\n if validate_default_role_id(field_value):\n return True\n if validate_id(field_value):\n return True\n return 
False\n\n\n@autoparams(\"default_roles\")\ndef validate_default_role_id(value: str, default_roles: DefaultRoles) -> bool:\n return value in default_roles\n\n\n@convertibleclass\nclass SendInvitationRequestObject(RequestObject):\n INVITATION = \"invitation\"\n CLIENT = \"client\"\n SENDER = \"sender\"\n LANGUAGE = \"language\"\n EXTRA_INFO = \"extraInfo\"\n\n invitation: Invitation = required_field()\n client: Client = required_field()\n sender: AuthorizedUser = required_field()\n language: str = field(\n default=Language.EN, metadata=meta(value_to_field=incorrect_language_to_default)\n )\n extraInfo: dict = default_field()\n\n\n@convertibleclass\nclass SendInvitationsRequestObject(BaseAuthRequestObject):\n DEPLOYMENT_IDS = \"deploymentIds\"\n ORGANIZATION_ID = \"organizationId\"\n EMAILS = \"emails\"\n ROLE_ID = \"roleId\"\n PATIENT_ID = \"patientId\"\n EXPIRES_IN = \"expiresIn\"\n SUBMITTER = \"submitter\"\n\n emails: list[str] = required_field(metadata=meta(validate_email_list))\n roleId: str = required_field(metadata=meta(validate_role_id))\n organizationId: str = default_field(metadata=meta(validate_object_id))\n deploymentIds: list[str] = default_field(metadata=meta(validate_object_ids))\n patientId: str = default_field(metadata=meta(validate_object_id))\n expiresIn: str = field(default=\"P1W\", metadata=meta(parse_duration))\n submitter: AuthorizedUser = required_field()\n\n @classmethod\n def validate(cls, instance):\n super().validate(instance)\n is_org_role = check_role_id_valid_for_organization(\n instance.roleId, instance.organizationId\n )\n is_proxy_role = instance.roleId == RoleName.PROXY\n is_common_role = instance.roleId in RoleName.common_roles()\n if is_common_role:\n must_be_at_least_one_of(\n organizationId=instance.organizationId,\n deploymentIds=instance.deploymentIds,\n )\n must_not_be_present(patientId=instance.patientId)\n\n if instance.deploymentIds and len(instance.deploymentIds) > 1:\n must_be_present(organizationId=instance.organizationId)\n elif is_org_role:\n must_be_present(organizationId=instance.organizationId)\n must_not_be_present(deploymentIds=instance.deploymentIds)\n must_not_be_present(patientId=instance.patientId)\n elif is_proxy_role:\n must_be_present(patientId=instance.patientId)\n must_not_be_present(deploymentIds=instance.deploymentIds)\n must_not_be_present(organizationId=instance.organizationId)\n else:\n must_not_be_present(patientId=instance.patientId)\n must_be_present(deploymentIds=instance.deploymentIds)\n if len(instance.deploymentIds) == 0:\n msg = f\"Must be invited to at least one deployment\"\n raise InvalidRequestException(msg)\n\n multiple_deployment_role = (\n instance.roleId in cls.multiple_deployment_roles()\n )\n\n if multiple_deployment_role and len(instance.deploymentIds) > 1:\n must_be_present(organizationId=instance.organizationId)\n\n if not multiple_deployment_role and len(instance.deploymentIds) > 1:\n msg = f\"Role {instance.roleId} can only be invited to one deployment\"\n raise InvalidRequestException(msg)\n\n def check_permission(self, submitter: AuthorizedUser):\n submitter_role = submitter.get_role()\n\n if submitter.is_super_admin():\n return\n\n if is_common_role(submitter_role.id):\n if not self.roleId == RoleName.PROXY:\n if not submitter.role_assignment.is_org():\n if not self.deploymentIds:\n raise PermissionDenied\n else:\n self.validate_resource_access(\n \"deployment\", self.deploymentIds, submitter\n )\n else:\n if self.deploymentIds:\n self.validate_resource_access(\n \"deployment\", self.deploymentIds, 
submitter\n )\n elif self.organizationId:\n self.validate_resource_access(\n \"organization\", [self.organizationId], submitter\n )\n\n allowed_roles = INVITATION_PERMISSIONS_PER_ROLE.get(submitter_role.id) or []\n if ObjectId.is_valid(self.roleId):\n allowed_roles.append(self.roleId)\n if self.roleId not in allowed_roles:\n raise PermissionDenied\n\n @staticmethod\n def validate_resource_access(\n resource_name: str, resources: list[str], submitter: AuthorizedUser\n ):\n allowed_resources = []\n\n if resource_name == \"organization\":\n allowed_resources = submitter.organization_ids()\n elif resource_name == \"deployment\":\n allowed_resources = submitter.deployment_ids()\n\n if not all(resource_id in allowed_resources for resource_id in resources):\n raise PermissionDenied\n\n @staticmethod\n def multiple_deployment_roles():\n org_keys = inject.instance(DefaultRoles).organization.keys()\n return (set(org_keys) - set(RoleName.org_roles())).union(\n set(RoleName.common_roles())\n )\n\n @property\n def deployment_id(self) -> Optional[str]:\n if self.deploymentIds:\n return self.deploymentIds[0]\n\n\n@convertibleclass\nclass ResendInvitationsRequestObject(BaseAuthRequestObject):\n INVITATION_CODE = \"invitationCode\"\n EMAIL = \"email\"\n\n email: str = required_field(metadata=meta(validate_email))\n invitationCode: str = required_field()\n\n\n@convertibleclass\nclass ResendInvitationsListRequestObject(BaseAuthRequestObject):\n INVITATIONS_LIST = \"invitationsList\"\n\n @convertibleclass\n class InvitationItem:\n INVITATION_CODE = \"invitationCode\"\n EMAIL = \"email\"\n\n email: str = required_field(metadata=meta(validate_email))\n invitationCode: str = required_field(metadata=meta(not_empty))\n\n invitationsList: list[InvitationItem] = required_field(metadata=meta(not_empty))\n\n\n@convertibleclass\nclass GetInvitationLinkRequestObject(BaseAuthRequestObject):\n DEPLOYMENT_ID = \"deploymentId\"\n ROLE_ID = \"roleId\"\n EXPIRES_IN = \"expiresIn\"\n RETRIEVE_SHORTENED = \"retrieveShortened\"\n SENDER_ID = \"senderId\"\n\n deploymentId: str = required_field()\n roleId: str = required_field()\n expiresIn: str = field(default=\"P1W\", metadata=meta(parse_duration))\n retrieveShortened: bool = field(default=False)\n senderId: str = required_field(metadata=meta(validate_object_id))\n\n\n@convertibleclass\nclass DeleteInvitationRequestObject(RequestObject):\n INVITATION_ID = \"invitationId\"\n\n invitationId: str = required_field(metadata=meta(validate_object_id))\n\n\n@convertibleclass\nclass DeleteInvitationsListRequestObject(RequestObject):\n INVITATION_ID_LIST = \"invitationIdList\"\n INVITATION_TYPE = \"invitationType\"\n\n invitationIdList: list[str] = required_field(\n metadata=meta(lambda x: all(map(validate_object_id, x)))\n )\n invitationType: InvitationType = field(default=InvitationType.PERSONAL)\n\n\n@convertibleclass\nclass RetrieveInvitationsRequestObject(BaseAuthRequestObject):\n EMAIL = \"email\"\n ROLE_TYPE = \"roleType\"\n SKIP = \"skip\"\n LIMIT = \"limit\"\n SUBMITTER = \"submitter\"\n INVITATION_TYPE = \"invitationType\"\n SORT_FIELDS = \"sortFields\"\n\n class RoleType(Enum):\n MANAGER = \"Manager\"\n USER = \"User\"\n\n email: str = default_field()\n roleType: RoleType = required_field()\n skip: int = positive_integer_field(default=None, metadata=meta(required=True))\n limit: int = positive_integer_field(default=None, metadata=meta(required=True))\n submitter: AuthorizedUser = required_field()\n invitationType: InvitationType = default_field()\n sortFields: list[SortField] = 
default_field()\n\n def post_init(self):\n if not self.sortFields:\n self.sortFields = [\n SortField.from_dict(\n {\n SortField.FIELD: Invitation.CREATE_DATE_TIME,\n SortField.DIRECTION: SortField.Direction.DESC.value,\n }\n )\n ]\n\n\n@convertibleclass\nclass InvitationValidityRequestObject(RequestObject):\n INVITATION_CODE = \"invitationCode\"\n\n invitationCode: str = required_field()\n","repo_name":"meenu-gupta/huma-server-sdk","sub_path":"extensions/authorization/router/invitation_request_objects.py","file_name":"invitation_request_objects.py","file_ext":"py","file_size_in_byte":10478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"34432829666","text":"import h5py\nimport numpy as np\nimport sys\n\n#my own toolkit\nimport HiCutils\nimport utils\nimport convert\n\n### YOU ARE SUPPOSED TO ONLY MODIFY VALUE HERE ###\n#input file\nbedfilename='/users/invites/carron/Documents/Boost-HiC/test_dataset/mouse10kb_fend.bed'\nmatrixfilename='/users/invites/carron/Documents/Boost-HiC/test_dataset/chr16ES_10000.matrix'\nOperation='Sample'\nrepositoryout='/run/media/carron/0ac0fffa-3350-431d-b1d1-865f8a21db21/data/Hi-C/Mouse/boyan/test/'\n\n#default parameter\nresolution=10000 #default : 10kb\nachr=\"chr16\"\nalpha=0.2 #AFTER a lot of test : 0.24 is always a good and safe compromise, you must use this value\n###\n\n\ndef BoostHiC(amat):\n\tnormmat=HiCutils.SCN(np.copy(amat))\n\tFFmat=np.power(HiCutils.fastFloyd(1/np.power(normmat.copy(),alpha)),-1/alpha) #to dist, FF, to contact in one line\n\tboostedmat=HiCutils.adjustPdS(normmat,FFmat)\n\treturn boostedmat\n\ndef Sample(amat,repositoryout):\n\tpercentofsample=[0.1,1.,10.]\n\tfor j in percentofsample:\n\t\tprint(\"Value of sample\",j)\n\t\tchrmat_s=np.copy(amat)\n\t\tchrmat=HiCutils.downsample_basic(chrmat_s,j)\n\t\tfh5 = h5py.File(repositoryout+\"inputmat_sampleat_\"+str(j)+\"_percent.hdf5\", \"w\")\n\t\tfh5['data'] = chrmat\n\t\tfh5.close()\n\n\n\n### CODE EXECUTION ###\n\n# load the data\nprint(\"LOADING MATRIX\")\nD=convert.loadabsdatafile(bedfilename)\nbeginfend=D[achr][0]\nendfend=D[achr][1]\nprint(\"Data fend :\",beginfend,endfend)\nbasemat=convert.loadmatrixselected(matrixfilename,beginfend,endfend)\n\n#matrix filtering\nprint(\"FILTERING\")\npos_out=HiCutils.get_outliers(basemat)\nbasematfilter=basemat[np.ix_(~pos_out, ~pos_out)]\nbasematfilter=np.copy(basematfilter)\n#basematfilter=basematfilter[0:1000,0:1000]\nprint(len(basemat),len(basematfilter))\nfh5 = h5py.File(repositoryout+\"inputmat.hdf5\", \"w\")\nfh5['data'] = basemat\nfh5.close()\nfh5 = h5py.File(repositoryout+\"inputmat_filtered.hdf5\", \"w\")\nfh5['data']=basematfilter\nfh5.close()\nutils.savematrixasfilelist3(pos_out,repositoryout+\"filteredbin.txt\")\n\nif Operation==\"Boost\":\n\tprint(\"Boost Hic\")\n\tboosted=BoostHiC(basematfilter)\n\t#save\n\tfh5 = h5py.File(repositoryout+\"boostedmat.hdf5\", \"w\")\n\tfh5['data']=boosted\n\tfh5.close()\nelif Operation==\"Sample\":\n\tprint(\"SAMPLING\")\n\tSample(basematfilter,repositoryout)\n\n\n\n\n","repo_name":"LeopoldC/Boost-HiC","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"46"} +{"seq_id":"26415077614","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nexcel_file = \"血常规数据整合.xlsx\"\r\ndata = 
pd.read_excel(excel_file)\r\n\r\nX = data[['WBC', 'LY','GR','MO','RBC','Hgb','HCT','MCV','MCH','RDW','PLT','PCT','MPV','PDW']].values\r\nY = data['result'].values\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=50)\r\n\r\nk = 10\r\nclsf = KNeighborsClassifier(n_neighbors=k)\r\nclsf.fit(X_train,Y_train)\r\n\r\nY_pred = clsf.predict(X_test)\r\nprint(Y_pred)\r\nprint(Y_test)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nacc = accuracy_score(Y_test,Y_pred)\r\nprint(acc)\r\n","repo_name":"KaFuuchao0313/BloodExamProject","sub_path":"py/KNN_basic.py","file_name":"KNN_basic.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"7632612459","text":"#!/usr/bin/env python\n\n\"\"\"\nCalculate enrichment statistics for two sets of fasta files\nInputs:\n two fasta files to compare\n file containing patterns to check\nOutputs:\n pickled dictionary of pattern enrichments\nBen Ober-Reynolds\n\"\"\"\n\n\nimport os\nimport sys\nimport re\nimport time\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom Bio import SeqIO\nfrom joblib import Parallel, delayed\n\n\ndef main():\n\n # set up command line argument parser\n parser = argparse.ArgumentParser(description='Calculate motif densities \\\n for a target and a background set of fastas.')\n group = parser.add_argument_group('required arguments:')\n group.add_argument('-fi', '--fasta_of_interest', required=True,\n help='file containing clusters of interest')\n group.add_argument('-fb', '--background_fasta', required=True,\n help='file containing background clusters')\n group.add_argument('-pf', '--pattern_file', required=True,\n help='file containing patterns to check for. Format: \\\n {pattern name}\\\\t{regex_pattern}')\n group = parser.add_argument_group('optional arguments')\n group.add_argument('-od', '--output_directory', default=\".\",\n help='output directory for statistics file and figures. \\\n Default is current directory')\n group.add_argument('-op', '--output_prefix', default=\"enrichment\",\n help='output prefix for results file and figures')\n group.add_argument('-isn', '--interesting_seq_name', \n default=\"Sequences of Interest\",\n help='The name of the sequence of interest pool. Default is \\\n \"Sequences of Interest\"')\n group.add_argument('-bsn', '--background_seq_name', \n default=\"Background Sequences\", help='The name of the background \\\n sequence pool. Default is \"Background Sequences\"')\n group.add_argument('-rc', '--reverse_comp', default=\"y\",\n help='also calculate enrichment in reverse complement of each pool \\\n [y/n]? Default is y.')\n group.add_argument('-nb', '--num_bootstraps', type=int, default=1000,\n help='number of times to resample pools for enrichment calculation. 
\\\n Default is 1000.')\n group.add_argument('-n', '--num_cores', type=int, default=1,\n help='number of cores to use for bootstrapping.')\n\n # print help if no arguments provided\n if len(sys.argv) <= 1:\n parser.print_help()\n sys.exit()\n\n # parse command line arguments\n args = parser.parse_args()\n numCores = args.num_cores\n\n # Pre-defined variables, constants, and settings\n input_file_format = 'fasta'\n rev_c_tag = \"Rev-Comp\"\n output_prefix = time.strftime(\"%Y%m%d\") + \"_\" + args.output_prefix\n pickle_file_ext = \"p\"\n\n # Do some error checking before running this long script:\n output_dir = args.output_directory\n if not os.path.isdir(output_dir):\n print(\"Error: invalid output directory. Exiting...\")\n sys.exit()\n \n # Read in files:\n seqs_of_interest = read_fasta(args.fasta_of_interest, input_file_format)\n background_seqs = read_fasta(args.background_fasta, input_file_format)\n pattern_dict = read_pattern_file(args.pattern_file)\n\n # Find smallest pool size:\n pool_size = min([len(seqs_of_interest), len(background_seqs)])\n\n # seq pool dict:\n seq_pool_dict = {args.interesting_seq_name: seqs_of_interest, \n args.background_seq_name: background_seqs}\n\n # Results dictionary:\n density_result_dict = {}\n for pname in pattern_dict.keys():\n density_result_dict[pname] = {}\n\n # compare to reverse complement?\n if args.reverse_comp == 'y':\n interesting_seq_rc_name = args.interesting_seq_name + \" \" + rev_c_tag\n background_seq_rc_name = args.background_seq_name + \" \" + rev_c_tag\n rc_seqs_of_interest = reverse_comp(seqs_of_interest)\n rc_background_seqs = reverse_comp(background_seqs)\n seq_pool_dict[interesting_seq_rc_name] = rc_seqs_of_interest\n seq_pool_dict[background_seq_rc_name] = rc_background_seqs\n\n # calculate motif density for each pattern\n if numCores > 1:\n with Parallel(n_jobs=numCores, verbose=10) as parallel: \n for pname in pattern_dict.keys():\n for pool_name in seq_pool_dict.keys():\n densities = []\n print(\"Calculating density of pattern '{}' in pool '{}'\\\n \".format(pname, pool_name))\n densities = parallel(delayed(calc_resampled_motif_density)\\\n (seq_pool_dict[pool_name], pool_size, pattern_dict[pname])\n for i in range(args.num_bootstraps))\n density_result_dict[pname][pool_name] = densities\n else:\n for pname in pattern_dict.keys():\n for pool_name in seq_pool_dict.keys():\n densities = []\n print(\"Calculating density of pattern '{}' in pool '{}'\\\n \".format(pname, pool_name))\n densities = [calc_resampled_motif_density(\n seq_pool_dict[pool_name], pool_size, pattern_dict[pname])\n for i in range(args.num_bootstraps)]\n density_result_dict[pname][pool_name] = densities\n\n # Dump results to pickle for latter replotting\n with open(output_dir + '/' + output_prefix + '.' + pickle_file_ext, 'wb') as f:\n pickle.dump(density_result_dict, f)\n\n\ndef read_fasta(filename, input_file_format):\n \"\"\"\n Read in a fasta file, and return sequences as a list.\n Input: fasta filename\n Output: sequence array \n \"\"\"\n fasta_list = []\n with open(filename, 'r') as f:\n for seq_rec in SeqIO.parse(f, input_file_format):\n seq_rec = seq_rec.upper()\n fasta_list.append(str(seq_rec.seq))\n return np.array(fasta_list)\n\n\ndef read_pattern_file(filename):\n \"\"\"\n Read in a pattern file. 
Note that pattern files must be two-column,\n tab-delimited files with the first column being the pattern name, and\n the second column the regular expression defining that pattern.\n \"\"\"\n pattern_dict = {}\n with open(filename, 'r') as f:\n for line in f:\n pname, reg_exp = line.strip().split('\\t')\n reg_exp = re.compile(reg_exp)\n pattern_dict[pname] = reg_exp\n return pattern_dict\n\n\ndef reverse_comp(fasta_array):\n \"\"\"\n Reverse complement a list of sequences\n Input: list of sequences\n Output: reverse complement of same sequence list\n \"\"\"\n trans_table = str.maketrans('AGCT', 'TCGA')\n rev_list = []\n for seq in fasta_array:\n rev_list.append(seq.translate(trans_table)[::-1])\n return np.array(rev_list)\n\n\ndef calc_resampled_motif_density(seq_array, samp_size, regex):\n \"\"\"\n Calculate the length-normalized density of a specific regular\n expression pattern in a resampled sequence pool.\n Inputs: list of sequences, number of seqs to draw, regular expression pattern\n Output: length-normalized motif density\n \"\"\"\n resampled_pool = np.random.choice(seq_array, size=samp_size, replace=True)\n total_seq_space = 0\n patterns_found = 0\n for seq in resampled_pool:\n patterns_found += len(re.findall(regex, seq))\n total_seq_space += len(seq)\n return patterns_found/total_seq_space\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"boberrey/sequence_analysis","sub_path":"pattern_enrichment.py","file_name":"pattern_enrichment.py","file_ext":"py","file_size_in_byte":7332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"28588191341","text":"# - *- coding: utf- 8 - *-\nimport time\n\nfrom pepper.robot import Pepper\ntry:\n import urllib\nexcept:\n import urllib.request as urllib\nimport base64\nimport json\nfrom PIL import Image\nimport random\n\n\ndef uploadPhotoToWeb(photo):\n \"\"\"we need to upload photo to web as we (me) are not able to open it from local folder\"\"\"\n f = open(photo, \"rb\") # open our image file as read only in binary mode\n image_data = f.read() # read in our image file\n b64_image = base64.standard_b64encode(image_data)\n client_id = \"af482612ae6d1c1\" # this the id which we've got after registrating the app on imgur\n headers = {'Authorization': 'Client-ID ' + client_id}\n data = {'image': b64_image, 'title': 'test'}\n request = urllib.Request(url=\"https://api.imgur.com/3/upload.json\", data=urllib.urlencode(data),\n headers=headers)\n response = urllib.urlopen(request).read()\n parse = json.loads(response)\n return parse['data']['link'] #returns a url of the photo\n\n\ndef getRandName():\n \"\"\"returns a random name for the picture in order not to replace the old photo\"\"\"\n randNum = random.randint(0, 1000)\n return \"demoPictures/photo\" + str(randNum) + \".png\"\n\n\nclass PepperDemo:\n def __init__(self, ip_address, port=9559):\n self.robot = None\n self.robot = Pepper(ip_address, port)\n self.robot.set_czech_language()\n self.photoName = None\n self.greetings = [\"Good afternoon\", \"Hello\", \"Hi\", \"Hello everobody\", \"Welcome\"]\n self.asks = [\"May I photograph you?\",\"May I take your picture?\", \"Do you want to make your picture?\"]\n\n\n def wantToTakePic(self):\n \"\"\"recognise answer with google speech reco\"\"\"\n answers = {\"no\": [\"no\", \"no way\", \"not\", \"no no\", \" i don't\", \"i dont know\", \"not today\", \"later\", \"tommorow\"],\n \"yes\": [\"yes\", \"definitely\", \"yep\", \"ok\", \"okey dokey\", \"sure\", \"all yes\", \"you must\",\n 
\"absolutely\", \"i want\", \"i think so\", \"i agree\", \"if you want\", \"if you insist\", \"probably\", \"maybe\",\n \"yes sir\"]}\n recorded = self.robot.recognize_google(lang=\"en-US\")\n answer = self.getAnswer(answers, recorded)\n if answer == \"no\":\n return False\n elif answer == \"yes\":\n return True\n else:\n return None\n\n\n def getAnswer(self, dic, recorded):\n \"\"\"looks for a recorded answer in a dictionar\"\"\"\n for x in dic.keys():\n if dic[x] in recorded.lower():\n return x\n return None\n\n def welcomeAndAsk(self):\n self.robot.say(random.choice(self.greetings))\n self.robot.greet()\n self.robot.say(random.choice(self.asks))\n\n def takePicture(self):\n self.robot.subscribe_camera(\"camera_top\", 2, 30)\n img = self.robot.get_camera_frame(show=False)\n self.robot.unsubscribe_camera()\n self.robot.play_sound(\"/home/nao/camera1.ogg\")\n im = Image.fromarray(img)\n self.photoName = getRandName()\n im.save(self.photoName)\n\n def showPicture(self):\n link = uploadPhotoToWeb(self.photoName)\n self.robot.show_image(link)\n time.sleep(5)\n self.robot.reset_tablet()\n\n def recogniseAnswerAndDecide(self):\n isTakePic = self.wantToTakePic()\n if isTakePic:\n self.robot.say(\"Perfect. On your marks. 3, 2, 1 .\")\n self.takePicture()\n self.showPicture()\n elif isTakePic is None:\n self.robot.say(\"Sorry, I did not understand you. Please repeat.\")\n self.recogniseAnswerAndDecide()\n else:\n self.robot.say(\"Maybe next time\")\n\n def dealWithRecoErrors(self):\n \"\"\"there is a modifiable grammar error sometimes occurred.\n In order to deal with it you should change language to english and back\"\"\"\n self.robot.set_english_language()\n self.robot.set_czech_language()\n\n def run(self):\n self.dealWithRecoErrors()\n self.welcomeAndAsk()\n self.recogniseAnswerAndDecide()\n\nif __name__ == \"__main__\":\n pepperDemo = PepperDemo(\"10.37.1.232\")\n pepperDemo.run()\n\n","repo_name":"incognite-lab/Pepper-Controller","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"46"} +{"seq_id":"26129396775","text":"from adestis_netbox_plugin_account_management.models import *\nfrom netbox.filtersets import NetBoxModelFilterSet\nfrom django.db.models import Q\n\nfrom dcim.models import *\nfrom utilities.filters import TreeNodeMultipleChoiceFilter\nfrom django.utils.translation import gettext as _\nimport django_filters\nfrom utilities.forms import (\n DynamicModelMultipleChoiceField, MultipleChoiceField, StaticSelect, TagFilterField, BOOLEAN_WITH_BLANK_CHOICES,\n)\nfrom virtualization.models import VirtualMachine, ClusterGroup, Cluster\nfrom tenancy.models import *\n\n__all__ = (\n 'SystemFilterSet',\n)\n\n\nclass SystemFilterSet(NetBoxModelFilterSet):\n \n cluster_group_id = DynamicModelMultipleChoiceField(\n queryset=ClusterGroup.objects.all(),\n required=False,\n label=_('Cluster group (name)')\n ) \n \n cluster_id = DynamicModelMultipleChoiceField(\n queryset=Cluster.objects.all(),\n required=False,\n label=_('Cluster (name)')\n ) \n \n device_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Device.objects.all(),\n label=_('Device (ID)'),\n )\n \n device = django_filters.ModelMultipleChoiceFilter(\n field_name='device__name',\n queryset=Device.objects.all(),\n to_field_name='name',\n label=_('Device (name)'),\n )\n \n virtual_machine_id = DynamicModelMultipleChoiceField(\n queryset=VirtualMachine.objects.all(),\n required=False,\n label=_('Virtual 
machine (name)'))\n\n group = TreeNodeMultipleChoiceFilter(\n queryset=TenantGroup.objects.all(),\n field_name='group',\n lookup_expr='in',\n to_field_name='group',\n label=_('Tenant group (group)'),\n )\n \n tenant_id = django_filters.ModelMultipleChoiceFilter(\n queryset=Tenant.objects.all(),\n label=_('Tenant (ID)'),\n )\n \n tenant = django_filters.ModelMultipleChoiceFilter(\n queryset=Tenant.objects.all(),\n field_name='tenant__name',\n to_field_name='tenant',\n label=_('Tenant (name)'),\n )\n\n class Meta:\n model = System\n fields = ('id', 'tenant', 'group', 'cluster_group_id', 'cluster_id', 'device', 'virtual_machine_id', 'name', 'system_url', 'system_status') \n\n def search(self, queryset, name, value):\n if not value.strip():\n return queryset\n return queryset.filter(\n Q(name__icontains=value) |\n Q(system_url__icontains=value) |\n Q(system_status__icontains=value)\n )\n","repo_name":"adestis/netbox-account-management","sub_path":"adestis_netbox_plugin_account_management/filtersets/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"25740885459","text":"import numpy as np\nimport pandas as pd\nfrom tqdm.notebook import trange\n\ndef vuln_sim(dists, sim_years, days_in_year=365, sample_count=1000000, clip=True, verbose=False):\n samples = pd.DataFrame({key:val.rvs(sample_count) for key, val in dists.items()})\n \n variants = {}\n active = []\n vuln_days = []\n t = trange(sim_years * days_in_year) if verbose else range(sim_years * days_in_year)\n for i in t:\n var_occur = np.random.choice(samples['occurence']) / days_in_year\n if var_occur > np.random.uniform():\n var_idx = len(variants)\n variants[var_idx] = {\n 'start_day': i,\n 'identification': np.random.choice(samples['identification']),\n 'remediation': np.random.choice(samples['remediation']),\n }\n variants[var_idx]['duration'] = variants[var_idx]['identification'] + variants[var_idx]['remediation']\n active.append(var_idx)\n\n efficacy = np.random.choice(samples['variant']) if len(active) > 0 \\\n else np.random.choice(samples['efficacy'])\n\n if efficacy < np.random.uniform():\n vuln_days.append(i)\n\n for var_idx in [*active]:\n var_end = 1 / variants[var_idx]['duration']\n if var_end > np.random.uniform():\n active.remove(var_idx)\n variants[var_idx]['end_day'] = i\n \n vuln_vals = (pd.Series(vuln_days) // days_in_year).value_counts().reindex(np.arange(1000)).fillna(0) / days_in_year\n if clip: vuln_vals = vuln_vals.clip(1/sim_years, 1 - (1/sim_years))\n \n return vuln_vals, variants","repo_name":"Calvinxc1/BayesianPlayground","sub_path":"tools/vuln_sim.py","file_name":"vuln_sim.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"39277673078","text":"from __future__ import absolute_import, unicode_literals\n\nimport json\nimport logging\n\nfrom django.conf import settings\nfrom django.db.models import Q, Count\nfrom django.core.management.base import BaseCommand\n\nfrom muses.collection.models import Item\nfrom muses.naive_classification.helpers_os import predict_image_path_dict\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n \"\"\"Classify.\"\"\"\n\n help = \"Classify items with our AI.\"\n\n requires_system_checks = False\n\n def add_arguments(self, parser):\n parser.add_argument('--update-existing',\n action='store_true',\n dest='update_existing',\n 
default=False,\n help=\"Update existing classifications.\")\n\n def handle(self, *args, **options):\n \"\"\"Handle.\n\n :param args:\n :param options:\n :return:\n \"\"\"\n update_existing = bool(options['update_existing'])\n\n filters = []\n if not update_existing:\n for field in ['classified_as']:\n filters.append(\n Q(**{\"{}__isnull\".format(field): True})\n | Q(**{\"{}__exact\".format(field): ''})\n )\n\n items = Item \\\n .objects \\\n .filter(*filters) \\\n .prefetch_related('images') \\\n .annotate(num_images=Count('images')) \\\n .filter(num_images__gt=0)\n\n for item in items:\n paths = []\n for image in item.images.all():\n try:\n paths.append(image.image.path)\n except Exception as err:\n LOGGER.warning(err)\n\n conf = settings \\\n .MUSES_CONFIG['classification']['naive_classification']\n model_path = conf['model_path']\n\n try:\n classification = predict_image_path_dict(\n paths,\n model_path=model_path\n )\n except Exception as err:\n LOGGER.warning(err)\n continue\n\n top_results = list(classification.items())[:5]\n if top_results:\n try:\n item.classified_as = top_results\n item.save()\n except Exception as err:\n pass\n","repo_name":"Aincient/cleo","sub_path":"src/muses/collection/management/commands/muses_classify.py","file_name":"muses_classify.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"15152418750","text":"# hashtable_lookup(htable,key)\n\n# that takes two inputs, a hashtable\n# and a key (string),\n# and returns the value associated\n# with that key.\n\ndef hashtable_lookup(htable,key):\n bucket = hashtable_get_bucket(htable,key)\n for s in bucket:\n if s[0] == key:\n return s[1]\n\n\n# hashtable_add(htable,key,value)\n#\n# that adds the key to the hashtable (in \n# the correct bucket), with the correct \n# value and returns the new hashtable.\n#\n# (Note that the video question and answer\n# do not return the hashtable, but your code\n# should do this to pass the test cases.)\n\ndef hashtable_add(htable,key,value):\n # b= hashtable_get_bucket(htable,key)\n \n h=hash_string(key,len(htable))\n htable[h].append([key,value])\n \n \n return htable \n \n# Define a procedure, hashtable_get_bucket,\n# that takes two inputs - a hashtable, and\n# a keyword, and returns the bucket where the\n# keyword could occur.\n \ndef hashtable_get_bucket(htable,keyword):\n return htable[hash_string(keyword,len(htable))]\n\ndef hash_string(keyword,buckets):\n out = 0\n for s in keyword:\n out = (out + ord(s)) % buckets\n return out\n\ndef make_hashtable(nbuckets):\n table = []\n for unused in range(0,nbuckets):\n table.append([])\n return table\n\ntable = make_hashtable(5)\nhashtable_add(table,'Bill', 17)\nhashtable_add(table,'Coach', 4)\nhashtable_add(table,'Ellis', 11)\nhashtable_add(table,'Francis', 13)\nhashtable_add(table,'Louis', 29)\nhashtable_add(table,'Nick', 2)\nhashtable_add(table,'Rochelle', 4)\nhashtable_add(table,'Zoe', 14)\nprint (table)\n#>>> [[['Ellis', 11], ['Francis', 13]], [], [['Bill', 17], ['Zoe', 14]], \n#>>> [['Coach', 4]], [['Louis', 29], ['Nick', 2], ['Rochelle', 4]]]\n\n","repo_name":"fahm7/Python-Examples","sub_path":"02Python Advance Course01/3Data Structure/hash_table.py","file_name":"hash_table.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"1436880719","text":"import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin 
import db\nimport os\nimport json\nfrom datetime import datetime\n\nnow = datetime.now()\ndt_string = now.strftime(\"%d-%m-%Y_%H:%M:%S\")\n \ncred = credentials.Certificate(\"vinterprosjekt-it2-firebase-adminsdk-wi0bg-f0efd8881e.json\")\nfirebase_admin.initialize_app(cred, {'databaseURL':\"https://vinterprosjekt-it2-default-rtdb.europe-west1.firebasedatabase.app/\"})\n\nref = db.reference(\"/bazaar\")\n\nwith open(\"bazaar_info.json\", \"r\") as f:\n\tfile_contents = json.load(f)\n# ref.set(file_contents)\n\nref.child(dt_string).set(file_contents)\n\nos.remove(\"bazaar_info.json\")\n\nref = db.reference(\"/auctions\")\n\nwith open(\"auctions_info.json\", \"r\") as f:\n\tfile_contents = json.load(f)\n# ref.set(file_contents)\n\nref.child(dt_string).set(file_contents)\n\nos.remove(\"auctions_info.json\")\n\n# data_push = ref.push().set(file_contents)\n\n# post_id = data_push.key\n","repo_name":"johahold/prosjekt_vinterferie","sub_path":"data_management/push_to_database.py","file_name":"push_to_database.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"43563789812","text":"# -*- coding: utf-8 -*-\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter import messagebox\r\nfrom tkinter import simpledialog\r\n\r\nfrom tkinter.colorchooser import *\r\nfrom library.settings_window import *\r\n\r\nimport tkinter.font as tkFont\r\nfrom PIL import Image, ImageTk\r\n\r\n\r\n#imports configparser and images list\r\nfrom library.read_config import *\r\n\r\nsavedFile = {1:\"\"}\r\nimgDict={}\r\n\r\n#======================================\r\n# Principale Window\r\n#======================================\r\n\r\nclass BlockNote_window:\r\n\r\n def __init__(self,master,content,icons_menu):\r\n self.master = master\r\n self.content=content\r\n self.icons_menu=icons_menu\r\n\r\n #initiat style variables\r\n self.all_variable_initiation()\r\n\r\n def create_window(self):\r\n self.master = Tk()\r\n self.master.title(\"Editeur de Texte\")\r\n\r\n # set the dimensions of the screen \r\n # and where it is placed base on conteur values\r\n self.master.geometry('%dx%d+%d+%d' % (int(config_window_width), int(config_window_height), int(config_window_left), int(config_window_top)))\r\n\r\n def add_text(self):\r\n self.content = Text(self.master,padx=10,pady=5, undo=True)\r\n self.content.config(fg=str(BlockNote_window.get_config_default_font_color(self)) ,bg=str(BlockNote_window.get_config_default_bg_color(self)))\r\n self.content.config(font=(config_font_style,config_font_size))\r\n self.content.pack(expand=1,side=BOTTOM,fill='both')\r\n\r\n #import text style from configuration.ini\r\n self.change_text_style(self.weight_v, self.underline_v, self.slant_v)\r\n \r\n # justify text -- problem in loads first time - fix later\r\n self.content.tag_configure(\"%s\" % self.alignment, justify=self.alignment.lower())\r\n self.content.tag_add(\"%s\" % self.alignment, 1.0, \"end\")\r\n\r\n def generate(self):\r\n self.master.mainloop()\r\n\r\n #======================================\r\n # Some variables initiation\r\n #======================================\r\n\r\n def all_variable_initiation(self) :\r\n\r\n self.weight_v = config_weight_text\r\n self.slant_v = config_slant_text\r\n self.underline_v = eval(config_underline_text)\r\n self.alignment = config_alignment_text\r\n\r\n #======================================\r\n # BarMenu Actions\r\n #======================================\r\n\r\n # New NotePad 
winodw\r\n def nouveau(self ,*args):\r\n import os\r\n import sys\r\n # Get the path to current interpreter and run a new window\r\n os.popen(sys.executable + ' main.py')\r\n\r\n\r\n # Open an Existing file\r\n def fopen(self, *args):\r\n file = self.master.filename = filedialog.askopenfilename(initialdir = \"/\",title = \"Select File\",filetypes = ((\"Text Files\",\"*.txt\"),(\"all files\",\"*.*\")))\r\n \r\n if file:\r\n self.master.title('{} - {}'.format(os.path.basename(file), \"Editeur de texte\"))\r\n self.content.delete(1.0, END)\r\n with open(file) as _file:\r\n self.content.insert(1.0, _file.read())\r\n\r\n\r\n # Save As Method\r\n def saveAs(self, *args):\r\n # create save dialog\r\n fichier=self.master.filename = filedialog.asksaveasfilename(initialdir = \"/\",title = \"Enregistrer Sous\\\r\n \",filetypes = ((\"Fichier Texte\",\"*.txt\"),(\"Tous les fichiers\",\"*.*\")))\r\n\r\n if fichier:\r\n fichier = fichier + \".txt\"\r\n \r\n savedFile[1] = fichier\r\n f = open(fichier,\"w\")\r\n s = self.content.get(\"1.0\",END)\r\n f.write(s) \r\n f.close()\r\n \r\n # Save Method \r\n def save(self, *args):\r\n if(savedFile[1] ==\"\"):\r\n self.saveAs() \r\n else:\r\n f = open(savedFile[1],\"w\")\r\n s = self.content.get(\"1.0\",END)\r\n f.write(s) \r\n f.close() \r\n\r\n # Exit Method\r\n def quitter(self):\r\n if messagebox.askokcancel(\"Are you sure?\", \"Please Confirm that you want to exit!\"):\r\n self.master.quit()\r\n\r\n # Undo text changes\r\n def undo(self, event=None):\r\n try:\r\n self.content.edit_undo()\r\n except:\r\n print('Nothing to undo...')\r\n\r\n # redo text changes\r\n def redo(self, *args):\r\n try:\r\n self.content.edit_redo()\r\n except:\r\n print('Nothing to redo...')\r\n\r\n # Copy text\r\n def copy(self, *args):\r\n try:\r\n self.content.clipboard_clear()\r\n self.content.clipboard_append(self.content.selection_get())\r\n except:\r\n print('Nothing to copy...')\r\n\r\n # Cut text\r\n def cut(self, *args):\r\n try:\r\n self.copy()\r\n self.content.delete(\"sel.first\",\"sel.last\") \r\n except:\r\n print('Nothing to cut...')\r\n\r\n # Paste Text\r\n def paste(self, *args):\r\n try:\r\n self.content.insert(INSERT, self.content.clipboard_get())\r\n except:\r\n print('Nothing to paste...')\r\n\r\n # Delete Selected text\r\n def clear(self,*args):\r\n sel = self.content.get(SEL_FIRST, SEL_LAST)\r\n if sel!='':\r\n self.content.delete(SEL_FIRST, SEL_LAST)\r\n else:\r\n print(\"Noting to clear\")\r\n\r\n # Delete All text\r\n def clearall(self,*args):\r\n try:\r\n self.content.delete(1.0 , END)\r\n except:\r\n print('Nothing to clear...')\r\n\r\n # Select All text\r\n def selectAll(self, *args):\r\n self.content.tag_add(SEL, '1.0', END)\r\n self.content.mark_set(0.0, END)\r\n self.content.see(INSERT)\r\n\r\n def left_alignment(self , *args):\r\n self.content.tag_configure(\"LEFT\",justify=LEFT)\r\n self.content.tag_add(\"LEFT\", 1.0, \"end\")\r\n\r\n conteur['text_styles']['alignment'] = \"LEFT\"\r\n conteur.write(open('configuration.ini','w'))\r\n\r\n def right_alignment(self , *args):\r\n self.content.tag_configure(\"RIGHT\",justify=RIGHT)\r\n self.content.tag_add(\"RIGHT\", 1.0, \"end\")\r\n\r\n conteur['text_styles']['alignment'] = \"RIGHT\"\r\n conteur.write(open('configuration.ini','w'))\r\n\r\n def center_alignment(self , *args):\r\n self.content.tag_configure(\"CENTER\",justify=CENTER)\r\n self.content.tag_add(\"CENTER\", 1.0, \"end\")\r\n\r\n conteur['text_styles']['alignment'] = \"CENTER\"\r\n conteur.write(open('configuration.ini','w'))\r\n\r\n def 
bold(self,*args):\r\n\r\n if self.weight_v ==\"bold\":\r\n self.weight_v = \"normal\"\r\n else:\r\n self.weight_v = \"bold\"\r\n self.change_text_style(self.weight_v, self.underline_v, self.slant_v)\r\n \r\n def underline(self,*args):\r\n\r\n if self.underline_v:\r\n self.underline_v = 0\r\n else:\r\n self.underline_v = 1\r\n self.change_text_style(self.weight_v, self.underline_v, self.slant_v)\r\n\r\n def italic(self,*args):\r\n if self.slant_v ==\"roman\":\r\n self.slant_v = \"italic\"\r\n else:\r\n self.slant_v = \"roman\"\r\n self.change_text_style(self.weight_v, self.underline_v, self.slant_v)\r\n\r\n def change_text_style(self, boldness, underline, slant):\r\n styling = tkFont.Font(family=config_font_style, size=config_font_size,weight= boldness,slant=slant, underline=underline)\r\n self.content.configure(font=styling)\r\n\r\n conteur['text_styles']['underline'] = str(underline)\r\n conteur['text_styles']['weight'] = boldness\r\n conteur['text_styles']['slant'] = slant\r\n conteur.write(open('configuration.ini','w'))\r\n\r\n def find(self, *args):\r\n self.content.tag_remove('found', '1.0', END)\r\n target = simpledialog.askstring('Search', 'words to search:')\r\n simpledialog\r\n if target:\r\n idx = '1.0'\r\n while 1:\r\n idx = self.content.search(target, idx, nocase=1,\r\n stopindex=END)\r\n if not idx:\r\n break\r\n lastidx = '%s+%dc' % (idx, len(target))\r\n self.content.tag_add('found', idx, lastidx)\r\n idx = lastidx\r\n\r\n self.content.tag_config('found',foreground='white', background=\"blue\")\r\n\r\n def open_about(self, *args):\r\n about_window = Toplevel(self.master)\r\n about_window.grab_set()\r\n about_window.title(\"About Me\")\r\n about_window.geometry(\"250x150\")\r\n about_window.resizable(False, False)\r\n about_window.configure(background='white')\r\n\r\n\r\n aboutme_icon = Image.open( other_icons.get('aboutme') )\r\n aboutme_icon = aboutme_icon.resize((100,100))\r\n aboutme_img = ImageTk.PhotoImage(aboutme_icon)\r\n aboutme_label = Label(about_window, image=aboutme_img,borderwidth=0, bg=\"white\")\r\n imgDict['aboutme'] = aboutme_img \r\n aboutme_label.pack()\r\n\r\n my_name= Label(about_window, text=\"SAIFEDDINE CHAGDALI\",fg=\"black\", bg=\"white\", font=('Calibri',10))\r\n my_name.place(x=60, y=100)\r\n\r\n git_link= Label(about_window, text=\"github.com/sifdin17/TextEditor\",fg=\"blue\", bg=\"white\",font=('Calibri',10))\r\n git_link.place(x=40, y=120)\r\n\r\n\r\n\r\n #======================================\r\n # action that opens the setting window\r\n #======================================\r\n\r\n def settings_w(self, *args):\r\n\r\n # New Settings Instance\r\n preference_window = Settings_class(self.master,self.content,\"Settings_win\")\r\n # Create all widgets\r\n preference_window.create_settings_window()\r\n\r\n\r\n #===========================\r\n # BarMenu Creation\r\n #===========================\r\n\r\n def add_menu(self):\r\n # 1 - Création de la barre des menus\r\n menuBar = Menu(self.master)\r\n \r\n # 2 - Création du menu Fichier\r\n global menuFichier\r\n menuFichier = Menu(menuBar,tearoff=0)\r\n menuBar.add_cascade(label = \"File\", menu=menuFichier)\r\n menuFichier.add_command(label=\"New\", command=self.nouveau)\r\n menuFichier.add_command(label=\"Open\", command=self.fopen)\r\n menuFichier.add_command(label=\"Save\", command=self.save)\r\n menuFichier.add_command(label=\"Save as\", command=self.saveAs)\r\n menuFichier.add_separator()\r\n menuFichier.add_command(label=\"Exit\", command = self.quitter) \r\n self.master.config(menu = 
menuBar)\r\n        \r\n        #3 - Création du Menu Edition\r\n        global menuEdition\r\n        menuEdition= Menu(menuBar,tearoff=0)\r\n        menuBar.add_cascade(label = \"Edit\", menu=menuEdition)\r\n        menuEdition.add_command(label=\"Undo\", command = self.undo)\r\n        menuEdition.add_command(label=\"Redo\", command = self.redo)\r\n        menuEdition.add_separator()\r\n        menuEdition.add_command(label=\"Copy\", command=self.copy)\r\n        menuEdition.add_command(label=\"Cut\", command = self.cut)\r\n        menuEdition.add_command(label=\"Paste\", command=self.paste)\r\n        menuEdition.add_separator()\r\n        menuEdition.add_command(label=\"Delete\",command=self.clear)\r\n        menuEdition.add_command(label=\"Delete All\",command=self.clearall)\r\n        menuEdition.add_command(label=\"Select All\",command=self.selectAll)\r\n\r\n        \r\n        # Création du Menu Options\r\n        menuOutils = Menu(menuBar,tearoff=0)\r\n        menuBar.add_cascade(label = \"Tools\", menu = menuOutils)\r\n\r\n        global show_icons_menucheck, show_shortcuts_menucheck\r\n\r\n        show_icons_menucheck = IntVar()\r\n        show_icons_menucheck.set(config_show_icons)\r\n        menuOutils.add_checkbutton(label='Show Icons',onvalue=1, offvalue=0, variable=show_icons_menucheck, command=self.toggle_icons)\r\n\r\n        show_shortcuts_menucheck = IntVar()\r\n        show_shortcuts_menucheck.set(config_show_shortcuts)\r\n        menuOutils.add_checkbutton(label='Show Shortcuts',onvalue=True, offvalue=False, variable=show_shortcuts_menucheck, command=self.toggle_shortcuts)\r\n\r\n\r\n        menuOutils.add_separator()\r\n        menuOutils.add_command(label=\"Settings\", command=self.settings_w)\r\n        \r\n        # Création du Menu Aide\r\n        menuAide = Menu(menuBar,tearoff=0)\r\n        menuBar.add_cascade(label = \"Help\", menu = menuAide)\r\n        menuAide.add_command(label=\"About\",command=self.open_about)\r\n\r\n        if config_show_shortcuts:\r\n            BlockNote_window.add_shortcuts_to_menu(self)\r\n\r\n    #===========================\r\n    # BarMenu Checkbuttons Action\r\n    #===========================\r\n\r\n    def toggle_shortcuts(self):\r\n        if show_shortcuts_menucheck.get():\r\n            BlockNote_window.add_shortcuts_to_menu(self)\r\n        else:\r\n            BlockNote_window.remove_shortcuts_from_menu(self)\r\n\r\n        conteur['menu_settings']['show_shortcuts'] = str( show_shortcuts_menucheck.get() )\r\n        conteur.write(open('configuration.ini','w'))\r\n\r\n    def add_shortcuts_to_menu(self, *args):\r\n\r\n        menuFichier.entryconfig(0, accelerator='Ctrl+N')\r\n        menuFichier.entryconfig(1, accelerator='Ctrl+O')\r\n        menuFichier.entryconfig(2, accelerator='Ctrl+S')\r\n\r\n        menuEdition.entryconfig(0, accelerator='Ctrl+Z')\r\n        menuEdition.entryconfig(1, accelerator='Ctrl+Y')\r\n        menuEdition.entryconfig(3, accelerator='Ctrl+C')\r\n        menuEdition.entryconfig(4, accelerator='Ctrl+X')\r\n        menuEdition.entryconfig(5, accelerator='Ctrl+V')\r\n        menuEdition.entryconfig(9, accelerator='Ctrl+A')\r\n\r\n        # bind shortcuts to menuBar (key sequences match the accelerators configured above)\r\n\r\n        self.content.bind('<Control-n>', self.nouveau)\r\n        self.content.bind('<Control-N>', self.nouveau)\r\n        self.content.bind('<Control-o>', self.fopen)\r\n        self.content.bind('<Control-O>', self.fopen)\r\n        self.content.bind('<Control-s>', self.save)\r\n        self.content.bind('<Control-S>', self.save)\r\n        self.content.bind('<Control-z>', self.undo)\r\n        self.content.bind('<Control-Z>', self.undo)\r\n        self.content.bind('<Control-y>', self.redo)\r\n        self.content.bind('<Control-Y>', self.redo)\r\n        self.content.bind('<Control-c>', self.copy)\r\n        self.content.bind('<Control-C>', self.copy)\r\n        self.content.bind('<Control-x>', self.cut)\r\n        self.content.bind('<Control-X>', self.cut)\r\n        self.content.bind('<Control-v>', self.paste)\r\n        self.content.bind('<Control-V>', self.paste)\r\n        self.content.bind('<Control-a>', self.selectAll)\r\n        self.content.bind('<Control-A>', self.selectAll)\r\n\r\n    def 
remove_shortcuts_from_menu(self, *args):\r\n menuFichier.entryconfig(0, accelerator='')\r\n menuFichier.entryconfig(1, accelerator='')\r\n menuFichier.entryconfig(2, accelerator='')\r\n\r\n menuEdition.entryconfig(0, accelerator='')\r\n menuEdition.entryconfig(1, accelerator='')\r\n menuEdition.entryconfig(3, accelerator='')\r\n menuEdition.entryconfig(4, accelerator='')\r\n menuEdition.entryconfig(5, accelerator='')\r\n menuEdition.entryconfig(9, accelerator='')\r\n\r\n \"\"\"# unbind shortcuts from menuBar \r\n self.content.unbind('', self.nouveau)\r\n self.content.unbind('', self.nouveau)\r\n self.content.unbind('', self.fopen)\r\n self.content.unbind('', self.fopen)\r\n self.content.unbind('', self.save)\r\n self.content.unbind('', self.save)\r\n self.content.unbind('', self.undo)\r\n self.content.unbind('', self.undo)\r\n self.content.unbind('', self.redo)\r\n self.content.unbind('', self.redo)\r\n self.content.unbind('', self.copy)\r\n self.content.unbind('', self.copy)\r\n self.content.unbind('', self.cut)\r\n self.content.unbind('', self.cut)\r\n self.content.unbind('', self.paste)\r\n self.content.unbind('', self.paste)\r\n self.content.unbind('', self.selectAll)\r\n self.content.unbind('', self.selectAll)\"\"\"\r\n\r\n def add_icons_menu(self):\r\n self.icons_menu=Frame(height=10,borderwidth=0, padx=5, pady=0)\r\n self.icons_menu.pack(side=TOP,fill=X)\r\n\r\n def add_icons(self):\r\n i = 0\r\n for path, bind_function in menu_icons_list.items():\r\n\r\n load = Image.open(path)\r\n load = load.resize((16,16))\r\n img = ImageTk.PhotoImage(load)\r\n\r\n label_name = bind_function+\"_label\" # problem d smya -- fix later !\r\n\r\n label_name = Label(self.icons_menu,cursor=\"hand2\", image=img)\r\n imgDict[path] = img # save image ref in imgDict -- keep track of the reference or else it wont work!!! \r\n label_name.pack(side=LEFT,padx=2,pady=5)\r\n\r\n bind_function = \"self.\"+bind_function \r\n label_name.bind( \"