repo_name
stringlengths
5
114
repo_url
stringlengths
24
133
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
directory_id
stringlengths
40
40
branch_name
stringclasses
209 values
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
9.83k
683M
star_events_count
int64
0
22.6k
fork_events_count
int64
0
4.15k
gha_license_id
stringclasses
17 values
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_language
stringclasses
115 values
files
listlengths
1
13.2k
num_files
int64
1
13.2k
beepee14/Spatial-Role-Labelling
https://github.com/beepee14/Spatial-Role-Labelling
466444f6d2fd31a411ca84630dc27358d0462c45
8a63a11a146792b80ad6f8c74316b561950cc8e2
30bdf2bf000e9f25d83e63f3b6017df59cb5c0a9
refs/heads/master
2020-05-19T22:25:29.115823
2014-03-19T23:10:18
2014-03-19T23:10:18
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7753164768218994, "alphanum_fraction": 0.7848101258277893, "avg_line_length": 56.45454406738281, "blob_id": "9a9daf4bbbb20edf27ba342ac3b66ec3fa58d800", "content_id": "3be977c63326c7592e44d400be02e933a6057f17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1264, "license_type": "no_license", "max_line_length": 269, "num_lines": 22, "path": "/README.txt", "repo_name": "beepee14/Spatial-Role-Labelling", "src_encoding": "UTF-8", "text": "To run the spatial role labelling project you need to have jython in your system\nTo install jython:\n\nJython 2.2.1 is distributed as an executable jar file installer. The installer file is included in the project directory. To run the file either double click the jython_installer-2.2.1.jar or run java with the -jar option\n\n>java -jar jython_installer-2.2.1.jar\n\nThis will start the regular GUI installer on most systems, or a consoler installer on headless systems. To force the installer to work in headless mode invoke the installer with a console switch\n\njava -jar jython_installer-2.2.1.jar --console\n\nThe installer will then walk through a similar set of steps in graphical or console mode: showing the license, selecting an install directory and JVM and actually copying Jython to the filesystem. After this completes, Jython is installed in the directory you selected.\n\n*******Input to be given through the file named input.txt\n*******Each line should contain one sentence only \n\n*******The spatial indicator, trajector and landmark are printed for each such set in a differnet line in the file output.txt\n\nAfter installing you need to make the script file run_nlp.sh executable\nchmod +x run_nlp.sh\nand then run the script from the terminal\n> ./run_nlp.sh\n" }, { "alpha_fraction": 0.7835051417350769, "alphanum_fraction": 0.7835051417350769, "avg_line_length": 18.200000762939453, "blob_id": "473cbe0d5fde615879f50b21354c7f34b6491b94", "content_id": "a6dbece63f8f8350c2a2b161f021c4f6f784856d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 194, "license_type": "no_license", "max_line_length": 29, "num_lines": 10, "path": "/run_nlp.sh", "repo_name": "beepee14/Spatial-Role-Labelling", "src_encoding": "UTF-8", "text": "#!/bin/bash\ng++ convert_xml.cpp\n./a.out input.txt input.xml\npython tokenize.py\npython postagger.py\njython parser.py\npython prepositionfeatures.py\npython spatialindicators.py\necho \"DONE!\"\nexit\n\n\n" }, { "alpha_fraction": 0.6004583835601807, "alphanum_fraction": 0.628724217414856, "avg_line_length": 19.77777862548828, "blob_id": "c614f1a67b420f3f65dc8dbe225c30156c81dd44", "content_id": "f323f46bc087bf7698dbe4467241f730e2a5a1e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1309, "license_type": "no_license", "max_line_length": 81, "num_lines": 63, "path": "/convert_xml.cpp", "repo_name": "beepee14/Spatial-Role-Labelling", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<algorithm>\n#include<stdio.h>\n#include<cstdlib>\n#include<sstream>\n#include<string.h>\n#include<set>\n#include<map>\n#include<assert.h>\n#include<ctime>\n#include<queue>\n#include<vector>\n#include<stack>\n#include<list>\n#include<math.h>\n#include<fstream>\nusing namespace std;\ntypedef vector<int> vi;\ntypedef vector<string> vs;\ntypedef pair<int,int> pii;\ntypedef long long int lli;\n\n#define MAXN 1000005\n#define INF 2147483647\n#define MOD 1000000007\n#define pb push_back \n#define sz(a) int((a).size())\n#define FOR(x,a,b) for(int (x) = (a);(x)<=(b);(x)++)\n#define rep(x,n) for(int (x)=0;(x)<(n);(x)++)\n#define tr(c,it) for(typeof((c).begin()) it = (c).begin(); it != (c).end(); it++)\n#define all(c) c.begin(),c.end()\n#define mset(a,b) memset(a,b,sizeof(a))\n\n\n\nint main(int num,char *args[])\n{\n\t//freopen(\"input.txt\",\"r\",stdin);\n\t//freopen(\"output.xml\",\"w\",stdout);\n\tchar line[MAXN];\n\tchar x[MAXN/10];\n\tint c = 0;\n\tstring temp = \"<SENTENCE id=\\\"s\";\n\tifstream in;\n\tin.open(args[1],ifstream::in);\n\tofstream out;\n\tout.open(args[2],ofstream::out);\n\tout<<\"<DOC>\\n\";\n\twhile(in.getline(line,MAXN-100))\n\t{ \n\t\tc++;\t\n\t\tstringstream ss(line);\n\t\tout<<temp<<c<<\"\\\">\";\n\t\tout<<\"\\n<CONTENT>\";\n\t\twhile(ss>>x)\n\t\t\tout<<x<<\" \";\n\t\tout<<\"</CONTENT>\\n\";\n\t\tout<<\"</SENTENCE>\\n\";\n\t}\n\t out<<\"</DOC>\\n\";\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5534053444862366, "alphanum_fraction": 0.5643564462661743, "avg_line_length": 40.14814758300781, "blob_id": "32a019041cbc1ee6ef8a42ecc7da1bb29966fbf3", "content_id": "45e3350ac78ea652cbf088e8defa6c232f20624f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6666, "license_type": "no_license", "max_line_length": 165, "num_lines": 162, "path": "/prepositionfeatures.py", "repo_name": "beepee14/Spatial-Role-Labelling", "src_encoding": "UTF-8", "text": "from nltk.tree import *\nfrom nltk.stem.wordnet import WordNetLemmatizer\nimport xml.dom.minidom\nfrom pprint import pprint\n\ndef prepfeatures_dataset():\n doc = xml.dom.minidom.parse(\"parsedtest.xml\")\n contents = doc.getElementsByTagName(\"CONTENT\")\n \n for content in contents:\n sentence = content.childNodes[0].data\n taggedwords = content.parentNode.getElementsByTagName(\"POSTAGS\")[0].childNodes[0].data\n taggedwords = taggedwords.split(\" \")\n tw = []\n for pair in taggedwords:\n tw.append([pair.split(\"/\")[0], pair.split(\"/\")[1]])\n taggedwords = tw\n\n stringtree = content.parentNode.getElementsByTagName(\"TREE\")[0].childNodes[0].data\n\n dependencies = content.parentNode.getElementsByTagName(\"DEPENDENCIES\")[0]\n \n\n preps = doc.createElement(\"PREPS\")\n\n for depen in dependencies.childNodes:\n stringdepen = depen.childNodes[0].data\n if \"name:prep\" in stringdepen:\n #Cast to dict\n listdepen = stringdepen.split(\" \")\n name = listdepen[0].split(\":\")[1]\n specific = listdepen[1].split(\":\")[1] \n gov = listdepen[2].split(\":\")[1] \n dep = listdepen[3].split(\":\")[1]\n dictdepen = {'name': name, 'specific': specific, 'gov': gov, 'dep': dep}\n\n features = extract_features(dictdepen, taggedwords, stringtree)\n #print \"Features extracted:\"\n #pprint(features)\n \n # Introduce preps and features in the xml \n head1 = doc.createElement('HEAD1')\n head1_LEMMA = doc.createElement('HEAD1_LEMMA')\n head1_POS = doc.createElement('HEAD1_POS')\n head2 = doc.createElement('HEAD2')\n head2_LEMMA = doc.createElement('HEAD2_LEMMA')\n head2_POS = doc.createElement('HEAD2_POS')\n prep = doc.createElement('PREP')\n prep_POS = doc.createElement('PREP_POS')\n prep_spatial = doc.createElement('PREP_SPATIAL')\n\n head1_text = doc.createTextNode(features['head1'])\n head1_LEMMA_text = doc.createTextNode(features['head1_LEMMA'])\n head1_POS_text = doc.createTextNode(features['head1_POS'])\n head2_text = doc.createTextNode(features['head2'])\n head2_LEMMA_text = doc.createTextNode(features['head2_LEMMA'])\n head2_POS_text = doc.createTextNode(features['head2_POS'])\n prep_text = doc.createTextNode(str(features['preposition'])) #str to avoid Nones \n prep_POS_text = doc.createTextNode(str(features['preposition_POS']))\n prep_spatial_text = doc.createTextNode(str(features['preposition_spatial']))\n \n head1.appendChild(head1_text)\n head1_LEMMA.appendChild(head1_LEMMA_text)\n head1_POS.appendChild(head1_POS_text)\n head2.appendChild(head2_text)\n head2_LEMMA.appendChild(head2_LEMMA_text)\n head2_POS.appendChild(head2_POS_text)\n prep.appendChild(prep_text)\n prep_POS.appendChild(prep_POS_text)\n prep_spatial.appendChild(prep_spatial_text)\n\n prepnode = doc.createElement(\"PREPOSITION\")\n prepnode.appendChild(head1)\n prepnode.appendChild(head1_LEMMA)\n prepnode.appendChild(head1_POS)\n prepnode.appendChild(head2)\n prepnode.appendChild(head2_LEMMA)\n prepnode.appendChild(head2_POS)\n prepnode.appendChild(prep)\n prepnode.appendChild(prep_POS)\n prepnode.appendChild(prep_spatial)\n\n preps.appendChild(prepnode)\n\n content.parentNode.appendChild(preps)\n\n\n f = open('input_data.xml', 'w')\n doc.writexml(f)\n f.close()\n \n\n\n\ndef preposition_deps(dependencies):\n \"\"\"\n Extract prepositions from a dependency list (Stanford).\n The dependencies are expected to be a list of dictionaries like:\n {'name': name, 'specific': specific, 'gov': gov, 'dep': dep}\n \"\"\"\n preps = [dep for dep in dependencies if dep['name'] == 'prep']\n return preps\n\ndef extract_features(prepdeps, taggedwords, stringtree):\n \"\"\" Extract preposition features as explained in the SpRL paper \"\"\"\n features = {}\n lmtzr = WordNetLemmatizer()\n \n def getPOS(word, tagged):\n if word is None: return None\n try:\n #print \"**debug: Looking word POS\"\n #print word\n #print tagged\n pos = filter(lambda t: t[0] == word, tagged)\n if len(pos) > 0:\n return pos[0][1]\n else:\n return None\n\n except Exception as e:\n print \"Error getting POS of:\", word\n print e\n\n prep = prepdeps['specific']\n features['preposition'] = prepdeps['specific']\n features['head1'] = prepdeps['gov']\n features['head2'] = prepdeps['dep']\n features['preposition_POS'] = getPOS(prepdeps['specific'], taggedwords)\n features['head1_POS'] = getPOS(prepdeps['gov'], taggedwords)\n features['head2_POS'] = getPOS(prepdeps['dep'], taggedwords)\n features['head1_LEMMA'] = lmtzr.lemmatize(prepdeps['gov'])\n features['head2_LEMMA'] = lmtzr.lemmatize(prepdeps['dep'])\n features['preposition_spatial'] = get_spatial_sense(prepdeps['specific'])\n\n return features\n\ndef get_spatial_sense(preposition):\n \"\"\"\n Gets proportion of spatial sense for preposition \n in PPT dictionary (The Preposition Project)\n \"\"\"\n #print \"GEtting spatial sense of\", preposition \n doc = xml.dom.minidom.parse(\"tpp.xml\")\n entries = [node for node in doc.getElementsByTagName(\"hw\")\n if node.firstChild.nodeValue == preposition]\n try:\n entry = entries[0].parentNode.parentNode.parentNode\n except Exception as e:\n print e\n return -1\n\n spatial = 0 #percentage of spatial \n for sup in entry.getElementsByTagName('sup'):\n if sup.firstChild.nodeValue == 'Spatial': spatial += 1\n spatial = float(spatial)/len(entry.getElementsByTagName('sup'))\n\n return spatial\n\n\nif __name__ == \"__main__\":\n prepfeatures_dataset()\n" }, { "alpha_fraction": 0.5946816802024841, "alphanum_fraction": 0.6051571369171143, "avg_line_length": 34.4571418762207, "blob_id": "4ba58703685c3f7a64957bb7eb96d641dc7ac03d", "content_id": "5823ccaafffbb9a5c3fcfb43540ad9c54e3d978c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1241, "license_type": "no_license", "max_line_length": 99, "num_lines": 35, "path": "/SImarker.py", "repo_name": "beepee14/Spatial-Role-Labelling", "src_encoding": "UTF-8", "text": "import xml.dom.minidom\n\ndef generate_dataset():\n doc = xml.dom.minidom.parse(\"output.xml\")\n preps = doc.getElementsByTagName(\"PREPOSITION\")\n multiwords = 0\n uniwords = 0\n redundancies = 0\n for prep in preps:\n classnode = doc.createElement(\"CLASS\")\n tag = \"NSI\"\n prep_index=prep.getAttribute(\"id\")\n preptext = prep.getElementsByTagName(\"PREP\")[0].childNodes[0].data\n sis = prep.parentNode.parentNode.getElementsByTagName(\"SPATIAL_INDICATOR\")\n sentence = prep.parentNode.parentNode.getElementsByTagName(\"CONTENT\")[0].childNodes[0].data\n index=\"garbage\"\n for si in sis:\n sitext = si.childNodes[0].data.split() # split to delete spureous white spaces\n temp_index=si.childNodes[0].getAttribute(\"id\")\n index=temp_index[2:]\n if len(sitext) > 1: multiwords +=1\n else: uniwords +=1\n\n sitext = \" \".join(sitext)\n if index == prep_index:\n tag = \"SI\"\n textnode = doc.createTextNode(tag)\n classnode.appendChild(textnode)\n prep.appendChild(classnode)\n\n f = open('SItrain.xml', 'w')\n doc.writexml(f)\n f.close()\nif __name__ == \"__main__\":\n generate_dataset()\n" }, { "alpha_fraction": 0.5136427879333496, "alphanum_fraction": 0.5359278321266174, "avg_line_length": 39.88888931274414, "blob_id": "60a3f9ede226936477ec1f5bdbb18a3184b990d7", "content_id": "209fa249c05c9e5852686f789381d537bf9a9b4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9199, "license_type": "no_license", "max_line_length": 135, "num_lines": 225, "path": "/spatialindicators.py", "repo_name": "beepee14/Spatial-Role-Labelling", "src_encoding": "UTF-8", "text": "#This is the place where all the shit REALLY HAPPENS\nimport xml.dom.minidom\nimport nltk\nfrom pprint import pprint\nfrom random import shuffle\nfrom sklearn import svm\ndef generate_dataset():\n doc = xml.dom.minidom.parse(\"SItrain.xml\")\n preps = doc.getElementsByTagName(\"PREPOSITION\")\n instances = []\n for prep in preps:\n multiword=0\n features = []\n sis = prep.parentNode.parentNode.getElementsByTagName(\"SPATIAL_INDICATOR\")\n for si in sis:\n sitext = si.childNodes[0].data.split() # split to delete spureous white spaces\n if len(sitext) > 1:\n multiword=1\n\n sitext = \" \".join(sitext)\n head1 = prep.getElementsByTagName(\"HEAD1\")[0].childNodes[0].data\n head1_lemma = prep.getElementsByTagName(\"HEAD1_LEMMA\")[0].childNodes[0].data\n head1_pos = prep.getElementsByTagName(\"HEAD1_POS\")[0].childNodes[0].data\n head2 = prep.getElementsByTagName(\"HEAD2\")[0].childNodes[0].data\n head2_lemma = prep.getElementsByTagName(\"HEAD2_LEMMA\")[0].childNodes[0].data\n head2_pos = prep.getElementsByTagName(\"HEAD2_POS\")[0].childNodes[0].data\n prepos = prep.getElementsByTagName(\"PREP\")[0].childNodes[0].data\n \n prepos_pos = prep.getElementsByTagName(\"PREP_POS\")[0].childNodes[0].data\n prepos_spatial = prep.getElementsByTagName(\"PREP_SPATIAL\")[0].childNodes[0].data\n tag = prep.getElementsByTagName(\"CLASS\")[0].childNodes[0].data\n if tag == \"SI\": \n tag = 1\n else: \n tag = 0 \n features = {}\n features[\"head1\"] = head1\n features[\"head1_lemma\"] = head1_lemma\n features[\"head1_pos\"] = head1_pos\n features[\"head2\"] = head2\n features[\"head2_lemma\"] = head2_lemma\n features[\"head2_pos\"] = head2_pos\n features[\"prep\"] = prepos\n features[\"prep_pos\"] = prepos_pos\n features[\"prep_spatial\"] = prepos_spatial\n if multiword==0:\n instances.append([features, tag])\n return instances\ndef get_multiwords_preps():\n doc = xml.dom.minidom.parse(\"tpp.xml\")\n entries = doc.getElementsByTagName(\"hw\")\n multiword_preps=[entry.childNodes[0].data for entry in entries if len(entry.childNodes[0].data.split())>1]\n #print multiword_preps\n return multiword_preps\n\ndef generate_testdata():\n doc = xml.dom.minidom.parse(\"input_data.xml\")\n preps = doc.getElementsByTagName(\"PREPOSITION\")\n instances = []\n for prep in preps:\n features = []\n head1 = prep.getElementsByTagName(\"HEAD1\")[0].childNodes[0].data\n head1_lemma = prep.getElementsByTagName(\"HEAD1_LEMMA\")[0].childNodes[0].data\n head1_pos = prep.getElementsByTagName(\"HEAD1_POS\")[0].childNodes[0].data\n head2 = prep.getElementsByTagName(\"HEAD2\")[0].childNodes[0].data\n head2_lemma = prep.getElementsByTagName(\"HEAD2_LEMMA\")[0].childNodes[0].data\n head2_pos = prep.getElementsByTagName(\"HEAD2_POS\")[0].childNodes[0].data\n prepos = prep.getElementsByTagName(\"PREP\")[0].childNodes[0].data\n prepos_pos = prep.getElementsByTagName(\"PREP_POS\")[0].childNodes[0].data\n prepos_spatial = prep.getElementsByTagName(\"PREP_SPATIAL\")[0].childNodes[0].data\n tag =-1\n features = {}\n features[\"head1\"] = head1\n features[\"head1_lemma\"] = head1_lemma\n features[\"head1_pos\"] = head1_pos\n features[\"head2\"] = head2\n features[\"head2_lemma\"] = head2_lemma\n features[\"head2_pos\"] = head2_pos\n features[\"prep\"] = prepos\n features[\"prep_pos\"] = prepos_pos\n features[\"prep_spatial\"] = prepos_spatial\n sentence_id=prep.parentNode.parentNode.getAttribute(\"id\")\n sentence_id=sentence_id[1:]\n dependencies=prep.parentNode.parentNode.getElementsByTagName(\"DEPENDENCIES\")[0]\n all_dependencies=[]\n for depen in dependencies.childNodes:\n #Cast to dict\n stringdepen = depen.childNodes[0].data\n listdepen = stringdepen.split(\" \")\n name = listdepen[0].split(\":\")[1]\n specific = listdepen[1].split(\":\")[1] \n gov = listdepen[2].split(\":\")[1] \n dep = listdepen[3].split(\":\")[1]\n dictdepen = {'name': name, 'specific': specific, 'gov': gov, 'dep': dep}\n all_dependencies.append(dictdepen)\n taggedwords = prep.parentNode.parentNode.getElementsByTagName(\"POSTAGS\")[0].childNodes[0].data\n taggedwords = taggedwords.split(\" \")\n tw = []\n for pair in taggedwords:\n tw.append([pair.split(\"/\")[0], pair.split(\"/\")[1]])\n taggedwords = tw\n sentence=prep.parentNode.parentNode.getElementsByTagName(\"CONTENT\")[0].childNodes[0].data\n instances.append([features, tag,sentence_id,all_dependencies,taggedwords,sentence])\n \n return instances\n\ndef cvtrain(train, K):\n\n shuffle(train)\n total_accuracy = 0\n\n for k in xrange(K):\n training = [x for i, x in enumerate(train) if i % K != k]\n validation = [x for i, x in enumerate(train) if i % K == k]\n classifier = nltk.NaiveBayesClassifier.train(training)\n #print(classifier.classify(validation))\n accuracy = nltk.classify.accuracy(classifier, validation)\n total_accuracy += accuracy\n\n avg_accuracy = float(total_accuracy)/K\n #print (\"Naive Bayes accuracy on CV: \",str(avg_accuracy))\n return avg_accuracy\n \ndef getPOS(word, tagged):\n if word is None: return None\n try:\n pos = filter(lambda t: t[0] == word, tagged)\n if len(pos) > 0:\n return pos[0][1]\n else:\n return None\n\n except Exception as e:\n print \"Error getting POS of:\", word\n print e\ndef isObject(tag):\n if tag==\"NN\" or tag==\"NNS\" or tag==\"NNP\" or tag==\"NNPS\" or tag==\"PRP\" or tag==\"PRP$\":\n return 1\n else:\n return 0\n\ndef test():\n filer= open('output.txt','w')\n instances=generate_dataset()\n test_instances=generate_testdata()\n multiword_preps=get_multiwords_preps()\n classifier= nltk.NaiveBayesClassifier.train(instances)\n for x in test_instances:\n checker=0\n for prep in multiword_preps:\n if prep in x[5]:\n checker=1\n sentence=x[5].split()\n for word in sentence:\n if isObject(getPOS(word,x[4]))==1:\n object1=word\n break\n for word in reversed(sentence):\n if isObject(getPOS(word,x[4]))==1:\n object2=word\n break\n filer.write(x[2]+'. \\\"'+prep+'\\\" \\\"'+object1+ '\\\" \\\"' +object2+'\\\"\\n')\n break\n if checker == 1 :\n continue\n predicted=classifier.classify(x[0])\n object1=\"none\"\n object2=\"none\"\n if x[0]['prep']==\"beneath\" :\n predicted=1\n if(predicted==1):\n if isObject(getPOS(x[0][\"head1\"],x[4]))==1 and isObject(getPOS(x[0][\"head2\"],x[4]))==1 : \n filer.write(x[2]+'. \\\"'+x[0][\"prep\"]+'\\\" \\\"'+x[0][\"head1\"]+ '\\\" \\\"' +x[0][\"head2\"]+'\\\"\\n')\n else:\n if isObject(getPOS(x[0][\"head1\"],x[4]))==0 :\n for i in x[3]:\n if i['dep']==x[0][\"head1\"]:\n if isObject(getPOS(i['gov'],x[4]))==1:\n object1=i['gov']\n break\n elif i['gov']==x[0][\"head1\"]:\n if isObject(getPOS(i['dep'],x[4]))==1:\n object1=i['dep']\n break\n else:\n object1=x[0][\"head1\"]\n if isObject(getPOS(x[0][\"head2\"],x[4]))==0 :\n for i in x[3]:\n if i['gov']==x[0][\"head2\"]:\n if isObject(getPOS(i['dep'],x[4]))==1:\n object2=i['dep']\n break\n elif i['dep']==x[0][\"head2\"]:\n if isObject(getPOS(i['gov'],x[4]))==1:\n object1=i['gov']\n break\n else:\n object2=x[0][\"head2\"]\n filer.write(x[2]+'. \\\"'+x[0][\"prep\"]+'\\\" \\\"'+object1+ '\\\" \\\"' +object2+'\\\"\\n')\n filer.close()\n\nif __name__ == \"__main__\":\n \"\"\"\n instances = generate_dataset()\n size = len(instances)\n cut = int(size*0.7)\n train = instances[:cut]\n test = instances[cut:]\n examples=0\n counter=0\n classifier = nltk.NaiveBayesClassifier.train(train)\n for x in test:\n pred = classifier.classify(x[0])\n gold = x[1]\n if(pred==gold):\n counter+=1\n else:\n print x[0]\n print \"___________\"\n print x[1]\n examples+=1\n print(float(counter)/examples)\n print(len(instances))\"\"\"\n test()\n get_multiwords_preps()" }, { "alpha_fraction": 0.5736514329910278, "alphanum_fraction": 0.5757261514663696, "avg_line_length": 27.352941513061523, "blob_id": "4a28a16bfa87f95401f7ac98a7e62523c9a0600a", "content_id": "8c82c82ce17bf2ec24ac405e15708b56ef6b2586", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 964, "license_type": "no_license", "max_line_length": 91, "num_lines": 34, "path": "/tokenize.py", "repo_name": "beepee14/Spatial-Role-Labelling", "src_encoding": "UTF-8", "text": "import nltk\nimport xml.dom.minidom\nfrom nltk.tokenize import RegexpTokenizer\nimport cPickle\nfrom subprocess import call\nfrom pprint import pprint\n\ndef tokenize_dataset():\n # Read xml file \n doc = xml.dom.minidom.parse(\"input.xml\")\n contents = doc.getElementsByTagName(\"CONTENT\")\n\n for content in contents:\n sentence = content.childNodes[0].data\n #print \"\\n***Original sentence:\\n\", sentence \n words = tokenize(sentence)\n content.childNodes[0].data = ' '.join(words)\n\n f = open('tokenized.xml', 'w')\n doc.writexml(f)\n f.close()\n\ndef tokenize(string):\n \n tokenizer = RegexpTokenizer('\\w+|\\$[\\d\\.]+|\\S+')\n #tokenizer = RegexpTokenizer('[a-zA-Z]+') #palabras\n words = tokenizer.tokenize(string)\n for w in words:\n if \"'t\" is w:\n w = \"not\"\n return words\n\nif __name__ == '__main__':\n tokenize_dataset()\n" } ]
7
riti121/cafe
https://github.com/riti121/cafe
a513e97a25fe0f6bce524636dc84fbdd5fb3edf1
ede4fa89b5f1ac891d0df37f629d7db277e36e58
384197542ae1a325685072c4de908c050e1b9c27
refs/heads/master
2022-08-01T17:32:49.208032
2020-05-25T18:18:59
2020-05-25T18:18:59
266,846,642
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4850078821182251, "alphanum_fraction": 0.5160441994667053, "avg_line_length": 30.147541046142578, "blob_id": "0c4580d5521fb38b57e0b709d1770ffe417e36cb", "content_id": "ec0044c62665cf3543ab69544cc2dd452e2a3de1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1901, "license_type": "permissive", "max_line_length": 83, "num_lines": 61, "path": "/facialrecognition.py", "repo_name": "riti121/cafe", "src_encoding": "UTF-8", "text": "\"\"\"\nimport tensorflow as tf \nmodeltest=tf.keras.models.load_model(\"facial_1 (1)\")\nprint(\"--model loaded successfully--\")\n\"\"\"\nimport cv2\nimport sys\nimport os\n\nclass FaceCropper(object):\n cascades_path = 'haarcascade_frontalface_default.xml'\n\n def __init__(self):\n self.face_cascade = cv2.CascadeClassifier(self.cascades_path)\n\n def generate(self, image_path, show_result):\n name=\"\"\n img = cv2.imread(image_path)\n if (img is None):\n print(\"Can't open image file\")\n return 0\n\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = self.face_cascade.detectMultiScale(img, 1.1, 3, minSize=(100, 100))\n if (faces is None):\n print('Failed to detect face')\n return 0\n\n if (show_result):\n for (x, y, w, h) in faces:\n\n cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,0), 2)\n roi_color = img[y:y + h, x:x + w] \n print(\"[INFO] Object found. Saving locally.\")\n name= str(w) + str(h) + '_faces.jpg'\n cv2.imwrite(str(w) + str(h) + '_faces.jpg', roi_color)\n #cv2.imshow('cropped image',roi_color) \n #cv2.imshow('marked image',img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n facecnt = len(faces)\n print(\"Detected faces: %d\" % facecnt)\n i = 0\n height, width = img.shape[:2]\n\n for (x, y, w, h) in faces:\n r = max(w, h) / 2\n centerx = x + w / 2\n centery = y + h / 2\n nx = int(centerx - r)\n ny = int(centery - r)\n nr = int(r * 2)\n\n faceimg = img[ny:ny+nr, nx:nx+nr]\n lastimg = cv2.resize(faceimg, (32, 32))\n i += 1\n cv2.imwrite(\"image%d.jpg\" % i, lastimg)\n return name\n\n#fc=FaceCropper().generate(\"IMG_20200226_000431.png\",True)\n\n" }, { "alpha_fraction": 0.7418830990791321, "alphanum_fraction": 0.7418830990791321, "avg_line_length": 40.06666564941406, "blob_id": "99e6b03d3fcb35df96e94cf34e5214ca4a616ce4", "content_id": "d68d980948a0340ed267d65eefa74005524e6f7d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 616, "license_type": "permissive", "max_line_length": 112, "num_lines": 15, "path": "/README.md", "repo_name": "riti121/cafe", "src_encoding": "UTF-8", "text": "# PClub-Project\nProgramming Club IITK project on depression therapy using chatbot\n\nContents\n--------\n* *bot.py* : The python file for the bot.\n* *Team-Documentation.pdf* : The .pdf file for the documentation of the team's overall progress and work so far.\n* *Team-Documentation.tex* : The .tex file used to create *Team-Documentation.pdf*\n* *Presentation.pptx* : The mid-term presentation for the project\n* SentimentAnalysis : The datasets and python files for sentiment analysis\n* docs : The privacy policy for the Messenger bot.\n\nMessenger Bot\n-------------\nFacebook page: *https://www.facebook.com/Chatbot.Brad/*\n" }, { "alpha_fraction": 0.7668593525886536, "alphanum_fraction": 0.7745664715766907, "avg_line_length": 50.900001525878906, "blob_id": "97b7ae992f0576f74d2984ff06b5d5e931719428", "content_id": "a5902bb1a9f469eba249b526a773d5f5626c4e0c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 519, "license_type": "permissive", "max_line_length": 135, "num_lines": 10, "path": "/SentimentAnalysis/README.md", "repo_name": "riti121/cafe", "src_encoding": "UTF-8", "text": "# Sentiment Analysis folder\n\nContents\n--------\n* *sentiment_vocab.py* : The python file to train the sentiment analysis model by building a vocabulary\n* *sentiment_wv.py* : Deprecated; The python file to train the sentiment analysis model by first building a Word2Vec model using Gensim\n* *model_nn.tar.gz* : The tarball containing the sentiment analysis model\n* *vocab_sentiment* : The vocabulary model for the sentiment analysis model\n\nDataset for learning obtained from *http://help.sentiment140.com/for-students/*.\n" }, { "alpha_fraction": 0.6080933809280396, "alphanum_fraction": 0.6211673021316528, "avg_line_length": 35.305084228515625, "blob_id": "3c197084b79965506f501afbcf95ef96692a67c5", "content_id": "edd408d59527a2aca6099c515d5126dce4e0932b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6425, "license_type": "permissive", "max_line_length": 327, "num_lines": 177, "path": "/backup(main).py", "repo_name": "riti121/cafe", "src_encoding": "UTF-8", "text": "import kivy\nkivy.require('1.11.1')\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.properties import ObjectProperty\nfrom collections import Counter\nimport bot\nimport time\nimport tensorflow as tf\nimport facialrecognition as fr\nimport cv2\n\nclass Home(Screen):\n pass\n\n\nclass Questions(Screen):\n \n \n ques_path='Personality Test(base)\\Questions.txt'\n personalities={'isfj':'Defender','esfj':'Cousellor','istj':'Logistician','estp':'Entrepreneur','esfp':'Entertainer','istp':'Virtuoso','isfp':'Adventurer','entj':'Commander','entp':'Debator','intj':'Architect','intp':'Logician','enfj':'Protagonist','enfp':'Campaigner','infj':'Advocate','infp':'Mediator','estj':'Executive'}\n personality=''\n questions=[]\n question_1 = ObjectProperty(None)\n question_2 = ObjectProperty(None)\n counter=1\n answers=[0]*20\n with open(ques_path) as quest_file:\n questions=[r.split('SPLIT') for r in quest_file.readlines()]\n \n def personality_exam(self,answers):\n e,s,j,t=['e','i'],['s','n'],['j','p'],['t','f']\n e.extend([answers[r] for r in range(0,20,4)]) \n s.extend([answers[r] for r in range(1,20,4)]) \n t.extend([answers[r] for r in range(2,20,4)]) \n j.extend([answers[r] for r in range(3,20,4)]) \n personality='' \n for option in e,s,t,j: \n temp=Counter(option) \n personality+=option[0] if temp['a']>temp['b'] else option[1] \n Report.personality=personality\n \n def on_enter(self, *args):\n self.question_1.text=self.questions[0][0]\n self.question_2.text=self.questions[0][1]\n \n def ask_question1(self):\n if(self.counter==20):\n self.answers[self.counter-1]='a'\n self.personality_exam(self.answers)\n self.counter=1\n sm.current = 'rep'\n \n else:\n self.question_1.text=self.questions[self.counter][0]\n self.question_2.text=self.questions[self.counter][1] \n self.answers[self.counter-1]='a'\n self.counter+=1\n\n def ask_question2(self):\n if(self.counter==20):\n self.answers[self.counter-1]='b'\n self.personality_exam(self.answers)\n self.counter=1\n sm.current = 'rep'\n \n else:\n self.question_1.text=self.questions[self.counter][0]\n self.question_2.text=self.questions[self.counter][1] \n self.answers[self.counter-1]='b'\n self.counter+=1\n\n \n\nclass Report(Screen):\n personality=''\n def on_enter(self, *args):\n self.per.text=Questions.personalities[self.personality]+'\\n'+'('+self.personality+')'\n self.image.source= Report.personality+'\\INTRODUCTION\\Image.png'\nclass Description(Screen):\n def on_enter(self, *args):\n self.persona.text=Questions.personalities[Report.personality]\n file_path=Report.personality+'\\INTRODUCTION\\Introduction.txt'\n with open(file_path) as file:\n self.detail.text=file.read()\n\nclass CareerOptions(Screen):\n def on_enter(self, *args):\n self.persona.text=Questions.personalities[Report.personality]\n file_path=Report.personality+'\\career.txt'\n with open(file_path) as file:\n self.detail.text=file.read()\n\nclass Strengths(Screen):\n def on_enter(self, *args):\n self.persona.text=Questions.personalities[Report.personality]\n file_path=Report.personality+'\\STRENGTHS\\Strengths.txt'\n with open(file_path) as file:\n self.detail.text=file.read()\n\nclass CameraClick(Screen):\n\n emo = ['Angry', 'Fear', 'Happy',\n 'Sad', 'Surprise', 'Neutral']\n model = tf.keras.models.load_model(\"facial_1 (1)\")\n buddy=''\n mood=''\n\n\n def prepare(self, filepath):\n IMG_SIZE = 48 \n img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE) \n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) \n return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1) \n \n def capture(self):\n \n camera = self.ids['camera']\n timestr = time.strftime(\"%Y%m%d_%H%M%S\")\n name=\"IMG_{}.png\".format(timestr)\n camera.export_to_png(name)\n print(\"Captured\")\n fc=fr.FaceCropper().generate(name,True)\n try:\n prediction = self.model.predict([self.prepare(fc)])\n prediction=list(map(float,prediction[0]))\n except:\n prediction=\"prepare function could not run(0 faces detected)\"\n self.mood='Neutral'\n print(prediction)\n try:\n self.mood=self.emo[prediction.index(max(prediction))] # self.emo[list(prediction[0]).index(1)]\n except:\n print(\"Exception handled..!! Picture could not be cleared properly. Please check lighting\")\n self.mood='Neutral'\n bot.setname(self.textforcamera.text) \n print(bot.getname())\n ChatWindow.mood=self.mood\n\n\nclass ChatWindow(Screen):\n mood=''\n bot.pre_processing()\n #bot.chatcode()\n def on_enter(self, *args):\n self.chat_history.text=\"Hey \"+bot.getname()+\", what brings you here today!!\\n Current Mood: \"+self.mood+\" !! \"\n def send_message(self):\n message=self.text.text\n self.text.text=''\n #self.history.update_chat_history(f'[color=dd2020]{chat_app.connect_page.username.text}[/color] > {message}')\n self.chat_history.text += '\\n' +\"User: \"+message\n\n # Set layout height to whatever height of chat history text is + 15 pixels\n # (adds a bit of space at teh bottom)\n # Set chat history label to whatever height of chat history text is\n # Set width of chat history text to 98 of the label width (adds small margins)\n #self.layout.height = self.chat_history.texture_size[1] + 15\n self.chat_history.text_size = (self.chat_history.width * 0.98, None)\n\nclass WindowManager(ScreenManager):\n pass\n\nkv=Builder.load_file('design.kv')\n\nsm = WindowManager()\nscreens=[Home(name=\"home\"), Questions(name=\"quest\"), Report(name=\"rep\"), Description(name='description'), CareerOptions(name='career'), Strengths(name='strengths'), ChatWindow(name='chat'),CameraClick(name='camera')]\nfor screen in screens:\n sm.add_widget(screen)\nsm.current = \"home\"\n\nclass CafeApp(App):\n def build(self):\n return sm\n\nif __name__=='__main__':\n CafeApp().run()" }, { "alpha_fraction": 0.508571445941925, "alphanum_fraction": 0.5257142782211304, "avg_line_length": 20.625, "blob_id": "dd74f3ec8b083d2f9bfe3a4984bd89ffdcde08ee", "content_id": "90bb6c8fe9bc3053167ee82bd89a8b84e15f655b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 175, "license_type": "permissive", "max_line_length": 48, "num_lines": 8, "path": "/test.py", "repo_name": "riti121/cafe", "src_encoding": "UTF-8", "text": "lst=sorted(list(map(int,input().split())))[::-1]\ntemp=lst[0]\nfor i in range(1,len(lst)):\n if temp==lst[i]:\n continue\n else:\n print(lst[i])\n break\n\n\n" }, { "alpha_fraction": 0.5556330680847168, "alphanum_fraction": 0.5636969804763794, "avg_line_length": 44.57597351074219, "blob_id": "eca32935335c9404fa62b9b83c336c4fa3bd9521", "content_id": "edec192c17b25785708c64578a29d1834ab0b86d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12897, "license_type": "permissive", "max_line_length": 327, "num_lines": 283, "path": "/CafeApp.py", "repo_name": "riti121/cafe", "src_encoding": "UTF-8", "text": "import kivy\nkivy.require('1.11.1')\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.properties import ObjectProperty\nfrom collections import Counter\nfrom gensim.parsing.preprocessing import strip_non_alphanum, preprocess_string\nimport bot\nimport time\nimport tensorflow as tf\nimport facialrecognition as fr\nimport cv2\n\nclass Home(Screen):\n pass\n\n\nclass Questions(Screen):\n \n \n ques_path='Personality Test(base)\\Questions.txt'\n personalities={'isfj':'Defender','esfj':'Cousellor','istj':'Logistician','estp':'Entrepreneur','esfp':'Entertainer','istp':'Virtuoso','isfp':'Adventurer','entj':'Commander','entp':'Debator','intj':'Architect','intp':'Logician','enfj':'Protagonist','enfp':'Campaigner','infj':'Advocate','infp':'Mediator','estj':'Executive'}\n personality=''\n questions=[]\n question_1 = ObjectProperty(None)\n question_2 = ObjectProperty(None)\n counter=1\n answers=[0]*20\n with open(ques_path) as quest_file:\n questions=[r.split('SPLIT') for r in quest_file.readlines()]\n \n def personality_exam(self,answers):\n e,s,j,t=['e','i'],['s','n'],['j','p'],['t','f']\n e.extend([answers[r] for r in range(0,20,4)]) \n s.extend([answers[r] for r in range(1,20,4)]) \n t.extend([answers[r] for r in range(2,20,4)]) \n j.extend([answers[r] for r in range(3,20,4)]) \n personality='' \n for option in e,s,t,j: \n temp=Counter(option) \n personality+=option[0] if temp['a']>temp['b'] else option[1] \n Report.personality=personality\n \n def on_enter(self, *args):\n self.question_1.text=self.questions[0][0]\n self.question_2.text=self.questions[0][1]\n \n def ask_question1(self):\n if(self.counter==20):\n self.answers[self.counter-1]='a'\n self.personality_exam(self.answers)\n self.counter=1\n sm.current = 'rep'\n \n else:\n self.question_1.text=self.questions[self.counter][0]\n self.question_2.text=self.questions[self.counter][1] \n self.answers[self.counter-1]='a'\n self.counter+=1\n\n def ask_question2(self):\n if(self.counter==20):\n self.answers[self.counter-1]='b'\n self.personality_exam(self.answers)\n self.counter=1\n sm.current = 'rep'\n \n else:\n self.question_1.text=self.questions[self.counter][0]\n self.question_2.text=self.questions[self.counter][1] \n self.answers[self.counter-1]='b'\n self.counter+=1\n\n \n\nclass Report(Screen):\n personality=''\n def on_enter(self, *args):\n self.per.text=Questions.personalities[self.personality]+'\\n'+'('+self.personality+')'\n self.image.source= Report.personality+'\\INTRODUCTION\\Image.png'\nclass Description(Screen):\n def on_enter(self, *args):\n self.persona.text=Questions.personalities[Report.personality]\n file_path=Report.personality+'\\INTRODUCTION\\Introduction.txt'\n with open(file_path) as file:\n self.detail.text=file.read()\n\nclass CareerOptions(Screen):\n def on_enter(self, *args):\n self.persona.text=Questions.personalities[Report.personality]\n file_path=Report.personality+'\\career.txt'\n with open(file_path) as file:\n self.detail.text=file.read()\n\nclass Strengths(Screen):\n def on_enter(self, *args):\n self.persona.text=Questions.personalities[Report.personality]\n file_path=Report.personality+'\\STRENGTHS\\Strengths.txt'\n with open(file_path) as file:\n self.detail.text=file.read()\n\nclass CameraClick(Screen):\n\n emo = ['Angry', 'Fear', 'Happy',\n 'Sad', 'Surprise', 'Neutral']\n model = tf.keras.models.load_model(\"facial_1 (1)\")\n buddy=''\n mood=''\n\n\n def prepare(self, filepath):\n IMG_SIZE = 48 \n img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE) \n new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) \n return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1) \n \n def capture(self):\n \n camera = self.ids['camera']\n timestr = time.strftime(\"%Y%m%d_%H%M%S\")\n name=\"IMG_{}.png\".format(timestr)\n camera.export_to_png(name)\n print(\"Captured\")\n fc=fr.FaceCropper().generate(name,True)\n try:\n prediction = self.model.predict([self.prepare(fc)])\n prediction=list(map(float,prediction[0]))\n except:\n prediction=\"prepare function could not run(0 faces detected)\"\n self.mood='Neutral'\n print(prediction)\n try:\n self.mood=self.emo[prediction.index(max(prediction))] # self.emo[list(prediction[0]).index(1)]\n except:\n print(\"Exception handled..!! Picture could not be captured properly. Please check lighting\")\n self.mood='Neutral'\n bot.setname(self.textforcamera.text) \n print(bot.getname())\n ChatWindow.mood=self.mood\n self.textforcamera.text = ''\n\n\nclass ChatWindow(Screen):\n one=True\n prev=\"\"\n mood=''\n bot.pre_processing()\n counter=1\n #bot.chatcode()\n def on_enter(self, *args):\n print(self.mood)\n greeting_msg=\"Hey \"+bot.getname()+\", my name is Cafe Buddy consider me a friend of yours!!\\n\"\n #self.chat_history.text=\"Hey \"+bot.getname()+\", what brings you here today!!\\n Current Mood: \"+self.mood+\" !! \"\n #emo = ['Angry', 'Fear', 'Happy','Sad', 'Surprise', 'Neutral']\n \n if self.mood=='Happy':\n buddy_msg=\"you seem quite happy. Is there still anything that disturbs you?\\n\"\n self.chat_history.text=greeting_msg+buddy_msg\n if self.mood=='Angry':\n buddy_msg=\"you seem quite disturbed. Is there anything that disturbs you?\\n\"\n self.chat_history.text=greeting_msg+buddy_msg\n if self.mood=='Fear' or self.mood=='Surprise' or self.mood=='Neutral':\n buddy_msg=\"Is everything okay? You are looking stressed?\\n\"\n self.chat_history.text=greeting_msg+buddy_msg\n if self.mood=='Sad':\n buddy_msg=\"hey, what is it that worries you so much? Why are you looking so sad?\\n\"\n self.chat_history.text=greeting_msg+buddy_msg\n \n\n \n\n\n def send_message(self):\n message=self.text.text\n self.text.text=''\n #self.history.update_chat_history(f'[color=dd2020]{chat_app.connect_page.username.text}[/color] > {message}')\n self.chat_history.text = '\\n' +\"User: \"+message\n if self.mood=='Happy':\n if self.counter==1:\n if (bot.predict(message) >= 0.55):\n buddy_msg='That is good. In case you ever feel otherways. Please feel free to have a session with me\\n'\n else:\n self.mood='Neutral' \n buddy_msg = 'Please express yourself freely, i am hearing.\\n' \n self.chat_history.text += '\\n'+\"Cafe Buddy: \"+buddy_msg\n else:\n print(self.counter)\n if self.counter==1:\n keyword=[word for word in preprocess_string(message.lower()) if word in ('friend','work','education','school','college','family','studi','exam','fight')]\n print(keyword)\n if len(keyword)>0:\n buddy_msg = 'Will you please tell me in a bit more detail about it?'\n self.one=True\n else:\n buddy_msg='I understand. Seems like something\\'s bothering you. '\\\n 'Could you further describe it, in short?'\n self.one=False\n self.counter+=1\n self.chat_history.text += '\\n'+\"Cafe Buddy: \"+buddy_msg\n elif self.counter==2:\n if self.one==True:\n keyword=[]\n print(bot.predict(message))\n keyword.extend([preprocess_string(message.lower())][0])\n print(keyword)\n if 'friend' in keyword and bot.predict(message)[0][0] <= 0.6:\n buddy_msg = \"Many people tend to expect too much of others, their family, \"\\\n \"their friends or even just acquaintances. It's a usual mistake\"\\\n \", people don't think exactly the way you do.\\nDon't let the \"\\\n \"opinions of others make you forget what you deserve. You are \"\\\n \"not in this world to live up to the expectations of others, \"\\\n \"nor should you feel that others are here to live up to yours.\"\\\n \"\\nThe first step you should take if you want to learn how to \"\\\n \"stop expecting too much from people is to simply realize and \"\\\n \"accept the fact that nobody is perfect and that everyone \"\\\n \"makes mistakes every now and then.\"\n elif 'work' in keyword or 'studi' in keyword or 'exam' in keyword:\n if bot.predict(message)[0][0] <= 0.6:\n buddy_msg = bot.getname() + \", don't take too much stress. I can list some really cool \"\\\n \"ways to handle it.\\nYou should develop healthy responses which \"\\\n \"include doing regular exercise and taking good quality sleep. \"\\\n \"You should have clear boundaries between your work or academic \"\\\n \"life and home life so you make sure that you don't mix them.\\n\"\\\n \"Tecniques such as meditation and deep breathing exercises can be \"\\\n \"really helping in relieving stress.\\n Always take time to \"\\\n \"recharge so as to avoid the negative effects of chronic stress \"\\\n \"and burnout. We need time to replenish and return to our pre-\"\\\n \"stress level of functioning.\"\n elif 'famili' in keyword and bot.predict(message)[0][0]<=0.6:\n buddy_msg=bot.getname() + \", don't take too much stress. All you need to do is adjust \"\\\n \"your priorities. Don't take on unnecessary duties and \"\\\n \"responsibilities.\\nTake advice from people whose opinion you \"\\\n \"trust, and get specific advice when issues arise.\\nYou should \"\\\n \"use stress management techniques and always hope for the best. \"\\\n \"These situations arise in everyone's life and what matters the \"\\\n \"most is taking the right decision at such moments.\"\n else:\n if self.prev == \"\":\n buddy_msg=\"It's ohk can you tell me something about your day... Did anything happen today that made you feel worried?\\n\"\n self.prev=\"same\"\n self.one=False\n \n \n \n else:\n buddy_msg='It looks like you might be feeling comfortable talking '\\\n 'about yourself. Could you share your feelings?\\n'\n self.one=False\n self.counter+=1\n\n self.chat_history.text += '\\n'+\"Cafe Buddy: \"+buddy_msg\n\n elif self.counter==3:\n if not self.one:\n print(\"Welcome to level 3\")\n keyword=[word for word in preprocess_string(message.lower()) if word in ('friend','work','education','school','college','family','studi','exam','fight')]\n if len(keyword)>0:\n buddy_msg = 'Will you please tell me in a bit more detail about it?'\n self.one=True\n self.counter=2\n else:\n buddy_msg= 'I see. Among the thoughts occuring in your mind, which one upsets you the most and why?\\n'\n self.chat_history.text += '\\n'+\"Cafe Buddy: \"+buddy_msg\n self.chat_history.text_size = (self.chat_history.width * 0.98, None)\n\nclass WindowManager(ScreenManager):\n pass\n\nkv=Builder.load_file('design.kv')\n\nsm = WindowManager()\nscreens=[Home(name=\"home\"), Questions(name=\"quest\"), Report(name=\"rep\"), Description(name='description'), CareerOptions(name='career'), Strengths(name='strengths'), ChatWindow(name='chat'),CameraClick(name='camera')]\nfor screen in screens:\n sm.add_widget(screen)\nsm.current = \"home\"\n\nclass CafeApp(App):\n def build(self):\n return sm\n\nif __name__=='__main__':\n CafeApp().run()" }, { "alpha_fraction": 0.6161518096923828, "alphanum_fraction": 0.6297737956047058, "avg_line_length": 32.98347091674805, "blob_id": "3e690ded22a26e13139aa3f582873c9f2c7fe3bc", "content_id": "821c74866385db9dad8a909107fd85b6922ae93b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4111, "license_type": "permissive", "max_line_length": 327, "num_lines": 121, "path": "/CafeBuddy-Ayushi/temp.py", "repo_name": "riti121/cafe", "src_encoding": "UTF-8", "text": "import kivy\nkivy.require('1.11.1')\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.properties import ObjectProperty\nfrom collections import Counter\nimport time\n\n\nclass Home(Screen):\n pass\n\n\nclass Questions(Screen):\n \n \n ques_path='Personality Test(base)\\Questions.txt'\n personalities={'isfj':'Defender','esfj':'Cousellor','istj':'Logistician','estp':'Entrepreneur','esfp':'Entertainer','istp':'Virtuoso','isfp':'Adventurer','entj':'Commander','entp':'Debator','intj':'Architect','intp':'Logician','enfj':'Protagonist','enfp':'Campaigner','infj':'Advocate','infp':'Mediator','estj':'Executive'}\n personality=''\n questions=[]\n question_1 = ObjectProperty(None)\n question_2 = ObjectProperty(None)\n counter=1\n answers=[0]*20\n with open(ques_path) as quest_file:\n questions=[r.split('SPLIT') for r in quest_file.readlines()]\n \n def personality_exam(self,answers):\n e,s,j,t=['e','i'],['s','n'],['j','p'],['t','f']\n e.extend([answers[r] for r in range(0,20,4)]) \n s.extend([answers[r] for r in range(1,20,4)]) \n t.extend([answers[r] for r in range(2,20,4)]) \n j.extend([answers[r] for r in range(3,20,4)]) \n personality='' \n for option in e,s,t,j: \n temp=Counter(option) \n personality+=option[0] if temp['a']>temp['b'] else option[1] \n Report.personality=personality\n \n def on_enter(self, *args):\n self.question_1.text=self.questions[0][0]\n self.question_2.text=self.questions[0][1]\n \n def ask_question1(self):\n if(self.counter==20):\n self.answers[self.counter-1]='a'\n self.personality_exam(self.answers)\n self.counter=1\n sm.current = 'rep'\n \n else:\n self.question_1.text=self.questions[self.counter][0]\n self.question_2.text=self.questions[self.counter][1] \n self.answers[self.counter-1]='a'\n self.counter+=1\n\n def ask_question2(self):\n if(self.counter==20):\n self.answers[self.counter-1]='b'\n self.personality_exam(self.answers)\n self.counter=1\n sm.current = 'rep'\n \n else:\n self.question_1.text=self.questions[self.counter][0]\n self.question_2.text=self.questions[self.counter][1] \n self.answers[self.counter-1]='b'\n self.counter+=1\n\n \n\nclass Report(Screen):\n personality=''\n def on_enter(self, *args):\n self.per.text=Questions.personalities[self.personality]+'\\n'+'('+self.personality+')'\n self.image.source= Report.personality+'\\INTRODUCTION\\Image.png'\nclass Description(Screen):\n def on_enter(self, *args):\n self.persona.text=Questions.personalities[Report.personality]\n file_path=Report.personality+'\\INTRODUCTION\\Introduction.txt'\n with open(file_path) as file:\n self.detail.text=file.read()\n\nclass CareerOptions(Screen):\n def on_enter(self, *args):\n self.persona.text=Questions.personalities[Report.personality]\n file_path=Report.personality+'\\career.txt'\n with open(file_path) as file:\n self.detail.text=file.read()\n\nclass Strengths(Screen):\n def on_enter(self, *args):\n self.persona.text=Questions.personalities[Report.personality]\n file_path=Report.personality+'\\STRENGTHS\\Strengths.txt'\n with open(file_path) as file:\n self.detail.text=file.read()\n\nclass CameraClick(Screen):\n pass\n\nclass ChatWindow(Screen):\n pass\n\nclass WindowManager(ScreenManager):\n pass\n\nkv=Builder.load_file('design_edit.kv')\n\nsm = WindowManager()\nscreens=[Home(name=\"home\"), Questions(name=\"quest\"), Report(name=\"rep\"), Description(name='description'), CareerOptions(name='career'), Strengths(name='strengths'), ChatWindow(name='chat'),CameraClick(name='camera')]\nfor screen in screens:\n sm.add_widget(screen)\nsm.current = \"home\"\n\nclass CafeApp(App):\n def build(self):\n return sm\n\nif __name__=='__main__':\n CafeApp().run()" }, { "alpha_fraction": 0.6567164063453674, "alphanum_fraction": 0.6635006666183472, "avg_line_length": 20.705883026123047, "blob_id": "ffd2d41d70fffb398d537672c52f34f583ddbadd", "content_id": "6a6130d4ccbfaa1d80a2d5680640b85c72826d00", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 737, "license_type": "permissive", "max_line_length": 57, "num_lines": 34, "path": "/temp.py", "repo_name": "riti121/cafe", "src_encoding": "UTF-8", "text": "import kivy\nkivy.require('1.11.1')\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.properties import ObjectProperty\nimport time\n\nclass Home(Screen):\n def animation_begins(self):\n textvalue=self.labelvalue.text.split()\n var=\" \"\n for i in textvalue:\n var+=i\n self.labelvalue.text=var\n time.sleep(3)\n\nclass WindowManager(ScreenManager):\n pass\n\nkv=Builder.load_file('designing.kv')\n\nsm = WindowManager()\nscreens=[Home(name=\"home\")]\nfor screen in screens:\n sm.add_widget(screen)\nsm.current = \"home\"\n\nclass CafeApp(App):\n def build(self):\n return sm\n\nif __name__=='__main__':\n CafeApp().run()" } ]
8
xjwang-cs/flownet2
https://github.com/xjwang-cs/flownet2
616618ccb5eeef5315971505af01d0f681d2c520
0aae99beb235b9776a8cb5f792a9e2682a4d9c42
8b804607082b6b999c72a5b6af1c7839a7b74f49
refs/heads/master
2021-05-24T14:58:44.204022
2020-05-08T04:52:46
2020-05-08T04:52:46
253,616,209
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.740029513835907, "alphanum_fraction": 0.7636632323265076, "avg_line_length": 66.69999694824219, "blob_id": "2142b5047085fc048a21f74d6d48d9ffc491083b", "content_id": "c9cf4f6552c9ecfba6ee841bcdf728297c1122a4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 677, "license_type": "permissive", "max_line_length": 113, "num_lines": 10, "path": "/scripts/inference.sh", "repo_name": "xjwang-cs/flownet2", "src_encoding": "UTF-8", "text": "# Example on MPISintel Final and Clean, with L1Loss on FlowNet2 model\npython main.py --inference --model FlowNet2 --save_flow --inference_visualize \\\n--inference_dataset Nba2k_players_images --inference_dataset_root '/projects/grail/xiaojwan/nba2k_players_flow' \\\n--inference_dataset_dstype 'val' \\\n--inference_dataset_img1_dirname '/projects/grail/xiaojwan/2k_frames_masked_players' \\\n--inference_dataset_img2_dirname '/projects/grail/xiaojwan/2k_players_mesh_blender_est_camera' \\\n--number_gpus 1 --gpu_ids '1' \\\n--resume './pretrained_models/FlowNet2_checkpoint.pth.tar' \\\n--inference_n_batches 10 --inference_batch_size 1 \\\n--save './val_results_pretrained_flownet2_model'\n" }, { "alpha_fraction": 0.6950067281723022, "alphanum_fraction": 0.7435897588729858, "avg_line_length": 66.18181610107422, "blob_id": "1eec1a3c7e0b709026414de1e6be0c8e55a573d0", "content_id": "b1c0ee1e322b3602530a8a9aaee21626739a9284", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 741, "license_type": "permissive", "max_line_length": 117, "num_lines": 11, "path": "/scripts/train_simple.sh", "repo_name": "xjwang-cs/flownet2", "src_encoding": "UTF-8", "text": "# Example on MPISintel Final and Clean, with L1Loss on FlowNet2 model\npython main.py --model FlowNet2 --loss=L1Loss --optimizer=Adam --optimizer_lr=1e-5 \\\n--training_dataset Nba2k --training_dataset_dstype 'train' --training_dataset_img1_dirname '2k_mesh_rasterized' \\\n--validation_dataset Nba2k --validation_dataset_dstype 'val' --validation_dataset_img1_dirname '2k_mesh_rasterized' \\\n--inference_dataset Nba2k --inference_dataset_dstype 'val' --inference_dataset_img1_dirname '2k_mesh_rasterized' \\\n--crop_size 768 1280 --batch_size 1 \\\n--number_gpus 4 --gpu_ids 0 2 3 4 \\\n--validation_frequency 5 \\\n--render_validation --save_flow --inference_visualize \\\n--inference_n_batches 40 --inference_batch_size 1 \\\n--save './work_simple_1280'\n\n\n" }, { "alpha_fraction": 0.5512843728065491, "alphanum_fraction": 0.5740973353385925, "avg_line_length": 34.45222854614258, "blob_id": "4bd443e05373c30e5b5d1c097e06b49d35f7aeb1", "content_id": "2fb76209045ac0dfda15ae2b5c9739cd61c524b7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5567, "license_type": "permissive", "max_line_length": 120, "num_lines": 157, "path": "/mydatasets.py", "repo_name": "xjwang-cs/flownet2", "src_encoding": "UTF-8", "text": "import torch\nimport torch.utils.data as data\n\nimport os, math, random\nfrom os.path import *\nimport numpy as np\n\nfrom glob import glob\nimport utils.frame_utils as frame_utils\n\nclass StaticRandomCrop(object):\n def __init__(self, image_size, crop_size):\n self.th, self.tw = crop_size\n h, w = image_size\n self.h1 = random.randint(0, h - self.th)\n self.w1 = random.randint(0, w - self.tw)\n\n def __call__(self, img):\n return img[self.h1:(self.h1+self.th), self.w1:(self.w1+self.tw),:]\n\nclass StaticCenterCrop(object):\n def __init__(self, image_size, crop_size):\n self.th, self.tw = crop_size\n self.h, self.w = image_size\n def __call__(self, img):\n return img[(self.h-self.th)//2:(self.h+self.th)//2, (self.w-self.tw)//2:(self.w+self.tw)//2,:]\n\nclass Nba2k(data.Dataset):\n def __init__(self, args, is_cropped = False, root = '', img1_dirname='2k_mesh_rasterized', \n img2_dirname='2k_mesh_rasterized_noised_camera_sigma_5', \n dstype = 'train', replicates = 1):\n self.args = args\n self.is_cropped = is_cropped\n self.crop_size = args.crop_size\n self.render_size = args.inference_size\n self.replicates = replicates\n\n # read 'dstype' list of names\n with open(os.path.join(root, dstype+'.txt')) as f:\n frame_names = f.read().splitlines()\n\n self.flow_list = []\n self.image_list = []\n for frame_name in frame_names:\n flow = os.path.join(img2_dirname, frame_name+'.flo.npy') \n img1 = os.path.join(img1_dirname, frame_name+'.png')\n img2 = os.path.join(img2_dirname, frame_name+'.png')\n\n if not isfile(img1) or not isfile(img2) or not isfile(flow):\n continue\n\n self.image_list += [[img1, img2]]\n self.flow_list += [flow]\n \n self.size = len(self.image_list)\n self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape\n\n if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):\n self.render_size[0] = ( (self.frame_size[0])//64 ) * 64\n self.render_size[1] = ( (self.frame_size[1])//64 ) * 64\n\n args.inference_size = self.render_size\n\n assert (len(self.image_list) == len(self.flow_list))\n print('There are {} frames in the dataset'.format(self.size))\n\n def __getitem__(self, index):\n\n index = index % self.size\n\n img1 = frame_utils.read_gen(self.image_list[index][0])\n img2 = frame_utils.read_gen(self.image_list[index][1])\n\n flow = frame_utils.read_gen(self.flow_list[index])\n\n images = [img1, img2]\n image_size = img1.shape[:2]\n\n if self.is_cropped:\n cropper = StaticRandomCrop(image_size, self.crop_size)\n else:\n cropper = StaticCenterCrop(image_size, self.render_size)\n images = list(map(cropper, images))\n flow = cropper(flow)\n\n images = np.array(images).transpose(3,0,1,2)\n flow = flow.transpose(2,0,1)\n\n images = torch.from_numpy(images.astype(np.float32))\n flow = torch.from_numpy(flow.astype(np.float32))\n\n return [images], [flow]\n\n def __len__(self):\n return self.size * self.replicates\n\nclass Nba2k_players_images(data.Dataset):\n def __init__(self, args, is_cropped = False, root = '',\n img1_dirname='2k_frames_masked_players', \n img2_dirname='2k_players_mesh_blender_est_camera', \n dstype = 'train', replicates = 1):\n self.args = args\n self.is_cropped = is_cropped\n self.crop_size = args.crop_size\n self.render_size = args.inference_size\n self.replicates = replicates\n\n # read 'dstype' list of names\n with open(os.path.join(root, dstype+'.txt')) as f:\n frame_names = f.read().splitlines()\n\n self.image_list = []\n for frame_name in frame_names:\n img1 = os.path.join(img1_dirname, frame_name+'.png')\n img2 = os.path.join(img2_dirname, frame_name+'.png')\n \n if not isfile(img1) or not isfile(img2):\n continue\n\n self.image_list += [[img1, img2]]\n \n self.size = len(self.image_list)\n self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape\n\n if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):\n self.render_size[0] = ( (self.frame_size[0])//64 ) * 64\n self.render_size[1] = ( (self.frame_size[1])//64 ) * 64\n\n args.inference_size = self.render_size\n\n print('There are {} frames in the dataset'.format(self.size))\n\n def __getitem__(self, index):\n\n index = index % self.size\n\n img1 = frame_utils.read_gen(self.image_list[index][0])\n img2 = frame_utils.read_gen(self.image_list[index][1])\n\n\n images = [img1, img2]\n image_size = img1.shape[:2]\n\n if self.is_cropped:\n cropper = StaticRandomCrop(image_size, self.crop_size)\n else:\n cropper = StaticCenterCrop(image_size, self.render_size)\n images = list(map(cropper, images))\n\n images = np.array(images).transpose(3,0,1,2)\n images = torch.from_numpy(images.astype(np.float32))\n \n\n return [images], [torch.zeros(images.size()[0:1] + (2,) + images.size()[-2:])]\n\n def __len__(self):\n return self.size * self.replicates\n\n" }, { "alpha_fraction": 0.6141263842582703, "alphanum_fraction": 0.6412639617919922, "avg_line_length": 33.47435760498047, "blob_id": "ed4c5cd880fb7c2216cff3d77c6a235c3868acd2", "content_id": "a8604dab12d0363bc5aedff7b7335eaa9045b143", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2690, "license_type": "permissive", "max_line_length": 102, "num_lines": 78, "path": "/warp_image.py", "repo_name": "xjwang-cs/flownet2", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os, glob\nfrom skimage.io import imread, imsave\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nfrom utils import flow_utils\n\nclass StaticCenterCrop(object):\n def __init__(self, image_size, crop_size):\n self.th, self.tw = crop_size\n self.h, self.w = image_size\n def __call__(self, img):\n return img[(self.h-self.th)//2:(self.h+self.th)//2, (self.w-self.tw)//2:(self.w+self.tw)//2,:]\n\n \ndevice = torch.device(\"cuda:3\")\ntorch.cuda.set_device(device)\n\nframes_folder = '/homes/grail/xiaojwan/flownet2_validation_examples'\nframes = sorted(glob.glob(os.path.join(frames_folder, '*.pred.flo')))\nprint(len(frames))\nfor frame in frames:\n flow_file = frame\n frame_name = frame[:-9]\n image_with_noised_camera_file=frame_name+'_noised_sigma_5.png'\n image_file=frame_name+'_frame.png'\n output_prefix=frame_name\n\n image_with_noised_camera = imread(image_with_noised_camera_file)\n image=imread(image_file)[...,:3]\n flow=flow_utils.readFlow(flow_file)\n flow_zero_mask = np.all((flow==0), axis=2, keepdims=True)\n flow_zero_mask = np.expand_dims(flow_zero_mask, axis=0)\n\n image_size = image.shape[:2]\n render_size = flow.shape[:2]\n print(image_size)\n print(render_size)\n cropper = StaticCenterCrop(image_size, render_size)\n image_with_noised_camera = cropper(image_with_noised_camera)\n image = cropper(image)\n\n \n \n image_with_noised_camera = np.expand_dims(image_with_noised_camera, axis=0)\n image_with_noised_camera = image_with_noised_camera.transpose(0, 3, 1, 2)\n image_with_noised_camera = torch.from_numpy(image_with_noised_camera).float().to(device)\n\n B, C, H, W = image_with_noised_camera.size() ## B=1\n \n # mesh grid \n xx = torch.arange(0, W).view(1,-1).repeat(H,1)\n yy = torch.arange(0, H).view(-1,1).repeat(1,W)\n xx = xx.view(1,1,H,W).repeat(B,1,1,1)\n yy = yy.view(1,1,H,W).repeat(B,1,1,1)\n grid = torch.cat((xx,yy),1).float().to(device)\n\n flow = np.expand_dims(flow, axis=0)\n flow = flow.transpose(0, 3, 1, 2)\n flow = torch.from_numpy(flow).float().to(device)\n vgrid = Variable(grid)+flow\n\n # scale grid to [-1,1] \n vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:].clone() / max(W-1,1)-1.0\n vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:].clone() / max(H-1,1)-1.0\n\n vgrid = vgrid.permute(0,2,3,1) \n output = nn.functional.grid_sample(image_with_noised_camera, vgrid)\n\n\n\n output = output.cpu().numpy()\n output = output.transpose(0,2,3,1)\n\n # for flow value = 0, use original pxiel value\n output = output * (1-flow_zero_mask) + image * flow_zero_mask\n imsave(output_prefix+'.pred.warp.png', output[0])\n\n" }, { "alpha_fraction": 0.6408668756484985, "alphanum_fraction": 0.7151702642440796, "avg_line_length": 26, "blob_id": "2baafcf8b3c35f4ccbe95972aa5a58873f87d70b", "content_id": "697e1f2ec5827268778477ae38057ed130456dac", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "permissive", "max_line_length": 99, "num_lines": 12, "path": "/visualize_flow.py", "repo_name": "xjwang-cs/flownet2", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os.path\nfrom skimage.io import imread, imsave\nfrom utils import flow_utils\n\ndef main():\n\tflow_filename = '../flownet2_validation_examples/NBA2K19_2019.01.31_23.50.52_frame124678.pred.flo'\n\tsave_dir = '../'\n\tflow_utils.visulize_flow_file(flow_filename, save_dir)\n\nif __name__ == \"__main__\":\n\tmain()" } ]
5
chaositect/artificial_neural_network_examples
https://github.com/chaositect/artificial_neural_network_examples
29157bbc48a262910b9e3cfd6b854a2dd2b33657
052967ff8218e88627e9dd6f6c97dab4124f5db8
a8003982b98128318d3acbabb6d7f16d08ead73e
refs/heads/master
2023-07-15T20:04:30.113095
2021-08-20T17:24:37
2021-08-20T17:24:37
398,347,248
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6469215154647827, "alphanum_fraction": 0.6752261519432068, "avg_line_length": 32.94059371948242, "blob_id": "4dbfe027372846027ea79e90709695c12ebd86a5", "content_id": "c337a73a6ce4f3612f1ff505f06aca62ad46d728", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3427, "license_type": "permissive", "max_line_length": 106, "num_lines": 101, "path": "/ann_probability_output.py", "repo_name": "chaositect/artificial_neural_network_examples", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 26 15:20:48 2021\n\n@author: Grant Isaacs\n\"\"\"\n\n#IMPORT LIBRARIES------------------------------------------------------------->\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\n\n\n\n#PREPROCESS DATA-------------------------------------------------------------->\n#load data\ndataset = pd.read_csv(\"_\")\nX = dataset.iloc[:, 3:-1].values\ny = dataset.iloc[:, -1].values\n\n#check for missing values\nprint(sum(np.equal(X, None)))\n\n#encode categorical variables\nlencoder = LabelEncoder()\nX[: , 2] = lencoder.fit_transform(X[:, 2])\n\nctransform = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough')\nX = np.array(ctransform.fit_transform(X))\n\n#split dataset into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=361981)\n\n#scale the features\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n\n\n\n#STRUCTURE THE ANN------------------------------------------------------------>\n#initialize the neural network\nneural_net = tf.keras.models.Sequential()\n\n#create the input layer and first hidden layer to form a shallow learning model.\n\"\"\"Layer quantity is determined by experimentation and expertise.\"\"\"\nneural_net.add(tf.keras.layers.Dense(units=6, activation='relu'))\n\n#create the second hidden layer to form a deep learning model.\nneural_net.add(tf.keras.layers.Dense(units=6, activation='relu'))\n\n#add the output layer\n\"\"\"Output units equals the output dimensions minus 1.\nThis model generates a probability between 0 and 1 (Sigmoid)\"\"\"\nneural_net.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))\n\n\n\n\n#TRAIN THE ANN---------------------------------------------------------------->\n#compile the neural network\n\"\"\"In this example the adam optimizer is used for stochastic gradient desecent.\n The output is binary so binary cross entropy is selected for the loss function.\"\"\"\nneural_net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n#train the neural network\n\"\"\"Batch and Epoch arguments were chose based on previous training data.\nModify accordingly.\"\"\"\nneural_net.fit(X_train, y_train, batch_size=32, epochs=100)\n\n\n\n\n#GENERATE PREDICTIONS--------------------------------------------------------->\n#predict test set\ny_pred = neural_net.predict(X_test)\ny_pred = (y_pred > 0.5)\ny_test = y_test.reshape(len(y_test), 1)\ny_pred = y_pred.reshape(len(y_pred), 1)\nprint(np.concatenate((y_pred, y_test), 1))\n\n#build confusion matrix\ncmatrix = confusion_matrix(y_test, y_pred)\nprint(cmatrix)\nprint(accuracy_score(y_test, y_pred))\n\n#individual prediction\n\"\"\"Apply the transform method to scale the variables to the same distribution as the training data.\"\"\"\npred = neural_net.predict(scaler.transform([[1.0, 0.0, 0.0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]]))\nprint(\"Predicted Probabilty the Customer will leave: {}\".format(pred))\n\npred = neural_net.predict(scaler.transform([[1.0, 0.0, 0.0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])) > 0.5\nprint(\"Binary statement will the customer leave: {}\".format(pred))" }, { "alpha_fraction": 0.7981407642364502, "alphanum_fraction": 0.7981407642364502, "avg_line_length": 49.20000076293945, "blob_id": "db0407bd049a4e8b09223717c69b1bf3939f1392", "content_id": "ea6a86fc79a14c9b42e5f8136a7b117ecf391bcd", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 753, "license_type": "permissive", "max_line_length": 157, "num_lines": 15, "path": "/README.md", "repo_name": "chaositect/artificial_neural_network_examples", "src_encoding": "UTF-8", "text": "# artificial_neural_network_examples\nA collection of Artificial Neural Networks to use as templates or case studies.\n\nIn my time studying ANN's I have worked along with examples, built my own test cases and constructed ready to use templates for quickstarting an ANN project.\nThis is just a small collection of ANN structures that I have put together over the years and have chosen to share for educational purposes.\n\nWhen applicable, the name of the course from which the example was drawn is included in the file.\n\nEach file is a stand alone script that can be run independently of all others.\n\n(Some modifications may be required to work with a particular data set.)\n\nShould you have any questions or comments, feel free to email me: [email protected]\n\nEnjoy!\n" } ]
2
Farshad-Hasanpour/dataset-loading-class
https://github.com/Farshad-Hasanpour/dataset-loading-class
cc5549af3db121d390e0cf10191424ad4cdb1085
1088e5459da6478f289497590e1a5f0debe8924e
621c7c5f0a90fe6ca3e65ac73788343367c1a484
refs/heads/master
2023-01-08T14:11:11.971848
2020-11-08T17:44:09
2020-11-08T17:44:09
310,672,607
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6708464026451111, "alphanum_fraction": 0.6730407476425171, "avg_line_length": 33.290321350097656, "blob_id": "4b62b4ecb519fc70e5d685d0a7f6499377466c19", "content_id": "fb2d9db29330fd624caee921bdcc7e4c1d6f01ac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3190, "license_type": "permissive", "max_line_length": 142, "num_lines": 93, "path": "/Dataset.py", "repo_name": "Farshad-Hasanpour/dataset-loading-class", "src_encoding": "UTF-8", "text": "import gzip\nimport json\nimport csv\n\n\nclass Dataset:\n\tdef __init__(self, filename):\n\t\tself.filename = filename\n\n\tdef read_json(self, useful_keys=(), required_keys=(), is_gzip=False, encoding='utf8'):\n\t\t\"\"\"\n\t\t:param useful_keys: (tuple): Keys to return for each dataset record. Pass empty to return all keys.\n\t\t:param required_keys: (tuple): Required keys for each record. If one of these keys does not exist, this function ignores the dataset record.\n\t\t:param is_gzip: (boolean): Whether the file is a compressed file or not.\n\t\t:param encoding: (string): The default is 'utf8'.\n\t\t:return: (list of dictionary): For each JSON record, return a dictionary inside a list.\n\t\t\"\"\"\n\t\tdataset = list()\n\t\tif is_gzip:\n\t\t\topen_function = gzip.GzipFile\n\t\telse:\n\t\t\topen_function = open\n\t\t# Load dataset file\n\t\twith open_function(self.filename, 'rb') as file:\n\t\t\t# For each record in dataset\n\t\t\tfor line in file:\n\t\t\t\tdata = json.loads(line, encoding=encoding)\n\t\t\t\t# By default get the dataset record\n\t\t\t\tappend_record = True\n\t\t\t\t# If required keys does not exist do not get the record otherwise\n\t\t\t\tfor key in required_keys:\n\t\t\t\t\tif not data.get(key):\n\t\t\t\t\t\tappend_record = False\n\t\t\t\t\t\tbreak\n\t\t\t\t# get useful reviews\n\t\t\t\tif append_record:\n\t\t\t\t\t# Determine useful keys\n\t\t\t\t\tuseful = ()\n\t\t\t\t\tif 0 == len(useful_keys):\n\t\t\t\t\t\tuseful = data.keys()\n\t\t\t\t\telse:\n\t\t\t\t\t\tuseful = useful_keys\n\t\t\t\t\ttemp = {}\n\t\t\t\t\tfor key in useful:\n\t\t\t\t\t\ttemp[key] = data.get(key)\n\t\t\t\t\tdataset.append(temp)\n\t\treturn dataset\n\n\tdef read_csv(self, useful_keys=(), required_keys=(), delimiter=',', is_gzip=False, encoding='utf8'):\n\t\t\"\"\"\n\t\t:param useful_keys: (tuple or string): Keys to return for each dataset record. Pass empty to return all keys.\n\t\t:param required_keys: (tuple): Required keys for each record. If one of these keys does not exist, this function ignores the dataset record.\n\t\t:param delimiter: (string): CSV delimiter\n\t\t:param is_gzip: (boolean): Whether the file is a compressed file or not.\n\t\t:param encoding: (string): The default is 'utf8'.\n\t\t:return: (list of list | list): For each CSV row, return a list inside another list and a list of headers.\n\t\t\"\"\"\n\t\tdataset = list()\n\t\tif is_gzip:\n\t\t\topen_function = gzip.open\n\t\telse:\n\t\t\topen_function = open\n\t\t# Load dataset file\n\t\twith open_function(self.filename, mode='rt', encoding=encoding) as file:\n\t\t\tcontent = csv.reader((line.replace('\\0', '') for line in file), delimiter=delimiter)\n\t\t\t# Get keys of dataset\n\n\t\t\theaders = next(content)\n\t\t\t# Transform keys to index\n\t\t\tuseful = []\n\t\t\trequired = []\n\t\t\tif 0 == len(useful_keys):\n\t\t\t\titeration = headers\n\t\t\telse:\n\t\t\t\titeration = useful_keys\n\t\t\tfor key in iteration:\n\t\t\t\tuseful.append(headers.index(key))\n\t\t\tfor key in required_keys:\n\t\t\t\trequired.append(headers.index(key))\n\t\t\t# For each record in dataset\n\t\t\tfor row in content:\n\t\t\t\tif not row:\n\t\t\t\t\tcontinue\n\t\t\t\t# By default get the record from dataset\n\t\t\t\tappend_record = True\n\t\t\t\t# If one of required keys does not exists ignore this dataset record otherwise get the record\n\t\t\t\tfor i in required:\n\t\t\t\t\tif row[i] == '':\n\t\t\t\t\t\tappend_record = False\n\t\t\t\t\t\tbreak\n\t\t\t\tif append_record:\n\t\t\t\t\tdataset.append(list(row[index] for index in useful))\n\t\t\treturn dataset, headers\n\n" }, { "alpha_fraction": 0.7103825211524963, "alphanum_fraction": 0.7122039794921875, "avg_line_length": 34.04255294799805, "blob_id": "256ab3892c69b303fea3b6d50747bb765f035b40", "content_id": "5a56ff274dcd78444809d0dac0657ab82f3df83b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1647, "license_type": "permissive", "max_line_length": 145, "num_lines": 47, "path": "/README.md", "repo_name": "Farshad-Hasanpour/dataset-loading-class", "src_encoding": "UTF-8", "text": "# Dataset Loading Class\n\nThis python class helps you to read various dataset file types including, JSON, CSV, etc. Each function is for a specific type of dataset file. \n\n## Alternatives\n\n- [pandas](https://pandas.pydata.org/docs/index.html)\n\n## Requirements\n\nThis library uses modules below:\n\n- gzip\n- json\n- csv\n\n## Usage\n\nThe code below is an example to load a compressed JSON dataset.\n\n```python\nFILENAME = 'Movies_and_TV_5core.json.gz'\nUSEFUL_KEYS = {'overall', 'vote', 'reviewText', 'summary'}\nREQUIRED_KEYS = {'vote', 'reviewText'}\ndataset = Dataset(FILENAME).read_json(useful_keys=USEFUL_KEYS, required_keys=REQUIRED_KEYS, is_zip=True)\n```\n\n## Functions\n\n### read_json()\n\nFor each JSON record, return a dictionary inside a list.\n\n - **useful_keys** (type: tuple): Keys to return for each dataset record. Pass empty to return all keys.\n - **required_keys** (type: tuple): Required keys for each record. If one of these keys does not exist, this function ignores the dataset record.\n - **is_gzip** (type: boolean): Whether the file is a compressed file or not.\n - **encoding** (type: string): The default is 'utf8'.\n\n### read_csv()\n\nFor each CSV row, return a list inside another list and a list of headers.\n\n - **useful_keys** (type: tuple): Keys to return for each dataset record. Pass empty to return all keys.\n - **required_keys** (type: tuple): Required keys for each record. If one of these keys does not exist, this function ignores the dataset record.\n - **delimiter** (type: string): CSV delimiter\n - **is_gzip** (type: boolean): Whether the file is a compressed file or not.\n - **encoding** (type: string): The default is 'utf8'.\n" } ]
2
hamzaalkharouf/House-Price-prediction
https://github.com/hamzaalkharouf/House-Price-prediction
3426c12645edbaf27dde08360937a8d0f2257ec9
d8ec2109dbc7b8b0ccd19af191e73f5f655ce365
06cd248dd1243886d5461f9701a3bfc42f3ed7ac
refs/heads/master
2022-12-26T03:13:52.690377
2020-10-08T09:10:17
2020-10-08T09:10:17
297,780,351
0
1
null
2020-09-22T21:48:59
2020-09-30T12:29:19
2020-09-30T12:50:38
Python
[ { "alpha_fraction": 0.6391304135322571, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 24.55555534362793, "blob_id": "c49a43db1ec81f91ce16cb3fc8b3a005380ba985", "content_id": "f6bfd8f85991785f6c3648e60bcecea144390ec4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "no_license", "max_line_length": 53, "num_lines": 18, "path": "/scikit_learn.py", "repo_name": "hamzaalkharouf/House-Price-prediction", "src_encoding": "UTF-8", "text": "import pickle\nimport argparse\nimport numpy as np\n\n\n#take model\n#Calculate price from scikit\ndef path(list_data):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-path\",\"--path\",type = str)\n args = parser.parse_args()\n # './model.pickle'\n loaded_model = pickle.load(open(args.path, 'rb'))\n x = np.array(list_data).reshape(1,6)\n result = loaded_model.predict(x)\n if x.shape[0] == 1:\n result = result[0]\n return result\n" }, { "alpha_fraction": 0.6460732817649841, "alphanum_fraction": 0.6785340309143066, "avg_line_length": 41.44444274902344, "blob_id": "7674dfc9586784e097a57f8ac1d4bcb0ed3bf422", "content_id": "1a2576466fdb58f1a31b60fdfcfa80995dbd5545", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1910, "license_type": "no_license", "max_line_length": 180, "num_lines": 45, "path": "/app.py", "repo_name": "hamzaalkharouf/House-Price-prediction", "src_encoding": "UTF-8", "text": "from flask import Flask,request\nimport scikit_learn\nimport Write_Csv\napp = Flask(__name__)\n#append data(from url) to list\ndef Data_append(x1,x2,x3,x4,x5,x6):\n list_data=[]\n list_data.append(x1)\n list_data.append(x2)\n list_data.append(x3)\n list_data.append(x4)\n list_data.append(x5)\n list_data.append(x6)\n return list_data\n\n#route /\n#take data from url then send them to scikit_learn of Calculate price from scikit\n#return information\[email protected]('/')\ndef hello_world():\n transaction_date=float(request.args.get('transaction_date'))\n house_age=float(request.args.get('house_age'))\n distance_to_the__nearest_MRT_station=float(request.args.get('distance_to_the__nearest_MRT_station'))\n number_of_convenience_stores=float(request.args.get('number_of_convenience_stores'))\n latitude=float(request.args.get('latitude'))\n longitude=float(request.args.get('longitude'))\n list_data=[]\n list_data=Data_append(transaction_date,house_age,distance_to_the__nearest_MRT_station,number_of_convenience_stores,latitude,longitude)\n price=scikit_learn.path(list_data)\n list_data.append(price)\n Write_Csv.Write_Csv(list_data)\n return '''<h3>\n transaction date : {}<br>\n house age= {}<br>\n distance to the nearest MRT station= {}<br>\n number of convenience stores= {}<br>\n latitude= {}<br>\n longitude= {}<br>\n price ={}\n </h3>'''.format(transaction_date,house_age,distance_to_the__nearest_MRT_station,number_of_convenience_stores,latitude,longitude,price)\n\n#to run servier => py app.py -path ./model.pickle\nif __name__ == '__main__':\n app.run(port=5060,debug=False,use_reloader=False)\n# http://127.0.0.1:5060/?transaction_date=2017.917&house_age=10&distance_to_the__nearest_MRT_station=306.59470&number_of_convenience_stores=15&latitude=24.98034&longitude=121.53951\n" }, { "alpha_fraction": 0.5992292761802673, "alphanum_fraction": 0.6050096154212952, "avg_line_length": 29.52941131591797, "blob_id": "6da901289b84785e927c7338a6c9eda462b48a02", "content_id": "19af70cf934adece3d77c3573a1966ab7c64cec3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 65, "num_lines": 17, "path": "/Write_Csv.py", "repo_name": "hamzaalkharouf/House-Price-prediction", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport os\nTHIS_FOLDER = os.path.dirname(os.path.abspath(__file__))\nmy_file_estate = os.path.join(THIS_FOLDER,'csv\\\\Real estate.csv')\n\ndef Write_Csv(list_data):\n df = pd.read_csv(my_file_estate)\n file = open(my_file_estate,\"a\")\n number=df['No'].values[-1]\n number+=1\n file.write(str(number)+\",\")\n for i in list_data:\n if i != list_data[6]:\n file.write(str(i)+\",\")\n else :file.write(str(i)+\"\\n\")\n file.close()\n df.reset_index(drop = True,inplace=True)\n" }, { "alpha_fraction": 0.8209876418113708, "alphanum_fraction": 0.8209876418113708, "avg_line_length": 53, "blob_id": "7156b93241e64d8bc9c430e604de3b98d225e4f8", "content_id": "6fa47c290e84ee5164a8cac6d958465d3a4b4bb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 162, "license_type": "no_license", "max_line_length": 97, "num_lines": 3, "path": "/README.md", "repo_name": "hamzaalkharouf/House-Price-prediction", "src_encoding": "UTF-8", "text": "# House-Price-prediction\nbuild a Flask web service that gets features about a new house and it returns the predicted price\nAdapt microservice to use scikit learn\n" } ]
4
rakshitshah-28/APSITSkills-Project
https://github.com/rakshitshah-28/APSITSkills-Project
9ecd65e6f3e3a16d575e913a3fbed44d4cde8362
41d9082a4ddd2ffb7589379917369978134a3942
bc398b41849184f1cafd6171037a5f1da0015e38
refs/heads/master
2022-11-22T06:35:39.604758
2020-07-15T18:20:19
2020-07-15T18:20:19
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6770370602607727, "alphanum_fraction": 0.6992592811584473, "avg_line_length": 38.70588302612305, "blob_id": "2de07d3494d0ed86d4cc8932f1114a7d3e570d6e", "content_id": "5ca3baebd6304cc823e6560bca2574fb0b7ae29b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 675, "license_type": "no_license", "max_line_length": 149, "num_lines": 17, "path": "/README.md", "repo_name": "rakshitshah-28/APSITSkills-Project", "src_encoding": "UTF-8", "text": "# APSITSkills-Project\n\nChecking with Program -> \n\nA.\tWrite a program to print all prime nos between 1 to 1000\n\nB. Write a Python program to find whether a given number (accept from the user) is even or odd, print out an appropriate message to the user.\n\nC. Write a Python program to find whether a given number (accept from the user) is Prime or composite, print out an appropriate message to the user.\n\nD. Write a Python program to test whether a passed letter is a vowel or not.\n\nE. Write a Python program to check whether a specified value is contained in a group of values.\n \n Test Data:\n \t\n 3 -> [1, 5, 8, 3] : True\t\t-1 -> [1, 5, 8, 3] : False\n" }, { "alpha_fraction": 0.46717655658721924, "alphanum_fraction": 0.4799235165119171, "avg_line_length": 25.593219757080078, "blob_id": "cfbc6cdc8b6182405a9d207babb8b7058b6b8a98", "content_id": "6e2618cde43cb80151ea8ace788ba12484c126ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1569, "license_type": "no_license", "max_line_length": 62, "num_lines": 59, "path": "/Part_5.py", "repo_name": "rakshitshah-28/APSITSkills-Project", "src_encoding": "UTF-8", "text": "# Write a Python program to check whether a \n# specified value is contained in a group of values.\n\n# 3 -> [1, 5, 8, 3] : True\t\t-1 -> [1, 5, 8, 3] : False\n\nimport random\n\ndef check_in_group():\n while True:\n test_case = [1, 5, 8, 3]\n print('\\nEnter \\'-1\\' to QUIT.')\n value = input('Enter - ')\n try:\n value = int(value)\n except:\n print('\\tINVALID CHOICE.\\n\\tTRY AGAIN.\\n')\n continue\n if value == -1:\n print('\\tTHANK YOU.\\n\\tRETURNING TO MAIN MENU.\\n')\n break\n if value in test_case:\n print('True')\n break\n else:\n print('False')\n continue\n\n\n# in case needed.\ndef check_random():\n while True:\n test_case = list()\n length = input('\\nEnter Length of the test_case - ')\n try:\n length = int(length)\n except:\n print('\\tINVALID CHOICE.\\n\\tTRY AGAIN.\\n')\n continue\n for _ in range(length):\n test_case.append(random.choice(range(10)))\n break\n# print(test_case)\n while True:\n print('\\nEnter \\'-1\\' to QUIT.')\n value = input('Enter - ')\n try:\n value = int(value)\n except:\n print('\\tINVALID CHOICE.\\n\\tTRY AGAIN.\\n')\n continue\n if value == -1:\n print('\\tTHANK YOU.\\n\\tRETURNING TO MAIN MENU.\\n')\n break\n if value in test_case:\n print('True')\n break\n else:\n print('False')\n continue\n" }, { "alpha_fraction": 0.5879354476928711, "alphanum_fraction": 0.6091758608818054, "avg_line_length": 25.75, "blob_id": "77421bead51feed79f4f925da5b585000558850d", "content_id": "7bd6251f2075ac052886266d6f1d0ed9a06e6607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1177, "license_type": "no_license", "max_line_length": 61, "num_lines": 44, "path": "/CheckingWithProgram.py", "repo_name": "rakshitshah-28/APSITSkills-Project", "src_encoding": "UTF-8", "text": "# Menu-Driven program\n\nimport string\nimport random\n\n# just for reference purposes.\nfrom Part_1 import all_prime\nfrom Part_2 import even_odd\nfrom Part_3 import prime_composite\nfrom Part_4 import vowel_consonant\nfrom Part_5 import check_in_group\n\nwhile True:\n print('\\nChoose your Option - ')\n print('0. Exit')\n print('1. Print Prime Numbers between 1 to 1000.')\n print('2. To Find whether Number is ODD or EVEN.')\n print('3. To Find whether Number is PRIME or COMPOSITE.')\n print('4. To Find whether Alphabet is VOWEL or NOT.')\n print('5. To Check specified Value n Group of Values')\n option = input('Enter - ')\n try:\n option = int(option)\n except:\n print('\\tINVALID CHOICE.\\n\\tTRY AGAIN.\\n')\n continue\n \n if (option < 0 or option > 5):\n print('\\tINVALID CHOICE.\\n\\tTRY AGAIN.\\n')\n continue\n \n if option == 0:\n print('\\n\\tTHANK YOU FOR JOINING US!')\n exit(-1)\n elif option == 1:\n all_prime()\n elif option == 2:\n even_odd()\n elif option == 3:\n prime_composite()\n elif option == 4:\n vowel_consonant()\n elif option == 5:\n check_in_group()\n" } ]
3
RafaelDiaz7/some-programs-and-exercises
https://github.com/RafaelDiaz7/some-programs-and-exercises
d7300bd8337a5802fde8944b6a7b72f0c7f84d5f
ab1df666128974d19e088caa8ee8a9aba4d77419
ba1e416949d5e350948172a221e1e91ea5a9c4eb
refs/heads/main
2023-05-05T05:32:21.686097
2021-05-24T18:45:53
2021-05-24T18:45:53
320,138,832
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6461538672447205, "avg_line_length": 16.5, "blob_id": "22a80e398ede53687920102d2849ff5e00056b4f", "content_id": "6874dd7a9522731de445d643bfff4d046a6bf621", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 455, "license_type": "no_license", "max_line_length": 91, "num_lines": 26, "path": "/recursion/recursive-fibonacci/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\n/*\nIntroducing go exercise pg 53\n5. The Fibonacci sequence is defined as: \nfib(0) = 0, fib(1) = 1, fib(n) = fib(n-1) + fib(n-2).\nWrite a recursive function that can find fib(n). \n\n*/\n\nfunc main() {\n\tfmt.Println(fib(10))\n}\n\n// This recursive fibonacci its working well! I will also make the dynamic fibonacci luegoo\nfunc fib(n uint32) uint32 {\n\tif n == 0 {\n\t\treturn 0\n\t}\n\tif n == 1 {\n\t\treturn 1\n\t}\n\treturn fib(n-1) + fib(n-2)\n}\n" }, { "alpha_fraction": 0.593406617641449, "alphanum_fraction": 0.6556776762008667, "avg_line_length": 29.33333396911621, "blob_id": "c74ba4cf17928b6510db8b0f4a3836c33765e791", "content_id": "b473a9304d5bb4788fd3b16d6cd0d9abad2738e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 94, "num_lines": 9, "path": "/bucle_simple/bucle_simple.py", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "# Realizar un programa que utilizando una estructura repetitiva (bucle) presente por pantalla \n# los números del 1 al 10 separados por un guión.\n# El resultado debe ser similar a esto:\n\n# 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10\n\n\nfor i in range(1,11):\n\tprint(i, end=\"-\")\n" }, { "alpha_fraction": 0.6904761791229248, "alphanum_fraction": 0.6904761791229248, "avg_line_length": 10.454545021057129, "blob_id": "f972bc84bb8d8fa760d4061fee66b03ec688dfc7", "content_id": "b2a5914fcefca6b0f74af79c5b8c4d1fdb2873b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 126, "license_type": "no_license", "max_line_length": 31, "num_lines": 11, "path": "/composition-experiment-01/models/beverage.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package models\n\nimport \"fmt\"\n\ntype Beverage struct {\n\tColor string\n}\n\nfunc (b *Beverage) isColor() {\n\tfmt.Println(b.Color)\n}\n" }, { "alpha_fraction": 0.5791726112365723, "alphanum_fraction": 0.6048502326011658, "avg_line_length": 15.714285850524902, "blob_id": "6ac5587f1cde398db68d50d289908a394b245e72", "content_id": "b4b9cc1ddbbbe998d80d35370b9ed29535d43da3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 701, "license_type": "no_license", "max_line_length": 38, "num_lines": 42, "path": "/closures/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nfunc main() {\n\tnextEven := makeEvenGenerator()\n\tfmt.Println(nextEven()) //0\n\tfmt.Println(nextEven()) //2\n\tfmt.Println(nextEven()) //4\n\tfmt.Println(nextEven()) //6\n\tfmt.Println(nextEven()) //6\n\tfmt.Println()\n\tnextOdd:= makeOddGenerator()\n\tfmt.Println(nextOdd()) //0\n\tfmt.Println(nextOdd()) //1\n\tfmt.Println(nextOdd()) //3\n\tfmt.Println(nextOdd()) //5\n\tfmt.Println(nextOdd()) //7\n\tfmt.Println(nextOdd()) //7\n\n}\n\nfunc makeEvenGenerator() func() uint {\n\ti := uint(0)\n\treturn func() (ret uint) {\n\t\tret = i\n\t\ti += 2\n\t\treturn\n\t}\n}\n\nfunc makeOddGenerator() func() uint {\n\ti := uint(0)\n\treturn func() (ret uint) {\n\t\tret = i\n\t\ti += 1\n\t\tif i != 0 && i != 1 {\n\t\t\ti+=1\n\t\t}\n\t\treturn\n\t}\n}" }, { "alpha_fraction": 0.6777493357658386, "alphanum_fraction": 0.6841432452201843, "avg_line_length": 20.72222137451172, "blob_id": "07c82f22c6899f97a76c0336476b09cf80c740c8", "content_id": "c36f34cd4d27a383763e32a9cb71b49fc294c83f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 782, "license_type": "no_license", "max_line_length": 92, "num_lines": 36, "path": "/serve-http/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n)\n\nconst httpAddr = \":8080\"\n\nfunc main() {\n\tfmt.Println(\"Server running on\",httpAddr)\n\n\tmux:= http.NewServeMux()\n\n\tmux.HandleFunc(\"/\", homeHandler)\n\tmux.HandleFunc(\"/standard-library-rocks\", stdLibHandler)\n\n\tlog.Fatal(http.ListenAndServe(httpAddr, mux))\n}\n\nfunc homeHandler(w http.ResponseWriter, r *http.Request) {\n\t//_ := json.NewDecoder(r.Body).Decode(&)\n\t_, err := fmt.Fprintf(w, \"Hi there, I love %s!\", r.URL.Path[1:])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n}\n\nfunc stdLibHandler(w http.ResponseWriter, _ *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\t_, err := w.Write([]byte(\"I invite you to get curious about The Go programming language!\"))\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n}\n" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.5646258592605591, "avg_line_length": 10.307692527770996, "blob_id": "9ec6aa96433787019740dd5d9120084107295749", "content_id": "20f348f4fd2475c249f22d50d8754e348727a6c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 147, "license_type": "no_license", "max_line_length": 45, "num_lines": 13, "path": "/conditionals/comparing-int8-numbers/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n)\n\nfunc main() {\n\tvar a int8 = 0x24\n\tvar b int8 = 0xF\n\tif a > b{\n\t\tfmt.Printf(\"%d is greater than %d\\n\\n\",a,b)\n\t}\n}\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 11, "blob_id": "44fc4e524b8bc9ffc81dede2c9839cce5888e355", "content_id": "9a8ac4ef1388df59b9a0972a3295998ed25e9378", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 72, "license_type": "no_license", "max_line_length": 19, "num_lines": 6, "path": "/composition-experiment-01/models/brand.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package models\n\ntype Brand struct {\n\tBrandname string\n\tCountry string\n}\n" }, { "alpha_fraction": 0.5685339570045471, "alphanum_fraction": 0.570321798324585, "avg_line_length": 30.074073791503906, "blob_id": "bd54c34ebdaa70ba0db422f9742bd5502b604754", "content_id": "5322c61799ed513d0db96134ff0ad0f67aaaafc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1678, "license_type": "no_license", "max_line_length": 88, "num_lines": 54, "path": "/cantidad_letras/cantidad_letras.py", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "import string\n\n\ndef open_file():\n fname = input('Type the filename: ')\n fhandler = open(fname, encoding='utf-8')\n return fhandler\n\n\ndef count_characters(filehandler, text):\n # print('------Printin filehandler: ', filehandler)\n\n char_count = dict()\n for line in filehandler:\n line = line.strip().lower()\n line = line.translate(line.maketrans('', '', string.punctuation))\n # print('--After doing strip each char, Character: ', line)\n text = text + line\n print('')\n print('____________________________________________')\n print('The text after concatenate lines: ', text)\n print('|')\n print('|___Type of the text variable: ', type(text))\n print('____________________________________________')\n # tratar de no hacer un for anidado aca, eso es lo que hay que mejorar de este codio\n\n for character in text:\n # print('')\n # print('Char in text:', character)\n if character.isalpha():\n if character in char_count:\n char_count[str(character)] += 1\n else:\n char_count[str(character)] = 1\n # char_count[character] = char_count.get(character)\n else:\n continue\n return char_count\n\n\ndef order_by_decreasing(counter):\n inverse_counter_lst = list()\n for element in counter:\n inverse_counter_lst.append((counter[element], element))\n\n inverse_counter_lst.sort(reverse=True)\n\n for number, element in inverse_counter_lst:\n print(f'{element} -> {number}')\n\n\nfirst_text = \"\"\n# order_by_decreasing(count_characters(open_file(), first_text))\nprint(count_characters(open_file(), first_text))\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.614814817905426, "avg_line_length": 12.5, "blob_id": "9c8d92b67925f128fd4dd22a6c3f79ae2e8f9af1", "content_id": "54422d00cddd27c2d574601cc9247f561b94c8c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 270, "license_type": "no_license", "max_line_length": 88, "num_lines": 20, "path": "/pointers-experiment/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nfunc zero(xPtr *int) {\n\t*xPtr = 0\n}\n\nfunc four(xPtr *int) {\n\t*xPtr = 4\n}\n\nfunc main() {\n\tx := 1\n\tfmt.Println(x)\n\tzero(&x)\n\tfmt.Println(x) // Its going to happen that x value would not be changed. X its still 1.\n\tfour(&x)\n\tfmt.Println(x)\n}\n" }, { "alpha_fraction": 0.6578947305679321, "alphanum_fraction": 0.6947368383407593, "avg_line_length": 26.14285659790039, "blob_id": "9334fa829ae9fdf054bbb3c5c824068e07e2f057", "content_id": "4a38f2bfc245e0db26b8a66d347342b5699a2a7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 380, "license_type": "no_license", "max_line_length": 136, "num_lines": 14, "path": "/binary-operators/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport \"fmt\"\n\nfunc main() {\n\tvar a uint8 = 0xF\n\t// I want to add a number to 'a' uint8 variable using binary operators and hexadecimal numbers\n\ta |= 0x10\n\n\t/* The |= operator in this case means: a = a | 0x10. That statement means: assign to 'a' the bits that belong to either 'a' or 0x10 */ \n\n\t//This would print 31 if the operation was succesfull\n\tfmt.Println(a)\n}\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5806451439857483, "avg_line_length": 10.421052932739258, "blob_id": "3776c4898d08a5fce3c1568d6ad9a393792585a3", "content_id": "aa18d38349c7c8f75f9a781e36d4dc2e02eddf88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 217, "license_type": "no_license", "max_line_length": 37, "num_lines": 19, "path": "/swap-exercise/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n)\n\nfunc swap(xPtr *int,yPtr *int) {\n\tcache := *xPtr\n\t//fmt.Println(reflect.TypeOf(cache))\n\t*xPtr = *yPtr\n\t*yPtr = cache\n}\n\nfunc main() {\n\tx := 1\n\ty := 2\n\tswap(&x,&y)\n\tfmt.Println(x,y)\n}\n" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.6399840712547302, "avg_line_length": 21.428571701049805, "blob_id": "ecc00ffc0338f48247a78b992995acddc5899bb3", "content_id": "39e7339bafcbee870072758d79fb6821f3065bb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2511, "license_type": "no_license", "max_line_length": 75, "num_lines": 112, "path": "/json-array-to-slice-of-structs/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n)\n\n\ntype Welcome struct {\n\tSendMsg *SendMsg `json:\"sendMsg,omitempty\"`\n\tExampleStruct *ExampleStruct `json:\"example-struct,omitempty\"`\n}\n\ntype SendMsg struct {\n\tUser *string `json:\"user,omitempty\"`\n\tMsg *string `json:\"msg,omitempty\"`\n\tTag *string `json:\"tag1,omitempty\"`\n}\n\ntype ExampleStruct struct {\n\tExampleField *float64 `json:\"example\"`\n\tSecondExampleField *float64 `json:\"second-example\"`\n}\n\nfunc main() {\n\tdata := []byte(`{\n\t\t\t\t\t \"sendMsg\":{\n\t\t\t\t\t\t\"user\":\"ANisus\",\n\t\t\t\t\t\t\"msg\":\"Trying to send a message\"\n\t\t\t\t\t\t},\n\t\t\t\t\t \"example-struct\":{\n\t\t\t\t\t \t\"example\":45.0,\n\t\t\t\t\t\t\"second-example\":6.3\n\t\t\t\t\t }\n\t\t\t\t\t}`)\n\twelcome,err := UnmarshalWelcome(data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// This will print welcome.SendMsg.Msd address because its not deferenced.\n\t// That field was defined as a pointer to string type\n\t//fmt.Printf(\"%v\\n\",welcome.SendMsg.Msg)\n\t//fmt.Printf(\"%v\\n\\n\",welcome.SendMsg.Tag)\n\tfmt.Printf(\"%v\\n\", *welcome.ExampleStruct.ExampleField)\n\tfmt.Printf(\"%v\\n\\n\", *welcome.ExampleStruct.SecondExampleField)\n\n\tdata2 := []byte(`[{\n\t\t\t\t\t \t\"sendMsg\":{\n\t\t\t\t\t\t\t\"user\":\"ANisus\",\n\t\t\t\t\t\t\t\"msg\":\"Trying to send a message\"\n\t\t\t\t\t\t },\n\t\t\t\t\t \t\"example-struct\":{\n\t\t\t\t\t \t\t\"example\":45.0,\n\t\t\t\t\t\t\t\"second-example\":6.3\n\t\t\t\t\t\t}\n\t\t\t\t\t },\n\t\t\t\t\t {\n\t\t\t\t\t \t\"sendMsg\":{\n\t\t\t\t\t\t\t\"user\":\"OTRAPELSONA\",\n\t\t\t\t\t\t\t\"msg\":\"Trying asdadsxage\"\n\t\t\t\t\t\t},\n\t\t\t\t\t \t\"example-struct\":{\n\t\t\t\t\t \t\t\"example\":5.0,\n\t\t\t\t\t\t\t\"second-example\":8.03\n\t\t\t\t\t\t}\n\t\t\t\t\t } \n\t\t\t\t\t]`)\n\n\n\tstructsSlice, err := UnmarshalWelcomeElements(data2)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(\"%v\\n\", structsSlice[1].SendMsg.AnExampleMethod())\n\tfmt.Printf(\"%v\\n\", structsSlice[1].ExampleStruct.AnExampleMethod())\n\n\tcallElementsMethods := func (w []Welcome) {\n\t\tfor _ , element := range w {\n\t\t\telement.ExampleStruct.AnExampleMethod()\n\t\t\telement.SendMsg.AnExampleMethod()\n\t\t}\n\t}\n\n\tcallElementsMethods(structsSlice)\n}\n\nfunc UnmarshalWelcome(data []byte) (Welcome, error) {\n\tvar r Welcome\n\terr := json.Unmarshal(data, &r)\n\treturn r, err\n}\n\nfunc UnmarshalWelcomeElements(data []byte) ([]Welcome, error) {\n\tvar r = []Welcome{}\n\terr := json.Unmarshal(data, &r)\n\treturn r, err\n}\n\nfunc (r *Welcome) Marshal() ([]byte, error) {\n\treturn json.Marshal(r)\n}\n\nfunc (receiver *SendMsg) AnExampleMethod() string {\n\t*receiver.Msg = \"Changing fields content\"\n\treturn *receiver.Msg\n}\n\nfunc (receiver *ExampleStruct) AnExampleMethod() string {\n\t*receiver.ExampleField = 9\n\treturn \"Changing value of ExampleField of ExampleStruct to 9\"\n}" }, { "alpha_fraction": 0.6812933087348938, "alphanum_fraction": 0.6812933087348938, "avg_line_length": 20.649999618530273, "blob_id": "165eefe43f74cc76dc2c12adf4e747f5338a72dc", "content_id": "d4db0baea4b558092ffc403bb74ea0008fb64b95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 433, "license_type": "no_license", "max_line_length": 76, "num_lines": 20, "path": "/scan-input/scan-string/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport(\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n)\n\nfunc main() {\n\tscanner := bufio.NewScanner(os.Stdin)\n\tfmt.Print(\"Ingresa una oracion o una cadena de caracteres: \")\n\tscanner.Scan()\n\n\tinput := scanner.Text()\n\tfmt.Println()\n\tfmt.Printf(\"Ingresaste: %s y tiene %d caracteres. \\n\\n\", input, len(input))\n\t\n\tfmt.Printf(\"El tipo de dato de la entrada ya procesada es: %T \\n\",input)\n\tfmt.Println(\"Por lo tanto es una cadena de caracteres.\")\n}\n" }, { "alpha_fraction": 0.6708464026451111, "alphanum_fraction": 0.6771159768104553, "avg_line_length": 15.736842155456543, "blob_id": "7d1f20d489e809bc86da69cbceb1419f0a2be8af", "content_id": "615d26e9ecc34d28ecf9f7e69863c913a567a55d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 319, "license_type": "no_license", "max_line_length": 41, "num_lines": 19, "path": "/composition-experiment-01/models/beer.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package models\n\nimport \"fmt\"\n\ntype Beer struct {\n\tName string\n\tAlcohol float32\n\t//price\n\tBeverage\n\tBrand\n}\n\nfunc (b *Beer) IsBeer() {\n\tfmt.Println(\"The\", b.Name,\"beer\")\n\tfmt.Println(\"-color:\",b.Color)\n\tfmt.Println(\"-alcohol grade:\",b.Alcohol)\n\tfmt.Println(\"-brand:\",b.Brandname)\n\tfmt.Println(\"-country:\",b.Country)\n}\n\n" }, { "alpha_fraction": 0.6129032373428345, "alphanum_fraction": 0.6184107065200806, "avg_line_length": 19.174602508544922, "blob_id": "ee062e82d21c79639a56f594e7994b66366bf39e", "content_id": "973f4b228c61ff65f68ec4302fd684cd59f2ba11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 1271, "license_type": "no_license", "max_line_length": 126, "num_lines": 63, "path": "/json-to-struct-with-optional-fields/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n)\n\nfunc UnmarshalWelcome(data []byte) (Welcome, error) {\n\tvar r Welcome\n\terr := json.Unmarshal(data, &r)\n\treturn r, err\n}\n\nfunc (r *Welcome) Marshal() ([]byte, error) {\n\treturn json.Marshal(r)\n}\n\ntype Welcome struct {\n\tSendMsg *SendMsg `json:\"sendMsg,omitempty\"`\n\tSay *string `json:\"say,omitempty\"`\n}\n\ntype SendMsg struct {\n\tUser *string `json:\"user,omitempty\"`\n\tMsg *string `json:\"msg,omitempty\"`\n\tTag *string `json:\"tag1\"`\n}\n\nfunc main() {\n\tdata := []byte(`{\n\t\t\t\t\t \"sendMsg\":{\n\t\t\t\t\t\t\"user\":\"ANisus\",\n\t\t\t\t\t\t\"msg\":\"Trying to send a message\"\n\t\t\t\t\t\t},\n\t\t\t\t\t \"say\":\"Hello\"\n\t\t\t\t\t}`)\n\n\tdata2 := []byte(`{\n\t\t\t\t\t \"sendMsg\":{\n\t\t\t\t\t\t\"user\":\"ANisus\",\n\t\t\t\t\t\t\"msg\":\"Trying to send a message that is different\",\n\t\t\t\t\t\t\"tag1\":\"one-tag\"\n\t\t\t\t\t\t},\n\t\t\t\t\t \"say\":\"Hello\"\n\t\t\t\t\t}`)\n\n\n\twelcome,err := UnmarshalWelcome(data)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// This will print welcome.SendMsg.Msd address because its not deferenced. That field was defined as a pointer to string type\n\tfmt.Printf(\"%v\\n\",welcome.SendMsg.Msg)\n\tfmt.Printf(\"%v\\n\\n\",welcome.SendMsg.Tag)\n\n\twelcome2, err := UnmarshalWelcome(data2)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%v\\n\",*welcome2.SendMsg.Msg)\n\tfmt.Printf(\"%v\",*welcome2.SendMsg.Tag)\n}\n" }, { "alpha_fraction": 0.6598240733146667, "alphanum_fraction": 0.6686217188835144, "avg_line_length": 21.733333587646484, "blob_id": "81ce548795386d4dced12b8ffd1d654e02c275f1", "content_id": "cd8a61d61a939ef4bd91580e910fbe71f0e875e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 341, "license_type": "no_license", "max_line_length": 74, "num_lines": 15, "path": "/composition-experiment-01/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"experiments/composition-experiment-01/models\"\n\t\"fmt\"\n)\n\nfunc main (){\n\tb := models.Beverage{Color: \"yellow\"}\n\tpolar := models.Brand{Brandname: \"Polar\", Country: \"Venezuela\"}\n\tbeer := models.Beer{Name: \"Solera Azul\", Alcohol: 7, Beverage:b, Brand: polar}\n\tbeer.IsBeer()\n\n\tfmt.Printf(\"\\n----Type of b variable is: %T\",b)\n}\n" }, { "alpha_fraction": 0.6280992031097412, "alphanum_fraction": 0.6280992031097412, "avg_line_length": 13.235294342041016, "blob_id": "a1028d45454797930d5ec62689117fd28c25d7b5", "content_id": "c6c285a54bd67e151316a281ceda15cab345b422", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 242, "license_type": "no_license", "max_line_length": 54, "num_lines": 17, "path": "/conditionals/verify-if-file-exist/main.go", "repo_name": "RafaelDiaz7/some-programs-and-exercises", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n)\n\nfunc main() {\n\tvar fileRoute string\n\tfileRoute = \"main.c\"\n\n\tif _, err := os.Stat(fileRoute); os.IsNotExist(err) {\n\t\tfmt.Println(fileRoute, \"not exist.\")\n\t} else {\n\t\tfmt.Println(fileRoute, \"exist.\")\n\t}\n}\n" } ]
17
alexeal90/X-Serv-13.6-Calculadora
https://github.com/alexeal90/X-Serv-13.6-Calculadora
dcf618c7d7f98df3f21f188a1ed3606c9ed4d246
b10101a5ca7539152fbd9b47831c6bc8e10c809f
33ef2e9ad2b30425ef0c828675ec5e781fb0d66b
refs/heads/master
2021-01-16T13:42:43.364097
2015-02-04T19:44:22
2015-02-04T19:44:22
30,317,145
0
0
null
2015-02-04T19:36:12
2015-02-04T19:36:12
2015-02-02T15:04:35
Python
[ { "alpha_fraction": 0.5952551960945129, "alphanum_fraction": 0.6261682510375977, "avg_line_length": 21.80327796936035, "blob_id": "024f4eaa8601d921a43cad558acd3186e1bf08b0", "content_id": "af586111574a9258fc1f1c45449115a99ab1f18f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1391, "license_type": "no_license", "max_line_length": 92, "num_lines": 61, "path": "/calculadora.py", "repo_name": "alexeal90/X-Serv-13.6-Calculadora", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n\n\tAlejandro Valeriano Fernandez - GITT\n\tEjercicio 13.6\n\tCalculadora\n\n\"\"\"\n\nimport sys\n\ndef suma(operando1, operando2):\n\ttry:\n\t\treturn int(operando1) + int(operando2)\n\texcept NameError:\n\t\tprint (\"Invalid arguments\")\n\ndef rest(operando1, operando2):\n\ttry:\n\t \treturn int(operando1) - int(operando2)\n\texcept NameError:\n\t\tprint (\"Invalid arguments\")\n\ndef mult(operando1, operando2):\n\ttry:\n\t\treturn int(operando1) * int(operando2)\n\texcept NameError:\n\t\tprint (\"Invalid arguments\")\n\ndef div(operando1, operando2):\n\ttry:\n\t\treturn float(operando1) / float(operando2)\n\texcept NameError:\n\t\tprint (\"Invalid arguments\") \n\n\nif __name__ == \"__main__\":\n\n\tif len(sys.argv) != 4:\n\t\tprint\n\t\tsys.exit(\"Usage: $ python calculadora.py funcion operando1 operando2\")\n\t\t\n\tif sys.argv[1] == 'add':\n\t\tprint sys.argv[2] + ' mas ' + sys.argv[3] + ' = ' + str(suma (sys.argv[2], sys.argv[3]))\n\n\tif sys.argv[1] == 'substract':\n\t\tprint sys.argv[2] + ' menos ' + sys.argv[3] + ' = ' + str(rest (sys.argv[2], sys.argv[3]))\n\n\tif sys.argv[1] == 'multiply':\n\t\tprint sys.argv[2] + ' por ' + sys.argv[3] + ' = ' + str(mult (sys.argv[2], sys.argv[3]))\n\n\tif sys.argv[1] == 'divide':\n\t\ttry:\n\t\t\tprint sys.argv[2] + ' entre ' + sys.argv[3] + ' = ' + str(div (sys.argv[2], sys.argv[3]))\n\n\t\texcept:\n\t\t\tprint 'error al dividir'\n\telse:\n\t\tprint 'Las posibles operaciones son \"add\", \"substract\", \"multiply\" y \"divide\"'\n" } ]
1
bdflemin/scripts
https://github.com/bdflemin/scripts
a8293699e972b4e670da0054c53a2e6e5f7d1c09
2d75c4d27b9f1be3292ab622ee97c9031d360584
5a1152173bc6537be9e47d85ccbd0dec9d7f1c32
refs/heads/master
2020-04-08T22:26:50.736722
2013-09-06T23:22:09
2013-09-06T23:22:09
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5673892498016357, "alphanum_fraction": 0.5909519195556641, "avg_line_length": 30.205883026123047, "blob_id": "079608da45e301004144118c860cd980d610b2ad", "content_id": "9ac93cccaa9c4bfb842870b78d14d2003618f8f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1061, "license_type": "no_license", "max_line_length": 94, "num_lines": 34, "path": "/max_clients.sh", "repo_name": "bdflemin/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nPROCNAME=httpd\nPARENTPIDS=`comm -12 <(ps -C $PROCNAME -o ppid | sort -u) <(ps -C $PROCNAME -o pid | sort -u)`\n\nfor ParPID in $PARENTPIDS; do\n SUM=0\n COUNT=0\n for x in `ps f --ppid $ParPID -o rss | tail -n +2`; do\n SUM=$(( $SUM + $x ))\n COUNT=$(( $COUNT + 1 ))\n done\n\n MEMPP=$(( $SUM / $COUNT / 1024 ))\n FREERAM=$(( `free | tail -2 | head -1 | awk '{print $4}'` / 1024 ))\n APACHERAM=$(( $SUM / 1024 ))\n APACHEMAX=$(( $APACHERAM + $FREERAM ))\n\n (\n echo\n echo \"Info for the following parent apache process:\"\n echo \" \"`ps f --pid $ParPID -o command | tail -n +2`\n echo\n echo \"Current # of apache processes: $COUNT\"\n echo \"Average memory per apache process: $MEMPP MB\"\n echo \"Free RAM (including cache & buffers): $FREERAM MB\"\n echo \"RAM currently in use by apache: $APACHERAM MB\"\n echo \"Max RAM available to apache: $APACHEMAX MB\"\n echo \n echo \"Theoretical maximum MaxClients: $(( $APACHEMAX / $MEMPP ))\"\n echo \"Recommended MaxClients: $(( $APACHEMAX / 10 * 9 / $MEMPP ))\"\n echo\n )\ndone\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 12, "blob_id": "e77ab500e2e15dfc7a92e478a95d1d9f78d89866", "content_id": "d78e11c131b0c12ad7ff7defeac3f4ffbf69e960", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "no_license", "max_line_length": 34, "num_lines": 4, "path": "/README.md", "repo_name": "bdflemin/scripts", "src_encoding": "UTF-8", "text": "scripts\n=======\n\nHelpful Scripts I Made or Maintain\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5048308968544006, "avg_line_length": 32.15999984741211, "blob_id": "6ca443666f6e1ea24a71c19a9da96e5d4184baf5", "content_id": "a06cbb5002d7a8a7d42b9bb3b69267a27e22c2d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 828, "license_type": "no_license", "max_line_length": 261, "num_lines": 25, "path": "/dirCount.sh", "repo_name": "bdflemin/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nusage() {\n\techo \"Please use process as root.. exiting\"\n\texit 1\n}\n\n[[ `id -u` -ne 0 ]] && usage\n\necho \"-------\";\n# Complete a search on root directory (exclude root)\nfor i in $(find / -maxdepth 1 -type d | egrep -v \"^/$\" | sort); \ndo \n\t#If the directory is /proc, skip\n\t[[ \"$i\" == \"/proc\" ]] && continue;\n\t#If the directory is /sys, skip\n\t[[ \"$i\" == \"/sys\" ]] && continue;\n\t#If the directory \n\techo -ne \"$i:\\t\\t\\t\"; find $i -type f | wc -l; echo \"-------\"; \ndone\necho -ne \"/:\\t\\t\\t\"; find / -type f | wc -l;\necho \"-------\";\n\n## One Liner (run as root) ##\n#for i in $(find / -maxdepth 1 -type d | egrep -v \"^/$\" | sort); do [ \"$i\" == \"/proc\" ] && continue; [ \"$i\" == \"/sys\" ] && continue; echo -ne \"$i:\\t\\t\\t\"; find $i -type f | wc -l; echo \"-------\"; done echo -ne \"/:\\t\\t\\t\"; find / -type f | wc -l; echo \"-------\";" }, { "alpha_fraction": 0.6219838857650757, "alphanum_fraction": 0.6353887319564819, "avg_line_length": 21, "blob_id": "99a9269ad6bc0c699ebdca8785ad482142618be0", "content_id": "9d4f6e9f53fc354f616f498373017b5bc6a915ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 373, "license_type": "no_license", "max_line_length": 70, "num_lines": 17, "path": "/find_users.py", "repo_name": "bdflemin/scripts", "src_encoding": "UTF-8", "text": "from re import findall\nfrom subprocess import check_output\nfrom pwd import getpwall\n\nd = {}\nfilesearch = open(\"/etc/login.defs\",\"r\")\n\nfor line in filesearch:\n\tif findall(\"^UID_(MIN|MAX)\",line):\n\t\ta = line.strip().split()\n\t\td[str(a[0])] = str(a[1])\n\nfilesearch.close()\n\nfor p in getpwall():\n\tif int(p[2]) >= int(d['UID_MIN']) and int(p[2]) <= int(d['UID_MAX']):\n\t\tprint p[0]" }, { "alpha_fraction": 0.6926605701446533, "alphanum_fraction": 0.6987767815589905, "avg_line_length": 32.27118682861328, "blob_id": "71a9933c26b48dfc242805223b97482d3efd6e10", "content_id": "545cf90661d3d3fd10e88f9d8fdef58df31bd5f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1962, "license_type": "no_license", "max_line_length": 122, "num_lines": 59, "path": "/cfdelete.py", "repo_name": "bdflemin/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport pyrax, time, sys, multiprocessing, argparse, os, __builtin__\n\nparser = argparse.ArgumentParser(description='Remove Cloud Files Fast')\nparser.add_argument('--container', nargs='?', dest='cont', required=True, help=\"The Cloud Contain To Remove Objects From\")\nparser.add_argument('--username', nargs='?', dest='username', help=\"Your Cloud Username\")\nparser.add_argument('--password', nargs='?', dest='password', help=\"Your Cloud API Key\")\nparser.add_argument('--file', nargs='?', dest='file', help=\"Your Cloud API Key File\")\nparser.add_argument('--region', nargs='?', dest='region', help=\"Set the Cloud File region\")\nargs = parser.parse_args()\n\ndef authenticate(username='', passwd='', path=''):\n\tif username or passwd:\n\t\tpyrax.set_credentials(username,passwd)\n\telif path:\n\t\tpyrax.set_credential_file(os.path.expanduser(path))\n\telse:\n\t\tprint \"Authentication Failed... please use username/password or file to authenticate\"\n\t\tsys.exit()\n\ndef worker(num):\n\ttry:\n\t\tglobal obj\n\t\tprint \"Deleting:\", obj[num].name\n\t\tobj[num].delete()\n\t\t#time.sleep(1 + random.random()*5)\n\t\t#print num\n\texcept:\n\t\tprint \"Unexpected error in worker:\", sys.exc_info()\n\t\traise\n\ndef pooling(length):\n\ttry:\n\t\tpool = multiprocessing.Pool(processes=20)\n\t\tfor num in xrange(length):\n\t\t\tpool.apply_async(worker, [num])\n\t\t\t#pool.apply(worker,[num])\n\t\t\tpool.apply_async(time.sleep, 5)\n\t\tpool.close()\n\t\tpool.join()\n\texcept:\n\t\tprint \"Unexpected error in pooling:\", sys.exc_info()[0]\n\t\traise\n\nif __name__ == \"__main__\":\n\tauthenticate(username=args.username,passwd=args.password,path=args.file)\n\tcf = pyrax.connect_to_cloudfiles(region=args.region)\n\tlimit = 10000\n\tmarker = \"\"\n\tobj = cf.get_container(args.cont).get_objects(limit=limit, marker=marker)\n\twhile obj:\n\t\ttry:\n\t\t\tmarker = obj.pop()\n\t\t\tlength = len(obj)\n\t\t\tpooling(length)\n\t\t\tobj = cf.get_container(args.cont).get_objects(limit=limit, marker=marker.name)\n\t\texcept:\n\t\t\tprint \"Unexpected error:\", sys.exc_info()[0]\n\t\t\traise" }, { "alpha_fraction": 0.5914315581321716, "alphanum_fraction": 0.6280041933059692, "avg_line_length": 28.030303955078125, "blob_id": "8ecea1d371fe2028601af6ca1d2f2ab9b5a1a9bb", "content_id": "463b7f3080aae81ccfc156f294a134133bf77424", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 957, "license_type": "no_license", "max_line_length": 149, "num_lines": 33, "path": "/mtr_stats.py", "repo_name": "bdflemin/scripts", "src_encoding": "UTF-8", "text": "import sqlite3 as lite\nimport sys\nimport argparse\nfrom time import strftime\nfrom subprocess import check_output\n\n\ndef collect():\n # Grab information from mtr\n output = check_output([\"mtr\",\"-nr\",\"-c5\",\"50.56.142.146\"])\n date = strftime('%Y%m%d %H%M')\n \n # split the data into an array and clean up array a bit\n a = output.split(\"\\n\")\n del a[0]\n del a[-1]\n\n # Connect to the sqlite3 server to place the information into it\n con = lite.connect('/root/icmp/data.db')\n cur = con.cursor()\n\n # loop through the data and store information into sqlite\n for i in a:\n array = i.replace(\"%\",\"\").split()\n del array[0]\n cur.execute(\"insert into netreport values ('%s','%s',%0.1f,%i,%0.1f,%0.1f,%0.1f,%0.1f,%0.1f);\" % \n (str(date), str(array[0]), float(array[1]), int(array[2]), float(array[3]), float(array[4]), float(array[5]), float(array[6]), float(array[7]),))\n con.commit()\n if con:\n con.close()\n\nif __name__ == '__main__':\n collect()" }, { "alpha_fraction": 0.5521191358566284, "alphanum_fraction": 0.5704467296600342, "avg_line_length": 37, "blob_id": "4baea87c5321bf0738c910ff5a6303aecdbe5d0d", "content_id": "2abc39118d7c0443ed03ddbaab6f4e703ea867b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 873, "license_type": "no_license", "max_line_length": 102, "num_lines": 23, "path": "/findLargestFile.py", "repo_name": "bdflemin/scripts", "src_encoding": "UTF-8", "text": "import os, re\ndef filePop(top):\n sizeList = []\n #exclude = \"^/proc.*|^/sys.*|^/boot.*|^/tmp.*|^/mnt.*\"\n exclude = \"^/proc.*|^/sys.*|^/boot.*|/tmp.*|/home.*|/var.*|/data.*\"\n # Skip any files that are located in /proc, /sys or /boot\n for root,dirs,files in os.walk(top):\n if re.findall(exclude,root):\n continue\n for f in files:\n fullpath = os.path.join(root,f)\n if (os.path.isfile(fullpath) or os.path.isdir(fullpath)) and not os.path.islink(fullpath):\n sizeList.append((os.path.getsize(fullpath),fullpath))\n return sizeList\n\ndef fileSort(fileList,top=15):\n sList = sorted(fileList, key=lambda a: a[0], reverse=True)\n for i in xrange(0,15):\n size = ((sList[i][0] / 1024) / 1024)\n directory = sList[i][1]\n print '%s MB --> %s' % (size,directory)\n\nfileSort(filePop(\"/\"))" }, { "alpha_fraction": 0.5925176739692688, "alphanum_fraction": 0.6056622862815857, "avg_line_length": 51.105262756347656, "blob_id": "cf77095d901d961f9342d13a3d549876fc3d4f75", "content_id": "fd9f012f08c3d36fcaa256e217659237a450aadd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 989, "license_type": "no_license", "max_line_length": 365, "num_lines": 19, "path": "/swappiness", "repo_name": "bdflemin/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\nfor dir in `ls -ad /proc/{1..9}*`;\ndo\n\t# check to see if the smaps file is there and if the 'Swap' variable is in the file \n\t# I wanted to check if the file was not empty but all smaps files show a size of 0\n\tif [ -f \"$dir/smaps\" -a -n \"`grep -i swap $dir/smaps | head -1`\" ]; then\n\t\tpid=`echo $dir | cut -d / -f3`\n\t\tprocess=`ps -p $pid -o comm --no-headers`\n\t\techo -n \"$process : \"\n\t\tawk '/Swap/{sum+=$2}END{print sum}' $dir/smaps | column -t\n\tfi\ndone\n\n# Show the total from all the processes running at the end.\necho -en \"\\nTotal : \"\nawk '/Swap/{sum+=$2}END{print sum}' /proc/*/smaps | column -t\n\n## One Liner\n# for dir in `ls -ad /proc/{1..9}*`; do if [ -f \"$dir/smaps\" -a -n \"`grep -i swap $dir/smaps | head -1`\" ]; then pid=`echo $dir | cut -d / -f3`; process=`ps -p $pid -o comm --no-headers`; echo -n \"$process : \"; awk '/Swap/{sum+=$2}END{print sum}' $dir/smaps | column -t; fi; done; echo -en \"\\nTotal : \"; awk '/Swap/{sum+=$2}END{print sum}' /proc/*/smaps | column -t" }, { "alpha_fraction": 0.7030162215232849, "alphanum_fraction": 0.7262181043624878, "avg_line_length": 26, "blob_id": "c017af3444a82376c4a4c9e25268d6846a588d23", "content_id": "cac96d6a0aeef0ab0bdf39d02687b8e09a48daa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 84, "num_lines": 16, "path": "/pyinotify.py", "repo_name": "bdflemin/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport pyinotify\n\nclass MyEventHandler(pyinotify.ProcessEvent):\n\tdef process_IN_CREATE(self, event):\n\t\tprint \"CREATE event:\", event.pathname\n\tdef process_IN_DELETE(self, event):\n\ndef main():\n\twm = pyinotify.WatchManager()\n\twm.add_watch('/home/brya5376/test'), pyinotify.ALL_EVENTS, rec=True)\n\nif __name__ == '__main__':\n\tmain()\n\n# http://www.saltycrane.com/blog/2010/04/monitoring-filesystem-python-and-pyinotify/" }, { "alpha_fraction": 0.6361607313156128, "alphanum_fraction": 0.65625, "avg_line_length": 20.33333396911621, "blob_id": "aa33a951130727e93f2e7c3bbb3105ae081a2aa2", "content_id": "1d8a18dac4e6d42854147b0831fe2b66d8ff5664", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 448, "license_type": "no_license", "max_line_length": 107, "num_lines": 21, "path": "/changepw.sh", "repo_name": "bdflemin/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nusage() {\n\techo \"Usage: $0 <username> <password>\"\n\texit 1\n}\n\n[[ $# -ne 2 ]] && usage;\n\nHISTFILE=/dev/null\nUSERNAME=$1\nPASSWORD=$2\nCHPASSWD=`which chpasswd`\nUSERADD=`which useradd`\n\ngetent passwd $1 > /dev/null 2>&1\nstatus=$?\n\n[[ $status -gt 0 ]] && $USERADD -m -s /bin/bash $USERNAME && echo \"User $USERNAME has been created\"\n\necho \"$USERNAME:$PASSWORD\" |$CHPASSWD && echo \"The user $USERNAME has had their password set to $PASSWORD\"\n" } ]
10
Maveric4/SudokuSolver
https://github.com/Maveric4/SudokuSolver
2cf330e10e7450fe1de7168d9b7d7426353a5ba0
8b016997474de65cc8680f52d5c6de246bf88b7c
1e9a81275b175e81e3c25e4a5af442374f06de73
refs/heads/master
2021-01-07T06:52:19.508599
2020-03-08T22:45:34
2020-03-08T22:45:34
241,611,013
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6081730723381042, "alphanum_fraction": 0.6097756624221802, "avg_line_length": 28.738094329833984, "blob_id": "a73c93751af710f1587690339f72ac61fbe72875", "content_id": "520cf481fd20c2a7ff1b39ecbb15f67e86cdfb0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1250, "license_type": "no_license", "max_line_length": 94, "num_lines": 42, "path": "/mobile_app/SudokuSolver/SudokuSolver/SudokuSolver/BrokerSetupPage.xaml.cs", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Diagnostics;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing uPLibrary.Networking.M2Mqtt;\nusing Xamarin.Forms;\nusing Xamarin.Forms.Xaml;\n\nnamespace SudokuSolver\n{\n [XamlCompilation(XamlCompilationOptions.Compile)]\n public partial class BrokerSetupPage : ContentPage\n {\n private MqttClient client;\n private string clientId;\n\n public BrokerSetupPage()\n {\n InitializeComponent();\n }\n\n private void OnConnectBrokerButtonClicked(object sender, EventArgs e)\n {\n Application.Current.Properties[\"BrokerIP\"] = EntryBrokerIP.Text;\n try\n {\n clientId = Guid.NewGuid().ToString();\n client = new MqttClient(Application.Current.Properties[\"BrokerIP\"] as string);\n client.Connect(clientId);\n Application.Current.Properties[\"client\"] = client;\n Navigation.PushAsync(new MainPage());\n }\n catch (Exception ex)\n {\n //Navigation.PushAsync(new BrokerSetupPage());\n Debug.WriteLine(\"{0} Exception caught.\", ex);\n }\n }\n }\n}" }, { "alpha_fraction": 0.4483378231525421, "alphanum_fraction": 0.5031446814537048, "avg_line_length": 26.8125, "blob_id": "776784148bb40f3c92b8cdef0444a6e5a9812b9a", "content_id": "3fbc01144502dd58eb8adece01d32cbbf1c3d02c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2226, "license_type": "no_license", "max_line_length": 75, "num_lines": 80, "path": "/sudoku_solver.py", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom random import randint\nfrom copy import deepcopy\nimport cv2\nimport utils\nimport grid\n\n# Global variables\nsudoku_grid = [[5, 3, 0, 0, 7, 0, 0, 0, 0],\n [6, 0, 0, 1, 9, 5, 0, 0, 0],\n [0, 9, 8, 0, 0, 0, 0, 6, 0],\n [8, 0, 0, 0, 6, 0, 0, 0, 3],\n [4, 0, 0, 8, 0, 3, 0, 0, 1],\n [7, 0, 0, 0, 2, 0, 0, 0, 6],\n [0, 6, 0, 0, 0, 0, 2, 8, 0],\n [0, 0, 0, 4, 1, 9, 0, 0, 5],\n [0, 0, 0, 0, 8, 0, 0, 0, 9]]\ncounter = 0\nsolutions = []\nrecur_cnt = 0\n\n\ndef is_possible(y, x, n):\n global sudoku_grid\n for i in range(0, 9):\n if sudoku_grid[y][i] == n:\n return False\n for j in range(0, 9):\n if sudoku_grid[j][x] == n:\n return False\n x0 = (x//3)*3\n y0 = (y//3)*3\n for k in range(0, 3):\n for l in range(0, 3):\n if sudoku_grid[y0+k][x0+l] == n:\n return False\n return True\n\n\ndef solve_recursion():\n global sudoku_grid, counter, solutions, recur_cnt\n recur_cnt += 1\n if recur_cnt > 10**5:\n return\n for y in range(9):\n for x in range(9):\n if sudoku_grid[y][x] == 0:\n for n in range(1, 10):\n if is_possible(y, x, n):\n sudoku_grid[y][x] = n\n solve_recursion()\n sudoku_grid[y][x] = 0\n return\n counter += 1\n solutions.append(deepcopy(sudoku_grid))\n\n\ndef main():\n global sudoku_grid, counter, solutions\n model = utils.load_mnist_model()\n img = cv2.imread(\"./SudokuOnline/puzzle1.jpg\")\n\n sudoku_grid = grid.recognize_grid(model, img)\n\n solve_recursion()\n print(\"Number or recurrent function invocations: {}\".format(recur_cnt))\n print(\"There are {} possible solutions\".format(counter))\n if len(solutions) > 0:\n print(\"Random solution:\")\n solved_grid = solutions[randint(0, counter - 1)]\n print(np.matrix(solved_grid))\n\n img_solved = grid.draw_solved_grid(model, img, solved_grid)\n cv2.imwrite(\"./results/result1.jpg\", img_solved)\n cv2.imshow(\"Solved sudoku\", img_solved)\n cv2.waitKey(0)\n\n\nif __name__ == \"__main__\":\n main()\n\n" }, { "alpha_fraction": 0.559440553188324, "alphanum_fraction": 0.5635727643966675, "avg_line_length": 30.77777862548828, "blob_id": "a5c1884a50a05a1b6549726ebfe0a0759fc53b68", "content_id": "20193e6c493e31f6d41916da528c3b52e0e90507", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 3148, "license_type": "no_license", "max_line_length": 128, "num_lines": 99, "path": "/mobile_app/SudokuSolver/SudokuSolver/SudokuSolver/MainPage.xaml.cs", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.ComponentModel;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing Xamarin.Forms;\nusing uPLibrary.Networking.M2Mqtt;\nusing uPLibrary.Networking.M2Mqtt.Messages;\nusing System.Net;\nusing Plugin.Media;\nusing Plugin.Media.Abstractions;\nusing System.IO;\n\nnamespace SudokuSolver\n{\n\n [DesignTimeVisible(false)]\n public partial class MainPage : ContentPage\n {\n private MqttClient client;\n string mqttMSG = string.Empty;\n string topic = string.Empty;\n public MainPage()\n {\n Application.Current.Properties[\"BrokerIP\"] = \"192.168.9.201\";\n client = new MqttClient(Application.Current.Properties[\"BrokerIP\"] as string);\n string clientId = Guid.NewGuid().ToString();\n try\n {\n client.Connect(clientId);\n }\n catch(Exception e){\n Navigation.PushAsync(new BrokerSetupPage());\n }\n // register to message received \n client.MqttMsgPublishReceived += client_MqttMsgPublishReceived;\n\n // subscribe to every sudoku topic\n client.Subscribe(new string[] { \"sudoku/#\" }, new byte[] { MqttMsgBase.QOS_LEVEL_EXACTLY_ONCE });\n }\n\n private void client_MqttMsgPublishReceived(object sender, MqttMsgPublishEventArgs e)\n {\n topic = e.Topic;\n if (topic == \"sudoku/solution/photo\")\n {\n Application.Current.Properties[\"SolvedSudokuImage\"] = ImageSource.FromStream(() => new MemoryStream(e.Message));\n Device.BeginInvokeOnMainThread(() =>\n {\n ShowSolutionButton.IsEnabled = true;\n });\n }\n else\n {\n mqttMSG = System.Text.Encoding.UTF8.GetString(e.Message);\n }\n }\n\n void OnShowSolutionButtonClicked(object sender, EventArgs e)\n {\n ShowSolutionButton.IsEnabled = false;\n Navigation.PushAsync(new SolutionPage());\n }\n\n private async void OnSendPhotoButtonClicked(object sender, EventArgs e)\n {\n await CrossMedia.Current.Initialize();\n\n if (!CrossMedia.Current.IsPickPhotoSupported)\n {\n await DisplayAlert(\"Sorry. \", \"Pick photo is not supported!\", \"OK\");\n return;\n }\n\n var file = await CrossMedia.Current.PickPhotoAsync();\n\n if (file == null)\n return;\n\n byte[] imageArray = null;\n if (file != null)\n {\n using (MemoryStream ms = new MemoryStream())\n {\n var stream = file.GetStream();\n stream.CopyTo(ms);\n imageArray = ms.ToArray();\n }\n }\n\n client = Application.Current.Properties[\"client\"] as MqttClient;\n client.Publish(\"sudoku/photo\", imageArray);\n\n ImageChosen.Source = ImageSource.FromStream(() => new MemoryStream(imageArray));\n\n }\n }\n}\n" }, { "alpha_fraction": 0.7966101765632629, "alphanum_fraction": 0.8022598624229431, "avg_line_length": 43.5, "blob_id": "e7b825dfc18788bfe91f813b255326bd6049ac0e", "content_id": "69583b1a4ec669d18e01ed3a320ef52a82466ec8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 177, "license_type": "no_license", "max_line_length": 135, "num_lines": 4, "path": "/README.md", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "# Sudoku Solver\nSolving sudoku using OpenCV library to extract sudoku grid from image and convolutional neural network (MNIST set) to recognize digits.\n\n![](results/result1.jpg)" }, { "alpha_fraction": 0.6595744490623474, "alphanum_fraction": 0.6919729113578796, "avg_line_length": 34.63793182373047, "blob_id": "b06bdf1f696199c296f796067eac02c92b74429b", "content_id": "80518f80b9528d3c6077ad9c90b1e0d53a444135", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2068, "license_type": "no_license", "max_line_length": 93, "num_lines": 58, "path": "/MNISTmodel/train_mnist1.py", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport datetime\nimport os\nimport numpy as np\nfrom tensorflow.python.keras.callbacks import TensorBoard\n\nmnist = tf.keras.datasets.mnist\n(training_images, training_labels), (test_images, test_labels) = mnist.load_data()\n\n# print(training_images.shape)\n# print(test_images.shape)\ntraining_images = training_images / 255.0\ntest_images = test_images / 255.0\ntraining_images = training_images.reshape(training_images.shape[0], 28, 28, 1)\ntest_images = test_images.reshape(test_images.shape[0], 28, 28, 1)\n\ntest_images, validation_images = np.split(test_images, [int(test_images.shape[0]*0.4)])\ntest_labels, validation_labels = np.split(test_labels, [int(test_labels.shape[0]*0.4)])\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\n\n## Designing callbacks\nclass myCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n print(\"\\nReached {} epoch\".format(epoch + 1))\n if logs.get('accuracy') > 0.997:\n print(\"Reached 99.99% accuracy so cancelling training!\")\n self.model.stop_training = True\n\n\nlog_dir = os.path.join(\n \"logs\",\n \"fit\",\n datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\ntensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)\n\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(training_images,\n training_labels,\n validation_data=(validation_images, validation_labels),\n epochs=20,\n callbacks=[myCallback(), tensorboard_callback],\n verbose=2)\n\n# model.summary()\nmetrics = model.evaluate(test_images, test_labels)\nprint(\"[Loss, Accuracy]\")\nprint(metrics)\nmodel.save(\"./models/train_mnist1_model3.h5\")\n\n" }, { "alpha_fraction": 0.6280992031097412, "alphanum_fraction": 0.6280992031097412, "avg_line_length": 24.964284896850586, "blob_id": "9f5af753e3c15dddbf31cab0edbd1dce85cf8d34", "content_id": "154558f28fb456bdff596025ed251bf47626bd35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 728, "license_type": "no_license", "max_line_length": 110, "num_lines": 28, "path": "/mobile_app/SudokuSolver/SudokuSolver/SudokuSolver/SolutionPage.xaml.cs", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\n\nusing Xamarin.Forms;\nusing Xamarin.Forms.Xaml;\n\nnamespace SudokuSolver\n{\n [XamlCompilation(XamlCompilationOptions.Compile)]\n public partial class SolutionPage : ContentPage\n {\n public SolutionPage()\n {\n InitializeComponent();\n if (Application.Current.Properties.ContainsKey(\"SolvedSudokuImage\"))\n {\n SolvedSudokuImage.Source = Application.Current.Properties[\"SolvedSudokuImage\"] as ImageSource;\n }\n else\n {\n TopLabel.Text = \"SolvedSudokuImage not found :(\";\n }\n }\n }\n}" }, { "alpha_fraction": 0.5337318778038025, "alphanum_fraction": 0.5354397892951965, "avg_line_length": 23.914894104003906, "blob_id": "da4739a1a0f4dc62ef354d7403d5fbfa1ad4cda1", "content_id": "be554ab8da7406982b2b6aa0213ebaaeb5a2f8e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1173, "license_type": "no_license", "max_line_length": 103, "num_lines": 47, "path": "/mobile_app/SudokuSolver/SudokuSolver/SudokuSolver/App.xaml.cs", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "using System;\nusing System.Net;\nusing Xamarin.Forms;\nusing Xamarin.Forms.Xaml;\nusing uPLibrary.Networking.M2Mqtt;\nusing uPLibrary.Networking.M2Mqtt.Messages;\n\nnamespace SudokuSolver\n{\n public partial class App : Application\n {\n\n public App()\n {\n InitializeComponent();\n MainPage = new NavigationPage(new MainPage());\n }\n\n protected override void OnStart()\n {\n // Handle when your app starts\n //string clientId = Guid.NewGuid().ToString();\n //MqttClient client = new MqttClient(Application.Current.Properties[\"BrokerIP\"] as string);\n //try\n //{\n // client.Connect(clientId);\n // MainPage = new NavigationPage(new MainPage());\n //}\n //catch(Exception e)\n //{\n // MainPage = new NavigationPage(new BrokerSetupPage());\n //}\n }\n\n protected override void OnSleep()\n {\n // Handle when your app sleeps\n\n }\n\n protected override void OnResume()\n {\n // Handle when your app resumes\n \n }\n }\n}\n" }, { "alpha_fraction": 0.509246826171875, "alphanum_fraction": 0.5473063588142395, "avg_line_length": 27.69230842590332, "blob_id": "bfc4edb9c7db0f35c8c07cf5521e059dbaca23c4", "content_id": "682841b145b708b6e0a7740dd60d53c107c22c68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3731, "license_type": "no_license", "max_line_length": 85, "num_lines": 130, "path": "/solve_sudoku_from_app.py", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom random import randint\nfrom copy import deepcopy\nimport cv2\nimport utils\nimport grid\nimport paho.mqtt.client as mqtt\nimport io\nfrom PIL import Image\n\n# Global variables\nBROKER_ADRESS = \"192.168.9.201\"\nsudoku_grid = [[5, 3, 0, 0, 7, 0, 0, 0, 0],\n [6, 0, 0, 1, 9, 5, 0, 0, 0],\n [0, 9, 8, 0, 0, 0, 0, 6, 0],\n [8, 0, 0, 0, 6, 0, 0, 0, 3],\n [4, 0, 0, 8, 0, 3, 0, 0, 1],\n [7, 0, 0, 0, 2, 0, 0, 0, 6],\n [0, 6, 0, 0, 0, 0, 2, 8, 0],\n [0, 0, 0, 4, 1, 9, 0, 0, 5],\n [0, 0, 0, 0, 8, 0, 0, 0, 9]]\ncounter = 0\nsolutions = []\nrecur_cnt = 0\nIMG_NAME = 'puzzle1.jpg'\n\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected to broker with result code \" + str(rc))\n client.subscribe(\"sudoku/#\")\n\n\ndef on_message(client, userdata, msg):\n global counter\n counter = 0\n if msg.topic == \"sudoku/photo\":\n try:\n stream = io.BytesIO(msg.payload)\n open_cv_image = np.array(Image.open(stream).convert('RGB'))\n # Convert RGB to BGR\n open_cv_image = open_cv_image[:, :, ::-1].copy()\n cv2.imwrite('./mqtt_com/' + IMG_NAME, open_cv_image)\n except Exception as e:\n print(\"Exception: \")\n print(e)\n solve_sudoku()\n send_solution(client)\n if msg.payload.decode() == \"End\":\n print(\"Okey! I'm disconnecting :)\")\n client.disconnect()\n\n\ndef send_message(client, topic, msg):\n client.publish(topic, msg)\n\n\ndef is_possible(y, x, n):\n global sudoku_grid\n for i in range(0, 9):\n if sudoku_grid[y][i] == n:\n return False\n for j in range(0, 9):\n if sudoku_grid[j][x] == n:\n return False\n x0 = (x//3)*3\n y0 = (y//3)*3\n for k in range(0, 3):\n for l in range(0, 3):\n if sudoku_grid[y0+k][x0+l] == n:\n return False\n return True\n\n\ndef solve_recursion():\n global sudoku_grid, counter, solutions, recur_cnt\n recur_cnt += 1\n if recur_cnt > 10**5:\n return\n for y in range(9):\n for x in range(9):\n if sudoku_grid[y][x] == 0:\n for n in range(1, 10):\n if is_possible(y, x, n):\n sudoku_grid[y][x] = n\n solve_recursion()\n sudoku_grid[y][x] = 0\n return\n counter += 1\n solutions.append(deepcopy(sudoku_grid))\n\n\ndef solve_sudoku():\n global sudoku_grid, counter, solutions\n model = utils.load_mnist_model()\n img = cv2.imread(\"./mqtt_com/\" + IMG_NAME)\n sudoku_grid = grid.recognize_grid(model, img)\n\n solve_recursion()\n print(\"Number or recurrent function invocations: {}\".format(recur_cnt))\n print(\"There are {} possible solutions\".format(counter))\n if len(solutions) > 0:\n print(\"Random solution:\")\n solved_grid = solutions[randint(0, counter - 1)]\n print(np.matrix(solved_grid))\n\n img_solved = grid.draw_solved_grid(model, img, solved_grid)\n cv2.imwrite(\"./results/\" + IMG_NAME, img_solved)\n # cv2.imshow(\"Solved sudoku\", img_solved)\n # cv2.waitKey(0)\n\n\ndef send_solution(client):\n global solutions, counter\n with open(\"./results/\" + IMG_NAME, \"rb\") as f:\n fileContent = f.read()\n byteArrayPhoto = bytearray(fileContent)\n client.publish(\"sudoku/solution/photo\", byteArrayPhoto)\n # client.publish(\"sudoku/solution/grid\", str(solutions[randint(0, counter - 1)]))\n\n\ndef main():\n client = mqtt.Client()\n client.connect(BROKER_ADRESS, 1883, 60)\n client.on_connect = on_connect\n client.on_message = on_message\n client.loop_forever()\n\n\nif __name__ == \"__main__\":\n main()\n\n" }, { "alpha_fraction": 0.6845169067382812, "alphanum_fraction": 0.7054714560508728, "avg_line_length": 25.030303955078125, "blob_id": "56d632fce3517a3dc9c21ba3b03b597b45612905", "content_id": "109562f87b2b83994e33dab4f8f1b8345459040a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 859, "license_type": "no_license", "max_line_length": 83, "num_lines": 33, "path": "/MNISTmodel/test_mnist.py", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "# import tensorflow as tf\nimport cv2\nimport sys\nsys.path.append(\"..\")\nimport utils\nimport numpy as np\n\nmodel_path = \"./models/train_mnist1_model3.h5\"\nimg_path = \"../img/seven.png\"\n# img_path = \"../img/one.png\"\n# img_path = \"../img/six.png\"\n\nmnist_model = utils.load_model(model_path)\n\n## Way 1\nprint(\"Way 1\")\ndigit_img = utils.standarize_digit_img_to_model_input(img_path, 28)\nbin_digit_img = utils.binarize_img(digit_img)\nimg = utils.prepare_to_predict(bin_digit_img)\n\ncv2.imshow(\"Digit\", digit_img)\ncv2.imshow(\"Binary digit\", bin_digit_img)\ncv2.waitKey(50)\n\nprob_predictions = mnist_model.predict(img)\nprediction = [(np.where(item == np.amax(item)))[0][0] for item in prob_predictions]\nprint(\"Prediction: {}\".format(prediction[0]))\n\n\n## Way 2\nprint(\"Way 2\")\nprediction = utils.predict_digit(mnist_model, img_path)\nprint(\"Prediction: {}\".format(prediction))\n" }, { "alpha_fraction": 0.5934712290763855, "alphanum_fraction": 0.6259124279022217, "avg_line_length": 43.83636474609375, "blob_id": "453ce429863387af16e50c0dae4c436bcd60ce65", "content_id": "7dd3352bdb14ba10a45c314b2b5c499765ac369e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4932, "license_type": "no_license", "max_line_length": 173, "num_lines": 110, "path": "/grid.py", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "import cv2\nfrom copy import deepcopy\nimport numpy as np\nimport utils\n\nRESCALE = 3\n\ndef find_cell_param(joints):\n # Set up the detector with default parameters.\n params = cv2.SimpleBlobDetector_Params()\n # filter by area\n params.filterByArea = True\n params.minArea = 1\n params.maxArea = 50\n detector = cv2.SimpleBlobDetector_create(params)\n # Detect blobs\n keypoints = detector.detect(~joints)\n sorted_keypoints = sorted(keypoints, key=lambda x: (x.pt[0], x.pt[1]))\n min_keypoint = sorted_keypoints[0]\n max_keypoint = sorted_keypoints[-1]\n # for it, keypoint in enumerate(keypoints):\n # img_contours = deepcopy(img)\n # Draw detected blobs as red circles.\n # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob\n # im_with_keypoints = cv2.drawKeypoints(img_contours, [min_keypoint, max_keypoint], np.array([]), (0, 0, 255),\n # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n # cv2.imshow(\"Keypoints\", im_with_keypoints)\n # cv2.waitKey(0)\n return (max_keypoint.pt[0] - min_keypoint.pt[0]) / 7, (max_keypoint.pt[1] - min_keypoint.pt[1]) / 7, min_keypoint.pt, max_keypoint.pt\n\n\ndef get_joints(img):\n img = cv2.resize(img, (int(img.shape[1]/RESCALE), int(img.shape[0]/RESCALE)))\n # retval = cv2.getPerspectiveTransform(img) TO DO https://blog.ayoungprogrammer.com/2013/03/tutorial-creating-multiple-choice.html/\n img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n bin_img = cv2.adaptiveThreshold(cv2.bitwise_not(img_gray), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2)\n # cv2.imshow(\"Bin: \", bin_img)\n # cv2.waitKey(0)\n scale = 20\n horizontal_size = bin_img.shape[0] // scale\n horizontal_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))\n img_eroded_horizontal = cv2.erode(bin_img, horizontal_structure, anchor=(-1, -1))\n img_dilated_horizontal = cv2.erode(img_eroded_horizontal, horizontal_structure, anchor=(-1, -1))\n\n vertical_size = bin_img.shape[1] // scale\n vertical_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vertical_size))\n img_eroded_vertical = cv2.erode(bin_img, vertical_structure, anchor=(-1, -1))\n img_dilated_vertical = cv2.erode(img_eroded_vertical, vertical_structure, anchor=(-1, -1))\n\n # mask = img_dilated_vertical + img_dilated_horizontal\n joints = cv2.bitwise_and(img_dilated_horizontal, img_dilated_vertical)\n # cv2.imshow(\"joints: \", joints)\n # cv2.waitKey(0)\n return bin_img, joints\n\n\ndef recognize_grid(model, img):\n bin_img, joints = get_joints(img)\n cell_height, cell_width, min_pt, max_pt = find_cell_param(joints)\n grid = []\n for x in range(-1, 8):\n row = []\n for y in range(-1, 8):\n roi = bin_img[int(min_pt[1]+cell_width*x):int(min_pt[1]+cell_width*(x+1)),\n int(min_pt[0]+cell_height*y):int(min_pt[0]+cell_height*(y+1))]\n alpha = 0.1\n roi = roi[int(roi.shape[1]*alpha):int(roi.shape[1]*(1-alpha)), int(roi.shape[0]*alpha):int(roi.shape[0]*(1-alpha))]\n row.append(utils.predict_digit(model, roi))\n # cv2.imshow(\"ROI: \", roi)\n # cv2.waitKey(0)\n grid.append(row)\n return grid\n\n\ndef draw_solved_grid(model, img, solved_sudoku):\n solved_img = deepcopy(cv2.resize(img, (int(img.shape[1] / RESCALE), int(img.shape[0] / RESCALE))))\n bin_img, joints = get_joints(img)\n cell_height, cell_width, min_pt, max_pt = find_cell_param(joints)\n for x in range(-1, 8):\n for y in range(-1, 8):\n roi = bin_img[int(min_pt[1]+cell_width*x):int(min_pt[1]+cell_width*(x+1)),\n int(min_pt[0]+cell_height*y):int(min_pt[0]+cell_height*(y+1))]\n alpha = 0.1\n roi = roi[int(roi.shape[1]*alpha):int(roi.shape[1]*(1-alpha)), int(roi.shape[0]*alpha):int(roi.shape[0]*(1-alpha))]\n if utils.predict_digit(model, roi) == 0:\n pt = (int((min_pt[0] + cell_height * y + min_pt[0] + cell_height * (y + 1))/2) - 5, int((min_pt[1] + cell_width * x + min_pt[1] + cell_width * (x + 1))/2)+8)\n cv2.putText(solved_img, str(solved_sudoku[x+1][y+1]), pt, cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)\n return solved_img\n\n\ndef main():\n model = utils.load_mnist_model()\n img = cv2.imread(\"./SudokuOnline/puzzle1.jpg\")\n\n sudoku_grid = recognize_grid(model, img)\n print(np.matrix(sudoku_grid))\n\n img = cv2.resize(img, (int(img.shape[1]/RESCALE), int(img.shape[0]/RESCALE)))\n cv2.imshow(\"Img: \", img)\n # cv2.imshow(\"Gray: \", img_gray)\n # cv2.imshow(\"Bin: \", bin_img)\n # cv2.imshow(\"Dilated horizontal: \", img_dilated_horizontal)\n # cv2.imshow(\"Dilated vertical: \", img_dilated_vertical)\n # cv2.imshow(\"Joints: \", joints)\n # cv2.imshow(\"Mask: \", mask)\n cv2.waitKey(0)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6298342347145081, "alphanum_fraction": 0.6614049077033997, "avg_line_length": 27.133333206176758, "blob_id": "3d39079658c02fee0131a7d9b76921c66c4a89f8", "content_id": "5f9a6462f845a36ae4075f6a42b81baefd21ebed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1267, "license_type": "no_license", "max_line_length": 91, "num_lines": 45, "path": "/utils.py", "repo_name": "Maveric4/SudokuSolver", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimport tensorflow\n\n\ndef standarize_digit_img_to_model_input(img, size):\n if isinstance(img, str):\n img = cv2.imread(img)\n img_resized = cv2.resize(img, (size, size))\n return img_resized\n\n\ndef binarize_img(img):\n gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n blur = cv2.GaussianBlur(gray_img, (5, 5), 0)\n ret, th = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n return cv2.bitwise_not(th)\n\n\ndef prepare_to_predict(img):\n return img.reshape(1, 28, 28, 1) / 255.0\n\n\ndef predict_digit(model, img):\n digit_img = standarize_digit_img_to_model_input(img, 28)\n if len(img.shape) == 3:\n bin_digit_img = binarize_img(digit_img)\n else:\n bin_digit_img = digit_img\n img = prepare_to_predict(bin_digit_img)\n prob_predictions = model.predict(img)\n if np.any(prob_predictions > 0.7):\n prediction = [(np.where(item == np.amax(item)))[0][0] for item in prob_predictions]\n return prediction[0]\n else:\n return 0\n\n\ndef load_model(model_path):\n return tensorflow.keras.models.load_model(model_path)\n\n\ndef load_mnist_model():\n model_path = \"./MNISTmodel/models/train_mnist1_model3.h5\"\n return tensorflow.keras.models.load_model(model_path)\n\n" } ]
11
bernarducs/mei
https://github.com/bernarducs/mei
d91c3069432682092ee87156da4be8bc832ada49
c21c8e72aa9d6666f82e62a1982ed5d8d6fe2b5f
439daa54575a54de3503db33c6a24b71a90507ab
refs/heads/master
2021-06-19T16:30:03.609662
2021-04-17T14:50:02
2021-04-17T14:50:02
216,574,022
0
0
null
2019-10-21T13:23:07
2021-04-17T14:50:16
2021-06-02T01:00:19
Python
[ { "alpha_fraction": 0.7317784428596497, "alphanum_fraction": 0.7332361340522766, "avg_line_length": 35.105262756347656, "blob_id": "d212018fe3ddf01dffaa7199c0ac5bc632a492ba", "content_id": "b4a45e8f9173fa19cbba6e6aefce9050bed82ded", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 686, "license_type": "no_license", "max_line_length": 82, "num_lines": 19, "path": "/init.py", "repo_name": "bernarducs/mei", "src_encoding": "UTF-8", "text": "from selenium import webdriver\n\n\ndef config(path_folder: str, headless: bool):\n fp = webdriver.FirefoxProfile()\n fp.set_preference(\"browser.download.folderList\", 2)\n fp.set_preference(\"browser.download.manager.showWhenStarting\", False)\n fp.set_preference(\"browser.download.dir\", path_folder)\n fp.set_preference(\"browser.helperApps.neverAsk.saveToDisk\", \"application/csv\")\n fp.set_preference(\"dom.disable_beforeunload\", True)\n fp.set_preference(\"browser.download.manager.closeWhenDone\", True)\n\n options = webdriver.FirefoxOptions()\n if headless:\n options.add_argument('-headless')\n\n driver = webdriver.Firefox(fp, options=options)\n\n return driver\n" }, { "alpha_fraction": 0.569753110408783, "alphanum_fraction": 0.5762345790863037, "avg_line_length": 37.343196868896484, "blob_id": "97e04bbe3d8ba7df18a81bab0ff053a6d21bbe0f", "content_id": "d7cefe08306775068291043c05ef6b4af49b5a79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6494, "license_type": "no_license", "max_line_length": 110, "num_lines": 169, "path": "/mei.py", "repo_name": "bernarducs/mei", "src_encoding": "UTF-8", "text": "import os\nimport time\n\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.expected_conditions import presence_of_element_located\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom helpers import print_timestamp\n\n\nclass Mei:\n def __init__(self, driver, files_path, uf):\n self.driver = driver\n self.files_path = os.path.join(os.getcwd(), files_path)\n # print(self.files_path)\n self.uf = uf\n\n def _retorna_xpath(self, driver, timeout, freq, xpath):\n wbw = WebDriverWait(driver=driver,\n timeout=timeout,\n poll_frequency=freq)\n wbw.until(presence_of_element_located((By.XPATH, xpath)),\n \"Elemento não encontrado.\")\n xpath = driver.find_element_by_xpath(xpath)\n return xpath\n\n def retorna_tabela(self, xpath_btn_consulta, xpath_tab_completa):\n time.sleep(2)\n print('Extraindo tabela.', print_timestamp())\n tentativas = [1, 2, 3]\n for i in tentativas:\n print(f\"Tentativa {i} de 3...\")\n self.driver.find_element_by_xpath(xpath_btn_consulta).click()\n try:\n self._retorna_xpath(self.driver, 150, 5, xpath_tab_completa)\n print('Tabela carregada.', print_timestamp())\n return True\n except TimeoutException:\n print('Tabela não foi carregada.')\n return False\n\n def del_arquivos_inuteis(self):\n files_path = self.files_path\n for file in os.listdir(files_path):\n if file[:13] == 'relatorio_mei':\n os.remove(os.path.join(files_path, file))\n\n def renomeia_arquivo(self):\n files_path = self.files_path\n uf = self.uf\n file = r'relatorio_mei.csv'\n if file in os.listdir(files_path):\n old_file = os.path.join(files_path, file)\n new_file = self.nome_arquivo(uf)\n new_file = os.path.join(files_path, new_file)\n try:\n os.rename(old_file, new_file)\n print(f\"Arquivo renomeado para {new_file} \" + print_timestamp())\n except FileExistsError:\n print(\"Arquivo já existe.\")\n\n def verifica_arquivo(self):\n files_path = self.files_path\n if not os.path.exists(files_path):\n os.mkdir(files_path)\n print(f\"Arquivos baixados ficarão na pasta {files_path}.\")\n uf = self.uf\n name = self.nome_arquivo(uf)\n if name in os.listdir(files_path):\n return name\n else:\n return False\n\n def nome_arquivo(self, uf):\n data = print_timestamp(now=False)\n return f\"{uf}_cnae_e_municipios_{data}.csv\"\n\n def exporta_csv(self):\n driver = self.driver\n xpath_btn_exportar = '//*[@id=\"form:botaoExportarCsv\"]'\n driver.find_element_by_xpath(xpath_btn_exportar).click()\n time.sleep(10)\n print('Download concluído.', print_timestamp())\n\n def abre_browser(self):\n url = 'http://www22.receita.fazenda.gov.br/inscricaomei/private/pages/relatorios/opcoesRelatorio.jsf#'\n xpath = '/html/body/table/tbody/tr[2]/td/form/div/div/div[1]/p'\n\n while True:\n driver = self.driver\n try:\n driver.get(url)\n print('Browser iniciado. ' + print_timestamp())\n print('Extraindo ' + self.uf + '...')\n self._retorna_xpath(driver, 15, 5, xpath)\n break\n except TimeoutException as e:\n driver.quit()\n print(e)\n\n def carrega_pagina_relatorio(self, xpath_page):\n driver = self.driver\n page = driver.find_element_by_xpath(xpath_page)\n page.click()\n\n def uf_listbox(self, xpath_listbox):\n time.sleep(5)\n driver = self.driver\n uf = self.uf\n el = driver.find_element_by_xpath(xpath_listbox)\n for option in el.find_elements_by_tag_name('option'):\n if option.text == uf:\n option.click()\n break\n\n\nclass MeiCnaeMunicipio(Mei):\n xpath_page = '/html/body/table/tbody/tr[2]/td/form/div/div/div[1]/ul/li[6]/a'\n xpath_listbox = '//*[@id=\"form:uf\"]'\n xpath_municipios = '//*[@id=\"form:listaMunicipiosUF\"]'\n xpath_relatorio = '//*[@id=\"form:listaMunicipiosRelatorio\"]'\n xpath_btn_inserir = '//*[@id=\"form:btnInserir\"]'\n xpath_btn_consulta = '//*[@id=\"form:botaoConsultar\"]'\n xpath_tab_completa = '//*[@id=\"form:j_id62\"]'\n\n def __init__(self, driver, files_path, uf):\n super().__init__(driver, files_path, uf)\n\n def verifica_listbox_municipios(self):\n driver = self.driver\n for tries in [1, 2, 3]:\n print(f\"Carregando municípios. Tentativa {tries}/3.\", print_timestamp())\n time.sleep(5)\n # verifica se a 1a listbox está preenchida\n cities = driver.find_element_by_xpath(self.xpath_municipios)\n n_cities = len(cities.text.split('\\n'))\n if n_cities > 1 or cities.text == 'BRASILIA':\n cities.find_elements_by_tag_name('option')[0].click()\n cities.send_keys(Keys.SHIFT, Keys.END)\n driver.find_element_by_xpath(self.xpath_btn_inserir).click()\n time.sleep(5)\n # verifica se a 2a listbox está preenchida\n rel = driver.find_element_by_xpath(self.xpath_relatorio)\n n_rel = len(rel.text.split('\\n'))\n if n_rel > 1 or rel.text == 'BRASILIA':\n print(\"Municipíos carregados.\")\n break\n # se nao atenderem as condições\n if n_cities <= 1 and tries == 3:\n print(\"Não foi possível carregar os municípios.\")\n return False\n return True\n\n\nclass MeiCnaeSexoUF(Mei):\n xpath_page = '/html/body/table/tbody/tr[2]/td/form/div/div/div[1]/ul/li[7]/a'\n xpath_listbox = '//*[@id=\"form:uf\"]'\n xpath_municipios = '//*[@id=\"form:municipioUF\"]'\n xpath_btn_consulta = '//*[@id=\"form:botaoConsultar\"]'\n xpath_tab_completa = '//*[@id=\"form:botaoExportarCsv\"]'\n\n def __init__(self, driver, files_path, uf):\n super().__init__(driver, files_path, uf)\n\n def nome_arquivo(self, uf):\n data = print_timestamp(now=False)\n return f\"{uf}_cnae_e_sexo_{data}.csv\"\n" }, { "alpha_fraction": 0.734950602054596, "alphanum_fraction": 0.7385444641113281, "avg_line_length": 26.14634132385254, "blob_id": "dd83cdea421cdfb2b13a4856f53a04356c68f363", "content_id": "1435abd51d37bd450762ae18b50598a04fb8896f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1122, "license_type": "no_license", "max_line_length": 108, "num_lines": 41, "path": "/README.md", "repo_name": "bernarducs/mei", "src_encoding": "UTF-8", "text": "## Bot Microempreendedor Individual\nUm bot que raspa dados da base de MEIs da Receita Federal\n\n### Relatórios implementados\n\n* MEIs por Municípios e CNAE (um arquivo por UF)\n* MEIs por sexo e CNAE (um arquivo por UF)\n\n### Atenção\n\nRodando apenas com [Firefox browser](https://www.mozilla.org/en-US/firefox/new/)\n\n### Pré-requisitos\n\nInstale [geckodriver for Firefox](https://github.com/mozilla/geckodriver/releases)\n\nInstale [selenium](https://selenium-python.readthedocs.io/installation.html)\n\n### Exemplos\n\nExtraindo todas UFs por município e CNAE (cria e armazena numa subpasta \"arquivos\"):\n\n```console\npython bot.py ufs_por_municipio_cnae\n```\n\nUma UF específica:\n```console\npython bot.py uf_por_municipio_e_cnae --uf=\"PARÁ\"\n```\n###### *Dê uma olhada na [lista de ufs](https://github.com/bernarducs/mei/blob/master/lista%20de%20uf.txt)*\n\nTodas as UF por sexo e cnae, determinado uma pasta para donwload (windows):\n```console\npython bot.py uf_por_sexo_e_cnae --pasta=\"C\\:Documents\\bot_files\"\n```\n\nRodando o comando anterior sem o modo headless:\n```console\npython bot.py uf_por_sexo_e_cnae --headless=False\n``` " }, { "alpha_fraction": 0.5866666436195374, "alphanum_fraction": 0.5866666436195374, "avg_line_length": 33.411766052246094, "blob_id": "5b3944abfa3fd7ac96bae9ba2496ec9138990a14", "content_id": "34a04bb2d48e1933796ce01834184b29e3412cf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2937, "license_type": "no_license", "max_line_length": 77, "num_lines": 85, "path": "/bot.py", "repo_name": "bernarducs/mei", "src_encoding": "UTF-8", "text": "import os\nimport fire\n\nfrom selenium.common.exceptions import NoSuchElementException, \\\n WebDriverException, NoSuchWindowException\n\nfrom init import config\nfrom mei import MeiCnaeMunicipio, MeiCnaeSexoUF\nfrom helpers import retorna_ufs\n\n\ndef ufs_por_municipio_cnae(pasta=\"arquivos\", invisivel=True):\n ufs = retorna_ufs()\n for uf in ufs:\n uf_por_municipio_cnae(uf=uf, pasta=pasta, invisivel=invisivel)\n\n\ndef uf_por_municipio_cnae(uf=\"PERNAMBUCO\", pasta=\"arquivos\", invisivel=True):\n path_file = os.path.join(os.getcwd(), pasta)\n driver = config(path_file, headless=invisivel)\n mei = MeiCnaeMunicipio(driver, path_file, uf)\n file = mei.verifica_arquivo()\n if not file:\n mei.del_arquivos_inuteis()\n try:\n mei.abre_browser()\n mei.carrega_pagina_relatorio(mei.xpath_page)\n mei.uf_listbox(mei.xpath_listbox)\n checkbox = mei.verifica_listbox_municipios()\n if checkbox:\n table = mei.retorna_tabela(mei.xpath_btn_consulta,\n mei.xpath_tab_completa)\n if table:\n mei.exporta_csv()\n mei.renomeia_arquivo()\n else:\n print(f\"Não foi possível exportar o arquivo\")\n else:\n print(f\"Não foi possível exportar o arquivo.\")\n driver.quit()\n except (NoSuchElementException, WebDriverException,\n NoSuchWindowException) as e:\n print(e)\n driver.quit()\n print(\"Não foi possível exportar o arquivo.\")\n else:\n print(f\"O arquivo {file} já existe.\")\n\n\ndef ufs_por_sexo_cnae(pasta=\"arquivos\", invisivel=True):\n ufs = retorna_ufs()\n for uf in ufs:\n uf_por_sexo_cnae(uf=uf, pasta=pasta, invisivel=invisivel)\n\n\ndef uf_por_sexo_cnae(uf=\"PERNAMBUCO\", pasta=\"arquivos\", invisivel=True):\n path_file = os.path.join(os.getcwd(), pasta)\n driver = config(path_file, headless=invisivel)\n mei = MeiCnaeSexoUF(driver, path_file, uf)\n file = mei.verifica_arquivo()\n if not file:\n mei.del_arquivos_inuteis()\n try:\n mei.abre_browser()\n mei.carrega_pagina_relatorio(mei.xpath_page)\n mei.uf_listbox(mei.xpath_listbox)\n table = mei.retorna_tabela(mei.xpath_btn_consulta,\n mei.xpath_tab_completa)\n if table:\n mei.exporta_csv()\n mei.renomeia_arquivo()\n else:\n print(f\"Não foi possível exportar o arquivo\")\n driver.quit()\n except (NoSuchElementException, WebDriverException,\n NoSuchWindowException) as e:\n print(e)\n driver.quit()\n print(\"Não foi possível exportar o arquivo.\")\n else:\n print(f\"O arquivo {file} já existe.\")\n\n\nif __name__ == '__main__':\n fire.Fire()\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5844155550003052, "avg_line_length": 33.22222137451172, "blob_id": "3d2dd25a7232eedd5449170a11c45971735f37fe", "content_id": "9e17d320d4c6c48b139032742ef6946b1cd34c8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 616, "license_type": "no_license", "max_line_length": 103, "num_lines": 18, "path": "/helpers.py", "repo_name": "bernarducs/mei", "src_encoding": "UTF-8", "text": "import time\n\n\ndef print_timestamp(now=True):\n timestamp = time.localtime(time.time())\n if now:\n print_time = '{}/{}/{} {}:{}:{}'.format(timestamp.tm_mday, timestamp.tm_mon, timestamp.tm_year,\n timestamp.tm_hour, timestamp.tm_min, timestamp.tm_sec)\n return print_time\n print_time = '{:04d}{:02d}{:02d}'.format(timestamp.tm_year, timestamp.tm_mon, timestamp.tm_mday)\n return print_time\n\n\ndef retorna_ufs():\n with open('lista de uf.txt', 'r', encoding='latin-1') as f:\n file = f.readlines()\n ufs = [uf[:-1] for uf in file]\n return ufs\n" } ]
5
dleonard203/cs50_01
https://github.com/dleonard203/cs50_01
a02af689f8eb7703b0663b78c0015bcfb66fc5e1
877baa1847ca4f18f21b6f8233cfc3903dd52b19
20afa362c39ba8ab191d47eb74da440d74af273b
refs/heads/master
2020-05-01T13:16:35.229924
2019-04-03T02:06:52
2019-04-03T02:06:52
177,487,561
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5956392288208008, "alphanum_fraction": 0.5996035933494568, "avg_line_length": 26.885713577270508, "blob_id": "244e30ac78d8b52face215d5e8a39b6937451e79", "content_id": "9882778790cc91c2858905aff5724fd621984232", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1009, "license_type": "no_license", "max_line_length": 100, "num_lines": 35, "path": "/templates/landing.html", "repo_name": "dleonard203/cs50_01", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\r\n\r\n\r\n{% block header %}\r\n<head>\r\n <link rel=\"stylesheet\" href=\"{{ url_for('static', filename='stylesheets/landing.css') }}\">\r\n \r\n</head>\r\n\r\n<h1 class=\"welcome\">Welcome to Bookworm! Please log in below or register</h1>\r\n{% endblock %}\r\n\r\n{% block body %}\r\n<div class=\"container error\">\r\n <p>{{ msg }}</p>\r\n</div>\r\n\r\n<form method=\"post\">\r\n <div class=\"form-group container login\">\r\n <label for=\"username\">Username</label>\r\n <input type=\"text\" placeholder=\"Username\" class=\"form-control\" maxlength=12 name=\"username\">\r\n <label for=\"password\">Password</label>\r\n <input type=\"password\" placeholder=\"Password\" class=\"form-control\" name=\"password\">\r\n <input type=\"submit\", class=\"btn btn-primary\" action=\"/welcome\"></button>\r\n </div>\r\n</form>\r\n\r\n<div class=\"register container\">\r\n <p>No account yet? Sign up for a FREE account below!</p>\r\n <a href=\"{{ url_for('register') }}\">Register</a>\r\n</div>\r\n\r\n\r\n\r\n{% endblock %}" }, { "alpha_fraction": 0.5546059012413025, "alphanum_fraction": 0.6220322847366333, "avg_line_length": 30.96875, "blob_id": "ed3685aef04f8d9f321ae3ea9de10b58b6b2227f", "content_id": "7fb453be10393cb082c9229d8734498d481d081f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1053, "license_type": "no_license", "max_line_length": 170, "num_lines": 32, "path": "/import.py", "repo_name": "dleonard203/cs50_01", "src_encoding": "UTF-8", "text": "from sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import scoped_session, sessionmaker\r\nimport csv\r\n\r\ndatabase_url = \"postgres://ioovleludzhxle:a966a7d36f9e61edd437415d538afd38b89ab723d71177647d3766c32e0b2106@ec2-54-221-243-211.compute-1.amazonaws.com:5432/d3a4ekarkutr2s\"\r\nengine = create_engine(database_url)\r\ndb = scoped_session(sessionmaker(bind=engine))\r\n\r\ndef import_books():\r\n csv_file = open('books.csv', 'r')\r\n rows = csv.reader(csv_file, delimiter=',')\r\n cur_row = 0\r\n db.execute(\"\"\"CREATE TABLE books (\r\n id SERIAL PRIMARY KEY,\r\n isbn varchar NOT NULL,\r\n title varchar NOT NULL,\r\n author varchar NOT NULL,\r\n year INTEGER NOT NULL)\"\"\")\r\n for row in rows:\r\n if cur_row != 0:\r\n db.execute(\"INSERT INTO books (isbn, title, author, year) VALUES (:isbn, :title, :author, :year)\",\r\n {\"isbn\": row[0], \"title\": row[1], \"author\": row[2], \"year\": int(row[3])})\r\n cur_row += 1\r\n db.commit()\r\n \r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n import_books()" }, { "alpha_fraction": 0.6287758350372314, "alphanum_fraction": 0.6387122273445129, "avg_line_length": 32.40265655517578, "blob_id": "3a932d21e575756e412f039b3a7931b0a2f08edf", "content_id": "9d3e7e3eabc312300129728c378950b97925fcf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7548, "license_type": "no_license", "max_line_length": 221, "num_lines": 226, "path": "/application.py", "repo_name": "dleonard203/cs50_01", "src_encoding": "UTF-8", "text": "import os\n\nfrom flask import Flask, session, render_template, request, url_for, redirect, jsonify\nfrom flask_session import Session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nimport requests\nimport datetime\nimport json\n#res=requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": \"e3j4VgqHagE14fcn1XjkXg\",\"isbns\": \"9781632168146\" })\n\n#database_url = \"postgres://ioovleludzhxle:a966a7d36f9e61edd437415d538afd38b89ab723d71177647d3766c32e0b2106@ec2-54-221-243-211.compute-1.amazonaws.com:5432/d3a4ekarkutr2s\"\n\napp = Flask(__name__)\n\n# Check for environment variable\n# if not os.getenv(\"DATABASE_URL\"):\n# raise RuntimeError(\"DATABASE_URL is not set\")\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Set up database\nengine = create_engine(os.getenv(\"DATABASE_URL\"))\n#engine = create_engine(database_url)\ndb = scoped_session(sessionmaker(bind=engine))\n\ndef assert_login():\n def wrap(f):\n def wrapped_f(*args, **kwargs):\n if not is_logged_in():\n session['messages'] = \"Please login to view that page\"\n return redirect(url_for('index')) \n else:\n return f(*args, **kwargs)\n wrapped_f.__name__ = f.__name__\n return wrapped_f\n return wrap \n\n\[email protected](\"/\", methods = [\"GET\", \"POST\"])\ndef index(msg=''):\n if 'messages' in session:\n error_message = session['messages']\n session.pop('messages')\n return render_template('landing.html', msg=error_message)\n if msg != '':\n return render_template(\"landing.html\", msg=msg)\n if request.method == \"POST\":\n return try_login(request.form)\n else:\n return render_template(\"landing.html\")\n \n\[email protected](\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n if request.method == \"POST\":\n return account_creation_handler(request.form)\n return render_template(\"register.html\")\n\[email protected](\"/success\")\ndef success(username):\n return render_template(\"success.html\", username=username)\n\[email protected](\"/logout\")\n@assert_login()\ndef logout():\n name = session['username']\n session.pop('username')\n return render_template('goodbye.html', name=name)\n\n\[email protected]('/welcome', methods=[\"GET\"])\n@assert_login()\ndef welcome():\n return render_template('welcome.html', username=session['username'])\n\n\ndef search(req):\n title = req.form['title'].upper()\n isbn = req.form['isbn'].upper()\n author = req.form['author'].upper()\n books = all_books()\n matches = []\n for book in books:\n if book[1].upper().find(isbn) > -1 and book[2].upper().find(title) > -1 and book[3].upper().find(author) > -1:\n matches.append(book)\n return matches\n\n\n \n\[email protected]('/results', methods=[\"POST\", \"GET\"])\n@assert_login()\ndef results():\n books = search(request)\n if len(books) == 0:\n return render_template('results.html', msg = 'Sorry, no books meeting that criteria are available')\n else:\n return render_template('results.html', books = books)\n \n\[email protected]('/book/<string:isbn>', methods=[\"POST\", \"GET\"])\n@assert_login()\ndef display_results(isbn):\n book = get_book_by_isbn(isbn)\n reviews = get_reviews_by_isbn(isbn)\n goodreads = goodreads_res(isbn)\n if goodreads.status_code == 200:\n content = json.loads(goodreads.content)\n rating = content['books'][0]['average_rating']\n review_count = content['books'][0]['reviews_count']\n else:\n rating = 'N/A'\n review_count = 'N/A'\n if request.method == \"GET\":\n return render_template('book_details.html', book = book, reviews=reviews, rating=rating, review_count=review_count)\n else:\n username = session['username']\n if user_reviewed(username, isbn):\n msg = 'Sorry, you have already reviewed this book'\n else:\n update_reviews(username, isbn, request.form['content'], request.form['rating'])\n msg = 'Thanks for your review, ' + username\n reviews = get_reviews_by_isbn(isbn)\n return render_template('book_details.html', book = book, reviews=reviews, msg=msg, rating=rating, review_count=review_count)\n\n\[email protected]('/api/<string:isbn>')\ndef goodreads_api(isbn):\n res = goodreads_res(isbn)\n if res.status_code == 200:\n api_content = json.loads(res.content)\n my_book = get_book_by_isbn(isbn)\n if my_book:\n return_dict = {'title': my_book[2], 'author': my_book[3], 'year': my_book[4], 'isbn': isbn, 'review_count': api_content['books'][0]['reviews_count'], 'average_score': api_content['books'][0]['average_rating']}\n return jsonify(return_dict)\n else:\n return not_found(isbn)\n\[email protected]('/not_found')\ndef not_found(isbn):\n return render_template('not_found.html', isbn=isbn), 404\n\n\ndef goodreads_res(isbn):\n return requests.get(\"https://www.goodreads.com/book/review_counts.json\", params={\"key\": \"e3j4VgqHagE14fcn1XjkXg\",\"isbns\": isbn})\n\ndef get_book_by_isbn(isbn):\n book = db.execute('SELECT * FROM books where isbn = :isbn', {'isbn': isbn}).first()\n return list(book)\n\ndef all_books():\n return db.execute('SELECT * FROM books').fetchall()\n\ndef update_reviews(username, isbn, review, rating):\n db.execute(\"INSERT INTO reviews (isbn, username, date, content, rating) VALUES (:isbn, :username, :date, :content, :rating)\",\n {'isbn': isbn, 'username': username, 'date': pretty_date(), 'content': review, 'rating': rating})\n db.commit()\n\ndef get_reviews_by_isbn(isbn):\n res = db.execute('SELECT * FROM reviews WHERE isbn = :isbn', {'isbn': isbn}).fetchall()\n cache = []\n for rev in res:\n cache.append(rev)\n return cache\n\ndef user_reviewed(username, isbn):\n res = db.execute(\"SELECT * FROM reviews where username = :username and isbn = :isbn\", {\"username\": username, \"isbn\": isbn}).first()\n if res:\n return True\n return False\n\ndef pretty_date():\n res = str(datetime.datetime.now())[:10]\n final = res[5:7] + '/' + res[8:10] + '/' + res[0:4]\n return final\n\n\ndef is_logged_in():\n if 'username' not in session:\n return False \n else:\n return True\n\n\ndef try_login(form):\n username = form.get(\"username\")\n password = form.get(\"password\")\n db_entry = db.execute(\"SELECT username, password from users where username = :username\", {\"username\": username}).first()\n if db_entry is None:\n return index(msg = 'No user \\'' + username + '\\' found')\n elif db_entry[1] != password:\n return index(msg = 'Incorrect password')\n else:\n session['username'] = username\n return welcome()\n\n\n\n\n \n\ndef account_creation_handler(form):\n username = form.get(\"username\")\n password = form.get(\"password\")\n email = form.get(\"email\")\n if username_taken(username):\n return render_template(\"register.html\", err_msg = \"Sorry, but the username \" + username + \" is already in use. Please pick another one.\")\n else:\n create_account(username, password, email)\n return success(username)\n\ndef username_taken(username):\n sqla_res = db.execute(\"select count(*) from users where username = :username\", {\"username\": username})\n res = sqla_res.first()[0]\n if res == 0:\n return False\n return True\n\ndef create_account(username, password, email):\n sql = \"INSERT INTO users (username, password, email) VALUES (:username, :password, :email)\"\n db.execute(sql, {\"username\": username, \"password\": password, \"email\": email})\n db.commit()" }, { "alpha_fraction": 0.7585470080375671, "alphanum_fraction": 0.7756410241127014, "avg_line_length": 51, "blob_id": "bef877dd866b4018763fd12ca03d4120000bce2f", "content_id": "919e6535133d0b20748ddd1330c9016102e4a7a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 936, "license_type": "no_license", "max_line_length": 155, "num_lines": 18, "path": "/README.md", "repo_name": "dleonard203/cs50_01", "src_encoding": "UTF-8", "text": "# Welcome to Bookworm!\n\nThis is my CS50 Project 1 website, Bookworm. It was made with Flask, Jinja2, and CSS. Users are able to log in, read reviews,\nsubmit reviews, and call the goodreads API through this website. On windows, if you run `$Env:FLASK_APP=\"application.py` while in the directory of cs50_01,\nrunning `flask run` will start the web server on localhost port 5000. In a browser, you can then go to `http://localhost:500` to get\nto the landing page. To start leaving reviews, register for an account and sign in!\n\n## Files\n\napplication.py is where the flask app lives. All routing, as well as some helper functions, live here.\n\nimport.py is how books.csv gets inserted into the 'books' PostgreSQL table that supports this project.\n\n/templates folder contains all Jinja2 templates that are used throughout the app.\n\n/static/css contains the two CSS files used for styling the website\n\n/static/images contains the Bookworm logo\n" } ]
4
DamienPond001/Udemy_API
https://github.com/DamienPond001/Udemy_API
b33626df9be625c4b0bce217fee6b779b4382315
497dac931f81ab9ba845c3f0a969acc8ca1997d5
da8213535a1d099a1c39e5fb3334d54286945891
refs/heads/master
2020-03-24T00:27:25.616840
2019-01-16T14:27:21
2019-01-16T14:27:21
142,292,232
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5344768166542053, "alphanum_fraction": 0.5416809320449829, "avg_line_length": 30.354839324951172, "blob_id": "4c648eecce1de05ff8fa49a6b27212e487e0f52b", "content_id": "15788e5d35b52b9a5e835f1ddc369a553d773fbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2915, "license_type": "no_license", "max_line_length": 79, "num_lines": 93, "path": "/API/Section9/1_recap_of_code/start/resources/user.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "from flask_restful import Resource, reqparse\nfrom werkzeug.security import safe_str_cmp\nfrom flask_jwt_extended import (\n create_access_token,\n create_refresh_token,\n jwt_refresh_token_required,\n get_jwt_identity\n )\nfrom models.user import UserModel\n\n\nclass UserRegister(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('username',\n type=str,\n required=True,\n help=\"This field cannot be blank.\"\n )\n parser.add_argument('password',\n type=str,\n required=True,\n help=\"This field cannot be blank.\"\n )\n\n def post(self):\n data = UserRegister.parser.parse_args()\n\n if UserModel.find_by_username(data['username']):\n return {\"message\": \"A user with that username already exists\"}, 400\n\n user = UserModel(**data)\n user.save_to_db()\n\n return {\"message\": \"User created successfully.\"}, 201\n\nclass User(Resource):\n @classmethod\n def get(cls, user_id):\n user = UserModel.find_by_id(user_id)\n \n if user is None:\n return {'message' :'user not found'}, 404\n else:\n return user.json()\n \n \n @classmethod\n def delete(cls, user_id):\n user = UserModel.find_by_id(user_id)\n \n if user is None:\n return {'message' : 'User not found'}, 404\n else:\n user.delete_from_db()\n return {'message' : 'User deleted'}\n\nclass UserLogin(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('username',\n type=str,\n required=True,\n help=\"This field cannot be blank.\"\n )\n parser.add_argument('password',\n type=str,\n required=True,\n help=\"This field cannot be blank.\"\n )\n \n def post(self):\n data = self.parser.parse_args()\n \n user = UserModel.find_by_username(data['username'])\n \n #This is what 'authenticate()' used to do\n if user is not None and safe_str_cmp(user.password, data['password']):\n #What the 'identity()' function used to do\n access_token = create_access_token(identity = user.id, fresh = True)\n refresh_token = create_refresh_token(user.id)\n \n return {\n 'access_token' : access_token,\n 'refresh_token' : refresh_token\n }, 200\n else:\n return {'message' : 'Invalid credentials'}, 401\n\nclass TokenRefresh(Resource):\n @jwt_refresh_token_required\n def post(self):\n current_user = get_jwt_identity()\n new_token = create_access_token(identity = current_user, fresh = False)\n return {'access_token' : new_token}, 200" }, { "alpha_fraction": 0.5194686055183411, "alphanum_fraction": 0.6010077595710754, "avg_line_length": 24.69411849975586, "blob_id": "613be0dc5bcd198ce946d7c22769254e869c7fb5", "content_id": "3ade3420bed9cdd03ec57a87cf4149fbfe81ad4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2183, "license_type": "no_license", "max_line_length": 76, "num_lines": 85, "path": "/Datacamp/manipulating_indices.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Import pandas\nimport pandas as pd\n\n# Read 'monthly_max_temp.csv' into a DataFrame: weather1\nweather1 = pd.read_csv('monthly_max_temp.csv', index_col='Month')\n\n# Print the head of weather1\nprint(weather1.head())\n\n# Sort the index of weather1 in alphabetical order: weather2\nweather2 = weather1.sort_index()\n\n# Print the head of weather2\nprint(weather2.head())\n\n# Sort the index of weather1 in reverse alphabetical order: weather3\nweather3 = weather1.sort_index(ascending=False)\n\n# Print the head of weather3\nprint(weather3.head())\n\n# Sort weather1 numerically using the values of 'Max TemperatureF': weather4\nweather4 = weather1.sort_values('Max TemperatureF')\n\n# Print the head of weather4\nprint(weather4.head())\n\n# Import pandas\nimport pandas as pd\n\n# Reindex weather1 using the list year: weather2\nweather2 = weather1.reindex(year)\n\n# Print weather2\nprint(weather2)\n\n# Reindex weather1 using the list year with forward-fill: weather3\nweather3 = weather1.reindex(year).ffill()\n\n# Print weather3\nprint(weather3)\n\n Mean TemperatureF\n Month \n Jan 32.133333\n Feb NaN\n Mar NaN\n Apr 61.956044\n May NaN\n Jun NaN\n Jul 68.934783\n Aug NaN\n Sep NaN\n Oct 43.434783\n Nov NaN\n Dec NaN\n Mean TemperatureF\n Month \n Jan 32.133333\n Feb 32.133333\n Mar 32.133333\n Apr 61.956044\n May 61.956044\n Jun 61.956044\n Jul 68.934783\n Aug 68.934783\n Sep 68.934783\n Oct 43.434783\n Nov 43.434783\n Dec 43.434783\n \n # Import pandas\nimport pandas as pd\n\n# Reindex names_1981 with index of names_1881: common_names\ncommon_names = names_1981.reindex(names_1881.index)\n\n# Print shape of common_names\nprint(common_names.shape)\n\n# Drop rows with null counts: common_names\ncommon_names = common_names.dropna()\n\n# Print shape of new common_names\nprint(common_names.shape)" }, { "alpha_fraction": 0.6283987760543823, "alphanum_fraction": 0.6642749309539795, "avg_line_length": 26.59375, "blob_id": "b7ef10bdae36ca9100f96732184bfc9106c20b89", "content_id": "d35e096d5bea07b0147174b1be4f574d88ff4fe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2648, "license_type": "no_license", "max_line_length": 93, "num_lines": 96, "path": "/Datacamp/grouby.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Group titanic by 'pclass'\nby_class = titanic.groupby('pclass')\n\n# Aggregate 'survived' column of by_class by count\ncount_by_class = by_class['survived'].count()\n\n# Print count_by_class\nprint(count_by_class)\n\n# Group titanic by 'embarked' and 'pclass'\nby_mult = titanic.groupby(['embarked', 'pclass'])\n\n# Aggregate 'survived' column of by_mult by count\ncount_mult = by_mult['survived'].count()\n\n# Print count_mult\nprint(count_mult)\n\n# Read life_fname into a DataFrame: life\nlife = pd.read_csv(life_fname, index_col='Country')\n\n# Read regions_fname into a DataFrame: regions\nregions = pd.read_csv(regions_fname, index_col='Country')\n\n# Group life by regions['region']: life_by_region. This is doable because of the same indexes\nlife_by_region = life.groupby(regions.region)\n\n# Print the mean over the '2010' column of life_by_region\nprint(life_by_region['2010'].mean())\n\n\n# Group titanic by 'pclass': by_class\nby_class = titanic.groupby('pclass')\n\n# Select 'age' and 'fare'\nby_class_sub = by_class[['age','fare']]\n\n# Aggregate by_class_sub by 'max' and 'median': aggregated\naggregated = by_class_sub.agg(['max', 'median'])\n\n age fare \n max median max median\npclass \n1 80.0 39.0 512.3292 60.0000\n2 70.0 29.0 73.5000 15.0458\n3 74.0 24.0 69.5500 8.0500\n\n# Print the maximum age in each class\nprint(aggregated.loc[:, ('age','max')])\n pclass\n 1 80.0\n 2 70.0\n 3 74.0\n Name: (age, max), dtype: float64\n\n# Print the median fare in each class\nprint(aggregated.loc[:, ('fare', 'median')])\n pclass\n 1 80.0\n 2 70.0\n 3 74.0\n Name: (age, max), dtype: float64\n\n\n\n# Read the CSV file into a DataFrame and sort the index: gapminder\ngapminder = pd.read_csv('gapminder.csv', index_col=['Year','region','Country']).sort_index()\n\n# Group gapminder by 'Year' and 'region': by_year_region\nby_year_region = gapminder.groupby(level = ['Year', 'region'])\n\n# Define the function to compute spread: spread\ndef spread(series):\n return series.max() - series.min()\n\n# Create the dictionary: aggregator\naggregator = {'population':'sum', 'child_mortality':'mean', 'gdp':spread}\n\n# Aggregate by_year_region using the dictionary: aggregated\naggregated = by_year_region.agg(aggregator)\n\n# Print the last 6 entries of aggregated \nprint(aggregated.tail(6))\n\n\n# Read file: sales\nsales = pd.read_csv('sales.csv', index_col='Date', parse_dates=True)\n\n# Create a groupby object: by_day\nby_day = sales.groupby(sales.index.strftime('%a'))\n\n# Create sum: units_sum\nunits_sum = by_day['Units'].sum()\n\n# Print units_sum\nprint(units_sum)" }, { "alpha_fraction": 0.7476038336753845, "alphanum_fraction": 0.7518636584281921, "avg_line_length": 31.413793563842773, "blob_id": "a71f4e82b2a7b68c32ab520867cb6f4202910546", "content_id": "9de7b6aeb5ebf3f32f65437b6f5317614329f67d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 939, "license_type": "no_license", "max_line_length": 108, "num_lines": 29, "path": "/Datacamp/bokeh_tooltips.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Add the first circle glyph to the figure p\np.circle('fertility', 'female_literacy', source=latin_america, size=10, color='red', legend='Latin America')\n\n# Add the second circle glyph to the figure p\np.circle('fertility', 'female_literacy', source=africa, size=10, color='blue', legend='Africa')\n\n# Assign the legend to the bottom left: p.legend.location\np.legend.location = 'bottom_left'\n\n# Fill the legend background with the color 'lightgray': p.legend.background_fill_color\np.legend.background_fill_color='lightgray'\n\n# Specify the name of the output_file and show the result\noutput_file('fert_lit_groups.html')\nshow(p)\n\n\n# Import HoverTool from bokeh.models\nfrom bokeh.models import HoverTool\n\n# Create a HoverTool object: hover\nhover = HoverTool(tooltips=[('Country','@Country')])\n\n# Add the HoverTool object to figure p\np.add_tools(hover)\n\n# Specify the name of the output_file and show the result\noutput_file('hover.html')\nshow(p)" }, { "alpha_fraction": 0.7239999771118164, "alphanum_fraction": 0.7250000238418579, "avg_line_length": 21.244443893432617, "blob_id": "39d0199060cb023340f179c620cfeb6874c9b527", "content_id": "8bbe6ce1a2045f7a034d14b19b623f996d1ef67c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1000, "license_type": "no_license", "max_line_length": 60, "num_lines": 45, "path": "/Datacamp/Working_with_DB.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Import necessary module\nfrom sqlalchemy import create_engine\nimport pandas as pd\n\n# Create engine: engine\nengine = create_engine('sqlite:///Chinook.sqlite')\n\n# Save the table names to a list: table_names\ntable_names = engine.table_names()\n\n# Print the table names to the shell\nprint(table_names)\n\n\n#Executing a query\ncon = engine.connect()\n\n# Perform query: rs\nrs = con.execute(\"SELECT * from Album\")\n\n# Save results of the query to DataFrame: df\ndf = pd.DataFrame(rs.fetchall())\ndf.columns = rs.keys()\n\n\n# Close connection\ncon.close()\n\n#auto close connection\nwith engine.connect() as con:\n rs = con.execute(\"SELECT LastName, Title FROM Employee\")\n df = pd.DataFrame(rs.fetchmany(3))\n df.columns = rs.keys()\n \n \n#ALTERNATIVELY\n# Import packages\nfrom sqlalchemy import create_engine\nimport pandas as pd\n\n# Create engine: engine\nengine = create_engine('sqlite:///Chinook.sqlite')\n\n# Execute query and store records in DataFrame: df\ndf = pd.read_sql_query('SELECT * FROM Album', engine)" }, { "alpha_fraction": 0.6812688708305359, "alphanum_fraction": 0.708459198474884, "avg_line_length": 17.41666603088379, "blob_id": "81fd4649ff92dc63fefd1522aa572f7e6e6d7070", "content_id": "712ec39f948ff8577a6554ade37cc6e7d1192e27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 662, "license_type": "no_license", "max_line_length": 74, "num_lines": 36, "path": "/Datacamp/data_explore.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "import pandas as pd\n\ndf = pd.read_csv('....')\n\ndf.head()\ndf.tail()\ndf.columns\ndf.shape\n\n#Display summary stats of numeric columns\ndf.describe()\n\n\n#Display frequencies of categorical columns\ndf['Borough'].value_counts(dropna=False)\n\n#display means and counts of columns\ndf[['col1', 'col2']].count()\ndf[['col1', 'col2']].mean()\n\ndf['2015'].quantile([0.05, 0.95])\n\n# Import matplotlib.pyplot\nimport matplotlib.pyplot as plt\n\n# Plot the histogram\ndf['Existing Zoning Sqft'].plot(kind='hist', rot=70, logx=True, logy=True)\n\n# Display the histogram\nplt.show()\n\n# Create the boxplot\ndf.boxplot(column='initial_cost', by='Borough', rot=90)\n\n# Display the plot\nplt.show()" }, { "alpha_fraction": 0.6137512922286987, "alphanum_fraction": 0.7199190855026245, "avg_line_length": 34.35714340209961, "blob_id": "521fa2215723a32189eccc03dfaa02a23723f4cc", "content_id": "0bfc2fee9978b0a18c9ad5e043dd74f66308e2db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 989, "license_type": "no_license", "max_line_length": 81, "num_lines": 28, "path": "/Datacamp/datetime_indices.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#To read in\ndf = pd.read_csv('data.csv', parse_dates=True, index_col='Date)\n\n# Prepare a format string: time_format\ntime_format = '%Y-%m-%d %H:%M'\n\n# Convert date_list into a datetime object: my_datetimes\nmy_datetimes = pd.to_datetime(date_list, format=time_format) \n\n# Construct a pandas Series using temperature_list and my_datetimes: time_series\ntime_series = pd.Series(temperature_list, index=my_datetimes)\n\n# Extract the hour from 9pm to 10pm on '2010-10-11': ts1\nts1 = ts0.loc['2010-10-11 21:00:00':'2010-10-11 22:00:00']\n\n# Extract '2010-07-04' from ts0: ts2\nts2 = ts0.loc['2010-07-04']\n\n# Extract data from '2010-12-15' to '2010-12-31': ts3\nts3 = ts0.loc['2010-12-15':'2010-12-31']\n\n#Sometimes we may wnat to reindex a df using the timeseries index of another df. \n#python fills in non-matching indices with Nan values\n# Reindex without fill method: ts3\nts3 = ts2.reindex(ts1.index)\n\n# Reindex with fill method, using forward fill: ts4\nts4 = ts2.reindex(ts1.index, method='ffill')" }, { "alpha_fraction": 0.772857129573822, "alphanum_fraction": 0.7764285802841187, "avg_line_length": 40.20588302612305, "blob_id": "2d46f7ffbf4bc13388ef0ec3a6ef1ed0a193b71d", "content_id": "2d070355b5737c0c07063ae49679ed8a1cf1bc8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1400, "license_type": "no_license", "max_line_length": 128, "num_lines": 34, "path": "/Datacamp/sqlalchemy.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Import create_engine\nfrom sqlalchemy import create_engine\n\n# Create an engine that connects to the census.sqlite file: engine\nengine = create_engine('sqlite:///census.sqlite')# Create an engine to the census database\nengine = create_engine('mysql+pymysql://'+'student:datacamp'+'@courses.csrrinzqubik.us-east-1.rds.amazonaws.com:3306/'+'census')\n\n# Print table names\nprint(engine.table_names())\n\n#Reflection is the process of reading the database and building the metadata \n#based on that information. It's the opposite of creating a Table by hand and \n#is very useful for working with existing databases. To perform reflection, you need to import \n#the Table object from the SQLAlchemy package. Then, you use this Table object to read \n#your table from the engine and autoload the columns. Using the Table object in this manner \n#is a lot like passing arguments to a function. For example, to autoload the columns with the engine, \n#you have to specify the keyword arguments autoload=True and autoload_with=engine to Table().\n\n# Import Table\nfrom sqlalchemy import Table, MetaData\n\nmetadata = MetaData()\n\n# Reflect census table from the engine: census\ncensus = Table('census', metadata, autoload=True, autoload_with=engine)\n\n# Print the column names\nprint(census.columns.keys())\n\n# Print full table metadata\nprint(repr(metadata.tables['census']))\n\n# Print census table metadata\nprint(repr(census))" }, { "alpha_fraction": 0.6997371912002563, "alphanum_fraction": 0.7049934267997742, "avg_line_length": 28.843137741088867, "blob_id": "cbfd4c4227d5cffa36807cbd523d08731e4d42e7", "content_id": "10ed452c401b5671eee152789496dd99e8ad6107", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1522, "license_type": "no_license", "max_line_length": 85, "num_lines": 51, "path": "/Datacamp/bokeh_interaction.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Create a figure with the \"box_select\" tool: p\np = figure(x_axis_label='Year', y_axis_label='Time', tools='box_select')\n\n# Add circle glyphs to the figure p with the selected and non-selected properties\np.circle('Year', 'Time', source=source,selection_color='red', nonselection_alpha=0.1)\n\n# Specify the name of the output file and show the result\noutput_file('selection_glyph.html')\nshow(p)\n\n\n\n# import the HoverTool\nfrom bokeh.models import HoverTool\n\n# Add circle glyphs to figure p\np.circle(x, y, size=10,\n fill_color='grey', alpha=0.1, line_color=None,\n hover_fill_color='firebrick', hover_alpha=0.5,\n hover_line_color='white')\n\n# Create a HoverTool: hover\nhover = HoverTool(tooltips=None, mode='vline')\n\n# Add the hover tool to the figure p\np.add_tools(hover)\n\n# Specify the name of the output file and show the result\noutput_file('hover_glyph.html')\nshow(p)\n\n\n\n#Import CategoricalColorMapper from bokeh.models\nfrom bokeh.models import CategoricalColorMapper\n\n# Convert df to a ColumnDataSource: source\nsource = ColumnDataSource(df)\n\n# Make a CategoricalColorMapper object: color_mapper\ncolor_mapper = CategoricalColorMapper(factors=['Europe', 'Asia', 'US'],\n palette=['red', 'green', 'blue'])\n\n# Add a circle glyph to the figure p\np.circle('weight', 'mpg', source=source,\n color=dict(field='origin', transform=color_mapper),\n legend='origin')\n\n# Specify the name of the output file and show the result\noutput_file('colormap.html')\nshow(p)\n" }, { "alpha_fraction": 0.843137264251709, "alphanum_fraction": 0.843137264251709, "avg_line_length": 9.399999618530273, "blob_id": "3ce0465247e58878b7752b6ca8a0d1eff4fad9f2", "content_id": "6458660e5f173837bde5fd9e9926d94c1ac071e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 51, "license_type": "no_license", "max_line_length": 16, "num_lines": 5, "path": "/API/Section9/1_recap_of_code/start/requirements.txt", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "Flask-JWT\n-Extended\nFlask-RESTful\n\nFlask-SQLAlchemy" }, { "alpha_fraction": 0.6530920267105103, "alphanum_fraction": 0.7028657793998718, "avg_line_length": 32.20000076293945, "blob_id": "d894ff4a04fbdc4faed501cdfff05b9199de5162", "content_id": "d11c54af660d7bb05f1425f2e8ab7fb1639cfa88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 663, "license_type": "no_license", "max_line_length": 129, "num_lines": 20, "path": "/API/app.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 24 15:04:52 2018\n\n@author: Damien\n\"\"\"\n\nfrom flask import Flask\n\napp = Flask(__name__) #unique __name__ - special python variable\n\n#What requests we need to understand\[email protected]('/') #http://www.google.com/ - '/' represents the home page [http://www.google.com/maps represents a '/maps' endpoint]\ndef home(): #whatever this does it must return a reponse to the browser\n return \"Hello, world!\"\n\napp.run(port=5000) #app runs on port/area of computer that the computer sends and recieces requests\n\n#run from conda \"python app.py\"\n#copy 127.0.0.1:5000 into browswer (127.0.0.1 is the IP reserved fro your computer)" }, { "alpha_fraction": 0.6379726529121399, "alphanum_fraction": 0.6481630206108093, "avg_line_length": 42.372093200683594, "blob_id": "1e5c95b9c12a6173702d8127b3907b6063c1c8e5", "content_id": "89853327f468d54b71fd24070e1fa701d87dc66c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3729, "license_type": "no_license", "max_line_length": 154, "num_lines": 86, "path": "/API/Section5/code/app.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "'''This was created after installing virtualenv. This allows use to create a virtual environment that mimics \na fresh Python install. This ensures that any updates to packages don't affect previous applications built on previous package versions.\n\nRun: conda create -n venv python=3.5.0 anaconda\nto create a virtual env called venv with python 3.5.0\n\nconda activate venv\nconda deactivate'''\n\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_jwt import JWT, jwt_required\n\nfrom security import authenticate, identity\n\napp = Flask(__name__)\napp.secret_key = \"secret_key\" #this should be long and complicated in a production sense\napi = Api(app)\n\njwt = JWT(app, authenticate, identity) \n'''\nJWT creates an endpoint /auth. When we call /auth we send a username and password, which is passed on to authenticate and identity\nIf authenticate returns a user, and that is the identity and the /auth endpoint returns a JWT\nThe JWT calls the identity function which gets the correct id and returns the user\n'''\n\nitems = []\n\nclass Item(Resource):\n parser = reqparse.RequestParser() #This prevents code duplication and now belongs to the Item class\n parser.add_argument('price', \n type = float, \n required = True,\n help = \"This field cannot be left blank\") \n \n \n @jwt_required()\n def get(self, name):\n item = next(filter(lambda x: x['name'] == name, items), None) #if next produces nothing, return None\n return {\"item\" : item}, 200 if item is not None else 404\n \n def post(self, name): \n #Note that the 'Header' and 'Body' need to be set\n if next(filter(lambda x: x['name'] == name, items), None) is not None:\n return {\"message\" : \"an item with name '{}' already exists.\".format(name)}, 400 #400 = bad request\n \n data = Item.parser.parse_args()\n #data = request.get_json() #args: force:Forces the content header, silent: returns None (generally don't use)\n item = {'name' : name, 'price' : data['price']}\n items.append(item)\n \n return item, 201 #201 is code for created\n \n def delete(self, name):\n global items\n items = list(filter(lambda x : x['name'] != name, items))\n return {\"message\" : \"Item deleted\"}\n \n def put(slef, name):\n# parser = reqparse.RequestParser() #reqparse allows us to specify which items in the JSON payload can be used for the variable updates\n# parser.add_argument('price', #we add which arguments we can allow through. The request gets run through the parser\n# type = float, \n# required = True,\n# help = \"This field cannot be left blank\") #and many more!\n data = Item.parser.parse_args() #any args other than \"price\" will just get erased\n #data = request.get_json() #this is sone in the above\n \n #print(data['another']) --- this would return an error, even if 'another' was in the json payload as by this point it has been removed by the parser\n \n item = next(filter(lambda x: x['name'] == name, items), None)\n \n if item is None:\n item = {\"name\" : name, \"price\" : data['price']}\n items.append(item)\n else:\n item.update(data) #Note, item is a reference to the items entry and so will be updated there as well\n print(items)\n return item\n\nclass ItemList(Resource):\n def get(self):\n return{\"items\" : items}\n \napi.add_resource(Item, '/item/<string:name>') #http://127.0.0.1:5000/item/item_name\napi.add_resource(ItemList, '/items')\napp.run(port=5000, debug=True) #debug gives better error messages" }, { "alpha_fraction": 0.33131369948387146, "alphanum_fraction": 0.5877168774604797, "avg_line_length": 32.321102142333984, "blob_id": "9c5626eb3f547b80e39163fedefc299d206db1ab", "content_id": "12b993df86bad8f3858493d509f721140cccfee7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3631, "license_type": "no_license", "max_line_length": 110, "num_lines": 109, "path": "/Datacamp/merge_ordered.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#Used for mereging when there is an ordering (eg dates)\n\n# Perform the first ordered merge: tx_weather\ntx_weather = pd.merge_ordered(austin, houston)\n\n# Print tx_weather\nprint(tx_weather)\n\n# Perform the second ordered merge: tx_weather_suff\ntx_weather_suff = pd.merge_ordered(austin, houston, on='date', suffixes=['_aus','_hus'])\n\n# Print tx_weather_suff\nprint(tx_weather_suff)\n\n# Perform the third ordered merge: tx_weather_ffill\ntx_weather_ffill = pd.merge_ordered(austin, houston, on='date', suffixes=['_aus','_hus'], fill_method='ffill')\n\n# Print tx_weather_ffill\nprint(tx_weather_ffill)\n\n#Similar to pd.merge_ordered(), the pd.merge_asof() function will also merge \n#values in order using the on column, but for each row in the left DataFrame, \n#only rows from the right DataFrame whose 'on' column values are less than the \n#left value will be kept.\n#This function can be used to align disparate datetime frequencies without having to first resample.\n\noil.head()\n Date Price\n0 1970-01-01 3.35\n1 1970-02-01 3.35\n2 1970-03-01 3.35\n3 1970-04-01 3.35\n4 1970-05-01 3.35\n\nauto.head()\n mpg cyl displ hp weight accel yr origin \\\n0 18.0 8 307.0 130 3504 12.0 1970-01-01 US \n1 15.0 8 350.0 165 3693 11.5 1970-01-01 US \n2 18.0 8 318.0 150 3436 11.0 1970-01-01 US \n3 16.0 8 304.0 150 3433 12.0 1970-01-01 US \n4 17.0 8 302.0 140 3449 10.5 1970-01-01 US \n\n name \n0 chevrolet chevelle malibu \n1 buick skylark 320 \n2 plymouth satellite \n3 amc rebel sst \n4 ford torino\n\n\n# Merge auto and oil: merged\nmerged = pd.merge_asof(auto, oil, left_on='yr', right_on='Date')\n\n# Print the tail of merged\nprint(merged.tail())\n\n mpg cyl displ hp weight accel yr origin name \\\n 387 27.0 4 140.0 86 2790 15.6 1982-01-01 US ford mustang gl \n 388 44.0 4 97.0 52 2130 24.6 1982-01-01 Europe vw pickup \n 389 32.0 4 135.0 84 2295 11.6 1982-01-01 US dodge rampage \n 390 28.0 4 120.0 79 2625 18.6 1982-01-01 US ford ranger \n 391 31.0 4 119.0 82 2720 19.4 1982-01-01 US chevy s-10 \n \n Date Price \n 387 1982-01-01 33.85 \n 388 1982-01-01 33.85 \n 389 1982-01-01 33.85 \n 390 1982-01-01 33.85 \n 391 1982-01-01 33.85 \n# Resample merged: yearly\nyearly = merged.resample('A', on='Date')[['mpg','Price']].mean()\n\n# Print yearly\nprint(yearly)\n\n mpg Price\n Date \n 1970-12-31 17.689655 3.35\n 1971-12-31 21.111111 3.56\n 1972-12-31 18.714286 3.56\n 1973-12-31 17.100000 3.56\n 1974-12-31 22.769231 10.11\n 1975-12-31 20.266667 11.16\n 1976-12-31 21.573529 11.16\n 1977-12-31 23.375000 13.90\n 1978-12-31 24.061111 14.85\n 1979-12-31 25.093103 14.85\n 1980-12-31 33.803704 32.50\n 1981-12-31 30.185714 38.00\n 1982-12-31 32.000000 33.85\n\n# print yearly.corr()\nprint(yearly.corr())\n\n mpg Price\n Date \n 1970-12-31 17.689655 3.35\n 1971-12-31 21.111111 3.56\n 1972-12-31 18.714286 3.56\n 1973-12-31 17.100000 3.56\n 1974-12-31 22.769231 10.11\n 1975-12-31 20.266667 11.16\n 1976-12-31 21.573529 11.16\n 1977-12-31 23.375000 13.90\n 1978-12-31 24.061111 14.85\n 1979-12-31 25.093103 14.85\n 1980-12-31 33.803704 32.50\n 1981-12-31 30.185714 38.00\n 1982-12-31 32.000000 33.85" }, { "alpha_fraction": 0.643579363822937, "alphanum_fraction": 0.6501516699790955, "avg_line_length": 24.35897445678711, "blob_id": "ccc33a63b3d911cdfcaa3177fedae451c064bb4e", "content_id": "facaaab484bebb027159143d61bdf8c439bb62cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1978, "license_type": "no_license", "max_line_length": 62, "num_lines": 78, "path": "/Datacamp/twitter_example.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Import package\nimport tweepy\n\n# Store OAuth authentication credentials in relevant variables\naccess_token = \"1092294848-aHN7DcRP9B4VMTQIhwqOYiB14YkW92fFO8k8EPy\"\naccess_token_secret = \"X4dHmhPfaksHcQ7SCbmZa2oYBBVSD2g8uIHXsp5CTaksx\"\nconsumer_key = \"nZ6EA0FxZ293SxGNg8g8aP0HM\"\nconsumer_secret = \"fJGEodwe3KiKUnsYJC3VRndj7jevVvXbK2D5EiJ2nehafRgA6i\"\n\n# Pass OAuth details to tweepy's OAuth handler\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\n\n\n####################################################\n#Need to define a Stream Listener class\nclass MyStreamListener(tweepy.StreamListener):\n def __init__(self, api=None):\n super(MyStreamListener, self).__init__()\n self.num_tweets = 0\n self.file = open(\"tweets.txt\", \"w\")\n\n def on_status(self, status):\n tweet = status._json\n self.file.write( json.dumps(tweet) + '\\n' )\n self.num_tweets += 1\n if self.num_tweets < 100:\n return True\n else:\n return False\n self.file.close()\n\n def on_error(self, status):\n print(status)\n#####################################################\n \n# Initialize Stream listener\nl = MyStreamListener()\n\n# Create your Stream object with authentication\nstream = tweepy.Stream(auth, l)\n\n\n# Filter Twitter Streams to capture data by the keywords:\nstream.filter(track = ['clinton', 'trump', 'sanders', 'cruz'])\n\n\n\n#Once the twitter data is sitting locally:\n# Import package\nimport json\n\n# String of path to file: tweets_data_path\ntweets_data_path = \"tweets.txt\"\n\n# Initialize empty list to store tweets: tweets_data\ntweets_data = []\n\n# Open connection to file\ntweets_file = open(tweets_data_path, \"r\")\n\n# Read in tweets and store in list: tweets_data\nfor line in tweets_file:\n tweet = json.loads(line)\n tweets_data.append(tweet)\n\n# Close connection to file\ntweets_file.close()\n\n# Import package\nimport pandas as pd\n\n# Build DataFrame of tweet texts and languages\ndf = pd.DataFrame(tweets_data, columns=['text', 'lang'])\n\n# Print head of DataFrame\nprint(df.head())\n" }, { "alpha_fraction": 0.6353591084480286, "alphanum_fraction": 0.6390423774719238, "avg_line_length": 20.760000228881836, "blob_id": "2eba7d37b6e1763f7440ec3146edea6f7467099b", "content_id": "7e41c383e9b64943e3a7236692e4edcca51cefaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "no_license", "max_line_length": 67, "num_lines": 25, "path": "/Datacamp/EDA_ECDF.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "def ecdf(data):\n \"\"\"Compute ECDF for a one-dimensional array of measurements.\"\"\"\n # Number of data points: n\n n = len(data)\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n\n return x, y\n \n# Compute ECDF for versicolor data: x_vers, y_vers\nx_vers, y_vers = ecdf(versicolor_petal_length)\n\n# Generate plot\n_ = plt.plot(x_vers, y_vers, marker='.', linestyle='none')\n\n# Label the axes\nplt.xlabel('versicolor_petal_length')\nplt.ylabel('ECDF')\n\n# Display the plot\nplt.show()" }, { "alpha_fraction": 0.5775978565216064, "alphanum_fraction": 0.6261808276176453, "avg_line_length": 16.66666603088379, "blob_id": "65eafbd79675f1a513525e47d2a84d026764553f", "content_id": "d90f9dfa3449bfdf9746d944f78b23780c2a9163", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 741, "license_type": "no_license", "max_line_length": 81, "num_lines": 42, "path": "/Datacamp/pandas.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#Dataframes are made up of Series objects. Each Series is labelled 1D numpy array\n\nimport pandas as pd\n#df is some DataFrame\ndf.head()\ndf.tail()\n\ndf.iloc[1, :]\ndf.loc['row_index', :]\n\n#to return column info\ndf.info()\n\n#to convert DataFrame to numpy array:\ndf.values\n\n#note though that many numpy methods work on pandas dfs\n\n########\n#creating Dataframes from scratch\n########\n\nd = {\"col1\" :[1,3,4,5], \"col2\" : [4,5,6,7]}\ndf = pd.DataFrame(d)\n\ncol1 = [1, 3, 5, 6]\ncol2 = [6, 7, 8, 9]\n\ncols = [col1, col2]\nindices = [\"col1\", \"col2\"]\n\nd = zip(indices, cols)\nd = dict(list(d))\ndf = pd.DataFramed\n\ndf.columns = [\"newcol1\", \"newcol2\"]\n\n#Broadcasting\ndf['col3'] = \"M\"\n\nd = {\"col1\" : [1, 3, 4, 5], \"col2\" : \"M\"}\ndf = pd.DataFrame(d) #Broadcasts col2" }, { "alpha_fraction": 0.7157894968986511, "alphanum_fraction": 0.7204678654670715, "avg_line_length": 22.77777862548828, "blob_id": "494b3849feb4ea4cba0da076de6b2e454ac46ee2", "content_id": "017e529b6cd19e70e56be543675be8e89db4916a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "no_license", "max_line_length": 69, "num_lines": 36, "path": "/Datacamp/Joining_data.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#Row concatenation\nrow_concat = pd.concat([uber1, uber2, uber3])\n#where each is a df\n#Note though that the original row indices will be maintained\n#Use the ignore_index = True to reset the indices in sequential order\n\n#Use the axis =1 to do column concatenation\n\n#If we have many files to concatenate:\n# Import necessary modules\nimport glob\nimport pandas as pd\n\n# Write the pattern: pattern\npattern = '*.csv'\n# * = all strings \n# ? = single character\n\n# Save all file matches: csv_files\ncsv_files = glob.glob(pattern)\n#this gives a list of files that match the pattern\n\n# Create an empty list: frames\nframes = []\n\n# Iterate over csv_files\nfor csv in csv_files:\n\n # Read csv into a DataFrame: df\n df = pd.read_csv(csv)\n \n # Append df to frames\n frames.append(df)\n\n# Concatenate frames into a single DataFrame: uber\nuber = pd.concat(frames)" }, { "alpha_fraction": 0.6553303003311157, "alphanum_fraction": 0.6801831126213074, "avg_line_length": 37.25, "blob_id": "b91417e72720fb958f592e15b3e6ea9e0b720c2d", "content_id": "3efc0c0f8788cc4bf6fc84f31cbf3027d2bb81a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1529, "license_type": "no_license", "max_line_length": 136, "num_lines": 40, "path": "/API/Section4/code/app.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "'''This was created after installing virtualenv. This allows use to create a virtual environment that mimics \na fresh Python install. This ensures that any updates to packages don't affect previous applications built on previous package versions.\n\nRun: conda create -n venv python=3.5.0 anaconda\nto create a virtual env called venv with python 3.5.0\n\nconda activate venv\nconda deactivate'''\n\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api\n\napp = Flask(__name__)\napi = Api(app)\n\nitems = []\n\nclass Item(Resource):\n def get(self, name):\n item = next(filter(lambda x: x['name'] == name, items), None) #if next produces nothing, return None\n return {\"item\" : item}, 200 if item is not None else 404\n \n def post(self, name):\n #Note that the 'Header' and 'Body' need to be set\n if next(filter(lambda x: x['name'] == name, items), None) is not None:\n return {\"message\" : \"an item with name '{}' already exists.\".format(name)}, 400 #400 = bad request\n \n data = request.get_json() #args: force:Forces the content header, silent: returns None (generally don't use)\n item = {'name' : name, 'price' : data['price']}\n items.append(item)\n \n return item, 201 #201 is code for created\n\nclass ItemList(Resource):\n def get(self):\n return{\"items\" : items}\n \napi.add_resource(Item, '/item/<string:name>') #http://127.0.0.1:5000/item/item_name\napi.add_resource(ItemList, '/items')\napp.run(port=5000, debug=True) #debug gives better error messages" }, { "alpha_fraction": 0.49200376868247986, "alphanum_fraction": 0.6547507047653198, "avg_line_length": 23.744186401367188, "blob_id": "09324d64b08bb86610c0a910903efe1e29207ab4", "content_id": "abfd12d62e15923d3e2dbc5720e249b9c223b169", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1063, "license_type": "no_license", "max_line_length": 116, "num_lines": 43, "path": "/Datacamp/idxmax_idxmin.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Create the pivot table: medals_won_by_country\nmedals_won_by_country = medals.pivot_table(index = 'Edition', columns='NOC', values= \"Athlete\", aggfunc='count')\n\n# Slice medals_won_by_country: cold_war_usa_urs_medals\ncold_war_usa_urs_medals = medals_won_by_country.loc[1952:1988, ['USA','URS']]\nNOC USA URS\nEdition \n1952 130.0 117.0\n1956 118.0 169.0\n1960 112.0 169.0\n1964 150.0 174.0\n1968 149.0 188.0\n1972 155.0 211.0\n1976 155.0 285.0\n1980 NaN 442.0\n1984 333.0 NaN\n1988 193.0 294.0\n\n# If .max() returns the maximum value of Series or 1D array, .idxmax() returns the index of the maximizing element. \n# Create most_medals \nmost_medals = cold_war_usa_urs_medals.idxmax(axis='columns')\nEdition\n1952 USA\n1956 URS\n1960 URS\n1964 URS\n1968 URS\n1972 URS\n1976 URS\n1980 URS\n1984 USA\n1988 URS\ndtype: object\n# Print most_medals.value_counts()\nprint(most_medals.value_counts())\n\n\nIn [5]: cold_war_usa_urs_medals.idxmax()\nOut[5]: \nNOC\nUSA 1984\nURS 1980\ndtype: int64" }, { "alpha_fraction": 0.7245131731033325, "alphanum_fraction": 0.7273768782615662, "avg_line_length": 25.469696044921875, "blob_id": "a44f9d239935104a61c3de5c58fa2d860fa0c19f", "content_id": "5251ace1b21dbd3a52e2e0380efdaab036cabea3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1746, "license_type": "no_license", "max_line_length": 104, "num_lines": 66, "path": "/Datacamp/bokeh.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Import figure from bokeh.plotting\nfrom bokeh.plotting import figure\n\n# Import output_file and show from bokeh.io\nfrom bokeh.io import output_file, show\n\n# Create the figure: p\np = figure(x_axis_label='fertility (children per woman)', y_axis_label='female_literacy (% population)')\n\n# Add a circle glyph to the figure p\np.circle(fertility,female_literacy)\n\n# Call the output_file() function and specify the name of the file\noutput_file('fert_lit.html')\n\n# Display the plot\nshow(p)\n\n\n\n# Create the figure: p\np = figure(x_axis_label='fertility', y_axis_label='female_literacy (% population)')\n\n# Add a circle glyph to the figure p\np.circle(fertility_latinamerica, female_literacy_latinamerica, size=10, alpha=0.8, color='blue')\n\n# Add an x glyph to the figure p\np.x(fertility_africa, female_literacy_africa)\n\n# Specify the name of the file\noutput_file('fert_lit_separate.html')\n\n# Display the plot\nshow(p)\n\n\n\n#lines\n# Import figure from bokeh.plotting\nfrom bokeh.plotting import figure\n\n# Create a figure with x_axis_type=\"datetime\": p\np = figure(x_axis_type='datetime', x_axis_label='Date', y_axis_label='US Dollars')\n\n# Plot date along the x axis and price along the y axis\np.line(date, price)\np.circle(date, price, fill_color='white', size=4)\n\n# Specify the name of the output file and show the result\noutput_file('line.html')\nshow(p)\n\n\n#patches\n# Create a list of az_lons, co_lons, nm_lons and ut_lons: x\nx = [az_lons, co_lons, nm_lons, ut_lons]\n\n# Create a list of az_lats, co_lats, nm_lats and ut_lats: y\ny = [az_lats, co_lats, nm_lats, ut_lats]\n\n# Add patches to figure p with line_color=white for x and y\np.patches(x,y, line_color='white')\n\n# Specify the name of the output file and show the result\noutput_file('four_corners.html')\nshow(p)" }, { "alpha_fraction": 0.48697710037231445, "alphanum_fraction": 0.5288082361221313, "avg_line_length": 29.926828384399414, "blob_id": "017367c079788231215b983775dd4d78b21a1d08", "content_id": "fcaaecd21f6a7ae556306baef6007424360e0ee5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1267, "license_type": "no_license", "max_line_length": 56, "num_lines": 41, "path": "/Datacamp/stack_unstack.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#stack does something similar to pivot using the indices\n# Unstack users by 'weekday': byweekday\n\nusers = \n visitors signups\ncity weekday \nAustin Mon 326 3\n Sun 139 7\nDallas Mon 456 5\n Sun 237 12\n \nbyweekday = users.unstack(level = 'weekday')\n\n# Print the byweekday DataFrame\nprint(byweekday)\n visitors signups \n weekday Mon Sun Mon Sun\n city \n Austin 326 139 3 7\n Dallas 456 237 5 12\n \n# Stack byweekday by 'weekday' and print it\nprint(byweekday.stack(level = 'weekday'))\n visitors signups\n city weekday \n Austin Mon 326 3\n Sun 139 7\n Dallas Mon 456 5\n Sun 237 12\n \n# Stack 'city' back into the index of bycity: newusers\nnewusers = bycity.stack(level = \"city\")\n\n# Swap the levels of the index of newusers: newusers\nnewusers = newusers.swaplevel(0,1)\n\n# Print newusers and verify that the index is not sorted\nprint(newusers)\n\n# Sort the index of newusers: newusers\nnewusers = newusers.sort_index()" }, { "alpha_fraction": 0.7475247383117676, "alphanum_fraction": 0.7616690397262573, "avg_line_length": 39.42856979370117, "blob_id": "84e099f94c229c5210715fec59cbfedaaf5636d8", "content_id": "f80dca0d2e3514ff69040791645d8bab57c9bb8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1414, "license_type": "no_license", "max_line_length": 136, "num_lines": 35, "path": "/API/Section6/code/UseDB/app.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "'''This was created after installing virtualenv. This allows use to create a virtual environment that mimics \na fresh Python install. This ensures that any updates to packages don't affect previous applications built on previous package versions.\n\nRun: conda create -n venv python=3.5.0 anaconda\nto create a virtual env called venv with python 3.5.0\n\nconda activate venv\nconda deactivate'''\n\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_jwt import JWT\n\nfrom security import authenticate, identity\nfrom user import UserRegister\nfrom item import Item, ItemList\n\napp = Flask(__name__)\napp.secret_key = \"secret_key\" #this should be long and complicated in a production sense\napi = Api(app)\n\njwt = JWT(app, authenticate, identity) \n'''\nJWT creates an endpoint /auth. When we call /auth we send a username and password, which is passed on to authenticate and identity\nIf authenticate returns a user, and that is the identity and the /auth endpoint returns a JWT\nThe JWT calls the identity function which gets the correct id and returns the user\n'''\n\n \napi.add_resource(Item, '/item/<string:name>') #http://127.0.0.1:5000/item/item_name\napi.add_resource(ItemList, '/items')\napi.add_resource(UserRegister, '/register')\n\nif __name__ == '__main__': #This ensures that this is not run if app.py is imported, but only when called\n app.run(port=5000, debug=True) #debug gives better error messages" }, { "alpha_fraction": 0.45719999074935913, "alphanum_fraction": 0.6435999870300293, "avg_line_length": 27.420454025268555, "blob_id": "0698f00905ff6280ea2f4964b31bb91f679c6aa2", "content_id": "f25ea8089ea342c3b4c6a06a7a21fd81e17db067", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2500, "license_type": "no_license", "max_line_length": 118, "num_lines": 88, "path": "/Datacamp/dataframe_arithmetic.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Extract selected columns from weather as new DataFrame: temps_f\ntemps_f = weather[['Min TemperatureF', 'Mean TemperatureF', 'Max TemperatureF']]\n\n# Convert temps_f to celsius: temps_c\ntemps_c = (temps_f - 32) * 5/9 #broadcasting\n\n# Rename 'F' in column names with 'C': temps_c.columns\ntemps_c.columns = ['Min TemperatureC', 'Mean TemperatureC', 'Max TemperatureC']\n\n# Print first 5 rows of temps_c\nprint(temps_c.head())\n\n\nimport pandas as pd\n\n# Read 'GDP.csv' into a DataFrame: gdp\ngdp = pd.read_csv('GDP.csv', index_col='DATE', parse_dates=True)\n\n# Slice all the gdp data from 2008 onward: post2008\npost2008 = gdp.loc['2008':, :]\n\n# Print the last 8 rows of post2008\nprint(post2008.tail(8))\n VALUE\n DATE \n 2014-07-01 17569.4\n 2014-10-01 17692.2\n 2015-01-01 17783.6\n 2015-04-01 17998.3\n 2015-07-01 18141.9\n 2015-10-01 18222.8\n 2016-01-01 18281.6\n 2016-04-01 18436.5\n \n# Resample post2008 by year, keeping last(): yearly\nyearly = post2008.resample('A').last()\n\n# Print yearly\nprint(yearly)\n VALUE\n DATE \n 2014-07-01 17569.4\n 2014-10-01 17692.2\n 2015-01-01 17783.6\n 2015-04-01 17998.3\n 2015-07-01 18141.9\n 2015-10-01 18222.8\n 2016-01-01 18281.6\n 2016-04-01 18436.5\n \n# Compute percentage growth of yearly: yearly['growth']\nyearly['growth'] = yearly.pct_change()*100\n\n# Print yearly again\nprint(yearly)\n VALUE growth\n DATE \n 2008-12-31 14549.9 NaN\n 2009-12-31 14566.5 0.114090\n 2010-12-31 15230.2 4.556345\n 2011-12-31 15785.3 3.644732\n 2012-12-31 16297.3 3.243524\n 2013-12-31 16999.9 4.311144\n 2014-12-31 17692.2 4.072377\n 2015-12-31 18222.8 2.999062\n 2016-12-31 18436.5 1.172707\n \n# Import pandas\nimport pandas as pd\n\n# Read 'sp500.csv' into a DataFrame: sp500\nsp500 = pd.read_csv('sp500.csv', index_col='Date', parse_dates=True)\n\n# Read 'exchange.csv' into a DataFrame: exchange\nexchange = pd.read_csv('exchange.csv', index_col='Date', parse_dates=True)\n\n# Subset 'Open' & 'Close' columns from sp500: dollars\ndollars = sp500[['Open', 'Close']]\n\n# Print the head of dollars\nprint(dollars.head())\n\n# Convert dollars to pounds: pounds\npounds = dollars.multiply(exchange['GBP/USD'], axis='rows')\n#NOTE: similar add(), subtract(), divide() methods. These offer more flexibility than using standard +, -, / operators\n\n# Print the head of pounds\nprint(pounds.head())" }, { "alpha_fraction": 0.7212475538253784, "alphanum_fraction": 0.7212475538253784, "avg_line_length": 34, "blob_id": "6d2e6a25588c47041ffe56ddc445fa0a6cf7ae83", "content_id": "20c2113d39d955232fa19a5ed4f13db97664298d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 513, "license_type": "no_license", "max_line_length": 124, "num_lines": 14, "path": "/API/Section7/code/security.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "from models.user import UserModel\nfrom werkzeug.security import safe_str_cmp\n\n\ndef authenticate(username, password):\n user = UserModel.find_by_username(username)\n if user is not None and safe_str_cmp(user.password, password): #safe_str_cmp() alleviates issues with string comparison\n return user\n \n#identity function is unique to flask JWT\n#payload is the contents on the JWT Token\ndef identity(payload):\n user_id = payload['identity']\n return UserModel.find_by_id(user_id)\n \n \n \n \n \n " }, { "alpha_fraction": 0.5059101581573486, "alphanum_fraction": 0.5586288571357727, "avg_line_length": 31.79069709777832, "blob_id": "e4860733cb17e4ea1a25b39abd8f478cce08d06d", "content_id": "2253ffc116e3231888f05d2a26a09697c0f46115", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4230, "license_type": "no_license", "max_line_length": 112, "num_lines": 129, "path": "/Datacamp/append_concatdf.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "\n# Append names_1981 after names_1881 with ignore_index=True: combined_names\ncombined_names = names_1881.append(names_1981, ignore_index=True)\n#ignore_index resets the index, else the indices from the original dfs are placed on top of one another\n\n\n# Concatenate weather_max and weather_mean horizontally: weather\nweather = pd.concat([weather_max, weather_mean], axis=1)\n#axis=1 means concat horizontally (this does something similar to a full outer join)\n Max TemperatureF Mean TemperatureF\n Apr 89.0 53.100000\n Aug NaN 70.000000\n Dec NaN 34.935484\n Feb NaN 28.714286\n Jan 68.0 32.354839\n Jul 91.0 72.870968\n Jun NaN 70.133333\n Mar NaN 35.000000\n May NaN 62.612903\n Nov NaN 39.800000\n Oct 84.0 55.451613\n Sep NaN 63.766667\n \nfor medal in medal_types:\n\n # Create the file name: file_name\n file_name = \"%s_top5.csv\" % medal\n \n # Create list of column names: columns\n columns = ['Country', medal]\n \n # Read file_name into a DataFrame: df\n medal_df = pd.read_csv(file_name, header=0, index_col='Country', names=columns) #names sets the column names\n \n # Append medal_df to medals\n medals.append(medal_df)\n\n# Concatenate medals horizontally: medals\nmedals = pd.concat(medals, axis='columns') #same as axis=1\n\n# Print medals\nprint(medals)\n\n\n#using multi level indexes:\nfor medal in medal_types:\n\n file_name = \"%s_top5.csv\" % medal\n \n # Read file_name into a DataFrame: medal_df\n medal_df = pd.read_csv(file_name, index_col='Country')\n \n # Append medal_df to medals\n medals.append(medal_df)\n \n# Concatenate medals: medals\nmedals = pd.concat(medals, axis='rows', keys=['bronze', 'silver', 'gold'])\n\n# Print medals in entirety\nprint(medals)\n Total\n Country \n bronze United States 1052.0\n Soviet Union 584.0\n United Kingdom 505.0\n France 475.0\n Germany 454.0\n silver United States 1195.0\n Soviet Union 627.0\n United Kingdom 591.0\n France 461.0\n Italy 394.0\n gold United States 2088.0\n Soviet Union 838.0\n United Kingdom 498.0\n Italy 460.0\n Germany 407.0\n \n# Sort the entries of medals: medals_sorted\nmedals_sorted = medals.sort_index(level=0)\n\n# Print the number of Bronze medals won by Germany\nprint(medals_sorted.loc[('bronze','Germany')])\n\n# Print data about silver medals\nprint(medals_sorted.loc['silver'])\n\n# Create alias for pd.IndexSlice: idx\n#A slicer pd.IndexSlice is required when slicing on the inner level of a MultiIndex\nidx = pd.IndexSlice\n\n# Print all the data on medals won by the United Kingdom\nprint(medals_sorted.loc[idx[:,'United Kingdom'], :])\n\n# Make the list of tuples: month_list\nmonth_list = [('january', jan), ('february', feb), ('march', mar)]\n\n# Create an empty dictionary: month_dict\nmonth_dict = {}\n\nfor month_name, month_data in month_list:\n\n # Group month_data: month_dict[month_name]\n month_dict[month_name] = month_data.groupby('Company').sum()\n\n# Concatenate data in month_dict: sales\nsales = pd.concat(month_dict)\n\n# Print sales\nprint(sales)\n Units\n Company \n february Acme Coporation 34\n Hooli 30\n Initech 30\n Mediacore 45\n Streeplex 37\n january Acme Coporation 76\n Hooli 70\n Initech 37\n Mediacore 15\n Streeplex 50\n march Acme Coporation 5\n Hooli 37\n Initech 68\n Mediacore 68\n Streeplex 40\n# Print all sales by Mediacore\nidx = pd.IndexSlice\nprint(sales.loc[idx[:, 'Mediacore'], :])" }, { "alpha_fraction": 0.748314619064331, "alphanum_fraction": 0.748314619064331, "avg_line_length": 39.45454406738281, "blob_id": "fd4db3fc706e0dc47e68af9f0f06830da996f2b4", "content_id": "a8c6b73ceeb205f82c6950328231c813e18b425f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 110, "num_lines": 11, "path": "/Datacamp/data-types.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Convert the sex column to type 'category'\ntips.sex = tips.sex.astype('category') #converting to categorical vars helps with memory and further analysis\n\n# Convert the smoker column to type 'category'\ntips.smoker = tips.smoker.astype('category')\n\n# Print the info of tips\nprint(tips.info())\n\n#sometimes we may need to tell python how to deal with values it can't convert\ntips['total_bill'] = pd.to_numeric(tips['total_bill'], errors='coerce')\n" }, { "alpha_fraction": 0.5775862336158752, "alphanum_fraction": 0.5927750468254089, "avg_line_length": 27.67058753967285, "blob_id": "061c4832e03aac88ce22f1be72b9e87d90b9143a", "content_id": "6cd92a235400083ce98b8406cd6b50c9ac0b845b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2436, "license_type": "no_license", "max_line_length": 118, "num_lines": 85, "path": "/API/storeapp.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 24 16:43:47 2018\n\n@author: Damien\n\"\"\"\n\nfrom flask import Flask, jsonify, request, render_template\n\n#NOTE on JSON: JSON are essentially dictionaries but in string format. Thus we need to convert of Python dicts to text\napp = Flask(__name__) #unique __name__ - special python variable\nstores = [\n {\n 'name': 'My Store',\n 'items': [\n {\n 'name':'My Item',\n 'price':15.99\n }\n ]\n }\n]\n\[email protected]('/')\ndef home():\n return render_template('index.html') #Looks in template folder\n\n#POST - recieves data\n#GET - send data back\n\n##End points we are going to define\n#POST /store data: {name:}\[email protected]('/store', methods = ['POST']) #default to GET\ndef create_store():\n request_data = request.get_json()\n new_store = {\n 'name': request_data['name'],\n 'items' : []\n }\n \n stores.append(new_store)\n return jsonify(new_store)\n\n#GET /store/<string:name>\[email protected]('/store/<string:name>') #<string:name> is a flask keyword\ndef get_store(name):\n for store in stores:\n if store['name'] == name:\n return jsonify(store)\n else:\n return jsonify({'message' : 'No such store'})\n\n#GET /store\[email protected]('/store')\ndef get_stores():\n return jsonify({'stores' : stores}) #convert list to dictionary\n\n#POST /store/<string:name>/item {name:, price:}\[email protected]('/store/<string:name>/item', methods = ['POST']) #default to GET\ndef create_item(name):\n request_data = request.get_json()\n for store in stores:\n if store['name'] == name:\n new_item = {\n 'name' : request_data['name'],\n 'price' : request_data['price']\n }\n store['items'].append(new_item)\n return jsonify(new_item)\n else:\n return jsonify({\"message\" : \" No such store\"})\n#GET /store/<string:name>/item\[email protected]('/store/<string:name>/item') #<string:name> is a flask keyword\ndef get_item_in_store(name):\n for store in stores:\n if store['name'] == name:\n return jsonify({'items' : store['items']})\n else:\n return jsonify({'message' : 'No such store'})\n\n\napp.run(port=5000) #app runs on port/area of computer that the computer sends and recieces requests\n\n#run from conda \"python app.py\"\n#copy 127.0.0.1:5000 into browswer (127.0.0.1 is the IP reserved fro your computer)" }, { "alpha_fraction": 0.6980727910995483, "alphanum_fraction": 0.700214147567749, "avg_line_length": 36.95833206176758, "blob_id": "1b703f65bd3f130824a7919173af37711cbbd0b4", "content_id": "6d4f0a1639e4b093298748180c48d6d9994b803b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 934, "license_type": "no_license", "max_line_length": 124, "num_lines": 24, "path": "/API/Section6/code/NoDB/security.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "from user import User\nfrom werkzeug.security import safe_str_cmp\n\n#some database\nusers = [\n User(1, \"bob\", \"asdf\"), \n User(2, \"Damien\", \"bitches\")\n ]\n\n\n#below allows us to find the user by username or ID without having to iterate over the above list\nusername_mapping = {u.username : u for u in users} #list comprehension where the function is a key:value pair\nuserid_mapping = {u.id : u for u in users}\n\ndef authenticate(username, password):\n user = username_mapping.get(username, None) #note that this is the same as the [] notation, but allows a default value\n if user is not None and safe_str_cmp(user.password, password): #safe_str_cmp() alleviates issues with string comparison\n return user\n \n#identity function is unique to flask JWT\n#payload is the contents on the JWT Token\ndef identity(payload):\n user_id = payload['identity']\n return userid_mapping.get(user_id, None)\n \n \n \n \n \n " }, { "alpha_fraction": 0.682675838470459, "alphanum_fraction": 0.7092624306678772, "avg_line_length": 25.522727966308594, "blob_id": "5fae63c4fcddc793a8aa6ca40c0a159388202bc6", "content_id": "6856e5a7b69b4e44194abfaef0997a921a0a907c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1166, "license_type": "no_license", "max_line_length": 105, "num_lines": 44, "path": "/Datacamp/bokeh_linked_plots.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#Link the ranges with panning\n# Link the x_range of p2 to p1: p2.x_range\np2.x_range = p1.x_range\n\n# Link the y_range of p2 to p1: p2.y_range\np2.y_range = p1.y_range\n\n# Link the x_range of p3 to p1: p3.x_range\np3.x_range = p1.x_range\n\n# Link the y_range of p4 to p1: p4.y_range\np4.y_range = p1.y_range\n\n# Specify the name of the output_file and show the result\noutput_file('linked_range.html')\nshow(layout)\n\n\n\n#Link selection\n# Create ColumnDataSource: source\nsource = ColumnDataSource(data)\n\n# Create the first figure: p1\np1 = figure(x_axis_label='fertility (children per woman)', y_axis_label='female literacy (% population)',\n tools='box_select,lasso_select')\n\n# Add a circle glyph to p1\np1.circle('fertility', 'female literacy', source=source)\n\n# Create the second figure: p2\np2 = figure(x_axis_label='fertility (children per woman)', y_axis_label='population (millions)',\n tools='box_select,lasso_select')\n\n# Ad\np2.circle('fertility', 'population', source=source)\n\n\n# Create row layout of figures p1 and p2: layout\nlayout = row(p1, p2)\n\n# Specify the name of the output_file and show the result\noutput_file('linked_brush.html')\nshow(layout)" }, { "alpha_fraction": 0.686274528503418, "alphanum_fraction": 0.6980392336845398, "avg_line_length": 25.010204315185547, "blob_id": "d2fda93ac691b58c4d7835301449c30f2f6bdef7", "content_id": "3ff0abcdd36fb64fc684799c17be5cc4ceae3804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2550, "license_type": "no_license", "max_line_length": 148, "num_lines": 98, "path": "/Datacamp/Reading_Data.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#Basics of reading in:\nfilename = 'file.txt'\nfile = open(filename, mode = 'r') #'r' is top read, 'w' is to write\ntext = file.read()\nfile.close()\n\nwith open('huck_finn.txt', 'r') as file: #with is referred to as the context manager\n print(file.read()) \n \n \n#Using NumPy - for numeric arrays\n #This allows use of sci-kit learn\nimport numpy as np\n\n#Can use: \ndata = np.loadtxt(filename, delimiter = \"'\", skiprows = 1, usecols=[0, 2], dtype=str)\n\n#Alternatively, use Pandas (this is preferable)\n\nimport pandas as pd\n\ndata = pd.read_csv(filename, sep = '\\t', comment='#', na_values='Nothing') #comment drops everything after '#', na_values are user specified nulls\n#header=0 and names=new_names will label the rows\n#parse_date does something\n#index_col specifies which col should be the index\n \n \ndata.head() #prints first 5 rows .head(10) displays 10 rows\n\ndata_array = data.values #converts to numpy array\n\n#Other types of import files:\n\n#Pickled file: files containing python data structures that don't traslate to an obvious readible form (i.e. dicts, lists, tuples)\n# Import pickle package\nimport pickle\n\n# Open pickle file and load data: d\nwith open('data.pkl', 'rb') as file:\n d = pickle.load(file)\n \n#Excel\nfile = \"excel.xlsx\"\ndata = pd.ExcelFile(file)\n\nprint(data.sheet_names)\n\ndf1 = data.parse('name_of_sheet')\ndf2 = data.parse(1) #index of sheet\ndf1 = data.parse(0, skiprows=[1], names=['Country', 'AAM due to War (2002)'])\n\n#SAS \n# Import sas7bdat package\nfrom sas7bdat import SAS7BDAT\n\n# Save file to a DataFrame: df_sas\nwith SAS7BDAT('sales.sas7bdat') as file:\n df_sas = file.to_data_frame()\n \n#Stata\n# Import pandas\nimport pandas as pd\n\n# Load Stata file into a pandas DataFrame: df\ndf = pd.read_stata('disarea.dta')\n\n#HDF5 (Hierarchical Data Format version 5)\nimport h5py\n\n# Assign filename: file\nfile = 'LIGO_data.hdf5'\n\n# Load file: data\ndata = h5py.File(file, 'r')\n\n# Print the datatype of the loaded file\nprint(type(data))\n\n# Print the keys of the file. HDF5 files have a heirarchical structure that can be drilled down using the keys\nfor key in data.keys():\n print(key)\n\ngroup = data['strain']\n\n# Check out keys of group\nfor key in group.keys():\n print(key)\n\n# Set variable equal to time series data: strain\nstrain = data['strain']['Strain'].value \n\n\n#MATLAB\n# Import package\nimport scipy.io\n\n# Load MATLAB file: mat\nmat = scipy.io.loadmat('albeck_gene_expression.mat') #loads a dict with the variables : values of thingfs that were saved in the MATLAB workspace\n\n" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.7128919959068298, "avg_line_length": 29.553192138671875, "blob_id": "b3d42b754bf5eb7d3ede18029da70881e863fb03", "content_id": "98a4cae4065c2a44a3744a2334de496a49d0a0ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1435, "license_type": "no_license", "max_line_length": 121, "num_lines": 47, "path": "/Datacamp/seaborn.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Import plotting modules\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Plot a linear regression between 'weight' and 'hp'\nsns.lmplot(x='weight', y='hp', data=auto)\n\n# Display the plot\nplt.show()\n\n#RESIDUALS\n# Import plotting modules\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Generate a green residual plot of the regression between 'hp' and 'mpg'\nsns.residplot(x='hp', y='mpg', data=auto, color='green')\n\n# Display the plot\nplt.show()\n\n#HIGHER ORDER\n# Generate a scatter plot of 'weight' and 'mpg' using red circles\nplt.scatter(auto['weight'], auto['mpg'], label='data', color='red', marker='o')\n\n# Plot in blue a linear regression of order 1 between 'weight' and 'mpg'\nsns.regplot(x='weight', y='mpg', data=auto, label='order 1', color='blue', order=1, scatter=None)\n\n# Plot in green a linear regression of order 2 between 'weight' and 'mpg'\nsns.regplot(x='weight', y='mpg', data=auto, label='order 2', color='green', order=2, scatter=None)\n\n# Add a legend and display the plot\nplt.legend(loc='upper right')\nplt.show()\n\n\n# Plot a linear regression between 'weight' and 'hp', with a hue (specifies categories) of 'origin' and palette of 'Set1'\nsns.lmplot('weight', 'hp', data=auto, hue='origin', palette='Set1')\n\n# Display the plot\nplt.show()\n\n# Plot linear regressions between 'weight' and 'hp' grouped row-wise by 'origin'\nsns.lmplot('weight', 'hp', data=auto, row='origin')\n\n# Display the plot\nplt.show()" }, { "alpha_fraction": 0.7501837015151978, "alphanum_fraction": 0.7663483023643494, "avg_line_length": 27.35416603088379, "blob_id": "c98bbd90ad0b681c72b2fcab869e24011b9df5c7", "content_id": "a8d51c913ad634708e09d6fb62534bd48a1d9e54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1361, "license_type": "no_license", "max_line_length": 75, "num_lines": 48, "path": "/Datacamp/sqlalchemy_grouping_labeling.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "\n# Build a query to count the distinct states values: stmt\nstmt = select([func.count(census.columns.state.distinct())])\n\n# Execute the query and store the scalar result: distinct_state_count\ndistinct_state_count = connection.execute(stmt).scalar()\n\n# Print the distinct_state_count\nprint(distinct_state_count)\n\n# Import func\nfrom sqlalchemy import func\n\n# Build a query to select the state and count of ages by state: stmt\nstmt = select([census.columns.state, func.count(census.columns.age)])\n\n# Group stmt by state\nstmt = stmt.group_by(census.columns.state)\n\n# Execute the statement and store all the records: results\nresults = connection.execute(stmt).fetchall()\n\n# Print results\nprint(results)\n\n# Print the keys/column names of the results returned\nprint(results[0].keys())\n\n\n# Import func\nfrom sqlalchemy import func\n\n# Build an expression to calculate the sum of pop2008 labeled as population\npop2008_sum = func.sum(census.columns.pop2008).label('population')\n\n# Build a query to select the state and sum of pop2008: stmt\nstmt = select([census.columns.state, pop2008_sum])\n\n# Group stmt by state\nstmt = stmt.group_by(census.columns.state)\n\n# Execute the statement and store all the records: results\nresults = connection.execute(stmt).fetchall()\n\n# Print results\nprint(results)\n\n# Print the keys/column names of the results returned\nprint(results[0].keys())" }, { "alpha_fraction": 0.5472972989082336, "alphanum_fraction": 0.5957207083702087, "avg_line_length": 27.677419662475586, "blob_id": "68eea3d2477e8c996e8b1de0bc910dd72dffeb53", "content_id": "fda0da5fdffefaaaaed55ad9f2f3c31ad3e7a9a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 888, "license_type": "no_license", "max_line_length": 88, "num_lines": 31, "path": "/Datacamp/melting.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#melting restores pivoted dfs\n\nvisitors = pd.melt(visitors_by_city_weekday, id_vars=['weekday'], value_name='visitors')\n#id_vars specify columns to maintain\n#value_names specify name of column containing the values\n\n# Set the new index: users_idx\nusers_idx = users.set_index(['city', 'weekday'])\n\n# Print the users_idx DataFrame\nprint(users_idx)\nvisitors signups\n city weekday \n Austin Sun 139 7\n Dallas Sun 237 12\n Austin Mon 326 3\n Dallas Mon 456 5\n# Obtain the key-value pairs: kv_pairs\nkv_pairs = pd.melt(users_idx, col_level=0)\n\n# Print the key-value pairs\nprint(kv_pairs)\n variable value\n 0 visitors 139\n 1 visitors 237\n 2 visitors 326\n 3 visitors 456\n 4 signups 7\n 5 signups 12\n 6 signups 3\n 7 signups 5" }, { "alpha_fraction": 0.5257675647735596, "alphanum_fraction": 0.5657894611358643, "avg_line_length": 34.096153259277344, "blob_id": "6a1ba6a92ca68e929103dead89df8ab7d5e7088f", "content_id": "205d44063743dca29e0b916dd8763d1cc65e0595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1824, "license_type": "no_license", "max_line_length": 92, "num_lines": 52, "path": "/Datacamp/pivit_tables.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#pivot tables aggregate data with duplicate indices\n\n weekday city visitors signups\n0 Sun Austin 139 7\n1 Sun Dallas 237 12\n2 Mon Austin 326 3\n3 Mon Dallas 456 5\n\n# Create the DataFrame with the appropriate pivot table: by_city_day\nby_city_day = users.pivot_table(index = 'weekday', columns = \"city\")\n\n# Print by_city_day\nprint(by_city_day)\n signups visitors \n city Austin Dallas Austin Dallas\n weekday \n Mon 3 5 326 456\n Sun 7 12 139 237\n \n # Use a pivot table to display the count of each column: count_by_weekday1\ncount_by_weekday1 = users.pivot_table(index='weekday', aggfunc='count')\n\n# Print count_by_weekday\nprint(count_by_weekday1)\n city signups visitors\n weekday \n Mon 2 2 2\n Sun 2 2 2\n\n# Replace 'aggfunc='count'' with 'aggfunc=len': count_by_weekday2\ncount_by_weekday2 = users.pivot_table(index='weekday', aggfunc=len)\n\n# Create the DataFrame with the appropriate pivot table: signups_and_visitors\nsignups_and_visitors = users.pivot_table(index = \"weekday\", aggfunc=sum)\n\n# Print signups_and_visitors\nprint(signups_and_visitors)\n signups visitors\n weekday \n Mon 8 782\n Sun 19 376\n\n# Add in the margins: signups_and_visitors_total \nsignups_and_visitors_total = users.pivot_table(index = \"weekday\", aggfunc=sum, margins=True)\n\n# Print signups_and_visitors_total\nprint(signups_and_visitors_total)\n signups visitors\n weekday \n Mon 8 782\n Sun 19 376\n All 27 1158" }, { "alpha_fraction": 0.6972222328186035, "alphanum_fraction": 0.7277777791023254, "avg_line_length": 33.870967864990234, "blob_id": "fc9f5b60ddb7902b882a7e14fafb27735f0a7e34", "content_id": "355a832bdaf0a3bc477969815d991e428d0f1c1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1080, "license_type": "no_license", "max_line_length": 80, "num_lines": 31, "path": "/Datacamp/hypothesis_testing_with_one_dataset.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Make an array of translated impact forces: translated_force_b\ntranslated_force_b = force_b - np.mean(force_b) + 0.55\n\n# Take bootstrap replicates of Frog B's translated impact forces: bs_replicates\nbs_replicates = draw_bs_reps(translated_force_b, np.mean, 10000)\n\n# Compute fraction of replicates that are less than the observed Frog B force: p\np = np.sum(bs_replicates <= np.mean(force_b)) / 10000\n\n# Print the p-value\nprint('p = ', p)\n\n\n\n# Compute mean of all forces: mean_force\nmean_force = np.mean(forces_concat)\n\n# Generate shifted arrays\nforce_a_shifted = force_a - np.mean(force_a) + mean_force\nforce_b_shifted = force_b - np.mean(force_b) + mean_force \n\n# Compute 10,000 bootstrap replicates from shifted arrays\nbs_replicates_a = draw_bs_reps(force_a_shifted, np.mean, 10000)\nbs_replicates_b = draw_bs_reps(force_b_shifted, np.mean, 10000)\n\n# Get replicates of difference of means: bs_replicates\nbs_replicates = bs_replicates_a-bs_replicates_b\n\n# Compute and print p-value: p\np = np.sum(bs_replicates >= (np.mean(force_a)-np.mean(force_b))) / 10000\nprint('p-value =', p)" }, { "alpha_fraction": 0.48089590668678284, "alphanum_fraction": 0.5612648129463196, "avg_line_length": 29.399999618530273, "blob_id": "e8cf67340758e302f57e1ab644c3290cd3c34214", "content_id": "e0abefbd2359a45702d874c6c4d6c258482ae692", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 759, "license_type": "no_license", "max_line_length": 66, "num_lines": 25, "path": "/Datacamp/multi_indexing.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#Sometimes we may want multiple row indexes in a heirachical order\n# Set the index to be the columns ['state', 'month']: sales\nsales = sales.set_index(['state', 'month'])\n\n# Sort the MultiIndex: sales\nsales = sales.sort_index()\n\nsales = \n eggs salt spam\nstate month \nCA 1 47 12.0 17\n 2 110 50.0 31\nNY 1 221 89.0 72\n 2 77 87.0 20\nTX 1 132 NaN 52\n 2 205 60.0 55\n \n# Look up data for NY in month 1: NY_month1\nNY_month1 = sales.loc[('NY', 1)]\n\n# Look up data for CA and TX in month 2: CA_TX_month2\nCA_TX_month2 = sales.loc[(['CA', 'TX'], 2),:]\n\n# Look up data for all states in month 2: all_month2\nall_month2 = sales.loc[(slice(None), 2),:]" }, { "alpha_fraction": 0.6647214889526367, "alphanum_fraction": 0.6843501329421997, "avg_line_length": 25.53521156311035, "blob_id": "658e0554c38ab7420d6f9bb6714962af0d3d8612", "content_id": "97487dce35423798196d127435e76cad374a570f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1885, "license_type": "no_license", "max_line_length": 95, "num_lines": 71, "path": "/Datacamp/bootstrapping.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "\nfor i in range(50):\n # Generate bootstrap sample: bs_sample\n bs_sample = np.random.choice(rainfall, size=len(rainfall))\n\n # Compute and plot ECDF from bootstrap sample\n x, y = ecdf(bs_sample)\n _ = plt.plot(x=x, y=y, marker='.', linestyle='none',\n color='gray', alpha=0.1)\n\n# Compute and plot ECDF from original data\nx, y = ecdf(rainfall)\n_ = plt.plot(x=x, y=y, marker='.')\n\n# Make margins and label axes\nplt.margins(0.02)\n_ = plt.xlabel('yearly rainfall (mm)')\n_ = plt.ylabel('ECDF')\n\n# Show the plot\nplt.show()\n\n\ndef draw_bs_reps(data, func, size=1):\n \"\"\"Draw bootstrap replicates.\"\"\"\n\n # Initialize array of replicates: bs_replicates\n bs_replicates = np.empty(size)\n\n # Generate replicates\n for i in range(size):\n bs_replicates[i] = bootstrap_replicate_1d(data, func) #applies func to bootstrap sample\n\n return bs_replicates\n \n# Take 10,000 bootstrap replicates of the mean: bs_replicates\nbs_replicates = draw_bs_reps(rainfall,np.mean,10000)\n\n# Compute and print SEM\nsem = np.std(rainfall) / np.sqrt(len(rainfall))\nprint(sem)\n\n# Compute and print standard deviation of bootstrap replicates\nbs_std = np.std(bs_replicates)\nprint(bs_std)\n\n# Make a histogram of the results\n_ = plt.hist(bs_replicates, bins=50, normed=True)\n_ = plt.xlabel('mean annual rainfall (mm)')\n_ = plt.ylabel('PDF')\n\n# Show the plot\nplt.show()\n\n\n\n# Draw bootstrap replicates of the mean no-hitter time (equal to tau): bs_replicates\nbs_replicates = draw_bs_reps(nohitter_times, np.mean, 10000)\n\n# Compute the 95% confidence interval: conf_int\nconf_int = np.percentile(bs_replicates, [2.5, 97.5])\n\n# Print the confidence interval\nprint('95% confidence interval =', conf_int, 'games')\n\n# Plot the histogram of the replicates\n_ = plt.hist(bs_replicates, bins=50, normed=True)\n_ = plt.xlabel(r'$\\tau$ (games)')\n_ = plt.ylabel('PDF')\n\n# Show the plot\nplt.show()\n" }, { "alpha_fraction": 0.6865900158882141, "alphanum_fraction": 0.6896551847457886, "avg_line_length": 30.85365867614746, "blob_id": "267775da1cb2f296837c11f6ed42f6563009d6c8", "content_id": "dda3aa04cf40c096c7afd9d6fa95c2d63bae764b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1305, "license_type": "no_license", "max_line_length": 114, "num_lines": 41, "path": "/Datacamp/merging.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Merge revenue with managers on 'city': merge_by_city\nmerge_by_city = pd.merge(revenue, managers, on='city')\n\n# Print merge_by_city\nprint(merge_by_city)\n\n# Merge revenue with managers on 'branch_id': merge_by_id\nmerge_by_id = pd.merge(revenue, managers, on='branch_id')\n\n# Print merge_by_id\nprint(merge_by_id)\n\n# Add 'state' column to revenue: revenue['state']\nrevenue['state'] = ['TX','CO','IL','CA']\n\n# Add 'state' column to managers: managers['state']\nmanagers['state'] = ['TX','CO','CA', 'MO']\n\n# Merge revenue & managers on 'branch_id', 'city', & 'state': combined\ncombined = pd.merge(revenue, managers, on=['branch_id', 'city','state'])\n\n# Print combined\nprint(combined)\n\n#matching columns are suffixed with _x, _y. This can be changed with 'suffixes = [..., ...]' arg\n\n\no2o = pd.merge(left=site, right=visited, left_on='name', right_on='site')\n#This will handle 1-to-1, many-to-1 and many-to-many merges\n\n# Merge revenue and sales: revenue_and_sales\nrevenue_and_sales = pd.merge(revenue, sales, how='right',on=['city', 'state'])\n\n# Print revenue_and_sales\nprint(revenue_and_sales)\n\n# Merge sales and managers: sales_and_managers\nsales_and_managers = pd.merge(sales, managers, how='left',left_on=['city', 'state'], right_on=['branch', 'state'])\n\n# Print sales_and_managers\nprint(sales_and_managers)" }, { "alpha_fraction": 0.7153638601303101, "alphanum_fraction": 0.7261455655097961, "avg_line_length": 31, "blob_id": "0b893cb1290fde2d65b70989afb83b034351b194", "content_id": "e1e20f622fd12191ce36b5c1f207e57227b3957d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1855, "license_type": "no_license", "max_line_length": 82, "num_lines": 58, "path": "/Datacamp/sqlalchemy_more_statements.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Create a select query: stmt\nstmt = select([census])\n\n# Add a where clause to filter the results to only those for New York\nstmt = stmt.where(census.columns.state == 'New York')\n\n# Execute the query to retrieve all the data returned: results\nresults = connection.execute(stmt).fetchall()\n\n# Loop over the results and print the age, sex, and pop2008\nfor result in results:\n print(result.age, result.sex, result.pop2008)\n \n # Create a query for the census table: stmt\nstmt = select([census])\n\n# Append a where clause to match all the states in_ the list states\nstmt = stmt.where(census.columns.state.in_(states))\n\n# Loop over the ResultProxy and print the state and its population in 2000\nfor i in connection.execute(stmt).fetchall():\n print(i.state, i.pop2000)\n \n# Import and_\nfrom sqlalchemy import and_\n\n# Build a query for the census table: stmt\nstmt = select([census])\n\n# Append a where clause to select only non-male records from California using and_\nstmt = stmt.where(\n # The state of California with a non-male sex\n and_(census.columns.state == 'California',\n census.columns.sex != 'M'\n )\n)\n\n# Loop over the ResultProxy printing the age and sex\nfor result in connection.execute(stmt).fetchall():\n print(result.age, result.sex)\n \n# Build a query to select the state column: stmt\nstmt = select([census.columns.state])\n\n# Order stmt by the state column\nstmt = stmt.order_by(census.columns.state) #desc(census.columns.state)\n\n## Build a query to select state and age: stmt\n#stmt = select([census.columns.state, census.columns.age])\n#\n## Append order by to ascend by state and descend by age\n#stmt = stmt.order_by(census.columns.state, desc(census.columns.age))\n\n# Execute the query and store the results: results\nresults = connection.execute(stmt).fetchall()\n\n# Print the first 10 results\nprint(results[:10])" }, { "alpha_fraction": 0.41860464215278625, "alphanum_fraction": 0.5581395626068115, "avg_line_length": 13.166666984558105, "blob_id": "7f616d2452b0f35b11b5c8b5882793c8a93fd067", "content_id": "69f5f70fced56cb167467ac3a7f1e7a2f44ce853", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 35, "num_lines": 6, "path": "/API/Section7/code/models/__init__.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 7 11:20:39 2018\n\n@author: Damien\n\"\"\"\n\n" }, { "alpha_fraction": 0.7604422569274902, "alphanum_fraction": 0.7616707682609558, "avg_line_length": 29.16666603088379, "blob_id": "8864966866270a5f35685260bd0bbde4327bcaa5", "content_id": "0676a72749d4ce6d28e049ad69032763a65ab3ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1628, "license_type": "no_license", "max_line_length": 138, "num_lines": 54, "path": "/Datacamp/sqlalchemy_statements.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Import create_engine\nfrom sqlalchemy import create_engine\n\n# Create an engine that connects to the census.sqlite file: engine\nengine = create_engine('sqlite:///census.sqlite')\n\nconnection = engine.connect()\n\n# Build select statement for census table: stmt\nstmt = \"SELECT * FROM census\"\n\n# Execute the statement and fetch the results: results\nresults = connection.execute(stmt).fetchall()\n\n# Print results\nprint(results)\n\n#ALTERNATIVELY\n# Import select\nfrom sqlalchemy import select\n\n# Reflect census table via engine: census\ncensus = Table('census', metadata, autoload=True, autoload_with=engine)\n\n# Build select statement for census table: stmt\nstmt = select([census])\n\n# Print the emitted statement to see the SQL emitted\nprint(stmt)\n\n# Execute the statement and print the results\nprint(connection.execute(stmt).fetchall())\n\n#\n#Recall the differences between a ResultProxy and a ResultSet:\n#\n# ResultProxy: The object returned by the .execute() method. It can be used in a variety of ways to get the data returned by the query.\n# ResultSet: The actual data asked for in the query when using a fetch method such as .fetchall() on a ResultProxy.\n\n#This separation between the ResultSet and ResultProxy allows us to fetch as much or as little data as we desire.\n\nresults = connection.execute(stmt).fetchall()\n\n# Get the first row of the results by using an index: first_row\nfirst_row = results[0]\n\n# Print the first row of the results\nprint(first_row)\n\n# Print the first column of the first row by using an index\nprint(first_row[0])\n\n# Print the 'state' column of the first row by using its name\nprint(first_row['state'])" }, { "alpha_fraction": 0.6944257616996765, "alphanum_fraction": 0.7132303714752197, "avg_line_length": 23.409835815429688, "blob_id": "feae05fbf008b92af98d9f1c8c36382a619f3f65", "content_id": "cd52443c53333f801f0f84a34c44a4f86f8e75de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1489, "license_type": "no_license", "max_line_length": 81, "num_lines": 61, "path": "/Datacamp/parameter_optimisation.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Seed random number generator\nnp.random.seed(42)\n\n# Compute mean no-hitter time: tau\ntau = np.mean(nohitter_times)\n\n# Draw out of an exponential distribution with parameter tau: inter_nohitter_time\ninter_nohitter_time = np.random.exponential(tau, 100000)\n\n# Plot the PDF and label axes\n_ = plt.hist(inter_nohitter_time,\n bins=50, normed=True, histtype='step')\n_ = plt.xlabel('Games between no-hitters')\n_ = plt.ylabel('PDF')\n\n# Show the plot\nplt.show()\n\n#Verigy using cdf\n# Create an ECDF from real data: x, y\nx, y = ecdf(nohitter_times)\n\n# Create a CDF from theoretical samples: x_theor, y_theor\nx_theor, y_theor = ecdf(inter_nohitter_time)\n\n# Overlay the plots\nplt.plot(x=x_theor, y=y_theor)\nplt.plot(x=x, y=y, marker='.', linestyle='none')\n\n# Margins and axis labels\nplt.margins(0.02)\nplt.xlabel('Games between no-hitters')\nplt.ylabel('CDF')\n\n# Show the plot\nplt.show()\n\n\n# Plot the theoretical CDFs\nplt.plot(x_theor, y_theor)\nplt.plot(x, y, marker='.', linestyle='none')\nplt.margins(0.02)\nplt.xlabel('Games between no-hitters')\nplt.ylabel('CDF')\n\n# Take samples with half tau: samples_half\nsamples_half = np.random.exponential(tau/2,10000)\n\n# Take samples with double tau: samples_double\nsamples_double = np.random.exponential(2*tau,10000)\n\n# Generate CDFs from these samples\nx_half, y_half = ecdf(samples_half)\nx_double, y_double = ecdf(samples_double)\n\n# Plot these CDFs as lines\n_ = plt.plot(x_half, y_half)\n_ = plt.plot(x_double, y_double)\n\n# Show the plot\nplt.show()\n" }, { "alpha_fraction": 0.7155796885490417, "alphanum_fraction": 0.7228260636329651, "avg_line_length": 20.627450942993164, "blob_id": "3121d8d1f5e25a5c2ca12f4a73f07994ed0dd0bf", "content_id": "ef7534b95118b4ef45499f318cbf195078552af4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1104, "license_type": "no_license", "max_line_length": 61, "num_lines": 51, "path": "/Datacamp/bokeh_numpy_pandas.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "\n# Import numpy as np\nimport numpy as np\n\n# Create array using np.linspace: x\nx = np.linspace(0,5,100)\n\n# Create array using np.cos: y\ny = np.cos(x)\n\n# Add circles at x and y\np.circle(x,y)\n\n# Specify the name of the output file and show the result\noutput_file('numpy.html')\nshow(p)\n\n\n#pandas\n# Import pandas as pd\nimport pandas as pd\n\n# Read in the CSV file: df\ndf = pd.read_csv('auto.csv')\n\n# Import figure from bokeh.plottin\nfrom bokeh.plotting import figure\n\n# Create the figure: p\np = figure(x_axis_label='HP', y_axis_label='MPG')\n\n# Plot mpg vs hp by color\np.circle( df['hp'], df['mpg'], color=df['color'], size=10)\n\n# Specify the name of the output file and show the result\noutput_file('auto-df.html')\nshow(p)\n\n\n#ColumnDataSource\n# Import the ColumnDataSource class from bokeh.plotting\nfrom bokeh.plotting import ColumnDataSource\n\n# Create a ColumnDataSource from df: source\nsource = ColumnDataSource(df)\n\n# Add circle glyphs to the figure p\np.circle('Year', 'Time', source=source, color='color',size=8)\n\n# Specify the name of the output file and show the result\noutput_file('sprint.html')\nshow(p)\n" }, { "alpha_fraction": 0.6654970645904541, "alphanum_fraction": 0.7134503126144409, "avg_line_length": 33.220001220703125, "blob_id": "ff5b6a9c804ae4a0a6ac168325c067bb8925c4f9", "content_id": "38721349b7d9ec2596b0ae9b730c81ff526cbf3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1710, "license_type": "no_license", "max_line_length": 111, "num_lines": 50, "path": "/Datacamp/EDA_distributions.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "np.random.binomial(trails, probablity_of_success, size=number_of_reps)\n\nnp.random.poisson(average_rate, size=number_of_reps)\n\nnp.random.normal(mean, std, size=)\n# Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3, samples_std10\nsamples_std1 = np.random.normal(20,1,size=100000)\nsamples_std3 = np.random.normal(20,3,size=100000)\nsamples_std10 = np.random.normal(20,10,size=100000)\n\n# Make histograms\n_ = plt.hist(samples_std1, normed=True, histtype='step', bins=100)\n_ = plt.hist(samples_std3, normed=True, histtype='step', bins=100)\n_ = plt.hist(samples_std10, normed=True, histtype='step', bins=100)\n\n# Make a legend, set limits and show plot\n_ = plt.legend(('std = 1', 'std = 3', 'std = 10'))\nplt.ylim(-0.01, 0.42)\nplt.show()\n\n\n# Compute mean and standard deviation: mu, sigma\nmu = np.mean(belmont_no_outliers)\nsigma = np.std(belmont_no_outliers)\n\n# Sample out of a normal distribution with this mu and sigma: samples\nsamples = np.random.normal(mu, sigma, size=10000)\n\n# Get the CDF of the samples and of the data\nx_theor, y_theor = ecdf(samples)\nx, y = ecdf(belmont_no_outliers)\n\n# Plot the CDFs and show the plot\n_ = plt.plot(x_theor, y_theor)\n_ = plt.plot(x, y, marker='.', linestyle='none')\n_ = plt.xlabel('Belmont winning time (sec.)')\n_ = plt.ylabel('CDF')\nplt.show()\n\n\nnp.randm.exponential(mean, size=)\ndef successive_poisson(tau1, tau2, size=1):\n \"\"\"Compute time for arrival of 2 successive Poisson processes.\"\"\"\n # Draw samples out of first exponential distribution: t1\n t1 = np.random.exponential(tau1, size=size)\n\n # Draw samples out of second exponential distribution: t2\n t2 = np.random.exponential(tau2, size=size)\n\n return t1 + t2" }, { "alpha_fraction": 0.7313432693481445, "alphanum_fraction": 0.7640369534492493, "avg_line_length": 38.08333206176758, "blob_id": "63b073f562fda58e4229f2a5b20d8847d5b7e6aa", "content_id": "6809d92ed81e46f11f7a56e6b2bbfa15b56205f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1407, "license_type": "no_license", "max_line_length": 113, "num_lines": 36, "path": "/Datacamp/resampling.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#If a df is indexed by date-time, we can perform resampling.\n#Downsampling is when we go to a lower unit, lower unit being one with fewer units in a period (lowere frequency)\n#Downsample from hours to days\n\n#Upsampling is the opposite and will introduce Nana, unless otherwise catered for through filling methods\n\n# Downsample to 6 hour data and aggregate by mean: df1\ndf1 = df.Temperature.resample('6h').mean()\n\n# Downsample to daily data and count the number of data points: df2\ndf2 = df.Temperature.resample('D').count()\n\n# Extract temperature data for August: august\naugust = df.Temperature.loc['2010-08']\n\n# Downsample to obtain only the daily highest temperatures in August: august_highs\naugust_highs = august.resample('D').max()\n\n# Extract temperature data for February: february\nfebruary = df.Temperature.loc['2010-02']\n\n# Downsample to obtain the daily lowest temperatures in February: february_lows\nfebruary_lows = february.resample('D').min()\n\n# Extract data from 2010-Aug-01 to 2010-Aug-15: unsmoothed\nunsmoothed = df['Temperature']['2010-Aug-01':'2010-Aug-15']\n\n# Apply a rolling mean with a 24 hour window: smoothed\nsmoothed = unsmoothed.rolling(window=24).mean()\n\n# Create a new DataFrame with columns smoothed and unsmoothed: august\naugust = pd.DataFrame({'smoothed':smoothed, 'unsmoothed':unsmoothed})\n\n# Plot both smoothed and unsmoothed data using august.plot().\naugust.plot()\nplt.show()\n" }, { "alpha_fraction": 0.7363690733909607, "alphanum_fraction": 0.7435590028762817, "avg_line_length": 35.30434799194336, "blob_id": "196c9c6bd8b36f37f5a1017fe6c4a5e41df3c3fb", "content_id": "7ef49fa66285d8719826ef920651e8e0f144616b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1669, "license_type": "no_license", "max_line_length": 109, "num_lines": 46, "path": "/Datacamp/sqlalchemy_joins.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#IF a table has an already defined relationship:\n# Build a statement to join census and state_fact tables: stmt\nstmt = select([census.columns.pop2000, state_fact.columns.abbreviation])\n\n# Execute the statement and get the first result: result\nresult = connection.execute(stmt).first()\n\n# Loop over the keys in the result object and print the key and value\nfor key in result.keys():\n print(key, getattr(result, key))\n \n \n# Build a statement to select the census and state_fact tables: stmt\nstmt = select([census, state_fact])\n\n# Add a select_from clause that wraps a join for the census and state_fact\n# tables where the census state column and state_fact name column match\nstmt = stmt.select_from(\n census.join(state_fact, census.columns.state == state_fact.columns.name))\n\n# Execute the statement and get the first result: result\nresult = connection.execute(stmt).first()\n\n# Loop over the keys in the result object and print the key and value\nfor key in result.keys():\n print(key, getattr(result, key))\n \n \n # Build a statement to select the state, sum of 2008 population and census\n# division name: stmt\nstmt = select([\n census.columns.state,\n func.sum(census.columns.pop2008),\n state_fact.columns.census_division_name\n])\n\n# Append select_from to join the census and state_fact tables by the census state and state_fact name columns\nstmt = stmt.select_from(\n census.join(state_fact, census.columns.state == state_fact.columns.name)\n)\n\n# Append a group by for the state_fact name column\nstmt = stmt.group_by(state_fact.columns.name)\n\n# Execute the statement and get the results: results\nresults = connection.execute(stmt).fetchall()" }, { "alpha_fraction": 0.7061662077903748, "alphanum_fraction": 0.725469172000885, "avg_line_length": 20.95294189453125, "blob_id": "1e7d2501e70e407088ea5a4b29525ebf58b6e67f", "content_id": "f228a4180e045780a66c705772e7860c1c1f86e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1865, "license_type": "no_license", "max_line_length": 82, "num_lines": 85, "path": "/Datacamp/SQL.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "SELECT * FROM table\n\nSELECT COUNT(*) FROM table\n#counts number of rows\n\nSELECT DISTINCT row FROM table\n#selects unique entries in row\n\nSELECT COUNT(row) FROM table\n#counts non-null entries\n\nSELECT COUNT(DISTINCT row) FROM table\n#returns count of distinct entries\n\nSELECT * FROM table\nWHERE column_value = 'some_value' #Use boolean operators, note that <> is !=\n\nSELECT * FROM table\nWHERE column1 = 'some_value' AND/OR column2 > some_value; \n\nSELECT * FROM table\nWHERE column BETWEEN value1 AND value2;\n#Returns a range (inclusive) \n\nSELECT * FROM table\nWHERE column IN ('...', '....', '....')\n#use this instead of multiple ORs\n\nSELECT * FROM table\nWHERE column IS NULL\\IS NOT NULL\n#filter column on null\\not null values\n\nSELECT * FROM table \nWHERE column LIKE 'Data%'\n# % wildcard matches none, one or many\n\nSELECT * FROM table \nWHERE column NOT LIKE 'Data%'\n# % wildcard matches none, one or many. Here we return all entrie that DON'T match\n\nSELECT * FROM table \nWHERE column LIKE 'Data_'\n# _ wildcard matches a single char\n\n\n###AGGREGATION####\n\nSELECT SUM(column) FROM table #AVG, MIN, MAX\n\nSELECT (col1 + col2)*3 AS new_col FROM table #Note: (3/2) = 1, (3.0/2.0) = 1.5\n\n#Can combine aggregations with arithmetic\n\n\n####ORDERING####\nSELECT column FROM table\nORDER BY col1 DESC\n\n#NOTE comes after WHERE clauses\n\n\n###GROUPING###\n\nSELECT col1, COUNT(col2) FROM table\nGROUP BY col1\n#NOTE can't SELECT a column that isn't the GROUP BY, unless we aggregate it\n\n\n###HAVING###\n\nSELECT column FROM table\nHAVING AVG(col1) > ...\n\n###FULL EG###\nSELECT release_year, AVG(budget) AS avg_budget, AVG(gross) AS avg_gross FROM films\nWHERE release_year > 1990\nGROUP BY release_year\nHAVING AVG(budget) > 60000000\nORDER BY avg_gross DESC\n\nSELECT country, AVG(budget) AS avg_budget, AVG(gross) AS avg_gross FROM films\nGROUP BY country\nHAVING COUNT(title) > 10\nORDER BY country\nLIMIT 5" }, { "alpha_fraction": 0.7383592128753662, "alphanum_fraction": 0.7405765056610107, "avg_line_length": 26.363636016845703, "blob_id": "1852d32c58a0933f065e4cf68eaccee6e11f7c5c", "content_id": "2a1ebf47c525403ae757d202bee1ab6da762c778", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 902, "license_type": "no_license", "max_line_length": 67, "num_lines": 33, "path": "/Datacamp/seaborn_multivariate.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "kind='scatter' uses a scatter plot of the data points\nkind='reg' uses a regression plot (default order 1)\nkind='resid' uses a residual plot\nkind='kde' uses a kernel density estimate of the joint distribution\nkind='hex' uses a hexbin plot of the joint distribution\n\n# Generate a joint plot of 'hp' and 'mpg'\nsns.jointplot(x='hp', y='mpg', data=auto)\n\n# Generate a joint plot of 'hp' and 'mpg' using a hexbin plot\nsns.jointplot(x='hp', y='mpg', data=auto, kind='hex')\n\n# Display the plot\nplt.show()\n\n#Plot of all numeric columns against one another\n# Print the first 5 rows of the DataFrame\nprint(auto.head())\n\n# Plot the pairwise joint distributions from the DataFrame \nsns.pairplot(auto, hue='origin', kind='reg')\n\n# Display the plot\nplt.show()\n\n# Print the covariance matrix\nprint(cov_matrix)\n\n# Visualize the covariance matrix using a heatmap\nsns.heatmap(cov_matrix)\n\n# Display the heatmap\nplt.show()" }, { "alpha_fraction": 0.7167919874191284, "alphanum_fraction": 0.7176274061203003, "avg_line_length": 28.219512939453125, "blob_id": "a3954ecde89c9bf2b2ca171a1788206fa45b217f", "content_id": "b9e809dae4e206d84c30562935fbd36d101cedba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1197, "license_type": "no_license", "max_line_length": 89, "num_lines": 41, "path": "/Datacamp/indexing.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#indexing as:\ndf[['...', '....']]\n#returns a DataFrame\n\np_counties = election.loc['Perry':'Potter', :]\n# Slice the row labels 'Potter' to 'Perry' in reverse order: p_counties_rev\np_counties_rev = election.loc['Potter':'Perry':-1, :]\n\n# Slice the columns from the starting column to 'Obama': left_columns\nleft_columns = election.loc[:, :'Obama']\n\n# Print the output of left_columns.head()\nprint(left_columns.head())\n\n# Slice the columns from 'Obama' to 'winner': middle_columns\nmiddle_columns = election.loc[:, 'Obama':'winner']\n\n# Print the output of middle_columns.head()\nprint(middle_columns.head())\n\n# Slice the columns from 'Romney' to the end: 'right_columns'\nright_columns = election.loc[:, 'Romney':]\n\n#inddexes are immutables, therefore to change it the whole index needs to be overwritten;\n# Create the list of new indexes: new_idx\nnew_idx = [ind.upper() for ind in sales.index]\n\n# Assign new_idx to sales.index\nsales.index = new_idx\n\n# Assign the string 'MONTHS' to sales.index.name\nsales.index.name = 'MONTHS'\n\n# Print the sales DataFrame\nprint(sales)\n\n# Assign the string 'PRODUCTS' to sales.columns.name \nsales.columns.name = 'PRODUCTS'\n\n# Print the sales dataframe again\nprint(sales)" }, { "alpha_fraction": 0.7377167344093323, "alphanum_fraction": 0.7391618490219116, "avg_line_length": 42.28125, "blob_id": "7ed6843c3e78e7937f14ada88fe3413609fbbef1", "content_id": "639932cda7a2e1e1c9a0b04666f982cd8bc27f0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1384, "license_type": "no_license", "max_line_length": 127, "num_lines": 32, "path": "/Datacamp/tidy_data.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#Melting data is the process of turning columns of your data into rows of data. \nairquality_melt = pd.melt(airquality_melt, id_vars=['Month', 'Day'])\n#id_vars = columns not wishing to melt\n#value_vars = columns wishing to melt (deafult to all not in id_vars)\n\n#Pivoting data is the opposite of melting it.\nairquality_pivot = airquality_melt.pivot_table(index=[\"Month\", \"Day\"], columns=\"measurement\", values=\"reading\")\n#columns=\"measurement\" : columns to pivot\n#values=\"reading\" : values to fill columns with\n\n#the above create a heirarchical header format. To fix this:\nairquality_pivot_reset = airquality_pivot.reset_index()\n\n#Often there are duplicate values, these can be handled as follows:\nairquality_pivot = airquality_dup.pivot_table(index=['Month', 'Day'], columns='measurement', values='reading', aggfunc=np.mean)\n#where the mean is taken\n\n#Note in the below that Series atributes and functions are accessed on the .str function\n# Melt ebola: ebola_melt\nebola_melt = pd.melt(ebola, id_vars=['Date', 'Day'], var_name='type_country', value_name='counts')\n\n# Create the 'str_split' column\nebola_melt['str_split'] = ebola_melt.type_country.str.split('_')\n\n# Create the 'type' column\nebola_melt['type'] = ebola_melt.str_split.str.get(0)\n\n# Create the 'country' column\nebola_melt['country'] = ebola_melt.str_split.str.get(1)\n\n# Print the head of ebola_melt\nprint(ebola_melt.head())" }, { "alpha_fraction": 0.6568182110786438, "alphanum_fraction": 0.6803030371665955, "avg_line_length": 31.219512939453125, "blob_id": "8753df27e0f6169c986dd1255f031093ff732393", "content_id": "f863a15c9ff9f65ed7d5003fa064c84766ee5fdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1320, "license_type": "no_license", "max_line_length": 66, "num_lines": 41, "path": "/Datacamp/A_B_testing.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Construct arrays of data: dems, reps\ndems = np.array([True] * 153 + [False] * 91)\nreps = np.array([True] * 136 + [False] * 35)\n\ndef frac_yea_dems(dems, reps):\n \"\"\"Compute fraction of Democrat yea votes.\"\"\"\n frac = np.sum(dems) / len(dems)\n return frac\n\n# Acquire permutation samples: perm_replicates\nperm_replicates = draw_perm_reps(dems, reps, frac_yea_dems, 10000)\n\n# Compute and print p-value: p\np = np.sum(perm_replicates <= 153/244) / len(perm_replicates)\nprint('p-value =', p)\n\n\n\n# Compute the difference in mean sperm count: diff_means\ndiff_means = diff_of_means(control, treated)\n\n# Compute mean of pooled data: mean_count\nmean_count = np.mean(np.concatenate([control, treated]))\n\n# Generate shifted data sets\ncontrol_shifted = control - np.mean(control) + mean_count\ntreated_shifted = treated - np.mean(treated) + mean_count\n\n# Generate bootstrap replicates\nbs_reps_control = draw_bs_reps(control_shifted,\n np.mean, size=10000)\nbs_reps_treated = draw_bs_reps(treated_shifted,\n np.mean, size=10000)\n\n# Get replicates of difference of means: bs_replicates\nbs_replicates = bs_reps_control- bs_reps_treated\n\n# Compute and print p-value: p\np = np.sum(bs_replicates >= np.mean(control) - np.mean(treated)) \\\n / len(bs_replicates)\nprint('p-value =', p)" }, { "alpha_fraction": 0.40771350264549255, "alphanum_fraction": 0.45179063081741333, "avg_line_length": 23.266666412353516, "blob_id": "969c52672ff1666c2537f3192b12b139568de1f3", "content_id": "552f5ca17aa3355ad5664834d7368869910524d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "no_license", "max_line_length": 70, "num_lines": 15, "path": "/Datacamp/pivoting_tables.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "#EG:\n id treatment gender response\n0 1 A F 5\n1 2 A M 3\n2 3 B F 8\n3 4 B M 9\n\ndf.pivot(index = \"treatment\", columns = \"gender\", values = \"response\")\n#pivot\ngender F M\ntreatment \nA 5 3\nB 8 9\n\n#Not specifying the values will pivot all columns" }, { "alpha_fraction": 0.7352342009544373, "alphanum_fraction": 0.7413442134857178, "avg_line_length": 20.676469802856445, "blob_id": "efb39580e7055d74f34540fc60472d7574ef5635", "content_id": "8dbb9a0bf21da1a90fb8a1903198f1367388c2ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1473, "license_type": "no_license", "max_line_length": 104, "num_lines": 68, "path": "/Datacamp/web_import.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Import package\nfrom urllib.request import urlretrieve\n\n# Import pandas\nimport pandas as pd\n\n# Assign url of file: url\nurl = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv'\n\n# Save file locally\nurlretrieve(url, 'winequality-red.csv')\n\n# Read file into a DataFrame and print its head\ndf = pd.read_csv('winequality-red.csv', sep=';')\n#Alternatively\ndf = pd.read_csv(url, sep = \";\") #does not save the file locally\n\n#If file is an excel file\nxl = pd.read_excel(url, sheetname = None)\n\n# Print the sheetnames to the shell\nprint(xl.keys())\n\n# Print the head of the first sheet (using its name, NOT its index)\nprint(xl['1700'].head())\n\n\n##HTTP requests\n# Import packages\nfrom urllib.request import urlopen, Request\n\n# Specify the url\nurl = \"http://www.datacamp.com/teach/documentation\"\n\n# This packages the request: request\nrequest = Request(url)\n\n# Sends the request and catches the response: response\nresponse = urlopen(request)\n\n# Print the datatype of response\nprint(type(response))\n\n# Extract the response: html\nhtml = response.read()\n\n# Be polite and close the response!\nresponse.close()\n\n\n#The requests package simplifies this:\n\n# Import package\nimport requests\n\n# Specify the url: url\nurl = \"http://www.datacamp.com/teach/documentation\"\n\n# Packages the request, send the request and catch the response: r\nr = requests.get(url)\n\n# Extract the response: text\ntext = r.text\n\n#NO NEED TO CLOSE\n\n# Print the html\nprint(text)" }, { "alpha_fraction": 0.6920995712280273, "alphanum_fraction": 0.7234848737716675, "avg_line_length": 40.06666564941406, "blob_id": "9df147a91a855944cdb25e828d4269154abcc0aa", "content_id": "8cf1c8606c014b2d80af9a3da16089ec351bfb56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1848, "license_type": "no_license", "max_line_length": 89, "num_lines": 45, "path": "/Datacamp/readin_and_cleaning.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "\n# Read in the data file with header=None: df_headers\ndf_headers = pd.read_csv(data_file, header=None)\n\n# Print the output of df_headers.head()\nprint(df_headers.head())\n\n# Split on the comma to create a list: column_labels_list\ncolumn_labels_list = column_labels.split(\",\")\n\n# Assign the new column labels to the DataFrame: df.columns\ndf.columns = column_labels_list\n\n# Remove the appropriate columns: df_dropped\ndf_dropped = df.drop(list_to_drop, axis = 'columns')\n\n# Print the output of df_dropped.head()\nprint(df_dropped.head())\n\n# Convert the date column to string: df_dropped['date']\ndf_dropped['date'] = df_dropped['date'].astype(str)\n\n# Pad leading zeros to the Time column: df_dropped['Time']\ndf_dropped['Time'] = df_dropped['Time'].apply(lambda x:'{:0>4}'.format(x))\n\n# Concatenate the new date and Time columns: date_string\ndate_string = df_dropped.date + df_dropped.Time\n\n# Convert the date_string Series to datetime: date_times\ndate_times = pd.to_datetime(date_string, format='%Y%m%d%H%M')\n\n# Set the index to be the new date_times container: df_clean\ndf_clean = df_dropped.set_index(date_times)\n\n# Print the dry_bulb_faren temperature between 8 AM and 9 AM on June 20, 2011\nprint(df_clean.loc['2011-Jun-20 08:00':'2011-Jun-20 09:00', 'dry_bulb_faren'])\n\n# Convert the dry_bulb_faren column to numeric values: df_clean['dry_bulb_faren']\ndf_clean['dry_bulb_faren'] = pd.to_numeric(df_clean['dry_bulb_faren'], errors='coerce')\n\n# Print the transformed dry_bulb_faren temperature between 8 AM and 9 AM on June 20, 2011\nprint(df_clean.loc['2011-Jun-20 08:00':'2011-Jun-20 09:00', 'dry_bulb_faren'])\n\n# Convert the wind_speed and dew_point_faren columns to numeric values\ndf_clean['wind_speed'] = pd.to_numeric(df_clean['wind_speed'], errors='coerce')\ndf_clean['dew_point_faren'] = pd.to_numeric(df_clean['dew_point_faren'], errors='coerce')" }, { "alpha_fraction": 0.703045666217804, "alphanum_fraction": 0.7233502268791199, "avg_line_length": 24.69565200805664, "blob_id": "387be676c68bdc7f6e32711f8f7026a971edc24f", "content_id": "f05a16b073fe906e8456de4b9c106a6506e95994", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1182, "license_type": "no_license", "max_line_length": 96, "num_lines": 46, "path": "/Datacamp/EDA_boxplot_percentile.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "import numpy as np\n\nnp.mean(data)\nnp.median(data)\nnp.var(versicolor_petal_length)\nnp.std(versicolor_petal_length)\n\n#covariance matrix: \n# returns a 2D array where entries [0,1] and [1,0] are the covariances.\n# Entry [0,0] is the variance of the data in x, and entry [1,1] is the variance of the data in y\nnp.cov(versicolor_petal_length, versicolor_petal_width)\n\ndef pearson_r(x, y):\n \"\"\"Compute Pearson correlation coefficient between two arrays.\"\"\"\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n\n # Return entry [0,1]\n return corr_mat[0,1]\n\n# Compute Pearson correlation coefficient for I. versicolor: r\nr = pearson_r(versicolor_petal_length, versicolor_petal_width)\n\n# Print the result\nprint(r)\n\n\n# Specify array of percentiles: percentiles\npercentiles = np.array([2.5, 25, 50, 75, 97.5])\n\n# Compute percentiles: ptiles_vers\nptiles_vers= np.percentile(versicolor_petal_length, percentiles)\n\n# Print the result\nprint(ptiles_vers)\n\n\n# Create box plot with Seaborn's default settings\n_ = sns.boxplot(x='species', y='petal length (cm)', data=df)\n\n# Label the axes\nplt.xlabel('species')\nplt.ylabel('petal length (cm)')\n\n# Show the plot\nplt.show()\n" }, { "alpha_fraction": 0.5403442978858948, "alphanum_fraction": 0.5543302893638611, "avg_line_length": 27.121212005615234, "blob_id": "9928b308c64eb34a0e99e4db72f420e571e11016", "content_id": "a26e1cd61089968f8b3e4489d0b58b91ee24941b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3718, "license_type": "no_license", "max_line_length": 103, "num_lines": 132, "path": "/API/Section6/code/UseDB/item.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "from flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\nimport sqlite3\n\n\nclass Item(Resource):\n parser = reqparse.RequestParser() #This prevents code duplication and now belongs to the Item class\n parser.add_argument('price', \n type = float, \n required = True,\n help = \"This field cannot be left blank\") \n \n \n @jwt_required()\n def get(self, name):\n item = self.find_by_name(name)\n \n #http://127.0.0.1:5000/item/wine?price=17 will pass 17 to the args\n #args = Item.parser.parse_args()\n #print(args['price'])\n \n \n if item is not None:\n return item, 200\n else:\n return {\"message\" : \"Item not found\"}, 404\n\n @classmethod\n def find_by_name(cls, name):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n \n select_query = \"SELECT * FROM items WHERE name = ?\"\n result = cursor.execute(select_query, (name,))\n item_in_db = result.fetchone()\n connection.close()\n \n if item_in_db is not None:\n return {'item' : {'name' : item_in_db[0], 'price': item_in_db[1]}}\n \n #We could use the get() method but that requires a JWT \n #Thus we use the alternative class method\n def post(self, name): \n \n item = self.find_by_name(name)\n if item is not None:\n return {\"message\":\"item already in database\"}, 400\n \n data = Item.parser.parse_args()\n item = {'name' : name, 'price': data['price']}\n \n try:\n self.insert_item(item)\n except:\n return {\"message\" : \"An error occurred\"}, 500\n \n return {'name' : name, 'price' : data['price']}, 201 #201 is code for created\n \n \n @classmethod\n def insert_item(cls, item):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n \n insert_query = \"INSERT INTO items VALUES (?, ?)\"\n cursor.execute(insert_query, (item['name'], item['price']))\n \n connection.commit()\n connection.close()\n \n def delete(self, name):\n\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n \n delete_query = \"DELETE FROM items WHERE name = ?\"\n \n cursor.execute(delete_query, (name,))\n \n connection.commit()\n connection.close()\n \n return {\"message\" : \"Item deleted\"}\n \n def put(self, name):\n\n item = self.find_by_name(name)\n data = Item.parser.parse_args()\n updated_item = {'name' : name, 'price': data['price']}\n\n if item is None:\n try:\n self.insert_item(updated_item)\n except:\n {\"message\" : \"an error occurred\"}, 500\n else:\n try:\n self.update(updated_item)\n except:\n {\"message\" : \"an error occurred\"}, 500 \n\n return updated_item, 201 #201 is code for created\n \n\n @classmethod\n def update(cls, item):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n \n insert_query = \"UPDATE items SET price = ? WHERE name = ?\"\n cursor.execute(insert_query, (item['price'], item['name']))\n \n connection.commit()\n connection.close()\n \n\n\nclass ItemList(Resource):\n def get(self):\n connection = sqlite3.connect('data.db')\n cursor = connection.cursor()\n \n query = \"SELECT * FROM items\"\n \n result = cursor.execute(query)\n items = result.fetchall()\n connection.close()\n \n if items is not None:\n return {'items' : items}\n else:\n return {\"message\" : \"No items in database\"}\n " }, { "alpha_fraction": 0.6875800490379333, "alphanum_fraction": 0.6952624917030334, "avg_line_length": 25.066667556762695, "blob_id": "dd85e338fcfdc856a67d51fa97c3eef17206a554", "content_id": "7fd04d299fa3591bd0ae2c424af932d170fb2555", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 781, "license_type": "no_license", "max_line_length": 124, "num_lines": 30, "path": "/API/Section6/test.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "import sqlite3\n\nconnection = sqlite3.connect('data.db')\n\ncursor = connection.cursor() #similar to a screen cursor, it allows us to selct and start thinigs. It executes the queries\n\ncreate_table = \"CREATE TABLE users (id int, username text, password text)\"\ncursor.execute(create_table)\n\n\nuser = (1, \"damien\", \"bitches\")\ninsert_query = \"INSERT INTO users VALUES (?, ?, ?)\"\ncursor.execute(insert_query, user)\n\nusers = [\n (2, \"not damien\", \"notbitches\"),\n (3, \"other\", \"otherps\")\n ]\ncursor.executemany(insert_query, users)\n\nselect_query = \"SELECT * from users\"\na = cursor.execute(select_query)\nres = connection.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\nfor name in res:\n print (name[0])\nprint(next(a))\n\nconnection.commit()\n\nconnection.close()" }, { "alpha_fraction": 0.7089136242866516, "alphanum_fraction": 0.7353760600090027, "avg_line_length": 25.280487060546875, "blob_id": "347b73000850c846d0a36ccc29bdb9fcc156622a", "content_id": "bb071e7a81ad80c57a142923827cf723ade0b711", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2154, "license_type": "no_license", "max_line_length": 105, "num_lines": 82, "path": "/Datacamp/bokeh_layouts.py", "repo_name": "DamienPond001/Udemy_API", "src_encoding": "UTF-8", "text": "# Import row from bokeh.layouts\nfrom bokeh.layouts import row, column\n\n# Create the first figure: p1\np1 = figure(x_axis_label='fertility (children per woman)', y_axis_label='female_literacy (% population)')\n\n# Add a circle glyph to p1\np1.circle('fertility', 'female_literacy', source=source)\n\n# Create the second figure: p2\np2 = figure(x_axis_label='population', y_axis_label='female_literacy (% population)')\n\n# Add a circle glyph to p2\np2.circle('population', 'female_literacy', source=source)\n\n# Put p1 and p2 into a horizontal row: layout\nlayout = row(p1,p2)\n#layout = column(p1, p2)\n\n# Specify the name of the output_file and show the result\noutput_file('fert_row.html')\nshow(layout)\n\n\n# Import column and row from bokeh.layouts\nfrom bokeh.layouts import row, column\n\n# Make a column layout that will be used as the second row: row2\nrow2 = column([mpg_hp, mpg_weight], sizing_mode='scale_width')\n\n# Make a row layout that includes the above column layout: layout\nlayout = row([avg_mpg, row2], sizing_mode='scale_width')\n\n# Specify the name of the output_file and show the result\noutput_file('layout_custom.html')\nshow(layout)\n\n\n\n# Import gridplot from bokeh.layouts\nfrom bokeh.layouts import gridplot\n\n# Create a list containing plots p1 and p2: row1\nrow1 = [p1, p2]\n\n# Create a list containing plots p3 and p4: row2\nrow2 = [p3, p4]\n\n# Create a gridplot using row1 and row2: layout\nlayout = gridplot([row1, row2])\n\n# Specify the name of the output_file and show the result\noutput_file('grid.html')\nshow(layout)\n\n\n\n#TABS\n# Import Panel from bokeh.models.widgets\nfrom bokeh.models.widgets import Panel\n\n# Create tab1 from plot p1: tab1\ntab1 = Panel(child=p1, title='Latin America')\n\n# Create tab2 from plot p2: tab2\ntab2 = Panel(child=p2, title='Africa')\n\n# Create tab3 from plot p3: tab3\ntab3 = Panel(child=p3, title='Asia')\n\n# Create tab4 from plot p4: tab4\ntab4 = Panel(child=p4, title='Europe')\n\n# Import Tabs from bokeh.models.widgets\nfrom bokeh.models.widgets import Tabs\n\n# Create a Tabs layout: layout\nlayout = Tabs(tabs=[tab1, tab2, tab3, tab4])\n\n# Specify the name of the output_file and show the result\noutput_file('tabs.html')\nshow(layout)" } ]
58
vgrichina/ios-autocomplete
https://github.com/vgrichina/ios-autocomplete
d08ad4e1d52d786b85174241b18edb8f427d1c2c
4ce212409258c13993f3686739a573a43c19e3a3
3c523795e8fe4d7992c5556c1696b4de7eb216ac
refs/heads/master
2021-01-01T15:55:45.170299
2012-03-04T12:14:55
2012-03-04T12:14:55
3,604,273
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6322213411331177, "alphanum_fraction": 0.6346623301506042, "avg_line_length": 28.261905670166016, "blob_id": "3c071093077b865282a54ff9e779b849d39daa64", "content_id": "887a721ce1b482b405e63ea7ddec36e0d59463d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1229, "license_type": "no_license", "max_line_length": 82, "num_lines": 42, "path": "/gen_index.py", "repo_name": "vgrichina/ios-autocomplete", "src_encoding": "UTF-8", "text": "import os\nimport sqlite3\n\ndb_path = \"Autocomplete/names.sqlite\"\nos.remove(db_path)\n\ndb = sqlite3.connect(db_path)\n\ndb.execute(\"pragma synchronous=off\")\ndb.execute(\"pragma journal_mode=memory\")\ndb.execute(\"pragma temp_store=memory\")\n\ndb.execute(\"create table names (name text)\")\ndb.execute(\"create table parts (part text collate nocase)\")\ndb.execute(\"\"\"create table names_parts (part_id integer, name_id integer,\n foreign key(name_id) references names(rowid),\n foreign key(part_id) references parts(rowid))\n\"\"\")\ndb.execute(\"create index parts_idx on parts (part)\")\ndb.execute(\"create index names_parts_idx on names_parts (part_id, name_id)\")\n\nc = db.cursor()\n\nall_parts = {}\n\nfor name in open(\"Autocomplete/fake-full-names.txt\", \"r\"):\n name = name.replace(\"\\n\", \"\")\n\n c.execute(\"insert into names values (?)\", (name,))\n name_id = c.lastrowid\n for part in name.split(\" \"):\n if len(part) > 1:\n if part in all_parts:\n part_id = all_parts[part]\n else:\n c.execute(\"insert into parts values(?)\", (part,))\n part_id = c.lastrowid\n\n c.execute(\"insert into names_parts values (?, ?)\", (part_id, name_id))\n\ndb.commit()\ndb.close()\n" } ]
1
hplan/fw
https://github.com/hplan/fw
03c925295bcc3b25982d2f6a92d407743105b6ac
e957482d150f6224e2be1ab5df0c6897f0cf2c60
3672e134acc0b31171690f032c6628fc0a2d03ae
refs/heads/master
2023-09-01T11:54:24.420569
2023-08-23T05:34:05
2023-08-23T05:34:36
196,965,461
0
0
Apache-2.0
2019-07-15T09:14:44
2019-07-17T09:33:37
2019-07-29T08:34:26
Python
[ { "alpha_fraction": 0.45672574639320374, "alphanum_fraction": 0.4593326449394226, "avg_line_length": 25.63888931274414, "blob_id": "5bf6fdd21e18a280598d04dd8863f082efde63c2", "content_id": "e3bbf2e30a03de422c957b46e519a21ae2e4ac3a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1918, "license_type": "permissive", "max_line_length": 92, "num_lines": 72, "path": "/fw_session/model/DatabaseHelper.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n__metaclass__ = type\nimport sqlite3\n\n\nclass DatabaseHelper:\n\n def __init__(self):\n self.db = \"./db/fw.db\"\n\n def create(self):\n conn = sqlite3.connect(self.db)\n try:\n db = conn.cursor()\n db.execute('''\n CREATE TABLE IF NOT EXISTS bz(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n bz_name TEXT NOT NULL,\n bz_pwd TEXT NOT NULL,\n bz_host TEXT NOT NULL,\n bz_token TEXT\n )\n ''')\n db.execute('''\n CREATE TABLE IF NOT EXISTS product(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT NOT NULL,\n alias TEXT NOT NULL,\n dir TEXT NOT NULL,\n prev_tag TEXT\n )\n ''')\n # only when bz was empty, initial one\n # cursor = db.execute(\"select id from bz\")\n # r = cursor.fetchall()\n # if len(r) == 0:\n # db.execute(\"INSERT INTO bz(bz_name, bz_pwd, bz_host) VALUES ('', '', '')\")\n conn.commit()\n except Exception as e:\n print \"*** Something went wrong ***\"\n print e.message\n finally:\n conn.close()\n\n def query(self, sql):\n conn = sqlite3.connect(self.db)\n try:\n db = conn.cursor()\n cursor = db.execute(sql)\n return conn, cursor\n finally:\n pass\n\n def cud(self, sql):\n conn = sqlite3.connect(self.db)\n try:\n db = conn.cursor()\n db.execute(sql)\n conn.commit()\n except Exception as e:\n raise e\n finally:\n conn.close()\n\n def insert(self, sql):\n self.cud(sql)\n\n def delete(self, sql):\n self.cud(sql)\n\n def update(self, sql):\n self.cud(sql)\n" }, { "alpha_fraction": 0.4717504382133484, "alphanum_fraction": 0.480415940284729, "avg_line_length": 25.4678897857666, "blob_id": "d93871cb1706d0a837fbf65adca0f5e8fc097f2c", "content_id": "3a7fcfcc3b789461e25ad60fac9ae8fef878603c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2885, "license_type": "permissive", "max_line_length": 64, "num_lines": 109, "path": "/fw_session/setup/product_setup.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\nsys.path.append(\"./model\")\nfrom model import DatabaseExecutor as db\nfrom model import Product\n\nexecutor = db.DatabaseExecutor()\n\n\ndef new_product():\n print \"\\n>> New product:\\n\"\n # print host\n name = raw_input(\" Product name: \")\n # print name\n alias = raw_input(\" Product alias: \")\n # print pwd\n pdir = raw_input(\" Product dir: \")\n\n pd = Product.Product()\n pd.set(\"name\", name)\n pd.set(\"alias\", alias)\n pd.set(\"dir\", pdir)\n executor.prod_add(pd)\n print \"\\n Product created.\"\n\n\ndef edit_product(pds):\n print \"\\n>> Products:\"\n print \" \" + \"-\" * 60\n for pd in pds:\n print \" id: |\", pd['id']\n print \" name: |\", pd['name']\n print \" alias: |\", pd['alias']\n print \" dir: |\", pd['dir']\n print \" \" + \"-\" * 60\n\n # get user input\n while True:\n rid = raw_input(\" * Choose product id: \")\n if not rid.isdigit():\n print \" Unknown combo {0}\\n\".format(rid)\n else:\n break\n print \"\"\"\n >> Operations:\\n\n 1.Update name\n 2.Update alias\n 3.Update dir\n 4.Exit\n\"\"\"\n while True:\n combo = raw_input(\" * What do you want to do: \")\n if not combo.isdigit():\n print \" Unknown combo {0}\\n\".format(combo)\n elif int(combo) == 1:\n name = raw_input(\" Product name: \")\n pd = Product.Product()\n pd.set(\"name\", name)\n prod_update(pd, rid)\n elif int(combo) == 2:\n alias = raw_input(\" Product alias: \")\n pd = Product.Product()\n pd.set(\"alias\", alias)\n prod_update(pd, rid)\n elif int(combo) == 3:\n pdir = raw_input(\" Product dir: \")\n pd = Product.Product()\n pd.set(\"dir\", pdir)\n prod_update(pd, rid)\n elif int(combo) == 4:\n exit(0)\n else:\n print \" Unknown combo {0}\\n\".format(combo)\n\n\ndef prod_update(pd, rid):\n executor.prod_update(pd, int(rid))\n print \" product updated.\\n\"\n\n\ndef product_setup():\n print \"\\n>> Operations:\"\n products = executor.prod_query()\n if not products:\n new_product()\n else:\n print \"\"\"\n 1.Add product\n 2.Update product\n 3.Exit\n\"\"\"\n while True:\n combo = raw_input(\"* What do you want to do: \")\n if not combo.isdigit():\n print \"Unknown combo {0}\\n\".format(combo)\n elif int(combo) == 1:\n new_product()\n print \"\"\n elif int(combo) == 2:\n edit_product(products)\n elif int(combo) == 3:\n exit(0)\n else:\n print \"Unknown combo {0}\\n\".format(combo)\n\n\ndef run():\n product_setup()\n" }, { "alpha_fraction": 0.49197688698768616, "alphanum_fraction": 0.4967907667160034, "avg_line_length": 32.505374908447266, "blob_id": "8fb7fda414cd80ec53289748bb13b6af160b6aa9", "content_id": "36a20d4c8fe19c0141951712df88e9a74c4bb8f0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3116, "license_type": "permissive", "max_line_length": 85, "num_lines": 93, "path": "/fw_session/model/DatabaseExecutor.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n__metaclass__ = type\nimport DatabaseHelper as helper\nimport Product as pdo\nimport Bz as bzo\n\n\nclass DatabaseExecutor:\n\n def __init__(self):\n self.db = helper.DatabaseHelper()\n pass\n\n def create_db(self):\n self.db.create()\n\n def bz_query(self):\n sql = \"select id, bz_name, bz_pwd, bz_host, bz_token from bz;\"\n (conn, cursor) = self.db.query(sql)\n try:\n bz = bzo.Bz()\n for row in cursor:\n bz.set(\"id\", row[0])\n bz.set(\"bz_name\", row[1])\n bz.set(\"bz_pwd\", row[2])\n bz.set(\"bz_host\", row[3])\n bz.set(\"bz_token\", row[4])\n return bz\n finally:\n conn.close()\n\n def bz_add(self, bz):\n if 'bz_name' not in bz and 'bz_host' not in bz and 'bz_pwd' not in bz:\n raise Exception(\"IllegalArgument product %s \" % bz)\n sql = \"INSERT INTO bz(bz_name, bz_host, bz_pwd) VALUES ('%s', '%s', '%s')\" \\\n % (bz['bz_name'], bz['bz_host'], bz['bz_pwd'])\n self.db.insert(sql)\n\n def bz_update(self, bz):\n sql_template = \"UPDATE bz set '%s' = '%s'\"\n for key in bz.keys():\n sql = sql_template % (key, bz[key])\n self.db.update(sql)\n\n def prod_add(self, product):\n if 'name' not in product and 'alias' not in product and 'dir' not in product:\n raise Exception(\"IllegalArgument product %s \" % product)\n\n sql = \"INSERT INTO product(name, alias, dir) VALUES ('%s', '%s', '%s')\" \\\n % (product['name'], product['alias'], product['dir'])\n self.db.insert(sql)\n\n def prod_update(self, product, rid):\n sql_template = \"UPDATE product set '%s' = '%s' where id = %d\"\n for key in product.keys():\n sql = sql_template % (key, product[key], rid)\n self.db.update(sql)\n\n def prod_delete(self, rid):\n sql = \"delete from product where id = %d\" % rid\n self.db.delete(sql)\n\n def prod_query(self):\n sql = \"select id, name, alias, dir, prev_tag from product;\"\n (conn, cursor) = self.db.query(sql)\n try:\n pds = []\n for row in cursor:\n product = pdo.Product()\n product.set(\"id\", row[0])\n product.set(\"name\", row[1])\n product.set(\"alias\", row[2])\n product.set(\"dir\", row[3])\n product.set(\"prev_tag\", row[4])\n pds.append(product)\n return pds\n finally:\n conn.close()\n\n def get_product(self, rid):\n sql = \"select id, name, alias, dir, prev_tag from product where id=%d;\" % rid\n (conn, cursor) = self.db.query(sql)\n try:\n for row in cursor:\n product = pdo.Product()\n product.set(\"id\", row[0])\n product.set(\"name\", row[1])\n product.set(\"alias\", row[2])\n product.set(\"dir\", row[3])\n product.set(\"prev_tag\", row[4])\n return product\n finally:\n conn.close()\n" }, { "alpha_fraction": 0.7235772609710693, "alphanum_fraction": 0.7235772609710693, "avg_line_length": 29.75, "blob_id": "f05bb3fba94842a6d9c19c8b29fdd78d74cdb046", "content_id": "4516860466ecb023a14c001cd494805b9e2fd911", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 123, "license_type": "permissive", "max_line_length": 57, "num_lines": 4, "path": "/dailybuild/integrated/32_dailybuild_one_by_one.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n/home/hplan/fw/dailybuild/eagle/dailybuild.sh -b -c -u -s\n/home/hplan/fw/dailybuild/eagle/dailybuild.sh -b -c\n" }, { "alpha_fraction": 0.7067669034004211, "alphanum_fraction": 0.7067669034004211, "avg_line_length": 25.600000381469727, "blob_id": "639208393078272ea95a129e479f11db5a8c3283", "content_id": "4f78f0d5d1ad12ca6ddefb93a8252982191698eb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 133, "license_type": "permissive", "max_line_length": 80, "num_lines": 5, "path": "/dailybuild/env.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nexport LC_ALL=C\nexport SHELL=/bin/bash\nexport PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:${PATH}\n" }, { "alpha_fraction": 0.6989796161651611, "alphanum_fraction": 0.7168367505073547, "avg_line_length": 38.20000076293945, "blob_id": "dac4f82286d8be6a25fd008a8a462266e06b3d06", "content_id": "e45a63fe6f4d75797331dc8d3945adaa49e661fa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "permissive", "max_line_length": 127, "num_lines": 10, "path": "/tools/rgba2rgb.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom PIL import Image\n\nif __name__ == '__main__':\n img = Image.open('/home/hplan/project/alpaca7/android/hardware/intel/kernelflinger/libkernelflinger/res/images/oem_82.png')\n print(img.mode)\n img = img.convert('RGB')\n img.save('/home/hplan/project/alpaca7/android/hardware/intel/kernelflinger/libkernelflinger/res/images/oem_82_2.png')\n print(img.mode)\n" }, { "alpha_fraction": 0.7137255072593689, "alphanum_fraction": 0.7254902124404907, "avg_line_length": 41.5, "blob_id": "68eee0453adccd453545aa393955ba3eab2e50c4", "content_id": "a0fd11792fce5ea93d45ef8553459c758c3f4fc8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 255, "license_type": "permissive", "max_line_length": 72, "num_lines": 6, "path": "/dailybuild/openjdk-8-env", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\nexport PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin\nexport JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/\nexport PATH=$JAVA_HOME/bin:$PATH\nexport JRE_HOME=${JAVA_HOME}/jre\nexport CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib\n" }, { "alpha_fraction": 0.526627242565155, "alphanum_fraction": 0.55266273021698, "avg_line_length": 26.704917907714844, "blob_id": "19ca6856e8959f31a20629402c22d083287f726a", "content_id": "59cb9da616d6a8245e5984d263f1f75a696c90b2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1690, "license_type": "permissive", "max_line_length": 287, "num_lines": 61, "path": "/release/nec/git-log.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nexport VERSION=\"0.0.0.0\"\nexport MAIL_TO=\"[email protected]\"\nexport PROJ_TOP=/home/hplan/project/itx-3370\n\nLog_Raw=\"/tmp/logNecRaw.html\"\nLog_Pretty=\"/tmp/logNecPretty.html\"\n\nprint_help() {\n echo \"\n release tool for GXV3370 OEM NEC\n \n # -h: print this help document\n # -r: specify email addressee. default: [email protected]\n # -t: previous tag name \n # -v: set version\n \"\n}\n\nwhile getopts \"v:r:t:h\" arg\ndo\n case ${arg} in\n h)\n print_help\n exit 0\n ;;\n\n v)\n export VERSION=$OPTARG\n ;;\n\n r)\n export MAIL_TO=$OPTARG\n ;;\n\n t)\n export TAG=$OPTARG\n ;;\n\n ?)\n echo \"unknown argument $OPTARG\"\n exit 1\n ;;\n esac\ndone\n\ncat /dev/null > ${Log_Raw}\ncat /dev/null > ${Log_Pretty}\n\ncd ${PROJ_TOP} && repo forall -p -c git log --graph --name-status ...${TAG} --pretty=format:\"<span style='color:#00cc33'>%ci</span> <span style='color:yellow'>%an %ae</span>%n<span style='color:#00cc33'>Log:</span> <span style='color:yellow'> %s </span>%nFiles List:\" > ${Log_Raw}\n\nif [[ $(stat -c %s ${Log_Raw}) -eq 0 ]]; then\n echo \"There is no commit, nothing to do.\"\nelse\n echo \"<html> <body style='background-color:#151515; font-size: 14pt; color: white'><div style='background-color:#151515; color: white'>\" > ${Log_Pretty}\n sed -e 's/$/<br>/g' ${Log_Raw} >> ${Log_Pretty}\n echo \"</div></body></html>\" >> ${Log_Pretty}\n\n sendemail -f [email protected] -t ${MAIL_TO} -s smtp.grandstream.cn -o tls=no message-charset=utf-8 -xu [email protected] -xp S1pTestH2 -v -u \"GXV3370 OEM NEC ${VERSION} git log\" < ${Log_Pretty}\nfi\n" }, { "alpha_fraction": 0.4937388300895691, "alphanum_fraction": 0.5277280807495117, "avg_line_length": 16.46875, "blob_id": "0d106a4ea6d0d62bc0f1d53f201d5090349a92b1", "content_id": "b9dcd0e34259f87d5b554f0d7036d79ed3668751", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 559, "license_type": "permissive", "max_line_length": 34, "num_lines": 32, "path": "/fw_session/git-griddle.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nFILE=git-logs.txt\n\n\nsed -i \"/add daily/d\" ${FILE}\nsed -i \"/update avs/d\" ${FILE}\n\nsed -i \"/\\[Bug/d\" ${FILE}\nsed -i \"/Revert/d\" ${FILE}\n\nsed -i \"/GVC/d\" ${FILE}\nsed -i \"/gvc/d\" ${FILE}\n\nsed -i \"/GSC/d\" ${FILE}\nsed -i \"/gsc/d\" ${FILE}\nsed -i \"/GAC/d\" ${FILE}\nsed -i \"/gac/d\" ${FILE}\n\nsed -i \"/WP/d\" ${FILE}\nsed -i \"/wp820/d\" ${FILE}\n\nsed -i \"/3350/d\" ${FILE}\nsed -i \"/gxv3350/d\" ${FILE}\nsed -i \"/GXV3350/d\" ${FILE}\n\n\nsed -i \"/Update version/d\" ${FILE}\nsed -i \"/nec/d\" ${FILE}\nsed -i \"/NEC/d\" ${FILE}\nsed -i \"/H60/d\" ${FILE}\nsed -i \"/H51/d\" ${FILE}\n" }, { "alpha_fraction": 0.5541414022445679, "alphanum_fraction": 0.577246904373169, "avg_line_length": 27.530725479125977, "blob_id": "7ab567ed811d0d90ea65e7f1f45c33b2d6012f65", "content_id": "ab6442b95670a61a2c73dde8023e7c5dd042a445", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5165, "license_type": "permissive", "max_line_length": 97, "num_lines": 179, "path": "/fw_session/features/feature_requests.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport fw_session.BugzillaApi\nfrom fw_session.model import DatabaseExecutor as db\nimport xlrd\nimport xlwt\nimport json\nfrom fw_session.model import Bug\n\nexecutor = db.DatabaseExecutor()\ncontroller = fw_session.BugzillaApi.BugzillaApi()\n\n\ndef filtering(bugs, reporter):\n bs = []\n for bug in bugs:\n if bug.get_creator() != reporter:\n continue\n bs.append(bug)\n return bs\n\n\ndef filtering2(bugs):\n bs = []\n for bug in bugs:\n if bug.get_creator().endswith(\".com\"):\n bs.append(bug)\n return bs\n\n\ndef run(comments):\n bz = executor.bz_query()\n prods = executor.prod_query()\n if not bz or not prods:\n print \"Please continue after your personal information is perfected. \"\n exit(0)\n # search security bugs\n bugs = controller.search_bug()\n bugs = bugs[::-1]\n to_json(bugs)\n bugs = to_beans()\n book = xlwt.Workbook()\n fbs = filtering(bugs, \"[email protected]\")\n write_excel(book, 'security', fbs, comments)\n\n # for repo in repos:\n fbs = filtering2(bugs)\n write_excel(book, \"features\", fbs, comments)\n\n book.save(\"demo1.xlsx\")\n\n\ndef set_style(name, height, row, bold=False):\n style = xlwt.XFStyle() # 初始化样式\n font = xlwt.Font() # 为样式创建字体\n font.name = name # 'Times New Roman'\n font.bold = bold\n font.color_index = 4\n font.height = height\n\n alignment = xlwt.Alignment()\n alignment.vert = alignment.VERT_CENTER\n alignment.wrap = 1\n style.alignment = alignment\n borders = xlwt.Borders()\n borders.left = 1\n borders.right = 1\n borders.top = 1\n borders.bottom = 1\n style.font = font\n if row % 2 == 0:\n pattern = xlwt.Pattern()\n pattern.pattern = xlwt.Pattern.SOLID_PATTERN\n pattern.pattern_fore_colour = xlwt.Style.colour_map['silver_ega']\n style.pattern = pattern\n style.borders = borders\n\n return style\n\n\n# 写excel\ndef write_excel(book, sheet, bugs, comments):\n # fr = xlrd.open_workbook(\"demo1.xlsx\")\n # f = copy(fr)\n # row = f.sheet_by_name(\"security\").nrows\n f = book\n '''\n 创建第一个sheet:\n sheet1\n '''\n sheet1 = f.add_sheet(sheet, cell_overwrite_ok=True) # 创建sheet\n row0 = [u'ID', u'Product', u'Component', u'Status', u'Resolution', u'Summary', u'SW Comment',\n u'Create Time', u'Last Change']\n\n # 生成第一行\n for i in range(0, len(row0)):\n sheet1.write(0, i, row0[i], set_style('Courier New', 220, 2, True))\n\n row = 1\n for bug in bugs:\n style = set_style('Courier New', 220, row, False)\n sheet1.write(row, 0, bug.get_id(), style)\n sheet1.write(row, 1, bug.get_product(), style)\n sheet1.write(row, 2, bug.get_component(), style)\n sheet1.write(row, 3, bug.get_status(), style)\n sheet1.write(row, 4, bug.get_resolution(), style)\n sheet1.write(row, 5, bug.get_summary(), style)\n sheet1.write(row, 6, comments.get(str(int(bug.get_id()))), style)\n sheet1.write(row, 7, bug.get_creation_time(), style)\n sheet1.write(row, 8, bug.get_last_change_time(), style)\n\n sheet1.row(row).height_mismatch = True\n sheet1.row(row).height = 40 * 20\n\n row += 1\n\n sheet1.col(0).width = 256 * 15\n sheet1.col(1).width = 256 * 15\n sheet1.col(2).width = 256 * 15\n sheet1.col(3).width = 256 * 15\n sheet1.col(4).width = 256 * 15\n sheet1.col(5).width = 256 * 120\n sheet1.col(6).width = 256 * 33\n sheet1.col(7).width = 256 * 33\n\n # f.save('demo1.xlsx') # 保存文件\n\n\ndef read_books(datas):\n book = xlrd.open_workbook(\"demo1.xlsx\")\n sheet = book.sheet_by_name(\"features\")\n for i in range(1, sheet.nrows):\n c_cell_id = sheet.cell_value(i, 0)\n c_cell_comment = sheet.cell_value(i, 6)\n datas[str(int(c_cell_id))] = c_cell_comment\n\n\ndef to_json(bugs):\n bug_arr = []\n for bug in bugs:\n t = dict()\n t['id'] = bug.get_id()\n t['product'] = bug.get_product()\n t['component'] = bug.get_component()\n t['status'] = bug.get_status()\n t['resolution'] = bug.get_resolution()\n t['summary'] = bug.get_summary()\n t['creation_time'] = bug.get_creation_time()\n t['last_change_time'] = bug.get_last_change_time()\n t['creator'] = bug.get_creator()\n bug_arr.append(t)\n\n with open('data.json', 'w') as json_f:\n json_f.write(json.dumps(bug_arr))\n\n\ndef to_beans():\n with open(\"data.json\", \"r\") as f:\n data = json.load(f)\n ds = []\n for d in data:\n b = Bug.Bug()\n b.set('id', d['id'])\n b.set('product', d['product'])\n b.set('component', d['component'])\n b.set('status', d['status'])\n b.set('resolution', d['resolution'])\n b.set('summary', d['summary'])\n b.set('creator', d['creator'])\n b.set('creation_time', d['creation_time'])\n b.set('last_change_time', d['last_change_time'])\n ds.append(b)\n return ds\n\n\nif __name__ == '__main__':\n sw_comments = {}\n read_books(sw_comments)\n run(sw_comments)\n" }, { "alpha_fraction": 0.4726707935333252, "alphanum_fraction": 0.4903726577758789, "avg_line_length": 22.669116973876953, "blob_id": "ecd13275d49af3104f61aab741373d639cf8e4df", "content_id": "499db63f7cd2f324dfda2d4ea07ac28a72df6d7b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3220, "license_type": "permissive", "max_line_length": 59, "num_lines": 136, "path": "/opengoo/utils.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\nimport json\nimport sys, os\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nclass JSFormat:\n def __init__(self):\n self.state = 0\n\n def push(self, ch):\n ch = ord(ch)\n if self.state == 0:\n if ch == ord('\"'):\n self.state = 1\n return to_str(chr(ch))\n elif ch == ord('/'):\n self.state = 3\n else:\n return to_str(chr(ch))\n elif self.state == 1:\n if ch == ord('\"'):\n self.state = 0\n return to_str(chr(ch))\n elif ch == ord('\\\\'):\n self.state = 2\n return to_str(chr(ch))\n elif self.state == 2:\n self.state = 1\n if ch == ord('\"'):\n return to_str(chr(ch))\n return \"\\\\\" + to_str(chr(ch))\n elif self.state == 3:\n if ch == ord('/'):\n self.state = 4\n else:\n return \"/\" + to_str(chr(ch))\n elif self.state == 4:\n if ch == ord('\\n'):\n self.state = 0\n return \"\\n\"\n return \"\"\n\ndef remove_comment(json):\n fmt = JSFormat()\n return \"\".join([fmt.push(c) for c in json])\n\n\ndef parse_json_in_str(data):\n # parse json and convert everything from unicode to str\n return json.loads(data, object_hook=_decode_dict)\n\n\ndef to_bytes(s):\n if bytes != str:\n if type(s) == str:\n return s.encode('utf-8')\n return s\n\n\ndef to_str(s):\n if bytes != str:\n if type(s) == bytes:\n return s.decode('utf-8')\n return s\n\ndef _decode_list(data):\n rv = []\n for item in data:\n if hasattr(item, 'encode'):\n item = item.encode('utf-8')\n elif isinstance(item, list):\n item = _decode_list(item)\n elif isinstance(item, dict):\n item = _decode_dict(item)\n rv.append(item)\n return rv\n\n\ndef _decode_dict(data):\n rv = {}\n for key, value in data.items():\n if hasattr(value, 'encode'):\n value = value.encode('utf-8')\n elif isinstance(value, list):\n value = _decode_list(value)\n elif isinstance(value, dict):\n value = _decode_dict(value)\n rv[key] = value\n return rv\n\ndef find_project_id(response, proname):\n for project in response:\n if project['name'] == proname:\n return project['id']\n return 0\n\ndef find_module_id(response, modulename):\n for module in response:\n if module['name'] == modulename:\n return module['id']\n return 0\n\ndef get_file_size(path):\n size = os.path.getsize(path)\n rsize = size\n unit = ' B'\n ksize = size/float(1024)\n if ksize > 1:\n unit = ' KB'\n rsize = ksize\n else:\n return str(rsize) + unit\n\n msize = ksize/float(1024)\n if msize > 1:\n unit = ' MB'\n rsize = msize\n else:\n return str(rsize) + unit\n\n gsize = msize/float(1024)\n if gsize > 1:\n unit = ' GB'\n rsize = gsize\n\n return str(rsize) + unit\n\n\ndef print_err(msg):\n print(\"\\033[0;31;42m\" + msg + \"\\033[0m\")\n\ndef print_info(msg):\n print(\"\\033[0;34m\" + msg + \"\\033[0m\")\n\n" }, { "alpha_fraction": 0.544240415096283, "alphanum_fraction": 0.547579288482666, "avg_line_length": 25.577777862548828, "blob_id": "dd6f6ee525a8b067cc2c80e29149bceacecb4c9f", "content_id": "81a5911efe80b1ff3fab66ecf6b4e64465911e6b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1198, "license_type": "permissive", "max_line_length": 103, "num_lines": 45, "path": "/fw_session/model/git_utils.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os\nimport re\nimport ConfigParser\nimport DatabaseExecutor as db\nexecutor = db.DatabaseExecutor()\n\nproject_dir = None\n\n\ndef __load_config():\n config = ConfigParser.ConfigParser()\n config.readfp(open('./.product.ini'))\n product_id = config.getint(\"product_section\", \"product_id\")\n product = executor.get_product(product_id)\n\n global project_dir\n project_dir = product.get_product_dir()\n\n\ndef get_git_logs(tag):\n if not project_dir:\n __load_config()\n\n t = os.getcwd() + '/git-logs.txt'\n os.system(\"cd \" + str(project_dir)\n + \" && repo forall -c 'git log --format=\\\"%<|(15)%an %s\\\" --no-merges ...\" + tag + \"' > \"\n + t + \" 2> /dev/null\")\n return t\n\n\ndef parse_git_logs(fp):\n bugs = []\n with open(fp, 'r') as load_f:\n iterator = load_f.readlines()\n for line in iterator:\n if not line:\n continue\n summary = re.search('bug([\\t ]*)(.*)\\d+', line.lower())\n if summary:\n num_list = re.findall('\\d+\\.?\\d*', summary.group())\n if num_list:\n bugs.append(num_list[0])\n load_f.close()\n return bugs\n\n\n" }, { "alpha_fraction": 0.6457564830780029, "alphanum_fraction": 0.6863468885421753, "avg_line_length": 37.71428680419922, "blob_id": "c69667d616fc81c8730fdfdda37b18eaab71f782", "content_id": "6d1ef970dce07a425d5550986ca62e8b590e867c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 271, "license_type": "permissive", "max_line_length": 147, "num_lines": 7, "path": "/release/eagle/sendMail.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\nexport mailTo=\"[email protected]\"\n#export mailTo=\"[email protected]\"\nexport title=\"GXV3350 Auto Build version 1.0.0.1 user done\"\nsendemail -f [email protected] -t ${mailTo} -s smtp.grandstream.cn -o tls=no message-charset=utf-8 -xu [email protected] -xp S1pTestH2 -v -u \"${title}\" < ./message.txt\n" }, { "alpha_fraction": 0.5179716348648071, "alphanum_fraction": 0.5281593203544617, "avg_line_length": 32.86046600341797, "blob_id": "760cd4da2d5759cbaec8aacc51cc2563700fee79", "content_id": "e4d61893c5ac7dd0933ff443762d1ece0bb5ac46", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8736, "license_type": "permissive", "max_line_length": 101, "num_lines": 258, "path": "/fw_upload", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport sys\nimport signal\nimport getopt\nimport ConfigParser\nimport os\n\n\nclass Firmware(dict):\n def __init__(self, *args, **kwargs):\n super(dict, self).__init__(*args, **kwargs)\n self.build_type = \"eng\"\n self.upload_bin_only = 0\n self.upload_to_ipvt = 0\n pass\n\n def __getattr__(self, name):\n value = self[name]\n if isinstance(value, dict):\n value = dict(value)\n return value\n\n def __setattr__(self, key, value):\n self[key] = value\n\n # string: eagle/d33/bat/alpaca/nec/d13\n def set_product(self, product):\n setattr(self, \"product\", product)\n\n def get_product(self):\n return getattr(self, \"product\")\n\n # string: version 1.0.0.1\n def set_version(self, version):\n setattr(self, \"version\", version)\n\n def get_version(self):\n return getattr(self, \"version\")\n\n # string: btype user or eng\n def set_build_type(self, btype):\n setattr(self, \"build_type\", btype)\n\n def get_build_type(self):\n return getattr(self, \"build_type\")\n\n # int: up_type 1: upload bin only 0: not only\n def set_upload_bin_only(self, up_type):\n setattr(self, \"upload_bin_only\", up_type)\n\n def get_upload_bin_only(self):\n return getattr(self, \"upload_bin_only\")\n\n # int: ipvt 1: up 0: don't up\n def set_upload_to_ipvt(self, ipvt):\n setattr(self, \"upload_to_ipvt\", ipvt)\n\n def get_upload_to_ipvt(self):\n return getattr(self, \"upload_to_ipvt\")\n\n\nclass Upload:\n def __init__(self):\n pass\n\n @staticmethod\n def upload(fw):\n print \"*\"\n print \"*\" * 100\n print \"* start uploading...\"\n print \"*\"\n\n fw_name_pack = fw.__getattr__(\"fw_name_pack\")\n product = fw.__getattr__(\"opengoo_product\")\n module = fw.__getattr__(\"opengoo_module\")\n ipvt_fw_path = fw.__getattr__(\"ipvt_fw_path\")\n pack_path = os.path.join(os.path.abspath(os.path.curdir), fw.get_product(), fw.get_version())\n\n print \"* cd ./opengoo/ && ./OpengooMain.py -p '%s' -m '%s' -l %s\" \\\n % (product, module, os.path.join(pack_path, fw_name_pack))\n os.system(\n \"cd ./opengoo/ && ./OpengooMain.py -p '%s' -m '%s' -l %s\"\n % (product, module, os.path.join(pack_path, fw_name_pack)))\n\n if not fw.get_upload_bin_only() == 1:\n img_name_pack = fw.__getattr__(\"img_name_pack\")\n print \"* cd ./opengoo/ && ./OpengooMain.py -p '%s' -m '%s' -l %s\" \\\n % (product, module, os.path.join(pack_path, img_name_pack))\n os.system(\n \"cd ./opengoo/ && ./OpengooMain.py -p '%s' -m '%s' -l %s\"\n % (product, module, os.path.join(pack_path, img_name_pack)))\n\n if fw.get_upload_to_ipvt() == 1:\n fw_name_origin = fw.__getattr__(\"fw_name_origin\")\n print \"scp %s [email protected]:%s\" % (os.path.join(pack_path, fw_name_origin), ipvt_fw_path)\n os.system(\"scp %s [email protected]:%s\" % (os.path.join(pack_path, fw_name_origin), ipvt_fw_path))\n\n\nclass Pack:\n def __init__(self):\n pass\n\n @staticmethod\n def check_exists(path):\n\n if not \"*\" in path and not os.path.exists(path):\n print \"* \"\n print \"* Error: %s is not exists.\" % path\n sys.exit()\n\n @staticmethod\n def pack(fw):\n print \"*\"\n print \"* start packing...\"\n print \"*\"\n product = fw.get_product()\n version = fw.get_version()\n upload_bin_only = fw.get_upload_bin_only() == 1\n fw_name_origin = fw.__getattr__(\"fw_name_origin\")\n fw_name_pack = fw.__getattr__(\"fw_name_pack\")\n img_name_origin = fw.__getattr__(\"img_name_origin\")\n img_name_pack = fw.__getattr__(\"img_name_pack\")\n fw_path = fw.__getattr__(\"fw_path\")\n\n new_dir = os.path.join('.', product, version)\n print \"* mkdir -p %s'\" % new_dir\n os.system(\"mkdir -p %s\" % new_dir)\n print \"* cd %s'\" % new_dir\n os.walk(new_dir)\n\n fw_origin = os.path.join(fw_path, fw_name_origin)\n img_origin = os.path.join(fw_path, img_name_origin)\n\n # check file exists\n Pack.check_exists(fw_origin)\n # copy fw.bin\n print \"* cd %s && cp %s %s\" % (new_dir, fw_origin, '.')\n os.system(\"cd %s && cp %s %s\" % (new_dir, fw_origin, '.'))\n # shutil.copy(fw_origin, '.')\n # pack fw.bin\n print \"* cd %s && tar -cf %s %s\" % (new_dir, fw_name_pack, fw_name_origin)\n os.system(\"cd %s && tar -cf %s %s\" % (new_dir, fw_name_pack, fw_name_origin))\n\n # check if upload bin only\n if not upload_bin_only:\n print \"*\"\n Pack.check_exists(img_origin)\n # copy img\n if \"gvc3212\" == product or \"gvc3212_c03\" == product:\n print \"* cd %s && cp %s %s\" % (new_dir, img_origin, img_name_origin)\n os.system(\"cd %s && cp %s %s\" % (new_dir, img_origin, img_name_origin))\n os.system(\"cd %s && zip %s %s\" % (new_dir, img_name_pack, img_name_origin))\n else:\n print \"* cd %s && cp %s %s\" % (new_dir, img_origin, img_name_pack)\n os.system(\"cd %s && cp %s %s\" % (new_dir, img_origin, img_name_pack))\n\n # shutil.copy(img_origin, img_name_pack)\n # md5sum check\n print \"*\"\n print \"* MD5SUM:\"\n os.system(\"cd %s && md5sum *\" % new_dir)\n print \"*\"\n print \"* pack done.\"\n\n\ndef help():\n print \"\"\"\nSYNOPSIS\n fw_upload [-h] [-p product] [-s] [-v version] [-o] [-t]\n \nDESCRIPTION\n copy firmware and pack it.\n upload firmware to OpenGoo.\n upload firmware to fw.ipvideotalk.com \n \nOPTIONS\n -h Print this help document\n \n -p --product=[product] Specify the firmware product, product arguments can be specified as:\n eagle GXV3350\n bat GXV3370\n alpaca GXV3380\n nec GXV3370 OEM (NEC)\n d33 GXV3380 OEM (CMCC)\n gvc3212 GVC3212\n gvc3212_c03 GVC3212 HW C03\n itsv-3 GXV3370 OEM (ZENITEL)\n nec-rcp GXV3370 OEM (NEC Reception Terminal)\n \n -s --user Specify firmware build type as user, default was eng.\n \n -o --upload-bin-only Specify only upload ${PRODUCT}fw.bin, do not upload flash img.\n \n -t --upload-to-ipvt Specify will upload this ${PRODUCT}fw.bin to fw.ipvideotalk.com\n \n -v --version=[version] Specify the firmware version\n \"\"\"\n\n\ndef load_ini(fw):\n version = fw.get_version()\n product = fw.get_product()\n\n parser = ConfigParser.ConfigParser()\n parser.read(\".fw_upload.ini\")\n\n fw.__setattr__(\"fw_name_origin\", parser.get(product, \"fw_name_origin\"))\n fw.__setattr__(\"fw_name_pack\", parser.get(product, \"fw_name_pack\") % version)\n fw.__setattr__(\"img_name_pack\", parser.get(product, \"img_name_pack\") % version)\n fw.__setattr__(\"opengoo_product\", parser.get(product, \"opengoo_product\"))\n fw.__setattr__(\"opengoo_module\", parser.get(product, \"opengoo_module\"))\n fw.__setattr__(\"ipvt_fw_path\", parser.get(product, \"ipvt_fw_path\"))\n fw.__setattr__(\"fw_path\", parser.get(product, \"fw_path_%s\" % fw.get_build_type()) % version)\n img_name_origin = parser.get(product, \"img_name_origin\")\n if \"%s\" in img_name_origin:\n img_name_origin = img_name_origin % version\n fw.__setattr__(\"img_name_origin\", img_name_origin)\n\n\ndef run(argv):\n try:\n opts, args = getopt.getopt(argv, \"v:p:host\", [\"version=\", \"product=\", \"help\",\n \"upload-bin-only\", \"user\", \"upload-to-ipvt\"])\n except getopt.GetoptError as e:\n raise e\n\n fw = Firmware()\n for opt, arg in opts:\n if opt == '-h':\n help()\n sys.exit()\n elif opt in (\"-v\", \"--version\"):\n fw.set_version(arg)\n elif opt in (\"-p\", \"--product\"):\n fw.set_product(arg)\n elif opt in (\"-s\", \"--user\"):\n fw.set_build_type(\"user\")\n elif opt in (\"-o\", \"--upload-bin-only\"):\n fw.set_upload_bin_only(1)\n elif opt in (\"-t\", \"--upload-to-ipvt\"):\n fw.set_upload_to_ipvt(1)\n else:\n pass\n\n load_ini(fw)\n Pack.pack(fw)\n Upload.upload(fw)\n\n\ndef on_signal_interrupt(sig, frame):\n print \"RCV: signal(%s)\" % sig\n sys.exit(0)\n\n\nif __name__ == '__main__':\n signal.signal(signal.SIGINT, on_signal_interrupt)\n run(sys.argv[1:])\n" }, { "alpha_fraction": 0.5330827236175537, "alphanum_fraction": 0.5642856955528259, "avg_line_length": 29.399999618530273, "blob_id": "88ba4e1ae555722016dfa298ae67124466e00f60", "content_id": "73c6cdd0fc13ca18e12920d66bd27d71aa33e996", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5662, "license_type": "permissive", "max_line_length": 113, "num_lines": 175, "path": "/monitor/blk_monitor.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport collections\nimport smtplib\nfrom email.mime.text import MIMEText\n\nPER_THRESHOLD = 90 # more than 95% used\nAVL_THRESHOLD = 20 * 1024 * 1024 * 1024 # available less than 10GB\nSVR_LIST = ['192.168.120.166', '192.168.120.239', '192.168.120.32', '192.168.120.33', '192.168.120.240']\nMAIL_TO = \"[email protected],[email protected],[email protected],[email protected],[email protected],\" \\\n \"[email protected],[email protected],[email protected],[email protected]\"\nTMP_FILE = \"/tmp/snmpdf.html\"\nmessage = \"\"\n\n\ndef snmp_walk(host, oid):\n result = os.popen('snmpwalk -v 2c -c hplan ' + host + ' ' + oid).read().split(\"\\n\")[:-1]\n return result\n\n\n# 测试用\ndef snmp_walk_raw(host, oid):\n result = os.popen('snmpwalk -v 2c -c hplan ' + host + ' ' + oid).read()\n return result\n\n\ndef parse_storage_desc(data, oid):\n if data:\n respond = collections.OrderedDict()\n for d in data:\n d = d.replace(\" \", \"\").replace(\"HOST-RESOURCES-MIB::\", \"\").split(\"=\")\n index = d[0][len(oid) + 1:]\n name = d[1].replace(\"STRING:\", \"\")\n if name.startswith(\"/\"):\n respond[index] = name\n return respond\n else:\n pass\n\n\ndef parse_int(data, oid):\n if data:\n respond = collections.OrderedDict()\n for d in data:\n d = d.replace(\" \", \"\").replace(\"HOST-RESOURCES-MIB::\", \"\").split(\"=\")\n index = d[0][len(oid) + 1:]\n v = int(d[1].replace(\"INTEGER:\", \"\").replace(\"Bytes\", \"\"))\n respond[index] = v\n return respond\n else:\n pass\n\n\ndef send_mail(username, passwd, recv, title, content, mail_host='smtp.qiye.163.com', port=25):\n \"\"\"\n 发送邮件函数,默认使用163smtp\n :param username: 邮箱账号 [email protected]\n :param passwd: 邮箱密码\n :param recv: 邮箱接收人地址,多个账号以逗号隔开\n :param title: 邮件标题\n :param content: 邮件内容\n :param mail_host: 邮箱服务器\n :param port: 端口号\n :return:\n \"\"\"\n\n msg = MIMEText(content, 'html', 'utf-8') # 邮件内容\n msg['Subject'] = title # 邮件主题\n msg['From'] = username # 发送者账号\n msg['To'] = recv # 接收者账号列表\n smtp = smtplib.SMTP(mail_host, port=port) # 连接邮箱,传入邮箱地址,和端口号,smtp的端口号是25\n smtp.login(username, passwd) # 发送者的邮箱账号,密码\n smtp.sendmail(username, recv.split(\",\"), msg.as_string())\n # 参数分别是发送者,接收者,第三个是把上面的发送邮件的内容变成字符串\n smtp.quit() # 发送完毕后退出smtp\n print('email send success.')\n\n\ndef get_human_reading_size(size):\n a = round(1.0 * size / 1024 / 1024, 2)\n if a > 2048:\n return str(round(1.0 * a / 1024, 2)) + \"G\"\n else:\n return str(a) + \"M\"\n\n\ndef print_sys(dev_desc, dev_size, dev_used, cb):\n column = [\"Mount on\", \"Total\", \"Used\", \"Avail\", \"Percent\"]\n\n f.writelines(\"<pre>%-30s%-10s%-10s%-10s%-10s</pre>\" % tuple(column))\n\n for k, v in dev_desc.iteritems():\n total = dev_size.get(k)\n used = dev_used.get(k)\n if not total:\n total = 0\n if not used:\n used = 0\n\n avail = total - used\n if used == 0 and total == 0:\n used_per = 0\n else:\n used_per = round((1.0 * used / total) * 100, 2)\n\n fmt_val = (v, get_human_reading_size(total), get_human_reading_size(used), get_human_reading_size(avail),\n str(used_per) + \"%\")\n\n if used_per > PER_THRESHOLD and avail < AVL_THRESHOLD:\n cb[\"warning\"] = True\n f.writelines(\"<pre style='color:Red;font-weight:bold;'>%-30s%-10s%-10s%-10s%-10s</pre>\" % fmt_val)\n else:\n f.writelines(\"<pre>%-30s%-10s%-10s%-10s%-10s</pre>\" % fmt_val)\n f.writelines(\"<br/><HR/>\")\n\n\ndef walk_server(address, callback, f):\n if address:\n target = address\n else:\n target = \"localhost\"\n\n f.writelines(\"<h1>%s</h1>\" % target)\n\n if sys.argv.__len__() != 1:\n target = sys.argv[1]\n\n #\n data = snmp_walk(target, \"hrStorageDescr\")\n dev_desc_resp = parse_storage_desc(data, \"hrStorageDescr\")\n\n # 簇的大小\n cluster_units = snmp_walk(target, \"hrStorageAllocationUnits\")\n cluster_units_resp = parse_int(cluster_units, \"hrStorageAllocationUnits\")\n\n # 簇的数目\n cluster_size = snmp_walk(target, \"hrStorageSize\")\n cluster_size_resp = parse_int(cluster_size, \"hrStorageSize\")\n\n # 使用多少, 跟总容量相除就是占用率\n used_data = snmp_walk(target, \"hrStorageUsed\")\n cluster_used_resp = parse_int(used_data, \"hrStorageUsed\")\n\n dev_size_resp = collections.OrderedDict()\n dev_used_resp = collections.OrderedDict()\n for k, v in cluster_units_resp.iteritems():\n if k in cluster_size_resp:\n dev_size_resp[k] = cluster_size_resp.get(k) * v\n dev_used_resp[k] = cluster_used_resp.get(k) * v\n else:\n continue\n\n print_sys(dev_desc_resp, dev_size_resp, dev_used_resp, callback)\n\n\nif __name__ == '__main__':\n level = {\"warning\": False}\n\n with open(TMP_FILE, \"w+\") as f:\n f.writelines(\"<html><body>\")\n for svr in SVR_LIST:\n walk_server(svr, level, f)\n f.writelines(\"</body></html>\")\n f.close()\n\n if level[\"warning\"]:\n email_user = '[email protected]'\n email_pwd = 'Dev_1_3614'\n # TEST_MAIL_TO = \"[email protected]\"\n subject = 'Warning: No space left on build server.'\n with open(TMP_FILE, \"r\") as f:\n content = f.read()\n send_mail(email_user, email_pwd, MAIL_TO, subject, content)\n" }, { "alpha_fraction": 0.5730858445167542, "alphanum_fraction": 0.6508120894432068, "avg_line_length": 18.56818199157715, "blob_id": "556f721ab8ec630eb624a23bcd2ec4035f9d1906", "content_id": "cc6a9d94a7ee43370ed8afc7ef6a9e346267dc22", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 862, "license_type": "permissive", "max_line_length": 55, "num_lines": 44, "path": "/tools/snmp_install.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsudo apt-get install snmpd snmp snmp-mibs-downloader -y\nsudo download-mibs\n\n\nsudo cp /etc/snmp/snmpd.conf /etc/snmp/snmpd.conf.ori\nsudo vim /etc/snmp/snmpd.conf\n\n#1\n# Change\n# agentAddress udp:127.0.0.1:161\n# to\n# #agentAddress udp:127.0.0.1:161\n#\n# Change\n# agentAddress udp:161,udp6:[::1]:161 \n# to\n# agentAddress udp:161\n\n#2\n# Change\n# view systemonly included .1.3.6.1.2.1.1\n# to\n# #view systemonly included .1.3.6.1.2.1.1\n#\n# Change\n# view systemonly included .1.3.6.1.2.1.25.1\n# to\n# #view systemonly included .1.3.6.1.2.1.25.1\n#\n# Add\n# view systemonly included .1\n\n#3\n# Change \n# rocommunity public default -V systemonly\n# to\n# rocommunity hplan default -V systemonly\n#\n# Change\n# rocommunity6 public default -V systemonly\n# to\n# rocommunity6 hplan default -V systemonly\n\n" }, { "alpha_fraction": 0.7155963182449341, "alphanum_fraction": 0.7155963182449341, "avg_line_length": 26.25, "blob_id": "92a793e7d258edfd6e3ff8774650198b0fdd5972", "content_id": "409520342a59dbfceef09dec874998e49dc22319", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 109, "license_type": "permissive", "max_line_length": 72, "num_lines": 4, "path": "/release/env.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nexport SHELL=/bin/bash\nexport PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin\n" }, { "alpha_fraction": 0.4994714558124542, "alphanum_fraction": 0.5089852213859558, "avg_line_length": 19.565217971801758, "blob_id": "d6c42473ccd870107064974280a442aa935a2ced", "content_id": "6d2c19f72772b8fc26db8676b8fcb2aa395acd50", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1892, "license_type": "permissive", "max_line_length": 81, "num_lines": 92, "path": "/release/bat/release.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\nDEBUG=false\nexport ENG=true\nexport VERSION=\"0.0.0.0\"\nexport BUILD_KERNEL=false\nexport MAIL_TO_DEBUG=\"[email protected]\"\nexport MAIL_TO=\"[email protected]\"\nexport SH_PATH=\"$(cd \"$(dirname \"$0\")\";pwd)\"\nPROJ_TOP=/home/hplan/project/bat\nexport BUILD_CMD=\"./autoBuild.sh\"\n\nprint_help() {\necho \"\n release tool for GXV3370\n\n # -h: print this help document\n # -r: specify email addressee. default: [email protected]\n # -b: build kernel\n # -s: user build. default: eng\n # -c: clean build. default: not clean\n # -v: set version\n\"\n}\n\nwhile getopts \"v:r:csbh\" arg\ndo\n case ${arg} in\n h)\n print_help\n exit 0\n ;;\n\n v)\n export BUILD_CMD=\"${BUILD_CMD} -v ${OPTARG}\"\n ;;\n\n r)\n if ${DEBUG}; then\n export MAIL_TO=${MAIL_TO_DEBUG}\n else\n export MAIL_TO=${OPTARG}\n fi\n ;;\n\n s)\n export ENG=false\n export BUILD_CMD=\"${BUILD_CMD} -s\"\n ;;\n\n c)\n export BUILD_CMD=\"${BUILD_CMD} -c\"\n ;;\n\n b)\n export BUILD_KERNEL=true\n ;;\n\n ?)\n echo \"unknown argument ${OPTARG}\"\n exit 1\n ;;\n esac\ndone\n\nif ${DEBUG}; then\n echo \"under test, break;\"\n exit 0\nfi\n\nbuild() {\n source ${SH_PATH}/../env.sh\n source ${SH_PATH}/../openjdk-8-env\n cd ${PROJ_TOP}/android && source ${PROJ_TOP}/android/build/envsetup.sh\n if ${ENG}; then\n cd ${PROJ_TOP}/android && lunch full_bat-eng\n else\n cd ${PROJ_TOP}/android && lunch full_bat-user\n fi\n\n if ${BUILD_KERNEL}; then\n cd ${PROJ_TOP}/kernel-3.18 && ./buildkernel.sh -k\n\n if [ $? != 0 ]; then\n echo \"FATAL: build bootimage failed!\"\n exit 1\n fi\n fi\n\n cd ${PROJ_TOP}/android/vendor/grandstream/build && ${BUILD_CMD} -r ${MAIL_TO}\n}\n\nbuild\n" }, { "alpha_fraction": 0.6887966990470886, "alphanum_fraction": 0.7095435857772827, "avg_line_length": 29.125, "blob_id": "601e2c77af27b8dd083e02eeb6edd2d03557f14f", "content_id": "b81a22fc049ae52ea4664e9ab3de31820ffb4b5d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 241, "license_type": "permissive", "max_line_length": 72, "num_lines": 8, "path": "/dailybuild/env_java6.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nexport SHELL=/bin/bash\nexport PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin\nexport JAVA_HOME=/opt/jdk1.6.0_37/\nexport CLASSPATH=.:${JAVA_HOME}/lib:${CLASSPATH}\nexport PATH=${JAVA_HOME}/bin:${PATH}\necho $PATH\n" }, { "alpha_fraction": 0.49824970960617065, "alphanum_fraction": 0.5064177513122559, "avg_line_length": 24.969696044921875, "blob_id": "53ef7d003198cc7e9e79d7841e7abc213374444d", "content_id": "6189cbccb93b98e055687d5ae084a6182f2e7733", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2571, "license_type": "permissive", "max_line_length": 71, "num_lines": 99, "path": "/fw_session/setup/bz_setup.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom model import DatabaseExecutor as db\nfrom model import Bz\nimport getpass\nimport sys\nsys.path.append(\"./model\")\nexecutor = db.DatabaseExecutor()\n\n\ndef new_profile():\n # print host\n print \" Bugzilla host: https://bugzilla.grandstream.com/bugzilla\"\n host = \"https://bugzilla.grandstream.com/bugzilla\"\n # print name\n name = raw_input(\" Bugzilla name: \")\n # print pwd\n pwd = getpass.getpass(\" Bugzilla password: \")\n\n bz = Bz.Bz()\n bz.set(\"bz_name\", name)\n bz.set(\"bz_host\", host)\n bz.set(\"bz_pwd\", pwd)\n executor.bz_add(bz)\n print \"\\n Profile created.\"\n\n\ndef edit_profile(bz):\n host = bz.get(\"bz_host\")\n print \" \" + \"-\" * (len(host) + 25)\n print \" Bugzilla host: |\", bz.get(\"bz_host\")\n print \" Bugzilla name: |\", bz.get(\"bz_name\")\n print \" Bugzilla password: |\", \"******\"\n print \" \" + \"-\" * (len(host) + 23)\n print \"\\n>> Operations:\"\n print \"\"\"\n 1.update host\n 2.update name\n 3.update password\n 4.show password\n 5.clear token\n 6.exit\n\"\"\"\n\n while True:\n # get user input\n combo = raw_input(\"* What do you want to do: \")\n if not combo.isdigit():\n print \"Unknown combo {0}\\n\".format(combo)\n elif int(combo) == 1:\n host = raw_input(\" new host: \")\n bz = Bz.Bz()\n bz.set(\"bz_host\", host)\n bz_update(bz)\n print \"\"\n elif int(combo) == 2:\n name = raw_input(\" new name: \")\n bz = Bz.Bz()\n bz.set(\"bz_name\", name)\n bz_update(bz)\n print \"\"\n elif int(combo) == 3:\n pwd = getpass.getpass(\" new password: \")\n bz = Bz.Bz()\n bz.set(\"bz_pwd\", pwd)\n bz_update(bz)\n print \"\"\n elif int(combo) == 4:\n bz = executor.bz_query()\n print \" password: {0}\\n\".format(bz['bz_pwd'])\n elif int(combo) == 5:\n bz = Bz.Bz()\n bz.set(\"bz_token\", \"\")\n executor.bz_update(bz)\n print \" Cleared.\\n\"\n elif int(combo) == 6:\n exit(0)\n else:\n print \"Unknown combo {0}\\n\".format(combo)\n\n\ndef bz_update(bz):\n executor.bz_update(bz)\n print \" profile updated.\"\n\n\ndef profile_setup():\n print \"\\n>> Profile:\"\n bz = executor.bz_query()\n if not bz:\n print \">> New profile:\\n\"\n new_profile()\n exit(0)\n else:\n print \" Profile already exists.\\n\"\n edit_profile(bz)\n\n\ndef run():\n profile_setup()\n" }, { "alpha_fraction": 0.8873239159584045, "alphanum_fraction": 0.8873239159584045, "avg_line_length": 16.75, "blob_id": "6d6d7188cf8852340448f673f3c7f6e16be15caf", "content_id": "7c6375bdbc1f501db5579c66f020ec25f798f940", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "permissive", "max_line_length": 23, "num_lines": 4, "path": "/fw_session/model/__init__.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "import Bz\nimport Product\nimport DatabaseHelper\nimport DatabaseExecutor\n" }, { "alpha_fraction": 0.5498489141464233, "alphanum_fraction": 0.5513595342636108, "avg_line_length": 21.066667556762695, "blob_id": "719273ff32cef9aef8a40c557502785acc190a72", "content_id": "51141db5a17e5471c443ef0abe696d29a742c8ef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 662, "license_type": "permissive", "max_line_length": 51, "num_lines": 30, "path": "/fw_session/model/Product.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__metaclass__ = type\n\n\nclass Product(dict):\n\n def __init__(self, *args, **kwargs):\n super(dict, self).__init__(*args, **kwargs)\n\n def __getattr__(self, name):\n value = self[name]\n if isinstance(value, dict):\n value = dict(value)\n return value\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def set(self, key, value):\n setattr(self, key, value)\n\n def get(self, key):\n return getattr(self, key)\n\n def get_product_name(self):\n return getattr(self, \"name\")\n\n def get_product_dir(self):\n return getattr(self, \"dir\")\n" }, { "alpha_fraction": 0.5801687836647034, "alphanum_fraction": 0.5910689234733582, "avg_line_length": 28.31958770751953, "blob_id": "36d2a34694ff4de3a77197933e6f1984d0923e0f", "content_id": "c132b29a45d4554584b099c65df249968fceae9f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2844, "license_type": "permissive", "max_line_length": 85, "num_lines": 97, "path": "/tools/prepare-commit-msg-deploy.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncat << EOF > ./repo/hooks/prepare-commit-msg\n#!/bin/bash\n#\n# An example hook script to prepare the commit log message.\n# Called by \"git commit\" with the name of the file that has the\n# commit message, followed by the description of the commit\n# message's source. The hook's purpose is to edit the commit\n# message file. If the hook fails with a non-zero status,\n# the commit is aborted.\n#\n# To enable this hook, rename this file to \"prepare-commit-msg\".\n\n# This hook includes three examples. The first comments out the\n# \"Conflicts:\" part of a merge commit.\n#\n# The second includes the output of \"git diff --name-status -r\"\n# into the message, just before the \"git status\" output. It is\n# commented because it doesn't cope with --amend or with squashed\n# commits.\n#\n# The third example adds a Signed-off-by line to the message, that can\n# still be edited. This is rarely a good idea.\n\naddMsg() {\n # Get name of current branch\n BRANCH_NAME=\\$(git symbolic-ref --short -q HEAD)\n\n # First blank line is title, second is break for body, third is start of body\n BODY=\\`cut -d \\| -f 6 \\$1 | grep -v -E .\\+ -n | cut -d ':' -f1 | sed '3q;d'\\`\n\n # Put in string \"(branch_name/): \" at start of commit message body.\n # For templates with commit bodies\n if test ! -z \\$BODY; then\n awk 'NR=='\\$BODY'{\\$0=\"\\('\\$NAME'/\\): \"}1;' \\$1 > tmp_msg && mv tmp_msg \"\\$1\"\n else\n sed -i \"1i[NBF/INTERNAL/Bug]\" \\$1 \n sed -i \"2i\\\\\\\\\" \\$1\n sed -i \"3iCause: None\" \\$1\n sed -i \"4iSolution: None\" \\$1\n sed -i \"5iFixed Version: \\`date \"+%Y.%m.%d\"\\`\" \\$1\n sed -i \"6iBranch: \\$BRANCH_NAME\" \\$1\n fi\n}\n\n\n# You might need to consider squashes\ncase \"\\$2,\\$3\" in\n # Commits that already have a message\n commit,?*)\n ## git commit --amend\n ;;\n\n # Messages are one line messages you decide how to handle\n message,)\n ## git commit -m \"msg\"\n BRANCH_NAME=\\$(git symbolic-ref --short -q HEAD)\n echo \"[INTERNAL] \\`cat \\$1\\`\" > \\$1\n echo \"\" >> \\$1\n echo \"Cause: None\" >> \\$1\n echo \"Solution: None\" >> \\$1\n echo \"Fixed Version: \\`date \"+%Y.%m.%d\"\\`\" >> \\$1\n echo \"Branch: \\$BRANCH_NAME\" >> \\$1\n ;;\n\n # Merge commits\n merge,)\n # Comments out the \"Conflicts:\" part of a merge commit.\n perl -i.bak -ne 's/^/# /, s/^# #/#/ if /^Conflicts/ .. /#/; print' \"\\$1\"\n ;;\n\n # Non-merges with no prior messages\n *)\n addMsg \\$1\n ;;\nesac\nEOF\n\nchmod +x ./repo/hooks/prepare-commit-msg\n\ngit config --global --unset commit.template > /dev/null\n\nPCM_PATT=\"`pwd`/repo/hooks/prepare-commit-msg\"\n\nhook_dirs=`find -name hooks`\nfor d in $hook_dirs; do\n cd $d\n if [ \"./repo/hooks\" == \"$d\" ]; then\n cd - > /dev/null\n continue\n fi\n sudo ln -sf $PCM_PATT . \n cd - > /dev/null\ndone\n\necho \"done\"\n" }, { "alpha_fraction": 0.5493861436843872, "alphanum_fraction": 0.55859375, "avg_line_length": 26.984375, "blob_id": "b4f6860ee80021308d11b20ad8a55945bc8684a2", "content_id": "5dde2c71c7065b99303081b9682be65a36840917", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3584, "license_type": "permissive", "max_line_length": 94, "num_lines": 128, "path": "/translations/parseXLS.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding=utf-8 -*-\nimport xlrd\nimport collections\n\n\nINPUT_XLS = \"gxv33xx-hebrew_Excel.xlsx\"\nOUTPUT_EN = \"output_en.txt\"\nOUTPUT_ZH = \"output_zh.txt\"\nOUTPUT_HE = \"output_he.txt\"\n\n\ndef read_sheet1(en_list, zh_list, excelFile):\n data = xlrd.open_workbook(excelFile)\n table = data.sheet_by_index(0)\n\n for rowNum in range(table.nrows):\n if rowNum > 4:\n row = table.row_values(rowNum)\n en_list[int(row[0])] = row[1].encode(\"utf-8\")\n zh_list[int(row[0])] = row[2].encode(\"utf-8\")\n\n\ndef export_lcd_languages():\n en_list = collections.OrderedDict()\n zh_list = collections.OrderedDict()\n\n read_sheet1(en_list, zh_list, excelFile=excelFile)\n en_list = sorted(en_list.iteritems(), key=lambda x: int(x[0]), reverse=False)\n zh_list = sorted(zh_list.iteritems(), key=lambda x: int(x[0]), reverse=False)\n\n with open(OUTPUT_ZH, \"w+\") as f_zh:\n for k, v in zh_list:\n f_zh.write(str(k) + \",\" + v + '\\n')\n pass\n\n with open(OUTPUT_EN, \"w+\") as f_en:\n for k, v in en_list:\n f_en.write(str(k) + \",\" + v + '\\n')\n\n print \"Export successfully.\"\n\n\ndef read_sheet2(excelFile):\n data = xlrd.open_workbook(excelFile)\n # table = data.sheet_by_index(2)\n # with open(\"web_zh.js\", \"w+\") as f:\n table = data.sheet_by_index(1)\n with open(\"web_en.js\", \"w+\") as f:\n for rowNum in range(table.nrows):\n row = table.row_values(rowNum)\n line = row[0].encode(\"utf-8\")\n if len(line) == 0:\n continue\n\n if not line.startswith(\"/*\") and not line.endswith(\";\"):\n line = line + \";\"\n f.write(line + \"\\n\")\n\n\ndef export_web_languages():\n read_sheet2(excelFile=excelFile)\n\n\ndef export_tooltips():\n data = xlrd.open_workbook(excelFile)\n table = data.sheet_by_index(4)\n with open(\"tips_zh.js\", \"w+\") as f:\n # table = data.sheet_by_index(3)\n # with open(\"tips_en.js\", \"w+\") as f:\n f.write(\"\"\"function TipsDef(id, content) {\n this.id = id;\n this.content = content;\n}\n\n\nvar tip_item_en = new Array;\ntip_item_en.push (\n\n /*account status*/\n /*new TipsDef(\"Accounts' name\",\n \"All the SIP accounts on the phone. Each account will show its status on this page.\"),\n new TipsDef(\"Number\",\n \"SIP User ID for the account.\"),\n new TipsDef(\"Registered Status\",\n \"Registration status for the SIP account.\"),\n new TipsDef(\"SIP Server\",\n \"URL or IP address, and port of the SIP server.\"),*/\n \n\"\"\")\n\n for rowNum in range(table.nrows):\n row = table.row_values(rowNum)\n line = row[0].encode(\"utf-8\")\n if len(line) == 0:\n continue\n\n if not line.startswith(\"/*\") and not line.endswith(\",\"):\n line = line + \",\"\n f.write(\" \" + line + \"\\n\")\n f.write(\");\")\n\n\ndef export_hebrew():\n he_list = collections.OrderedDict()\n\n data = xlrd.open_workbook(excelFile)\n table = data.sheet_by_index(0)\n\n for rowNum in range(table.nrows):\n if rowNum > 4:\n row = table.row_values(rowNum)\n he_list[int(row[0])] = row[4].encode(\"utf-8\")\n\n he_list = sorted(he_list.iteritems(), key=lambda x: int(x[0]), reverse=False)\n\n with open(OUTPUT_HE, \"w+\") as f_zh:\n for k, v in he_list:\n f_zh.write(str(k) + \",\" + v + '\\n')\n pass\n\n\nif __name__ == '__main__':\n excelFile = INPUT_XLS\n # export_lcd_languages()\n # export_web_languages()\n # export_tooltips()\n export_hebrew()\n\n\n" }, { "alpha_fraction": 0.5564424395561218, "alphanum_fraction": 0.557582676410675, "avg_line_length": 21.487178802490234, "blob_id": "9f7e3c7ee1f39128ce6d04f1ad193fe6f96728d9", "content_id": "3e8a3ce6ad347ce997dae19d5a9bd83df33d833f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 877, "license_type": "permissive", "max_line_length": 51, "num_lines": 39, "path": "/fw_session/model/Bz.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__metaclass__ = type\n\n\nclass Bz(dict):\n\n def __init__(self, *args, **kwargs):\n super(dict, self).__init__(*args, **kwargs)\n\n def __getattr__(self, name):\n value = self[name]\n if isinstance(value, dict):\n value = dict(value)\n return value\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def set(self, key, value):\n setattr(self, key, value)\n\n def get(self, key):\n return getattr(self, key)\n\n def get_host_url(self):\n return getattr(self, \"bz_host\")\n\n def get_config_usr(self):\n return getattr(self, \"bz_name\")\n\n def get_config_pwd(self):\n return getattr(self, \"bz_pwd\")\n\n def get_config_token(self):\n return getattr(self, \"bz_token\")\n\n def set_token(self, token):\n setattr(self, \"bz_token\", token)\n" }, { "alpha_fraction": 0.5914068818092346, "alphanum_fraction": 0.6529064774513245, "avg_line_length": 30.236841201782227, "blob_id": "deb876a5f32cccf01aefa2a222bf77799686faf6", "content_id": "3a70becef28251b7515d56c5cc145464c970e1c7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1187, "license_type": "permissive", "max_line_length": 75, "num_lines": 38, "path": "/dailybuild/integrated/166_dailybuild_one_by_one.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ndailybuild() {\n# start GXV33X5\n# /home/hplan/fw/dailybuild/gxv33x5/dailybuild_gxv3355.sh -s -u\n# /home/hplan/fw/dailybuild/gxv33x5/dailybuild_gxv3355.sh \n# /home/hplan/fw/dailybuild/gxv33x5/dailybuild_d33_plus.sh\n# /home/hplan/fw/dailybuild/gxv33x5/dailybuild_gxv3385.sh\n# /home/hplan/fw/dailybuild/gxv33x5/dailybuild_gxv3385.sh -s\n# /home/hplan/fw/dailybuild/gxv33x5/dailybuild_gxv3375.sh -s\n# /home/hplan/fw/dailybuild/gxv33x5/dailybuild_gxv3375.sh\n\n# start GXV33XX \n# /home/hplan/fw/dailybuild/bat/dailybuild.sh -b -c -s -u\n\n# mod=`expr $(date +%d) % 3`\n# if [[ ${mod} -eq 0 ]]; then\n /home/hplan/fw/dailybuild/bat/dailybuild.sh -b -c -u\n# /home/hplan/fw/dailybuild/bat/dailybuild.sh -b -c\n /home/hplan/fw/dailybuild/alpaca7/eng-166/dailybuild.sh -b -c -u -s\n# /home/hplan/fw/dailybuild/alpaca7/eng-166/dailybuild.sh -b -c\n# /home/hplan/fw/dailybuild/d33/dailybuild.sh -b -c\n# fi\n}\n\n#WEEK_DAY=\"`date +%u`\"\n#echo -n \"$WEEK_DAY is \"\n#if [ ${WEEK_DAY} -lt 6 ] ; then\n# echo \"week day\"\n# dailybuild\n#else\n# echo \"week end\"\n#fi\n\n#hhh=\"`date +%Y%m%d`\"\n#if [ ${hhh} -ge 20221008 ]; then\n dailybuild\n#fi\n" }, { "alpha_fraction": 0.58074951171875, "alphanum_fraction": 0.621191680431366, "avg_line_length": 24.067567825317383, "blob_id": "71fc456737c20adc9556a701ac08a579cfbb8e3d", "content_id": "c1bcf8ff1dd2b2def1e2c293754f1f4c471402eb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3807, "license_type": "permissive", "max_line_length": 185, "num_lines": 148, "path": "/README.md", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "# fw\ngs release tools\n\n初次使用:\n1.配置个人bugzilla账号信息\n```\nhplan@hplan:~/fw/fw_session$ ./configure.py\n\n Release tool setup\n 1.setup bugzilla profile\n 2.setup product\n 3.exit setup\n\n* What do you want to do: 1\n\n>> Profile:\n>> New profile:\n\n Bugzilla host: \n Bugzilla name: \n Bugzilla password:\n\n Profile created.\n```\n2.配置项目\n```\nhplan@hplan:~/fwfwfw/fw/fw_session$ ./configure.py \n\n Release tool setup\n 1.setup bugzilla profile\n 2.setup product\n 3.exit setup\n \n* What do you want to do: 2\n\n>> Operations:\n\n>> New product:\n\n Product name: GXV3480\n Product alias: GXV3480\n Product dir: /home/hplan/project/gxv33x5\n\n Product created.\n\n```\n\n3、获取日志信息\n```\nhplan@hplan:~/fwfwfw/fw/fw_session$ ./release_note.py \n\nRelease tools\n\n All git logs will saved in fw/fw_session/git-logs.txt\n All resolved bugs will saved in fw/fw_session/resolved_bugs.txt\n\n>> Products:\n --------------------------------------------------------\n id: | 1\n name: | GXV3480\n alias: | GXV3480\n dir: | /home/hplan/project/gxv33x5\n --------------------------------------------------------\n* Choose product id: 1\n* Input the compare tag: GXV3480/GXV3450_Fox_Beta_1.0.1.33\n \n>> Git commit:\n >> Git logs since GXV3480/GXV3450_Fox_Beta_1.0.1.33:\n GXV3480 RESOLVED FIXED [Bug 256452] GXV34x0_1.0.2.1_Zoom:Zoom mode, press the home button in the settings to return to the general interface, and the screen will flash once\n ...\n >> Total commits: 7\n \n>> Post plan2fix\n* Do you want to mark plan2fix on these bug? (Y/N) n\n \n>> Post Comment\n* Do you want to comment on these bug? (Y/N) n\n \n>> Bugzilla:\n* Do you want to list all RESOLVED FIXED bugs from Bugzilla? (Y/N) y\n >> RESOLVED FIXED bugs:\n [Bug 214233] GXV33X5_1.0.0.5_Message_SMS is full, via CFG After the maximum value of P1690 is increased, the message space is still full\n [Bug 214649] GXV3385_1.0.0.5_Recorder:Build automatic dialing + recording performance, the recording file does not reach the maximum storage space\n ...\n >> Total bugs: 149\n \n>> Post plan2fix\n* Do you want to mark plan2fix on these bug? (Y/N) n\n \n>> Post Comment\n* Do you want to comment on these bug? (Y/N) n\n```\n\n4、上传固件至FengOffice\n配置FengOffice个人账户信息\n```\nhplan@hplan:~/fw/opengoo$ vi config.json\n{ \n \"user\": \"\",\n \"password\": \"\",\n \"project\": \"\", \n \"module\": \"\", \n \"localpath\": \"\", \n \"remotename\": \"\"\n}\n```\n添加项目编译产物\n```\nhplan@hplan:~/fwfwfw/fw$ vi .fw_upload.ini\n[bat]\nfw_path_user=/var/www/html/hz/firmware/bat/%s/\nfw_path_eng=/var/www/html/hz/firmware/bat/%s/eng/\nfw_name_origin=gxv3370fw.bin\nfw_name_pack=GXV3370_FW_%s.tar\nimg_name_origin=%s.tar.gz\nimg_name_pack=GXV3370_IMG_%s.tar.gz\nipvt_fw_path=/var/www/html/3370/tmp/\nopengoo_product=Android Phone\nopengoo_module=GXV3370/firmware\n```\n上传固件\n```\nhplan@hplan:~/fwfwfw/fw$ ./fw_upload -h\n\nSYNOPSIS\n fw_upload [-h] [-p product] [-s] [-v version] [-o] [-t]\n \nDESCRIPTION\n copy firmware and pack it.\n upload firmware to OpenGoo.\n upload firmware to fw.ipvideotalk.com \n \nOPTIONS\n -h Print this help document\n \n -p --product=[product] Specify the firmware product, product arguments can be specified as:\n eagle GXV3350\n bat GXV3370\n alpaca GXV3380\n \n -s --user Specify firmware build type as user, default was eng.\n \n -o --upload-bin-only Specify only upload ${PRODUCT}fw.bin, do not upload flash img.\n \n -t --upload-to-ipvt Specify will upload this ${PRODUCT}fw.bin to fw.ipvideotalk.com\n \n -v --version=[version] Specify the firmware version\n```" }, { "alpha_fraction": 0.6897580623626709, "alphanum_fraction": 0.7646669149398804, "avg_line_length": 29.474746704101562, "blob_id": "2524e9090a634091ce0891d2ef3eb7894a047924", "content_id": "423a1a02d680a3d1f4253072876c19b419cafd87", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 3017, "license_type": "permissive", "max_line_length": 65, "num_lines": 99, "path": "/.fw_upload.ini", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "[eagle]\nfw_path_user=/var/www/html/hz/firmware/eagle/%s/\nfw_path_eng=/var/www/html/hz/firmware/eagle/%s/eng/\nfw_name_origin=gxv3350fw.bin\nfw_name_pack=GXV3350_FW_%s.tar\nimg_name_origin=%s.tar.gz\nimg_name_pack=GXV3350_IMG_%s.tar.gz\nipvt_fw_path=/var/www/html/GXV3350/\nopengoo_product=Android Phone\nopengoo_module=GXV3350/Firmware\n\n[bat]\nfw_path_user=/var/www/html/hz/firmware/bat/%s/\nfw_path_eng=/var/www/html/hz/firmware/bat/%s/eng/\nfw_name_origin=gxv3370fw.bin\nfw_name_pack=GXV3370_FW_%s.tar\nimg_name_origin=%s.tar.gz\nimg_name_pack=GXV3370_IMG_%s.tar.gz\nipvt_fw_path=/var/www/html/3370/tmp/\nopengoo_product=Android Phone\nopengoo_module=GXV3370/firmware\n\n[nec]\nfw_path_user=/var/www/html/hz/firmware/bat/%s/\nfw_path_eng=/var/www/html/hz/firmware/bat/%s/eng/\nfw_name_origin=itx-3370fw.bin\nfw_name_pack=ITX-3370_FW_%s.tar\nimg_name_origin=%s.tar.gz\nimg_name_pack=ITX-3370_IMG_%s.tar.gz\nipvt_fw_path=/var/www/html/3370/tmp/\nopengoo_product=Android Phone\nopengoo_module=GXV3370/firmware\n\n[alpaca]\nfw_path_user=/var/www/html/hz/firmware/GXV3380/%s/user/\nfw_path_eng=/var/www/html/hz/firmware/GXV3380/%s/\nfw_name_origin=gxv3380fw.bin\nfw_name_pack=GXV3380_FW_%s.tar\nimg_name_origin=cht_alpaca-flashfiles*.zip\nimg_name_pack=GXV3380_IMG_%s.zip\nipvt_fw_path=/var/www/html/GXV3380/tmp/\nopengoo_product=Android Phone\nopengoo_module=GXV3380/Firmware\n\n[d33]\nfw_path_user=/var/www/html/hz/firmware/GXV3380/D33/%s/user/\nfw_path_eng=/var/www/html/hz/firmware/GXV3380/D33/%s/\nfw_name_origin=d33fw.bin\nfw_name_pack=D33_FW_%s.tar\nimg_name_origin=cht_alpaca-flashfiles*.zip\nimg_name_pack=D33_IMG_%s.zip\nipvt_fw_path=/var/www/html/GXV3380/tmp/\nopengoo_product=Android Phone\nopengoo_module=GXV3380/Firmware\n\n[gvc3212]\nfw_path_user=/var/www/html/hz/firmware/gvc3212/%s/v1.0A/\nfw_path_eng=/var/www/html/hz/firmware/gvc3212/%s/v1.0A/\nfw_name_origin=gvc3212fw.bin\nfw_name_pack=GVC3212_FW_%s.tar\nimg_name_origin=update.img\nimg_name_pack=GVC3212_IMG_%s.zip\nipvt_fw_path=\nopengoo_product=GVC3212\nopengoo_module=Firmware\n\n\n[gvc3212_c03]\nfw_path_user=/var/www/html/hz/firmware/gvc3212/%s/hw_c03/\nfw_path_eng=/var/www/html/hz/firmware/gvc3212/%s/hw_c03/\nfw_name_origin=gvc3212fw.bin\nfw_name_pack=GVC3212_FW_%s_HW_C03.tar\nimg_name_origin=update.img\nimg_name_pack=GVC3212_IMG_%s_HW_C03.zip\nipvt_fw_path=\nopengoo_product=GVC3212\nopengoo_module=Firmware\n\n[nec-rcp]\nfw_path_user=/var/www/html/hz/firmware/bat/%s/\nfw_path_eng=/var/www/html/hz/firmware/bat/%s/eng/\nfw_name_origin=itx-3370-rcpfw.bin\nfw_name_pack=ITX-3370_RCP_FW_%s.tar\nimg_name_origin=%s.tar.gz\nimg_name_pack=ITX-3370_RCP_IMG_%s.tar.gz\nipvt_fw_path=/var/www/html/3370/tmp/\nopengoo_product=Android Phone\nopengoo_module=GXV3370/firmware\n\n[itsv-3]\nfw_path_user=/var/www/html/hz/firmware/oem/zenitel/GXV3370/%s/\nfw_path_eng=/var/www/html/hz/firmware/oem/zenitel/GXV3370/%s/eng/\nfw_name_origin=itsv-3fw.bin\nfw_name_pack=ITSV-3_FW_%s.tar\nimg_name_origin=%s.tar.gz\nimg_name_pack=ITSV-3_IMG_%s.tar.gz\nipvt_fw_path=/var/www/html/3370/tmp/\nopengoo_product=Android Phone\nopengoo_module=GXV3370/firmware\n" }, { "alpha_fraction": 0.47566908597946167, "alphanum_fraction": 0.49756690859794617, "avg_line_length": 19.296297073364258, "blob_id": "e5b7c09f6e9526373a61ac81beba2427df4507c7", "content_id": "28e5d4c0905e971db04d167019a8ebfd2494aab9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1644, "license_type": "permissive", "max_line_length": 94, "num_lines": 81, "path": "/release/gvc3212/release.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\nDEBUG=false\nexport ENG=true\nexport VERSION=\"0.0.0.0\"\nexport BUILD_KERNEL=false\nexport MAIL_TO_DEBUG=\"[email protected]\"\nexport MAIL_TO=\"[email protected]\"\nexport SH_PATH=\"$(cd \"$(dirname \"$0\")\";pwd)\"\nPROJ_TOP=/media/gshz/4T_Disk/gvc3212\nexport BUILD_CMD=\"./autoBuild.sh\"\n\nprint_help() {\necho \"\n release tool for GVC3212\n\n # -h: print this help document\n # -r: specify email addressee. default: [email protected]\n # -s: build as secure boot. default: false\n # -c: clean build. default: not clean\n # -p: hw_c03 / hw_gvc3212. default: hw_gvc3212s\n # -v: set version\n\"\n}\n\nwhile getopts \"v:r:p:csh\" arg\ndo\n case ${arg} in\n h)\n print_help\n exit 0\n ;;\n\n v)\n export BUILD_CMD=\"${BUILD_CMD} -v ${OPTARG}\"\n ;;\n\n r)\n if ${DEBUG}; then\n export MAIL_TO=${MAIL_TO_DEBUG}\n else\n export MAIL_TO=${OPTARG}\n fi\n ;;\n\n s)\n export BUILD_CMD=\"${BUILD_CMD} -s\"\n ;;\n\n c)\n export BUILD_CMD=\"${BUILD_CMD} -c\"\n ;;\n\n b)\n export BUILD_KERNEL=true\n ;;\n\n p)\n if [[ ${OPTARG} == \"hw_c03\" ]]; then\n export BUILD_CMD=\"${BUILD_CMD} -k v1.1A\"\n fi\n ;;\n ?)\n echo \"unknown argument ${OPTARG}\"\n exit 1\n ;;\n esac\ndone\n\nif ${DEBUG}; then\n echo \"under test, break;\"\n exit 0\nfi\n\nbuild() {\n source ${SH_PATH}/../env_java6.sh\n source ${PROJ_TOP}/jdks/env.sh\n\n cd ${PROJ_TOP}/android/vendor/grandstream/build && ${BUILD_CMD} -r ${MAIL_TO} -p \"gvc3212\"\n}\n\nbuild\n" }, { "alpha_fraction": 0.576724112033844, "alphanum_fraction": 0.5775862336158752, "avg_line_length": 21.745098114013672, "blob_id": "424601f3253678dee361437cace1c95a5d6c716f", "content_id": "871e9203bd94107d1b1607f09e6e52e3b1fed252", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1160, "license_type": "permissive", "max_line_length": 51, "num_lines": 51, "path": "/fw_session/model/Bug.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__metaclass__ = type\n\n\nclass Bug(dict):\n\n def __init__(self, *args, **kwargs):\n super(dict, self).__init__(*args, **kwargs)\n\n def __getattr__(self, name):\n value = self[name]\n if isinstance(value, dict):\n value = dict(value)\n return value\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def set(self, key, value):\n setattr(self, key, value)\n\n def get(self, key):\n return getattr(self, key)\n\n def get_id(self):\n return getattr(self, \"id\")\n\n def get_product(self):\n return getattr(self, \"product\")\n\n def get_status(self):\n return getattr(self, \"status\")\n\n def get_component(self):\n return getattr(self, \"component\")\n\n def get_summary(self):\n return getattr(self, \"summary\")\n\n def get_resolution(self):\n return getattr(self, \"resolution\")\n\n def get_creator(self):\n return getattr(self, \"creator\")\n\n def get_creation_time(self):\n return getattr(self, \"creation_time\")\n\n def get_last_change_time(self):\n return getattr(self, \"last_change_time\")\n" }, { "alpha_fraction": 0.5612511038780212, "alphanum_fraction": 0.565377950668335, "avg_line_length": 26.568862915039062, "blob_id": "faae51f3a2723cb5ad5d5cfb6a3637cf6fa9a394", "content_id": "47d8f7e0bdd2bae8111ce221ba64eaa48b73fede", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4604, "license_type": "permissive", "max_line_length": 91, "num_lines": 167, "path": "/fw_session/release_note.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport BugzillaApi\nimport ConfigParser\nimport model.DatabaseExecutor as model\nimport model.git_utils as git_utils\nimport signal\n\nexecutor = model.DatabaseExecutor()\ncontroller = BugzillaApi.BugzillaApi()\n\n\ndef save_product(rid):\n config = ConfigParser.ConfigParser()\n config.add_section(\"product_section\")\n config.set(\"product_section\", \"product_id\", rid)\n config.write(open('.product.ini', \"w\"))\n\n\ndef find_bugs_from_git_logs(__tag):\n git_log_dir = git_utils.get_git_logs(__tag)\n print \" \"\n print \">> Git commit:\"\n print \" >> Git logs since %s:\" % __tag\n bugs = git_utils.parse_git_logs(git_log_dir)\n\n pcs_per_page = 50\n bug_total_size = len(bugs)\n if bug_total_size == 0:\n pass\n if bug_total_size >= pcs_per_page:\n pages = bug_total_size / pcs_per_page\n start = 0\n for i in range(pages):\n controller.get_bugs_info(bugs[start:start + pcs_per_page])\n start = start + pcs_per_page\n\n last_page_pcs = bug_total_size % pcs_per_page\n if last_page_pcs > 0:\n controller.get_bugs_info(bugs[-last_page_pcs:])\n else:\n controller.get_bugs_info(bugs)\n print \" >> Total commits: %d\" % bug_total_size\n return bugs\n\n\ndef list_bugs_from_bugzilla():\n # add support of list bugs from bugzilla\n print \" \"\n print \">> Bugzilla:\"\n post = raw_input(\"* Do you want to list all RESOLVED FIXED bugs from Bugzilla? (Y/N) \")\n if post == 'Y' or post == 'y':\n pass\n else:\n exit(0)\n\n print \" >> RESOLVED FIXED bugs:\"\n bug_ids = controller.get_release_note()\n return bug_ids\n\n\ndef post_plan2fix(__bugs):\n print \" \"\n print \">> Post plan2fix\"\n post = raw_input(\"* Do you want to mark plan2fix on these bug? (Y/N) \")\n if post == 'Y' or post == 'y':\n pass\n else:\n return\n # get input comment\n __plan2fix = raw_input(\"* Input the plan2fix: \\n\")\n # do comment request\n for bug_id in __bugs:\n controller.plan2fix(bug_id, __plan2fix)\n\n\ndef post_comment(__bugs):\n # add comment support\n # ask for comment intent\n print \" \"\n print \">> Post Comment\"\n post = raw_input(\"* Do you want to comment on these bug? (Y/N) \")\n if post == 'Y' or post == 'y':\n pass\n else:\n return\n\n # get input comment\n comment = raw_input(\"* Input the comment: \\n\")\n print \" \"\n print \"* your comment is: \\n %s\" % comment\n\n # secondary confirm\n print \" \"\n post = raw_input(\"* Are you sure to comment on these bug? (Y/N) \")\n if post == 'Y' or post == 'y':\n pass\n else:\n return\n\n # do comment request\n for bug_id in __bugs:\n controller.comment(bug_id, comment)\n\n\ndef run():\n # check bugzilla profile\n bz = executor.bz_query()\n prods = executor.prod_query()\n if not bz or not prods:\n print \"Please continue after your personal information is perfected. \"\n exit(0)\n\n print \"\\nRelease tools\\n\"\n print \" All git logs will saved in fw/fw_session/git-logs.txt\"\n print \" All resolved bugs will saved in fw/fw_session/resolved_bugs.txt\"\n print \"\"\n print \">> Products:\"\n for pd in prods:\n print \" --------------------------------------------------------\"\n print \" id: |\", pd['id']\n print \" name: |\", pd['name']\n print \" alias: |\", pd['alias']\n print \" dir: |\", pd['dir']\n print \" --------------------------------------------------------\"\n\n while True:\n rid = raw_input(\"* Choose product id: \")\n if not rid.isdigit():\n print \"Unknown combo {0}\\n\".format(rid)\n else:\n save_product(rid)\n controller.load_cfg()\n break\n\n tag = raw_input(\"* Input the compare tag: \")\n #\n # Find bugs id from git log\n #\n __bugs_from_git_log = find_bugs_from_git_logs(tag)\n post_plan2fix(__bugs_from_git_log)\n post_comment(__bugs_from_git_log)\n\n\n\n #\n # List RESOLVED FIXED bugs from Bugzilla\n #\n __bugs_from_bugzilla = list_bugs_from_bugzilla()\n if __bugs_from_bugzilla:\n for bug in __bugs_from_git_log:\n if int(bug) in __bugs_from_bugzilla:\n __bugs_from_bugzilla.remove(int(bug))\n post_plan2fix(__bugs_from_bugzilla)\n post_comment(__bugs_from_bugzilla)\n else:\n print \"Failed to list all RESOLVED FIXED bugs.\"\n\n\ndef on_signal_interrupt(signal, frame):\n print \"\\n\\n bye~\"\n exit(0)\n\n\nif __name__ == \"__main__\":\n signal.signal(signal.SIGINT, on_signal_interrupt)\n run()\n" }, { "alpha_fraction": 0.5615866184234619, "alphanum_fraction": 0.5720250606536865, "avg_line_length": 20.772727966308594, "blob_id": "a9c30897e29d74025095f2265d60903552ae6db3", "content_id": "2bc5250935a626f66a57ddfcb48664a547b1fdae", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 958, "license_type": "permissive", "max_line_length": 55, "num_lines": 44, "path": "/fw_session/configure.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nfrom model import DatabaseExecutor as db\nfrom setup import product_setup\nfrom setup import bz_setup\nimport signal\n\nexecutor = db.DatabaseExecutor()\n\n\ndef run():\n print \"\"\"\n Release tool setup\n 1.setup bugzilla profile\n 2.setup product\n 3.exit setup\n \"\"\"\n while True:\n combo = raw_input(\"* What do you want to do: \")\n if not combo.isdigit():\n print \"Unknown combo {0}\\n\".format(combo)\n elif int(combo) == 3:\n exit(0)\n elif int(combo) == 1:\n bz_setup.run()\n elif int(combo) == 2:\n product_setup.run()\n pass\n else:\n print \"Unknown combo {0}\\n\".format(combo)\n\n\ndef on_signal_interrupt(signal, frame):\n print \"\\n\\n bye ~\"\n exit(0)\n\n\nif __name__ == '__main__':\n signal.signal(signal.SIGINT, on_signal_interrupt)\n\n if not os.path.exists(\"./db/fw.db\"):\n executor.create_db()\n run()\n" }, { "alpha_fraction": 0.577235758304596, "alphanum_fraction": 0.6883468627929688, "avg_line_length": 72.80000305175781, "blob_id": "63f9522261d96ab7bc091e344b8e38328c232285", "content_id": "27d453dfd63502a9c5fb8178ce4e049e0a1fa4bb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 369, "license_type": "permissive", "max_line_length": 121, "num_lines": 5, "path": "/opengoo/upload.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "./OpengooMain.py -p \"Android Phone\" -m \"GXV3370/firmware\" -l /var/www/html/hz/firmware/bat/1.0.1.5/GXV3370_FW_1.0.1.5.tar\n./OpengooMain.py -p \"Android Phone\" -m \"GXV3380/Firmware\" -l /var/www/html/hz/firmware/GXV3380/0.0.0.20/\n./OpengooMain.py -p \"GAC2510\" -m \"Firmware\" -l \n./OpengooMain.py -p \"GAC2500\" -m \"Firmware\" -l\n./OpengooMain.py -p \"GVC3210\" -m \"Firmware\" -l\n" }, { "alpha_fraction": 0.4923817217350006, "alphanum_fraction": 0.5124298334121704, "avg_line_length": 22.961538314819336, "blob_id": "0c4ee0c7593de25e4ece923e05f491a1753b1a79", "content_id": "fcd7a3e6f18ddebea1e41133d5a81002402135fa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1247, "license_type": "permissive", "max_line_length": 68, "num_lines": 52, "path": "/tools/fws_gc.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport datetime\nimport calendar\nimport os\n\nFW_PREFIX = \"21\"\n\n\ndef get_months(n):\n date = datetime.datetime.today()\n month = date.month\n year = date.year\n for i in range(n):\n if month == 1:\n year -= 1\n month = 12\n else:\n month -= 1\n return datetime.date(year, month, 1)\n\n\ndef gs_months(fmt):\n fws = []\n print \"ls | grep \" + FW_PREFIX + fmt\n with os.popen(\"ls | grep \" + FW_PREFIX + fmt) as b:\n for fwv in b.readlines():\n fws.append(fwv.replace(\"\\n\", \"\"))\n\n if fws.__len__() != 0:\n # remove the last item in queue\n fws.pop()\n # remove the first item in queue\n fws.__delitem__(0)\n mid = fws.__len__() / 2\n # remove the middle item in queue\n fws.__delitem__(mid)\n # keep 1, 15, 30\n # keep 1, 16, 31\n\n for fwv in fws:\n # os.system(\"rm -rf \" + fwv)\n print \"rm -rf \", fwv\n\n\nif __name__ == '__main__':\n today = datetime.datetime.today()\n for i in range(12):\n today = get_months(i + 4)\n weekday, days = calendar.monthrange(today.year, today.month)\n fmt_ym = today.strftime(\".%y.%m.\")\n gs_months(fmt_ym)\n\n" }, { "alpha_fraction": 0.5347384810447693, "alphanum_fraction": 0.5468384027481079, "avg_line_length": 27.46666717529297, "blob_id": "809617c29cb2edef1952fb19fc9c47854e4c21d1", "content_id": "2d95964d0882bd6b1fc890826886edc374d85c6c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2562, "license_type": "permissive", "max_line_length": 91, "num_lines": 90, "path": "/opengoo/config.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom utils import *\nimport logging, os\nimport getopt\n\nconfig_path = 'config.json'\nhelpinfo = '''\n -h show this information\n -v show version info\n -u set username\n -w set password\n -l set local file path to upload\n -r set remote file name\n -p set project name like WP800/GXV3370/GVC3210 in opengoo workspace\n -m set module name like Dev,Doc,Firmware use / for more module levels like GXV3370/Doc \n'''\n\nversion = '1.0.1'\n\ndef get_config(args):\n config = {}\n with open(config_path, 'rb') as f:\n try:\n config = parse_json_in_str(remove_comment(f.read().decode('utf8')))\n except ValueError as e:\n logging.error('found an error in config.json: %s', str(e))\n sys.exit(1)\n\n if not config.has_key('desc'):\n config['desc'] = '-- Initial version by script --'\n\n shortopt = 'hvp:m:l:r:u:w:d:'\n optlist, arg = getopt.getopt(args[1:], shortopt)\n for key, value in optlist:\n if key == '-h':\n print(helpinfo)\n exit(0)\n elif key == '-v':\n print(version)\n exit(0)\n elif key == '-p':\n config['project'] = value\n elif key == '-m':\n config['module'] = value\n elif key == '-l':\n config['localpath'] = value\n elif key == '-r':\n config['remotename'] = value\n elif key == '-u':\n config['user'] = value\n elif key == '-w':\n config['password'] = value\n elif key == '-d':\n config['desc'] = value\n else:\n continue\n #print(config)\n return config\n\ndef check_config(config):\n if len(config) == 0:\n print('error! config is empty')\n exit(-1)\n\n if not config.has_key('user') or not config['user']:\n print('error! user is empty')\n exit(-1)\n\n if not config.has_key('password') or not config['password']:\n print('error! password is empty')\n exit(-1)\n\n if not config.has_key('localpath') or not config['localpath']:\n print('error! localpath is empty')\n exit(-1)\n\n if not os.access(config['localpath'], os.R_OK):\n print('error! cannot read ' + config['localpath'])\n exit(-1)\n\n if not config.has_key('project') or not config['project']:\n print('error! project is empty')\n exit(-1)\n\n if not config.has_key('remotename') or not config['remotename']:\n config['remotename'] = config['localpath'].split('/')[-1]\n\n #print(config)\n return config\n" }, { "alpha_fraction": 0.6690140962600708, "alphanum_fraction": 0.7535211443901062, "avg_line_length": 34.75, "blob_id": "e61fa9aede6b4fb43abd5bd8f06ca63ebd945f75", "content_id": "5aad5c366e9610d44295584021ef0d9e2b425a41", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 142, "license_type": "permissive", "max_line_length": 66, "num_lines": 4, "path": "/dailybuild/gvc3212/dailybuild.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n/media/gshz/4T_Disk/hplan/fw/dailybuild/gvc3212/build.sh -u -c\n/media/gshz/4T_Disk/hplan/fw/dailybuild/gvc3212/build.sh -p hw_c03" }, { "alpha_fraction": 0.8301886916160583, "alphanum_fraction": 0.8301886916160583, "avg_line_length": 34.33333206176758, "blob_id": "467311a6552cabe78f8f30a9e7cfaf5aeef51293", "content_id": "c1bac81f341c2b8000639dba520cafc0afc8f26e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "permissive", "max_line_length": 37, "num_lines": 3, "path": "/fw_session/rest_api/__init__.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "from bugzilla_api import login\nfrom bugzilla_api import valid_login\nfrom bugzilla_api import get_req_path\n" }, { "alpha_fraction": 0.5399354696273804, "alphanum_fraction": 0.5873648524284363, "avg_line_length": 68.31578826904297, "blob_id": "8f66a020a91cf67130276879252aac6ae7385446", "content_id": "d564c083a0adbbf04a1e8c74dff5a8b768a1f75c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5271, "license_type": "permissive", "max_line_length": 188, "num_lines": 76, "path": "/opengoo/OpengooApi.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport requests\nimport mimetypes\n\nclass OpengooApi:\n def __init__(self):\n self.default_headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0; Gecko/20100101 Firefox/58.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2'}\n self.default_url = 'https://192.168.120.248/fengoffice_new/index.php'\n requests.packages.urllib3.disable_warnings()\n self.session = requests.Session()\n self.session.headers.update(self.default_headers)\n self.session.verify = False\n\n def login(self, user, pwd, configOptionSelect='Default', remember='checked'):\n # https://192.168.120.248/fengoffice_new/index.php?c=access&a=login\n r = self.session.post(self.default_url, params = {'c':'access','a':'login'}, data={'login[username]':user,\n 'login[password]':pwd, 'configOptionSelect': configOptionSelect, 'login[remember]':remember})\n return r\n\n def init(self):\n #https://192.168.120.248/fengoffice_new/index.php?context={}&currentdimension=0&ajax=true&c=files&a=init\n r = self.session.post(self.default_url, params = {'context':'{}','currentdimension':'0','ajax':'true','c':'files','a':'init'})\n return r\n\n def loaddimeninfo(self):\n # https://192.168.120.248/fengoffice_new/index.php?context=\\{\\}&ajax=true&currentdimension=0&c=dimension&a=load_dimensions_info\n r = self.session.post(self.default_url, params = {'context':'{}','currentdimension':'0','ajax':'true','c':'dimension','a':'load_dimensions_info'})\n return r\n\n def getdimenProjects(self, dimenid):\n #https://192.168.120.248/fengoffice_new/index.php?c=dimension&ajax=true&a=initial_list_dimension_members_tree_root&dimension_id=1&avoid_session=1&limit=100\n r = self.session.post(self.default_url, params = {'c':'dimension','a':'initial_list_dimension_members_tree_root','dimension_id':str(dimenid),\n 'avoid_session':'1', 'ajax':'true','limit':'100'})\n return r\n\n def getmemberchilds(self, memberid):\n #https://192.168.120.248/fengoffice_new/index.php?context=\\{%221%22:\\[0\\],%222%22:\\[0\\]\\}&currentdimension=0&ajax=true&c=dimension&a=get_member_childs&member=533&limit=100&offset=0\n r = self.session.post(self.default_url, params = {'context':'{\"1\":[0],\"2\":[0]}','currentdimension':'0','c':'dimension',\n 'a':'get_member_childs', 'ajax':'true','member':str(memberid), 'limit':'100','offset':'0'})\n return r\n\n def checkfilename(self, memberid, remotename):\n #check filename\n # https://192.168.120.248/fengoffice_new/index.php?context={\"1\":[0,1217],\"2\":[0]}&currentdimension=1&ajax=true&c=files&a=check_filename&id=0&current=1217\n contextstr = '{\\\"1\\\":[0,\\\"' + str(memberid) + '],\\\"2\\\":[0]}'\n r = self.session.post(self.default_url, params = {'context': contextstr, 'currentdimension':'1', 'c': 'files', 'ajax':'true',\n 'a':'check_filename', 'id': '0', 'current':str(memberid)},\n data={'members':'[' + str(memberid) + ']', 'filename':remotename})\n return r\n\n def tmpuploadfile(self, genid, localpath, remotename):\n # tmply add file\n #https://192.168.120.248/fengoffice_new/index.php?&c=files&a=temp_file_upload&id=og_1516933330_518465&upload=true\n\n type = mimetypes.guess_type(localpath)[0]\n filedata = {'file_file[]':(remotename, open(localpath, 'rb'), type)}\n r = self.session.post(self.default_url, params = {'c':'files','a':'temp_file_upload', 'id': genid,\n 'upload':'true'}, files=filedata)\n return r\n\n def addmultifile(self, genid, remotename, memberid, uid, option=-1, desc='-- Initial version by script --'):\n # judge r ok\n # add multi file\n #https://192.168.120.248/fengoffice_new/index.php?context={\"1\":[0,1217],\"2\":[0]}&currentdimension=1&ajax=true&c=files&a=add_multiple_files\n param_rg = genid + '_rg'\n contextstr = '{\\\"1\\\":[0,\\\"' + str(memberid) + '],\\\"2\\\":[0]}'\n data = {'modal':'1','':'145','file[add_type]':'regular','file[file_id]':'','file[type]':'','file[upload_id]':genid,\n param_rg:'0','file_file[0]':'C:\\\\fakepath\\\\'+remotename,'file[url]':'','file[name]':remotename,\n 'file[upload_option]':str(option),'file[revision_comment]': desc,'members':'['+str(memberid)+']',\n 'custom_prop_names[0]':'','custom_prop_values[0]':'','original_subscribers':str(uid),'file[attach_to_notification]':'0',\n 'file[notify_myself_too]':'1','file[default_subject_sel]':'default','file[default_subject_text]':'','file[description]':'', 'subscribers[user_'+ str(uid) + ']':'1'}\n r = self.session.post(self.default_url, params = {'context':contextstr,'currentdimension':'1', 'ajax':'true','c': 'files',\n 'a':'add_multiple_files'}, data=data)\n return r\n\n\n\n" }, { "alpha_fraction": 0.567717969417572, "alphanum_fraction": 0.5830241441726685, "avg_line_length": 22.69230842590332, "blob_id": "2db2f6d435b37a34cd84e36e91fb5423030ff682", "content_id": "b86a1c412e4ecfa70b1f4001977a2617e662cff0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2156, "license_type": "permissive", "max_line_length": 92, "num_lines": 91, "path": "/fw_session/rest_api/bugzilla_api.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/env python\nimport requests\nimport json\n\n\nrest_api = None\n\n\nclass REQ_INTERFACE:\n Login = 1\n Valid_Login = 2\n Logout = 3\n Bug = 4\n Search = 5\n Comment = 6\n\n\ndef login(url, usr, pwd):\n \"\"\"\n Document: https://bugzilla.readthedocs.io/en/5.0.4/api/core/v1/user.html#login\n\n Request:\n GET /rest/[email protected]&password=toosecrettoshow\n\n Response:\n {\n \"token\": \"786-OLaWfBisMY\",\n \"id\": 786\n }\n \"\"\"\n query = {\"login\": usr, \"password\": pwd}\n r = requests.get(url, params=query)\n if 200 <= r.status_code <= 299:\n data = r.json()\n if 'token' in data:\n return data['token']\n else:\n print(\"login failed, please check your bugzilla configurations.\")\n return None\n # elif r.status_code == 401:\n # print 401\n\n\ndef valid_login(url, user, token):\n \"\"\"\n Document: https://bugzilla.readthedocs.io/en/5.0.4/api/core/v1/user.html#valid-login\n\n Request:\n GET /rest/[email protected]&token=1234-VWvO51X69r\n\n Response:\n Returns true/false depending on if the current token is valid for the provided username.\n \"\"\"\n query = {\"login\": user, \"token\": token}\n r = requests.get(url, params=query)\n if r.status_code == 200:\n data = r.json()\n if data:\n return bool(data['result'])\n return False\n\n\ndef __load_rest_api():\n with open('./rest_api/rest_api.json') as load_f:\n data = json.load(load_f)\n global rest_api\n rest_api = data\n load_f.close()\n\n\ndef get_req_path(req_method):\n if req_method < 0:\n return \"\"\n\n if not rest_api:\n __load_rest_api()\n\n if req_method == REQ_INTERFACE.Login:\n return rest_api['login']\n elif req_method == REQ_INTERFACE.Valid_Login:\n return rest_api['valid_login']\n elif req_method == REQ_INTERFACE.Logout:\n return rest_api['logout']\n elif req_method == REQ_INTERFACE.Bug:\n return rest_api['bug']\n elif req_method == REQ_INTERFACE.Search:\n return rest_api['search']\n elif req_method == REQ_INTERFACE.Comment:\n return rest_api['comment']\n else:\n return \"\"\n" }, { "alpha_fraction": 0.6157212853431702, "alphanum_fraction": 0.6334033608436584, "avg_line_length": 34.6875, "blob_id": "ccb8007ec7ab28bfd72c166c5c90acfa4acd3f44", "content_id": "aaa5f475ab5e6a10c88685ce4d5bbc3a21b36bcc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5712, "license_type": "permissive", "max_line_length": 136, "num_lines": 160, "path": "/opengoo/OpengooMain.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom OpengooApi import OpengooApi\nimport random\nfrom config import *\n\ndef main():\n args = sys.argv\n progname = args[0]\n dirname = os.path.dirname(progname)\n print(\"dirname is \" + dirname)\n sys.path.append(dirname)\n os.chdir(dirname)\n config = check_config(get_config(args))\n og = OpengooApi()\n\n # login\n uid = login(og, config)\n\n # get project id\n projectid = get_project_id(og, config)\n\n print(\"find project \" + config['project'])\n\n if not config.has_key('module') or not config['module']:\n moduleid = projectid\n else:\n # get module id\n modules = config['module'].split(\"/\")\n findid = projectid\n for module in modules:\n moduleid = get_module_id(og, module, findid)\n findid = moduleid\n print(\"find module \" + module)\n\n # check filename\n upload_option_id = check_file_name(og, config, moduleid)\n if upload_option_id != -1:\n print(\"replace already exist file\")\n\n # tmp upload file\n genid = tmp_upload_file(og, config)\n\n # add file to module\n fileid = add_file_to_module(og, config, genid, moduleid, uid, upload_option_id)\n\n print('add file to ' + config.get('project', '') + '/' + config.get('module', '') + ' done')\n print(\"\")\n print(\"\")\n print_info(config['remotename'] + ': https://115.236.68.174:8888/fengoffice_new/index.php?c=files&a=download_file&id=' + str(fileid))\n\n\ndef login(og, config):\n loginr = og.login(config['user'], config['password'])\n check_expect(loginr.status_code, 200, loginr, 'login failed!')\n check_not_expect(loginr.cookies.get('https___192_168_120_248_fengoffice_newtoken'), None, loginr, 'login failed!')\n check_not_expect(loginr.cookies.get('https___192_168_120_248_fengoffice_newid'), None, loginr, 'login failed!')\n print('login use ' + config['user'] + \" success\")\n uid = loginr.cookies.get('https___192_168_120_248_fengoffice_newid')\n return uid\n\ndef get_project_id(og, config):\n dimenr = og.getdimenProjects(\"1\")\n err_msg = 'get dimen failed!'\n check_expect(dimenr.status_code, 200, dimenr, err_msg)\n check_not_expect(dimenr.content, None, dimenr, err_msg)\n projects_response = parse_json_in_str(dimenr.content)\n\n if not 'dimension_members' in projects_response:\n print_err('unexpect projects response with ' + projects_response)\n exit(-3)\n\n dimens = projects_response['dimension_members']\n projectid = find_project_id(dimens, config['project'])\n if projectid <= 0:\n print_err(\"failed to find project \" + config['project'] + \" in Opengoo!!!\")\n exit(-4)\n return projectid\n\ndef get_module_id(og, module, projectid):\n moduler = og.getmemberchilds(str(projectid))\n err_msg = 'get module info failed!!!'\n check_expect(moduler.status_code, 200, moduler, err_msg)\n check_not_expect(moduler.content, None, moduler, err_msg)\n module_response = parse_json_in_str(moduler.content)\n modules = module_response['members']\n moduleid = find_module_id(modules, module)\n if moduleid <= 0:\n print_err(\"failed to find module \" + module + \" in Opengoo!!!\")\n exit(-4)\n return moduleid\n\n\ndef check_file_name(og, config, moduleid):\n cfnr = og.checkfilename(moduleid, config['remotename'])\n err_msg = 'check filename failed!!!'\n check_expect(cfnr.status_code, 200, cfnr, err_msg)\n check_not_expect(cfnr.content, None, cfnr, err_msg)\n\n upload_option_id = -1\n cfnr_response = parse_json_in_str(cfnr.content)\n if 'files' in cfnr_response:\n # has same name file, update it\n upload_option_id = cfnr_response['files'][0]['id']\n\n return upload_option_id\n\n\ndef tmp_upload_file(og, config):\n random.seed()\n rand1 = int(random.random()*10000000)\n random.seed()\n rand2 = int(random.random()*10000)\n genid = 'og_' + str(rand1) + '_' + str(rand2)\n #print('genid is ' + genid)\n\n print('start upload file ' + config['localpath'] + ' to ' + config['remotename'])\n print_info('in uploading ... please wait. file size ' + get_file_size(config['localpath']))\n tufr = og.tmpuploadfile(genid, config['localpath'], config['remotename'])\n err_msg = 'upload file failed!!!'\n check_expect(tufr.status_code, 200, tufr, err_msg)\n check_not_expect(tufr.content, None, tufr, err_msg)\n print('upload file ' + config['localpath'] + ' complete')\n return genid\n\n\ndef add_file_to_module(og, config, genid, moduleid, uid, upload_option_id):\n amfr = og.addmultifile(genid, config['remotename'], moduleid, uid, upload_option_id, config['desc'])\n err_msg = 'add file to project failed!!!'\n check_expect(amfr.status_code, 200, amfr, err_msg)\n check_not_expect(amfr.content, None, amfr, err_msg)\n amfr_response = parse_json_in_str(amfr.content)\n if not 'errorCode' in amfr_response or not 'file' in amfr_response:\n print_err('error add file response with ' + amfr_response)\n exit(-5)\n\n if amfr_response['errorCode'] != 0:\n print_err('error add file response code :' + amfr_response['errorCode'])\n exit(-5)\n\n print(amfr_response['errorMessage'])\n projectfile = amfr_response['file'][0]\n return projectfile.split(':')[1]\n\ndef check_expect(real, expect, r, msg):\n if real != expect:\n print_err('error result ' + str(real) + \", while expect \" + str(expect) + \" in \" + str(r))\n if msg is not None:\n print_err(msg)\n exit(-2)\n\ndef check_not_expect(real, expect, r, msg):\n if real == expect:\n print_err('error not expect result ' + str(real) + \" in \" + str(r))\n if msg is not None:\n print_err(msg)\n exit(-2)\n\n\nif __name__ == '__main__':\n main()\n\n\n" }, { "alpha_fraction": 0.538710355758667, "alphanum_fraction": 0.554881751537323, "avg_line_length": 28.801204681396484, "blob_id": "0411422af413a88803ba0423f63c726d01ff84ae", "content_id": "8ae396b4a75af6570ddad3c61de928563b61ed6a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4947, "license_type": "permissive", "max_line_length": 288, "num_lines": 166, "path": "/dailybuild/bat/dailybuild.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/bash\nsource /etc/profile\nexport ENG=true\nexport DEBUG=false\nexport BUILD_KERNEL=false\nexport REPO_SYNC_CODE=false\nexport TARGET_BRANCH=\"Bat\"\nexport CURRENT_BRANCH=\"git symbolic-ref --short HEAD\"\nexport MAIL_TO=\"[email protected]\"\nexport MAIL_TO_DEBUG=\"[email protected]\"\nexport MAIL_TITLE=\"GXV3370 git log\"\nexport SH_PATH=\"$(cd \"$(dirname \"$0\")\";pwd)\"\nexport PROJ_PATH=\"/home/hplan/project/dailybuild/bat\"\nexport BUILD_CMD=\"./autoBuild.sh\"\nexport LOG_FILE=\"/home/hplan/BuildLog/bat/`whoami`_bat_20_\"`date -d\"today\" +%y_%m_%d`\"_build_Log\"\n\nLog_Raw=\"/tmp/logRaw_Bat.html\"\nLog_Pretty=\"/tmp/logPretty_Bat.html\"\n\nprint_help() {\necho \"\n dailybuild tool for GXV3370\n\n # -h: print this help document\n # -r: specify email addressee. default: [email protected]\n # -s: user build. default: eng\n # -c: clean build. default: not clean\n # -b: build kernel\n # -v: set version\n # -u: update source code. default: not update\n\"\n}\n\nrepo_sync() {\n source ${SH_PATH}/../env.sh\n # clear previous log\n cat /dev/null > ${Log_Raw}\n cat /dev/null > ${Log_Pretty}\n\n cd ${PROJ_PATH}\n\n while true\n do\n repo forall -c \"git checkout . && git reset --hard \\`git remote\\`/\\`${CURRENT_BRANCH}\\` && git checkout ${TARGET_BRANCH} && git reset --hard m/master && git pull \\`git remote\\` ${TARGET_BRANCH}\" | tee ${LOG_FILE}\n repo sync -c -j16 | tee ${LOG_FILE}\n\n if [[ $? -eq 0 ]]; then\n break\n fi\n done\n\n repo forall -c \"git pull \\`git remote\\` ${TARGET_BRANCH} && git rebase m/master\" | tee ${LOG_FILE}\n repo forall -p -c git log --graph --name-status --since=\"24 hours ago\" --pretty=format:\"<span style='color:#00cc33'>%ci</span> <span style='color:yellow'>%an %ae</span>%n<span style='color:#00cc33'>Log:</span> <span style='color:yellow'> %B</span>%nFiles List:\" > ${Log_Raw}\n}\n\nmail() {\n if [[ $(stat -c %s ${Log_Raw}) -eq 0 ]]; then\n echo \"There is no new commit, nothing to do\" | tee ${LOG_FILE}\n return 1\n else\n echo \"<html><body style='background-color:#151515; font-size: 14pt; color: white'><div style='background-color:#151515;color: white'>\" > ${Log_Pretty}\n sed -e 's/$/<br>/g' ${Log_Raw} >> ${Log_Pretty}\n echo \"</div></body></html>\" >> ${Log_Pretty}\n if ! ${DEBUG}; then\n export version=\"20.\"`date -d\"today\" +%y.%m.%d`\n# sendemail -f [email protected] -t $1 -s smtp.grandstream.cn -o tls=no message-charset=utf-8 -xu [email protected] -xp hzS1pTestH2 -v -u \"GXV3370 ${version} git log\" < ${Log_Pretty}\n sendemail -f [email protected] -t $1 -s smtp.grandstream.cn -o tls=no message-charset=utf-8 -xu [email protected] -xp hzS1pTestH2 -v -u \"GXV3370 git log\" < ${Log_Pretty}\n fi\n return 0\n fi\n}\n\nbuild() {\n source ${SH_PATH}/../env.sh\n source ${SH_PATH}/../openjdk-8-env\n\n cd ${PROJ_PATH}/android && source ${PROJ_PATH}/android/build/envsetup.sh\n if ${ENG}; then\n cd ${PROJ_PATH}/android && lunch full_bat-eng\n else\n cd ${PROJ_PATH}/android && lunch full_bat-user\n fi\n\n if ${BUILD_KERNEL}; then\n cd ${PROJ_PATH}/kernel-3.18 && ./buildkernel.sh -k | tee ${LOG_FILE}\n # check if there is boot.img\n if [[ ! -e ${PROJ_PATH}/android/out/target/product/bat/boot.img ]]; then\n ## regenerate boot.img\n cd ${PROJ_PATH}/android && make bootimage -j16\n cd ${PROJ_PATH}/kernel-3.18 && ./buildkernel.sh -k | tee ${LOG_FILE}\n fi\n # check again\n if [[ ! -e ${PROJ_PATH}/android/out/target/product/bat/boot.img ]]; then\n sendemail -f [email protected] -t $1 -s smtp.grandstream.cn -o tls=no message-charset=utf-8 -xu [email protected] -xp hzS1pTestH2 -v -u \"GXV3370 build kernel failed.\"\n exit 1\n fi\n fi\n\n cd ${PROJ_PATH}/android/vendor/grandstream/build && ${BUILD_CMD} -d -r ${MAIL_TO} | tee ${LOG_FILE}\n# echo \"${BUILD_CMD} -d -r ${MAIL_TO}\"\n}\n\nentrance() {\n ## sync code\n if ${REPO_SYNC_CODE}; then\n repo_sync\n mail ${MAIL_TO}\n fi\n\n ## build code\n if [[ $? -eq 0 ]]; then\n build ${MAIL_TO}\n fi\n}\n\nwhile getopts \"v:r:csbuh\" arg\ndo\n case ${arg} in\n h)\n print_help\n exit 0\n ;;\n\n v)\n export BUILD_CMD=\"${BUILD_CMD} -v ${OPTARG}\"\n ;;\n\n r)\n if ${DEBUG}; then\n export MAIL_TO=${MAIL_TO_DEBUG}\n else\n export MAIL_TO=${OPTARG}\n fi\n ;;\n\n s)\n export ENG=false\n export BUILD_CMD=\"${BUILD_CMD} -s\"\n ;;\n\n b)\n export BUILD_KERNEL=true\n ;;\n\n c)\n export BUILD_CMD=\"${BUILD_CMD} -c\"\n ;;\n\n u)\n export REPO_SYNC_CODE=true\n ;;\n\n ?)\n echo \"unknown argument $OPTARG\"\n exit 1\n ;;\n esac\ndone\n\nif ${ENG}; then\n export LOG_FILE=\"${LOG_FILE}_eng\"\nelse\n export LOG_FILE=\"${LOG_FILE}_usr\"\nfi\n\nentrance\n" }, { "alpha_fraction": 0.6769616007804871, "alphanum_fraction": 0.7324708104133606, "avg_line_length": 34.74626922607422, "blob_id": "eb9d2d30be58cc89fc02e840a24c9f9676f2fd4c", "content_id": "81b9578442477eb6e2b1c74f45450734f1e17c70", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2396, "license_type": "permissive", "max_line_length": 106, "num_lines": 67, "path": "/tools/prebuild.sh", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#! /bin/bash\nsudo apt-get install libswitch-perl -y\nsudo apt-get install git gnupg flex bison gperf build-essential -y\nsudo apt-get install zip curl libc6-dev -y\nsudo apt-get install libncurses5-dev:i386 x11proto-core-dev -y\nsudo apt-get install libx11-dev:i386 libreadline6-dev:i386 -y\nsudo apt-get install libgl1-mesa-glx:i386 libgl1-mesa-dev -y\nsudo apt-get install g++-multilib mingw32 tofrodos python-markdown -y\nsudo apt-get install libxml2-utils xsltproc zlib1g-dev:i386 -y\nsudo ln -s /usr/lib/i386-linux-gnu/mesa/libGL.so.1 /usr/lib/i386-linux-gnu/libGL.so -y\nsudo apt-get install flex -y\nsudo apt-get install bison -y\nsudo apt-get install libglib2.0-0:i386 libpng12-0:i386 libfontconfig1:i386 libsm6:i386 libxrender1:i386 -y\nsudo apt-get install email mailutils -y\nsudo apt-get install autoconf -y\nsudo apt-get install bc -y\nsudo apt-get install sendemail -y\n\nsudo apt-get install -y device-tree-compiler\nsudo apt-get install -y liblz4-tool\nsudo apt-get install -y libssl-dev\nsudo apt-get install -y python\nsudo apt-get install -y python-pip\npip install pyelftools\n\ngit config --global alias.path '!echo \"\\033[32m `git branch | grep \"*\"` \\033[0m `pwd`\"'\ngit config --global alias.since '!sh -c \"git log ...$1\"'\ngit config --global user.email [email protected]\ngit config --global user.name hplan\ngit config --global core.editor vim\ngit config --global i18n.commitencoding utf-8\ngit config --global i18n.logoutputencoding utf-8\n\n\necho \"export LC_ALL=C\" >> ~/.bashrc\necho \"export LESSCHARSET=utf-8\" >> ~/.bashrc\n\nexit 0\n\n# Install NodeJS env\ncd /usr/local/etc\npath=`pwd`\nsudo wget https://nodejs.org/dist/v10.9.0/node-v10.9.0-linux-x64.tar.xz -T 60 -t 2\nsudo tar xf node-v10.9.0-linux-x64.tar.xz\nsudo ln -s $path/node-v10.9.0-linux-x64/bin/npm /usr/local/bin/npm\nsudo ln -s $path/node-v10.9.0-linux-x64/bin/node /usr/local/bin/node\ncd -\n\n## Charset UTF-8\ncd ~\nsudo locale-gen zh_CN.UTF-8\necho \"set autoindent\" >> .vimrc\necho \"set cindent\" >> .vimrc\necho \"set tabstop=4\" >> .vimrc\necho \"set softtabstop=4\" >> .vimrc\necho \"set shiftwidth=4\" >> .vimrc\necho \"set expandtab\" >> .vimrc\necho \"set number\" >> .vimrc\necho \"set history=1000\" >> .vimrc\necho \"set hlsearch\" >> .vimrc\necho \"set incsearch\" >> .vimrc\necho \"set cul\" >> .vimrc\necho \"set fileencodings=utf-8,ucs-bom,gb18030,gbk,gb2312,cp936\" >> .vimrc\necho \"set termencoding=utf-8\" >> .vimrc\necho \"set encoding=utf-8\" >> .vimrc\n\necho \"done\"\n\n" }, { "alpha_fraction": 0.45144885778427124, "alphanum_fraction": 0.4655536413192749, "avg_line_length": 32.511253356933594, "blob_id": "7c46d91554023d14267822f3a17eb18a67ae9548", "content_id": "4eaa2fa4d2e133bc0a7ae091bc4b9276c2beda7a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10422, "license_type": "permissive", "max_line_length": 116, "num_lines": 311, "path": "/fw_session/BugzillaApi.py", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "#!/bin/env python\nimport rest_api.bugzilla_api as bugzilla_api\nimport requests\nimport ConfigParser\nimport model.Bug as Bug\nimport model.DatabaseExecutor as model\nimport json\n\nexecutor = model.DatabaseExecutor()\n\n\nclass BugzillaApi:\n def __init__(self):\n self.bz = executor.bz_query()\n self.host = self.bz.get_host_url()\n self.user = self.bz.get_config_usr()\n self.token = self.bz.get_config_token()\n self.pwd = self.bz.get_config_pwd()\n config = ConfigParser.ConfigParser()\n config.readfp(open('./.product.ini'))\n self.product_id = config.getint(\"product_section\", \"product_id\")\n self.product = executor.get_product(self.product_id)\n\n def load_cfg(self):\n self.bz = executor.bz_query()\n self.host = self.bz.get_host_url()\n self.user = self.bz.get_config_usr()\n self.token = self.bz.get_config_token()\n self.pwd = self.bz.get_config_pwd()\n config = ConfigParser.ConfigParser()\n config.readfp(open('./.product.ini'))\n self.product_id = config.getint(\"product_section\", \"product_id\")\n self.product = executor.get_product(self.product_id)\n\n def do_login(self):\n valid_path = bugzilla_api.get_req_path(bugzilla_api.REQ_INTERFACE.Valid_Login)\n url = self.host + \"/\" + valid_path\n # valid login\n if bugzilla_api.valid_login(url, self.user, self.token):\n return\n else:\n print \"exists token was invalid. request login...\"\n\n # obtain new token\n path = bugzilla_api.get_req_path(bugzilla_api.REQ_INTERFACE.Login)\n\n url = self.host + \"/\" + path\n tk = bugzilla_api.login(url, self.user, self.pwd)\n if tk:\n self.token = tk\n self.bz.set_token(tk)\n executor.bz_update(self.bz)\n\n def get_bug_info(self, bug_id):\n \"\"\"\n https://bugzilla.readthedocs.io/en/5.0.4/api/core/v1/bug.html#get-bug\n \"\"\"\n self.do_login()\n path = bugzilla_api.get_req_path(bugzilla_api.REQ_INTERFACE.Bug) % str(bug_id)\n url = self.host + \"/\" + path\n\n query = {\"token\": self.token}\n r = requests.get(url, params=query)\n if r.status_code == 200:\n data = r.json()\n status = data['bugs'][0]['status']\n resolution = data['bugs'][0]['resolution']\n bug_id = data['bugs'][0]['id']\n summary = data['bugs'][0]['summary']\n print \"%8s %8s [Bug %s] %s\" % (\n self.compat(status),\n self.compat(resolution),\n self.compat(bug_id),\n self.compat(summary)\n )\n elif r.status_code == 401:\n self.do_login()\n self.load_cfg()\n self.get_bug_info(bug_id)\n\n def get_bugs_info(self, bug_ids):\n \"\"\"\n Document: https://bugzilla.readthedocs.io/en/5.0.4/api/core/v1/bug.html#get-bug\n \"\"\"\n self.do_login()\n path = bugzilla_api.get_req_path(bugzilla_api.REQ_INTERFACE.Search)\n url = self.host + \"/\" + path\n query = {\n \"id\": bug_ids,\n \"token\": self.token\n }\n r = requests.get(url, params=query)\n if r.status_code == 200:\n data = r.json()\n if data:\n for d in data['bugs']:\n product = d['product']\n if product != self.product.get_product_name():\n continue\n status = d['status']\n resolution = d['resolution']\n bug_id = d['id']\n summary = d['summary']\n print \" %s %10s %10s [Bug %s] %s\" % (\n self.compat(product),\n self.compat(status),\n resolution.encode(\"ascii\", \"ignore\"),\n bug_id,\n self.compat(summary)\n )\n return data\n elif r.status_code == 401:\n self.do_login()\n self.load_cfg()\n self.get_bugs_info(bug_ids)\n# return data\n\n def search_bug(self):\n \"\"\"\n Document: https://bugzilla.readthedocs.io/en/5.0.4/api/core/v1/bug.html#search-bugs\n\n Request:\n GET /rest/bug?product=Foo&product=Bar\n \"\"\"\n self.do_login()\n url = self.host + \"/rest/bug?product=GXV3350&product=GXV3380&product=GXV33xx\"\n print url\n query = {\n \"token\": self.token\n }\n r = requests.get(url, params=query)\n search_ret = []\n if r.status_code == 200:\n data = r.json()\n if data:\n for d in data['bugs']:\n bug = Bug.Bug()\n bug.set(\"id\", d['id'])\n bug.set(\"creator\", d['creator'])\n bug.set(\"product\", d['product'])\n bug.set(\"status\", d['status'])\n bug.set(\"component\", d['component'])\n bug.set(\"resolution\", d['resolution'])\n bug.set(\"summary\", d['summary'])\n bug.set(\"creation_time\", d['creation_time'])\n bug.set(\"last_change_time\", d['last_change_time'])\n\n # if bug.get_creator() != reporter:\n # continue\n status = bug.get_status()\n if status == 'CLOSED' or status == 'VERIFIED':\n continue\n elif status == 'RESOLVED':\n resolution = bug.get_resolution()\n if resolution != 'FIXED':\n continue\n\n search_ret.append(bug)\n print \"%10s %10s %10s [Bug %s] %s\" % (bug.get_product(), bug.get_status(), bug.get_resolution(),\n bug.get_id(), bug.get_summary())\n print(\"\\n\")\n print \" total: %d\" % len(data['bugs'])\n return search_ret\n elif r.status_code == 401:\n self.do_login()\n self.load_cfg()\n self.search_bug()\n\n def get_release_note(self):\n self.do_login()\n path = bugzilla_api.get_req_path(bugzilla_api.REQ_INTERFACE.Search)\n url = self.host + \"/\" + path\n query = {\n \"token\": self.token,\n \"product\": self.product.get_product_name(),\n \"resolution\": \"FIXED\",\n \"status\": \"RESOLVED\",\n }\n r = requests.get(url, params=query)\n if r.status_code == 200:\n data = r.json()\n bugs = data['bugs']\n if bugs:\n bug_ids = []\n with open(\"resolved_bugs.txt\", \"w+\") as f:\n for b in bugs:\n bug_ids.append(b['id'])\n print \" [Bug %s] %s\" % (b['id'], self.compat(b['summary']))\n f.write(\"[Bug %s] %s\" % (b['id'], self.compat(b['summary'])) + \"\\n\")\n\n print \" >> Total bugs: %d\" % len(bugs)\n return bug_ids\n elif r.status_code == 401:\n self.do_login()\n self.load_cfg()\n self.get_release_note()\n\n def comment(self, bug_id, comment):\n \"\"\"\n Document: https://bugzilla.readthedocs.io/en/5.0.4/api/core/v1/comment.html#create-comments\n\n Request:\n POST /rest/bug/(id)/comment\n {\n \"comment\" : \"This is an additional comment\",\n \"is_private\" : false\n }\n\n Response:\n {\n \"id\" : 789\n }\n \"\"\"\n path = bugzilla_api.get_req_path(bugzilla_api.REQ_INTERFACE.Comment)\n path = path % bug_id\n url = self.host + \"/\" + path\n data = {\n \"token\": self.token,\n \"comment\": comment\n }\n # print url\n requests.post(url, data=data)\n # print r.json()\n\n def plan2fix(self, bug_id, __plan2fix):\n \"\"\"\n Document: https://bugzilla.readthedocs.io/en/5.0.4/api/core/v1/bug.html#update-bug\n\n Request:\n PUT /rest/bug/(id_or_alias)\n {\n \"cf_goal\" : \"test build 0.0.0.1\",\n }\n\n Response:\n {\n \"bugs\": [\n {\n \"alias\": [],\n \"changes\": {\n \"cf_goal\": {\n \"removed\": \"\",\n \"added\": \"test build 0.23.08.21\"\n }\n },\n \"last_change_time\": \"2023-08-21T09:20:03Z\",\n \"id\": 269428\n }\n ]\n }\n \"\"\"\n path = bugzilla_api.get_req_path(bugzilla_api.REQ_INTERFACE.Bug)\n path = path % bug_id\n url = self.host + \"/\" + path\n data = {\n \"token\": self.token,\n \"cf_goal\": __plan2fix\n }\n requests.put(url, data=data)\n\n def get_comment(self, bug_id):\n \"\"\"\n Document: https://bugzilla.readthedocs.io/en/5.0.4/api/core/v1/comment.html#get-comments\n\n Request:\n > To get all comments for a particular bug using the bug ID or alias:\n GET /rest/bug/(id_or_alias)/comment\n\n > To get a specific comment based on the comment ID:\n GET /rest/bug/comment/(comment_id)\n\n Response:\n {\n \"bugs\": {\n \"35\": {\n \"comments\": [\n {\n \"time\": \"2000-07-25T13:50:04Z\",\n \"text\": \"test bug to fix problem in removing from cc list.\",\n \"bug_id\": 35,\n \"count\": 0,\n \"attachment_id\": null,\n \"is_private\": false,\n \"tags\": [],\n \"creator\": \"[email protected]\",\n \"creation_time\": \"2000-07-25T13:50:04Z\",\n \"id\": 75\n }\n ]\n }\n },\n \"comments\": {}\n }\n \"\"\"\n self.do_login()\n path = bugzilla_api.get_req_path(bugzilla_api.REQ_INTERFACE.Comment)\n path = path % bug_id\n url = self.host + \"/\" + path\n query = {\n \"token\": self.token\n }\n data = requests.get(url, params=query)\n print data.json()\n\n def compat(self, obj):\n if not obj:\n return 'None'\n elif isinstance(object, int):\n return obj\n\n return obj.encode('utf-8', 'ignore')\n" }, { "alpha_fraction": 0.6764705777168274, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 15.5, "blob_id": "b000c5e8eefafec025ee9c135edaf7a0a707127d", "content_id": "67936c53fa693c92b09b8b944c0837d4377fb380", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 34, "license_type": "permissive", "max_line_length": 17, "num_lines": 2, "path": "/fw_session/.product.ini", "repo_name": "hplan/fw", "src_encoding": "UTF-8", "text": "[product_section]\nproduct_id = 1\n\n" } ]
44
zhouyichen/PGCN
https://github.com/zhouyichen/PGCN
ce0a07a0a6bab4e47df762a857544987ddb346e8
9011250f741b304baddcfa62dab2db29d6f0f148
05736a440f2d7e69a0769f1ab5e03488a177b4a1
refs/heads/master
2020-08-17T16:55:17.806452
2019-11-05T13:29:30
2019-11-05T13:29:30
215,689,448
0
0
null
2019-10-17T02:49:03
2019-10-16T05:29:21
2019-09-10T02:00:41
null
[ { "alpha_fraction": 0.4948537349700928, "alphanum_fraction": 0.505687952041626, "avg_line_length": 32.51818084716797, "blob_id": "f8d571a5c4d82ba249eb573372726a638f060b45", "content_id": "4e5ac18e099fa4e72ef4ddf42a9c8b1f386f10ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3692, "license_type": "no_license", "max_line_length": 102, "num_lines": 110, "path": "/generate_proposal.py", "repo_name": "zhouyichen/PGCN", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport os\nfrom random import shuffle\n\n\n\ndef generate_proposals(start_gt, end_gt, label, n_frame, alpha=5, beta=2.5, n_to_generate=100):\n duration = end_gt - start_gt\n proposals = []\n\n while n_to_generate:\n iou = np.random.beta(alpha, beta)\n not_success = True\n while not_success:\n is_start = np.random.randint(2)\n endpoint1 = np.random.randint(start_gt, end_gt)\n if is_start:\n start_ps = endpoint1\n intersection = end_gt - start_ps\n if intersection / duration < iou:\n continue\n x = (intersection - duration * iou) / iou\n end_ps = round(end_gt + x)\n if end_ps > n_frame:\n continue\n else:\n end_ps = endpoint1\n intersection = end_ps - start_gt\n x = (intersection - duration * iou) / iou\n if intersection / duration < iou:\n continue\n start_ps = round(start_gt - x)\n if start_ps < 0:\n continue\n not_success = False\n n_to_generate = n_to_generate - 1\n proposals.append([label, iou, intersection/(end_ps - start_ps), start_ps, end_ps])\n return proposals\n\n\ndef generate_proposal_file_per_video(index, video_path, gt_path, mapping, f, n_ps_per_gt):\n video = pd.read_csv(gt_path, header=None)\n video = video[video.columns[0]].values.tolist()\n n_frame = len(video)\n current_label = video[0]\n start_idx = 0\n n_gt = 0\n gt=[]\n proposals = []\n for i in range(n_frame):\n if video[i] == current_label:\n continue\n else:\n end_idx = i - 1\n label = mapping[current_label]\n\n if label != 0:\n n_gt = n_gt + 1\n gt.append([label, start_idx, end_idx])\n print(current_label, mapping[current_label], start_idx, end_idx)\n start_idx = i\n current_label = video[i]\n\n print(len(proposals))\n\n f.write(\"#%s\\n\" %index)\n f.write(video_path + \"\\n\")\n f.write(str(n_frame)+\"\\n\" + \"1\" + \"\\n\")\n f.write(str(n_gt) + \"\\n\")\n for i in range(n_gt):\n f.write(str(gt[i][0]) + \" \" + str(gt[i][1]) + \" \"+ str(gt[i][2]) + \"\\n\")\n ps = generate_proposals(start_gt=gt[i][1], end_gt=gt[i][2], label=gt[i][0], n_frame=n_frame,\n n_to_generate=n_ps_per_gt)\n proposals.extend(ps)\n shuffle(proposals)\n f.write(str(len(proposals)) + \"\\n\")\n for i in range(len(proposals)):\n f.write(str(proposals[i][0]) + \" \" + str(proposals[i][1]) + \" \" + str(proposals[i][2]) + \" \" +\n str(proposals[i][3]) + \" \" + str(proposals[i][4]) + \"\\n\")\n\n\n\n\n\n\ndef main():\n path = \"CS6101/\"\n mapping_filepath = path + \"splits/mapping_bf.txt\"\n mapping_df = pd.read_csv(mapping_filepath, header=None, sep=\" \")\n\n mapping = dict(zip(mapping_df[mapping_df.columns[1]], mapping_df[mapping_df.columns[0]]))\n print(mapping)\n\n videos = os.listdir(path + \"groundtruth\")\n print()\n print(len(videos))\n\n output_filepath = \"data/breakfast_proposal.txt\"\n f = open(output_filepath, \"w\")\n for i in range(len(videos)):\n generate_proposal_file_per_video(i, video_path= path + \"groundtruth/\" + videos[i],\n gt_path=path + \"groundtruth/\" + videos[i],\n mapping=mapping,\n f=f,\n n_ps_per_gt=100)\n f.close()\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n" } ]
1
EdgarOPG/Second-Partial-Proyect-Data-Mining
https://github.com/EdgarOPG/Second-Partial-Proyect-Data-Mining
4ea99239c51d4b60bd3b0e23eb74adb05495e5b6
438cf5ebd9acfa0b82813c900328335c476e274c
b5ae909fc066c20361c6088bd1b8d2cb5d7a564f
refs/heads/master
2021-01-19T08:46:52.589300
2017-05-11T04:37:35
2017-05-11T04:37:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6370322704315186, "alphanum_fraction": 0.6493444442749023, "avg_line_length": 27.688074111938477, "blob_id": "303be3f731acdf7d0d808c5f1120e04afbe69438", "content_id": "4ba748d217420c427dbfef12bd66d1c9ab6230c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6254, "license_type": "permissive", "max_line_length": 86, "num_lines": 218, "path": "/primera_iteracion.py", "repo_name": "EdgarOPG/Second-Partial-Proyect-Data-Mining", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Normando Ali Zubia Hernández\nThis file is created to explain the use of dimensionality reduction\nwith different tools in sklearn library.\nEvery function contained in this file belongs to a different tool.\n\"\"\"\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.feature_selection import RFE\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn import metrics\n\nimport pandas as pd\nimport numpy\n\ndef get_feacture_subset(data, *args):\n featureDic = []\n for arg in args:\n featureDic.append(arg)\n\n subset = data[featureDic]\n return subset\n\ndef attribute_subset_selection_with_trees(data):\n # import data\n X = data[:,1:-1]\n Y = numpy.asarray(data[:,-1], dtype=\"int16\")\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(X[:20]))\n print('\\n')\n print('Targets:\\n\\n' + str(Y[:20]))\n\n # Model declaration\n extra_tree = ExtraTreesClassifier()\n\n # Model training\n extra_tree.fit(X, Y)\n\n # Model information:\n print('\\nModel information:\\n')\n\n # display the relative importance of each attribute\n print('Importance of every feature:\\n' + str(extra_tree.feature_importances_))\n\n # If model was training before prefit = True\n model = SelectFromModel(extra_tree, prefit = True)\n\n # Model transformation\n new_feature_vector = model.transform(X)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(new_feature_vector[:10])\n\ndef principal_components_analysis(data, columns, n_components):\n # import data\n X = data[:,1:-1]\n Y = numpy.asarray(data[:,-1], dtype=\"int16\")\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(X[:10]))\n print('\\n')\n print('Targets:\\n\\n' + str(Y[:10]))\n\n # Model declaration\n if n_components < 1:\n pca = PCA(n_components = n_components, svd_solver = 'full')\n else:\n pca = PCA(n_components = n_components)\n\n # Model training\n pca.fit(X)\n\n # Model transformation\n new_feature_vector = pca.transform(X)\n\n # Model information:\n print('\\nModel information:\\n')\n print('Number of components elected: ' + str(pca.n_components))\n print('New feature dimension: ' + str(pca.n_components_))\n print('Variance of every feature: \\n' + str(pca.explained_variance_ratio_))\n\n # First 10 rows of new feature vector\n #print('\\nNew feature vector:\\n')\n #print(new_feature_vector[:10])\n\n #print(pd.DataFrame(pca.components_,columns=columns[1:-1]))\n\n # Print complete dictionary\n # print(pca.__dict__)\n\ndef z_score_normalization(data):\n print('----- z_score_normalization -------\\n')\n # import data\n X = data[:,1:-1]\n Y = numpy.asarray(data[:,-1], dtype=\"int16\")\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(X[:10]))\n print('\\n')\n print('Targets:\\n\\n' + str(Y[:10]))\n\n # Data standarization\n standardized_data = preprocessing.scale(X)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(standardized_data[:10])\n\ndef min_max_scaler(data):\n print('----- min_max_scaler -------\\n')\n # import data\n X = data[:,1:-1]\n Y = numpy.asarray(data[:,-1], dtype=\"int16\")\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(X[:10]))\n print('\\n')\n print('Targets:\\n\\n' + str(Y[:10]))\n\n # Data normalization\n min_max_scaler = preprocessing.MinMaxScaler()\n\n min_max_scaler.fit(X)\n\n # Model information:\n print('\\nModel information:\\n')\n print('Data min: \\n' + str(min_max_scaler.data_min_))\n print('Data max: \\n' + str(min_max_scaler.data_max_))\n\n new_feature_vector = min_max_scaler.transform(X)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(new_feature_vector[:10])\n\ndef fill_missing_values_with_constant(data, column, constant):\n temp = data[column].fillna(constant)\n data[column] = temp\n return data\n\ndef fill_missing_values_with_mean(data, column):\n temp = data[column].fillna(data[column].mean())\n data[column] = temp\n return data\n\ndef fill_missing_values_with_mode(data, column):\n temp = data[column].fillna(data[column].mode()[0])\n data[column] = temp\n return data\n\ndef convert_data_to_numeric(data):\n numpy_data = data.values\n for i in range(len(numpy_data[0])):\n temp = numpy_data[:,i]\n dict = numpy.unique(numpy_data[:,i])\n # print(dict)\n for j in range(len(dict)):\n # print(numpy.where(numpy_data[:,i] == dict[j]))\n temp[numpy.where(numpy_data[:,i] == dict[j])] = j\n numpy_data[:,i] = temp\n return numpy_data\n\nif __name__ == '__main__':\n\n data = pd.read_csv('train.csv')\n data['LotFrontage'] = data['LotFrontage'].replace('NaN', -1, regex=False)\n\n #Outlier\n data = fill_missing_values_with_constant(data, 'MasVnrArea', 0)\n\n #Outlier\n data = fill_missing_values_with_constant(data, 'GarageYrBlt', -1)\n\n data = data.fillna('NaN')\n\n columns = data.columns\n #print(columns)\n\n data = convert_data_to_numeric(data)\n\n #z_score_normalization(data)\n min_max_scaler(data)\n\n attribute_subset_selection_with_trees(data)\n principal_components_analysis(data,columns,.9)\n\n feature_vector = data[:,1:-1]\n targets = data[:,-1]\n\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(feature_vector,\n targets,\n test_size=0.25)\n\n # Model declaration\n \"\"\"\n Parameters to select:\n criterion: \"mse\"\n max_depth: maximum depth of tree, default: None\n \"\"\"\n dec_tree_reg = DecisionTreeRegressor(criterion='mse', max_depth=7)\n dec_tree_reg.fit(data_features_train, data_targets_train)\n\n # Model evaluation\n test_data_predicted = dec_tree_reg.predict(data_features_test)\n\n error = metrics.mean_absolute_error(data_targets_test, test_data_predicted)\n\n print('Total Error: ' + str(error))\n" }, { "alpha_fraction": 0.5994884967803955, "alphanum_fraction": 0.6153452396392822, "avg_line_length": 24.389610290527344, "blob_id": "886e4407bcd61d6424b6f54b544e267cab7b838b", "content_id": "e480a97cc07c1f3e57880c8f5830dae84f30e39d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1955, "license_type": "permissive", "max_line_length": 66, "num_lines": 77, "path": "/normalization.py", "repo_name": "EdgarOPG/Second-Partial-Proyect-Data-Mining", "src_encoding": "UTF-8", "text": "\"\"\"\nAuthor: Normando Ali Zubia Hernández\n\nThis file is created to explain the use of normalization\nwith different tools in sklearn library.\n\nEvery function contained in this file belongs to a different tool.\n\"\"\"\n\nfrom sklearn import preprocessing\n\nimport pandas as pd\nimport numpy\n\ndef z_score_normalization(data):\n # import data\n X = data[:,0:-2]\n Y = numpy.asarray(data[:,-1], dtype=\"int16\")\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(X[:10]))\n print('\\n')\n print('Targets:\\n\\n' + str(Y[:10]))\n\n # Data standarization\n standardized_data = preprocessing.scale(X)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(standardized_data[:10])\n\ndef min_max_scaler(data):\n # import data\n X = data[:,0:-2]\n Y = numpy.asarray(data[:,-1], dtype=\"int16\")\n\n # First 10 rows\n print('Training Data:\\n\\n' + str(X[:10]))\n print('\\n')\n print('Targets:\\n\\n' + str(Y[:10]))\n\n # Data normalization\n min_max_scaler = preprocessing.MinMaxScaler()\n\n min_max_scaler.fit(X)\n\n # Model information:\n print('\\nModel information:\\n')\n print('Data min: ' + str(min_max_scaler.data_min_))\n print('Data max: ' + str(min_max_scaler.data_max_))\n\n new_feature_vector = min_max_scaler.transform(X)\n\n # First 10 rows of new feature vector\n print('\\nNew feature vector:\\n')\n print(new_feature_vector[:10])\n\ndef convert_data_to_numeric(data):\n numpy_data = data.values\n\n for i in range(len(numpy_data[0])):\n temp = numpy_data[:,i]\n dict = numpy.unique(numpy_data[:,i])\n # print(dict)\n for j in range(len(dict)):\n # print(numpy.where(numpy_data[:,i] == dict[j]))\n temp[numpy.where(numpy_data[:,i] == dict[j])] = j\n\n numpy_data[:,i] = temp\n\n return numpy_data\n\nif __name__ == '__main__':\n data = pd.read_csv('train.csv')\n data = convert_data_to_numeric(data)\n z_score_normalization(data)\n min_max_scaler(data)\n" }, { "alpha_fraction": 0.579365074634552, "alphanum_fraction": 0.5827664136886597, "avg_line_length": 23.5, "blob_id": "d866e5413c024e5742b9cd951aa1f3d0888fee00", "content_id": "e0afe37f5bd47f48e57bc1006570372446dddd5d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 882, "license_type": "permissive", "max_line_length": 55, "num_lines": 36, "path": "/clean.py", "repo_name": "EdgarOPG/Second-Partial-Proyect-Data-Mining", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as ptl\nimport math as mt\n\ndef open_file(fileName):\n data = pd.read_csv(fileName)\n return data\n\ndef show_data_info(data):\n print(\"Number of instance:\" + str(data.shape[0]))\n print(\"Number of features:\" + str(data.shape[1]))\n print(\"------------------------------------------\")\n\n print(\"Initial instance:\\n\")\n print(data)\n\n print(\"Numerical info:\\n\")\n numerical_info = data.iloc[:, :data.shape[1]]\n print(numerical_info.describe())\n\ndef count_words(data, column):\n temp = []\n array = []\n for x in range(len(data)):\n array = data.iloc[x][column].split(' ')\n temp.append(len(array))\n data[column] = temp\n return data\n\ndef save(data):\n data.to_csv('clean.csv', index = False)\n\nif __name__ == '__main__':\n data = open_file('train.csv')\n show_data_info(data)\n #save(data);\n" }, { "alpha_fraction": 0.6340670585632324, "alphanum_fraction": 0.6443221569061279, "avg_line_length": 22.797618865966797, "blob_id": "083f7455e0a7177c917f114002c94be248b04c1f", "content_id": "364902814a32e6063425894aca868f6c8748cccf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3999, "license_type": "permissive", "max_line_length": 84, "num_lines": 168, "path": "/a.py", "repo_name": "EdgarOPG/Second-Partial-Proyect-Data-Mining", "src_encoding": "UTF-8", "text": "\"\"\"\n*This module was create for Data Mining subject in Universidad Autonóma de Chihuahua\n*Professor: M.I.C Normando Ali Zubia Hernández\n\nModule information:\nThe principal functions of this module are:\n*Create violin graphs\n*Create box-Graphs\n*Create Histograms\n\nInformation contact:\nemail: [email protected]\n\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pandas.tools.plotting import scatter_matrix\n\ndef open_file(fileName):\n '''\n This method will open a file in csv format\n :param fileName: file to open (Complete PATH)\n :return: Pandas Data Frame\n '''\n #TODO csv file validation\n\n data = pd.read_json(fileName)\n\n return data\n\ndef create_histogram(data):\n data.hist(column = 'bedrooms')\n\n plt.show()\n\ndef create_density_plot(data):\n data.plot(kind='density', subplots=True, layout=(3, 3), sharex=False)\n plt.show()\n\ndef create_whisker_plots(data):\n data.plot(kind='box', subplots=True, layout=(3, 3), sharex=False, sharey=False)\n plt.show()\n\ndef show_data_info(data):\n print(\"Number of instance: \" + str(data.shape[0]))\n print(\"Number of fetures: \" + str(data.shape[1]))\n\n print('------------------------------------------')\n\n print(\"Initial instances:\\n\")\n print(data.head(10))\n\n print(\"Numerical Information:\\n\")\n numerical_info = data.iloc[:, :data.shape[1]]\n print(numerical_info.describe())\n\ndef get_feature_subset(data, *args):\n featureDict = []\n for arg in args:\n featureDict.append(arg)\n\n subset = data[featureDict]\n\n return subset\n\ndef delete_column(data, *args):\n for arg in args:\n data = data.drop(arg, 1)\n\n return data\n\ndef delete_missing_objects(data, type):\n type = 0 if type == 'instance' else 1\n\n data = data.dropna(axis = type)\n\n return data\n\ndef replace_missing_values_with_constant(data, column, constant):\n temp = data[column].fillna(constant)\n data[column] = temp\n\n return data\n\ndef replace_missing_values_with_mean(data, column):\n temp = data[column].fillna(data[column].mean())\n data[column] = temp\n\n return data\n\ndef numero_banios_influye_precio(data):\n\n numbBath = data['bathrooms'].value_counts()\n numbBathKeys = numbBath.keys()\n\n priceArray = []\n for number in numbBathKeys:\n subset = data.loc[data['bathrooms'] == number]\n print('Numero de banios:' + str(number))\n print(subset['price'])\n priceArray.append(subset[\"price\"].mean())\n\n print(numbBathKeys)\n print(priceArray)\n\n width = .2\n plt.bar(numbBathKeys, priceArray, width, color=\"blue\")\n\n plt.ylabel('precio')\n plt.xlabel('#banios')\n plt.title('banios inlfuye precio')\n plt.xticks(np.arange(0, max(numbBathKeys), .5))\n plt.yticks(np.arange(0, 60000, 5000))\n\n\n plt.show()\n\ndef numero_habitaciones_influye_precio(data):\n\n numbHab = data['bedrooms'].value_counts()\n numbHabKeys = numbHab.keys()\n\n priceArray = []\n for number in numbHabKeys:\n subset = data.loc[data['bedrooms'] == number]\n print('Numero de habitaciones:' + str(number))\n print(subset['price'])\n priceArray.append(subset[\"price\"].mean())\n\n print(numbHabKeys)\n print(priceArray)\n\n width = .2\n plt.bar(numbHabKeys, priceArray, width, color=\"blue\")\n\n plt.ylabel('precio')\n plt.xlabel('#habitaciones')\n plt.title('Habitaciones influye precio')\n plt.xticks(np.arange(0, max(numbHabKeys), .5))\n plt.yticks(np.arange(0, 15000, 1000))\n\n\n plt.show()\n\nif __name__ == '__main__':\n filePath = \"train.json\"\n\n data = open_file(filePath)\n\n\n\n #headers = [x for x in data]\n #print(headers)\n #for head in headers:\n # if head != 'description' and head != 'features' and head != 'photos':\n # print(data[head].value_counts())\n #print(data.head)\n #show_data_info(data)\n #print(data[0:10])\n\n #numero_banios_influye_precio(data)\n numero_habitaciones_influye_precio(data)\n\n #create_histogram(data)\n #create_density_plot(data)\n #create_whisker_plots(data)\n" } ]
4
isabellaleehs/Data_Visualization
https://github.com/isabellaleehs/Data_Visualization
02f140fce716512d8def3cdd56262b037de1b3c9
91fd678f48fc39bef86a17efb573418e3f92dac6
f7b784588c260f4258361edb86ce1d719d62bac9
refs/heads/master
2021-09-01T01:28:52.194507
2017-12-24T05:10:17
2017-12-24T05:10:17
115,241,197
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8285714387893677, "alphanum_fraction": 0.8285714387893677, "avg_line_length": 34, "blob_id": "b663a9a9ec6e65ba77e98a3bc95889f199b99938", "content_id": "0b66cb08a94bfbaf74ad5acfff6152c662bc501c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 70, "license_type": "no_license", "max_line_length": 39, "num_lines": 2, "path": "/README.md", "repo_name": "isabellaleehs/Data_Visualization", "src_encoding": "UTF-8", "text": "# Data Visualization Projects\nA collection of mini projects in Python\n" }, { "alpha_fraction": 0.5464021563529968, "alphanum_fraction": 0.5706119537353516, "avg_line_length": 28.445545196533203, "blob_id": "c1ec95c776e2a3d538e6fb48354d54a069c447b5", "content_id": "b7c97752591b960cad78ad5c5d08b6d33d25d7fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2974, "license_type": "no_license", "max_line_length": 117, "num_lines": 101, "path": "/Choropleth map/make_map.py", "repo_name": "isabellaleehs/Data_Visualization", "src_encoding": "UTF-8", "text": "# Create choropleth map\n#\n# Date: Dec 2017\n\nimport plotly as py\nimport pandas as pd\nimport pycountry\n\ndef get_data(filename):\n '''\n Loads data from file and cleans it.\n\n Inputs:\n filename: file directory\n\n Returns: a cleaned dataframe\n '''\n df = pd.read_csv(filename)\n\n # Reset header row\n df.columns = df.iloc[0] \n df = df[1:] \n\n # Rename column\n df = df.rename(index=str, columns={\"2016\": \"Estimated no. w/ HIV\"})\n\n # Remove all parenthesis and square brackets\n df['Country'] = df.Country.apply(lambda x: x.replace(' (',', ').replace(')',''))\n # Alternative to above: df['Country'] = df['Country'].str.replace(r\"\\s+\\((.*)\\)\", r\", \\1\")\n df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.replace(r\"\\s+\\[.*\\]\",\"\")\n\n # Lower case, remove spaces between numbers, remove strings and set to 0\n df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.replace(\" \",\"\")\n df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.strip(\"<>\")\n df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.replace(\"Nodata\",\"\")\n\n # Modify names of countries not recognized by pycountry\n df['Country'] = df['Country'].replace('Democratic Republic of the Congo','Congo, the Democratic Republic of the')\n df['Country'] = df['Country'].replace('Republic of Korea',\"Korea, Democratic People's Republic of\")\n return df\n\n\ndef get_country_code(x):\n '''\n Finds the 3 letter alpha code for a country.\n\n Inputs:\n x: country name\n\n Returns: alpha_3 code for the country\n '''\n if pycountry.countries.lookup(x) != None:\n return pycountry.countries.lookup(x).alpha_3\n\n\n# Get and clean data\ndf = get_data('data.csv')\ndf['Code'] = df['Country'].apply(get_country_code)\n\n# Make choropleth map using data\ndata = [ dict(\n type = 'choropleth',\n locations = df['Code'],\n z = df['Estimated no. w/ HIV'],\n text = df['Country'],\n colorscale = [[0,\"#c6dbef\"],[0.2,\"#6baed6\"],[0.4,\"#4292c6\"],\\\n [0.6,\"#2171b5\"],[0.8,\"#0e5693\"],[1,\"#013e7c\"]],\n autocolorscale = False,\n reversescale = False,\n marker = dict(\n line = dict (\n color = 'rgb(180,180,180)',\n width = 0.5\n ) ),\n colorbar = dict(\n autotick = False,\n title = 'Estimated no.<br>w/ HIV'),\n ) ]\n\nlayout = dict(\n title = 'Number of people (all ages) living with HIV<br>Estimates by country<br><br>\\\n [Source:<a href=\"http://apps.who.int/gho/data/node.main.620?lang=en\"> World Health Organization</a>]',\n margin = dict(\n l=10,\n r=10,\n b=50,\n t=150,\n pad=4\n ),\n geo = dict(\n showframe = False,\n showcoastlines = False,\n projection = dict(\n type = 'Mercator'\n )\n )\n)\n\n# Display map\nfig = dict( data=data, layout=layout )\npy.offline.plot( fig, validate=False, filename='d3-world-map' )\n" } ]
2
chsoftworld/S-SEC-demo-
https://github.com/chsoftworld/S-SEC-demo-
c3bd4548d1817ccddf0f582029124a37245a4d2f
b1585e8bd0f9246877ea35209cc8dcbf71e90ebc
94a34bf072949836e51c08d9a4b6f18629f44014
refs/heads/master
2022-01-30T14:52:22.520358
2019-07-31T09:27:05
2019-07-31T09:27:05
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46599894762039185, "alphanum_fraction": 0.5666842460632324, "avg_line_length": 29.063491821289062, "blob_id": "ce01cf5e02f0676e59419c3b8cb833f9397b5750", "content_id": "01150040340177410fe80d38a00112c9f765f285", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2069, "license_type": "no_license", "max_line_length": 401, "num_lines": 63, "path": "/Stock SEC/demo.py", "repo_name": "chsoftworld/S-SEC-demo-", "src_encoding": "UTF-8", "text": "import requests\nfrom lxml import etree\nimport pymysql\n\nurl = 'http://data.10jqka.com.cn/funds/ddzz/#refCountId=db_50741cd6_397,db_509381c1_860'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1;WOW64; rv:6.0) '\n 'Gecko/20100101 Firefox/6.0',\n}\n\nhtml = requests.get(url,headers=headers).text\nparse_html = etree.HTML(html)\n\nnum_list = parse_html.xpath('//tbody/tr/td[2]/a/text()')\nname_list = parse_html.xpath('//tbody/tr/td[3]/a/text()')\nstacks = []\ncount = 0\nfor i in range(len(num_list)):\n if count==20:\n break\n demo = [name_list[i],num_list[i],]\n if demo not in stacks:\n count+=1\n stacks.append(demo)\n else:\n continue\nprint(stacks)\nprint(len(stacks))\n\n\n# [['300785', 'N值得买'], ['002105', '信隆健康'], ['002453', '华软科技'], ['300167', '迪威迅'], ['600078', '澄星股份'], ['002473', '圣莱达'], ['002225', '濮耐股份'], ['000586', '汇源通信'], ['002124', '天邦股份'], ['300527', '中国应急'], ['603189', '网达软件'], ['300378', '鼎捷软件'], ['300417', '南华仪器'], ['300632', '光莆股份'], ['300424', '航新科技'], ['002915', '中欣氟材'], ['300769', '德方纳米'], ['603068', '博通集成'], ['002312', '三泰控股'], ['300253', '卫宁健康']]\ndb = pymysql.connect('localhost','root','123456','SSEC',charset='utf8')\ncursor = db.cursor()\ncount = 0\n\nfor i in stacks:\n cursor.execute('select count(id) from stacks')\n res = cursor.fetchall()\n if res[0][0] == 20:\n print('数据已满')\n break\n try:\n\n cursor.execute('insert into stacks values(Null,%s,%s)',[i[0],i[1]])\n db.commit()\n count += 1\n print(count/20*100,'%--完成')\n except Exception as e:\n print(e)\n result = input('>>r键返回')\n if result == 'r':\n db.rollback()\n break\n else:\n continue\ncursor.execute('select * from stacks')\nres = cursor.fetchall()\nprint(res)\nprint(len(res))\ncursor.close()\ndb.close()\nfor i in range(20):\n print(i//4+1,i%4+1,end=' ')\n\n\n\n" }, { "alpha_fraction": 0.4702053964138031, "alphanum_fraction": 0.5344722270965576, "avg_line_length": 30.21656036376953, "blob_id": "95f851cc57da1212a968ce905b857f285ab7a5dc", "content_id": "bc3eccf45af455a4548942f8ede7d9585fc40617", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5219, "license_type": "no_license", "max_line_length": 345, "num_lines": 157, "path": "/Stock SEC/UI.py", "repo_name": "chsoftworld/S-SEC-demo-", "src_encoding": "UTF-8", "text": "import tkinter as tk\nfrom threading import Thread\nfrom tkinter import messagebox\nimport pymysql as sql\nimport requests\nimport time\nfrom lxml import etree\nimport json\nfrom stack_detail import *\nfrom gevent import monkey # monkey 插件\nfrom queue import Queue\nimport os\n\n\n\nclass SSEC:\n \"\"\"\n 界面可视化\n \"\"\"\n def __init__(self,window):\n self.window = window\n self.table = tk.Label(self.window,bg='#2c3842')\n self.table.pack(fill='both', expand=1)\n self.image = tk.PhotoImage(file='stacks_SEG.png')\n self.db = sql.connect('localhost', 'root', '123456', 'SSEC', charset='utf8')\n self.cursor = self.db.cursor()\n\n self.index()\n def index(self):\n \"\"\"\n 主页面,默认有20枚股票\n :return:\n \"\"\"\n messagebox.showwarning(title='SSEC',message='准备获取实时数据,这会占用您几秒钟,\\n点击[ok]开始')\n self.label = tk.Label(self.table,bg='#2c3842')\n self.label.pack()\n self.cursor.execute('select * from stacks') # 从数据库提取股票数据(股票名称与股票编号)\n self.res = self.cursor.fetchall()\n count = -1\n stack_box = {}\n self.url = 'http://www.aigaogao.com/tools/action.aspx?act=apr'\n ths = []\n self.colors = {}\n for i in self.res:\n \"\"\"\n 使用多线程分别爬取20枚股票当前的涨跌状态\n \"\"\"\n name = i[1]\n number = i[2]\n t = Thread(target=self.get_color,args=(name,number))\n ths.append(t)\n t.start()\n for i in ths:\n i.join()\n for i in self.res:\n \"\"\"\n 根据当前的涨跌状态为每一枚股票上色\n \"\"\"\n count += 1\n name = i[1]\n number = i[2]\n stack_box[str(count)] = tk.Label(self.label, bg='#2c3842')\n stack_box[str(count)].grid(row=count // 4 + 1, column=count % 4 + 1, pady=6, padx=3)\n tk.Button(stack_box[str(count)], bd=1, text=name, width=10, height=2, font=('黑体', '12', 'bold'), bg=self.colors[name],\n fg='white', command=lambda num=number, name=name: self.detail(num, name)).grid(row=1, column=1)\n tk.Button(stack_box[str(count)], bd=1, text='X', bg='#f84b4c', font=('黑体', '12', 'bold'), fg='white',\n height=2).grid(row=1, column=2)\n self.entry = tk.Entry(self.table, width=30, font=('黑体', '12', 'bold'))\n self.entry.place(x=140, y=420)\n btn = tk.Button(self.table, width=20, text='搜索其他股票', fg='white', bg='#25a9e1')\n btn.place(x=420, y=420)\n\n def get_color(self,name,number):\n \"\"\"\n 每个线程爬取自己当前股票的颜色值\n :param name:\n :param number:\n :return:\n \"\"\"\n headers = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Content-Length': '11',\n 'Content-type': 'application/x-www-form-urlencoded',\n 'Cookie': 'Hm_lvt_85261bbccca7731cac0375109980ddf5=1563243079; __utmc=90353546; __utmz=90353546.1563243079.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=90353546.1687968940.1563243079.1563243079.1563262167.2; __utmt=1; s_histo=601678; __utmb=90353546.12.10.1563262167; Hm_lpvt_85261bbccca7731cac0375109980ddf5=1563264268',\n 'Host': 'www.aigaogao.com',\n 'Origin': 'http://www.aigaogao.com',\n 'Referer': 'http://www.aigaogao.com/tools/history.html?s={}'.format(number),\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',\n 'X-Prototype-Version': '1.4.0',\n 'X-Requested-With': 'XMLHttpRequest',\n }\n\n data = {'s': str(number)}\n html = requests.post(self.url, headers=headers, data=data).text\n d = eval(html)\n num = float(d['data'][0]['change'])\n if num > 0:\n self.colors[name] = '#da7252'\n elif num == 0:\n self.colors[name] = '#747474'\n else:\n self.colors[name] = '#2db67a'\n\n\n\n\n\n\n\n\n\n\n\n def detail(self,num,name):\n\n \"\"\"\n 生成子进程,用于观察股票的走势\n :param num:\n :param name:\n :return:\n \"\"\"\n monkey.patch_all()\n pid = os.fork()\n if pid<0:\n print('子进程创建失败')\n elif pid==0:\n Details(num,name)\n else:\n while True:\n time.sleep(0.1)\n def back_to_index(self):\n \"\"\"\n 返回首页函数\n :return:\n \"\"\"\n os._exit(0) # 结束子进程\n self.label.destroy()\n self.index()\n\n def views(self):\n\n self.label = tk.Label(self.table, bg='#2c3842',image=self.image)\n tk.Button(self.table,bg='#25a9e1',command=self.back_to_index)\n\n\n\n\n\nif __name__=='__main__':\n window = tk.Tk(className='S-SEC')\n window.geometry('720x500')\n\n SSEC(window)\n window.mainloop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5254640579223633, "alphanum_fraction": 0.5764873623847961, "avg_line_length": 36.25531768798828, "blob_id": "8124f710c7f28a8a170f6fc2dd32201259e3c9c0", "content_id": "faa74736c17d4edf58a3e292c0144f5c5ffd9019", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11225, "license_type": "no_license", "max_line_length": 301, "num_lines": 282, "path": "/Stock SEC/stack_detail.py", "repo_name": "chsoftworld/S-SEC-demo-", "src_encoding": "UTF-8", "text": "# from lxml import etree\n# import requests\n# import numpy as np\n# import matplotlib.dates as md\n# import matplotlib.pyplot as mp\n# from UI import *\n# def details(num,name):\n# \"\"\"\n# 获取并绘制数据\n# :param num:\n# :return:\n# \"\"\"\n# print('start get')\n#\n# \"\"\"\n# 获取阶段\n# \"\"\"\n# url = 'http://www.aigaogao.com/tools/history.html?s={}'.format(num)\n# headers = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n# 'Accept-Encoding': 'gzip, deflate',\n# 'Accept-Language': 'zh-CN,zh;q=0.9',\n# 'Cache-Control': 'max-age=0',\n# 'Connection': 'keep-alive',\n# 'Cookie': 'Hm_lvt_85261bbccca7731cac0375109980ddf5=1563243079; __utmc=90353546; __utmz=90353546.1563243079.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=90353546.1687968940.1563243079.1563243079.1563262167.2; s_histo=601678; Hm_lpvt_85261bbccca7731cac0375109980ddf5=1563264268',\n# 'Host': 'www.aigaogao.com',\n# 'Referer': 'http://www.aigaogao.com/tools/history.html?s={}'.format(num),\n# 'Upgrade-Insecure-Requests': '1',\n# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',\n#\n# }\n# print('web start get')\n# html = requests.get(url,headers).text\n# print('web get over')\n# dates =[]\n# opening_prices=[]\n# hightest_prices=[]\n# lowerest_price=[]\n# closing_prices=[]\n# volumns = []\n# for i in range(90,0,-1):\n# res = etree.HTML(html).xpath('//div[@id=\"ctl16_contentdiv\"]//tr[{}]//text()'.format(i+1))\n#\n# str_list = res[0].split('/')\n# date = '-'.join([str_list[-1],str_list[0],str_list[1]])\n# dates.append(date)\n# opening_prices.append(float(res[1].replace(',','')))\n# hightest_prices.append(float(res[2].replace(',','')))\n# lowerest_price.append(float(res[3].replace(',','')))\n# closing_prices.append(float(res[4].replace(',','')))\n# volumns.append(float(res[5].replace(',','')))\n# dates = np.array(dates,dtype='M8[D]')\n# opening_prices = np.array(opening_prices)\n# hightest_prices=np.array(hightest_prices)\n# lowerest_price=np.array(lowerest_price)\n# closing_prices=np.array(closing_prices)\n# volumns = np.array(volumns)\n# print('start draw')\n# \"\"\"\n# 绘制阶段\n# \"\"\"\n# mp.figure('S-SEC', facecolor='lightgray') # 设定窗口标题,窗口背景色\n# mp.title(num, fontsize=18) # 设定窗口内标题\n#\n# mp.xlabel('Date', fontsize=14) # 设定x轴标题\n# mp.ylabel('Price', fontsize=14) # 设定y轴标题\n# mp.grid(linestyle=':') # 设定图标网格线\n# mp.tick_params(labelsize=10) # 设定刻度参数文字大小\n# # 设置可定定位器\n# ax = mp.gca() # 获取当前坐标轴\n# maloc = md.WeekdayLocator(byweekday=md.MO) # 每周一 一个主刻度\n# miloc = md.DayLocator() # 每天一个子刻度\n# ax.xaxis.set_major_locator(maloc)\n# # 设置主刻度日期的格式\n# ax.xaxis.set_major_formatter(md.DateFormatter('%Y-%m-%d'))\n#\n# ax.xaxis.set_minor_locator(miloc)\n# dates = dates.astype(md.datetime.datetime) # 转日期格式\n#\n# # 收盘走势线\n# mp.plot(dates, closing_prices, label='Closing_prices', linewidth=2, color='black', alpha=1.0)\n# # 绘制蜡烛图\n# # 调整颜色\n# rise = closing_prices >= opening_prices\n# color = [('white' if x else 'green') for x in rise]\n# ecolor = [('red' if x else 'green') for x in rise]\n# # 绘制实体\n# heights = closing_prices - opening_prices\n# mp.bar(dates, heights, 0.8, opening_prices, color=color, edgecolor=ecolor, align='center',zorder=-4)\n# # 绘制影线\n# mp.vlines(dates,lowerest_price, hightest_prices, color=ecolor, zorder=-5)\n#\n# # 实现加权卷积\n# # 通过指数函数,寻求一组卷积核\n# kernel = np.exp(np.linspace(-1, 0, 5))\n# kernel = kernel[::-1]\n#\n# # 绘制5日均线-加权卷积运算\n# sma53 = np.convolve(closing_prices, kernel, 'valid') / kernel.sum()\n# mp.plot(dates[4:], sma53, label='SMA-5days+', linewidth=2, color='gray', alpha=0.7, zorder=-4)\n# # print('sma5+:',sma53[-5:])\n# #  求5日布林带\n# stds = np.zeros(sma53.size)\n# for i in range(stds.size):\n# stds[i] = closing_prices[i:i + 5].std()\n# lowers = sma53 - 2 * stds\n# mp.plot(dates[4:], lowers, label='lowers', linewidth=2, color='gray', alpha=0.2)\n# # print('lowers:',lowers[-5:])\n# uppers = sma53 + 2 * stds\n# mp.plot(dates[4:], uppers, label='uppers', linewidth=2, color='gray', alpha=0.2)\n# # print('uppers:',uppers[-5:])\n# mp.fill_between(dates[4:], uppers, lowers, uppers > lowers, color='gray', alpha=0.2, zorder=-1)\n#\n# mp.legend(loc='lower right', fontsize=10, )\n# mp.gcf().autofmt_xdate() # 自动斜化\n#\n# mp.show()\n#\n#\n# if __name__=='__main__':\n# details(600745,'实验')\n#\n#\n#\n\nfrom lxml import etree\nimport requests\nimport numpy as np\nimport matplotlib.dates as md\nimport matplotlib.pyplot as mp\nfrom UI import *\nclass Details:\n def __init__(self,num,name):\n self.num = num\n self.name = name\n self.dates = []\n self.opening_prices = []\n self.hightest_prices = []\n self.lowerest_price = []\n self.closing_prices = []\n self.volumns = []\n self.plan = 0\n self.details()\n\n def details(self):\n \"\"\"\n 获取并绘制数据\n :param num:\n :return:\n \"\"\"\n print('start get')\n\n \"\"\"\n 获取阶段\n \"\"\"\n url = 'http://www.aigaogao.com/tools/history.html?s={}'.format(self.num)\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Cookie': 'Hm_lvt_85261bbccca7731cac0375109980ddf5=1563243079; __utmc=90353546; __utmz=90353546.1563243079.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=90353546.1687968940.1563243079.1563243079.1563262167.2; s_histo=601678; Hm_lpvt_85261bbccca7731cac0375109980ddf5=1563264268',\n 'Host': 'www.aigaogao.com',\n 'Referer': 'http://www.aigaogao.com/tools/history.html?s={}'.format(self.num),\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',\n\n }\n print('web start get')\n self.html = requests.get(url,headers).text\n print('web get over')\n\n\n self.jobs = []\n for i in range(90,0,-1):\n\n tt = Thread(target=self.get_msg,args=(i,))\n self.jobs.append(tt)\n tt.setDaemon(True)\n tt.start()\n\n\n # for job in self.jobs:\n # job.join()\n # print('回收')\n self.shows()\n def get_msg(self,i):\n \"\"\"\n 网页源码中获取股票日期,低开高收参数值\n :param i:\n :return:\n \"\"\"\n res = etree.HTML(self.html).xpath('//div[@id=\"ctl16_contentdiv\"]//tr[{}]//text()'.format(i+1))\n\n str_list = res[0].split('/')\n date = '-'.join([str_list[-1],str_list[0],str_list[1]])\n self.dates.append(date)\n self.opening_prices.append(float(res[1].replace(',','')))\n self.hightest_prices.append(float(res[2].replace(',','')))\n self.lowerest_price.append(float(res[3].replace(',','')))\n self.closing_prices.append(float(res[4].replace(',','')))\n self.volumns.append(float(res[5].replace(',','')))\n self.plan+=1\n print('进度:%.2f'%(self.plan/90*100)+'%')\n return\n\n\n\n\n\n def shows(self):\n \"\"\"\n 绘制阶段\n \"\"\"\n self.dates = np.array(self.dates, dtype='M8[D]')\n self.opening_prices = np.array(self.opening_prices)\n self.hightest_prices = np.array(self.hightest_prices)\n self.lowerest_price = np.array(self.lowerest_price)\n self.closing_prices = np.array(self.closing_prices)\n self.volumns = np.array(self.volumns)\n print('start draw')\n mp.figure('S-SEC', facecolor='lightgray') # 设定窗口标题,窗口背景色\n mp.title(self.num, fontsize=18) # 设定窗口内标题\n\n mp.xlabel('Date', fontsize=14) # 设定x轴标题\n mp.ylabel('Price', fontsize=14) # 设定y轴标题\n mp.grid(linestyle=':') # 设定图标网格线\n mp.tick_params(labelsize=10) # 设定刻度参数文字大小\n # 设置可定定位器\n ax = mp.gca() # 获取当前坐标轴\n maloc = md.WeekdayLocator(byweekday=md.MO) # 每周一 一个主刻度\n miloc = md.DayLocator() # 每天一个子刻度\n ax.xaxis.set_major_locator(maloc)\n # 设置主刻度日期的格式\n ax.xaxis.set_major_formatter(md.DateFormatter('%Y-%m-%d'))\n\n ax.xaxis.set_minor_locator(miloc)\n dates = self.dates.astype(md.datetime.datetime) # 转日期格式\n\n # 收盘走势线\n mp.plot(dates, self.closing_prices, label='Closing_prices', linewidth=2, color='black', alpha=1.0)\n # 绘制蜡烛图\n # 调整颜色\n rise = self.closing_prices >= self.opening_prices\n color = [('white' if x else 'green') for x in rise]\n ecolor = [('red' if x else 'green') for x in rise]\n # 绘制实体\n heights = self.closing_prices - self.opening_prices\n mp.bar(dates, heights, 0.8, self.opening_prices, color=color, edgecolor=ecolor, align='center',zorder=-4)\n # 绘制影线\n mp.vlines(dates,self.lowerest_price, self.hightest_prices, color=ecolor, zorder=-5)\n\n # 实现加权卷积\n # 通过指数函数,寻求一组卷积核\n kernel = np.exp(np.linspace(-1, 0, 5))\n kernel = kernel[::-1]\n\n # 绘制5日均线-加权卷积运算\n sma53 = np.convolve(self.closing_prices, kernel, 'valid') / kernel.sum()\n mp.plot(dates[4:], sma53, label='SMA-5days+', linewidth=2, color='gray', alpha=0.7, zorder=-4)\n # print('sma5+:',sma53[-5:])\n #  求5日布林带\n stds = np.zeros(sma53.size)\n for i in range(stds.size):\n stds[i] = self.closing_prices[i:i + 5].std()\n lowers = sma53 - 2 * stds\n mp.plot(dates[4:], lowers, label='lowers', linewidth=2, color='gray', alpha=0.2)\n # print('lowers:',lowers[-5:])\n uppers = sma53 + 2 * stds\n mp.plot(dates[4:], uppers, label='uppers', linewidth=2, color='gray', alpha=0.2)\n # print('uppers:',uppers[-5:])\n mp.fill_between(dates[4:], uppers, lowers, uppers > lowers, color='gray', alpha=0.2, zorder=-1)\n\n mp.legend(loc='lower right', fontsize=10, )\n mp.gcf().autofmt_xdate() # 自动斜化\n\n mp.show()\n\n\nif __name__=='__main__':\n Details(600745,'实验')" }, { "alpha_fraction": 0.5208333134651184, "alphanum_fraction": 0.75, "avg_line_length": 53.46666717529297, "blob_id": "974801db11b6b692fcd96e10d66c7beda2aa1262", "content_id": "8d23c7e55253fbfaf38c24838f671147f36e45c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 816, "license_type": "no_license", "max_line_length": 288, "num_lines": 15, "path": "/Stock SEC/dictset.py", "repo_name": "chsoftworld/S-SEC-demo-", "src_encoding": "UTF-8", "text": "dic = {\n\n'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n'Accept-Encoding':'gzip, deflate',\n'Accept-Language':'zh-CN,zh;q=0.9',\n'Cache-Control':'max-age=0',\n'Connection':'keep-alive',\n'Cookie':'Hm_lvt_85261bbccca7731cac0375109980ddf5=1563243079; __utmc=90353546; __utmz=90353546.1563243079.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=90353546.1687968940.1563243079.1563243079.1563262167.2; s_histo=601678; Hm_lpvt_85261bbccca7731cac0375109980ddf5=1563264268',\n'Host':'www.aigaogao.com',\n'Referer':'http://www.aigaogao.com/tools/history.html?s=604675',\n'Upgrade-Insecure-Requests':'1',\n'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',\n\n\n}" } ]
4
okdshin/mutelang
https://github.com/okdshin/mutelang
120689684e389e33820fc213059d284baa1c8622
e49e47cab24b9202cb41b8f4fed7bc7a674911c2
779327c11b431e51c4d690ce40efcedd010076f4
refs/heads/master
2023-08-20T00:57:34.730783
2019-11-08T13:06:18
2019-11-08T13:06:18
202,376,020
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6630824208259583, "alphanum_fraction": 0.6810035705566406, "avg_line_length": 20.461538314819336, "blob_id": "804c2fd490231831f484bc9e15c671e5befb15e1", "content_id": "26d5d35fdc6f5abc75a12f7be646147929aa9de7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 48, "num_lines": 13, "path": "/midi2wav.py", "repo_name": "okdshin/mutelang", "src_encoding": "UTF-8", "text": "import pretty_midi\nfrom scipy.io import wavfile\n\n\ndef main(midi_filename, wav_filename):\n midi = pretty_midi.PrettyMIDI(midi_filename)\n audio = midi.fluidsynth()\n wavfile.write(wav_filename, 44100, audio)\n\n\nif __name__ == '__main__':\n import fire\n fire.Fire(main)\n" }, { "alpha_fraction": 0.4268125891685486, "alphanum_fraction": 0.43296852707862854, "avg_line_length": 32.609195709228516, "blob_id": "cef15b6bfe3cb378e05c7bd442a23e00c87cf318", "content_id": "c916388c50f6a09f02130c1969fa9294fe2fef6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5848, "license_type": "no_license", "max_line_length": 100, "num_lines": 174, "path": "/mutec.py", "repo_name": "okdshin/mutelang", "src_encoding": "UTF-8", "text": "import subprocess\n\n\nclass EOL(Exception):\n pass\n\n\nclass Parser:\n def __init__(self, filename, code):\n self.filename = filename\n self.cur = 0\n self.code = code\n self.look_ahead = code[0]\n\n self.bpm = 120\n self.velocity = 90\n self.instrument = 'Cello'\n self.note_code = ''\n self.note_code_reset_flag = False\n\n self.middle_midi_list = []\n\n self.drum_mode = False\n self.instruments = []\n\n def _match(self, x):\n if self.look_ahead == x:\n self._consume()\n else:\n raise RuntimeError(\"not match {}\".format(x))\n\n def _match_str(self, xs):\n for x in xs:\n self._match(x)\n\n def _ignore_ws(self):\n while self.look_ahead in ' \\t\\n':\n self._consume()\n\n def _ws(self):\n if self.look_ahead not in ' \\t\\n':\n raise RuntimeError(\"not match white space\")\n self._ignore_ws()\n\n def _int(self):\n int_str = ''\n while self.look_ahead in '0123456789':\n int_str += self.look_ahead\n self._consume()\n return int(int_str)\n\n def _str(self):\n s = ''\n while self.look_ahead.isalpha() or self.look_ahead in \"0123456789\":\n s += self.look_ahead\n self._consume()\n return s\n\n def _consume(self):\n self.cur += 1\n if len(self.code) == self.cur:\n raise EOL\n self.look_ahead = self.code[self.cur]\n\n def process_note_code(self):\n print('note code', self.note_code)\n filename = '{0}-{2}-{1}.mid'.format(self.filename, self.instrument,\n len(self.middle_midi_list))\n print(\"process\", self.instrument)\n if '-' in self.instrument:\n subprocess.call(\n 'echo \\'{code}\\' | python3 drum_seq.py \\'[{insts}]\\' {bpm} {filename} {velocity}'\n .format(code=self.note_code,\n insts=','.join(['\"' + s + '\"' for s in self.instruments]),\n bpm=self.bpm,\n velocity=self.velocity,\n filename=filename),\n shell=True)\n else:\n subprocess.call(\n 'echo \\'{code}\\' | python3 chord_bass_seq.py \\'{inst}\\' {bpm} {filename} {velocity}'\n .format(code=self.note_code,\n inst=self.instrument,\n bpm=self.bpm,\n velocity=self.velocity,\n filename=filename),\n shell=True)\n self.middle_midi_list.append(filename)\n self.note_code = ''\n\n def parse(self):\n try:\n while True:\n self._ignore_ws()\n if self.look_ahead == 'b':\n self._match_str('bpm')\n self._ignore_ws()\n self._match('=')\n self._ignore_ws()\n self.bpm = self._int()\n print('bpm', self.bpm)\n self._ws()\n elif self.look_ahead == 'v':\n self._match_str('velocity')\n self._ignore_ws()\n self._match('=')\n self._ignore_ws()\n self.velocity = self._int()\n print('velocity', self.velocity)\n self._ws()\n elif self.look_ahead == 'i':\n if self.note_code != '':\n self.process_note_code()\n self._match_str('instrument')\n self._ignore_ws()\n self._match('=')\n self._ignore_ws()\n if self.drum_mode:\n self._match('{')\n self._ignore_ws()\n instruments = []\n instruments.append(self._str())\n self._ignore_ws()\n while self.look_ahead == ',':\n self._consume()\n self._ignore_ws()\n instruments.append(self._str())\n self._ignore_ws()\n self._match('}')\n self.instruments = instruments\n self.instrument = '-'.join(instruments)\n print('instrument detected', self.instrument)\n else:\n self.instrument = self._str()\n print('instrument detected', self.instrument)\n self._ws()\n elif self.look_ahead == 'd':\n print()\n print(self.code[self.cur:])\n self._match_str('drum')\n self.drum_mode = True\n print(\"drum_mode on\")\n elif self.look_ahead == '|':\n print('note code detect')\n while self.look_ahead != '\\n':\n self.note_code += self.look_ahead\n self._consume()\n except EOL:\n print(\"end\")\n if self.note_code != '':\n print('note code', self.note_code)\n self.process_note_code()\n\n print(\"stack\", self.middle_midi_list)\n subprocess.call('python3 stack_midi.py \\'[{0}]\\' {1}.mid'.format(\n ','.join(['\"' + s + '\"' for s in self.middle_midi_list]),\n self.filename),\n shell=True)\n\n\ndef main(filename):\n with open(filename, 'r') as f:\n code = f.read()\n parser = Parser(filename, code)\n try:\n parser.parse()\n except RuntimeError as e:\n print('\"{}\"'.format(parser.look_ahead))\n raise e\n\n\nif __name__ == \"__main__\":\n import fire\n fire.Fire(main)\n" }, { "alpha_fraction": 0.4613312780857086, "alphanum_fraction": 0.4715275168418884, "avg_line_length": 33.4238395690918, "blob_id": "b21231bf89136aa6855bade8808947f4a024afc4", "content_id": "f3d943e8b80740389677f955fef92072e9f015fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5198, "license_type": "no_license", "max_line_length": 78, "num_lines": 151, "path": "/chord_bass_seq.py", "repo_name": "okdshin/mutelang", "src_encoding": "UTF-8", "text": "import sys\nimport math\nimport pretty_midi\n\n\nclass Note:\n def __init__(self, base: str, accidental: str, octave_num: int):\n self.base = base\n self.accidental = accidental\n self.octave_num = octave_num\n\n def name(self):\n return self.base + self.accidental + str(self.octave_num)\n\n def __repr__(self):\n return self.name()\n\n\nclass MidiGenerator:\n def __init__(self, instrument, bpm, velocity):\n self.dt4 = int((60 * 1000) / bpm)\n self.t = 0\n self.velocity = velocity\n\n program = pretty_midi.instrument_name_to_program(instrument)\n self.inst = pretty_midi.Instrument(program=program)\n\n def append_rest(self, rest_type):\n dt = self.dt4 * 2**(2 - math.log2(rest_type))\n self.t += dt\n\n def append_note(self, note_type, note_list):\n dt = self.dt4 * 2**(2 - math.log2(note_type))\n print(note_list, dt)\n for note in note_list:\n note_number = pretty_midi.note_name_to_number(note.name())\n note = pretty_midi.Note(velocity=self.velocity,\n pitch=note_number,\n start=self.t/1000,\n end=(self.t + dt)/1000)\n self.inst.notes.append(note)\n self.t += dt\n\n def finish_bar(self):\n left = self.t % (4*self.dt4)\n if left != 0:\n self.t += left\n\n def write(self, filename):\n midi = pretty_midi.PrettyMIDI()\n midi.instruments.append(self.inst)\n midi.write(filename)\n\n\nclass EOL(Exception):\n pass\n\n\nclass Parser:\n def __init__(self, midi_gen, code):\n self.cur = 0\n self.midi_gen = midi_gen\n self.code = code\n self.look_ahead = code[0]\n self.note_list = []\n self.note_list_reset_flag = False\n self.last_note_base = 'c'\n self.last_octave = 3\n\n def _match(self, x):\n if self.look_ahead == x:\n self._consume()\n else:\n raise RuntimeError(\"not match {}\".format(x))\n\n def _consume(self):\n self.cur += 1\n if len(self.code) == self.cur:\n raise EOL\n self.look_ahead = self.code[self.cur]\n\n def parse(self):\n try:\n while True:\n if self.look_ahead == '|':\n print('finish bar')\n self.midi_gen.finish_bar()\n self._consume()\n elif self.look_ahead in (' ', '\\t', '\\n'):\n print('ignore')\n self._consume()\n elif self.look_ahead in \"abcdefg\":\n print('set note', self.look_ahead)\n if self.note_list_reset_flag:\n self.note_list = []\n self.note_list_reset_flag = False\n note_base = self.look_ahead\n self._consume()\n if self.look_ahead in \"!#\":\n accidental = self.look_ahead\n self._consume()\n else:\n accidental = ''\n if self.look_ahead in \"0123456789\":\n octave = int(self.look_ahead)\n self._consume()\n else:\n octave = int(self.last_octave)\n if (ord(self.last_note_base) - ord(note_base)) > 0:\n print(\"+1 octave\")\n octave += 1\n self.note_list.append(\n Note(note_base.capitalize(), accidental, octave))\n self.last_note_base = note_base\n self.last_octave = octave\n elif self.look_ahead in \".*\":\n print('rest')\n if self.look_ahead == '.':\n self.midi_gen.append_rest(16)\n elif self.look_ahead == '*':\n self.midi_gen.append_rest(4)\n self._consume()\n elif self.look_ahead in \"ihqox\":\n self.note_list_reset_flag = True\n if self.look_ahead == 'i':\n self.midi_gen.append_note(1, self.note_list)\n elif self.look_ahead == 'h':\n self.midi_gen.append_note(2, self.note_list)\n elif self.look_ahead == 'q':\n self.midi_gen.append_note(4, self.note_list)\n elif self.look_ahead == 'o':\n self.midi_gen.append_note(8, self.note_list)\n elif self.look_ahead == 'x':\n self.midi_gen.append_note(16, self.note_list)\n self._consume()\n else:\n raise RuntimeError(\"invalid charactor: \", self.look_ahead)\n except EOL:\n print(\"end\")\n\n\ndef main(instrument: str, bpm: int, filename: str, velocity: int):\n midi_gen = MidiGenerator(instrument, bpm, velocity)\n parser = Parser(midi_gen, sys.stdin.read())\n parser.parse()\n midi_gen.write(filename)\n\n\nif __name__ == '__main__':\n import fire\n fire.Fire(main)\n" }, { "alpha_fraction": 0.6723646521568298, "alphanum_fraction": 0.6723646521568298, "avg_line_length": 24.071428298950195, "blob_id": "159c30c628cbe97fff70033484d173ab8e3a0a69", "content_id": "fd1989e9bafe1f1610ec521941abab0d3b0f009f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 57, "num_lines": 14, "path": "/stack_midi.py", "repo_name": "okdshin/mutelang", "src_encoding": "UTF-8", "text": "import pretty_midi\n\n\ndef main(src_filename_list, dst_filename):\n dst_midi = pretty_midi.PrettyMIDI()\n for filename in src_filename_list:\n src_midi = pretty_midi.PrettyMIDI(filename)\n dst_midi.instruments.extend(src_midi.instruments)\n dst_midi.write(dst_filename)\n\n\nif __name__ == '__main__':\n import fire\n fire.Fire(main)\n" }, { "alpha_fraction": 0.40697672963142395, "alphanum_fraction": 0.6395348906517029, "avg_line_length": 13.333333015441895, "blob_id": "6bb811bed810abd34a05450a85fc9b141e2c7ac5", "content_id": "e171b9b3f3cfb0515743b81e89ce6b80520ed788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 86, "license_type": "no_license", "max_line_length": 18, "num_lines": 6, "path": "/requirements.txt", "repo_name": "okdshin/mutelang", "src_encoding": "UTF-8", "text": "fire==0.2.1\nmido==1.2.9\nnumpy==1.17.0\npretty-midi==0.2.8\nsix==1.12.0\ntermcolor==1.1.0\n" }, { "alpha_fraction": 0.4744821786880493, "alphanum_fraction": 0.4853726327419281, "avg_line_length": 32.212764739990234, "blob_id": "2144c11f040785bef60a2e1bef0576145d5b8ca1", "content_id": "f6be6d75aedf65bdc386fa231fde4479de1c2272", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4683, "license_type": "no_license", "max_line_length": 73, "num_lines": 141, "path": "/drum_seq.py", "repo_name": "okdshin/mutelang", "src_encoding": "UTF-8", "text": "import sys\nimport math\nimport pretty_midi\n\n\nclass Note:\n def __init__(self, base: str, accidental: str, octave_num: int):\n self.base = base\n self.accidental = accidental\n self.octave_num = octave_num\n\n def name(self):\n return self.base + self.accidental + str(self.octave_num)\n\n def __repr__(self):\n return self.name()\n\n\nclass MidiGenerator:\n def __init__(self, instrument_list, bpm, velocity):\n self.dt4 = int((60 * 1000) / bpm)\n self.t = 0\n self.velocity = velocity\n self.instrument_list = instrument_list\n\n program = 20 #pretty_midi.instrument_name_to_program(instrument)\n self.inst = pretty_midi.Instrument(program=program, is_drum=True)\n\n def append_rest(self, rest_type):\n dt = self.dt4 * 2**(2 - math.log2(rest_type))\n self.t += dt\n\n def append_note(self, note_type, index_list):\n dt = self.dt4 * 2**(2 - math.log2(note_type))\n print(index_list, dt)\n for index in index_list:\n note_number = pretty_midi.drum_name_to_note_number(\n self.instrument_list[index])\n note = pretty_midi.Note(velocity=self.velocity,\n pitch=note_number,\n start=self.t / 1000,\n end=(self.t + dt) / 1000)\n self.inst.notes.append(note)\n self.t += dt\n\n def finish_bar(self):\n left = self.t % (4 * self.dt4)\n if left != 0:\n self.t += left\n\n def write(self, filename):\n midi = pretty_midi.PrettyMIDI()\n midi.instruments.append(self.inst)\n midi.write(filename)\n\n\nclass EOL(Exception):\n pass\n\n\nclass Parser:\n def __init__(self, midi_gen, code):\n self.cur = 0\n self.midi_gen = midi_gen\n self.code = code\n self.look_ahead = code[0]\n self.index_list = []\n self.index_list_reset_flag = False\n self.last_index = 'c'\n\n def _match(self, x):\n if self.look_ahead == x:\n self._consume()\n else:\n raise RuntimeError(\"not match {}\".format(x))\n\n def _consume(self):\n self.cur += 1\n if len(self.code) == self.cur:\n raise EOL\n self.look_ahead = self.code[self.cur]\n\n def parse(self):\n try:\n while True:\n if self.look_ahead == ';':\n print('end')\n return\n elif self.look_ahead == '|':\n print('finish bar')\n self.midi_gen.finish_bar()\n self._consume()\n elif self.look_ahead in (' ', '\\t', '\\n'):\n print('ignore')\n self._consume()\n elif self.look_ahead in \"0123456789\":\n print('set index', self.look_ahead)\n if self.index_list_reset_flag:\n self.index_list = []\n self.index_list_reset_flag = False\n index = int(self.look_ahead)\n self._consume()\n self.index_list.append(index)\n self.last_index = index\n elif self.look_ahead in \".*\":\n print('rest')\n if self.look_ahead == '.':\n self.midi_gen.append_rest(16)\n elif self.look_ahead == '*':\n self.midi_gen.append_rest(4)\n self._consume()\n elif self.look_ahead in \"ihqox\":\n self.index_list_reset_flag = True\n if self.look_ahead == 'i':\n self.midi_gen.append_note(1, self.index_list)\n elif self.look_ahead == 'h':\n self.midi_gen.append_note(2, self.index_list)\n elif self.look_ahead == 'q':\n self.midi_gen.append_note(4, self.index_list)\n elif self.look_ahead == 'o':\n self.midi_gen.append_note(8, self.index_list)\n elif self.look_ahead == 'x':\n self.midi_gen.append_note(16, self.index_list)\n self._consume()\n else:\n print(self.look_ahead)\n raise\n except EOL:\n print(\"end\")\n\n\ndef main(instrument_list: str, bpm: int, filename: str, velocity: int):\n midi_gen = MidiGenerator(instrument_list, bpm, velocity)\n parser = Parser(midi_gen, sys.stdin.read())\n parser.parse()\n midi_gen.write(filename)\n\n\nif __name__ == '__main__':\n import fire\n fire.Fire(main)\n" }, { "alpha_fraction": 0.6698337197303772, "alphanum_fraction": 0.6912114024162292, "avg_line_length": 11.630000114440918, "blob_id": "82336a054019c0f8dedb80e5341d6bd54ac7f5fd", "content_id": "104790135f20ed91d6ba3ebacfda2ebd3753acca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2181, "license_type": "no_license", "max_line_length": 87, "num_lines": 100, "path": "/README.md", "repo_name": "okdshin/mutelang", "src_encoding": "UTF-8", "text": "# Mutelang\n\nMutelangは音楽作成用の言語です.ソースコードからmidiファイルを生成することができます.\n\n## セットアップとサンプル実行\n\n```python\npip install -r requirements.txt\npython mutec.py test.mu\n```\n\n# Muteファイルの書き方\n\n## 非ドラムパートの書き方\n\n### 曲のスピード(bpm)を設定する\n\n`bpm = <num>`と書いてbpmを設定します.\n\n```\nbpm = 80\n```\n\n### 楽器(instrument)を設定する\n\n`instrument = <name>`と書いてinstrumentを設定します.\n\n```\ninstrument = AcousticBass\n```\n\n### 音の大きさ(velocity)を設定する\n\n`velocity = <num>`と書いてvelocityを設定します.\n\n```\nvelocity = 80\n```\n\n### 楽譜を書く\n(([a-g]|[a-g][!#])[0-9])\\*[ihqox]|[.\\*]\n\na-fは音階を表します.\n\n!はフラット,#はシャープを表します.\n\n0-9はオクターブを表します.\n\nihqoxは以下の通りです.\n\n- i:1分音符(*i*dentical)\n- h:2分音符(*h*alf)\n- q:4分音符(*q*uarter)\n- o:8分音符(*o*cta)\n- x:16分音符(he*x*)\n\n音階・オクターブは省略可能です.省略した場合は最後に設定した音階・オクターブが設定されます.\n\n.は16分休符,\\*は4分休符です.\n\n```\n| f3a3c3qqoo.xo |\n```\n\n## ドラムパートの書き方\n\n### ドラムモードに切り替える\n\n`drum`と書いた行以降はドラムモードになります.\n\n```\ndrum\n```\n\n### 打楽器のリストを設定する\n\nドラムモードでは楽器を複数設定できます.\n\n```\ninstrument = {BassDrum1, AcousticSnare, SplashCymbal}\n```\n\n### 音の大きさ(velocity)を設定する\n\n`velocity = <num>`と書いてvelocityを設定します.\n\n```\nvelocity = 80\n```\n\n### 楽譜を書く\n\nTODO\n\n# mutecの処理フロー\n\n1. mutecがmuteファイルをパースして通常はinstrumentごとに`chord_bass_seq`を呼び出す.drumモード時には`drum_seq`を呼び出す.\n1. `chord_bass_seq`と`drum_seq`はmidiファイルを生成する.\n1. 最後にmutecは`stack_midi`を呼び出す.\n1. `stack_midi`はmidiファイルを束ねてひとつのmidiファイルを生成する.\n" } ]
7
CNXTEoE/randomorg-python
https://github.com/CNXTEoE/randomorg-python
2ba6cdbf3d270fd870464d5203706dd805ec24dc
c706480b6a5945c42148eed6153e02fd4e1a3464
9ebaab1c58681e1c9ac760a6551259e804ff39be
refs/heads/master
2017-06-23T17:51:40.814327
2016-10-01T18:20:15
2016-10-01T18:20:15
83,178,003
1
0
null
2017-02-26T02:43:13
2016-10-01T18:19:39
2016-10-01T18:20:19
null
[ { "alpha_fraction": 0.6555023789405823, "alphanum_fraction": 0.6746411323547363, "avg_line_length": 25.125, "blob_id": "3cc6c4f2a795b3f23e1045c64c5ba94bb4e70121", "content_id": "4843a7facec472801fb8072fda8e16ac828756f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 82, "num_lines": 16, "path": "/randomorg.py", "repo_name": "CNXTEoE/randomorg-python", "src_encoding": "UTF-8", "text": "import requests\n\nurl = 'https://www.random.org/cgi-bin/randbyte/?nbytes=16384&format=f'\n\n\ndef get_random_bytes():\n response = requests.get(url)\n if 'You have used your quota' in response.text or response.status_code != 200:\n raise Exception('Done!')\n\n random_bytes = response.content\n with open('random.bin', 'ab') as outfile:\n outfile.write(random_bytes)\n\nwhile True:\n get_random_bytes()\n" } ]
1
KagenLH/forme-app
https://github.com/KagenLH/forme-app
80b7859b59869f3feec58878433b261a36369279
2da4f650dbd5ee43c45663ae3f0a03261a1aefcc
8a74ae0af37833d871408e708eb9eff6f4a59084
refs/heads/main
2023-08-11T01:29:15.236479
2021-10-05T20:16:31
2021-10-05T20:16:31
394,454,162
12
2
null
2021-08-09T22:17:55
2021-09-24T00:32:04
2021-10-05T20:16:31
CSS
[ { "alpha_fraction": 0.7196531891822815, "alphanum_fraction": 0.7196531891822815, "avg_line_length": 23.785715103149414, "blob_id": "384107cd4ce67def469580707c44843dfa809f64", "content_id": "9fe2d85a19e75aee11a023c7c9bbecda19ac306e", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 346, "license_type": "no_license", "max_line_length": 61, "num_lines": 14, "path": "/react-app/src/components/EditForm/EditFormContainer/EditFormContainer.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import styles from \"./EditFormContainer.module.css\";\nimport TopBar from \"../../CreateForm/FormContainer/TopBar\";\nimport EditFormEngine from \"./EditFormEngine/EditFormEngine\";\n\nconst EditFormContainer = () => {\n\treturn (\n\t\t<div className={styles.form_wrapper}>\n\t\t\t<TopBar />\n\t\t\t<EditFormEngine />\n\t\t</div>\n\t);\n};\n\nexport default EditFormContainer;" }, { "alpha_fraction": 0.6682242751121521, "alphanum_fraction": 0.6682242751121521, "avg_line_length": 20.399999618530273, "blob_id": "bcca4badd485d798892d036658430f1dbcc00e65", "content_id": "5287cb82c179629cc8bd71bc3b7a40c4a4af636b", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 214, "license_type": "no_license", "max_line_length": 45, "num_lines": 10, "path": "/react-app/src/components/auth/AuthHeader/index.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import styles from \"./AuthHeader.module.css\";\nconst AuthHeader = () => {\n\treturn (\n\t\t<div className={styles.header_wrapper}>\n\t\t\t<div className={styles.logo}>FORMe!</div>\n\t\t</div>\n\t);\n};\n\nexport default AuthHeader;\n" }, { "alpha_fraction": 0.7062706351280212, "alphanum_fraction": 0.7128713130950928, "avg_line_length": 39.400001525878906, "blob_id": "b9141b723ec4f866eafe7906b0013861c58ef0ec", "content_id": "4c06da91cea0a3c124a90c1df928d1de625c139e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1212, "license_type": "no_license", "max_line_length": 149, "num_lines": 30, "path": "/app/forms/signup_form.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField\nfrom wtforms.validators import Email, ValidationError, InputRequired, Length, EqualTo\nfrom app.models import User\n\n\ndef user_exists(form, field):\n # Checking if user exists\n email = field.data\n user = User.query.filter(User.email == email).first()\n if user:\n raise ValidationError('Email address is already in use.')\n\n\ndef username_exists(form, field):\n # Checking if username is already in use\n username = field.data\n user = User.query.filter(User.username == username).first()\n if user:\n raise ValidationError('Username is already in use.')\n\n\nclass SignUpForm(FlaskForm):\n username = StringField(\n 'username', validators=[InputRequired(message='Input Required'), Length(max=40, message='Must be less than 40 characters'), username_exists])\n email = StringField('email', validators=[InputRequired(), Length(\n max=40, message='Must be less than 40 characters'), Email(message='Invalid'), user_exists])\n password = PasswordField('password', validators=[\n InputRequired(), EqualTo('confirm', message='Passwords must match')])\n confirm = PasswordField('confirm')\n" }, { "alpha_fraction": 0.6354066729545593, "alphanum_fraction": 0.662200927734375, "avg_line_length": 27.243244171142578, "blob_id": "2bf863e385d576c2033b13e14b0745e5d07f7520", "content_id": "bf0f00f611d8c4eccf9242ab3fc7776eec0d1d27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1045, "license_type": "no_license", "max_line_length": 72, "num_lines": 37, "path": "/migrations/versions/20210816_135552_.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: fa590b961f4f\nRevises: ffdc0a98111c\nCreate Date: 2021-08-16 13:55:52.581549\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fa590b961f4f'\ndown_revision = 'ffdc0a98111c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('forms',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=50), nullable=True),\n sa.Column('owner_id', sa.Integer(), nullable=False),\n sa.Column('description', sa.Text(), nullable=True),\n sa.Column('label_align', sa.String(length=10), nullable=True),\n sa.Column('description_align', sa.String(length=10), nullable=True),\n sa.Column('title_align', sa.String(length=10), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('forms')\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6125497817993164, "alphanum_fraction": 0.6125497817993164, "avg_line_length": 24.743589401245117, "blob_id": "667ba729f12fec9b867e93bdf433dbcc83c370ca", "content_id": "ddcbb6dd5b52ef59e34ef299014130347e4d01a3", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 66, "num_lines": 39, "path": "/react-app/src/components/CreateForm/FormContainer/TopBar/TopBar.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import styles from \"./TopBar.module.css\";\nimport { Link, useHistory } from \"react-router-dom\";\nimport React from \"react\";\nimport { useDispatch } from \"react-redux\";\nimport { logout } from \"../../../../store/session\";\nimport logo from \"../../../../assets/images/forme-logo-white.png\";\n\nconst TopBar = () => {\n\tconst history = useHistory();\n\tconst dispatch = useDispatch();\n\n\tconst onLogout = async (e) => {\n\t\tawait dispatch(logout());\n\t\thistory.push(\"/\");\n\t};\n\treturn (\n\t\t<div className={styles.topbar}>\n\t\t\t<div className={styles.left_container}>\n\t\t\t\t<Link to=\"/forms\">\n\t\t\t\t\t<div className={styles.logo}></div>\n\t\t\t\t</Link>\n\t\t\t</div>\n\t\t\t<div className={styles.right_container}>\n\t\t\t\t<ul className={styles.right_list}>\n\t\t\t\t\t<li className={styles.options_list}>\n\t\t\t\t\t\t{/* <Link className={styles.options_link}>Logout</Link> */}\n\t\t\t\t\t\t<button\n\t\t\t\t\t\t\tclassName={styles.options_link}\n\t\t\t\t\t\t\tonClick={onLogout}>\n\t\t\t\t\t\t\tLogout\n\t\t\t\t\t\t</button>\n\t\t\t\t\t</li>\n\t\t\t\t</ul>\n\t\t\t</div>\n\t\t</div>\n\t);\n};\n\nexport default TopBar;\n" }, { "alpha_fraction": 0.7113019824028015, "alphanum_fraction": 0.7242715358734131, "avg_line_length": 29.921875, "blob_id": "ceca53a1b8a91bdded0f1b23b1bbaf5a1aa4088a", "content_id": "207dd62d00987015f6696a07f7a9508b01f30da8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5937, "license_type": "no_license", "max_line_length": 281, "num_lines": 192, "path": "/README.md", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "# ForMe\n\n[Feature List](https://github.com/KagenLH/forme-app/wiki/Feature-List)\n\n[Database Schema](https://github.com/KagenLH/forme-app/wiki/Database-Schema)\n\n[Frontend Routes](https://github.com/KagenLH/forme-app/wiki/Frontend-Routes)\n\n## About ForMe\n\n[ForMe](https://forme-live.herokuapp.com/), a Wufoo clone, is a fullstack app to created to build customizable forms. Users can create new forms by adding field types, field settings, and form settings. The created forms are then rendered for the user to preview, share, or delete.\n\n## Site in Action\n\n![](2021-09-08-14-27-47.png)\n![](2021-09-08-14-29-19.png)\n![](2021-09-08-14-30-42.png)\n\n## Application Architecture\n\nForMe's frontend is built on a React with Redux to manage the application's state. Python backend is used to serve the frontend using Flask. PostgreSQL is used as the application's database.\n\n## Frontend Overview\n\nForMe uses React for the frontedn to provid a smooth and snappy experience for the user\n\n### Frontend Technologies Used\n\n#### React\n\nReact is a front-end Javascript library used to handle webpage renderings and UI. Everything displayed to the user is served with React.\n\n#### Redux\n\nRedux is used by the Foodlewd app to make API calls the the backend and handle state in the Redux store.\n\n## Backend Overview\n\nForMe uses Python with a Flask framework to queery the PostgrSQL database.\n\n### Backend Technologies Used\n\n#### Flask\n\nFlask is an extensible framework used as ForMe's backend. Flask created the routes to respong to API calls made by the frontend.\n\n#### SQLAlchemy\n\nSQLAlchemy is an Object Relational Mapper used with Flask to write queries to to and from the database. Mapping classes are used to create relationships and columns between multiple tables in the database.\n\n#### Alembic\n\nAlembic is used to migrate the created class tables to the database and keep track of all the migration versions.\n\n## Conclusions and Next Steps\n\nUsers are able to create and save forms, so the next step would be to make a way for those forms to be shared and filled out. The resulting form information can then be used to compile data within a chart, graph, or any other metric for the form owner to use.\n\nForms right now are shared by a copy and paste link. Shared forms should be sent and managed by email, so non-users can still fill out the form without having to sign up.\n\n## Installation Instructions\n\n1. Clone this repository (only this branch)\n\n ```bash\n git clone https://github.com/appacademy-starters/python-project-starter.git\n ```\n\n2. Install dependencies\n\n ```bash\n pipenv install --dev -r dev-requirements.txt && pipenv install -r requirements.txt\n ```\n\n3. Create a **.env** file based on the example with proper settings for your\n development environment\n4. Setup your PostgreSQL user, password and database and make sure it matches your **.env** file\n\n5. Get into your pipenv, migrate your database, seed your database, and run your flask app\n\n ```bash\n pipenv shell\n ```\n\n ```bash\n flask db upgrade\n ```\n\n ```bash\n flask seed all\n ```\n\n ```bash\n flask run\n ```\n\n6. To run the React App in development, checkout the [README](./react-app/README.md) inside the `react-app` directory.\n\n---\n\n_IMPORTANT!_\nIf you add any python dependencies to your pipfiles, you'll need to regenerate your requirements.txt before deployment.\nYou can do this by running:\n\n```bash\npipenv lock -r > requirements.txt\n```\n\n_ALSO IMPORTANT!_\npsycopg2-binary MUST remain a dev dependency because you can't install it on apline-linux.\nThere is a layer in the Dockerfile that will install psycopg2 (not binary) for us.\n\n---\n\n## Deploy to Heroku\n\n1. Before you deploy, don't forget to run the following command in order to\n ensure that your production environment has all of your up-to-date\n dependencies. You only have to run this command when you have installed new\n Python packages since your last deployment, but if you aren't sure, it won't\n hurt to run it again.\n\n ```bash\n pipenv lock -r > requirements.txt\n ```\n\n2. Create a new project on Heroku\n3. Under Resources click \"Find more add-ons\" and add the add on called \"Heroku Postgres\"\n4. Install the [Heroku CLI](https://devcenter.heroku.com/articles/heroku-command-line)\n5. Run\n\n ```bash\n heroku login\n ```\n\n6. Login to the heroku container registry\n\n ```bash\n heroku container:login\n ```\n\n7. Update the `REACT_APP_BASE_URL` variable in the Dockerfile.\n This should be the full URL of your Heroku app: i.e. \"https://flask-react-aa.herokuapp.com\"\n8. Push your docker container to heroku from the root directory of your project.\n (If you are using an M1 mac, follow [these steps below](#for-m1-mac-users) instead, then continue on to step 9.)\n This will build the Dockerfile and push the image to your heroku container registry.\n\n ```bash\n heroku container:push web -a {NAME_OF_HEROKU_APP}\n ```\n\n9. Release your docker container to heroku\n\n ```bash\n heroku container:release web -a {NAME_OF_HEROKU_APP}\n ```\n\n10. set up your database\n\n ```bash\n heroku run -a {NAME_OF_HEROKU_APP} flask db upgrade\n heroku run -a {NAME_OF_HEROKU_APP} flask seed all\n ```\n\n11. Under Settings find \"Config Vars\" and add any additional/secret .env\n variables.\n\n12. profit\n\n### For M1 Mac users\n\n(Replaces **Step 8**)\n\n1. Build image with linux platform for heroku servers. Replace\n {NAME_OF_HEROKU_APP} with your own tag:\n\n ```bash=\n docker buildx build --platform linux/amd64 -t {NAME_OF_HEROKU_APP} .\n ```\n\n2. Tag your app with the url for your apps registry. Make sure to use the name\n of your Heroku app in the url and tag name:\n\n ```bash=2\n docker tag {NAME_OF_HEROKU_APP} registry.heroku.com/{NAME_OF_HEROKU_APP}/web\n ```\n\n3. Use docker to push the image to the Heroku container registry:\n\n ```bash=3\n docker push registry.heroku.com/{NAME_OF_HEROKU_APP}/web\n ```\n" }, { "alpha_fraction": 0.5347222089767456, "alphanum_fraction": 0.5370370149612427, "avg_line_length": 20.600000381469727, "blob_id": "2c409e108500f495099df08d0c28876cf466e574", "content_id": "c596558931857fbd38d6b90eb21034f4e214417f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 66, "num_lines": 20, "path": "/app/seeds/forms.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "from app.models import db, Form\n\n\ndef seed_forms():\n test = Form(\n title = \"Test Form Render\",\n owner_id = 1,\n description = \"\",\n label_placement = \"\",\n description_align = \"\",\n title_align = \"\",\n )\n\n db.session.add(test)\n db.session.commit()\n\n\ndef undo_forms():\n db.session.execute('TRUNCATE forms RESTART IDENTITY CASCADE;')\n db.session.commit()\n" }, { "alpha_fraction": 0.5684729218482971, "alphanum_fraction": 0.6118226647377014, "avg_line_length": 25.710525512695312, "blob_id": "84b63f5ccfbdf4feace8df84ff3c27779fd2f0b2", "content_id": "3e53d46cd5b5ab2538a6eb86bc056e0a21899d94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 65, "num_lines": 38, "path": "/migrations/versions/20210820_171546_.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 94f5eda37179\nRevises: b3e721c02f48\nCreate Date: 2021-08-20 17:15:46.455809\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '94f5eda37179'\ndown_revision = 'b3e721c02f48'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'label',\n existing_type=sa.VARCHAR(length=55),\n nullable=False)\n op.alter_column('forms', 'title',\n existing_type=sa.VARCHAR(length=50),\n nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('forms', 'title',\n existing_type=sa.VARCHAR(length=50),\n nullable=True)\n op.alter_column('fields', 'label',\n existing_type=sa.VARCHAR(length=55),\n nullable=True)\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.7233502268791199, "alphanum_fraction": 0.7233502268791199, "avg_line_length": 28.185184478759766, "blob_id": "9d006aaa6c016489a25deae67f9592a652e9a060", "content_id": "29b21db447658aea6d3a639cb399e363e87466ff", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 788, "license_type": "no_license", "max_line_length": 79, "num_lines": 27, "path": "/react-app/src/store/errors.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "// Type constants\nconst SET_ERRORS = \"errors/SET_ERRORS\";\n\n// export the Action Creators, so we can dispatch in other thunks\nexport const setErrors = (errors) => ({\n\ttype: SET_ERRORS,\n\terrors,\n});\n\n// Don't need Thunk Creator because never specifically fetching\n// for errors. We will receive them from all the other routes\n\n// Reducer\n// initial state is an array because want to display all errors no matter\n// what in the current slice of state. Don't need to key in\nconst initialState = [];\n\nconst errorReducer = (_state, action) => {\n\tswitch (action.type) {\n\t\tcase SET_ERRORS:\n\t\t\treturn action.errors; // completely replace, don't spread previous state in\n\t\tdefault:\n\t\t\treturn initialState; // never want to save the errors state, so empty it out\n\t}\n};\n\nexport default errorReducer;\n" }, { "alpha_fraction": 0.6063694357872009, "alphanum_fraction": 0.6700636744499207, "avg_line_length": 25.16666603088379, "blob_id": "e336a5713d61723a032f35c1c73ebbd15f1d845e", "content_id": "06a2d53146b4ed046a387de8e2a07219e0ba8d75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 785, "license_type": "no_license", "max_line_length": 78, "num_lines": 30, "path": "/migrations/versions/20210821_113310_.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: d0c387e43ca4\nRevises: 94f5eda37179\nCreate Date: 2021-08-21 11:33:10.206199\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd0c387e43ca4'\ndown_revision = '94f5eda37179'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('forms', sa.Column('field_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'forms', 'fields', ['field_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'forms', type_='foreignkey')\n op.drop_column('forms', 'field_id')\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5940321087837219, "alphanum_fraction": 0.600050151348114, "avg_line_length": 29.4427490234375, "blob_id": "a01ed17e9d566e722de1613de7bd36c0ed3b8acc", "content_id": "eea653f85dc730771dbea235b4f91681d3df1b40", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 3988, "license_type": "no_license", "max_line_length": 145, "num_lines": 131, "path": "/react-app/src/components/Forms/index.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import React, { useEffect } from \"react\";\nimport { useDispatch, useSelector } from \"react-redux\";\nimport { Link, Redirect } from \"react-router-dom\";\n\nimport { getUserForms, deleteForm, createForm } from \"../../store/forms.js\";\nimport FormsTable from \"./FormsTable.js\";\nimport \"./Forms.css\";\nimport NavBar from \"../NavBar/NavBar.js\";\n\n// TODO: Redirect unregistered users to a login page\n// TODO: Do something about share button on form creator\n// TODO: create read me for whole project\n//? forme-live.herokuapp.com\nfunction FormsManager() {\n\tconst dispatch = useDispatch();\n\tconst forms = useSelector((state) => state.forms);\n\tconst { user } = useSelector((state) => state.session); // get the logged in user's info\n\n\t// console.log('*** FORMS COMPONENT USER DATA ***', user)\n\n\tuseEffect(() => {\n\t\tdispatch(getUserForms(user.id)); // only gets forms owned by the current user\n\t}, [dispatch, user.id]);\n\n\t// console.log('*** FORMS COMPONENT FORM STATE DATA ***', forms)\n\n\tconst handleDeleteForm = async (formId) => {\n\t\tawait dispatch(deleteForm(formId));\n\t};\n\n // //! testing only\n // const fieldData1 = {\n // type: 'text',\n // label: 'Test Field 1',\n // required: false,\n // choices: ['choice1', 'choice2', 'choice3']\n // }\n\n // //! testing only\n // const fieldData2 = {\n // type: 'text',\n // label: 'Test Field 2',\n // required: false,\n // choices: ['choice1', 'choice2', 'choice3']\n // }\n\n // //! testing only\n // const fieldData3 = {\n // type: 'text',\n // label: 'Test Field 3',\n // required: false,\n // choices: ['choice1', 'choice2', 'choice3']\n // }\n\n\n\t// //! testing only\n\t// const formData = {\n\t// owner_id: user.id,\n\t// title: \"Testing form creation\",\n\t// description: `This form tests form creation for ${user.email}.`,\n\t// titleAlignment: null,\n\t// labelPlacement: null,\n\t// descriptionAlignment: null,\n\t// fields: [fieldData1, fieldData2, fieldData3]\n\t// }\n\n\t//! testing only\n\t// const formData2 = {\n\t// owner_id: 2, // should only show up if you are logged in as [email protected]\n\t// title: \"Testing form ownership\",\n\t// description: \"This form tests form ownership for Marnie.\",\n\t// title_align: null,\n\t// label_align: null,\n\t// description_align: null\n\t// }\n\n\t//! testing only\n\t// const formData3 = {\n\t// owner_id: 3, // should only show up if you are logged in as [email protected]\n\t// title: \"Testing form ownership\",\n\t// description: \"This form tests form ownership for Bobbie.\",\n\t// title_align: null,\n\t// label_align: null,\n\t// description_align: null\n\t// }\n\n\t// used for testing/handling new form creation\n\t// add the following click handler to button.form-create-button in the JSX below:\n\t// onClick={() => handleSubmit(formData)}\n\t//! move to another component?\n\t// const handleSubmit = async (formData) => {\n\t// await dispatch(createForm(formData))\n\t// }\n\treturn forms && user ? (\n\t\t<>\n\t\t\t<NavBar />\n\t\t\t<div className=\"form-manager-container\">\n\t\t\t\t<div className=\"form-manager-page-header\">\n\t\t\t\t\t<div className=\"form-manager-header\">\n\t\t\t\t\t\t<h1 id=\"form-manager-title\">Forms</h1>\n\t\t\t\t\t</div>\n\t\t\t\t\t<div className=\"form-manager-actions\">\n\t\t\t\t\t\t<Link to=\"/forms/build\">\n\t\t\t\t\t\t\t<button className=\"form-create-button\">\n\t\t\t\t\t\t\t\t<div className=\"plus-logo\"></div>\n\t\t\t\t\t\t\t\t<span className=\"create-new-form\">\n\t\t\t\t\t\t\t\t\tCreate New Form\n\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t</button>\n\t\t\t\t\t\t</Link>\n\t\t\t\t\t\t{/* <Link to=\"/forms\"><button onClick={() => handleSubmit(formData)} className=\"form-create-button\"> ! TEST FORM CREATE</button></Link> */}\n\t\t\t\t\t</div>\n\t\t\t\t</div>\n\t\t\t\t<div className=\"forms-area\">\n\t\t\t\t\t<div className=\"utility-bar\">{/* search bar */}</div>\n\t\t\t\t\t<div className=\"form-manager-forms\">\n\t\t\t\t\t\t<FormsTable\n\t\t\t\t\t\t\tforms={forms}\n\t\t\t\t\t\t\thandleDeleteForm={handleDeleteForm}\n\t\t\t\t\t\t\tuser={user}\n\t\t\t\t\t\t/>\n\t\t\t\t\t</div>\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t</>\n\t) : (\n\t\t<Redirect to='/login' />\n\t);\n}\n\nexport default FormsManager;\n" }, { "alpha_fraction": 0.5715696215629578, "alphanum_fraction": 0.6130306124687195, "avg_line_length": 25.657894134521484, "blob_id": "70cab6a4c1d9a08cd28e77a7b349a9a8112d5007", "content_id": "237f64a0ed09f7075e058bb9e6aecf9496c55fb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 65, "num_lines": 38, "path": "/migrations/versions/20210820_103408_.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: b05fdd14ae4f\nRevises: 4563136888fd\nCreate Date: 2021-08-20 10:34:08.171553\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b05fdd14ae4f'\ndown_revision = '4563136888fd'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'label',\n existing_type=sa.VARCHAR(length=55),\n nullable=True)\n op.alter_column('fields', 'required',\n existing_type=sa.BOOLEAN(),\n nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'required',\n existing_type=sa.BOOLEAN(),\n nullable=True)\n op.alter_column('fields', 'label',\n existing_type=sa.VARCHAR(length=55),\n nullable=False)\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6848341226577759, "alphanum_fraction": 0.6848341226577759, "avg_line_length": 30.259260177612305, "blob_id": "45a7dacf5e77578594fb95c26700c4d831358cd0", "content_id": "3950542f5b38404f67abf43feb0b426995b52730", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "no_license", "max_line_length": 66, "num_lines": 27, "path": "/app/seeds/users.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "from app.models import db, User\n\n\n# Adds a demo user, you can add other users here if you want\ndef seed_users():\n demo = User(\n username='Demo', email='[email protected]', password='password')\n marnie = User(\n username='marnie', email='[email protected]', password='password')\n bobbie = User(\n username='bobbie', email='[email protected]', password='password')\n\n db.session.add(demo)\n db.session.add(marnie)\n db.session.add(bobbie)\n\n db.session.commit()\n\n\n# Uses a raw SQL query to TRUNCATE the users table.\n# SQLAlchemy doesn't have a built in function to do this\n# TRUNCATE Removes all the data from the table, and RESET IDENTITY\n# resets the auto incrementing primary key, CASCADE deletes any\n# dependent entities\ndef undo_users():\n db.session.execute('TRUNCATE users RESTART IDENTITY CASCADE;')\n db.session.commit()\n" }, { "alpha_fraction": 0.6018276810646057, "alphanum_fraction": 0.6318537592887878, "avg_line_length": 22.9375, "blob_id": "57bba70d278167ef26d4efd95f770acf5fc8b5d0", "content_id": "f47e8fc39b2efe966bd5d45b946fd808b0b470f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 766, "license_type": "no_license", "max_line_length": 65, "num_lines": 32, "path": "/migrations/versions/20210820_100009_.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: beeeac90e4ba\nRevises: d25f4d1b7ea0\nCreate Date: 2021-08-20 10:00:09.924819\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'beeeac90e4ba'\ndown_revision = 'd25f4d1b7ea0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'required',\n existing_type=sa.BOOLEAN(),\n nullable=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'required',\n existing_type=sa.BOOLEAN(),\n nullable=False)\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.5051643252372742, "alphanum_fraction": 0.5051643252372742, "avg_line_length": 27.399999618530273, "blob_id": "5bd40e548e31a188a47e7aedb23917d76a917ce9", "content_id": "06823fa70cee76c211962bdd56d5ef9c57fec60e", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 2130, "license_type": "no_license", "max_line_length": 91, "num_lines": 75, "path": "/react-app/src/components/Forms/FormsTable.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { Link } from \"react-router-dom\";\nimport ShareFormLinkModal from \"./ShareFormLinkModal.js\";\n\n// TODO: Make form names into links that lead to form render page\n\nfunction FormsTableBody({ forms, handleDeleteForm, user }) {\n\treturn (\n\t\t<div className=\"form-table\">\n\t\t\t<table>\n\t\t\t\t<thead className=\"table-head\">\n\t\t\t\t\t<tr className=\"column-title-container\">\n\t\t\t\t\t\t<th className=\"column-title-name\">Name</th>\n\t\t\t\t\t\t<th className=\"form-action-label\">\n\t\t\t\t\t\t\t<th className=\"share-label\">Share</th>\n\t\t\t\t\t\t\t<th className=\"edit-label\">Edit</th>\n\t\t\t\t\t\t\t<th className=\"delete-label\">Delete</th>\n\t\t\t\t\t\t</th>\n\t\t\t\t\t</tr>\n\t\t\t\t</thead>\n\t\t\t\t<tbody>\n\t\t\t\t\t{Object.values(forms).map((form) => {\n\t\t\t\t\t\treturn (\n\t\t\t\t\t\t\t<tr className=\"form-table-rows\">\n\t\t\t\t\t\t\t\t<td className=\"form-table-data\" key={form.id}>\n\t\t\t\t\t\t\t\t\t<Link to={`/forms/${form?.id}/shared`} className='form-title-link'>\n\t\t\t\t\t\t\t\t\t\t{form?.title}\n\t\t\t\t\t\t\t\t\t</Link>\n\t\t\t\t\t\t\t\t\t<td className='form-description'>\n\t\t\t\t\t\t\t\t\t\t{form?.description}\n\t\t\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t\t<td className=\"form-actions\">\n\t\t\t\t\t\t\t\t\t<td className=\"share-buttons\" key={form?.id}>\n\t\t\t\t\t\t\t\t\t\t{/* <i className=\"fa fa-share-alt-square\" title='Share' aria-hidden=\"true\" /> */}\n\t\t\t\t\t\t\t\t\t\t<ShareFormLinkModal formId={form?.id} />\n\t\t\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tuser.id === form?.owner_id ? (\n\t\t\t\t\t\t\t\t\t\t\t<td\n\t\t\t\t\t\t\t\t\t\t\t\tclassName=\"edit-button\"\n\t\t\t\t\t\t\t\t\t\t\t>\n\t\t\t\t\t\t\t\t\t\t\t\t<Link to={`forms/${form?.id}/edit`} className=\"forms-edit-button\">\n\t\t\t\t\t\t\t\t\t\t\t\t\t<i className=\"fa fa-edit\"></i>\n\t\t\t\t\t\t\t\t\t\t\t\t</Link>\n\t\t\t\t\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t\t\t\t): null\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t// only render delete button if user owns form\n\t\t\t\t\t\t\t\t\t\tuser.id === form?.owner_id ? (\n\t\t\t\t\t\t\t\t\t\t\t<td\n\t\t\t\t\t\t\t\t\t\t\t\tclassName=\"delete-buttons\"\n\t\t\t\t\t\t\t\t\t\t\t\tonClick={(e) =>\n\t\t\t\t\t\t\t\t\t\t\t\t\thandleDeleteForm(form?.id)\n\t\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\t\t<i\n\t\t\t\t\t\t\t\t\t\t\t\t\tclassName=\"fa fa-trash\"\n\t\t\t\t\t\t\t\t\t\t\t\t\ttitle=\"Delete\"\n\t\t\t\t\t\t\t\t\t\t\t\t\taria-hidden=\"true\"\n\t\t\t\t\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t\t\t\t) : null\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t</td>\n\t\t\t\t\t\t\t</tr>\n\t\t\t\t\t\t);\n\t\t\t\t\t})}\n\t\t\t\t</tbody>\n\t\t\t</table>\n\t\t</div>\n\t);\n}\n\nexport default FormsTableBody;\n" }, { "alpha_fraction": 0.5961039066314697, "alphanum_fraction": 0.6259739995002747, "avg_line_length": 23.0625, "blob_id": "5803b5eeaba97a90add052f7b7aaf2cf979c7136", "content_id": "f99092164a3a563c7b9b0064f9708452b53bb2de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 770, "license_type": "no_license", "max_line_length": 65, "num_lines": 32, "path": "/migrations/versions/20210821_161057_.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4df12f583573\nRevises: 2453c767d036\nCreate Date: 2021-08-21 16:10:57.556468\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4df12f583573'\ndown_revision = '2453c767d036'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'form_id',\n existing_type=sa.INTEGER(),\n nullable=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'form_id',\n existing_type=sa.INTEGER(),\n nullable=False)\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.8518518805503845, "alphanum_fraction": 0.8518518805503845, "avg_line_length": 33, "blob_id": "1d5de4e5525929126f15f98cae0e7bb36e87c21f", "content_id": "8b8ba0755427b83c5a8f17e3ada0376fbd9fb976", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 135, "license_type": "no_license", "max_line_length": 72, "num_lines": 4, "path": "/.env.example", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "FLASK_APP=app\nFLASK_ENV=development\nSECRET_KEY=dIamtheglobolob\nDATABASE_URL=postgresql://starter_app_dev:password@localhost/starter_app" }, { "alpha_fraction": 0.5994151830673218, "alphanum_fraction": 0.5994151830673218, "avg_line_length": 18, "blob_id": "afa4efaa6d637bbbf6f539cad5bcd4760d354793", "content_id": "3977737b2fb8d8a8a8e373c0d2465bdc84327a09", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 342, "license_type": "no_license", "max_line_length": 59, "num_lines": 18, "path": "/react-app/src/components/Errors/Errors.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import { useSelector } from \"react-redux\";\nimport styles from \"./Errors.module.css\";\n\nconst Errors = () => {\n\tconst errors = useSelector((state) => state.errorReducer);\n\n\treturn (\n\t\t<div>\n\t\t\t{errors?.map((error, ind) => (\n\t\t\t\t<div className={styles.error_div} key={ind}>\n\t\t\t\t\t{error}\n\t\t\t\t</div>\n\t\t\t))}\n\t\t</div>\n\t);\n};\n\nexport default Errors;\n" }, { "alpha_fraction": 0.5526315569877625, "alphanum_fraction": 0.5526315569877625, "avg_line_length": 20.714284896850586, "blob_id": "31214c30d83945dd8ac534d9d7974faa2739fd0a", "content_id": "63b2b3c0a2a3f2a8b91613c67d8e08c027f3c1c3", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 304, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/react-app/src/components/EditForm/EditForm.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import styles from \"./EditForm.module.css\";\nimport EditFormContainer from \"./EditFormContainer\";\n\nconst EditForm = () => {\n return (\n <>\n <div className={styles.form_container}>\n <EditFormContainer />\n </div>\n </>\n );\n};\n\nexport default EditForm;\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 21.5, "blob_id": "6d7670abbdebce67b78f92ce21d471a3cf1223b1", "content_id": "200da80666e6ce7883cac01d924b29887ea1d121", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "no_license", "max_line_length": 24, "num_lines": 4, "path": "/app/models/__init__.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "from .db import db\nfrom .user import User\nfrom .form import Form\nfrom .field import Field\n" }, { "alpha_fraction": 0.7103825211524963, "alphanum_fraction": 0.7103825211524963, "avg_line_length": 25.14285659790039, "blob_id": "08bc520feca32bd78ffee57d5caf5f7c65c26e2d", "content_id": "e3b086a26801f236e2ff6b35e549b34f030fe717", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 183, "license_type": "no_license", "max_line_length": 62, "num_lines": 7, "path": "/react-app/src/components/ContentWrap/ContentWrap.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import styles from \"./ContentWrap.module.css\";\n\nconst ContentWrap = ({ children }) => {\n\treturn <div className={styles.content_wrap}>{children}</div>;\n};\n\nexport default ContentWrap;\n" }, { "alpha_fraction": 0.6335766315460205, "alphanum_fraction": 0.6335766315460205, "avg_line_length": 23.03508758544922, "blob_id": "93c113ec5e72fe444945d590a8110ab5e62f3b7b", "content_id": "e73b7919bba8793ffa5ddac161fa03f966510469", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 1370, "license_type": "no_license", "max_line_length": 55, "num_lines": 57, "path": "/react-app/src/components/Forms/FormField.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import React, { useState } from \"react\";\nimport {\n\tcreateTextInput,\n\tcreateMultiLineText,\n\tcreateSelectField,\n\tcreateMultipleChoice,\n\tcreateCheckboxField,\n\tcreateNumericInput,\n} from \"@kagenlh/jsxfields\";\nimport \"./Forms.css\";\n\nfunction FormField({ field, label_placement }) {\n\tconst [inputValue, setInputValue] = useState(\"\");\n\n\treturn (\n\t\t<div className=\"form-field\">\n\t\t\t{field.type === \"text\"\n\t\t\t\t? createTextInput(inputValue, setInputValue, {\n\t\t\t\t\t\t...field,\n\t\t\t\t\t\tlabelPlacement: label_placement,\n\t\t\t\t })\n\t\t\t\t: null}\n\t\t\t{field.type === \"textarea\"\n\t\t\t\t? createMultiLineText(inputValue, setInputValue, {\n\t\t\t\t\t\t...field,\n\t\t\t\t\t\tlabelPlacement: label_placement,\n\t\t\t\t })\n\t\t\t\t: null}\n\t\t\t{field.type === \"select\"\n\t\t\t\t? createSelectField(inputValue, setInputValue, {\n\t\t\t\t\t\t...field,\n\t\t\t\t\t\tlabelPlacement: label_placement,\n\t\t\t\t })\n\t\t\t\t: null}\n\t\t\t{field.type === \"multipleChoice\"\n\t\t\t\t? createMultipleChoice(inputValue, setInputValue, {\n\t\t\t\t\t\t...field,\n\t\t\t\t\t\tlabelPlacement: label_placement,\n\t\t\t\t })\n\t\t\t\t: null}\n\t\t\t{field.type === \"checkbox\"\n\t\t\t\t? createCheckboxField(inputValue, setInputValue, {\n\t\t\t\t\t\t...field,\n\t\t\t\t\t\tlabelPlacement: label_placement,\n\t\t\t\t })\n\t\t\t\t: null}\n\t\t\t{field.type === \"number\"\n\t\t\t\t? createNumericInput(inputValue, setInputValue, {\n\t\t\t\t\t\t...field,\n\t\t\t\t\t\tlabelPlacement: label_placement,\n\t\t\t\t })\n\t\t\t\t: null}\n\t\t</div>\n\t);\n}\n\nexport default FormField;\n" }, { "alpha_fraction": 0.47427353262901306, "alphanum_fraction": 0.47548842430114746, "avg_line_length": 25.87996482849121, "blob_id": "44cc6c08b17aa3174a0e73fdea721e5636543d15", "content_id": "dfbdd9e7abbf2ea6331e1cf4e0cc31e4648d0c3b", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 30455, "license_type": "no_license", "max_line_length": 151, "num_lines": 1133, "path": "/react-app/src/components/CreateForm/FormContainer/FormEngine/FormEngine.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import styles from \"./FormEngine.module.css\";\nimport { useState } from \"react\";\nimport { useHistory } from \"react-router-dom\";\nimport {\n\tcreateTextInput,\n\tcreateMultiLineText,\n\tcreateSelectField,\n\tcreateMultipleChoice,\n\tcreateCheckboxField,\n\tcreateNumericInput,\n} from \"@kagenlh/jsxfields\";\n\nconst initialFieldState = {\n\tlabel: \"Untitled\",\n\tmaxLength: 255, // Used with text inputs to determine a maximum number of characters\n\trequired: false,\n\tplaceholder: \"\",\n\tinstructions: \"\", // If not empty creates a blurb of grey text to the right of the field\n\tchoices: [\"First Choice\", \"Second Choice\", \"Third Choice\"], // Used to determine the available options with selects, multiple choices, and checkboxes.\n};\n\nfunction toBool(str) {\n\tif (typeof str === \"string\")\n\t\tswitch (str.toLowerCase().trim()) {\n\t\t\tcase \"true\":\n\t\t\tcase \"yes\":\n\t\t\tcase \"1\":\n\t\t\t\treturn true;\n\t\t\tcase \"false\":\n\t\t\tcase \"no\":\n\t\t\tcase \"0\":\n\t\t\tcase null:\n\t\t\t\treturn false;\n\t\t\tdefault:\n\t\t\t\treturn Boolean(str);\n\t\t}\n\telse {\n\t\treturn str;\n\t}\n}\n\nexport default function FormEngine() {\n\tconst history = useHistory();\n\tconst [activeField, setActiveField] = useState(null);\n\tconst [activeTab, setActiveTab] = useState(\"add\");\n\tconst [formTitle, setFormTitle] = useState(\"Untitled Form\");\n\tconst [formDescription, setFormDescription] = useState(\n\t\t\"This is my form. Please fill it out. It's awesome!\"\n\t);\n\tconst [textValue, setTextValue] = useState(\"\");\n\tconst [formSettings, setFormSettings] = useState({\n\t\ttitleAlignment: \"flex-start\",\n\t\tdescriptionAlignment: \"flex-start\",\n\t\tlabelPlacement: \"top\",\n\t});\n\n\tconst [fieldSettings, setFieldSettings] = useState({\n\t\tfieldType: \"text\",\n\t\tfieldSize: \"small\",\n\t});\n\n\tconst [fieldChoices, setFieldChoices] = useState([\n\t\t\"First Choice\",\n\t\t\"Second Choice\",\n\t\t\"Third Choice\",\n\t]);\n\n\tconst [isCheckedRequired, setIsCheckedRequired] = useState(false);\n\tconst [maxChar, setMaxChar] = useState(25);\n\tconst [placeholderText, setPlaceholderText] = useState(\"\");\n\tconst [instructions, setInstructions] = useState(\"\");\n\n\tconst [jsxContent, setJsxContent] = useState([]);\n\tconst [multiLineValue, setMultiLineValue] = useState(\"\");\n\tconst [multiChoiceValue, setMultiChoiceValue] = useState(\"\");\n\tconst [numberValue, setNumberValue] = useState(\"\");\n\tconst [selectValue, setSelectValue] = useState(\"\");\n\tconst [checkboxValue, setcheckboxValue] = useState(\"\");\n\n\tconst [fieldLabel, setFieldLabel] = useState(\"Untitled\");\n\n\tconst toggleTab = (tab) => {\n\t\tif (tab === \"add\") {\n\t\t\tsetActiveTab(\"add\");\n\t\t}\n\t\tif (tab === \"field\") {\n\t\t\tsetActiveTab(\"field\");\n\t\t}\n\t\tif (tab === \"form\") {\n\t\t\tsetActiveTab(\"form\");\n\t\t}\n\t};\n\n\tconst onSave = async () => {\n\t\tconst fieldSettings = jsxContent.map((pair) => {\n\t\t\treturn pair[1];\n\t\t});\n\t\tconst formData = {\n\t\t\ttitle: formTitle,\n\t\t\tdescription: formDescription,\n\t\t\t...formSettings,\n\t\t\tfields: [...fieldSettings],\n\t\t};\n\n\t\tconst res = await fetch(\"/api/forms/build\", {\n\t\t\tmethod: \"POST\",\n\t\t\theaders: { \"Content-Type\": \"application/json\" },\n\t\t\tbody: JSON.stringify(formData),\n\t\t});\n\n\t\tif (res.ok) {\n\t\t\tconst data = res.json();\n\t\t}\n\n\t\thistory.push(\"/forms\");\n\t};\n\n\tconst updateAllFields = (e, tag) => {\n\t\tconst currentJSX = activeField;\n\n\t\tsetJsxContent((prevState) => {\n\t\t\treturn prevState.map((jsx) => {\n\t\t\t\tconst oldSettings = jsx[1];\n\t\t\t\tconst newSettings = { ...oldSettings, [tag]: e.target.value };\n\t\t\t\tif (newSettings.type === \"text\") {\n\t\t\t\t\tconst newJsx = createTextInput(\n\t\t\t\t\t\ttextValue,\n\t\t\t\t\t\tsetTextValue,\n\t\t\t\t\t\tnewSettings\n\t\t\t\t\t);\n\t\t\t\t\tconst newState = [newJsx, newSettings];\n\n\t\t\t\t\treturn newState;\n\t\t\t\t} else if (newSettings.type === \"textarea\") {\n\t\t\t\t\tconst newJsx = createMultiLineText(\n\t\t\t\t\t\ttextValue,\n\t\t\t\t\t\tsetTextValue,\n\t\t\t\t\t\tnewSettings\n\t\t\t\t\t);\n\t\t\t\t\tconst newState = [newJsx, newSettings];\n\t\t\t\t\treturn newState;\n\t\t\t\t} else if (newSettings.type === \"select\") {\n\t\t\t\t\tconst newJsx = createSelectField(\n\t\t\t\t\t\ttextValue,\n\t\t\t\t\t\tsetTextValue,\n\t\t\t\t\t\tnewSettings\n\t\t\t\t\t);\n\t\t\t\t\tconst newState = [newJsx, newSettings];\n\t\t\t\t\treturn newState;\n\t\t\t\t} else if (newSettings.type === \"multipleChoice\") {\n\t\t\t\t\tconst newJsx = createMultipleChoice(\n\t\t\t\t\t\ttextValue,\n\t\t\t\t\t\tsetTextValue,\n\t\t\t\t\t\tnewSettings\n\t\t\t\t\t);\n\t\t\t\t\tconst newState = [newJsx, newSettings];\n\t\t\t\t\treturn newState;\n\t\t\t\t} else if (newSettings.type === \"checkbox\") {\n\t\t\t\t\tconst newJsx = createCheckboxField(\n\t\t\t\t\t\ttextValue,\n\t\t\t\t\t\tsetTextValue,\n\t\t\t\t\t\tnewSettings\n\t\t\t\t\t);\n\t\t\t\t\tconst newState = [newJsx, newSettings];\n\t\t\t\t\treturn newState;\n\t\t\t\t} else {\n\t\t\t\t\tconst newJsx = createNumericInput(\n\t\t\t\t\t\ttextValue,\n\t\t\t\t\t\tsetTextValue,\n\t\t\t\t\t\tnewSettings\n\t\t\t\t\t);\n\t\t\t\t\tconst newState = [newJsx, newSettings];\n\t\t\t\t\treturn newState;\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\t\tsetActiveField(currentJSX);\n\t};\n\n\tconst updateFieldSettings = (e, tag) => {\n\t\tconst replacementIndex = jsxContent.findIndex(\n\t\t\t(jsx) => jsx[0] === activeField[0]\n\t\t);\n\t\tsetJsxContent((prevState) => {\n\t\t\tconst newState = [...prevState];\n\t\t\tconst oldSettings = jsxContent[replacementIndex][1];\n\t\t\tconst newSettings = { ...oldSettings, [tag]: e.target.value };\n\t\t\tif (newSettings.type === \"text\") {\n\t\t\t\tconst newJsx = createTextInput(\n\t\t\t\t\ttextValue,\n\t\t\t\t\tsetTextValue,\n\t\t\t\t\tnewSettings\n\t\t\t\t);\n\t\t\t\tnewState[replacementIndex] = [newJsx, newSettings];\n\t\t\t\tsetActiveField(newState[replacementIndex]);\n\t\t\t\treturn newState;\n\t\t\t} else if (newSettings.type === \"textarea\") {\n\t\t\t\tconst newJsx = createMultiLineText(\n\t\t\t\t\ttextValue,\n\t\t\t\t\tsetTextValue,\n\t\t\t\t\tnewSettings\n\t\t\t\t);\n\t\t\t\tnewState[replacementIndex] = [newJsx, newSettings];\n\t\t\t\tsetActiveField(newState[replacementIndex]);\n\t\t\t\treturn newState;\n\t\t\t} else if (newSettings.type === \"select\") {\n\t\t\t\tconst newJsx = createSelectField(\n\t\t\t\t\ttextValue,\n\t\t\t\t\tsetTextValue,\n\t\t\t\t\tnewSettings\n\t\t\t\t);\n\t\t\t\tnewState[replacementIndex] = [newJsx, newSettings];\n\t\t\t\tsetActiveField(newState[replacementIndex]);\n\t\t\t\treturn newState;\n\t\t\t} else if (newSettings.type === \"multipleChoice\") {\n\t\t\t\tconst newJsx = createMultipleChoice(\n\t\t\t\t\ttextValue,\n\t\t\t\t\tsetTextValue,\n\t\t\t\t\tnewSettings\n\t\t\t\t);\n\t\t\t\tnewState[replacementIndex] = [newJsx, newSettings];\n\t\t\t\tsetActiveField(newState[replacementIndex]);\n\t\t\t\treturn newState;\n\t\t\t} else if (newSettings.type === \"checkbox\") {\n\t\t\t\tconst newJsx = createCheckboxField(\n\t\t\t\t\ttextValue,\n\t\t\t\t\tsetTextValue,\n\t\t\t\t\tnewSettings\n\t\t\t\t);\n\t\t\t\tnewState[replacementIndex] = [newJsx, newSettings];\n\t\t\t\tsetActiveField(newState[replacementIndex]);\n\t\t\t\treturn newState;\n\t\t\t} else if (newSettings.type === \"number\") {\n\t\t\t\tconst newJsx = createNumericInput(\n\t\t\t\t\ttextValue,\n\t\t\t\t\tsetTextValue,\n\t\t\t\t\tnewSettings\n\t\t\t\t);\n\t\t\t\tnewState[replacementIndex] = [newJsx, newSettings];\n\t\t\t\tsetActiveField(newState[replacementIndex]);\n\t\t\t\treturn newState;\n\t\t\t}\n\t\t});\n\t};\n\n\treturn (\n\t\t<div className={styles.engine_container}>\n\t\t\t<div\n\t\t\t\tclassName={\n\t\t\t\t\tactiveTab === \"add\"\n\t\t\t\t\t\t? `${styles.settings_panel_add}`\n\t\t\t\t\t\t: activeTab === \"field\"\n\t\t\t\t\t\t? `${styles.settings_panel_field}`\n\t\t\t\t\t\t: `${styles.settings_panel_form}`\n\t\t\t\t}>\n\t\t\t\t<ul\n\t\t\t\t\tclassName={\n\t\t\t\t\t\tactiveTab === \"add\"\n\t\t\t\t\t\t\t? `${styles.add_field_tab_container}`\n\t\t\t\t\t\t\t: activeTab === \"field\"\n\t\t\t\t\t\t\t? `${styles.field_settings_tab_container}`\n\t\t\t\t\t\t\t: `${styles.form_settings_tab_container}`\n\t\t\t\t\t}>\n\t\t\t\t\t<li\n\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\ttoggleTab(\"add\");\n\t\t\t\t\t\t}}\n\t\t\t\t\t\tclassName={`${styles.add_field_tab} ${styles.none_add_field_hide}`}>\n\t\t\t\t\t\t{activeTab === \"add\" ? (\n\t\t\t\t\t\t\t<span className={styles.caret_down}>\n\t\t\t\t\t\t\t\t<i class=\"fas fa-caret-down\"></i>\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t) : (\n\t\t\t\t\t\t\t<span className={styles.caret_right}>\n\t\t\t\t\t\t\t\t<i className={\"fas fa-caret-right\"}></i>\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t)}\n\t\t\t\t\t\t<span className={styles.option_text}>Add a Field</span>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li\n\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\ttoggleTab(\"field\");\n\t\t\t\t\t\t}}\n\t\t\t\t\t\tclassName={` ${styles.field_settings_tab}`}>\n\t\t\t\t\t\t{activeTab === \"field\" ? (\n\t\t\t\t\t\t\t<span className={styles.caret_down}>\n\t\t\t\t\t\t\t\t<i class=\"fas fa-caret-down\"></i>\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t) : (\n\t\t\t\t\t\t\t<span className={styles.caret_right}>\n\t\t\t\t\t\t\t\t<i className={\"fas fa-caret-right\"}></i>\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t)}\n\t\t\t\t\t\t<span className={styles.option_text}>\n\t\t\t\t\t\t\tField Settings\n\t\t\t\t\t\t</span>\n\t\t\t\t\t</li>\n\t\t\t\t\t<li\n\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\ttoggleTab(\"form\");\n\t\t\t\t\t\t}}\n\t\t\t\t\t\tclassName={`${styles.form_settings_tab}`}>\n\t\t\t\t\t\t{activeTab === \"form\" ? (\n\t\t\t\t\t\t\t<span className={styles.caret_down}>\n\t\t\t\t\t\t\t\t<i class=\"fas fa-caret-down\"></i>\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t) : (\n\t\t\t\t\t\t\t<span className={styles.caret_right}>\n\t\t\t\t\t\t\t\t<i className={\"fas fa-caret-right\"}></i>\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t)}\n\t\t\t\t\t\t<span className={styles.option_text}>\n\t\t\t\t\t\t\tForm Settings\n\t\t\t\t\t\t</span>\n\t\t\t\t\t</li>\n\t\t\t\t</ul>\n\t\t\t\t{activeTab === \"add\" ? (\n\t\t\t\t\t<div className={styles.add_field_constructor}>\n\t\t\t\t\t\t<div className={styles.button_container}>\n\t\t\t\t\t\t\t<h3 className={styles.standard_text}>Standard</h3>\n\t\t\t\t\t\t\t<ul className={styles.left_col_stand}>\n\t\t\t\t\t\t\t\t<li\n\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\tstyles.standard_button_container\n\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t<button\n\t\t\t\t\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\t\t\t\t\tconst jsx = createTextInput(\n\t\t\t\t\t\t\t\t\t\t\t\ttextValue,\n\t\t\t\t\t\t\t\t\t\t\t\tsetTextValue\n\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\tsetJsxContent((prevState) => {\n\t\t\t\t\t\t\t\t\t\t\t\treturn [\n\t\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tjsx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttype: \"text\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t...initialFieldState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t\tclassName={`${styles.standard_button}`}\n\t\t\t\t\t\t\t\t\t\thref=\"#\">\n\t\t\t\t\t\t\t\t\t\t<b\n\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\tstyles.single_line_text_icon\n\t\t\t\t\t\t\t\t\t\t\t}></b>\n\t\t\t\t\t\t\t\t\t\t<span\n\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\tstyles.single_line_text_text\n\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\tSingle Line Text\n\t\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t\t</button>\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li\n\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\tstyles.standard_button_container\n\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t<button\n\t\t\t\t\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\t\t\t\t\tconst jsx = createMultiLineText(\n\t\t\t\t\t\t\t\t\t\t\t\tmultiLineValue,\n\t\t\t\t\t\t\t\t\t\t\t\tsetMultiLineValue\n\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\tsetJsxContent((prevState) => {\n\t\t\t\t\t\t\t\t\t\t\t\treturn [\n\t\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tjsx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttype: \"textarea\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t...initialFieldState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t\tclassName={`${styles.standard_button}`}\n\t\t\t\t\t\t\t\t\t\thref=\"#\">\n\t\t\t\t\t\t\t\t\t\t<b\n\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\tstyles.paragraph_text_icon\n\t\t\t\t\t\t\t\t\t\t\t}></b>\n\t\t\t\t\t\t\t\t\t\t<span\n\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\tstyles.paragraph_text_text\n\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\tParagraph Text\n\t\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t\t</button>\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li\n\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\tstyles.standard_button_container\n\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t<button\n\t\t\t\t\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\t\t\t\t\tconst jsx = createMultipleChoice(\n\t\t\t\t\t\t\t\t\t\t\t\tmultiChoiceValue,\n\t\t\t\t\t\t\t\t\t\t\t\tsetMultiChoiceValue\n\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\tsetJsxContent((prevState) => {\n\t\t\t\t\t\t\t\t\t\t\t\treturn [\n\t\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tjsx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttype: \"multipleChoice\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t...initialFieldState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t\tclassName={`${styles.standard_button}`}\n\t\t\t\t\t\t\t\t\t\thref=\"#\">\n\t\t\t\t\t\t\t\t\t\t<b\n\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\tstyles.multiple_choice_icon\n\t\t\t\t\t\t\t\t\t\t\t}></b>\n\t\t\t\t\t\t\t\t\t\t<span\n\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\tstyles.multiple_choice_text\n\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\tMultiple Choice\n\t\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t\t</button>\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li\n\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\tstyles.standard_button_container\n\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t{/* <button\n\t\t\t\t\t\t\t\t\t\tclassName={`${styles.standard_button} ${styles.section_break_button}`}\n\t\t\t\t\t\t\t\t\t\thref=\"#\">\n\t\t\t\t\t\t\t\t\t\t<b></b>\n\t\t\t\t\t\t\t\t\t\t<span\n\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\tstyles.section_break_text\n\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\tSection Break\n\t\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t\t</button> */}\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t</ul>\n\t\t\t\t\t\t\t<ul className={styles.right_col_stand}>\n\t\t\t\t\t\t\t\t<li\n\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\tstyles.standard_button_container\n\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t<button\n\t\t\t\t\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\t\t\t\t\tconst jsx = createNumericInput(\n\t\t\t\t\t\t\t\t\t\t\t\tnumberValue,\n\t\t\t\t\t\t\t\t\t\t\t\tsetNumberValue\n\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\tsetJsxContent((prevState) => {\n\t\t\t\t\t\t\t\t\t\t\t\treturn [\n\t\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tjsx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttype: \"number\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t...initialFieldState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t\tclassName={`${styles.standard_button}`}\n\t\t\t\t\t\t\t\t\t\thref=\"#\">\n\t\t\t\t\t\t\t\t\t\t<b className={styles.number_icon}></b>\n\t\t\t\t\t\t\t\t\t\t<span className={styles.number_text}>\n\t\t\t\t\t\t\t\t\t\t\tNumber\n\t\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t\t</button>\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li\n\t\t\t\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\t\t\t\tconst jsx = createCheckboxField(\n\t\t\t\t\t\t\t\t\t\t\tcheckboxValue,\n\t\t\t\t\t\t\t\t\t\t\tsetcheckboxValue\n\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\tsetJsxContent((prevState) => {\n\t\t\t\t\t\t\t\t\t\t\treturn [\n\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t\t\tjsx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttype: \"checkbox\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t...initialFieldState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\tstyles.standard_button_container\n\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t<button\n\t\t\t\t\t\t\t\t\t\tclassName={`${styles.standard_button}`}\n\t\t\t\t\t\t\t\t\t\thref=\"#\">\n\t\t\t\t\t\t\t\t\t\t<b className={styles.checkbox_icon}></b>\n\t\t\t\t\t\t\t\t\t\t<span\n\t\t\t\t\t\t\t\t\t\t\tclassName={styles.checkboxes_text}>\n\t\t\t\t\t\t\t\t\t\t\tCheckboxes\n\t\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t\t</button>\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li\n\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\tstyles.standard_button_container\n\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t<button\n\t\t\t\t\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\t\t\t\t\tconst jsx = createSelectField(\n\t\t\t\t\t\t\t\t\t\t\t\tselectValue,\n\t\t\t\t\t\t\t\t\t\t\t\tsetSelectValue\n\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\tsetJsxContent((prevState) => {\n\t\t\t\t\t\t\t\t\t\t\t\treturn [\n\t\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tjsx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttype: \"select\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t...initialFieldState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t\tclassName={`${styles.standard_button}`}\n\t\t\t\t\t\t\t\t\t\thref=\"#\">\n\t\t\t\t\t\t\t\t\t\t<b className={styles.dropdown_icon}></b>\n\t\t\t\t\t\t\t\t\t\t<span className={styles.dropdown_text}>\n\t\t\t\t\t\t\t\t\t\t\tDropdown\n\t\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t\t</button>\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li\n\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\tstyles.standard_button_container\n\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t{/* <button\n\t\t\t\t\t\t\t\t\t\tclassName={`${styles.standard_button} ${styles.page_break_button}`}\n\t\t\t\t\t\t\t\t\t\thref=\"#\">\n\t\t\t\t\t\t\t\t\t\t<b></b>\n\t\t\t\t\t\t\t\t\t\t<span className={styles.pagebreak_text}>\n\t\t\t\t\t\t\t\t\t\t\tPage Break\n\t\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t\t</button> */}\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t</ul>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t) : null}\n\t\t\t\t{activeTab === \"field\" ? (\n\t\t\t\t\t<ul className={styles.field_settings_constructor}>\n\t\t\t\t\t\t{activeField !== null ? (\n\t\t\t\t\t\t\t<form>\n\t\t\t\t\t\t\t\t<label className={styles.field_settings_label}>\n\t\t\t\t\t\t\t\t\tField Label\n\t\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t\t<div className={styles.field_label_container}>\n\t\t\t\t\t\t\t\t\t<textarea\n\t\t\t\t\t\t\t\t\t\tclassName={`${styles.field_settings_field_label} ${styles.input_boxes}`}\n\t\t\t\t\t\t\t\t\t\tvalue={fieldLabel}\n\t\t\t\t\t\t\t\t\t\tplaceholder={fieldLabel}\n\t\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\t\tsetFieldLabel(e.target.value);\n\t\t\t\t\t\t\t\t\t\t\tupdateFieldSettings(e, \"label\");\n\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t<li className={styles.field_type_li}>\n\t\t\t\t\t\t\t\t\t<label\n\t\t\t\t\t\t\t\t\t\tclassName={styles.field_settings_label}>\n\t\t\t\t\t\t\t\t\t\tField Type\n\t\t\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t\t\t<div\n\t\t\t\t\t\t\t\t\t\tclassName={styles.field_type_container}>\n\t\t\t\t\t\t\t\t\t\t<select\n\t\t\t\t\t\t\t\t\t\t\tclassName={`${styles.field_settings_field_type} ${styles.dropdown_boxes}`}\n\t\t\t\t\t\t\t\t\t\t\tvalue={fieldSettings.fieldType}\n\t\t\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\t\t\tsetFieldSettings(\n\t\t\t\t\t\t\t\t\t\t\t\t\t(prevState) => {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfieldType:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\te.target.value,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\tupdateFieldSettings(e, \"type\");\n\t\t\t\t\t\t\t\t\t\t\t}}>\n\t\t\t\t\t\t\t\t\t\t\t<option value=\"text\">\n\t\t\t\t\t\t\t\t\t\t\t\tSingle Line Text\n\t\t\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t\t\t\t<option value=\"textarea\">\n\t\t\t\t\t\t\t\t\t\t\t\tParagraph Text\n\t\t\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t\t\t\t<option value=\"multipleChoice\">\n\t\t\t\t\t\t\t\t\t\t\t\tMultiple Choice\n\t\t\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t\t\t\t<option value=\"number\">\n\t\t\t\t\t\t\t\t\t\t\t\tNumber\n\t\t\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t\t\t\t<option value=\"checkbox\">\n\t\t\t\t\t\t\t\t\t\t\t\tCheckboxes\n\t\t\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t\t\t\t<option value=\"select\">\n\t\t\t\t\t\t\t\t\t\t\t\tDropdown\n\t\t\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t\t\t</select>\n\t\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t{[\n\t\t\t\t\t\t\t\t\t\"select\",\n\t\t\t\t\t\t\t\t\t\"checkbox\",\n\t\t\t\t\t\t\t\t\t\"multipleChoice\",\n\t\t\t\t\t\t\t\t].includes(activeField[1].type) && (\n\t\t\t\t\t\t\t\t\t<fieldset\n\t\t\t\t\t\t\t\t\t\tclassName={styles.choices_container}>\n\t\t\t\t\t\t\t\t\t\t<legend\n\t\t\t\t\t\t\t\t\t\t\tclassName={`${styles.choices_legend}`}>\n\t\t\t\t\t\t\t\t\t\t\tChoices\n\t\t\t\t\t\t\t\t\t\t</legend>\n\t\t\t\t\t\t\t\t\t\t<ul>\n\t\t\t\t\t\t\t\t\t\t\t{fieldChoices.map((choice, i) => (\n\t\t\t\t\t\t\t\t\t\t\t\t<input\n\t\t\t\t\t\t\t\t\t\t\t\t\tclassName={`${styles.field_settings_choices} ${styles.input_boxes}`}\n\t\t\t\t\t\t\t\t\t\t\t\t\ttype=\"text\"\n\t\t\t\t\t\t\t\t\t\t\t\t\tvalue={choice}\n\t\t\t\t\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tlet newFieldChoices;\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsetFieldChoices(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(prevState) => {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconst newState =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconst changedIndex =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewState.findIndex(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfield\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t) =>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfield ===\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchoice\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewState[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchangedIndex\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t] =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\te.target.value;\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewFieldChoices =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewState;\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn newState;\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tconst replacementIndex =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tjsxContent.findIndex(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(jsx) =>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tjsx[0] ===\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tactiveField[0]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsetJsxContent(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(prevState) => {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconst newState =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconst oldSettings =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tjsxContent[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treplacementIndex\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t][1];\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconst newSettings =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t...oldSettings,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tchoices:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewFieldChoices,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewSettings.type ===\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"select\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconst newJsx =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcreateSelectField(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttextValue,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsetTextValue,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewSettings\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewState[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treplacementIndex\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t] = [\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewJsx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewSettings,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsetActiveField(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewState[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treplacementIndex\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn newState;\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t} else if (\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewSettings.type ===\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"multipleChoice\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconst newJsx =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcreateMultipleChoice(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttextValue,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsetTextValue,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewSettings\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewState[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treplacementIndex\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t] = [\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewJsx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewSettings,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsetActiveField(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewState[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treplacementIndex\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn newState;\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t} else if (\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewSettings.type ===\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"checkbox\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconst newJsx =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcreateCheckboxField(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttextValue,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsetTextValue,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewSettings\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewState[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treplacementIndex\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t] = [\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewJsx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewSettings,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t];\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsetActiveField(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnewState[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treplacementIndex\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn newState;\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t\t\t\t))}\n\t\t\t\t\t\t\t\t\t\t</ul>\n\t\t\t\t\t\t\t\t\t</fieldset>\n\t\t\t\t\t\t\t\t)}\n\t\t\t\t\t\t\t\t<div>\n\t\t\t\t\t\t\t\t\t<li className={styles.options_li}>\n\t\t\t\t\t\t\t\t\t\t<fieldset\n\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\tstyles.options_container\n\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\t<legend\n\t\t\t\t\t\t\t\t\t\t\t\tclassName={`${styles.option_legend}`}>\n\t\t\t\t\t\t\t\t\t\t\t\tOption\n\t\t\t\t\t\t\t\t\t\t\t</legend>\n\t\t\t\t\t\t\t\t\t\t\t<input\n\t\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\t\tstyles.required_checkbox\n\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\ttype=\"checkbox\"\n\t\t\t\t\t\t\t\t\t\t\t\tchecked={isCheckedRequired}\n\t\t\t\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\t\t\t\tsetIsCheckedRequired(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t!isCheckedRequired\n\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\te.target.value =\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t!isCheckedRequired;\n\t\t\t\t\t\t\t\t\t\t\t\t\tupdateFieldSettings(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\te,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"required\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t\t\t\t<label\n\t\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\t\tstyles.required_text\n\t\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\t\tRequired\n\t\t\t\t\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t\t\t\t</fieldset>\n\t\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t\t{[\"text\", \"textarea\"].includes(\n\t\t\t\t\t\t\t\t\t\tactiveField[1].type\n\t\t\t\t\t\t\t\t\t) && (\n\t\t\t\t\t\t\t\t\t\t<li className={styles.range_li}>\n\t\t\t\t\t\t\t\t\t\t\t<fieldset\n\t\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\t\tstyles.range_container\n\t\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\t\t<legend\n\t\t\t\t\t\t\t\t\t\t\t\t\tclassName={` ${styles.range_legend}`}>\n\t\t\t\t\t\t\t\t\t\t\t\t\tRange\n\t\t\t\t\t\t\t\t\t\t\t\t</legend>\n\t\t\t\t\t\t\t\t\t\t\t\t<div>\n\t\t\t\t\t\t\t\t\t\t\t\t\t<span>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t<label\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tstyles.max_char_text\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tMax Characters\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t<input\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttype=\"text\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tclassName={` ${styles.input_boxes} ${styles.max_char_input}`}\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tplaceholder={\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmaxChar\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tvalue={maxChar}\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsetMaxChar(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\te.target\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t.value\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tupdateFieldSettings(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\te,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"maxLength\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t\t\t\t</fieldset>\n\t\t\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t\t)}\n\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t{[\"text\", \"textarea\", \"number\"].includes(\n\t\t\t\t\t\t\t\t\tactiveField[1].type\n\t\t\t\t\t\t\t\t) && (\n\t\t\t\t\t\t\t\t\t<div>\n\t\t\t\t\t\t\t\t\t\t<label\n\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\tstyles.field_settings_label\n\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\tPlaceholder Text\n\t\t\t\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t\t\t\t<div\n\t\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\t\tstyles.placeholder_text_container\n\t\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\t\t<input\n\t\t\t\t\t\t\t\t\t\t\t\tclassName={`${styles.field_settings_placeholder_text}\n\t\t\t\t\t\t\t\t\t\t\t${styles.input_boxes}`}\n\t\t\t\t\t\t\t\t\t\t\t\tvalue={placeholderText}\n\t\t\t\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\t\t\t\tsetPlaceholderText(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\te.target.value\n\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t\tupdateFieldSettings(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\te,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"placeholder\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t\t\t\ttype=\"text\"\n\t\t\t\t\t\t\t\t\t\t\t\tmaxlength=\"50\"\n\t\t\t\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t)}\n\t\t\t\t\t\t\t\t<label className={styles.field_settings_label}>\n\t\t\t\t\t\t\t\t\tInstructions\n\t\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t\t<div className={styles.instructions_container}>\n\t\t\t\t\t\t\t\t\t<textarea\n\t\t\t\t\t\t\t\t\t\tclassName={`${styles.field_settings_instructions} ${styles.input_boxes}`}\n\t\t\t\t\t\t\t\t\t\tvalue={instructions}\n\t\t\t\t\t\t\t\t\t\tplaceholder={setInstructions}\n\t\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\t\tsetInstructions(e.target.value);\n\t\t\t\t\t\t\t\t\t\t\tupdateFieldSettings(\n\t\t\t\t\t\t\t\t\t\t\t\te,\n\t\t\t\t\t\t\t\t\t\t\t\t\"instructions\"\n\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t<button\n\t\t\t\t\t\t\t\t\ttype=\"button\"\n\t\t\t\t\t\t\t\t\tclassName={`${styles.standard_button} ${styles.delete_buttons}`}\n\t\t\t\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\t\t\t\tconst active = [...activeField];\n\t\t\t\t\t\t\t\t\t\tsetActiveField(null);\n\t\t\t\t\t\t\t\t\t\tsetJsxContent((prevState) => {\n\t\t\t\t\t\t\t\t\t\t\tconst newState = [...prevState];\n\t\t\t\t\t\t\t\t\t\t\tconst deletionIndex =\n\t\t\t\t\t\t\t\t\t\t\t\tnewState.findIndex(\n\t\t\t\t\t\t\t\t\t\t\t\t\t(jsx) =>\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tjsx[0] === active[0]\n\t\t\t\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\t\t\tnewState.splice(deletionIndex, 1);\n\t\t\t\t\t\t\t\t\t\t\treturn newState;\n\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t}}>\n\t\t\t\t\t\t\t\t\t<b className={styles.delete_icons}></b>\n\t\t\t\t\t\t\t\t\t<span className={styles.delete_text}>\n\t\t\t\t\t\t\t\t\t\tDelete\n\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t</button>\n\t\t\t\t\t\t\t</form>\n\t\t\t\t\t\t) : (\n\t\t\t\t\t\t\t<div className={styles.none_selected_notice}>\n\t\t\t\t\t\t\t\t<h3>\n\t\t\t\t\t\t\t\t\t<b\n\t\t\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\t\t\tstyles.no_field_selected_text\n\t\t\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t\t\tNo Field Selected\n\t\t\t\t\t\t\t\t\t</b>\n\t\t\t\t\t\t\t\t</h3>\n\t\t\t\t\t\t\t\t<p className={styles.please_click_text}>\n\t\t\t\t\t\t\t\t\tPlease click on a field in the form preview\n\t\t\t\t\t\t\t\t\ton the right to change its properties.\n\t\t\t\t\t\t\t\t</p>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t)}\n\t\t\t\t\t</ul>\n\t\t\t\t) : null}\n\t\t\t\t{activeTab === \"form\" ? (\n\t\t\t\t\t<div className={styles.form_settings_constructor}>\n\t\t\t\t\t\t<form>\n\t\t\t\t\t\t\t<label className={styles.form_settings_label}>\n\t\t\t\t\t\t\t\tForm Title\n\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t<div className={styles.form_title_container}>\n\t\t\t\t\t\t\t\t<input\n\t\t\t\t\t\t\t\t\tclassName={`${styles.form_settings_title}\n\t\t\t\t\t\t\t\t\t\t${styles.input_boxes}`}\n\t\t\t\t\t\t\t\t\tvalue={formTitle}\n\t\t\t\t\t\t\t\t\tplaceholder=\"Untitled Form\"\n\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\tsetFormTitle(e.target.value);\n\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t\ttype=\"text\"\n\t\t\t\t\t\t\t\t\tmaxlength=\"50\"\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<label className={styles.form_settings_label}>\n\t\t\t\t\t\t\t\tDescription\n\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t<div className={styles.form_description_container}>\n\t\t\t\t\t\t\t\t<textarea\n\t\t\t\t\t\t\t\t\tclassName={`${styles.form_settings_description} ${styles.input_boxes}`}\n\t\t\t\t\t\t\t\t\tvalue={formDescription}\n\t\t\t\t\t\t\t\t\tplaceholder=\"This is my form. Please fill it out. It's awesome!\"\n\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\tsetFormDescription(e.target.value);\n\t\t\t\t\t\t\t\t\t}}\n\t\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<label className={styles.form_settings_label}>\n\t\t\t\t\t\t\t\tForm Title Alignment\n\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t<div\n\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\tstyles.form_title_alignment_container\n\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t<select\n\t\t\t\t\t\t\t\t\tclassName={`${styles.form_settings_title_alignment} ${styles.dropdown_boxes}`}\n\t\t\t\t\t\t\t\t\tvalue={formSettings.titleAlignment}\n\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\tsetFormSettings((prevState) => {\n\t\t\t\t\t\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\ttitleAlignment: e.target.value,\n\t\t\t\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t}}>\n\t\t\t\t\t\t\t\t\t<option value=\"flex-start\">\n\t\t\t\t\t\t\t\t\t\tLeft Aligned\n\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t\t<option value=\"center\">\n\t\t\t\t\t\t\t\t\t\tCenter Aligned\n\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t\t<option value=\"flex-end\">\n\t\t\t\t\t\t\t\t\t\tRight Aligned\n\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t</select>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<label className={styles.form_settings_label}>\n\t\t\t\t\t\t\t\tDescription Alignment\n\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t<div\n\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\tstyles.form_description_alignment_container\n\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t<select\n\t\t\t\t\t\t\t\t\tclassName={`${styles.form_settings_description_alignment} ${styles.dropdown_boxes}`}\n\t\t\t\t\t\t\t\t\tvalue={formSettings.descriptionAlignment}\n\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\tsetFormSettings((prevState) => {\n\t\t\t\t\t\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\tdescriptionAlignment:\n\t\t\t\t\t\t\t\t\t\t\t\t\te.target.value,\n\t\t\t\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t}}>\n\t\t\t\t\t\t\t\t\t<option value=\"flex-start\">\n\t\t\t\t\t\t\t\t\t\tLeft Aligned\n\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t\t<option value=\"center\">\n\t\t\t\t\t\t\t\t\t\tCenter Aligned\n\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t\t<option value=\"flex-end\">\n\t\t\t\t\t\t\t\t\t\tRight Aligned\n\t\t\t\t\t\t\t\t\t</option>\n\t\t\t\t\t\t\t\t</select>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<label className={styles.form_settings_label}>\n\t\t\t\t\t\t\t\tLabel Placement\n\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t<div\n\t\t\t\t\t\t\t\tclassName={\n\t\t\t\t\t\t\t\t\tstyles.form_label_placement_container\n\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\t<select\n\t\t\t\t\t\t\t\t\tclassName={`${styles.form_settings_label_alignment} ${styles.dropdown_boxes}`}\n\t\t\t\t\t\t\t\t\tvalue={formSettings.labelPlacement}\n\t\t\t\t\t\t\t\t\tonChange={(e) => {\n\t\t\t\t\t\t\t\t\t\tsetFormSettings((prevState) => {\n\t\t\t\t\t\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\t\t\tlabelPlacement: e.target.value,\n\t\t\t\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\t\t\t});\n\n\t\t\t\t\t\t\t\t\t\tupdateAllFields(e, \"labelPlacement\");\n\t\t\t\t\t\t\t\t\t}}>\n\t\t\t\t\t\t\t\t\t<option value=\"top\">Top Aligned</option>\n\t\t\t\t\t\t\t\t\t<option value=\"left\">Left Aligned</option>\n\t\t\t\t\t\t\t\t</select>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</form>\n\t\t\t\t\t</div>\n\t\t\t\t) : null}\n\t\t\t</div>\n\t\t\t<div className={styles.form_preview}>\n\t\t\t\t<div className={styles.form_preview_title}>\n\t\t\t\t\t<div\n\t\t\t\t\t\tstyle={{\n\t\t\t\t\t\t\tdisplay: \"flex\",\n\t\t\t\t\t\t\tjustifyContent: formSettings.titleAlignment,\n\t\t\t\t\t\t}}>\n\t\t\t\t\t\t<div className={styles.actual_title}>{formTitle}</div>\n\t\t\t\t\t</div>\n\t\t\t\t\t<div\n\t\t\t\t\t\tstyle={{\n\t\t\t\t\t\t\tdisplay: \"flex\",\n\t\t\t\t\t\t\tjustifyContent: formSettings.descriptionAlignment,\n\t\t\t\t\t\t}}>\n\t\t\t\t\t\t<div className={styles.form_description}>\n\t\t\t\t\t\t\t{formDescription}\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t</div>\n\t\t\t\t<div className={styles.from_preview_body}>\n\t\t\t\t\t{jsxContent?.map((jsxcontent) => (\n\t\t\t\t\t\t<div\n\t\t\t\t\t\t\tclassName={styles.form_preview_field}\n\t\t\t\t\t\t\tkey={Math.random()}\n\t\t\t\t\t\t\tonClick={() => {\n\t\t\t\t\t\t\t\tsetActiveField(jsxcontent);\n\t\t\t\t\t\t\t\tsetFieldSettings((prevState) => {\n\t\t\t\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\t\t\t\t...prevState,\n\t\t\t\t\t\t\t\t\t\tfieldType: jsxcontent[1].type,\n\t\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\tsetFieldLabel(jsxcontent[1].label);\n\t\t\t\t\t\t\t\tsetInstructions(jsxcontent[1].instructions);\n\t\t\t\t\t\t\t\tsetFieldChoices(jsxcontent[1].choices);\n\t\t\t\t\t\t\t\tsetMaxChar(jsxcontent[1].maxLength);\n\n\t\t\t\t\t\t\t\tsetIsCheckedRequired(\n\t\t\t\t\t\t\t\t\ttoBool(jsxcontent[1].required)\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\tsetPlaceholderText(jsxcontent[1].placeholder);\n\t\t\t\t\t\t\t}}>\n\t\t\t\t\t\t\t{jsxcontent && jsxcontent[0]}\n\t\t\t\t\t\t</div>\n\t\t\t\t\t))}\n\t\t\t\t</div>\n\t\t\t\t<div className={styles.view_share_footer}>\n\t\t\t\t\t{/* <span className={styles.view_button_wrapper}>\n\t\t\t\t\t\t<button className={styles.view_button}>\n\t\t\t\t\t\t\t<b className={styles.view_button_icon}></b>\n\t\t\t\t\t\t\t<span className={styles.view_button_text}>\n\t\t\t\t\t\t\t\tView Form\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t</button>\n\t\t\t\t\t</span> */}\n\t\t\t\t\t<span className={styles.save_button_wrapper}>\n\t\t\t\t\t\t<button\n\t\t\t\t\t\t\tclassName={styles.save_button}\n\t\t\t\t\t\t\tonClick={() => onSave()}>\n\t\t\t\t\t\t\t<span className={styles.save_button_icon}>\n\t\t\t\t\t\t\t\t<i className=\"fas fa-check\"></i>\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t<span className={styles.save_button_text}>\n\t\t\t\t\t\t\t\tSave Form\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t</button>\n\t\t\t\t\t</span>\n\t\t\t\t\t{/* <span className={styles.share_button_wrapper}>\n\t\t\t\t\t\t<button className={styles.share_button}>\n\t\t\t\t\t\t\t<b className={styles.share_button_icon}></b>\n\t\t\t\t\t\t\t<span className={styles.share_button_text}>\n\t\t\t\t\t\t\t\tShare Form\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t</button>\n\t\t\t\t\t</span> */}\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t</div>\n\t);\n}\n" }, { "alpha_fraction": 0.675000011920929, "alphanum_fraction": 0.675000011920929, "avg_line_length": 39, "blob_id": "6a58bb53c2d25459f03ee4d56d2a9956566090b8", "content_id": "6f21396cc92536159eb4bd7baf5f2582ab2136f3", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 40, "license_type": "no_license", "max_line_length": 39, "num_lines": 1, "path": "/react-app/src/components/CreateForm/FormContainer/FormEngine/index.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "export { default } from \"./FormEngine\";\n" }, { "alpha_fraction": 0.5920688509941101, "alphanum_fraction": 0.5945281386375427, "avg_line_length": 28.306306838989258, "blob_id": "5e401360f3c988df3d05e205805c897f072fc27a", "content_id": "f17b8c540d3d3615a8de287a97560ea8510590fe", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 3253, "license_type": "no_license", "max_line_length": 80, "num_lines": 111, "path": "/react-app/src/components/HomePage/HomePage.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import { NavLink, Redirect } from \"react-router-dom\";\nimport { useSelector } from \"react-redux\";\n\nimport \"./HomePage.css\";\nimport fillForm from \"../../assets/images/fill-in-form.png\";\nimport dinoForm from \"../../assets/images/wufoo-online-form-builder.png\";\nimport NavBar from \"../NavBar/NavBar\";\n\nexport default function HomePage() {\n\tconst user = useSelector(state => state.session.user);\n\n\tif(user) {\n\t\treturn (\n\t\t\t<Redirect to=\"/forms\"/>\n\t\t);\n\t}\n\t\n\treturn (\n\t\t<>\n\t\t\t<NavBar />\n\t\t\t<div className=\"homepage-container\">\n\t\t\t\t<div className=\"homepage-content\">\n\t\t\t\t\t<div className=\"homepage-banner\">\n\t\t\t\t\t\t<div className=\"homepage-banner-text\">\n\t\t\t\t\t\t\t<div className=\"homepage-banner-header\">\n\t\t\t\t\t\t\t\tEasy-to-Use Online Form Builder For Every\n\t\t\t\t\t\t\t\tOrganization\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div className=\"homepage-banner-subtext\">\n\t\t\t\t\t\t\t\tCloud-based form builder that makes it easy to\n\t\t\t\t\t\t\t\tcreate registration forms, application forms,\n\t\t\t\t\t\t\t\tsurveys, contact forms, payment forms and more.\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div className=\"homepage-banner-footer\">\n\t\t\t\t\t\t\t\tForme is and always will be FREE! No considering\n\t\t\t\t\t\t\t\tdifferent plans depending on your needs, the\n\t\t\t\t\t\t\t\tfree plan supports all functionality.\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<div className=\"homepage-banner-image\">\n\t\t\t\t\t\t\t<img\n\t\t\t\t\t\t\t\tclassName=\"fill-form-image\"\n\t\t\t\t\t\t\t\tsrc={fillForm}\n\t\t\t\t\t\t\t\talt=\"\\A\"\n\t\t\t\t\t\t\t/>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t\t<div className=\"homepage-dino\">\n\t\t\t\t\t\t<img\n\t\t\t\t\t\t\tclassName=\"homepage-dino-image\"\n\t\t\t\t\t\t\tsrc={dinoForm}\n\t\t\t\t\t\t\talt=\"\\A\"\n\t\t\t\t\t\t/>\n\t\t\t\t\t\t<div className=\"homepage-dino-text\">\n\t\t\t\t\t\t\t<div className=\"homepage-dino-header\">\n\t\t\t\t\t\t\t\tBuild powerful online forms and customize them\n\t\t\t\t\t\t\t\tto your heart's delight.\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div className=\"homepage-dino-footer\">\n\t\t\t\t\t\t\t\tOur form builder gives you an award-winning\n\t\t\t\t\t\t\t\tinterface, easy customization, galleries,\n\t\t\t\t\t\t\t\ttemplates and reporting. Check out some of our\n\t\t\t\t\t\t\t\tpopular features.\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t</div>\n\t\t\t\t\t<div className=\"homepage-scene\">\n\t\t\t\t\t\t<div className=\"homepage-scene-text\">\n\t\t\t\t\t\t\t<div className=\"homepage-scene-header\">\n\t\t\t\t\t\t\t\tReady to Get Started?\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div className=\"homepage-scene-subtext\">\n\t\t\t\t\t\t\t\tSign up for a FREE account and start building\n\t\t\t\t\t\t\t\tforms now.\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t<div className=\"homepage-scene-signup\">\n\t\t\t\t\t\t\t\t<NavLink\n\t\t\t\t\t\t\t\t\tto=\"/sign-up\"\n\t\t\t\t\t\t\t\t\tclassName=\"homepage-scene-signup-button\">\n\t\t\t\t\t\t\t\t\t<span className=\"homepage-scene-signup-text\">\n\t\t\t\t\t\t\t\t\t\tSign Up Now\n\t\t\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t\t\t</NavLink>\n\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<img\n\t\t\t\t\t\t\tclassName=\"homepage-scene-raptor\"\n\t\t\t\t\t\t\tsrc=\"https://www.wufoo.com/wp-content/themes/wufoo-site/img/dino6.svg\"\n\t\t\t\t\t\t\talt=\"\\A\"\n\t\t\t\t\t\t/>\n\t\t\t\t\t\t<img\n\t\t\t\t\t\t\tclassName=\"homepage-scene-cloud1\"\n\t\t\t\t\t\t\tsrc=\"https://www.wufoo.com/wp-content/themes/wufoo-site/img/cloud-10.svg\"\n\t\t\t\t\t\t\talt=\"\\A\"\n\t\t\t\t\t\t/>\n\t\t\t\t\t\t<img\n\t\t\t\t\t\t\tclassName=\"homepage-scene-cloud2\"\n\t\t\t\t\t\t\tsrc=\"https://www.wufoo.com/wp-content/themes/wufoo-site/img/cloud-08.svg\"\n\t\t\t\t\t\t\talt=\"\\A\"\n\t\t\t\t\t\t/>\n\t\t\t\t\t\t<img\n\t\t\t\t\t\t\tclassName=\"homepage-scene-dino\"\n\t\t\t\t\t\t\tsrc=\"https://www.wufoo.com/wp-content/themes/wufoo-site/img/dino8.svg\"\n\t\t\t\t\t\t\talt=\"\\A\"\n\t\t\t\t\t\t/>\n\t\t\t\t\t</div>\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t</>\n\t);\n}\n" }, { "alpha_fraction": 0.5839793086051941, "alphanum_fraction": 0.6356589198112488, "avg_line_length": 23.1875, "blob_id": "74e0d19ba59d2f2b0b75a08c55dcde999b35088b", "content_id": "4d8eb69266b00a5d6e6b62d1a6af7baa03b65922", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 774, "license_type": "no_license", "max_line_length": 65, "num_lines": 32, "path": "/migrations/versions/20210820_100524_.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: b8ec5632d693\nRevises: beeeac90e4ba\nCreate Date: 2021-08-20 10:05:24.638509\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b8ec5632d693'\ndown_revision = 'beeeac90e4ba'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'label',\n existing_type=sa.VARCHAR(length=55),\n nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'label',\n existing_type=sa.VARCHAR(length=55),\n nullable=True)\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6182472705841064, "alphanum_fraction": 0.6710684299468994, "avg_line_length": 26.766666412353516, "blob_id": "3d121e4ee6f918c45b8df2e7e63337fcf6df9ade", "content_id": "7a2a2eb110e48c7705c11cac1d1a6951735ba515", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "no_license", "max_line_length": 99, "num_lines": 30, "path": "/migrations/versions/20210821_145311_.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 2453c767d036\nRevises: d0c387e43ca4\nCreate Date: 2021-08-21 14:53:11.208418\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2453c767d036'\ndown_revision = 'd0c387e43ca4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('forms_field_id_fkey', 'forms', type_='foreignkey')\n op.drop_column('forms', 'field_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('forms', sa.Column('field_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.create_foreign_key('forms_field_id_fkey', 'forms', 'fields', ['field_id'], ['id'])\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6214887499809265, "alphanum_fraction": 0.6271067261695862, "avg_line_length": 36.47368240356445, "blob_id": "f1c76d7886eb2146a96206a096b4028bef079890", "content_id": "4d04922a4738184ac5770c3eb98a243e710c9ebd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1424, "license_type": "no_license", "max_line_length": 192, "num_lines": 38, "path": "/app/models/form.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "from .db import db\n\n\nclass Form(db.Model):\n __tablename__ = 'forms'\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(50), nullable=False)\n owner_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)\n description = db.Column(db.Text)\n label_placement = db.Column(db.String(10))\n description_align = db.Column(db.String(10))\n title_align = db.Column(db.String(10))\n # creates a pseudo-column (you won't see it) in the 'fields' table called 'form' that can be assigned a Form instance when creating a Field instance -- 'form' is not the name of this table\n fields = db.relationship('Field', backref='form')\n\n\n # field_id = db.Column(db.Integer, db.ForeignKey('fields.id'))\n # fields = db.relationship(\"Field\", foreign_keys=field_id ,back_populates=\"forms\", lazy=\"joined\")\n\n\n def to_dict(self):\n # convert associated fields to serializable dictionaries\n form_fields = [field.to_dict() for field in self.fields]\n\n return {\n 'id': self.id,\n 'fields': form_fields,\n 'title': self.title,\n 'owner_id': self.owner_id,\n 'description': self.description,\n 'label_placement': self.label_placement,\n 'description_align': self.description_align,\n 'title_align': self.title_align\n }\n\n def __repr__(self):\n return str(self.to_dict())\n" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.739130437374115, "avg_line_length": 46, "blob_id": "60bf69872ed1558d308afb8e4c2df9451900498f", "content_id": "5c9c677bd75760473d502e0afab4eee620317c28", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 46, "license_type": "no_license", "max_line_length": 46, "num_lines": 1, "path": "/react-app/src/components/EditForm/EditFormContainer/index.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "export { default } from \"./EditFormContainer\";" }, { "alpha_fraction": 0.5604528188705444, "alphanum_fraction": 0.5623795986175537, "avg_line_length": 25.96103858947754, "blob_id": "4a0d2cbca39c664b8a8fe63c0372bf6e40667188", "content_id": "d6455f9cf689882e490a9eaa5d3093f0c6d9f006", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 4152, "license_type": "no_license", "max_line_length": 72, "num_lines": 154, "path": "/react-app/src/components/auth/SignUpFormPage/index.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from \"react\";\nimport { useSelector, useDispatch } from \"react-redux\";\nimport { Redirect } from \"react-router-dom\";\nimport { setErrors } from \"../../../store/errors\";\nimport { signUp } from \"../../../store/session\";\nimport styles from \"./SignUpFormPage.module.css\";\nimport NavBar from \"../../NavBar/NavBar\";\nimport Errors from \"../../Errors\";\n\nconst SignUpFormPage = () => {\n\tconst [username, setUsername] = useState(\"\");\n\tconst [email, setEmail] = useState(\"\");\n\tconst [password, setPassword] = useState(\"\");\n\tconst [repeatPassword, setRepeatPassword] = useState(\"\");\n\tconst user = useSelector((state) => state.session.user);\n\tconst dispatch = useDispatch();\n\n\t// Resetting errors from images\n\tuseEffect(() => {\n\t\twindow.scrollTo(0, 0);\n\t\tdispatch(setErrors(null));\n\t}, [dispatch]);\n\n\tconst onSignUp = async (e) => {\n\t\te.preventDefault();\n\t\tawait dispatch(signUp(username, email, password, repeatPassword));\n\t};\n\n\tconst updateUsername = (e) => {\n\t\tsetUsername(e.target.value);\n\t};\n\n\tconst updateEmail = (e) => {\n\t\tsetEmail(e.target.value);\n\t};\n\n\tconst updatePassword = (e) => {\n\t\tsetPassword(e.target.value);\n\t};\n\n\tconst updateRepeatPassword = (e) => {\n\t\tsetRepeatPassword(e.target.value);\n\t};\n\n\tif (user) {\n\t\treturn <Redirect to=\"/\" />;\n\t}\n\n\treturn (\n\t\t<>\n\t\t\t<NavBar />\n\t\t\t<div className={styles.page_container}>\n\t\t\t\t<div className={styles.middle_container}>\n\t\t\t\t\t<div className={styles.welcome_block}>\n\t\t\t\t\t\t<h1 className={styles.welcome}>\n\t\t\t\t\t\t\tCreate powerful forms today.\n\t\t\t\t\t\t</h1>\n\t\t\t\t\t</div>\n\t\t\t\t\t<div className={styles.form_container}>\n\t\t\t\t\t\t<div className={styles.form_block}>\n\t\t\t\t\t\t\t<form className={styles.form} onSubmit={onSignUp}>\n\t\t\t\t\t\t\t\t<div>\n\t\t\t\t\t\t\t\t\t<Errors />\n\t\t\t\t\t\t\t\t\t<label className={styles.input_label}>\n\t\t\t\t\t\t\t\t\t\t* EMAIL ADDRESS\n\t\t\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t\t\t<input\n\t\t\t\t\t\t\t\t\t\t// required={true}\n\t\t\t\t\t\t\t\t\t\tclassName={styles.input_fields}\n\t\t\t\t\t\t\t\t\t\ttype=\"text\"\n\t\t\t\t\t\t\t\t\t\tplaceholder=\"Your valid email\"\n\t\t\t\t\t\t\t\t\t\tname=\"email\"\n\t\t\t\t\t\t\t\t\t\tonChange={updateEmail}\n\t\t\t\t\t\t\t\t\t\tvalue={email}></input>\n\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t<div>\n\t\t\t\t\t\t\t\t\t<label className={styles.input_label}>\n\t\t\t\t\t\t\t\t\t\t* USERNAME\n\t\t\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t\t\t<input\n\t\t\t\t\t\t\t\t\t\trequired={true}\n\t\t\t\t\t\t\t\t\t\tclassName={styles.input_fields}\n\t\t\t\t\t\t\t\t\t\ttype=\"text\"\n\t\t\t\t\t\t\t\t\t\tplaceholder=\"Your custom FORMe name\"\n\t\t\t\t\t\t\t\t\t\tname=\"username\"\n\t\t\t\t\t\t\t\t\t\tonChange={updateUsername}\n\t\t\t\t\t\t\t\t\t\tvalue={username}></input>\n\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t<div>\n\t\t\t\t\t\t\t\t\t<label className={styles.input_label}>\n\t\t\t\t\t\t\t\t\t\t* PASSWORD\n\t\t\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t\t\t<input\n\t\t\t\t\t\t\t\t\t\trequired={true}\n\t\t\t\t\t\t\t\t\t\tclassName={styles.input_fields}\n\t\t\t\t\t\t\t\t\t\ttype=\"password\"\n\t\t\t\t\t\t\t\t\t\tplaceholder=\"At least 7 characters with one letter and number\"\n\t\t\t\t\t\t\t\t\t\tname=\"password\"\n\t\t\t\t\t\t\t\t\t\tonChange={updatePassword}\n\t\t\t\t\t\t\t\t\t\tvalue={password}></input>\n\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t<div>\n\t\t\t\t\t\t\t\t\t<label className={styles.input_label}>\n\t\t\t\t\t\t\t\t\t\t* CONFIRM PASSWORD\n\t\t\t\t\t\t\t\t\t</label>\n\t\t\t\t\t\t\t\t\t<input\n\t\t\t\t\t\t\t\t\t\tclassName={styles.input_fields}\n\t\t\t\t\t\t\t\t\t\ttype=\"password\"\n\t\t\t\t\t\t\t\t\t\tplaceholder=\"Must match with password\"\n\t\t\t\t\t\t\t\t\t\tname=\"repeat_password\"\n\t\t\t\t\t\t\t\t\t\tonChange={updateRepeatPassword}\n\t\t\t\t\t\t\t\t\t\tvalue={repeatPassword}\n\t\t\t\t\t\t\t\t\t\trequired={true}></input>\n\t\t\t\t\t\t\t\t</div>\n\t\t\t\t\t\t\t\t<button\n\t\t\t\t\t\t\t\t\tclassName={styles.submit_button}\n\t\t\t\t\t\t\t\t\ttype=\"submit\">\n\t\t\t\t\t\t\t\t\tSign Up\n\t\t\t\t\t\t\t\t</button>\n\t\t\t\t\t\t\t</form>\n\t\t\t\t\t\t</div>\n\t\t\t\t\t\t<aside className={styles.feature_container}>\n\t\t\t\t\t\t\t<h3 className={styles.free}>FREE</h3>\n\t\t\t\t\t\t\t<hr />\n\t\t\t\t\t\t\t<ul className={styles.free_offers}>\n\t\t\t\t\t\t\t\t<li className={styles.free_offers_item}>\n\t\t\t\t\t\t\t\t\tForms\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li className={styles.free_offers_item}>\n\t\t\t\t\t\t\t\t\tShared Forms\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li className={styles.free_offers_item}>\n\t\t\t\t\t\t\t\t\tMultiple Fields\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li className={styles.free_offers_item}>\n\t\t\t\t\t\t\t\t\t1 User\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li className={styles.free_offers_item}>\n\t\t\t\t\t\t\t\t\tUnlimited Previews\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t\t<li className={styles.free_offers_item}>\n\t\t\t\t\t\t\t\t\tand more!!!\n\t\t\t\t\t\t\t\t</li>\n\t\t\t\t\t\t\t</ul>\n\t\t\t\t\t\t\t<hr />\n\t\t\t\t\t\t</aside>\n\t\t\t\t\t</div>\n\t\t\t\t</div>\n\t\t\t</div>\n\t\t</>\n\t);\n};\n\nexport default SignUpFormPage;\n" }, { "alpha_fraction": 0.5482036471366882, "alphanum_fraction": 0.5494959950447083, "avg_line_length": 30.8436222076416, "blob_id": "df9524de39a67a66db8ee39c9718aa839e5faef5", "content_id": "8da967872c0be0bfff69f3031d0e54bfacf67729", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7738, "license_type": "no_license", "max_line_length": 80, "num_lines": 243, "path": "/app/api/form_routes.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "from flask import Blueprint, jsonify, request, session\nfrom flask_login import login_required, current_user\nfrom app.models import Form, db, Field\n\nform_routes = Blueprint(\"forms\", __name__)\n\n\n# get all forms --- remove this route?\n@form_routes.route('/')\n# @login_required\ndef get_forms():\n forms = Form.query.all() # original query for ALL forms\n return {'forms': [form.to_dict() for form in forms]}\n\n\n@form_routes.route('/<int:id>', methods=['GET', 'DELETE'])\n@login_required\ndef forms(id):\n # get a specific form by primary key\n if request.method == 'GET':\n form = Form.query.get(id)\n return form.to_dict()\n # delete a specific form by primary key\n elif request.method == 'DELETE':\n form = Form.query.get(id) # takes a form's id\n\n db.session.delete(form)\n db.session.commit()\n return form.to_dict()\n\n\n# (GET) allow user to access a form without being logged in, i.e. SHARED form\n# @form_routes.route('/<int:id>/shared')\n# def shared_form(id):\n# form = Form.query.get(id)\n# return form.to_dict()\n\n\n# get forms by owner_id (i.e. all forms owned by a specific user)\n@form_routes.route('/users/<int:id>')\ndef user_forms(id): # takes a user's id\n forms = Form.query.filter_by(owner_id=id).all()\n # destructure in forms store\n return {'forms': [form.to_dict() for form in forms]}\n\n\n@form_routes.route('/build', methods=['POST'])\n@login_required\ndef create_form():\n # print('***** REQUEST DATA INFO *****', request.get_json())\n\n user_id = session['_user_id']\n\n # pull JSON data from request body\n data = request.get_json()\n form_fields = []\n\n form = Form(\n title=data[\"title\"],\n owner_id=user_id,\n description=data[\"description\"],\n label_placement=data[\"labelPlacement\"],\n description_align=data[\"descriptionAlignment\"],\n title_align=data[\"titleAlignment\"],\n )\n\n db.session.add(form)\n db.session.commit()\n\n # print('FORM FORM FORM:', form)\n\n for field_info in data[\"fields\"]:\n # all of the columns in the fields table (except id)\n expected_keys = [\n \"type\",\n \"label\",\n \"maxLength\",\n \"required\",\n \"placeholder\",\n \"instructions\",\n \"choices\"\n ]\n\n # check whether field_info[\"maxLength\"] exists\n if \"maxLength\" in field_info:\n # convert the value from string to integer\n field_info[\"maxLength\"] = int(field_info[\"maxLength\"])\n\n for key in expected_keys:\n if key not in field_info:\n # create the key and set the default value to None\n field_info.setdefault(key)\n\n # print('******* FIELD INFO ********', field_info)\n\n field_choices = field_info['choices']\n choices_string = \"\"\n\n for choice in field_choices:\n choices_string += (str(choice) + '&&')\n\n field = Field(\n type=field_info[\"type\"],\n label=field_info[\"label\"],\n max_length=field_info[\"maxLength\"],\n required=field_info[\"required\"],\n placeholder=field_info[\"placeholder\"],\n instructions=field_info[\"instructions\"],\n choices=choices_string,\n form=form # handles the form_id\n )\n\n # db.session.add(field)\n form_fields.append(field)\n\n db.session.add_all(form_fields)\n db.session.commit()\n\n # test_form = Form.query.filter_by(title='To Test Fields').first()\n\n # print(\"*** FORM.FIELDS ***\", type(test_form.fields))\n # print(\"*** FIELD.FORMS ***\", form_fields[0].form)\n\n # # ...so we can use the dict.update() method\n # return_form = form.to_dict()\n # # add an entry in 'form' contaning its related fields\n # return_form.update({\"fields\": [field.to_dict() for field in form_fields]})\n # print('**** FORM WITH FIELDS ****', form.to_dict())\n\n return form.to_dict()\n\n@form_routes.route('/<int:id>', methods=['PUT'])\n@login_required\ndef edit_form(id):\n form = Form.query.get(id)\n if form:\n if form.owner_id == current_user.id:\n data = request.get_json()\n\n form.title= data[\"title\"]\n form.description= data[\"description\"]\n form.label_placement= data[\"labelPlacement\"]\n form.description_align= data[\"descriptionAlignment\"]\n form.title_align= data[\"titleAlignment\"]\n\n # Remove any fields on the form that previously existed\n for field in form.fields:\n db.session.delete(field)\n db.session.commit()\n\n # Re-add all the fields to the form\n form_fields = []\n\n for field_info in data[\"fields\"]:\n # all of the columns in the fields table (except id)\n expected_keys = [\n \"type\",\n \"label\",\n \"maxLength\",\n \"required\",\n \"placeholder\",\n \"instructions\",\n \"choices\"\n ]\n\n # check whether field_info[\"maxLength\"] exists\n if \"maxLength\" in field_info:\n # convert the value from string to integer\n field_info[\"maxLength\"] = int(field_info[\"maxLength\"])\n\n for key in expected_keys:\n if key not in field_info:\n # create the key and set the default value to None\n field_info.setdefault(key)\n\n # print('******* FIELD INFO ********', field_info)\n\n field_choices = field_info['choices']\n choices_string = \"\"\n\n for choice in field_choices:\n choices_string += (str(choice) + '&&')\n\n field = Field(\n type=field_info[\"type\"],\n label=field_info[\"label\"],\n max_length=field_info[\"maxLength\"],\n required=field_info[\"required\"],\n placeholder=field_info[\"placeholder\"],\n instructions=field_info[\"instructions\"],\n choices=choices_string,\n form=form # handles the form_id\n )\n\n # db.session.add(field)\n form_fields.append(field)\n\n db.session.add_all(form_fields)\n db.session.commit()\n\n return form.to_dict()\n else:\n return \"You do not own the form you are trying to edit.\", 401\n else:\n return \"The form you're trying to edit does not exist.\", 400\n\n\n\n# ! currently causes error \"405 method not allowed\"\n# ! when not bundled with `user_forms(id)` above\n# delete a specific form by primary key\n# @form_routes.route('/<int:id>', methods=['DELETE'])\n# def delete_form(id):\n# if request.method == 'DELETE':\n# form = Form.query.get(id)\n\n# db.session.delete(form)\n# db.session.commit()\n# return form.to_dict()\n\n\n# @form_routes.route('/<int:id>')\n# def get_form(id):\n# form = Form.query.filter(Form.id == id).first()\n# # fields = Field.query.filter(Field.form_id == form.id).all()\n\n# print('FORM IS HERE!!! ', form.to_dict())\n# # print('FIELD IS HERE!!!!! ***',\n# # {'fields': [field.to_dict() for field in fields]})\n\n# # form[\"fields\"] = {'fields': [field.to_dict() for field in fields]}\n\n# return form.to_dict()\n\n@form_routes.route('/<int:id>/shared', methods=['GET'])\n@login_required\ndef get_share_forms(id):\n # get a specific form by primary key\n if request.method == 'GET':\n form = Form.query.get(id)\n print('FORM CHOICES!!!!!!', form)\n \n return form.to_dict()\n" }, { "alpha_fraction": 0.5706806182861328, "alphanum_fraction": 0.6361256837844849, "avg_line_length": 22.875, "blob_id": "55565c69d1b791206bfcf760634f30096f9687fb", "content_id": "aa93fdd20cc22d63f6badb6830f1df4eb3d9b1fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 764, "license_type": "no_license", "max_line_length": 65, "num_lines": 32, "path": "/migrations/versions/20210820_133516_.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: b3e721c02f48\nRevises: 9aec744a6b98\nCreate Date: 2021-08-20 13:35:16.871785\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b3e721c02f48'\ndown_revision = '9aec744a6b98'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'form_id',\n existing_type=sa.INTEGER(),\n nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('fields', 'form_id',\n existing_type=sa.INTEGER(),\n nullable=True)\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.4728506803512573, "alphanum_fraction": 0.48642534017562866, "avg_line_length": 22.263158798217773, "blob_id": "180b37237d939151628bc11866744c6450f207fd", "content_id": "e66463d4d78ceefe3e9a679c76f648ff2e8ae10c", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 442, "license_type": "no_license", "max_line_length": 58, "num_lines": 19, "path": "/react-app/src/components/PageNotFound/index.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport { Redirect, useHistory } from 'react-router-dom'\n\nfunction PageNotFound() {\n const history = useHistory()\n return (\n <div>\n <h1>Page Not Found</h1>\n <p>Redirecting you back to the homepage...</p>\n {\n setTimeout(() => {\n history.push('/')\n }, 1700)\n }\n </div>\n )\n}\n\nexport default PageNotFound\n" }, { "alpha_fraction": 0.5571531057357788, "alphanum_fraction": 0.5571531057357788, "avg_line_length": 32.119049072265625, "blob_id": "200e5406a3d54aa71eca06c3dcfa4a4e22c7b53e", "content_id": "7eef455576058da019c36dd050a4af8ddf7b7099", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1391, "license_type": "no_license", "max_line_length": 69, "num_lines": 42, "path": "/app/api/field_routes.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "# from flask import Blueprint, jsonify, request\n# from flask_login import login_required\n# from app.models import Field, db\n\n# field_routes = Blueprint('fields', __name__)\n\n\n# @field_routes.route('/', methods=['POST'])\n# def fields():\n# if request.method == 'POST':\n# # get fields data from request body\n# data = request.get_json()\n# form_fields = []\n\n# for field_info in data:\n# field = Field(\n# type=field_info[\"type\"],\n# label=field_info[\"label\"],\n# max_length=field_info[\"max_length\"],\n# required=field_info[\"required\"],\n# placeholder=field_info[\"placeholder\"],\n# instructions=field_info[\"instructions\"],\n# choices=field_info[\"choices\"],\n# form_id=field_info[\"form_id\"]\n# )\n\n# # db.session.add(field)\n# form_fields.append(field)\n\n# # adds each instance individually, so list format is ok\n# db.session.add_all(form_fields)\n# db.session.commit()\n\n# # must return dictionary, tuple, or string\n# return {\"fields\": [field.to_dict for field in form_fields]}\n\n\n# @field_routes.route('/forms/<int:id>')\n# def form_fields(id):\n# fields = Field.query.filter_by(form_id=id).all()\n\n# return {'fields': [field.to_dict for field in fields]}\n" }, { "alpha_fraction": 0.5597706437110901, "alphanum_fraction": 0.5597706437110901, "avg_line_length": 24.761363983154297, "blob_id": "791588064389a62d55675e188cf7d412e0cf0700", "content_id": "ebeb4769c791d3e14fee6d8c75a58a6e2962fe22", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 2267, "license_type": "no_license", "max_line_length": 92, "num_lines": 88, "path": "/react-app/src/components/NavBar/NavBar.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import React from \"react\";\nimport { NavLink } from \"react-router-dom\";\nimport LogoutButton from \"../auth/LogoutButton/LogoutButton\";\nimport { useSelector } from \"react-redux\";\n\nimport logoRed from \"../../assets/images/forme-logo.png\";\nimport logoWhite from \"../../assets/images/forme-logo-white.png\";\nimport \"./NavBar.css\";\n\nconst NavBar = () => {\n\tconst user = useSelector((store) => store.session.user);\n\tconst loggedInNavbarClasses = \"navbar-container navbar-container-loggedin\";\n\tconst loggedOutNavbarClasses =\n\t\t\"navbar-container navbar-container-loggedout\";\n\n\treturn (\n\t\t<nav className={user ? loggedInNavbarClasses : loggedOutNavbarClasses}>\n\t\t\t<NavLink\n\t\t\t\tto={user ? \"/forms\" : \"/\"}\n\t\t\t\texact={true}\n\t\t\t\tactiveClassName=\"active\"\n\t\t\t\tclassName=\"navbar-links-home\">\n\t\t\t\t{/* <img className=\"navbar-logo\" src={logoRed} alt=\"\\A\" /> */}\n\t\t\t\t<img\n\t\t\t\t\tclassName=\"navbar-logo\"\n\t\t\t\t\tsrc={user ? logoWhite : logoRed}\n\t\t\t\t\talt=\"\\A\"\n\t\t\t\t/>\n\t\t\t</NavLink>\n\t\t\t<ul className=\"navbar-links\">\n\t\t\t\t<li>\n\t\t\t\t\t{/* <NavLink to='/users' exact={true} activeClassName='active' className=\"navbar-link\">\n Users\n </NavLink> */}\n\t\t\t\t\t{/* {user ? (\n <NavLink to=\"/forms\" activeClassName='active' className=\"navbar-link\">\n Forms\n </NavLink>\n ) : ( null )\n } */}\n\t\t\t\t</li>\n\t\t\t</ul>\n\t\t\t<ul className=\"navbar-auth-links\">\n\t\t\t\t<li>\n\t\t\t\t\t{user === null ? (\n\t\t\t\t\t\t<NavLink\n\t\t\t\t\t\t\tto=\"/sign-up\"\n\t\t\t\t\t\t\texact={true}\n\t\t\t\t\t\t\tclassName=\"navbar-auth-links-signup\">\n\t\t\t\t\t\t\t<span className=\"navbar-signup-text\">Sign Up</span>\n\t\t\t\t\t\t</NavLink>\n\t\t\t\t\t) : null}\n\t\t\t\t</li>\n\t\t\t\t{user === null ? (\n\t\t\t\t\t<li>\n\t\t\t\t\t\t<NavLink\n\t\t\t\t\t\t\tto=\"/login\"\n\t\t\t\t\t\t\texact={true}\n\t\t\t\t\t\t\tclassName=\"navbar-auth-links-login\">\n\t\t\t\t\t\t\t<span\n\t\t\t\t\t\t\t\tclassName=\"navbar-login-text\"\n\t\t\t\t\t\t\t\tonMouseEnter={(e) =>\n\t\t\t\t\t\t\t\t\t(e.target.innerText = \"RAWRR!\")\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tonMouseLeave={(e) =>\n\t\t\t\t\t\t\t\t\t(e.target.innerText = \"Login\")\n\t\t\t\t\t\t\t\t}>\n\t\t\t\t\t\t\t\tLogin\n\t\t\t\t\t\t\t</span>\n\t\t\t\t\t\t</NavLink>\n\t\t\t\t\t</li>\n\t\t\t\t) : (\n\t\t\t\t\t<li navbar-auth-links-logout>\n\t\t\t\t\t\t<NavLink\n\t\t\t\t\t\t\tto=\"/forms\"\n\t\t\t\t\t\t\tactiveClassName=\"active\"\n\t\t\t\t\t\t\tclassName=\"navbar-link\">\n\t\t\t\t\t\t\tForms\n\t\t\t\t\t\t</NavLink>\n\t\t\t\t\t\t<LogoutButton />\n\t\t\t\t\t</li>\n\t\t\t\t)}\n\t\t\t</ul>\n\t\t</nav>\n\t);\n};\n\nexport default NavBar;\n" }, { "alpha_fraction": 0.590232253074646, "alphanum_fraction": 0.5914234519004822, "avg_line_length": 23.691177368164062, "blob_id": "7e41814b2180acdbff94df8d75d702e936aeac88", "content_id": "b69cd3b8cf99b3e5ab41b5105e9f3d394c3e35f2", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 1679, "license_type": "no_license", "max_line_length": 57, "num_lines": 68, "path": "/react-app/src/components/Forms/SharedForm.js", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "import React, { useEffect, useState } from \"react\";\nimport { useDispatch, useSelector } from \"react-redux\";\nimport { useParams, Link } from \"react-router-dom\";\nimport { getSharedForm } from \"../../store/forms.js\";\n\nimport redLogo from \"../../assets/images/forme-logo.png\";\nimport \"./Forms.css\";\nimport FormField from \"./FormField.js\";\n\nfunction SharedForm() {\n\tconst dispatch = useDispatch();\n\tconst form = useSelector((state) => state.forms);\n\tconst { formId } = useParams();\n\tconsole.log(\"FoTTMTT\", form);\n\n\tlet fieldsArray = form[formId]?.fields;\n\n\tuseEffect(() => {\n\t\tdispatch(getSharedForm(formId));\n\t}, [dispatch, formId]);\n\n\treturn (\n\t\t<div className=\"shared-form-container\">\n\t\t\t<div className=\"middle-container\">\n\t\t\t\t<div className=\"shared-form-logo-container\">\n\t\t\t\t\t<Link to=\"/forms\">\n\t\t\t\t\t\t<img\n\t\t\t\t\t\t\tclassName=\"shared-form-logo\"\n\t\t\t\t\t\t\tsrc={redLogo}\n\t\t\t\t\t\t\talt=\"\\A\"\n\t\t\t\t\t\t/>\n\t\t\t\t\t</Link>\n\t\t\t\t</div>\n\t\t\t\t<form className=\"shared-form\">\n\t\t\t\t\t<header className=\"header\">\n\t\t\t\t\t\t<h2\n\t\t\t\t\t\t\tclassName=\"form-title\"\n\t\t\t\t\t\t\tstyle={{\n\t\t\t\t\t\t\t\tdisplay: \"flex\",\n\t\t\t\t\t\t\t\tjustifyContent: form[formId]?.title_align,\n\t\t\t\t\t\t\t}}>\n\t\t\t\t\t\t\t{form[formId]?.title}\n\t\t\t\t\t\t</h2>\n\t\t\t\t\t\t<p\n\t\t\t\t\t\t\tclassName=\"form-description\"\n\t\t\t\t\t\t\tstyle={{\n\t\t\t\t\t\t\t\tdisplay: \"flex\",\n\t\t\t\t\t\t\t\tjustifyContent: form[formId]?.description_align,\n\t\t\t\t\t\t\t}}>\n\t\t\t\t\t\t\t{form[formId]?.description}\n\t\t\t\t\t\t</p>\n\t\t\t\t\t</header>\n\t\t\t\t\t{fieldsArray?.map((field) => (\n\t\t\t\t\t\t<FormField\n\t\t\t\t\t\t\tfield={field}\n\t\t\t\t\t\t\tlabel_placement={form[formId]?.label_placement}\n\t\t\t\t\t\t/>\n\t\t\t\t\t))}\n\t\t\t\t</form>\n\t\t\t\t<Link to=\"/forms\">\n\t\t\t\t\t<button className=\"forms-return\">Submit</button>\n\t\t\t\t</Link>\n\t\t\t</div>\n\t\t</div>\n\t);\n}\n\nexport default SharedForm;\n" }, { "alpha_fraction": 0.581244170665741, "alphanum_fraction": 0.5923862457275391, "avg_line_length": 34.900001525878906, "blob_id": "1426fac298877589797d2269d18313298cd1c6bf", "content_id": "963a6db99093926d47a9f4bf85f80642daed380a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 86, "num_lines": 30, "path": "/app/models/field.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "from .db import db\n\n\nclass Field(db.Model):\n __tablename__ = 'fields'\n\n id = db.Column(db.Integer, primary_key=True)\n type = db.Column(db.String(255), nullable=False)\n label = db.Column(db.String(55), nullable=False)\n max_length = db.Column(db.Integer)\n required = db.Column(db.Boolean, nullable=False)\n placeholder = db.Column(db.String(255))\n instructions = db.Column(db.String(255))\n choices = db.Column(db.Text)\n form_id = db.Column(db.Integer, db.ForeignKey(\"forms.id\"))\n # forms = db.relationship(\"Form\", foreign_keys=form_id, lazy=\"joined\") # redundant\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'form_id': self.form_id,\n 'type': self.type,\n 'label': self.label,\n 'max_length': self.max_length,\n 'required': self.required,\n 'placeholder': self.placeholder,\n 'instructions': self.instructions,\n # splits choices into a list, removes empty list entry at the end\n 'choices': self.choices[:-2].split('&&')\n }\n" }, { "alpha_fraction": 0.61091548204422, "alphanum_fraction": 0.6126760840415955, "avg_line_length": 20.846153259277344, "blob_id": "ce913a0fa104798b6080a2a298d3d179975cde6e", "content_id": "06b08905f4a4315c924b58349846e226e4450c2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "no_license", "max_line_length": 67, "num_lines": 26, "path": "/app/seeds/fields.py", "repo_name": "KagenLH/forme-app", "src_encoding": "UTF-8", "text": "from app.models import db, Field\nfrom app.models import Form\n\n\ndef seed_fields():\n form = Form(\n title='To Test Fields',\n owner_id=1\n )\n db.session.add(form)\n\n testField = Field(\n type=\"text\",\n label=\"Test Field\",\n required=False,\n form=form, # creates the form_id / association\n choices='Some Stuff&&Another choice&&Hello from hell&&'\n )\n\n db.session.add(testField)\n db.session.commit()\n\n\ndef undo_fields():\n db.session.execute('TRUNCATE fields RESTART IDENTITY CASCADE;')\n db.session.commit()\n" } ]
38
xiezhongzhao/Hand-Pose-Estimation
https://github.com/xiezhongzhao/Hand-Pose-Estimation
e8c44b4423d79b951af694c51cadfdbc4f764b82
3ed62a287afc31c032ee513414368ad2fa6da82b
5139cb12b686a971ab847024ee4541745603a653
refs/heads/master
2019-07-06T05:33:28.678384
2018-02-11T02:55:53
2018-02-11T02:55:53
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5907846689224243, "alphanum_fraction": 0.6024178862571716, "avg_line_length": 30.66666603088379, "blob_id": "8192b201a0f23c87140d216cd6c534bfbb93dc0e", "content_id": "d50dd602d25cf2d19b2b8a38b8ce8713b92a41a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4384, "license_type": "no_license", "max_line_length": 89, "num_lines": 138, "path": "/prepare_data.py", "repo_name": "xiezhongzhao/Hand-Pose-Estimation", "src_encoding": "UTF-8", "text": "import tensorlayer as tl\nimport numpy as np\nimport os\nimport h5py\nfrom PIL import Image\nimport glob\n\n\n# f = h5py.File('synth_data_test.hdf5','r')\n\n\ndef load_h5py(total, train_ratio, dataset_dir):\n #DATASET_DIR = '/media/MMVCNYLOCAL_2/MMVC_NY/Mengwei_Ren/pack_dataset/'\n dataset = h5py.File(os.path.join(dataset_dir, 'synth_data_v2_with_pca.hdf5'))\n\n xtrain = []\n ytrain = []\n xtest = []\n ytest = []\n\n num_train = int(train_ratio * total)\n print '{}/{} samples for training'.format(num_train, total)\n for i in range(num_train):\n img = dataset['/image'][i]\n # img = img.squeeze()\n # print np.shape(np.array(img))\n xtrain.append(img)\n ytrain.append(dataset['/label'][i])\n\n print '{}/{} samples for testing'.format(total - num_train, total)\n for i in range(num_train, total):\n img = dataset['/image'][i]\n xtest.append(img)\n ytest.append(dataset['/label'][i])\n\n xtrain = np.asarray(xtrain, dtype=np.float32)\n ytrain = np.asarray(ytrain, dtype=np.float32)\n xtest = np.asarray(xtest, dtype=np.float32)\n ytest = np.asarray(ytest, dtype=np.float32)\n\n print np.shape(xtrain)\n print np.shape(ytrain)\n print np.shape(xtest)\n print np.shape(ytest)\n dataset.close()\n \n return xtrain, ytrain, xtest, ytest\n\n\ndef load_regular_image(total, train_ratio,filters, dataset_dir, domain):\n #DATASET_DIR = '/media/MMVCNYLOCAL_2/MMVC_NY/Mengwei_Ren/dataset/'\n \n x_train = []\n y_train = []\n x_test = []\n y_test = []\n \n num_train = int(train_ratio * total)\n print '{}/{} samples for training'.format(num_train, total)\n \n trainFilename = glob.glob(dataset_dir +\"/train/\" + filters + \"*.png\") \n testFilename = glob.glob(dataset_dir +\"/test/\" + filters + \"*.png\")\n trainFilename.sort()\n testFilename.sort()\n for i in range(1,num_train+1):\n imgname = trainFilename[i]\n img = Image.open(imgname).convert('L')\n img = img.resize([128,128])\n img = np.asarray(img)\n #print np.max(img)\n img = img/(np.max(img)*1.0)\n x_train.append(img)\n y_train.append(domain)\n #print imgname, domain\n \n for i in range(1,total-num_train+1):\n imgname = testFilename[i]\n img = Image.open(imgname).convert('L')\n img = img.resize([128,128])\n img = np.asarray(img)\n #print np.max(img)\n img = img/(np.max(img)*1.0)\n x_test.append(img)\n y_test.append(domain) \n #print imgname, domain\n\n print '{}/{} samples for testing'.format(total - num_train, total)\n\n x_train = np.asarray(x_train, dtype=np.float32)\n y_train = np.asarray(y_train, dtype=np.float32)\n x_test = np.asarray(x_test, dtype=np.float32)\n y_test = np.asarray(y_test, dtype=np.float32)\n\n print np.shape(x_train)\n print np.shape(y_train)\n print np.shape(x_test)\n print np.shape(y_test)\n \n return x_train, y_train, x_test, y_test \n \n \ndef generate_tf_records(x, y, recordName): \n\n arrxs = np.array(x)\n arrys = np.array(y)\n print arrxs.shape, 'x shape'\n print arrys.shape, 'y shape'\n \n print 'Begin writing tf records ...'\n write_to_tf_records(recordName,x,y)\n print 'Source Training records ' + recordName + 'SourceTrain.tfrecords' + ' written.'\n \n \n\n# accept data, label as list, and write to .tfrecords\ndef write_to_tf_records(name, data, label, domain):\n writer = tf.python_io.TFRecordWriter('./Data/'+name+\".tfrecords\")\n index = np.arange(len(label))\n np.random.shuffle(index)\n for m in range(len(label)):\n #print m\n ind = index[m]\n img = np.asarray(data[ind], dtype=np.float32)\n #att = np.asarray(label[ind], dtype=np.int32)\n dom = np.asarray(domain[ind], dtype=np.int32)\n \n img_raw = img.tobytes()\n #att_raw = att.tobytes()\n dom_raw = att.tobytes()\n \n example = tf.train.Example(features=tf.train.Features(feature={\n \"img_raw\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),\n #'att_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[att_raw])),\n 'dom_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[dom_raw]))\n }))\n writer.write(example.SerializeToString())\n writer.close()\n print 'tf records written.'\n \n \n " }, { "alpha_fraction": 0.7982456088066101, "alphanum_fraction": 0.8070175647735596, "avg_line_length": 36.66666793823242, "blob_id": "aeef92444f5609c7ed811809989ba434e669e47e", "content_id": "9173e5b0bc179440d6b6b44e311a3df3bf1c3509", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 114, "license_type": "no_license", "max_line_length": 88, "num_lines": 3, "path": "/README.md", "repo_name": "xiezhongzhao/Hand-Pose-Estimation", "src_encoding": "UTF-8", "text": "# Hand-Pose-Estimation\n\nA research project of 3D hand pose estimation through adversarial learning. In progress. \n" }, { "alpha_fraction": 0.5693333148956299, "alphanum_fraction": 0.6026666760444641, "avg_line_length": 21.058822631835938, "blob_id": "994724f4fb667d3188bf38c60e52291a37e73c7f", "content_id": "429fe817a9cb936bf95a985ca21ec61dff33e4c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 750, "license_type": "no_license", "max_line_length": 41, "num_lines": 34, "path": "/visualize.py", "repo_name": "xiezhongzhao/Hand-Pose-Estimation", "src_encoding": "UTF-8", "text": "\nimport tensorflow as tf\nimport tensorlayer as tl\nimport numpy as np\nimport os, time\nimport matplotlib.pyplot as plt\n\n\ndef save_imgs(x1,x2,x3,x4, path):\n fig = plt.figure(1, figsize=(15, 15))\n \n ax = fig.add_subplot(1, 4, 1)\n ax.imshow(x1.squeeze(),cmap='Greys')\n plt.title('depth')\n ax.axis('off')\n\n ax = fig.add_subplot(1, 4, 2)\n ax.imshow(x2.squeeze(),cmap='Greys')\n plt.title('depth reconstruct')\n ax.axis('off')\n \n ax = fig.add_subplot(1, 4, 3)\n ax.imshow(x3.squeeze(),cmap='Greys')\n plt.title('synthdepth')\n ax.axis('off')\n \n ax = fig.add_subplot(1, 4, 4)\n ax.imshow(x4.squeeze(),cmap='Greys')\n plt.title('synthdepth reconstruct')\n ax.axis('off') \n \n \n \n\n plt.savefig(path)" }, { "alpha_fraction": 0.5430993437767029, "alphanum_fraction": 0.5816618800163269, "avg_line_length": 39.26442337036133, "blob_id": "05f925ec6ba357e8be8317ecfc3ea2a33da34e90", "content_id": "546b37b9eb52ff6a9d82d419e4fdf59790683888", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8376, "license_type": "no_license", "max_line_length": 99, "num_lines": 208, "path": "/model.py", "repo_name": "xiezhongzhao/Hand-Pose-Estimation", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import *\nimport tensorlayer as tl\nfrom tensorlayer.layers import *\n\nslim = tf.contrib.slim\nlayers = tf.contrib.layers\narg_scope = tf.contrib.framework.arg_scope\nFLAGS = tf.app.flags.FLAGS\n\ndef aue_net(x, is_train=True, reuse=False):\n n_filter = 32\n #shape = x.get_shape().\n #print x #, 'aue_net input'\n _, nx, ny, nz = x.get_shape().as_list()\n w_init = tf.random_normal_initializer(stddev=0.02)\n\n with tf.variable_scope(\"aue_net\", reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n inputs = InputLayer(x, name='inputs')\n\n # Encoder\n net_conv0 = Conv2d(inputs, n_filter, (3, 3), (2, 2), act=tf.nn.elu,\n padding='SAME', W_init=w_init, name='conv0') # (nx/2,ny/2,n)\n\n net_conv1 = Conv2d(net_conv0, 2*n_filter, (3, 3), (2, 2), act=tf.nn.elu,\n padding='SAME', W_init=w_init, name='conv1') # (nx/4,ny/4,2*n)\n\n net_conv2 = Conv2d(net_conv1, 4*n_filter, (3, 3), (2, 2), act=tf.nn.elu,\n padding='SAME', W_init=w_init, name='conv2') # (nx/8,ny/8,4*n)\n\n print(\"net_conv2 shape:\", net_conv2.outputs.get_shape())\n\n #net_flat2 = FlattenLayer(net_conv2, name='flatten2')\n #net_dense = DenseLayer(net_flat2, )\n\n # Decoder\n net_deconv2 = DeConv2d(net_conv2, 2*n_filter, (3, 3), (nx/4, ny/4), (2, 2), name='deconv2')\n net_deconv1 = DeConv2d(net_deconv2, n_filter, (3, 3), (nx/2, ny/2), (2, 2), name='deconv1')\n net_deconv0 = DeConv2d(net_deconv1, nz, (3, 3), (nx, ny), (2, 2), name='deconv0')\n\n return net_conv2, net_deconv0 # return feature and reconstruction result\n\ndef encoder_net(x, is_train=True, reuse=False):\n n_filter = 32\n _, nx, ny, nz = x.get_shape().as_list()\n #print shape, 'encoder_net input'\n\n w_init = tf.random_normal_initializer(stddev=0.02)\n\n with tf.variable_scope(\"encoder_net\", reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n inputs = InputLayer(x, name='inputs')\n # Encoder\n net_conv0 = Conv2d(inputs, n_filter, (3, 3), (2, 2), act=tf.nn.elu,\n padding='SAME', W_init=w_init, name='conv0') # (nx/2,ny/2,n)\n\n net_conv1 = Conv2d(net_conv0, 2*n_filter, (3, 3), (2, 2), act=tf.nn.elu,\n padding='SAME', W_init=w_init, name='conv1') # (nx/4,ny/4,2*n)\n\n net_conv2 = Conv2d(net_conv1, 4*n_filter, (3, 3), (2, 2), act=tf.nn.elu,\n padding='SAME', W_init=w_init, name='conv2') # (nx/8,ny/8,4*n)\n\n print(\"net_conv2 shape:\", net_conv2.outputs.get_shape())\n net_flat = FlattenLayer(net_conv2, name='flatten')\n net_fc0 = tl.layers.DenseLayer(net_flat, n_units=512, act = tf.nn.relu, \n W_init=w_init, name ='disc1')\n\n net_fc1 = tl.layers.DenseLayer(net_fc0, n_units=200, act = tf.nn.relu, \n W_init=w_init, name ='disc2')\n return net_fc1\n\ndef decoder_net(ex, label, exshape, is_train=True, reuse=False):\n #net_flat2 = FlattenLayer(net_conv2, name='flatten2')\n #net_dense = DenseLayer(net_flat2, )\n n_filter = 32\n nx, ny, nz = 128, 128, 1\n #shape = ex.get_shape()#.as_list()\n #print shape, 'decoder_net input'\n \n w_init = tf.random_normal_initializer(stddev=0.02)\n if label==1:\n d = tf.ones(exshape, tf.float32)\n else:\n d = tf.ones(exshape, tf.float32)*0\n z = tf.concat([ex,d], 1)\n\n\n with tf.variable_scope(\"decoder_net\", reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n \n inputs = InputLayer(z, name='inputs')\n # Decoder\n net_fc1 = tl.layers.DenseLayer(inputs, n_units=512, act = tf.nn.relu, \n W_init=w_init, name ='fc1')\n net_fc0 = tl.layers.DenseLayer(net_fc1, n_units=128*16*16, act = tf.nn.relu, \n W_init=w_init, name ='fc0')\n net_fc0 = tl.layers.ReshapeLayer(net_fc0, shape = [-1, 16, 16, 128], name='reshape')\n net_deconv2 = DeConv2d(net_fc0, 2*n_filter, (3, 3), (nx/4, ny/4), (2, 2), name='deconv2')\n net_deconv1 = DeConv2d(net_deconv2, n_filter, (3, 3), (nx/2, ny/2), (2, 2), name='deconv1')\n net_deconv0 = DeConv2d(net_deconv1, nz, (3, 3), (nx, ny), (2, 2), name='deconv0')\n\n return net_deconv0\n\ndef pose_discriminator(ex,is_train=True, reuse=False):\n shape = ex.get_shape()\n #print shape, 'pose_discriminator input'\n w_init = tf.random_normal_initializer(stddev=0.1)\n with tf.variable_scope(\"discriminator_net\", reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n \n inputs = tl.layers.InputLayer(ex, name='input_layer')\n net_flat = FlattenLayer(ex, name = 'flatten1')\n\n net_fc1 = tl.layers.DenseLayer(net_flat, n_units=800, act = tf.nn.relu, \n W_init=w_init, name ='fc0')\n\n net_fc2 = tl.layers.DenseLayer(net_fc1, n_units=256, act = tf.nn.relu, \n W_init=w_init, name ='fc1')\n\n net_fc3 = tl.layers.DenseLayer(net_fc2, n_units=256, act = tf.nn.relu, \n W_init=w_init, name ='fc2')\n net_fc4 = tl.layers.DenseLayer(net_fc3, n_units=39, act = tf.nn.relu, \n W_init=w_init, name ='fc3')\n\n return net_fc4\n\n\ndef pose_estimator(ex1, ex2,is_train=True, reuse=False):\n #print ex.get_shape()#.as_list(), 'pose_estimator input'\n w_init = tf.random_normal_initializer(stddev=0.1)\n with tf.variable_scope(\"pose_estimator\", reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n #input0 = tl.layers.InputLayer(ex1, name='input_layer0')\n #input1 = tl.layers.InputLayer(ex2, name='input_layer1')\n\n net_flat0 = FlattenLayer(ex1, name = 'flatten1')\n net_flat1 = FlattenLayer(ex2, name = 'flatten2')\n concat_layer = tl.layers.ConcatLayer([net_flat0, net_flat1], 1, name ='concat_layer')\n\n net_fc1 = tl.layers.DenseLayer(net_flat, n_units=800, act = tf.nn.relu, \n W_init=w_init, name ='dense0')\n\n net_fc2 = tl.layers.DenseLayer(net_fc1, n_units=256, act = tf.nn.relu, \n W_init=w_init, name ='dense1')\n\n net_fc3 = tl.layers.DenseLayer(net_fc2, n_units=128, act = tf.nn.relu, \n W_init=w_init, name ='dense2')\n\n net_fc4 = tl.layers.DenseLayer(net_fc3, n_units=39, act = tf.nn.relu, \n W_init=w_init, name ='dense3')\n\n return net_fc4\n\n\n\ndef pose_predictor(ex,is_train=True, reuse=False):\n #print ex.get_shape().as_list(), 'pose_predictor input'\n w_init = tf.random_normal_initializer(stddev=0.1)\n with tf.variable_scope(\"pose_predictor\", reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n #inputs = InputLayer(ex, name='inputs')\n\n net_flat = FlattenLayer(ex, name = 'flatten')\n\n net_fc1 = tl.layers.DenseLayer(net_flat, n_units=800, act = tf.nn.relu, \n W_init=w_init, name ='dense0')\n\n net_fc2 = tl.layers.DenseLayer(net_fc1, n_units=256, act = tf.nn.relu, \n W_init=w_init, name ='dense1')\n\n net_fc3 = tl.layers.DenseLayer(net_fc2, n_units=128, act = tf.nn.relu, \n W_init=w_init, name ='dense2')\n\n net_fc4 = tl.layers.DenseLayer(net_fc3, n_units=39, act = tf.nn.relu, \n W_init=w_init, name ='dense3')\n\n return net_fc4\n\n\ndef domain_discriminator(ex,is_train=True, reuse=False):\n #print ex.get_shape().as_list(), 'pose_predictor input'\n w_init = tf.random_normal_initializer(stddev=0.1)\n with tf.variable_scope(\"domain_discriminator\", reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n inputs = InputLayer(ex, name='inputs')\n\n net_flat = FlattenLayer(inputs, name = 'flatten')\n\n net_fc1 = tl.layers.DenseLayer(net_flat, n_units=800, act = tf.nn.relu, \n W_init=w_init, name ='disc0')\n\n net_fc2 = tl.layers.DenseLayer(net_fc1, n_units=256, act = tf.nn.relu, \n W_init=w_init, name ='disc1')\n\n net_fc3 = tl.layers.DenseLayer(net_fc2, n_units=128, act = tf.nn.relu, \n W_init=w_init, name ='disc2')\n \n net_fc3 = tl.layers.DenseLayer(net_fc3, n_units=1, act = tf.nn.relu, \n W_init=w_init, name ='disc3')\n\n return net_fc3\n\n" }, { "alpha_fraction": 0.5251955986022949, "alphanum_fraction": 0.5402190685272217, "avg_line_length": 49.02362060546875, "blob_id": "34268904075d889bbe427860801765f11a38727e", "content_id": "e3809a8b4bef94af3ab23e1f2838b051d7564d4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6390, "license_type": "no_license", "max_line_length": 140, "num_lines": 127, "path": "/train.py", "repo_name": "xiezhongzhao/Hand-Pose-Estimation", "src_encoding": "UTF-8", "text": "#! /usr/bin/python\n# -*- coding: utf8 -*-\n\nimport tensorflow as tf\nimport tensorlayer as tl\nimport numpy as np\nimport os, time\nimport model\n\ndef main():\n save_dir = \"checkpoint\"\n tl.files.exists_or_mkdir(save_dir)\n tl.files.exists_or_mkdir(\"samples/{}\".format(task))\n\n import prepare_data as dataset\n x_train = dataset.x_train_input\n y_train = dataset.y_train_target\n x_test = dataset.x_test_input\n y_test = dataset.y_test_target\n\n batch_size = 10\n lr = 0.0001\n n_epoch = 50\n print_freq_step = 100\n\n # show one slice\n X = np.asarray(X_train[80])\n y = np.asarray(y_train[80])\n # print(X.shape, X.min(), X.max()) # (240, 240, 4) -0.380588 2.62761\n # print(y.shape, y.min(), y.max()) # (240, 240, 1) 0 1\n nw, nh, nz = X.shape\n vis_imgs(X, y, 'samples/{}/_train_im.png'.format(task))\n\n\n with tf.device('/cpu:0'):\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n with tf.device('/gpu'): #<- remove it if you train on CPU or other GPU\n ###======================== DEFIINE VARIABLES =======================###\n real_img = tf.placeholder('float32', [batch_size, nw, nh, nz], name='real_img')\n real_domain = tf.placeholder('float32', [batch_size, 1], name='real_domain')\n real_pose = tf.placeholder('float32', [batch_size, 39], name='real_pose')\n \n synth_img = tf.placeholder('float32', [batch_size, nw, nh, nz], name='synth_img')\n synth_domain = tf.placeholder('float32', [batch_size, 1], name='synth_domain')\n synth_pose = tf.placeholder('float32', [batch_size, 39], name='synth_pose')\n \n ###======================== DEFINE MODEL =========================###\n real_feature = model.encoder_net(real_img, is_train=True, reuse=False)\n synth_feature = model.encoder_net(synth_img, is_train=True, reuse=True)\n\n real_reconstruct = model.decoder_net(real_feature, label=0, is_train=True, reuse = False)\n synth_reconstruct = model.decoder_net(synth_feature, label=1, is_train=True, reuse = True)\n real_reconstruct, synth_reconstruct = real_reconstruct.outputs, synth_reconstruct.outputs\n\n pred_real_domain = model.domain_discriminator(real_feature, is_train=True, reuse=False) \n pred_synth_domain = model.domain_discriminator(synth_feature, is_train=True,reuse=True)\n pred_real_domain, pred_synth_domain = pred_real_domain.outputs, pred_synth_domain.outputs\n\n ###======================== DEFINE LOSS =========================###\n real_rec_loss = tf.reduce_mean(tf.abs(real_reconstruct - real_img))\n synth_rec_loss = tf.reduce_mean(tf.abs(synth_reconstruct - synth_img))\n rec_loss = real_rec_loss + synth_rec_loss\n\n std_real_domain = tf.reshape(0.0 * tf.ones([batch_size, 200]))\n std_synth_domain = tf.reshape(1.0 * tf.ones([batch_size, 200]))\n\n D_real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = pred_real_domain, labels = std_real_domain))\n D_synth_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = pred_synth_domain, labels = std_synth_domain))\n D_loss = D_real_loss + D_synth_loss\n\n E_real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = pred_real_domain, labels = 1-std_real_domain))\n E_synth_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = pred_synth_domain, labels = 1-std_synth_domain))\n E_loss = E_real_loss + E_synth_loss\n\n lamda = 1\n G_loss = rec_loss + lamda*E_loss\n\n ###======================== GET TRAINABLE VARIABLES =========================###\n enc_vars = tl.layers.get_variables_with_name('encoder_net', True, True)\n dec_vars = tl.layers.get_variables_with_name('decoder_net', True, True)\n dis_vars = tl.layers.get_variables_with_name('domain_discriminator', True, True)\n \n ###======================== TEST =========================###\n #PASS\n \n\n # ###======================== DEFINE TRAIN OPTS =======================###\n with tf.device('/gpu:0'):\n with tf.variable_scope('learning_rate_g'):\n g_lr = tf.Variable(lr_g, trainable=False)\n train_encoder = tf.train.AdamOptimizer(g_lr, beta1=beta1).minimize(G_loss, var_list=enc_vars)\n\n with tf.device('/gpu:0'):\n with tf.variable_scope('learning_rate_g'):\n g_lr = tf.Variable(lr_g, trainable=False)\n train_decoder = tf.train.AdamOptimizer(g_lr, beta1=beta1).minimize(rec_loss, var_list=dec_vars)\n\n with tf.device('/gpu:0'):\n with tf.variable_scope('learning_rate_d'):\n d_lr = tf.Variable(lr_d, trainable=False)\n train_disc = tf.train.AdamOptimizer(d_lr, beta1=beta1).minimize(D_loss, var_list=dis_vars)\n\n ###======================== LOAD MODEL ==============================###\n \ttl.layers.initialize_global_variables(sess)\n ## load existing model if possible\n \t#tl.files.load_and_assign_npz(sess=sess, name=save_dir+'/aue_{}.npz'.format(task), network=net)\n\n for epoch in range(0, n_epoch+1):\n epoch_time = time.time()\n n_batch = 0\n\n for batch in tl.iterate.minibatches(inputs=x_train, targets=y_train,\n \t\t\t\t\t\t\t\t\t\tbatch_size=batch_size, shuffle=True):\n images, labels = batch\n step_time = time.time()\n\n _,_, _rec_loss, _reconstruct, _pose_loss = sess.run([train_aue, train_pose_predictor, rec_loss, reconstruct, pose_loss],\n {t_image:images, })\n n_batch += 1\n\n if n_batch % print_freq_step == 0:\n print(\"Epoch %d step %d loss %f took %fs (2d with distortion)\"\n \t% (epoch, n_batch, _rec_loss, time.time()-step_time))\n\n for i in range(batch_size):\n vis_imgs2(t_images[i], reconstruct[i], \"samples/{}/train_{}.png\".format(task, epoch))\n break\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5517042279243469, "alphanum_fraction": 0.5944540500640869, "avg_line_length": 35.8510627746582, "blob_id": "af9b9beb1b50202ba717eda1b13122e298168c1a", "content_id": "f32b4cd88ecc7188fbcca974a7d77405eb26ab2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1731, "license_type": "no_license", "max_line_length": 99, "num_lines": 47, "path": "/model_aue.py", "repo_name": "xiezhongzhao/Hand-Pose-Estimation", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import *\nimport tensorlayer as tl\nfrom tensorlayer.layers import *\n\nslim = tf.contrib.slim\nlayers = tf.contrib.layers\narg_scope = tf.contrib.framework.arg_scope\nFLAGS = tf.app.flags.FLAGS\n\ndef aue_net(x, is_train=True, reuse=False):\n n_filter = 32\n _, nx, ny, nz = x.get_shape().as_list()\n print(\"nx,ny,nz: \", nx, ny, nz)\n\n w_init = tf.random_normal_initializer(stddev=0.02)\n\n with tf.variable_scope(\"aue_net\", reuse=reuse):\n tl.layers.set_name_reuse(reuse)\n inputs = InputLayer(x, name='inputs')\n # Encoder\n net_conv0 = Conv2d(inputs, n_filter, (3, 3), (2, 2), act=tf.nn.elu,\n padding='SAME', W_init=w_init, name='conv0') # (nx/2,ny/2,n)\n\n net_conv1 = Conv2d(net_conv0, 2*n_filter, (3, 3), (2, 2), act=tf.nn.elu,\n padding='SAME', W_init=w_init, name='conv1') # (nx/4,ny/4,2*n)\n\n net_conv2 = Conv2d(net_conv1, 4*n_filter, (3, 3), (2, 2), act=tf.nn.elu,\n padding='SAME', W_init=w_init, name='conv2') # (nx/8,ny/8,4*n)\n\n print(\"net_conv2 shape:\", net_conv2.outputs.get_shape())\n\n #net_flat2 = FlattenLayer(net_conv2, name='flatten2')\n #net_dense = DenseLayer(net_flat2, )\n\n # Decoder\n net_deconv2 = DeConv2d(net_conv2, 2*n_filter, (3, 3), (nx/4, ny/4), (2, 2), name='deconv2')\n net_deconv1 = DeConv2d(net_deconv2, n_filter, (3, 3), (nx/2, ny/2), (2, 2), name='deconv1')\n net_deconv0 = DeConv2d(net_deconv1, nz, (3, 3), (nx, ny), (2, 2), name='deconv0')\n\n\treturn net_deconv0" } ]
6
NLeSC/cwltool-service
https://github.com/NLeSC/cwltool-service
312eff9c758c4ef157023db8b29205e960902f16
6d1f04a1703784f5b84b047d2eb21c5cbcf400dd
aebab2335881449d9270304f26e89803de1f4e71
refs/heads/master
2021-01-13T07:30:39.321555
2016-05-19T01:34:38
2016-05-19T01:34:38
71,349,978
0
0
null
2016-10-19T11:24:12
2016-05-19T01:36:29
2016-05-19T01:36:28
null
[ { "alpha_fraction": 0.6110590696334839, "alphanum_fraction": 0.6148078441619873, "avg_line_length": 30.382352828979492, "blob_id": "3f240dec5a5b095d7757e34a61300d1e0a810fdb", "content_id": "fbce4bb86deb10a5add4127fd2c3eb89f6ba4205", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1067, "license_type": "no_license", "max_line_length": 81, "num_lines": 34, "path": "/setup.py", "repo_name": "NLeSC/cwltool-service", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport setuptools.command.egg_info as egg_info_cmd\nimport shutil\n\nfrom setuptools import setup, find_packages\n\nSETUP_DIR = os.path.dirname(__file__)\nREADME = os.path.join(SETUP_DIR, 'README')\n\nsetup(name='cwltool_service',\n version='2.0',\n description='Common workflow language runner service',\n long_description=open(README).read(),\n author='Common workflow language working group',\n author_email='[email protected]',\n url=\"https://github.com/common-workflow-language/cwltool-service\",\n download_url=\"https://github.com/common-workflow-language/cwltool-service\",\n license='Apache 2.0',\n py_modules=[\"cwltool_stream\", \"cwl_flask\", \"cwltool_client\"],\n install_requires=[\n 'Flask',\n 'requests',\n 'PyYAML'\n ],\n entry_points={\n 'console_scripts': [ \"cwltool-stream=cwltool_stream:main\",\n \"cwl-server=cwl_flask:main\",\n \"cwl-client=cwl_client:main\"]\n },\n zip_safe=True\n)\n" } ]
1
orianao/cssproj
https://github.com/orianao/cssproj
8c6c7e4500e3cf367a2e9bd9445d1eda5e5a388a
ae804a3f009d6532d55112437e784897b2bd07b7
2b49bdf59bedcf0f6c0e5e1b1e619e97be5340c6
refs/heads/master
2020-03-17T04:24:27.905681
2018-05-13T12:43:06
2018-05-13T12:43:06
133,273,595
0
0
null
2018-05-13T20:54:19
2018-05-13T12:44:08
2018-05-13T13:47:01
null
[ { "alpha_fraction": 0.702974259853363, "alphanum_fraction": 0.7098070979118347, "avg_line_length": 34.5428581237793, "blob_id": "44e6a1eda174d24f09b16bb8386f0ab5e444b5b2", "content_id": "37736892e83a9da7965e918fb47398a6d1ce0453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2488, "license_type": "no_license", "max_line_length": 119, "num_lines": 70, "path": "/app/utils.py", "repo_name": "orianao/cssproj", "src_encoding": "UTF-8", "text": "from db import DatabaseController as DbC\n\ndef get_results():\n\tall_results = DbC.get_admission_results(1)\n\treturn all_results\n\ndef calculate_results():\n\tspecializations = DbC.get_all_specializations()\n\tcandidates = DbC.get_all_candidates()\n\trepartition = []\n\tspecs = {}\n\topt_arr = {}\n\t\n\tfor item in specializations:\n\t\tspecs[item.identifier] = {}\n\t\tspecs[item.identifier][\"name\"] = item.name \n\t\tspecs[item.identifier][\"capacity\"] = item.capacity \n\t\tspecs[item.identifier][\"free_spots\"] = item.capacity\n\t\n\t\n\tfor item in candidates:\n\t\tr = DbC.AdmissionResult()\n\t\tr.candidate_cnp = item.cnp\n\t\tr.final_score = max(item.info_grade, item.math_grade)*0.3 + item.high_school_avg_grade*0.2 + 0.5*item.admission_grade\n\t\tr.specialization_id = item.first_option\n\t\tr.allocation = DbC.AdmissionStatus.UNPROCESSED\n\t\trepartition.append(r)\n\t\topt_arr[str(item.cnp)] = {}\n\t\topt_arr[str(item.cnp)][\"first_option\"] = item.first_option\n\t\topt_arr[str(item.cnp)][\"second_option\"] = item.second_option\n\n\trepartition = sorted(repartition, key = lambda x: (x.specialization_id, (-1)*x.final_score, ))\n\t\n\tfor item in repartition:\n\t\tif item.final_score < 5:\n\t\t\titem.allocation = DbC.AdmissionStatus.REJECTED\n\t\t\tcontinue\n\t\tif specs[item.specialization_id][\"free_spots\"] > 2:\n\t\t\titem.allocation = DbC.AdmissionStatus.FREE\n\t\t\tspecs[item.specialization_id][\"free_spots\"] -= 1\n\t\telif specs[item.specialization_id][\"free_spots\"] > 0:\n\t\t\titem.allocation = DbC.AdmissionStatus.FEE\n\t\t\tspecs[item.specialization_id][\"free_spots\"] -= 1\n\t\telse:\n\t\t\titem.specialization_id = opt_arr[str(item.candidate_cnp)][\"second_option\"]\n\t\t\tif specs[item.specialization_id][\"free_spots\"] > 2:\n\t\t\t\titem.allocation = DbC.AdmissionStatus.FREE\n\t\t\t\tspecs[item.specialization_id][\"free_spots\"] -= 1\n\t\t\telif specs[item.specialization_id][\"free_spots\"] > 0:\n\t\t\t\titem.allocation = DbC.AdmissionStatus.FEE\n\t\t\t\tspecs[item.specialization_id][\"free_spots\"] -= 1\n\t\t\telse:\n\t\t\t\titem.allocation = DbC.AdmissionStatus.REJECTED\n\t\t# print(\"Candidate CNP: \", item.candidate_cnp)\n\t\t# print(\"Admission Grade: \", item.final_score)\n\t\t# print(\"AdmissionResult: \", item.allocation)\n\t\t# print(\"Specialization: \", specs[item.specialization_id][\"name\"])\n\t\t# print(\"Specialization ID: \", item.specialization_id)\n\treturn repartition\n\ndef set_results():\n\tresults = calculate_results()\n\t\n\tfor item in results:\n\t\tif DbC.save_admission_result_for_candidate(item) != \"OK\":\n\t\t\traise \"Error in repartition processing!\"\n\t\n\tprint(\"Repartition completed successfully.\")\n\t\n# set_results()\n" } ]
1
CNXTEoE/Stall-Monitor-Notifications
https://github.com/CNXTEoE/Stall-Monitor-Notifications
3f78cdeca90d684a1ca22e0330caf6ab48e86329
86d68d9db023ef587b0d47009deb56ea34a8ef01
4296e28d8a97c45a1d0ecb98d35900523b91993f
refs/heads/master
2017-06-23T17:58:16.471754
2015-02-20T20:27:20
2015-02-20T20:27:20
83,186,036
1
0
null
2017-02-26T05:42:57
2015-02-20T20:28:46
2015-02-20T20:27:24
null
[ { "alpha_fraction": 0.7824675440788269, "alphanum_fraction": 0.7824675440788269, "avg_line_length": 101.66666412353516, "blob_id": "700bec345d39235d85893009efe37e77010b7450", "content_id": "5ba124edec419ae1b0dad362e3653d4e3a03ee4c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 308, "license_type": "permissive", "max_line_length": 288, "num_lines": 3, "path": "/README.md", "repo_name": "CNXTEoE/Stall-Monitor-Notifications", "src_encoding": "UTF-8", "text": "## What this does\n\nThis connects to RabbitMQ, listens for messages from [Stall-Monitor-Raspberry-Pi](https://github.com/projectweekend/Stall-Monitor-Raspberry-Pi). When a message is received, the status of the bathroom stall door is sent out via a [GCM](https://developer.android.com/google/gcm/index.html).\n" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.6593406796455383, "avg_line_length": 14.166666984558105, "blob_id": "02fc92e4e8b18f0bac9d4c7a6c5dabe56c9d5e4c", "content_id": "f801b0afc6f997c3ebb6e85575f8c4335667dc89", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 91, "license_type": "permissive", "max_line_length": 17, "num_lines": 6, "path": "/requirements.txt", "repo_name": "CNXTEoE/Stall-Monitor-Notifications", "src_encoding": "UTF-8", "text": "greenlet==0.4.5\npika==0.9.14\nPika-Pack==1.0.4\npyecho==0.0.2\npymongo==2.8\npython-gcm==0.1.5\n" }, { "alpha_fraction": 0.6722772121429443, "alphanum_fraction": 0.6732673048973083, "avg_line_length": 18.80392074584961, "blob_id": "daed5cc29c654de6fc8210990d8eba8af8ab208b", "content_id": "e179c307a94e93b731aaebc0387b6c60cb9c1cb6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "permissive", "max_line_length": 74, "num_lines": 51, "path": "/main.py", "repo_name": "CNXTEoE/Stall-Monitor-Notifications", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport sys\nfrom gcm import GCM\nfrom pymongo import MongoClient\nfrom pika_pack import Listener\n\n\nRABBIT_URL = os.getenv('RABBIT_URL', None)\nassert RABBIT_URL\n\nMONGO_URL = os.getenv('MONGO_URL', None)\nassert MONGO_URL\n\nGCM_API_KEY = os.getenv('GCM_API_KEY', None)\nassert GCM_API_KEY\n\nEXCHANGE = 'gpio_broadcast'\n\nDEVICE_KEY = 'stall_monitor'\n\nmongo_db = MongoClient(MONGO_URL).get_default_database()\ngcm = GCM(GCM_API_KEY)\n\n\ndef send_gcm(message):\n gcm_tokens = [g['token'] for g in mongo_db.gcms.find()]\n response = gcm.json_request(registration_ids=gcm_tokens, data=message)\n # TODO: handle bad tokens in the database\n\n\ndef send_notifications(message):\n send_gcm(message)\n\n\ndef main():\n rabbit_listener = Listener(\n rabbit_url=RABBIT_URL,\n exchange=EXCHANGE,\n routing_key=DEVICE_KEY,\n request_action=send_notifications)\n\n try:\n rabbit_listener.start()\n except:\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n" } ]
3
folmez/Handsfree-KGS
https://github.com/folmez/Handsfree-KGS
53a848790e758deb71e8fc0ea39fa008a8e438c9
2bd63dd49d8376f65b8fdd8342b2919d01af3408
c21b057a4628b96a65b8a76572cd5da383cadff9
refs/heads/master
2020-04-15T01:26:32.240648
2019-01-12T11:15:30
2019-01-12T11:15:30
164,276,042
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5757997035980225, "alphanum_fraction": 0.5870332717895508, "avg_line_length": 37.307376861572266, "blob_id": "3bed460f542f67c1763df8f73b2d38b188ab6e9a", "content_id": "694efee415eae94ce2400ae6eff81838024d28f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9347, "license_type": "no_license", "max_line_length": 108, "num_lines": 244, "path": "/play_handsfree_GO.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "from pynput.mouse import Button, Controller\nimport cv2\nimport imageio\nimport matplotlib.pyplot as plt\nimport threading\nimport time\nimport queue\nimport os\nimport numpy as np\nimport src\n\nframes = queue.Queue(maxsize=10)\n\nclass frameGrabber(threading.Thread):\n def __init__(self):\n # Constructor\n threading.Thread.__init__(self)\n\n def run(self):\n cam = cv2.VideoCapture(0)\n img_counter = 0\n while True:\n ret, frame = cam.read()\n if not ret:\n break\n img_name = f\"images/game_log/opencv_frame_{img_counter}.png\"\n cv2.imwrite(img_name, frame)\n print(\"{} written!\".format(img_name))\n frames.put(img_counter)\n img_counter += 1\n time.sleep(30)\n cam.release()\n\ndef verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, color, i, j):\n # Display a message to the user to put a stone\n print(f\"\\nPlease put a {color} stone at {src.convert_physical_board_ij_to_str(i,j)}...\")\n\n # Assert the stone with desired color is on the goban at the exact spot\n while True:\n time.sleep(5)\n frame_num = frames.get()\n img_name = f\"images/game_log/opencv_frame_{frame_num}.png\"\n rgb = imageio.imread(img_name)\n plt.imshow(rgb)\n plt.title(f\"This board should have a {color} stone at {src.convert_physical_board_ij_to_str(i,j)}.\")\n plt.show()\n ans = input(f\"Did you put a {color} stone at {src.convert_physical_board_ij_to_str(i,j)}? [y/n]: \")\n if ans is 'y':\n rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)\n assert src.is_this_stone_on_the_board(rgb, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, color, i, j, plot_stuff=True)\n remove_this_frame(img_name)\n frames.task_done()\n remove_unused_frames()\n break\n else:\n remove_this_frame(img_name)\n frames.task_done()\n\ndef remove_this_frame(img_name):\n os.remove(img_name)\n print('Frame', img_name, 'removed.')\n\ndef remove_unused_frames():\n print('Removing unused frames...')\n while True:\n time.sleep(1)\n try:\n frame_num = frames.get(False)\n except queue.Empty:\n # Handle empty queue here\n break\n else:\n # Handle task here and call q.task_done()\n frame_num = frames.get()\n img_name = f\"images/game_log/opencv_frame_{frame_num}.png\"\n remove_this_frame(img_name)\n frames.task_done()\n print('Unused frames removed...')\n\nboard_corners = []\ndef onclick(event):\n print(event.xdata, event.ydata)\n board_corners.append(event.xdata)\n board_corners.append(event.ydata)\n\nif __name__ == '__main__':\n\n # Initiate the frame grabber thread for goban pictures\n my_frame_grabber = frameGrabber()\n\n # Start running the threads!\n my_frame_grabber.start()\n print('Frame grabbing has started...')\n\n # MANUAL BOARD EDGE DETECTION FOR THE PYHSICAL BOARD\n # Show a plot frames and ask user to input boundaries\n while True:\n time.sleep(5)\n frame_num = frames.get()\n img_name = f\"images/game_log/opencv_frame_{frame_num}.png\"\n rgb = imageio.imread(img_name)\n fig = plt.figure()\n plt.imshow(rgb)\n plt.title(\"Please click on UL-UR-BL-BR corners or close plot...\")\n fig.canvas.mpl_connect('button_press_event', onclick)\n plt.show()\n if not board_corners:\n # Skip if nothing is clicked\n remove_this_frame(img_name)\n frames.task_done()\n else:\n # Read goban corners\n ob = board_corners\n assert ob[2] > ob[0] and ob[6] > ob[4] and \\\n ob[7] > ob[4] and ob[5] > ob[1]\n # Remove this filename as it served its purpose and break out of loop\n remove_this_frame(img_name)\n frames.task_done()\n break\n\n # Remove all unused frames at the end\n remove_unused_frames()\n\n # Remove non-goban part from the RGB matrix and make it a square matrix\n rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)\n\n # Find the indices of board points in the new square RGB matrix\n x_idx, y_idx = src.find_board_points(rgb, plot_stuff=False)\n\n # CALIBRATION OF PYHSICAL BOARD\n # Ask the user to put black and white stones on the board\n print('\\nPlease put black stones on corners and a white stone at center')\n bxy, wxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]\n while True:\n time.sleep(5)\n frame_num = frames.get()\n img_name = f\"images/game_log/opencv_frame_{frame_num}.png\"\n rgb = imageio.imread(img_name)\n plt.imshow(rgb)\n plt.title('Did you put black on corners and white at center?')\n plt.show()\n ans = input('Did you put black stones on corners and a white stone at center? [y/n]: ')\n if ans is 'y':\n # Remove non-goban part from the RGB matrix and make it a square matrix\n rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)\n # Calibrate\n red_scale_th1, blue_scale_th1 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)\n # Refind stones using the above thresholds\n bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \\\n red_scale_th1, blue_scale_th1, plot_stuff=False)\n remove_this_frame(img_name)\n frames.task_done()\n remove_unused_frames()\n break\n else:\n remove_this_frame(img_name)\n frames.task_done()\n\n print('\\nPlease put white stones on corners and a black stone at center')\n wxy, bxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]\n while True:\n time.sleep(5)\n frame_num = frames.get()\n img_name = f\"images/game_log/opencv_frame_{frame_num}.png\"\n rgb = imageio.imread(img_name)\n plt.imshow(rgb)\n plt.title('Did you put white on corners and black at center?')\n plt.show()\n ans = input('Did you put white stones on corners and a black stone at center? [y/n]: ')\n if ans is 'y':\n # Remove non-goban part from the RGB matrix and make it a square matrix\n rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)\n # Calibrate\n red_scale_th2, blue_scale_th2 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)\n # Refind stones using the above thresholds\n bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \\\n red_scale_th2, blue_scale_th2, plot_stuff=False)\n remove_this_frame(img_name)\n frames.task_done()\n remove_unused_frames()\n break\n else:\n remove_this_frame(img_name)\n frames.task_done()\n\n red_scale_th = 0.5 * (red_scale_th1 + red_scale_th2)\n blue_scale_th = 0.5 * (blue_scale_th1 + blue_scale_th2)\n\n # VERIFY CALIBRATION OF PHYSICAL BOARD\n print(' [PLEASE KEEP IN MIND THAT YOUR LOWER-LEFT CORNER IS (1,1)]')\n verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'black', 3, 4)\n verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'white', 1, 1)\n verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'black', 10, 10)\n verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'white', 19, 19)\n print(\"CALIBRATION IS VERIFIED\\n\" + 50*\"-\")\n\n # DIGITAL BOARD DETECTION\n\n # Ask the user to open a KGS board\n print('\\n OPEN A KGS BOARD/GAME NOW')\n input('ENTER when the digital board is open: ')\n\n # Get the user to click on come corners to get to know the digital board\n UL_x, UL_y, goban_step = src.get_goban_corners()\n\n # Test by moving to the star points on the board\n for str in ['D16', 'K16', 'Q16', 'D10', 'K10', 'Q10', 'D4', 'K4', 'Q4']:\n i, j = src.str_to_integer_coordinates(str)\n x, y = src.int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step)\n src.make_the_move(mouse, x, y, no_click=True)\n\n # START REPLAYING PYHSICAL BOARD MOVES ON THE DIGITAL BOARD\n # Plan - 1) check frames continously until a move is made by you\n # 2) check digital board until a move is made by your opponent\n\n\n # First, remove all unused frames\n remove_unused_frames()\n\n # Scan the frames for moves every five seconds\n mouse = Controller() # obtain mouse controller\n bxy, wxy = [], [] # empty board in the beginning\n while True:\n time.sleep(5)\n frame_num = frames.get()\n img_name = f\"images/game_log/opencv_frame_{frame_num}.png\"\n color, i, j = src.scan_next_move(img_name, ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, bxy, wxy)\n if color is not None:\n # Play the move and update the stone lists\n bxy, wxy = src.play_next_move_on_digital_board(mouse, color, \\\n i, j, bxy, wxy, UL_x, UL_y, goban_step)\n # Start checking the digital board for new moves\n else:\n # Remove this frame and start waiting for the next frame\n remove_this_frame(img_name)\n frames.task_done()\n\n\n # Wait for the threads to finish...\n my_frame_grabber.join()\n\n print('Main Terminating...')\n" }, { "alpha_fraction": 0.6167512536048889, "alphanum_fraction": 0.6497461795806885, "avg_line_length": 31.83333396911621, "blob_id": "60b3d16234fba18bec987b50e9ccab82d6df30f1", "content_id": "176668edf2b4b6fd7df41ce55b7941ea127e213e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "no_license", "max_line_length": 82, "num_lines": 12, "path": "/tests/test_screenshot_actions.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "import imageio\nimport pytest\nimport sys, os\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nimport src\n\ndef test_get_digital_goban_state():\n rgb_pix = imageio.imread('images/digital_goban.png')\n\n # Process KGS goban grayscale and find the stones\n assert src.get_digital_goban_state(rgb_pix) == \\\n set([(1,1,1), (1, 1, 14), (2,19,19)])\n" }, { "alpha_fraction": 0.5562095642089844, "alphanum_fraction": 0.5844614505767822, "avg_line_length": 28.807018280029297, "blob_id": "5c94ad058e6c95946d3b232bd04ea94f6a233680", "content_id": "9aad01de1aa87f771bb3e4b105c37e68112eb02a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1699, "license_type": "no_license", "max_line_length": 92, "num_lines": 57, "path": "/src/mouse_actions.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "from pynput.mouse import Button, Controller\nimport src\nimport time\n\ndef get_goban_corners():\n # Obtain mouse controller\n mouse = Controller()\n\n # Ask the user to define goban corners\n print('Move cursor to upper-left (A19) corner of Goban and keep it there five seconds')\n time.sleep(5)\n (UL_x, UL_y) = mouse.position\n print(f\"Upper-Left: ({UL_x},{UL_y})\")\n print()\n\n print('Move cursor to bottom-right (T1) corner of Goban and keep it there five seconds')\n time.sleep(5)\n (BR_x, BR_y) = mouse.position\n print(f\"Bottom-Right: ({BR_x},{BR_y})\")\n print()\n\n # Compute goban step sizes\n goban_step = 0.5 * (BR_x - UL_x) * 1/18 + 0.5 * (BR_y - UL_y) * 1/18\n print(f\"Goban-steps is {goban_step}\")\n\n return UL_x, UL_y, goban_step\n\ndef make_the_move(mouse, x, y, no_click=False):\n (cx, cy) = mouse.position\n time.sleep(0.5)\n mouse.move(x - cx, y - cy)\n time.sleep(0.2)\n if not no_click:\n mouse.click(Button.left, 1)\n\ndef int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step):\n x = UL_x + (i-1) * goban_step\n y = UL_y + (j-1) * goban_step\n return x, y\n\ndef str_to_integer_coordinates(str):\n # Upper-lef corner is 1,1 and Bottom-right corner is 19,19\n # Goban boards skip the letter I\n j = 19 - int(str[1:3]) + 1\n if ord(str[0]) < ord('I'):\n i = ord(str[0]) - ord('A') + 1\n else:\n i = ord(str[0]) - ord('A')\n return i,j\n\ndef int_coords_to_str(i, j):\n # Upper-lef corner is 1,1 and Bottom-right corner is 19,19\n # Goban boards skip the letter I\n if i <= ord('I') - ord('A'):\n return chr(ord('A') + i-1) + f\"{20-j}\"\n else:\n return chr(ord('A') + i) + f\"{20-j}\"\n" }, { "alpha_fraction": 0.7190082669258118, "alphanum_fraction": 0.7190082669258118, "avg_line_length": 16.285715103149414, "blob_id": "f3d01d14f56fe723c95bb1ba55a518f2cf589d54", "content_id": "c2f8f9402cde8f63abad322e3652ce2045fbc3d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 121, "license_type": "no_license", "max_line_length": 38, "num_lines": 7, "path": "/src/cam_actions.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "import imageio\n\ndef get_pyhsical_goban_state(rgb_pix):\n pass\n\ndef picture_to_rgb(path):\n return misc.imageio(path)\n" }, { "alpha_fraction": 0.7917646765708923, "alphanum_fraction": 0.7917646765708923, "avg_line_length": 69.83333587646484, "blob_id": "4c49e041ddf5138f868ee65690a2b52153a1379e", "content_id": "d536c6454f73d7116410d91789f820a771fd5b09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 850, "license_type": "no_license", "max_line_length": 301, "num_lines": 12, "path": "/README.md", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "# Handsfree-KGS\nPlay Go on KGS handsfree using the webcam\n\nThis projects aims to fulfill one of my dreams, playing GO on a pyhsical board against online opponents. I know I want this because I actually did play GO on a board that I owned in the past against KGS opponents. It was tedious but still worth it as playing on a pyhsical board is an amazing feeling.\n\nWith this project, after a game begins on KGS, I will play my move and the computer's webcam will figure out what move I played which will then automaticall place my stone by manipulating the mouse and click at the exact spot I played.\n\nThere maybe an another component added to this project in the future, a physical arm, that will take inputs from the computer and actually play my opponents moves on the board :))\n\nTESTING\n\nAn empty Goban must be open on the screen for proper testing\n" }, { "alpha_fraction": 0.5978928804397583, "alphanum_fraction": 0.6093063950538635, "avg_line_length": 34.59375, "blob_id": "df825e3624d096cbaf6a34c1b47b744c777cb33c", "content_id": "eef3aa5e4f5b6742d5237a7777462cb88013fbc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1139, "license_type": "no_license", "max_line_length": 79, "num_lines": 32, "path": "/make_goban_speak.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "import src\nimport time\n\nUL_x, UL_y, goban_step = src.get_goban_corners()\n\nprev_stone_set = set()\nprint(\"Started scanning the board for moves every 5 seconds...\")\nwhile True:\n # wait between screenshots\n time.sleep(5)\n # get board screenshot\n board_rgb_screenshot = src.KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step)\n # find the stones on the board\n current_stone_set = src.get_goban_state(board_rgb_screenshot)\n # is there a new stone on the board?\n if current_stone_set > prev_stone_set:\n # find the new stone\n stone = current_stone_set - prev_stone_set\n # IN THE FUTURE, ALLOW FOR OPPONENT TO MAKE A QUICK MOVE!!!\n assert len(stone) == 1\n # say the new moves on the board\n player = list(stone)[0][0] # 1-black, 2-white\n i, j = list(stone)[0][1], list(stone)[0][2]\n pos = src.int_coords_to_str(i,j)\n if player==1:\n update_msg = \"Black played at \" + pos\n elif player==2:\n update_msg = \"White played at \" + pos\n print(update_msg)\n prev_stone_set = current_stone_set\n else:\n print(\"No moves made!\")\n" }, { "alpha_fraction": 0.5499809980392456, "alphanum_fraction": 0.5822880864143372, "avg_line_length": 35.54166793823242, "blob_id": "3edac788875a1700eba322520eccb9e42d62b5b9", "content_id": "416bfe96bedb80eb526384fd9359e6b074a8db20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2631, "license_type": "no_license", "max_line_length": 82, "num_lines": 72, "path": "/src/screenshot_actions.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "import pyscreeze\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport src\n\ndef get_digital_goban_state(rgb_pix, plot_stuff=False):\n # RGB of Black = [ 0, 0, 0]\n # RGB of White = [255, 255, 255]\n # RGB of Orange = [255, 160, 16]\n # Use red scale to find out black stones, blue scale to find out white stones\n # (1, 1, 1) - Black A1 (upper corner)\n # (2, 19, 19) - White T10 (lower corner)\n idx = np.arange(19)+1\n\n m, n, z = rgb_pix.shape\n assert m == n\n\n # Approximate diameter of a stone in terms of pixels\n stone_diam = n/19\n\n # Calculate pixels where stone centers will be positioned\n stone_centers = np.round(stone_diam*idx) - 0.5 * np.round(stone_diam) - 1\n stone_centers = stone_centers.astype(int)\n\n # For every stone center, we will check a square matrix centered around\n # the stone center and find the average color. If it is black, then the\n # stone is black, if it is white, then the stone is white, otherwise no stone\n square_length_in_a_stone = int(np.round((n/19) / np.sqrt(2)))\n if square_length_in_a_stone % 2 == 0:\n d = square_length_in_a_stone / 2\n else:\n d = (square_length_in_a_stone-1) / 2\n d = int(d-1) # just in case, make square smaller and integer\n\n # Calculate the mean of a small matrix around every board point to find out\n # if there is a black stone or white stone or nothing\n stones = set()\n for posi, i in enumerate(stone_centers, start=1):\n for posj, j in enumerate(stone_centers, start=1):\n # Find black stones\n mat = rgb_pix[:,:,0]\n color = np.mean(np.mean(mat[i:i+d+1, j:j+d+1]))\n if color < 125:\n stones.add((1, posj, posi)) # black stone\n rgb_pix[i-d+1:i+d, j-d+1:j+d, :] = 0\n\n # Find white stones\n mat = rgb_pix[:,:,2]\n color = np.mean(np.mean(mat[i:i+d+1, j:j+d+1]))\n if color > 125:\n stones.add((2, posj, posi)) # white stone\n rgb_pix[i-d+1:i+d, j-d+1:j+d] = 255\n\n # Plot for debugging\n if plot_stuff:\n plt.imshow(rgb_pix)\n plt.show()\n\n return stones\n\ndef KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step):\n UL_outer_x = UL_x - 0.5*goban_step\n UL_outer_y = UL_y - 0.5*goban_step\n BR_outer_x = UL_x + 18*goban_step + 0.5*goban_step\n BR_outer_y = UL_y + 18*goban_step + 0.5*goban_step\n im = pyscreeze.screenshot(region=(UL_outer_x, UL_outer_y, \\\n BR_outer_x-UL_outer_x, BR_outer_y-UL_outer_y))\n\n pix = np.array(im)\n rgb_pix = pix[...,:3]\n\n return rgb_pix\n" }, { "alpha_fraction": 0.6968504190444946, "alphanum_fraction": 0.7047244310379028, "avg_line_length": 17.14285659790039, "blob_id": "53f23284ea36b8938b78c32bec7f4565a2f052d6", "content_id": "88193e0db08614595202bcc63d9f79fde8100112", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 53, "num_lines": 14, "path": "/temp/plot_save_coordinates_on_click.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\n\nxy=[]\n\ndef onclick(event):\n print(event.xdata, event.ydata)\n xy.append((event.xdata, event.ydata))\n\nfig = plt.figure()\nplt.plot(range(10))\nfig.canvas.mpl_connect('button_press_event', onclick)\nplt.show()\n\nprint(xy)\n" }, { "alpha_fraction": 0.8075709939002991, "alphanum_fraction": 0.8075709939002991, "avg_line_length": 21.64285659790039, "blob_id": "9e68250b126f489ee588d4b508619c1f59f45297", "content_id": "994d0915e23259b88fb84537a57dcf38c62ac0ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 317, "license_type": "no_license", "max_line_length": 88, "num_lines": 14, "path": "/TODO.md", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "- Processing pyhsical goban board position\n\n# Step-by-step\nSet-up laptop\nSet-up goban\n\nCALIBRATION\nTake a picture with laptop\nShow picture to user, manually input goban boundaries\nCalibrate black and white stones by asking the user to place stones on corners and stars\nTest a few random moves\n\nSTART PLAYING\nopen KGS\n" }, { "alpha_fraction": 0.6465053558349609, "alphanum_fraction": 0.6599462628364563, "avg_line_length": 32.06666564941406, "blob_id": "4a54a30b8f6fd638dda2f5bc97eff1f4796da1a0", "content_id": "6923d406d06b72e3a7ff22c601f51e2ffebe3b43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1488, "license_type": "no_license", "max_line_length": 83, "num_lines": 45, "path": "/auto_goban_detection.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport imageio\nimport numpy as np\nimport src\n\nIMG_PATH = 'images/empty_pyshical_goban1.png'\n\nboard_corners = []\ndef onclick(event):\n print(event.xdata, event.ydata)\n board_corners.append((event.xdata, event.ydata))\n\n# Get RGB matrix of the picture with goban\nrgb = imageio.imread(IMG_PATH)\nfig = plt.figure()\nplt.imshow(rgb)\nplt.title(\"Please click on UL-UR-BL-BR corners...\")\nfig.canvas.mpl_connect('button_press_event', onclick)\nplt.show()\n\nUL_outer_x, UL_outer_y = board_corners[0]\nUR_outer_x, UR_outer_y = board_corners[1]\nBL_outer_x, BL_outer_y = board_corners[2]\nBR_outer_x, BR_outer_y = board_corners[3]\n\n# Remove non-goban part from the RGB matrix and make it a square matrix\nrgb = src.rescale_pyhsical_goban_rgb(rgb, \\\n UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \\\n BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y)\n\n# Find the indices of board points in the new square RGB matrix\nx_idx, y_idx = src.find_board_points(rgb, plot_stuff=True)\n\n# Mark board points\nsrc.mark_board_points(rgb, x_idx, y_idx)\n\n#bxy, wxy = [(4,4), (16,4)], [(4,16),(16,16)]\n\n#src.mark_board_points(rgb, x_idx, y_idx, bxy, wxy)\n\n#red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)\n#bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th)\n\n#src.is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, \\\n# 'black', 16,4)\n" }, { "alpha_fraction": 0.5812127590179443, "alphanum_fraction": 0.6373418569564819, "avg_line_length": 49.94074249267578, "blob_id": "8540d1db86e4f92a0d65707618e11ab290f34ef3", "content_id": "ea31c635371ef9bc2003d5f279b3e9c7139cb83a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6877, "license_type": "no_license", "max_line_length": 85, "num_lines": 135, "path": "/tests/test_play_handsfree_GO.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "import sys, os\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom pynput.mouse import Button, Controller\nimport pytest\nimport imageio\nimport src\n\n\n# Write a test of play_handsfree_GO.py using already existing frames\nimg_name = []\nfolder_name = 'images/sample_game_log/ex1/'\n# empty board for outer board boundary detection\nimg_name.append(folder_name + 'opencv_frame_1.png')\nUL_outer_x, UL_outer_y = 376.27419354838713, 91.34516129032261\nUR_outer_x, UR_outer_y = 962.08064516129020, 101.66774193548395\nBL_outer_x, BL_outer_y = 120.79032258064518, 641.0225806451613\nBR_outer_x, BR_outer_y = 1265.3064516129032, 652.6354838709677\n# black stones on corners and a white stone at center\nimg_name.append(folder_name + 'opencv_frame_3.png')\n# white stones on corners and a black stone at center\nimg_name.append(folder_name + 'opencv_frame_4.png')\n# verifying calibration\nimg_name.append(folder_name + 'opencv_frame_b_1_1.png') # black at (1,1)\nimg_name.append(folder_name + 'opencv_frame_b_1_19.png') # black at (1,19)\nimg_name.append(folder_name + 'opencv_frame_b_19_19.png') # black at (19,19)\nimg_name.append(folder_name + 'opencv_frame_b_19_1.png') # black at (19,1)\nimg_name.append(folder_name + 'opencv_frame_b_10_10.png') # black at (10,10)\nimg_name.append(folder_name + 'opencv_frame_b_4_4.png') # black at (4,4)\nimg_name.append(folder_name + 'opencv_frame_b_4_10.png') # black at (4,10)\nimg_name.append(folder_name + 'opencv_frame_b_4_16.png') # black at (4,16)\nimg_name.append(folder_name + 'opencv_frame_b_16_16.png') # black at (16,16)\nimg_name.append(folder_name + 'opencv_frame_w_1_1.png') # white at (1,1)\nimg_name.append(folder_name + 'opencv_frame_w_10_10.png') # white at (10,10)\nimg_name.append(folder_name + 'opencv_frame_w_16_16.png') # white at (16,16)\nimg_name.append(folder_name + 'opencv_frame_w_19_19.png') # white at (19,19)\n#opencv_frame_b_10_4.png\n#opencv_frame_b_10_16.png\n#opencv_frame_b_16_4.png\n#opencv_frame_b_16_10.png\n#opencv_frame_b_19_1.png\n#opencv_frame_w_1_19.png\n#opencv_frame_w_4_4.png\n#opencv_frame_w_4_10.png\n#opencv_frame_w_4_16.png\n#opencv_frame_w_10_16.png\n#opencv_frame_w_16_4.png\n#opencv_frame_w_16_10.png\n#opencv_frame_w_19_1.png\n\ndef test_play_handsfree_GO():\n ps = False\n # STEP 0 - EMPTY GOBAN\n # Get outer boundaries of pyhsical goban -- skipped for speed\n ob = [UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \\\n BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y]\n # Remove non-goban part from the RGB matrix and make it a square matrix\n # Find the indices of board points in the new square RGB matrix\n #UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \\\n # BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y = \\\n # src.get_pyhsical_board_outer_corners(img_name[0])\n rgb = imageio.imread(img_name[0])\n rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)\n x_idx, y_idx = src.find_board_points(rgb, plot_stuff=ps)\n\n # STEP 1 - GOBAN WITH BLACK STONES ON CORNERS AND A WHITE STONE AT CENTER\n rgb = imageio.imread(img_name[1])\n bxy, wxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]\n rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)\n red_scale_th1, blue_scale_th1 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)\n _, _ = src.mark_stones(rgb, x_idx, y_idx, \\\n red_scale_th1, blue_scale_th1, plot_stuff=ps)\n\n # STEP 2 - GOBAN WITH WHITE STONES ON CORNERS AND A BLACK STONE AT CENTER\n rgb = imageio.imread(img_name[2])\n wxy, bxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]\n rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)\n red_scale_th2, blue_scale_th2 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)\n _, _ = src.mark_stones(rgb, x_idx, y_idx, \\\n red_scale_th2, blue_scale_th2, plot_stuff=ps)\n\n red_scale_th = 0.5 * (red_scale_th1 + red_scale_th2)\n blue_scale_th = 0.5 * (blue_scale_th1 + blue_scale_th2)\n\n # STEP 3 - VERIFY CALIBRATION\n verify_calibration_for_test_purposes(img_name[3], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'black', 1, 1, ps)\n verify_calibration_for_test_purposes(img_name[4], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'black', 1, 19, ps)\n verify_calibration_for_test_purposes(img_name[5], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'black', 19, 19, ps)\n verify_calibration_for_test_purposes(img_name[6], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'black', 19, 1, ps)\n verify_calibration_for_test_purposes(img_name[7], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'black', 10, 10, ps)\n verify_calibration_for_test_purposes(img_name[8], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'black', 4, 4, ps)\n verify_calibration_for_test_purposes(img_name[9], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'black', 4, 10, ps)\n verify_calibration_for_test_purposes(img_name[10], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'black', 4, 16, ps)\n verify_calibration_for_test_purposes(img_name[11], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'black', 16, 16, ps)\n verify_calibration_for_test_purposes(img_name[12], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'white', 1, 1, ps)\n verify_calibration_for_test_purposes(img_name[13], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'white', 10, 10, ps)\n verify_calibration_for_test_purposes(img_name[14], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'white', 16, 16, ps)\n verify_calibration_for_test_purposes(img_name[15], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, 'white', 19, 19, ps)\n\n # DIGITAL BOARD DETECTION\n\n # Ask the user to open a KGS board\n print('\\n OPEN A KGS BOARD/GAME NOW')\n input('ENTER when the digital board is open: ')\n\n # Get the user to click on come corners to get to know the digital board\n UL_x, UL_y, goban_step = src.get_goban_corners()\n\n # START REPLAYING PYHSICAL BOARD MOVES ON THE DIGITAL BOARD\n mouse = Controller() # obtain mouse controller\n print(\"Placing a black stone at (10,10)\")\n bxy, wxy = [], [] # empty board in the beginning\n color, i, j = src.scan_next_move(img_name[7], ob, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, bxy, wxy, plot_stuff=ps)\n _, _ = src.play_next_move_on_digital_board(mouse, color, i, j, bxy, wxy, \\\n UL_x, UL_y, goban_step)\n\n\ndef verify_calibration_for_test_purposes(img, ob, x, y, r, b, c, i, j, ps):\n rgb = imageio.imread(img)\n rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)\n print(f\"Verifying a {c} stone at {src.convert_physical_board_ij_to_str(i,j)}...\")\n assert src.is_this_stone_on_the_board(rgb, x, y, r, b, c, i, j, ps)\n" }, { "alpha_fraction": 0.5758988261222839, "alphanum_fraction": 0.6225033402442932, "avg_line_length": 33.930233001708984, "blob_id": "f7ec2605e0623adab739a406a8a1f0b9ed8db017", "content_id": "bceac796f003e646005138834f52b03b2b0c1974", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1502, "license_type": "no_license", "max_line_length": 82, "num_lines": 43, "path": "/tests/test_mouse_actions.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "from pynput.mouse import Button, Controller\nimport time\nimport sys\nimport os\nimport pytest\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nimport src\n\ndef test_str_to_integer_coordinates():\n assert src.str_to_integer_coordinates('A19') == (1, 1)\n assert src.str_to_integer_coordinates('D16') == (4, 4)\n assert src.str_to_integer_coordinates('D10') == (4, 10)\n assert src.str_to_integer_coordinates('T1') == (19, 19)\n assert src.str_to_integer_coordinates('K10') == (10, 10)\n\ndef test_integer_coordinates_to_str():\n assert src.int_coords_to_str(1, 1) == 'A19'\n assert src.int_coords_to_str(4, 4) == 'D16'\n assert src.int_coords_to_str(4, 10) == 'D10'\n assert src.int_coords_to_str(19, 19) == 'T1'\n assert src.int_coords_to_str(10, 10) == 'K10'\n\[email protected]\ndef test_place_stones_on_all_stars():\n print()\n # Get goban corners\n UL_x, UL_y, goban_step = src.get_goban_corners()\n\n # Obtain mouse controller\n mouse = Controller()\n\n # Place stones on stars\n print('\\n', 41*'-')\n print(5*'-', 'Placing stones on all stars', 5*'-')\n print(41*'-', '\\n')\n for str in ['D16', 'K16', 'Q16', 'D10', 'K10', 'Q10', 'D4', 'K4', 'Q4']:\n i, j = src.str_to_integer_coordinates(str)\n x, y = src.int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step)\n src.make_the_move(mouse, x, y)\n\n # Get KGS goban as a square grayscale\n rgb_pix = src.KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step)\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6085106134414673, "avg_line_length": 32.57143020629883, "blob_id": "84de0c05ce304cf930555d1cec44e684c33c394f", "content_id": "b61f2043ade0ec26ea738946e95c272054ed67ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 46, "num_lines": 7, "path": "/setup.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\nsetup( name='Handsfree-KGS',\n version='0.0',\n description='Pay Handsfree Go on KGS',\n author='Fatih Olmez',\n author_email='[email protected]',\n packages=find_packages())\n" }, { "alpha_fraction": 0.5534579157829285, "alphanum_fraction": 0.5750699043273926, "avg_line_length": 38.42856979370117, "blob_id": "6b0564aca4e5853a62fcc0466f8b22294c56220a", "content_id": "572a7692ca595569c74b7ca74f6bdaab8dad4b25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15732, "license_type": "no_license", "max_line_length": 87, "num_lines": 399, "path": "/src/picture_actions.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.signal import argrelmin\nimport imageio\nimport src\n\n\ndef play_next_move_on_digital_board(mouse, color, i, j, bxy, wxy, \\\n UL_x, UL_y, goban_step):\n if color is not None:\n print(f\"New move: {color} played at {convert_physical_board_ij_to_str(i,j)}\")\n if color is 'black':\n bxy.append((i,j))\n elif color is 'white':\n wxy.append((i,j))\n # make the move\n x, y = src.int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step)\n src.make_the_move(mouse, x, y)\n return bxy, wxy\n\ndef convert_physical_board_ij_to_str(i,j):\n \"\"\"\n The pyhsical board will have the upper-left corner labeled as (1,1) and the\n bottom-right corner labeled as (19,19). This little script will help translate\n and correct and misalignment between the here-described labeling and the\n algorithm.\n \"\"\"\n return f\"({i},{j})\"\n\ndef scan_next_move(img_name, ob, x_idx, y_idx, red_scale_th, blue_scale_th, \\\n bxy, wxy, plot_stuff=False):\n rgb = imageio.imread(img_name)\n rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)\n bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, plot_stuff=plot_stuff)\n print(bxy ,wxy, bxy_new, wxy_new)\n if set(bxy_new) == set(bxy) and set(wxy_new) == set(wxy):\n color, i, j = None, None, None\n print('No new moves')\n elif len(set(bxy_new)-set(bxy)) == 1 and set(wxy_new) == set(wxy):\n color = 'black'\n [(i,j)] = list(set(bxy_new)-set(bxy))\n elif len(set(wxy_new)-set(wxy)) == 1 and set(bxy_new) == set(bxy):\n color = 'white'\n [(i,j)] = list(set(wxy_new)-set(wxy))\n else:\n raise ValueError('Move scanner error!')\n\n return color, i, j\n\nBOARD_CORNERS = []\ndef onclick(event):\n print(event.xdata, event.ydata)\n BOARD_CORNERS.append((event.xdata, event.ydata))\ndef get_pyhsical_board_outer_corners(img_name):\n rgb = imageio.imread(img_name)\n fig = plt.figure()\n plt.imshow(rgb)\n plt.title(\"Please click on UL-UR-BL-BR corners or close plot...\")\n fig.canvas.mpl_connect('button_press_event', onclick)\n plt.show()\n UL_outer_x, UL_outer_y = BOARD_CORNERS[0]\n UR_outer_x, UR_outer_y = BOARD_CORNERS[1]\n BL_outer_x, BL_outer_y = BOARD_CORNERS[2]\n BR_outer_x, BR_outer_y = BOARD_CORNERS[3]\n\n return UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \\\n BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y\n\ndef find_board_points(rgb, plot_stuff=False):\n \"\"\"\n You have the RGB matrix of the goban as a square matrix but you don't\n know which entries correspon to the points on the board. This code finds\n the board points by plotting average red, green and blue scales and\n calculating the 19 local minima. Why? Because board points are\n intersections of black lines and RGB value of black color is [0,0,0].\n \"\"\"\n if plot_stuff:\n plt.subplot(221)\n plt.imshow(rgb)\n plt.subplot(222)\n x1_idx = find_custom_local_minima(np.mean(rgb[:,:,0],axis=0), 'r', plot_stuff)\n plt.subplot(223)\n x2_idx = find_custom_local_minima(np.mean(rgb[:,:,1],axis=0), 'g', plot_stuff)\n plt.subplot(224)\n x3_idx = find_custom_local_minima(np.mean(rgb[:,:,2],axis=0), 'b', plot_stuff)\n plt.show()\n\n plt.subplot(221)\n plt.imshow(rgb)\n plt.subplot(222)\n y1_idx = find_custom_local_minima(np.mean(rgb[:,:,0],axis=1), 'r', plot_stuff)\n plt.subplot(223)\n y2_idx = find_custom_local_minima(np.mean(rgb[:,:,1],axis=1), 'g', plot_stuff)\n plt.subplot(224)\n y3_idx = find_custom_local_minima(np.mean(rgb[:,:,2],axis=1), 'b', plot_stuff)\n plt.show()\n\n else:\n x1_idx = find_custom_local_minima(np.mean(rgb[:,:,0],axis=0), 'r', plot_stuff)\n x2_idx = find_custom_local_minima(np.mean(rgb[:,:,1],axis=0), 'g', plot_stuff)\n x3_idx = find_custom_local_minima(np.mean(rgb[:,:,2],axis=0), 'b', plot_stuff)\n\n y1_idx = find_custom_local_minima(np.mean(rgb[:,:,0],axis=1), 'r', plot_stuff)\n y2_idx = find_custom_local_minima(np.mean(rgb[:,:,1],axis=1), 'g', plot_stuff)\n y3_idx = find_custom_local_minima(np.mean(rgb[:,:,2],axis=1), 'b', plot_stuff)\n\n # Sometimes indices found by red, green and blue scales don't agree\n x_idx = src.make_indices_agree(x1_idx, x2_idx, x3_idx)\n y_idx = src.make_indices_agree(y1_idx, y2_idx, y3_idx)\n\n return x_idx, y_idx\n\ndef rescale_pyhsical_goban_rgb(rgb, ob):\n\n # Get outer boundaries from ob\n UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \\\n BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y = ob\n\n # Rescale to n by n matrix\n n = 300\n\n # find n points on the left and on the right boundaries\n x_left_vals, y_left_vals, rgb, _ = \\\n src.return_int_pnts(n, rgb, BL_outer_x, BL_outer_y, UL_outer_x, UL_outer_y)\n x_right_vals, y_right_vals, rgb, _ = \\\n src.return_int_pnts(n, rgb, BR_outer_x, BR_outer_y, UR_outer_x, UR_outer_y)\n\n # Calculate a new RGB matrix only for the board, by removing outside the board\n new_rgb = np.zeros([n,n,3])\n for i in range(n):\n x1, y1 = x_left_vals[i], y_left_vals[i]\n x2, y2 = x_right_vals[i], y_right_vals[i]\n # print((x1,y1), (x2,y2))\n _, _, rgb, v = src.return_int_pnts(n, rgb, x1, y1, x2, y2)\n for j in range(n):\n new_rgb[n-i-1, j, :] = v[j]\n\n return new_rgb.astype(np.uint8)\n\ndef plot_goban_rgb(rgb, bxy=[], wxy=[]):\n plt.imshow(rgb)\n plt.ylabel('1st index = 1, ..., 19')\n plt.xlabel('2nd index = 1, ..., 19')\n plt.show()\n\ndef average_RGB(rgb, xMAX, yMAX, x, y, w):\n # Calculates average RGB around a board point for stone detection\n xL, xR = np.maximum(0, x-w), np.minimum(x+w+1, xMAX-1)\n yL, yR = np.maximum(0, y-w), np.minimum(y+w+1, yMAX-1)\n red_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 0]))\n green_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 1]))\n blue_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 2]))\n return [red_scale, green_scale, blue_scale]\n\ndef make_indices_agree(x1, x2, x3):\n # Board points are determined from local extrema of R,G,B values.\n # But sometimes they don't match. In that case, choose the one whose\n # second difference looks like a constant\n a1 = np.amax(abs(np.diff(np.diff(x1))))\n a2 = np.amax(abs(np.diff(np.diff(x2))))\n a3 = np.amax(abs(np.diff(np.diff(x3))))\n x = 0\n x = x1 if a1 <= a2 and a1 <= a3 else x\n x = x2 if a2 <= a1 and a2 <= a3 else x\n x = x3 if a3 <= a1 and a3 <= a2 else x\n assert x is not 0\n return x\n\ndef calibrate(rgb, x_idx, y_idx, bxy=[], wxy=[]):\n \"\"\"\n Depending on light, laptop angle etc. the board may have different RGB values\n at different times. So how do we distinguis black and white stones?\n RGB of black = [0,0,0]\n RGB of white = [255,255,255]\n We will use red scale to distinguish black stones and blue scale to\n distinguish white stones.\n \"\"\"\n xMAX, yMAX, _ = rgb.shape\n roll_w = int(np.round(0.01*xMAX))\n\n # BLACK STONE CALIBRATION\n\n # Input black stone indices is bxy is empty\n if not bxy:\n msg = 'Enter black stone indices (e.g. 1 14 and 0 for end): '\n while True:\n input_text = input(msg)\n if input_text == '0':\n break\n else:\n j,i = list(map(int, input_text.split()))\n bxy.append((i,j))\n RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)\n print('RGB = ', RGB)\n\n # Find maximum red scale of black stones\n RMAX = 0\n for j,i in bxy:\n RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)\n print(f\"Black stone at ({i},{j}) with RGB = \", RGB)\n RMAX = np.maximum(RMAX, RGB[0])\n\n # Find the min red scale of the rest to distinguish\n RMIN_rest = 255\n for i,x in enumerate(x_idx, start=1):\n for j,y in enumerate(y_idx, start=1):\n if (j,i) not in bxy:\n RGB = src.average_RGB(rgb, xMAX, yMAX, x, y, roll_w)\n RMIN_rest = np.minimum(RMIN_rest, RGB[0])\n print('\\nBlack stones have a maximum red scale =', RMAX)\n print('Rest of the board have a minimum red scale', RMIN_rest)\n print('Black stone red scale threshold will be average of these two.\\n')\n\n # Red scale threshold for black stone detection\n assert RMAX < RMIN_rest\n red_scale_th = 0.5 * RMAX + 0.5 * RMIN_rest\n\n # WHITE STONE CALIBRATION\n\n # Input white stone indices is wxy is empty\n if not wxy:\n msg = 'Enter white stone indices (e.g. 1 14 and 0 for end): '\n while True:\n input_text = input(msg)\n if input_text == '0':\n break\n else:\n j,i = list(map(int, input_text.split()))\n wxy.append((i,j))\n RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)\n print('RGB = ', RGB)\n\n # Find minimum blue scale of white stones\n BMIN = 255\n for (j,i) in wxy:\n RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)\n print(f\"White stone at ({i},{j}) with RGB = \", RGB)\n BMIN = np.minimum(BMIN, RGB[2])\n\n # Find the max blue scale of the rest to distinguis\n BMAX_rest = 0\n for i,x in enumerate(x_idx, start=1):\n for j,y in enumerate(y_idx, start=1):\n if (j,i) not in wxy:\n RGB = src.average_RGB(rgb, xMAX, yMAX, x, y,roll_w)\n BMAX_rest = np.maximum(BMAX_rest, RGB[2])\n print('\\nWhite stones have a minimum blue scale >', BMIN)\n print('Rest of the board have a maximum blue scale', BMAX_rest)\n print('White stone blue scale threshold will be average of these two.\\n')\n\n # Blue scale threshold for white stone detection\n assert BMIN > BMAX_rest\n blue_scale_th = 0.5 * BMIN + 0.5 * BMAX_rest\n\n return red_scale_th, blue_scale_th\n\ndef is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, \\\n color, i, j, plot_stuff=False):\n i,j = j,i # RGB matrix is messed up so this needs to be done\n x, y = x_idx[i-1], y_idx[j-1]\n if plot_stuff:\n fig = plt.figure()\n plt.imshow(rgb)\n plt.ylabel('1st index = 1, ..., 19')\n plt.xlabel('2nd index = 1, ..., 19')\n plt.title(f\"Checking if there is a {color} stone at ({j},{i})\")\n plt.plot(x, y, 'ro', markersize=20, fillstyle='none')\n plt.show()\n\n xMAX, yMAX, _ = rgb.shape\n roll_w = int(np.round(0.01*xMAX))\n xL, xR = np.maximum(0, x-roll_w), np.minimum(x+roll_w+1, xMAX-1)\n yL, yR = np.maximum(0, y-roll_w), np.minimum(y+roll_w+1, yMAX-1)\n red_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 0]))\n blue_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 2]))\n msg = f\"There is {color} stone at {src.int_coords_to_str(i,j)} = ({i},{j})\"\n if color == 'black' and red_scale < red_scale_th:\n print(msg)\n return True\n elif color == 'white' and blue_scale > blue_scale_th:\n print(msg)\n return True\n else:\n return False\n\ndef mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, plot_stuff=True):\n xMAX, yMAX, _ = rgb.shape\n roll_w = int(np.round(0.01*xMAX))\n new_rgb = np.copy(rgb)\n bxy, wxy = [], [] # black and white stone lists including pairs\n for i, x in enumerate(x_idx, start=1):\n for j, y in enumerate(y_idx, start=1):\n xL, xR = np.maximum(0, x-roll_w), np.minimum(x+roll_w+1, xMAX-1)\n yL, yR = np.maximum(0, y-roll_w), np.minimum(y+roll_w+1, yMAX-1)\n red_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 0]))\n blue_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 2]))\n #print((x,y), red_scale, blue_scale)\n if red_scale < red_scale_th or blue_scale > blue_scale_th:\n if blue_scale > blue_scale_th:\n wxy.append((j,i))\n new_rgb[yL:yR, xL:xR,:] = 255, 255, 255 # white stone\n elif red_scale < red_scale_th:\n bxy.append((j,i))\n new_rgb[yL:yR, xL:xR,:] = 255, 255, 0 # black stone\n else:\n new_rgb[yL:yR, xL:xR,:] = 255, 0, 0 # empty\n\n if plot_stuff:\n src.plot_goban_rgb(new_rgb)\n\n return bxy, wxy\n\ndef mark_board_points(rgb, x_idx, y_idx, bxy=[], wxy=[]):\n \"\"\"\n Mark board points with red squares. Use yellow color for black stones and\n white color for white stones that are inputted.\n \"\"\"\n xMAX, yMAX, _ = rgb.shape\n roll_w = int(np.round(0.01*xMAX))\n new_rgb = np.copy(rgb)\n for i,x in enumerate(x_idx, start=1):\n for j,y in enumerate(y_idx, start=1):\n xL, xR = np.maximum(0, x-roll_w), np.minimum(x+roll_w+1, xMAX-1)\n yL, yR = np.maximum(0, y-roll_w), np.minimum(y+roll_w+1, yMAX-1)\n if (j,i) in bxy: # black stone\n new_rgb[yL:yR, xL:xR,:] = 255, 255, 0 # yellow color\n elif (j,i) in wxy: # white stone\n new_rgb[yL:yR, xL:xR,:] = 255, 255, 255 # white color\n else: # empty board point\n new_rgb[yL:yR, xL:xR,:] = 255, 0, 0 # red color\n src.plot_goban_rgb(new_rgb)\n\ndef find_custom_local_minima(ar1, color, plot_stuff):\n roll_w = int(np.round(len(ar1)/100))\n ar2 = subtract_rolling_sum(roll_w, ar1)\n idx = find_local_minima(ar2)\n if plot_stuff:\n plt.plot(ar2, color)\n for i in idx:\n plt.plot(i, ar2[i], 'k*')\n return idx\n\ndef find_local_minima(ar):\n # Try to find the optional cut-off that may help determine the 19 points on\n # the go board. Start with an interval [min_val, max_val] and squeeze until\n # it hits exactly 19 points\n # Find indices that correspond to local minima\n x = argrelmin(ar)\n idx_list = x[0]\n\n target = 19\n min_val, max_val = np.amin(ar), 100.0\n\n # Assert that above choices are good\n assert sum(ar[i] <= min_val for i in idx_list) < target\n assert sum(ar[i] <= max_val for i in idx_list) > target\n\n # Find the cut-off below which there are exactly 19 local minima\n while True:\n new_val = 0.5 * min_val + 0.5 * max_val\n if sum(ar[i] <= new_val for i in idx_list) < target:\n min_val = new_val\n elif sum(ar[i] <= new_val for i in idx_list) > target:\n max_val = new_val\n elif sum(ar[i] <= new_val for i in idx_list) == target:\n break\n\n # Find the indices\n return [i for i in idx_list if ar[i] <= new_val]\n\ndef rolling_sum(w, ar):\n new_ar = np.zeros(len(ar))\n for i in range(len(ar)):\n if i >= w and i <= len(ar)-w-1:\n new_ar[i] = np.mean(ar[i-w:i+w+1])\n elif i < w:\n new_ar[i] = np.mean(ar[0:i+1])\n elif i > len(ar)-w-1:\n new_ar[i] = np.mean(ar[i:len(ar)+1])\n assert len(new_ar) == len(ar)\n return new_ar\n\ndef subtract_rolling_sum(w, ar):\n return ar - rolling_sum(w,ar)\n\ndef return_int_pnts(num, rgb, x1, y1, x2, y2):\n x_vals = np.round(np.linspace(x1, x2, num=num, endpoint=True))\n x_vals = x_vals.astype(int)\n y_vals = np.round(np.linspace(y1, y2, num=num, endpoint=True))\n y_vals = y_vals.astype(int)\n # one of these two must not contain any duplicates\n assert len(x_vals) == len(set(x_vals)) or len(y_vals) == len(set(y_vals))\n # Return RGB values\n return_array = [rgb[y,x,0:3] for x,y in zip(x_vals, y_vals)]\n # make all red\n # for x,y in zip(x_vals, y_vals):\n # rgb[y,x,0:3] = 255, 0, 0\n return x_vals, y_vals, rgb, return_array\n" }, { "alpha_fraction": 0.8177728056907654, "alphanum_fraction": 0.8177728056907654, "avg_line_length": 67.38461303710938, "blob_id": "46878cb1837dd4ed50ac16b758f78b9dfb954af1", "content_id": "acebd40f9ef4e5e485c2be5ea8005f5216c11559", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 889, "license_type": "no_license", "max_line_length": 81, "num_lines": 13, "path": "/src/__init__.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "from .mouse_actions import get_goban_corners, str_to_integer_coordinates\nfrom .mouse_actions import int_coords_to_screen_coordinates, make_the_move\nfrom .mouse_actions import int_coords_to_str\nfrom .screenshot_actions import KGS_goban_rgb_screenshot, get_digital_goban_state\nfrom .picture_actions import plot_goban_rgb, average_RGB, make_indices_agree\nfrom .picture_actions import return_int_pnts, subtract_rolling_sum\nfrom .picture_actions import rolling_sum, find_custom_local_minima\nfrom .picture_actions import mark_board_points, is_this_stone_on_the_board\nfrom .picture_actions import mark_stones, calibrate\nfrom .picture_actions import find_board_points, rescale_pyhsical_goban_rgb\nfrom .picture_actions import get_pyhsical_board_outer_corners\nfrom .picture_actions import convert_physical_board_ij_to_str\nfrom .picture_actions import play_next_move_on_digital_board, scan_next_move\n" }, { "alpha_fraction": 0.4850401282310486, "alphanum_fraction": 0.5560690760612488, "avg_line_length": 47.94047546386719, "blob_id": "0624e274e499f69e9f63314ee3636f45e4e5c3a1", "content_id": "dd2415522108bf0736ae35d826522df74610df4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4111, "license_type": "no_license", "max_line_length": 83, "num_lines": 84, "path": "/tests/test_picture_actions.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "import pytest\nimport imageio\nimport sys, os\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nimport src\n\n# stones - upper-left corner is (1,1), lower-left corner is (19,1)\nIMG_PATH = ['images/pyshical_goban_pic1.png', 'images/pyshical_goban_pic2.png', \\\n 'images/pyshical_goban_pic3.png', 'images/pyshical_goban_pic4.png', \\\n 'images/pyshical_goban_pic5.png']\nbxy0, wxy0 = [(4,4), (16,4)], [(4,16),(16,16)]\nbxy1, wxy1 = [(1,9), (16,8)], [(10,1),(13,19)]\nbxy2, wxy2 = [(1,19), (17,3)], [(1,3),(19,19)]\nbxy3, wxy3 = [(1,19), (19,1), (5,4), (6,16), (12,8), (14,6), (16,10), (19,13)], \\\n [(1,1), (4,10), (7,7), (10,4), (10,10), (12,11), (15,7), (19,19)]\nbxy4, wxy4 = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]\nUL_outer_x0, UL_outer_y0 = 315, 24\nUR_outer_x0, UR_outer_y0 = 999, 40\nBL_outer_x0, BL_outer_y0 = 3, 585\nBR_outer_x0, BR_outer_y0 = 1273, 621\n\nUL_outer_x3, UL_outer_y3 = 321, 235\nUR_outer_x3, UR_outer_y3 = 793, 244\nBL_outer_x3, BL_outer_y3 = 92, 603\nBR_outer_x3, BR_outer_y3 = 933, 608\n\nUL_outer_x4, UL_outer_y4 = 414, 256\nUR_outer_x4, UR_outer_y4 = 962, 269\nBL_outer_x4, BL_outer_y4 = 217, 659\nBR_outer_x4, BR_outer_y4 = 1211, 679\n\[email protected]\ndef test_board_outer_corner():\n UL_outer_x0_click, UL_outer_y0_click, _, _, _, _, _, _ = \\\n src.get_pyhsical_board_outer_corners(IMG_PATH[0])\n assert abs(UL_outer_x0_click - UL_outer_x0) < 5 # five pixels\n assert abs(UL_outer_y0_click - UL_outer_y0) < 5\n\ndef test_board_state_detection_from_camera_picture():\n assert_board_state(IMG_PATH[4], bxy4, wxy4, 'black', bxy4[0], \\\n UL_outer_x4, UL_outer_y4, UR_outer_x4, UR_outer_y4, \\\n BL_outer_x4, BL_outer_y4, BR_outer_x4, BR_outer_y4, \\\n plot_stuff=False)\n assert_board_state(IMG_PATH[0], bxy0, wxy0, 'black', bxy0[1], \\\n UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \\\n BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0)\n assert_board_state(IMG_PATH[1], bxy1, wxy1, 'white', wxy1[0], \\\n UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \\\n BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0, \\\n plot_stuff=True)\n assert_board_state(IMG_PATH[2], bxy2, wxy2, 'black', bxy2[0], \\\n UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \\\n BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0)\n assert_board_state(IMG_PATH[3], bxy3, wxy3, 'white', wxy3[6], \\\n UL_outer_x3, UL_outer_y3, UR_outer_x3, UR_outer_y3, \\\n BL_outer_x3, BL_outer_y3, BR_outer_x3, BR_outer_y3)\n\ndef assert_board_state(IMG_PATH, bxy, wxy, color, ij_pair, \\\n UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \\\n BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y, \\\n plot_stuff=False):\n # Get RGB matrix of the picture with goban\n rgb = imageio.imread(IMG_PATH)\n\n # Remove non-goban part from the RGB matrix and make it a square matrix\n rgb = src.rescale_pyhsical_goban_rgb(rgb, \\\n UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \\\n BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y)\n\n # Find the indices of board points in the new square RGB matrix\n x_idx, y_idx = src.find_board_points(rgb, plot_stuff=plot_stuff)\n\n # Find color thresholds for stone detection\n red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)\n\n # Refind stones using the above thresholds\n bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, plot_stuff=plot_stuff)\n\n assert set(bxy) == set(bxy_new)\n assert set(wxy) == set(wxy_new)\n assert src.is_this_stone_on_the_board(rgb, x_idx, y_idx, \\\n red_scale_th, blue_scale_th, color, ij_pair[0], ij_pair[1], \\\n plot_stuff=True)\n" }, { "alpha_fraction": 0.6044191122055054, "alphanum_fraction": 0.6493228673934937, "avg_line_length": 33.219512939453125, "blob_id": "06fa1ebe630cbafa7be2ece5407ff5611a5e4bc2", "content_id": "967daa597a985e6773ea0a10c789a19b5350c3e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1403, "license_type": "no_license", "max_line_length": 82, "num_lines": 41, "path": "/temp/process_pyhsical_goban_pic.py", "repo_name": "folmez/Handsfree-KGS", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport imageio\nimport numpy as np\nimport src\n\nIMG_PATH = 'images/pyshical_goban_pic1.png'\n#IMG_PATH = 'images/pyshical_goban_pic2.png'\n#IMG_PATH = 'images/pyshical_goban_pic3.png'\nUL_outer_x, UL_outer_y = 315, 24\nUR_outer_x, UR_outer_y = 999, 40\nBL_outer_x, BL_outer_y = 3, 585\nBR_outer_x, BR_outer_y = 1273, 621\n#IMG_PATH = 'images/pyshical_goban_pic4.png'\n#UL_outer_x, UL_outer_y = 321, 235\n#UR_outer_x, UR_outer_y = 793, 244\n#BL_outer_x, BL_outer_y = 92, 603\n#BR_outer_x, BR_outer_y = 933, 608\n\n# Get RGB matrix of the picture with goban\nrgb = imageio.imread(IMG_PATH)\nplt.imshow(rgb)\nplt.show()\n\n# Remove non-goban part from the RGB matrix and make it a square matrix\nrgb = src.rescale_pyhsical_goban_rgb(rgb, \\\n UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \\\n BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y)\n\n# Find the indices of board points in the new square RGB matrix\nx_idx, y_idx = src.find_board_points(rgb, plot_stuff=True)\n\nbxy, wxy = [(4,4), (16,4)], [(4,16),(16,16)]\n\nsrc.mark_board_points(rgb, x_idx, y_idx, bxy, wxy)\n\nred_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)\n\nbxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th)\n\nsrc.is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, \\\n 'black', 16,4)\n" } ]
17
hoichunlaw/EventDriven
https://github.com/hoichunlaw/EventDriven
c52282c4700674a633cf2b2015bd8f0bd0c24ee3
bd332d176cee7f85bf7240a6a4561648c4ed20dd
53fc64446107d39d7561bc7cde5e39d1f52d3130
refs/heads/master
2020-09-29T22:47:25.354399
2020-08-03T07:01:13
2020-08-03T07:01:13
227,140,671
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6311229467391968, "alphanum_fraction": 0.6392413973808289, "avg_line_length": 35.64500045776367, "blob_id": "4e7843af1d4c9ec1ab9c6a410b23abf98b46bb50", "content_id": "2a0e149ff239377e1c54ff5f74b0f9f1d6bf6e61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14658, "license_type": "no_license", "max_line_length": 162, "num_lines": 400, "path": "/News_Headlines_Prediction.py", "repo_name": "hoichunlaw/EventDriven", "src_encoding": "UTF-8", "text": "import eikon as ek\nimport numpy as np\nimport pandas as pd\nimport os\nimport shutil\nimport zipfile\nimport datetime\nimport cufflinks as cf\nimport configparser as cp\nimport platform\nimport pickle\nimport nltk\nnltk.download('stopwords')\n\nfrom copy import deepcopy\nimport collections\nfrom nltk.tokenize import TreebankWordTokenizer\nfrom nltk.corpus import stopwords\nstoppingWordSet = set(stopwords.words('english'))\n\nimport tensorflow_hub as hub\nimport re\nimport tensorflow as tf\nimport tensorflow.keras as keras\n\ndataRootPath = r\"D:/Eikon_Data/\"\ndataRootPathNews = r\"D:/Eikon_Data/News/\"\ndataRootPathMarketData = r\"D:/Eikon_Data/Market_Data/\"\ndataRootPathDB = r\"D:/Database/\"\nmodelPath = r\"D:/python/PROD_Model/\"\nzipFolderPath = r\"D:/Zip_Folder/\"\ntf_hub_path = r\"C:/Users/hc_la/AppData/Local/Temp/tfhub_modules/\"\ndate_format = \"%Y-%m-%d\"\n\nelmo = hub.Module(\"https://tfhub.dev/google/elmo/3\", trainable=True)\n\ndef createFullNameDict():\n df = pd.read_csv(dataRootPathDB+\"Underlying_Database/full_name.csv\")\n return {u:l.split(\",\") for u,l in zip(df[\"undlName\"].values, df[\"full_name_list\"].values)}\n\ndef getUndlNameList(criterion=\"\"):\n if criterion == \"\":\n df = pd.read_csv(dataRootPathDB + \"Underlying_Database/undlNameList.csv\")\n return df.undlName.values\n elif criterion == \"HK\" or criterion == \"AX\" or criterion == \"SI\":\n df = pd.read_csv(dataRootPathDB + \"Underlying_Database/undlNameList.csv\")\n return [u for u in df.undlName.values if criterion in u]\n else:\n df = pd.read_csv(dataRootPathDB + \"Underlying_Database/sector.csv\")\n sectorDict = {k:v.split(\",\") for k, v in zip(df[\"Cluster\"], df[\"undlNameList\"])}\n return sectorDict.get(criterion)\n\n# create undlName full name dict\nundlNameFullNameDict = createFullNameDict()\n\ndf = pd.read_csv(dataRootPathDB + \"Underlying_Database/sector.csv\")\nundlSectorDict = {}\nfor cluster, l in zip(df[\"Cluster\"], df[\"undlNameList\"]):\n for u in l.split(\",\"):\n undlSectorDict[u] = cluster\n\ndef getSector(undlName):\n return undlSectorDict.get(undlName)\n\ntoday = datetime.datetime.now()\ndate_format = \"%Y-%m-%d\"\n\ndef checkFolderExist(path):\n return os.path.isdir(path)\n\ndef checkFileExist(path):\n return os.path.isfile(path)\n\ndef createFolder(rootPath, folderName):\n if not checkFolderExist(rootPath+\"/\"+folderName):\n os.mkdir(rootPath+\"/\"+folderName)\n return True\n else:\n return \"Folder already exist\"\n\ndef formatDate(date, fm=date_format):\n return date.strftime(fm)\n\ndef convertToDateObj(date, fm=date_format):\n return datetime.datetime.strptime(date, date_format)\n\ndef moveDate(date, dayDelta=0, hourDelta=0):\n if type(date) == str:\n return datetime.datetime.strptime(date, date_format) + datetime.timedelta(days=dayDelta)\n else:\n return date + datetime.timedelta(days=dayDelta)\n\ndef PreviousBusinessDay(date, businessDateList):\n if type(date) == str:\n myDate = datetime.datetime.strptime(date, date_format)\n else:\n myDate = date\n\n while formatDate(myDate) not in businessDateList:\n myDate = moveDate(formatDate(myDate), -1)\n\n return formatDate(myDate)\n\ndef convertTimestampsToDateStr(timestamp):\n s = timestamp[0:10]\n return s\n\ndef normalize_headline(row):\n result = row.lower()\n #Delete useless character strings\n result = result.replace('...', ' ')\n whitelist = set('abcdefghijklmnopqrstuvwxyz 0123456789.,;\\'-:?')\n result = ''.join(filter(whitelist.__contains__, result))\n #result2 = []\n #for c in result:\n # if c\n return result\n\ndef removeStoppingWords(sent):\n result = []\n for w in sent.split(\" \"):\n if w not in stoppingWordSet:\n result.append(w)\n result.append(\" \")\n return \"\".join(result)\n\ndef removeHeading(sent):\n # remove \"BRIEF-\"\n # remove \"BUZZ -\"\n # remove \"REFILE-BRIEF-\"\n # remove \"UPDATE \"\n # remove \"EXCLUSIVE-\"\n # remove \"Reuters Insider - \"\n # remove \"BREAKINGVIEWS-\"\n\n headingList = [\"BRIEF-\", \"BUZZ -\", \"BUZZ-\", \"REFILE-\", \"REFILE-BRIEF-\", \"UPDATE \", \"EXCLUSIVE-\", \"Reuters Insider - \", \"BREAKINGVIEWS-\"]\n result = sent.lower()\n for h in headingList:\n if h.lower() in result:\n result = result.replace(h.lower(), \"\")\n return result\n\ndef removeOthers(sent):\n wordList = [\"holding\", \"holdings\", \"ltd\"]\n result = sent\n for w in wordList:\n if w in result:\n result = result.replace(w, \"\")\n return result\n\ndef precision(y_true, y_pred):\n total = 0\n valid = 0\n for i,j in zip(y_true, y_pred):\n if j == 1:\n total+=1\n if i==1:\n valid+=1\n if total == 0:\n return -1\n else:\n return valid / total\n\ndef iaGetTimeSeries(undlName, field, dateFrom, dateTo):\n\n if type(dateFrom) != str: dateFrom = formatDate(dateFrom)\n if type(dateTo) != str: dateTo = formatDate(dateTo)\n\n df = pd.read_csv(dataRootPathMarketData+undlName.split('.')[0] + '_'+undlName.split('.')[1]+'.csv')\n df = df[df.Date >= dateFrom]\n df = df[df.Date <= dateTo]\n df = df.set_index([\"Date\"])\n\n return pd.DataFrame(df[field])\n\ndef createUndlDataFrame(undlName, undlNameFullNameList, newsSource, filterFuncList, dateFrom, dateTo,\n benchmark = \"\"):\n\n print(\"Loading\", undlName, dateFrom, dateTo, end=\" \")\n # get news headlines\n df_list = []\n dateRef = datetime.datetime.strptime(dateFrom, date_format)\n while dateRef <= datetime.datetime.strptime(dateTo, date_format):\n df_list.append(pd.read_csv(dataRootPathNews + formatDate(dateRef) + \"/\" + undlName + \"_headlines.csv\"))\n dateRef = moveDate(dateRef, 1)\n news_df = pd.concat(df_list, axis=0)\n\n # rename and sort columns\n cols = news_df.columns\n news_df.columns = [\"timestamp\"] + list(cols[1:])\n news_df = news_df.sort_values([\"timestamp\"])\n news_df.loc[:,\"date\"] = news_df[\"versionCreated\"].apply(convertTimestampsToDateStr)\n\n # return empty df if no data\n if news_df.shape[0] == 0:\n print(\" done\")\n return pd.DataFrame({\"date\": [], \"undlName\":[], \"sourceCode\": [], \"storyId\":[], \"text\": [], \"oneDayReturn\": [], \"twoDayReturn\": [], \"threeDayReturn\": []})\n\n # get market data\n start = min(news_df.date)\n end = max(news_df.date)\n spot_df = iaGetTimeSeries(undlName, \"CLOSE\", moveDate(start, -10), moveDate(end, 10))\n if benchmark != \"\":\n spot_df_benchmark = iaGetTimeSeries(benchmark, \"CLOSE\", moveDate(start, -10), moveDate(end, 10))\n spot_df_benchmark = spot_df_benchmark.loc[spot_df.index]\n\n # truncate news_df when stock has limited historical data\n news_df = news_df[(news_df.date >= min(spot_df.index))]\n\n # create one day, two day and three day change columns\n if benchmark != \"\":\n spot_df.loc[:,\"Future-1\"] = spot_df.CLOSE.shift(-1)\n spot_df.loc[:,\"Future-2\"] = spot_df.CLOSE.shift(-2)\n spot_df.loc[:,\"Future-3\"] = spot_df.CLOSE.shift(-3)\n spot_df = spot_df.iloc[:-3,]\n spot_df_benchmark.loc[:,\"Future-1\"] = spot_df_benchmark.CLOSE.shift(-1)\n spot_df_benchmark.loc[:,\"Future-2\"] = spot_df_benchmark.CLOSE.shift(-2)\n spot_df_benchmark.loc[:,\"Future-3\"] = spot_df_benchmark.CLOSE.shift(-3)\n spot_df_benchmark = spot_df_benchmark.iloc[:-3,]\n\n spot_df.loc[:,\"oneDayReturn\"] = \\\n np.log(spot_df[\"Future-1\"].values / spot_df[\"CLOSE\"].values)-np.log(spot_df_benchmark[\"Future-1\"].values / spot_df_benchmark[\"CLOSE\"].values)\n\n spot_df.loc[:,\"twoDayReturn\"] = \\\n np.log(spot_df[\"Future-2\"].values / spot_df[\"CLOSE\"].values)-np.log(spot_df_benchmark[\"Future-2\"].values / spot_df_benchmark[\"CLOSE\"].values)\n\n spot_df.loc[:,\"threeDayReturn\"] = \\\n np.log(spot_df[\"Future-3\"].values / spot_df[\"CLOSE\"].values)-np.log(spot_df_benchmark[\"Future-3\"].values / spot_df_benchmark[\"CLOSE\"].values)\n else:\n spot_df.loc[:,\"Future-1\"] = spot_df.CLOSE.shift(-1)\n spot_df.loc[:,\"Future-2\"] = spot_df.CLOSE.shift(-2)\n spot_df.loc[:,\"Future-3\"] = spot_df.CLOSE.shift(-3)\n spot_df = spot_df.iloc[:-3,]\n\n spot_df.loc[:,\"oneDayReturn\"] = np.log(spot_df[\"Future-1\"].values / spot_df[\"CLOSE\"].values)\n spot_df.loc[:,\"twoDayReturn\"] = np.log(spot_df[\"Future-2\"].values / spot_df[\"CLOSE\"].values)\n spot_df.loc[:,\"threeDayReturn\"] = np.log(spot_df[\"Future-3\"].values / spot_df[\"CLOSE\"].values)\n\n oneDayReturnDict = {d:v for d,v in zip(spot_df.index, spot_df[\"oneDayReturn\"])}\n twoDayReturnDict = {d:v for d,v in zip(spot_df.index, spot_df[\"twoDayReturn\"])}\n threeDayReturnDict = {d:v for d,v in zip(spot_df.index, spot_df[\"threeDayReturn\"])}\n\n # create concat df, news and log-chg\n businessDateList = list(spot_df.index)\n d = news_df.date.values\n oneDay = []\n twoDay = []\n threeDay = []\n for i in range(len(news_df)):\n oneDay.append(oneDayReturnDict[PreviousBusinessDay(d[i], businessDateList)])\n twoDay.append(twoDayReturnDict[PreviousBusinessDay(d[i], businessDateList)])\n threeDay.append(threeDayReturnDict[PreviousBusinessDay(d[i], businessDateList)])\n\n news_df.loc[:,\"oneDayReturn\"] = oneDay\n news_df.loc[:,\"twoDayReturn\"] = twoDay\n news_df.loc[:,\"threeDayReturn\"] = threeDay\n\n # data preprocessing\n fil_df = news_df[news_df[\"sourceCode\"]==newsSource]\n fil_df.loc[:,\"text\"] = fil_df.text.apply(lambda x: x.lower())\n\n for f in filterFuncList:\n fil_df.loc[:,\"text\"] = fil_df.text.apply(f).values\n tmp = []\n for name in undlNameFullNameList:\n tmp.append(fil_df[fil_df.text.apply(lambda x: name in x)])\n fil_df = pd.concat(tmp, axis=0)\n\n if fil_df.shape[0] == 0:\n df = pd.DataFrame({\"date\": [], \"undlName\":[], \"sourceCode\": [], \"storyId\":[], \"text\": [], \"oneDayReturn\": [], \"twoDayReturn\": [], \"threeDayReturn\": []})\n else:\n fil_df[\"undlName\"] = [undlName for i in range(len(fil_df))]\n df = fil_df[[\"date\", \"undlName\", \"sourceCode\", \"storyId\", \"text\", \"oneDayReturn\", \"twoDayReturn\", \"threeDayReturn\"]]\n print(\" done\")\n return df\n\ndef elmo_vector(x):\n\n if type(x) == list:\n embeddings = elmo(x, signature=\"default\", as_dict=True)[\"elmo\"]\n else:\n embeddings = elmo(x.tolist(), signature=\"default\", as_dict=True)[\"elmo\"]\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.tables_initializer())\n return sess.run(tf.reduce_mean(embeddings, 1))\n\ndef build_model():\n\n input_layer = keras.layers.Input(shape=(1024,))\n\n h = keras.layers.Dropout(rate=0.2)(input_layer)\n\n prediction = keras.layers.Dense(1, activation=\"sigmoid\")(h)\n\n model = keras.Model(inputs=[input_layer], outputs=prediction)\n\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[keras.metrics.Precision()])\n\n return model\n\ndef createNewsHeadlinePrediction(ex, sector_list):\n\n undlNameList = getUndlNameList(ex)\n for sector in sector_list:\n undlNameList += getUndlNameList(sector)\n\n start_date = formatDate(today)\n end_date = formatDate(today)\n\n #resultDict = {\"undlName\":[], \"bull_signals\":[], \"bear_signals\":[]}\n\n # load model\n #market_bull_model = build_model()\n #market_bull_model.reset_states()\n #market_bull_model.load_weights(modelPath + ex + \"_market_bull_model.h5\")\n #market_bear_model = build_model()\n #market_bear_model.reset_states()\n #market_bear_model.load_weights(modelPath + ex + \"_market_bear_model.h5\")\n\n #sectorBullModelDict = {}\n #sectorBearModelDict = {}\n #for sector in sector_list:\n #model = build_model()\n #model.reset_states()\n #model.load_weights(modelPath + sector + \"_bull_model.h5\")\n #sectorBullModelDict[sector] = model\n\n #model = build_model()\n #model.reset_states()\n #model.load_weights(modelPath + sector + \"_bear_model.h5\")\n #sectorBearModelDict[sector] = model\n\n tmp = []\n for undlName in undlNameList:\n tmp_df = createUndlDataFrame(undlName, undlNameFullNameDict[undlName], \"NS:RTRS\",\n [removeHeading, normalize_headline, removeOthers],\n start_date, end_date, \"\")\n tmp_df = tmp_df.drop_duplicates(subset='storyId')\n tmp_df = tmp_df.sort_values([\"date\"])\n if len(tmp_df) != 0: tmp.append(tmp_df)\n\n if len(tmp) != 0:\n df = pd.concat(tmp, axis=0)\n else:\n print(\"No News Headlines\")\n return True\n\n print(df.shape)\n\n # create ELMo Vector\n #batch = [df[\"text\"].values[i:i+100] for i in range(0, df.shape[0], 100)]\n #batch_elmo = [elmo_vector(x) for x in batch]\n #elmo_vector_list = np.concatenate(batch_elmo, axis=0)\n\n #market_bull_model_result = market_bull_model.predict(elmo_vector_list).reshape(-1)\n #market_bear_model_result = market_bear_model.predict(elmo_vector_list).reshape(-1)\n\n #sector_bull_model_result = []\n #sector_bear_model_result = []\n #i = 0\n #for undlName in df[\"undlName\"].values:\n # sector_bull_model = sectorBullModelDict[getSector(undlName)]\n # sector_bear_model = sectorBearModelDict[getSector(undlName)]\n\n # sector_bull_model_result += list(sector_bull_model.predict(elmo_vector_list[i].reshape(1, -1)).reshape(-1))\n # sector_bear_model_result += list(sector_bear_model.predict(elmo_vector_list[i].reshape(1, -1)).reshape(-1))\n # i += 1\n\n #sector_bull_model_result = np.array(sector_bull_model_result)\n #sector_bear_model_result = np.array(sector_bear_model_result)\n\n #resultDict[\"undlName\"] += list(df[\"undlName\"].values)\n #resultDict[\"bull_signals\"] += [1 if i > 1 else 0 for i in market_bull_model_result + sector_bull_model_result]\n #resultDict[\"bear_signals\"] += [1 if i > 1 else 0 for i in market_bear_model_result + sector_bear_model_result]\n\n #result_df = pd.DataFrame.from_dict(resultDict)\n #to_drop = [i for i in range(result_df.shape[0]) if result_df.iloc[i, 1] == 0 and result_df.iloc[i, 2] == 0]\n #result_df = result_df.drop(to_drop)\n result_df = df.loc[:,[\"undlName\", \"text\"]]\n result_df.to_csv(r\"D:/python/EventDriven/result/\" + formatDate(today) + \"_\" + ex + \".csv\")\n\n return True\n\ndef main():\n\n sector_list = [\"Tencent\", \"Chinese_Bank\", \"Chinese_Insurance\", \"Chinese_Oil\", \"Chinese_Auto\",\n \"Chinese_Telecom\", \"Chinese_Industrial\", \"HK_Property\", \"HK_Bank\"]\n\n createNewsHeadlinePrediction(ex=\"HK\", sector_list=[])\n\n sector_list = [\"AX_Bank\"]\n\n createNewsHeadlinePrediction(ex=\"AX\", sector_list=[])\n\n createNewsHeadlinePrediction(ex=\"SI\", sector_list=[])\n\nif __name__==\"__main__\":\n main()\n" }, { "alpha_fraction": 0.7428571581840515, "alphanum_fraction": 0.7714285850524902, "avg_line_length": 16.5, "blob_id": "e1ab64ba7d07a16a887880d6a62f1e5205ca8bd5", "content_id": "751965bd4ec74e543d3e3245d20f8ee590cee470", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 35, "license_type": "no_license", "max_line_length": 20, "num_lines": 2, "path": "/README.md", "repo_name": "hoichunlaw/EventDriven", "src_encoding": "UTF-8", "text": "# EventDriven\nEvent Driven - test3\n" }, { "alpha_fraction": 0.6215649247169495, "alphanum_fraction": 0.6328927874565125, "avg_line_length": 34.05147171020508, "blob_id": "a414a0229fdd127178719fcbe43f865a571edfda", "content_id": "362f0dd3d103f4c8eb13a758652361afd3e32cdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4767, "license_type": "no_license", "max_line_length": 134, "num_lines": 136, "path": "/Downloader.py", "repo_name": "hoichunlaw/EventDriven", "src_encoding": "UTF-8", "text": "import eikon as ek\nimport numpy as np\nimport pandas as pd\nimport os\nimport zipfile\nimport datetime\nimport cufflinks as cf\nimport configparser as cp\n\nek.set_app_key('e4ae85e1e08b47ceaa1ee066af96cabe6e56562a')\n\ndataRootPath = r\"D:/Eikon_Data/\"\ndataRootPathNews = r\"D:/Eikon_Data/News/\"\ndataRootPathMarketData = r\"D:/Eikon_Data/Market_Data/\"\ndatabasePath = r\"D:/Database/\"\nzipFolderPath = r\"D:/Zip_Folder/\"\ndate_format = \"%Y-%m-%d\"\n\ndef checkFolderExist(path):\n return os.path.isdir(path)\n\ndef checkFileExist(path):\n return os.path.isfile(path)\n\ndef createFolder(rootPath, folderName):\n if rootPath[-1] == \"/\":\n myRootPath = rootPath[:-1]\n else:\n myRootPath = rootPath\n if not checkFolderExist(myRootPath+\"/\"+folderName):\n os.mkdir(myRootPath+\"/\"+folderName)\n return True\n else:\n return \"Folder already exist\"\n\ndef formatDate(date, fm=date_format):\n return date.strftime(fm)\n\ndef moveDate(date, dayDelta=0, hourDelta=0):\n if type(date) == str:\n return datetime.datetime.strptime(date, date_format) + datetime.timedelta(days=dayDelta) + datetime.timedelta(hours=hourDelta)\n else:\n return date + datetime.timedelta(days=dayDelta) + + datetime.timedelta(hours=hourDelta)\n\ndef zipdir(path, ziph):\n # ziph is zipfile handle\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file))\n\ndef iaZipFolder(path):\n if path[-1] == '/':\n zipFileName = path.split(\"/\")[-2] + \"_zip.zip\"\n else:\n zipFileName = path.split(\"/\")[-1] + \"_zip.zip\"\n\n if checkFileExist(zipFolderPath + zipFileName): os.remove(zipFolderPath + zipFileName)\n zipf = zipfile.ZipFile(zipFolderPath + zipFileName, 'w', zipfile.ZIP_DEFLATED)\n zipdir(path, zipf)\n zipf.close()\n\ndef downloadNews(undlName, date, savePath):\n if not checkFolderExist(savePath + formatDate(date)):\n createFolder(savePath, formatDate(date))\n\n # download data\n df = ek.get_news_headlines(\"R:\"+undlName+\" and english\",\n date_from=formatDate(moveDate(date,-1)) + \"T16:00:00\",\n date_to=formatDate(moveDate(date)) + \"T16:00:00\",\n count=100)\n\n # move date back to HK time\n df.index = moveDate(np.array(list(df.index)),0,8)\n df.versionCreated = moveDate(np.array(list(df.versionCreated)),0,8)\n # save data\n df.to_csv(savePath + formatDate(date) + \"/\" + undlName + \"_headlines.csv\")\n\ndef downloadHistoricalNews(undlName, dateFrom, dateTo, savePath):\n if type(dateFrom) == str:\n myDateFrom = datetime.datetime.strptime(dateFrom, date_format)\n else:\n myDateFrom = dateFrom\n\n if type(dateTo) == str:\n myDateTo = datetime.datetime.strptime(dateTo, date_format)\n else:\n myDateTo = dateTo\n\n dateRef = myDateFrom\n while dateRef <= myDateTo:\n print(\"Download\", undlName, dateRef)\n downloadNews(undlName, dateRef, savePath)\n dateRef = moveDate(dateRef, 1)\n\ndef downloadMarketData(undlName, date, savePath):\n\n # download data\n try:\n df_new = ek.get_timeseries(undlName, fields=[\"CLOSE\", \"HIGH\", \"LOW\", \"OPEN\", \"VOLUME\"],\n start_date=formatDate(date), end_date=formatDate(date), interval=\"daily\", corax=\"adjusted\")\n except:\n df_new = []\n\n if type(df_new) == pd.core.frame.DataFrame:\n myUndlName = undlName.split('.')[0] + '_' + undlName.split('.')[1]\n df_new.index = pd.Series(df_new.index).apply(formatDate)\n if checkFileExist(savePath + myUndlName + \".csv\"):\n df = pd.read_csv(savePath + myUndlName + \".csv\")\n df = df.set_index(\"Date\")\n if df_new.index[0] not in list(df.index):\n df = pd.concat([df, df_new], axis=0)\n df.to_csv(savePath + myUndlName + \".csv\")\n else:\n df_new.to_csv(savePath + myUndlName + \".csv\")\n\ndef downloadHistoricalMarketData(undlName, dateFrom, dateTo, savePath):\n\n # download data\n df = ek.get_timeseries(undlName, fields=[\"CLOSE\", \"HIGH\", \"LOW\", \"OPEN\", \"VOLUME\"],\n start_date=dateFrom, end_date=dateTo, interval=\"daily\", corax=\"adjusted\")\n df.index = pd.Series(df.index).apply(formatDate)\n myUndlName = undlName.split('.')[0] + '_' + undlName.split('.')[1]\n df.to_csv(savePath + myUndlName + \".csv\")\n\ndef main():\n today = datetime.datetime.now()\n df = pd.read_csv(r'D:/Database/Underlying_Database/undlNameList.csv')\n undlNameList = list(df.undlName.values)\n\n # download News Headlines\n for undlName in undlNameList:\n print(\"Download\", undlName, today)\n downloadNews(undlName, today, dataRootPathNews)\n\nif __name__==\"__main__\":\n main()\n" }, { "alpha_fraction": 0.6546888947486877, "alphanum_fraction": 0.6678352355957031, "avg_line_length": 29.83783721923828, "blob_id": "2113ecc4c2615690f92335ae63b973515c27773b", "content_id": "72cfef2dd1bc07a768f018cf23f0de955991205d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1141, "license_type": "no_license", "max_line_length": 134, "num_lines": 37, "path": "/Email.py", "repo_name": "hoichunlaw/EventDriven", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport os\nimport win32com.client as win32\nimport datetime\n\npath = r\"D:/python/EventDriven/result/\"\ndate_format = \"%Y-%m-%d\"\n\ndef formatDate(date, fm=date_format):\n return date.strftime(fm)\n\ndef moveDate(date, dayDelta=0, hourDelta=0):\n if type(date) == str:\n return datetime.datetime.strptime(date, date_format) + datetime.timedelta(days=dayDelta) + datetime.timedelta(hours=hourDelta)\n else:\n return date + datetime.timedelta(days=dayDelta) + + datetime.timedelta(hours=hourDelta)\n\ndef email(to, sub, HTMLBody, attachmentURLList):\n outlook = win32.Dispatch('outlook.application')\n mail = outlook.CreateItem(0)\n mail.To = to\n mail.Subject = sub\n mail.HTMLBody = HTMLBody\n for url in attachmentURLList:\n if os.path.exists(url): mail.Attachments.Add(url)\n mail.Send()\n\ndef main():\n today = datetime.datetime.now()\n url1 = path + formatDate(today) + \"_HK.csv\"\n url2 = path + formatDate(today) + \"_AX.csv\"\n url3 = path + formatDate(today) + \"_SI.csv\"\n email(\"[email protected]\", \"_News_\", \"\", [url1, url2, url3])\n\nif __name__==\"__main__\":\n main()\n" }, { "alpha_fraction": 0.7236841917037964, "alphanum_fraction": 0.7236841917037964, "avg_line_length": 14.399999618530273, "blob_id": "7849b7a11898fe76692120bda78ab2d2e9c8987c", "content_id": "83b9b56dbb005417c5ea7dab981229ec2b1ef318", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 76, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/upload.sh", "repo_name": "hoichunlaw/EventDriven", "src_encoding": "UTF-8", "text": "#!/bin/sh\ngit status\ngit add .\ngit commit -m \"upload\"\ngit push origin master" } ]
5
teslaworksumn/munchi-pi-api
https://github.com/teslaworksumn/munchi-pi-api
9b7833693f949c30d9cde91b2f27950063aebd1c
631799568d6420624fabd11bbfd001c2df022636
d0a24d22c41b468ac2d80cad16ddde9a70b7aaad
refs/heads/master
2021-01-18T22:15:22.492557
2017-10-13T21:37:37
2017-10-13T21:37:37
84,374,731
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6538461446762085, "alphanum_fraction": 0.7147436141967773, "avg_line_length": 24.83333396911621, "blob_id": "6e97fac824c7aea62d2d9d5f256cd41f48d66e2f", "content_id": "7f80fae05e834710766178b56885833002c3adfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 69, "num_lines": 12, "path": "/port_test.py", "repo_name": "teslaworksumn/munchi-pi-api", "src_encoding": "UTF-8", "text": "import time\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BOARD) #pin numbering scheme uses board header pins\nGPIO.setup(8,GPIO.OUT) #pin 8, GPIO15\n\nwhile True:\n\t'''test pin8 GPIO15'''\n\tGPIO.output(8,1) #output high to pin 8\n\ttime.sleep(0.5) #delay 0.5 sec\n\tGPIO.output(8,0) #output low to pin 8\n\ttime.sleep(0.5)\n\t\n" }, { "alpha_fraction": 0.47401246428489685, "alphanum_fraction": 0.6909216642379761, "avg_line_length": 39.08333206176758, "blob_id": "11fdc04c6e31fc888bec9b51eeaf814d9d9044e0", "content_id": "4ea377da2f89dd0cca842f3fac4dad8bd3c8d879", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1443, "license_type": "no_license", "max_line_length": 295, "num_lines": 36, "path": "/munchi_rasp_pi.py", "repo_name": "teslaworksumn/munchi-pi-api", "src_encoding": "UTF-8", "text": "import time\nimport RPi.GPIO as GPIO\nimport Adafruit_ADS1x15\n\nTHERMISTORVALUE 100000\nSERIESRESISTOR 100000 #series resistor to thermistor\nBCOEFFICIENT 4072\n\nthermistorR2Temp = {3.2575:0, 2.5348:5, 1.9876:10, 1.5699:15, 1.2488:20, 1.0000:25, 0.80594:30, 0.65355:35, 0.53312:40, 0.43735:45, 0.36074:50, 0.29911:55, 0.24925:60, 0.20872:65, 0.17558:70, 0.14837:75, 0.12592:80, 0.10731:85, 0.091816:90, 0.078862:95, 0.067988:100, 0.058824:105, 0.051071:110}\n\nGPIO.setmode(GPIO.BOARD) #pin numbering scheme uses board header pins\nGPIO.setup(19,GPIO.out) #pin 19, GPIO12 output\nGPIO.setup(26,GPIO.out) #pin 26, GPIO07 output\n\nadc = Adafruit_ADS1x15.ADS1015() #create an ADS1015 ADC (12-bit) instance.\n# Choose a gain of 1 for reading voltages from 0 to 4.09V.\n# Or pick a different gain to change the range of voltages that are read:\n# - 2/3 = +/-6.144V\n# - 1 = +/-4.096V\n# - 2 = +/-2.048V\n# - 4 = +/-1.024V\n# - 8 = +/-0.512V\n# - 16 = +/-0.256V\n# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.\nGAIN = 1\n\nwhile True:\n\treading = adc.read_adc(0, gain=GAIN) #read A0, 12 bit signed integer, -2048 to 2047 (0=GND, 2047=4.096*gain)\n\tvoltReading = reading * 4.096 / 2047.0 #convert adc to voltage\n\tthermoR = SERIESRESISTOR / ((4.0/voltReading) - 1)#convert voltage to thermoster resistance\n\t#7002 thermistor\n\t#temp = \n\t\n\tprint (\"reading: \" + reading)\n\tprint (\"thermistor resistance: \" + thermoR)\n\t#print (\"temp: \" + temp)\n" }, { "alpha_fraction": 0.547756016254425, "alphanum_fraction": 0.6628308296203613, "avg_line_length": 27.96666717529297, "blob_id": "fe0bdd974351d61af4a2e869d5372acba4c657b5", "content_id": "81f6607307c5a66c102a9842b097cf7dbb4cbee4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 869, "license_type": "no_license", "max_line_length": 109, "num_lines": 30, "path": "/adc_test.py", "repo_name": "teslaworksumn/munchi-pi-api", "src_encoding": "UTF-8", "text": "import time\nimport RPi.GPIO as GPIO\nimport Adafruit_ADS1x15\n\nGPIO.setmode(GPIO.BOARD) #pin numbering scheme uses board header pins\nGPIO.setup(8,GPIO.OUT) #pin 8, GPIO15\n\nadc = Adafruit_ADS1x15.ADS1015() #create an ADS1015 ADC (12-bit) instance.\n\n# Choose a gain of 1 for reading voltages from 0 to 4.09V.\n# Or pick a different gain to change the range of voltages that are read:\n# - 2/3 = +/-6.144V\n# - 1 = +/-4.096V\n# - 2 = +/-2.048V\n# - 4 = +/-1.024V\n# - 8 = +/-0.512V\n# - 16 = +/-0.256V\n# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.\nGAIN = 1\n\nwhile True:\n\t'''test adc'''\n\treading = adc.read_adc(0, gain=GAIN) #read A0, 12 bit signed integer, -2048 to 2047 (0=GND, 2047=4.096*gain)\n\t#reading = adc.read_adc(0) #gain defaults to 1\n\tprint(reading)\n\tif reading < 1000:\n\t\tGPIO.output(8,0)\n\telse:\n\t\tGPIO.output(8.1)\n\ttime.sleep(0.5)\n" } ]
3
romkof/CarND-Behavioral-Cloning-P3
https://github.com/romkof/CarND-Behavioral-Cloning-P3
5126e8141f9272463bea30bb28e79161618c1860
fce976348a8839dfb1f47d679f91ac4ca774afd7
f7a6c31ccb20558c5e7372614158484a60635d49
refs/heads/master
2020-03-19T05:06:29.941481
2018-06-20T20:30:41
2018-06-20T20:30:41
135,900,792
0
0
null
2018-06-03T12:08:23
2018-06-02T04:50:33
2018-04-04T00:48:56
null
[ { "alpha_fraction": 0.5978171825408936, "alphanum_fraction": 0.6270122528076172, "avg_line_length": 34.92156982421875, "blob_id": "dee3a218a2b60a76c04a725f24ce8295fc9ff304", "content_id": "dc67737dbaf28a68b0107a18c4b9c26e490d286f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3665, "license_type": "permissive", "max_line_length": 91, "num_lines": 102, "path": "/model.py", "repo_name": "romkof/CarND-Behavioral-Cloning-P3", "src_encoding": "UTF-8", "text": "import os\nimport csv\nimport cv2\nimport numpy as np\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\n\ndata_path = 'record'\n\n\nsamples = []\nwith open( data_path + '/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n\n\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\n\ndef generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n def get_image_path(row):\n return data_path + '/IMG/'+batch_sample[row].split('/')[-1]\n \n def read_image(path):\n img = cv2.imread(path)\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n \n center_image_path = get_image_path(0) \n left_image_path = get_image_path(1)\n right_image_path = get_image_path(2)\n \n center_image = read_image(center_image_path)\n left_image = read_image(left_image_path)\n right_image = read_image(right_image_path)\n \n correction = 0.25 # this is a parameter to tune\n center_angle = float(batch_sample[3])\n left_angle = center_angle + correction\n right_angle = center_angle - correction\n \n fliped_center_image = cv2.flip(center_image, 1)\n fliped_center_angle = center_angle*-1.0\n \n images.extend((center_image, left_image, right_image, fliped_center_image))\n angles.extend((center_angle, left_angle, right_angle, fliped_center_angle))\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_samples, batch_size=32)\nvalidation_generator = generator(validation_samples, batch_size=32)\n\nch, row, col = 3, 80, 320 # Trimmed image format\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Cropping2D, Lambda, Convolution2D, Flatten, Dense, Dropout\nimport tensorflow as tf\nimport cv2\n\ndef resize_image(x):\n from keras.backend import tf as ktf\n return ktf.image.resize_images(x, (66, 200))\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160,320,3)))\nmodel.add(Cropping2D(cropping=((70,25),(0,0))))\nmodel.add(Lambda(resize_image))\nmodel.add(Convolution2D(24,5,5, subsample=(2,2), activation =\"relu\"))\nmodel.add(Convolution2D(36,5,5, subsample=(2,2), activation =\"relu\"))\nmodel.add(Convolution2D(48,5,5, subsample=(2,2), activation =\"relu\"))\nmodel.add(Convolution2D(64,3,3, activation =\"relu\"))\nmodel.add(Convolution2D(64,3,3, activation =\"relu\"))\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dense(50))\nmodel.add(Dropout(0.7))\nmodel.add(Dense(10))\nmodel.add(Dropout(0.7))\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam')\nmodel.summary()\nhistory_object = model.fit_generator(train_generator, samples_per_epoch= \n len(train_samples), validation_data=validation_generator, \n nb_val_samples=len(validation_samples), nb_epoch=40)\n\n\nmodel.save(\"model.h5\") " } ]
1
Jmbac0n/randomiser
https://github.com/Jmbac0n/randomiser
83f496bebd4d4e2548e0d2605d7b2d6768d955c3
9bbd8c9b1ae26233fa9832f727e6eceedb71ea73
5053663e26dd26ce53f28c3b78fb2c6dbb894e0f
refs/heads/master
2022-08-11T08:03:00.494269
2020-05-20T19:14:44
2020-05-20T19:14:44
265,654,589
0
0
null
2020-05-20T18:34:05
2020-05-20T18:46:04
2020-05-20T19:14:44
Python
[ { "alpha_fraction": 0.643750011920929, "alphanum_fraction": 0.65625, "avg_line_length": 22.615385055541992, "blob_id": "cf832e7133f7fce910a2198074cf622fb9851d87", "content_id": "56d3c79ccaeb335286ef5cb44f76100262139591", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 46, "num_lines": 13, "path": "/randomiser.py", "repo_name": "Jmbac0n/randomiser", "src_encoding": "UTF-8", "text": "# Simple script that generates a random \r\n# combination of words from separate strings\r\n\r\ncolours = ['red','blue','green'.'yellow']\r\nshapes = ['circle','square','triangle','star']\r\n\r\nimport random\r\nx = random.randint(0, 2)\r\ny = random.randint(0, 2)\r\n\r\ncombination = colours[x] + (\" \") + shapes[y]\r\n\r\nprint(combination)\r\n" }, { "alpha_fraction": 0.8085106611251831, "alphanum_fraction": 0.8085106611251831, "avg_line_length": 22.5, "blob_id": "ec66c03d61a4dfa8b4506846e6afed00cc80b8d7", "content_id": "f57ba7cedd6b886df053e87e2e6c52c6713f8246", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/README.md", "repo_name": "Jmbac0n/randomiser", "src_encoding": "UTF-8", "text": "# randomiser\nSmall python script for practice.\n" } ]
2
llFuturell/gender-classifier
https://github.com/llFuturell/gender-classifier
11d0abd8f68322dd916be94f3732a5a259a9eda4
bd8a022a8375aa8d8ba6608bdd0acdfb9b859696
df198b3ded08288a68e911fff1b08be8f2078e1f
refs/heads/master
2020-03-20T11:58:21.644217
2018-06-14T22:57:40
2018-06-14T22:57:40
137,417,278
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5554272532463074, "alphanum_fraction": 0.6893764138221741, "avg_line_length": 23.77142906188965, "blob_id": "5e22419d155d900a1066b2f22074ad740dcaa544", "content_id": "2da8d0a1c7e2e45b7732d1acb0720050db8115f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 866, "license_type": "no_license", "max_line_length": 147, "num_lines": 35, "path": "/genderClassifier.py", "repo_name": "llFuturell/gender-classifier", "src_encoding": "UTF-8", "text": "#Created By Matthew Li\n#06/13/18\n\n#Gender Classifier Based on Measurements\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import tree\n\n#height, weight, shoe size\nX = [[181, 80, 44],[177,70,43], [160,60,38], [154,54,37], [166,65,40], [190,90,47], [175,64,39], [177,70,40],[159,55,37], [171,75,42], [181,85,43]]\n#gender\nY = [\"male\", \"female\", \"female\", \"female\", \"male\", \"male\", \"male\", \"female\", \"male\", \"female\", \"male\"]\n\n#Decision Tree\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X,Y)\n\nprediction = clf.predict([[190,70,43]]) #male\nprint prediction\n\n#KNearestNeighbor\nimport numpy as np\nclf2 = KNeighborsClassifier(n_neighbors=3)\nclf2 = clf2.fit(X,Y)\n\nprediction2 = clf2.predict([[190,70,43]]) #male\nprint prediction2\n\n#SVM\nfrom sklearn import svm\nclf3 = svm.SVC()\nclf3.fit(X,Y)\n\nprediction3 = clf3.predict([[190,70,43]]) #male\nprint prediction3" }, { "alpha_fraction": 0.811965823173523, "alphanum_fraction": 0.811965823173523, "avg_line_length": 57.5, "blob_id": "8d85a96d569792558547bad729f00e7056205082", "content_id": "b01dd1f80d3873cad948ed5535cc67597facfbc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 117, "license_type": "no_license", "max_line_length": 96, "num_lines": 2, "path": "/README.md", "repo_name": "llFuturell/gender-classifier", "src_encoding": "UTF-8", "text": "# gender-classifier\nClassifying gender based on weight, height, and shoe size measurements. My first Python program.\n" } ]
2
Acuda/dummy_pkg
https://github.com/Acuda/dummy_pkg
034e0a5d4043c44742ff864ce95b7c76708d081b
afce6df5c643ee11ce4494b25520e8a53b402dd9
af1f6dc13bb130bc0c902bd6aeb64558496f63fc
refs/heads/master
2016-08-05T15:41:56.436717
2014-11-04T15:05:08
2014-11-04T15:05:08
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6095617413520813, "alphanum_fraction": 0.6115537881851196, "avg_line_length": 20.869565963745117, "blob_id": "49048d420bf4002c400879a25e598a6856c24d0d", "content_id": "6d61cd4d7351dfd428488cd9ddc776f94cf72edc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 502, "license_type": "no_license", "max_line_length": 86, "num_lines": 23, "path": "/scripts/echo_node.py", "repo_name": "Acuda/dummy_pkg", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport rospy\nimport std_msgs\n\nclass EchoNode(object):\n\n def __init__(self):\n rospy.init_node('echo_node')\n\n self.sub = rospy.Subscriber('in', std_msgs.msg.String, callback=self.callback)\n self.pub = rospy.Publisher('out', std_msgs.msg.String)\n\n rospy.spin()\n\n def callback(self, data):\n self.pub.publish(data.data)\n\nif __name__ == '__main__':\n try:\n EchoNode()\n except rospy.ROSInterruptException: pass" } ]
1
prateeksahu10/web-api
https://github.com/prateeksahu10/web-api
16596d13ec66fd65c73529c5df6fe797a3445b3d
29b5bb86943fb3388e3732c01b982daddc59340d
fb37a1632bb83313cb1e29f3e01827e76835b4f0
refs/heads/master
2020-03-28T03:06:19.582948
2018-09-06T05:10:52
2018-09-06T05:10:52
147,620,761
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7810218930244446, "alphanum_fraction": 0.7956204414367676, "avg_line_length": 44.66666793823242, "blob_id": "09acadbaf84f838ac0a588a387829a503cacc433", "content_id": "603d75ad8d42ba150d03f7bbc296be5cbe2cffa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 96, "num_lines": 3, "path": "/assignment13.py", "repo_name": "prateeksahu10/web-api", "src_encoding": "UTF-8", "text": "import requests\nresponse=requests.get(\"https://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=text\")\nprint(response.content)\n" } ]
1
Alfredjoy/Ecom_project
https://github.com/Alfredjoy/Ecom_project
e0bbe0ac32eaef96b83e28ae109d4ad8af308c78
0f3c0ff0127019aeb413bc0f48ba5bca2010756b
666270811000c30f9b0cfdc16671b83d4975064e
refs/heads/master
2023-01-28T04:43:11.042563
2020-12-11T11:13:24
2020-12-11T11:13:24
320,186,163
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.6774193644523621, "avg_line_length": 20.700000762939453, "blob_id": "d9bba73fd6c045cd8d09c991c5ce366c2a41058b", "content_id": "42b3580fea8d7499c07e17e8f4562c4dfa427909", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 52, "num_lines": 10, "path": "/store/urls.py", "repo_name": "Alfredjoy/Ecom_project", "src_encoding": "UTF-8", "text": "from django.urls import path, include\n\nfrom store import views\n\n\nurlpatterns = [\n path('',views.store,name='store'),\n path('cart',views.cart, name='cart'),\n path('checkout',views.checkout, name='checkout')\n]\n" } ]
1
JackyWang2001/animeSeg2
https://github.com/JackyWang2001/animeSeg2
94584d4952af7708341d5a8f3987cd8f6d9d706e
21dc6466132d8ae50d26a571b3917aa65e71cf6d
342487d04694f0afad0c5df0d634a1e7014de83d
refs/heads/master
2023-03-15T03:14:14.561169
2021-03-28T17:27:23
2021-03-28T17:27:23
352,347,702
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7666666507720947, "alphanum_fraction": 0.800000011920929, "avg_line_length": 8.666666984558105, "blob_id": "1870ce19146447f654dcb5294da47be1da298d97", "content_id": "c28a879cefec14ea29e2ea9ba558ae5d40a1c7cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "no_license", "max_line_length": 17, "num_lines": 3, "path": "/main.py", "repo_name": "JackyWang2001/animeSeg2", "src_encoding": "UTF-8", "text": "import os\n\nimport detectron2\n\n" }, { "alpha_fraction": 0.6231213808059692, "alphanum_fraction": 0.639306366443634, "avg_line_length": 38.318180084228516, "blob_id": "80339c8dd65b89b9b216c4f2e8b563640f54a846", "content_id": "a09286973124628ae8b3b5387dec5739d2095d49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "no_license", "max_line_length": 97, "num_lines": 22, "path": "/dataset.py", "repo_name": "JackyWang2001/animeSeg2", "src_encoding": "UTF-8", "text": "import os\n\nfrom detectron2.data.datasets import builtin_meta\nfrom detectron2.data.datasets.coco import load_sem_seg\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\n\n\ndef register_ade20k(root):\n root = os.path.join(root, \"ADEChallengeData2016\")\n for name, dirname in [(\"train\", \"training\"), (\"val\", \"validation\")]:\n image_dir = os.path.join(root, \"images\", dirname)\n gt_dir = os.path.join(root, \"annotations_detectron2\", dirname)\n name = f\"ade20k_sem_seg_{name}\"\n DatasetCatalog.register(\n name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext=\"png\", image_ext=\"jpg\")\n )\n MetadataCatalog.get(name).set(\n stuff_classes=builtin_meta.ADE20K_SEM_SEG_CATEGORIES[:],\n image_root=image_dir,\n sem_seg_root=gt_dir,\n evaluator_type=\"sem_seg\",\n )\n" } ]
2
jessehylton/Podrum
https://github.com/jessehylton/Podrum
1aca814be407176b2da75f49330c8b1236f69713
fed9c811a230ff5b9ac9b516867bd5f4c1e75d85
fcb22e08dc134d847ad633ecabfb4f867361a748
refs/heads/master
2022-11-16T00:47:43.329253
2020-07-07T15:53:13
2020-07-07T15:53:13
275,432,622
0
0
Apache-2.0
2020-06-27T18:38:08
2020-06-27T18:38:10
2020-07-07T15:53:13
null
[ { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.5844748616218567, "avg_line_length": 29.20689582824707, "blob_id": "ad630e83a55c0dd73f4369af8781a0d070d8e33d", "content_id": "96925397826b72ce6bf727a1759a1966c14a998b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 876, "license_type": "permissive", "max_line_length": 77, "num_lines": 29, "path": "/src/podrum/network/protocol/ServerToClientHandshakePacket.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nfrom podrum.network.protocol.DataPacket import DataPacket\nfrom podrum.network.protocol.ProtocolInfo import ProtocolInfo\n\nclass ServerToClientHandshakePacket(DataPacket):\n NID = ProtocolInfo.SERVER_TO_CLIENT_HANDSHAKE_PACKET\n\n jwt = None\n\n def canBeSentBeforeLogin():\n return True\n\n def decodePayload(self):\n self.jwt = self.getString()\n\n def encodePayload(self):\n self.putString(self.jwt)\n" }, { "alpha_fraction": 0.49548327922821045, "alphanum_fraction": 0.5009033679962158, "avg_line_length": 23.329669952392578, "blob_id": "94b239bdfda99ad88923e91466163f08e2838d5a", "content_id": "ca3446a901eb90b18f8cc1d120944b26c1e66e70", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2214, "license_type": "permissive", "max_line_length": 97, "num_lines": 91, "path": "/src/podrum/math/Facing.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nclass Facing:\n AXIS_Y = 0\n AXIS_Z = 1\n AXIS_X = 2\n\n FLAG_AXIS_POSITIVE = 1\n\n DOWN = AXIS_Y << 1\n UP = (AXIS_Y << 1) | FLAG_AXIS_POSITIVE\n NORTH = AXIS_Z << 1\n SOUTH = (AXIS_Z << 1) | FLAG_AXIS_POSITIVE\n WEST = AXIS_X << 1\n EAST = (AXIS_X << 1) | FLAG_AXIS_POSITIVE\n\n ALL = [\n DOWN,\n UP,\n NORTH,\n SOUTH,\n WEST,\n EAST\n ]\n\n HORIZONTAL = [\n NORTH,\n SOUTH,\n WEST,\n EAST\n ]\n\n CLOCKWISE = {\n AXIS_Y: {\n NORTH: EAST,\n EAST: SOUTH,\n SOUTH: WEST,\n WEST: NORTH\n },\n AXIS_Z: {\n UP: EAST,\n EAST: DOWN,\n DOWN: WEST,\n WEST: UP\n },\n AXIS_X: {\n UP: NORTH,\n NORTH: DOWN,\n DOWN: SOUTH,\n SOUTH: UP\n }\n }\n\n @staticmethod\n def axis(direction):\n return direction >> 1\n\n @staticmethod\n def is_positive(direction):\n return (direction & Facing.FLAG_AXIS_POSITIVE) == Facing.FLAG_AXIS_POSITIVE\n\n @staticmethod\n def opposite(direction):\n return direction ^ Facing.FLAG_AXIS_POSITIVE\n\n @staticmethod\n def rotate(direction, axis, clockwise):\n if not Facing.CLOCKWISE[axis]:\n raise ValueError(\"Invalid axis {}\".format(axis))\n\n if not Facing.CLOCKWISE[axis][direction]:\n raise ValueError(\"Cannot rotate direction {} around axis {}\".format(direction, axis))\n\n rotated = Facing.CLOCKWISE[axis][direction]\n return rotated if clockwise else Facing.opposite(rotated)\n\n @staticmethod\n def validate(facing):\n if facing in Facing.ALL:\n raise ValueError(\"Invalid direction {}\".format(facing))\n" }, { "alpha_fraction": 0.5851197838783264, "alphanum_fraction": 0.5863808393478394, "avg_line_length": 30.719999313354492, "blob_id": "846e3b4f8b9e8745b4f1a29389e5e93c49068c77", "content_id": "73735600f9190865fde88d3594c6c7e285d3e81b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 793, "license_type": "permissive", "max_line_length": 77, "num_lines": 25, "path": "/src/podrum/network/protocol/ClientToServerHandshakePacket.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nfrom podrum.network.protocol.DataPacket import DataPacket\nfrom podrum.network.protocol.ProtocolInfo import ProtocolInfo\n\nclass ClientToServerHandshakePacket(DataPacket):\n NID = ProtocolInfo.CLIENT_TO_SERVER_HANDSHAKE_PACKET\n\n def canBeSentBeforeLogin():\n return True\n\n def encodePayload(): pass\n\n def decodePayload(): pass\n" }, { "alpha_fraction": 0.5352981686592102, "alphanum_fraction": 0.5407813787460327, "avg_line_length": 36.394737243652344, "blob_id": "6dbb974dc6ac7bcb04cb951d7a9119b0e5e14fcf", "content_id": "fa836dd20ad97718e897e3e4c8f8a1fc1cb5881c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1459, "license_type": "permissive", "max_line_length": 101, "num_lines": 38, "path": "/src/podrum/Podrum.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\r\n* ____ _\r\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\r\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\r\n* | __/ (_) | (_| | | | |_| | | | | | |\r\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\r\n*\r\n* This program is free software: you can redistribute it and/or modify\r\n* it under the terms of the GNU Lesser General Public License as published by\r\n* the Free Software Foundation, either version 3 of the License, or\r\n* (at your option) any later version.\r\n\"\"\"\r\n#!/usr/bin/env python3\r\n\r\nimport sys\r\nimport inspect\r\nfrom os import getcwd, path\r\nfrom threading import Thread\r\nsys.path.insert(0, path.dirname(path.dirname(path.abspath(inspect.getfile(inspect.currentframe())))))\r\nfrom podrum.Server import Server\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) >= 3:\r\n if sys.argv[1] == \"--no_wizard\" and sys.argv[2] == \"-travis\":\r\n serverThread = Thread(target=Server, args=(getcwd(), False, True))\r\n else:\r\n print(\"[!] None valid args selected.\")\r\n serverThread = Thread(target=Server, args=(getcwd(), True))\r\n elif len(sys.argv) == 2:\r\n if sys.argv[1] == \"--no_wizard\":\r\n serverThread = Thread(target=Server, args=(getcwd(), False))\r\n else:\r\n print(\"[!] None valid args selected.\")\r\n serverThread = Thread(target=Server, args=(getcwd(), True))\r\n else:\r\n serverThread = Thread(target=Server, args=(getcwd(), True))\r\n\r\n serverThread.start()\r\n" }, { "alpha_fraction": 0.6830708384513855, "alphanum_fraction": 0.6837270259857178, "avg_line_length": 39.945945739746094, "blob_id": "39fac384776dc3204a3779ea51adfaddc3129498", "content_id": "3d0dc9bb22d3aaa72fb96af86a413d15a8f8dffc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1524, "license_type": "permissive", "max_line_length": 95, "num_lines": 37, "path": "/src/podrum/network/PacketPool.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nfrom podrum.network.protocol.ClientToServerHandshakePacket import ClientToServerHandshakePacket\nfrom podrum.network.protocol.DataPacket import DataPacket\nfrom podrum.network.protocol.DisconnectPacket import DisconnectPacket\nfrom podrum.network.protocol.LoginPacket import LoginPacket\nfrom podrum.network.protocol.PlayStatusPacket import PlayStatusPacket\nfrom podrum.network.protocol.ResourcePacksInfoPacket import ResourcePacksInfoPacket\nfrom podrum.network.protocol.ServerToClientHandshakePacket import ServerToClientHandshakePacket\n\nclass PacketPool:\n packetPool = {}\n \n def __init__(self):\n self.registerPackets()\n \n def registerPacket(packet):\n self.pool[packet.NID] = packet.copy()\n \n def registerPackets(self):\n self.registerPacket(ClientToServerHandshakePacket)\n self.registerPacket(DisconnectPacket)\n self.registerPacket(LoginPacket)\n self.registerPacket(PlayStatusPacket)\n self.registerPacket(ResourcePacksInfoPacket)\n self.registerPacket(ServerToClientHandshakePacket)\n \n" }, { "alpha_fraction": 0.5476118922233582, "alphanum_fraction": 0.5656353235244751, "avg_line_length": 31.63725471496582, "blob_id": "d40b11b12138e12b4cdabe34af321b78b3692adf", "content_id": "e23dfe9a63de2f0dfaa6e76b3d21161d46338f16", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3329, "license_type": "permissive", "max_line_length": 115, "num_lines": 102, "path": "/src/podrum/utils/Utils.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _ \n* | _ \\ ___ __| |_ __ _ _ _ __ ___ \n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\ \n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\nimport base64\nimport binascii\nimport json\nimport os\nimport signal\nimport sys\nimport socket\nimport time\nimport urllib\nimport hmac\nimport hashlib\n\nclass Utils:\n\n def getOS():\n if sys.platform == 'linux' or sys.platform == 'linux2':\n return 'linux'\n elif sys.platform == 'darwin':\n return 'osx'\n elif sys.platform == 'win32' or sys.platform == 'win64':\n return 'windows'\n \n def killServer():\n os.kill(os.getpid(), signal.SIGTERM)\n \n def getPrivateIpAddress():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n return ip\n \n def getPublicIpAddress():\n ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')\n return ip\n \n def microtime(get_as_float = False) :\n if get_as_float:\n return time.time()\n else:\n return '%f %d' % math.modf(time.time())\n \n def substr(string, start, length = None):\n if start < 0:\n start = start + len(string)\n if not length:\n return string[start:]\n elif length > 0:\n return string[start:start + length]\n else:\n return string[start:length]\n \n def hex2bin(hexdec):\n if hexdec == 'x':\n return False\n if hexdec == '':\n return False\n dec = int(hexdec, 16)\n b = binascii.unhexlify('%x' % dec)\n return b\n \n def binToHex(b):\n return binascii.hexlify(b)\n \n def HMACSHA256(data, secret):\n encodedData = data.encode()\n byteSecret = secret.encode()\n return hmac.new(byteSecret, encodedData, hashlib.sha256).hexdigest().upper()\n \n def base64UrlEncode(data):\n return base64.urlsafe_b64encode(data.encode()).replace(b\"=\", b\"\").decode()\n \n def base64UrlDecode(data):\n return base64.urlsafe_b64decode(data).decode()\n \n def encodeJWT(header, payload, secret):\n body = Utils.base64UrlEncode(json.dumps(header)) + \".\" + Utils.base64UrlEncode(json.dumps(payload))\n secret = Utils.HMACSHA256(body, secret)\n return body + \".\" + Utils.base64UrlEncode(secret)\n \n def decodeJWT(token: str):\n [headB64, payloadB64, sigB64] = token.split(\".\")\n rawPayloadJSON = Utils.base64UrlDecode(payloadB64)\n if rawPayloadJSON == False:\n raise Exception(\"Payload base64 is invalid and cannot be decoded\")\n decodedPayload = json.loads(rawPayloadJSON)\n if isinstance(decodedPayload, str):\n decodedPayload = json.loads(decodedPayload)\n if not isinstance(decodedPayload, dict):\n raise Exception(\"Decoded payload should be dict, \" + str(type(decodedPayload).__name__) + \" received\")\n return decodedPayload\n" }, { "alpha_fraction": 0.5528120994567871, "alphanum_fraction": 0.5608465671539307, "avg_line_length": 26.143617630004883, "blob_id": "808dfb1cb90ec9ce267460ea9de6e06b80416bd2", "content_id": "52e3bdab52fa4e91cb93b18b6355b46935479dbb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5103, "license_type": "permissive", "max_line_length": 77, "num_lines": 188, "path": "/src/podrum/utils/BinaryStream.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nfrom podrum.utlis.Binary import Binary\nfrom podrum.utlis.UUID import UUID\n\nclass BinaryStream:\n buffer = \"\"\n offset = None\n \n def __int__(self, buffer = \"\", offset = 0):\n self.buffer = buffer\n self.offset = offset\n \n def reset(self):\n self.buffer = \"\"\n self.offset = 0\n\n def setBuffer(self, buffer = \"\", offset = 0):\n self.buffer = buffer\n self.offset = int(offset)\n \n def getOffset(self):\n return self.offset\n \n def getBuffer(self):\n return self.buffer\n \n def get(self, len):\n if len < 0:\n self.offset = len(self.buffer) - 1;\n return \"\"\n elif len == True:\n str = self.buffer[0:self.offset]\n self.offset = len(self.buffer)\n return str\n buffer = self.buffer[self.offset:self.offset+len]\n self.offset += length\n return buffer\n \n def put(self, str):\n self.buffer += str\n \n def getBool(self):\n return self.get(1) != b'\\x00'\n \n def putBool(self, v):\n self.buffer += (b\"\\x01\" if v else b\"\\x00\")\n \n def getByte(self):\n self.offset += 1\n return ord(self.buffer[self.offset])\n \n def putByte(self, v):\n self.buffer += chr(v)\n \n def getLong(self):\n return Binary.readLong(self.get(8))\n \n def putLong(self, v):\n self.buffer += Binary.writeLong(v)\n \n def getLLong(self):\n return Binary.readLLong(self.get(8))\n \n def putLLong(self, v):\n self.buffer += Binary.writeLLong(v)\n \n def getInt(self):\n return Binary.readInt(self.get(4))\n \n def putInt(self, v):\n self.buffer += Binary.writeInt(v)\n \n def getLInt(self):\n return Binary.readLInt(self.get(4))\n \n def putLInt(self, v):\n self.buffer += Binary.writeLInt(v)\n \n def getShort(self):\n return Binary.readShort(self.get(2))\n \n def putShort(self, v):\n self.buffer += Binary.writeShort(v)\n \n def getLShort(self):\n return Binary.readLShort(self.get(2))\n \n def putLShort(self, v):\n self.buffer += Binary.writeLShort(v)\n \n def getSignedShort(self):\n return Binary.readSignedShort(self.get(2))\n \n def getSignedLShort(self):\n return Binary.readSignedLShort(self.get(4))\n \n def getFloat(self):\n return Binary.readFloat(self.get(4))\n \n def putFloat(self, v):\n self.buffer += Binary.writeFloat(v)\n \n def getLFloat(self):\n return Binary.readLFloat(self.get(4))\n \n def putLFloat(self, v):\n self.buffer += Binary.writeLFloat(v)\n \n def getRoundedFloat(self, accuracy):\n return Binary.readRoundedFloat(self.get(4), accuracy)\n \n def getRoundedLFloat(self, accuracy):\n return Binary.readRoundedLFloat(self.get(4), accuracy)\n \n def getTriad(self):\n return Binary.readTriad(self.get(3))\n \n def putTriad(self, v):\n self.buffer += Binary.writeTriad(v)\n \n def getLTriad(self):\n return Binary.readLTriad(self.get(3))\n \n def putLTriad(self, v):\n self.buffer += Binary.writeLTriad(v)\n\n def getUnsignedVarInt(self):\n return Binary.readUnsignedVarInt(self.buffer, self.offset)\n \n def putUnsignedVarInt(self, v):\n self.put(Binary.writeUnsignedVarInt(v))\n \n def getVarInt(self):\n return Binary.readVarInt(self.buffer, self.offset)\n \n def putVarInt(self, v):\n self.put(Binary.writeVarInt(v))\n \n def getUnsignedVarLong(self):\n return Binary.readUnsignedVarLong(self.buffer, self.offset)\n \n def putUnsignedVarLong(self, v):\n self.put(Binary.writeUnsignedVarLong(v))\n \n def getVarLong(self):\n return Binary.readVarLong(self.buffer, self.offset)\n \n def putVarLong(self, v):\n self.put(Binary.writeVarLong(v))\n\n def getString(self):\n self.get(self.getUnsignedVarInt())\n \n def putString(self, v):\n self.putUnsignedVarInt(len(v))\n self.put(v)\n \n def getUUID(self):\n part1 = self.getLInt()\n part0 = self.getLInt()\n part3 = self.getLInt()\n part2 = self.getLInt()\n return UUID(part0, part1, part2, part3)\n \n def putUUID(self, uuid: UUID):\n self.putLInt(uuid.getPart(1))\n self.putLInt(uuid.getPart(0))\n self.putLInt(uuid.getPart(3))\n self.putLInt(uuid.getPart(2))\n \n def feof(self):\n try:\n self.buffer[self.offset]\n return True\n except IndexError:\n return False\n" }, { "alpha_fraction": 0.5284806489944458, "alphanum_fraction": 0.5534618496894836, "avg_line_length": 28.021875381469727, "blob_id": "7aefdba63f2dbfdebc6227a4eb17802eba32d7f4", "content_id": "98652bbce93ecbff9996f59e46c0c9f801207261", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9287, "license_type": "permissive", "max_line_length": 95, "num_lines": 320, "path": "/src/podrum/utils/Binary.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\nfrom struct import unpack, pack, calcsize\nfrom re import match\nimport decimal\nimport sys\nfrom .bcmath import bcmath\n\nclass Binary:\n\n def checkLength(string, expect):\n length = len(string)\n assert (length == expect), 'Expected ' + str(expect) + 'bytes, got ' + str(length)\n \n @staticmethod\n def signByte(value: int):\n if calcsize == 8:\n return (int(value) & 0xffffffff) >> 56\n else:\n return (int(value) & 0xffffffff) >> 24\n\n @staticmethod\n def unsignByte(value: int):\n return int(value) & 0xff\n\n @staticmethod\n def signShort(value: int):\n if calcsize == 8:\n return (int(value) & 0xffffffff) >> 48\n else:\n return (int(value) & 0xffffffff) >> 16\n\n @staticmethod\n def unsignShort(value: int):\n return int(value) & 0xffff\n\n @staticmethod\n def signInt(value: int):\n if calcsize == 8:\n return (int(value) & 0xffffffff) >> 32\n else:\n return (int(value) & 0xffffffff) >> 31\n\n @staticmethod\n def unsignInt(value: int):\n return int(value) & 0xffffffff\n\n @staticmethod\n def readTriad(str: bytes) -> int:\n Binary.checkLength(str, 3)\n return unpack('>L', b'\\x00' + str)[0]\n\n @staticmethod\n def writeTriad(value: int) -> bytes:\n return pack('>L', value)[1:]\n\n @staticmethod\n def readLTriad(str: bytes) -> int:\n Binary.checkLength(str, 3)\n return unpack('<L', b'\\x00' + str)[0]\n\n @staticmethod\n def writeLTriad(value: int) -> bytes:\n return pack('<L', value)[0:-1]\n \n @staticmethod\n def readBool(b: bytes) -> int:\n return unpack('?', b)[0]\n\n @staticmethod\n def writeBool(b: int) -> bytes:\n return b'\\x01' if b else b'\\x00'\n \n @staticmethod\n def readByte(c: bytes) -> int:\n Binary.checkLength(c, 1)\n return unpack('>B', c)[0]\n \n @staticmethod\n def readSignedByte(c: bytes) -> int:\n Binary.checkLength(c, 1)\n return unpack('>b', c)[0]\n\n @staticmethod\n def writeByte(c: int) -> bytes:\n return pack(\">B\", c)\n \n @staticmethod\n def readShort(str: bytes) -> int:\n Binary.checkLength(str, 2)\n return unpack('>H', str)[0]\n \n @staticmethod\n def readSignedShort(str: bytes) -> int:\n Binary.checkLength(str, 2)\n return Binary.signShort(Binary.readShort(str))\n\n @staticmethod\n def writeShort(value: int) -> bytes:\n return pack('>H', value)\n \n @staticmethod\n def readLShort(str: bytes) -> int:\n Binary.checkLength(str, 2)\n return unpack('<H', str)[0]\n \n @staticmethod\n def readSignedLShort(str: bytes) -> int:\n Binary.checkLength(str, 2)\n return Binary.signShort(Binary.readLShort(str))\n\n @staticmethod\n def writeLShort(value: int) -> bytes:\n return pack('<H', value)\n \n @staticmethod\n def readInt(str: bytes) -> int:\n Binary.checkLength(str, 4)\n return unpack('>L', str)[0]\n\n @staticmethod\n def writeInt(value: int) -> bytes:\n return pack('>L', value)\n\n @staticmethod\n def readLInt(str: bytes) -> int:\n Binary.checkLength(str, 4)\n return unpack('<L', str)[0]\n\n @staticmethod\n def writeLInt(value: int) -> bytes:\n return pack('<L', value)\n\n @staticmethod\n def readFloat(str: bytes) -> int:\n Binary.checkLength(str, 4)\n return unpack('>f', str)[0]\n \n @staticmethod\n def readRoundedFloat(str, accuracy):\n return round(Binary.readFloat(str), accuracy)\n\n @staticmethod\n def writeFloat(value: int) -> bytes:\n return pack('>f', value)\n\n @staticmethod\n def readLFloat(str: bytes) -> int:\n Binary.checkLength(str, 4)\n return unpack('<f', str)[0]\n \n @staticmethod\n def readRoundedLFloat(str, accuracy):\n return round(Binary.readLFloat(str), accuracy)\n\n @staticmethod\n def writeLFloat(value: int) -> bytes:\n return pack('<f', value)\n \n \n @staticmethod\n def printFloat(value):\n return match(r\"/(\\\\.\\\\d+?)0+$/\", \"\" + value).group(1)\n\n @staticmethod\n def readDouble(str: bytes) -> int:\n Binary.checkLength(str, 8)\n return unpack('>d', str)[0]\n\n @staticmethod\n def writeDouble(value: int) -> bytes:\n return pack('>d', value)\n\n @staticmethod\n def readLDouble(str: bytes) -> int:\n Binary.checkLength(str, 8)\n return unpack('<d', str)[0]\n\n @staticmethod\n def writeLDouble(value: int) -> bytes:\n return pack('<d', value)\n\n @staticmethod\n def readLong(str: bytes) -> int:\n Binary.checkLength(str, 8)\n return unpack('>L', str)[0]\n\n @staticmethod\n def writeLong(value: int) -> bytes:\n return pack('>L', value)\n\n @staticmethod\n def readLLong(str: bytes) -> int:\n Binary.checkLength(str, 8)\n return unpack('<L', str)[0]\n\n @staticmethod\n def writeLLong(value: int) -> bytes:\n return pack('<L', value)\n \n @staticmethod\n def readUnsignedVarInt(buffer, offset):\n value = \"0\";\n buffer = str(buffer)\n i = 0\n while i <= 35:\n i += 7\n offset += 1\n b = ord(buffer[offset])\n value = bcmath.bcadd(value, bcmath.bcmul(str(b & 0x7f), bcmath.bcpow(\"2\", str(i))))\n if (b & 0x80) == 0:\n return value\n elif (len(buffer) - 1) < int(offset):\n raise TypeError('Expected more bytes, none left to read')\n raise TypeError('Varint did not terminate after 5 bytes!')\n\n @staticmethod\n def readVarInt(buffer, offset):\n raw = Binary.readUnsignedVarInt(buffer, offset)\n temp = bcmath.bcdiv(raw, \"2\")\n if bcmath.bcmod(raw, \"2\") == \"1\":\n temp = bcmath.bcsub(bcmath.bcmul(temp, \"-1\"), \"1\")\n return temp\n \n @staticmethod\n def writeUnsignedVarInt(value):\n buffer = \"\"\n value = value & 0xffffffff\n if bcmath.bccomp(value, \"0\") == -1:\n value = bcmath.bcadd(value, \"18446744073709551616\")\n i = 0\n while i <= 5:\n i = i + 1\n byte = int(bcmath.bcmod(value, \"128\"))\n value = bcmath.bcdiv(value, \"128\")\n if value != 0:\n buffer += chr(byte | 0x80)\n else:\n buffer += chr(byte)\n return buffer\n raise TypeError('Value too large to be encoded as a varint')\n \n @staticmethod\n def writeVarInt(value):\n value = bcmath.bcmod(bcmath.bcmul(value, \"2\"), \"18446744073709551616\")\n if bcmath.bccomp(value, \"0\") == -1:\n value = bcmath.bcsub(bcmath.bcmul(value, \"-1\"), \"1\")\n return Binary.writeUnsignedVarInt(value)\n \n @staticmethod\n def readUnsignedVarLong(buffer, offset):\n value = \"0\"\n buffer = str(buffer)\n i = 0\n while i <= 63:\n i += 7\n offset += 1\n b = ord(buffer[offset])\n value = bcmath.bcadd(value, bcmath.bcmul(str(b & 0x7f), bcmath.bcpow(\"2\", str(i))))\n\n if (b & 0x80) == 0:\n return value\n elif (len(buffer) - 1) < int(offset):\n raise TypeError(\"Expected more bytes, none left to read\")\n\n raise TypeError(\"VarLong did not terminate after 10 bytes!\")\n \n @staticmethod\n def readVarLong(buffer, offset):\n raw = Binary.readUnsignedVarLong(buffer, offset)\n temp = bcmath.bcdiv(raw, \"2\")\n if bcmath.bcmod(raw, \"2\") == \"1\":\n temp = bcmath.bcsub(bcmath.bcmul(temp, \"-1\"), \"1\")\n return temp\n \n @staticmethod\n def writeUnsignedVarLong(value):\n buffer = \"\"\n if bcmath.bccomp(value, \"0\") == -1:\n value = bcmath.bcadd(value, \"18446744073709551616\")\n i = 0\n while i <= 10:\n i = i + 1\n byte = int(bcmath.bcmod(value, \"128\"))\n value = bcmath.bcdiv(value, \"128\")\n if value != 0:\n buffer += chr(byte | 0x80)\n else:\n buffer += chr(byte)\n return buffer\n raise TypeError(\"Value too large to be encoded as a VarLong\")\n \n @staticmethod\n def writeVarLong(value):\n value = bcmath.bcmod(bcmath.bcmul(value, \"2\"), \"18446744073709551616\")\n if bcmath.bccomp(value, \"0\") == -1:\n value = bcmath.bcsub(bcmath.bcmul(value, \"-1\"), \"1\")\n return Binary.writeUnsignedVarLong(value)\n \n @staticmethod\n def flipShortEndianness(value):\n return Binary.readLShort(Binary.writeShort(value))\n\n @staticmethod\n def flipIntEndianness(value):\n return Binary.readLInt(Binary.writeInt(value))\n\n @staticmethod\n def flipLongEndianness(value):\n return Binary.readLLong(Binary.writeLong(value))\n" }, { "alpha_fraction": 0.44834503531455994, "alphanum_fraction": 0.4493480324745178, "avg_line_length": 27.382352828979492, "blob_id": "f4da62af40eb7cf4807d05361e74c18a9e4b2c86", "content_id": "cc02ee969c9dc8d951e0d1dea44bfe9f5ae758f7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 997, "license_type": "permissive", "max_line_length": 77, "num_lines": 34, "path": "/src/podrum/wizard/Parser.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\r\n* ____ _ \r\n* | _ \\ ___ __| |_ __ _ _ _ __ ___ \r\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\ \r\n* | __/ (_) | (_| | | | |_| | | | | | |\r\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\r\n*\r\n* This program is free software: you can redistribute it and/or modify\r\n* it under the terms of the GNU Lesser General Public License as published by\r\n* the Free Software Foundation, either version 3 of the License, or\r\n* (at your option) any later version.\r\n\"\"\"\r\nimport os\r\n\r\nfrom podrum.lang import Base\r\n\r\nclass Parser:\r\n\r\n def checkYesNo(str):\r\n str = str.lower()\r\n if str == 'y' or str == 'yes':\r\n return True\r\n elif str == 'n' or str == 'no':\r\n return False\r\n else:\r\n return\r\n\r\n def checkIfLangExists(str):\r\n path = os.getcwd() + '/src/podrum/lang/'\r\n allLangs = Base.Base.getLangNames(path)\r\n if(str in allLangs):\r\n return True\r\n else:\r\n return False" }, { "alpha_fraction": 0.5572640299797058, "alphanum_fraction": 0.5636267066001892, "avg_line_length": 30.433332443237305, "blob_id": "7d8d13059b4ac88041f41cda7fc3654f98d5a79b", "content_id": "8b7110d3c25dd41c07fc4808eeb7fcdb314208e8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1886, "license_type": "permissive", "max_line_length": 166, "num_lines": 60, "path": "/src/podrum/nbt/tag/NamedTag.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom podrum.nbt.NBTStream import NBTStream\nfrom podrum.nbt.ReaderTracker import ReaderTracker\n\nclass NamedTag:\n __metaclass__ = ABCMeta\n \n name = None\n cloning = False\n \n def __init__(self, name = ''):\n if len(name > 32767):\n raise ValueError(\"Tag name cannot be more than 32767 bytes, got length \" + str(len(name)))\n self.name = name\n \n def getName():\n return NamedTag.name\n \n def setName(name):\n NamedTag.name = name\n \n def getValue(): pass\n \n def getType(): pass\n \n def write(nbt: NBTStream): pass\n \n def read(nbt: NBTStream, tracker: ReaderTracker): pass\n \n def toString(indentation = 0):\n return (\" \" * indentation) + type(object) + \": \" + ((\"name='NamedTag.name', \") if (NamedTag.name != \"\") else \"\") + \"value='\" + str(NamedTag.getValue()) + \"'\"\n \n def safeClone() -> NamedTag:\n if NamedTag.cloning:\n raise ValueError(\"Recursive NBT tag dependency detected\")\n NamedTag.cloning = True\n retval = NamedTag.copy()\n NamedTag.cloning = False\n retval.cloning = False\n return retval\n \n def equals(that: NamedTag):\n return NamedTag.name == that.name and NamedTag.equalsValue(that)\n \n def equalsValue(that: NamedTag):\n return isinstance(that, NamedTag()) and NamedTag.getValue() == that.getValue()\n" }, { "alpha_fraction": 0.5583929419517517, "alphanum_fraction": 0.5917602777481079, "avg_line_length": 43.5, "blob_id": "b9309a6c31017637473c5ead16d3214af10366c8", "content_id": "d5f0e0d1cb7af902706ea4a69805132021652189", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2937, "license_type": "permissive", "max_line_length": 244, "num_lines": 66, "path": "/src/podrum/utils/UUID.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nimport hashlib\nimport os\nimport random\nimport time\n\nfrom podrum.utils.Binary import Binary\nfrom podrum.utils.Utils import Utils\n\nclass UUID:\n parts = [0, 0, 0, 0]\n version = None\n \n def __init__(self, part1 = 0, part2 = 0, part3 = 0, part4 = 0, version = None):\n self.parts[0] = int(part1)\n self.parts[1] = int(part2)\n self.parts[2] = int(part3)\n self.parts[3] = int(part4)\n self.version = (self.parts[1] & 0xf000) >> 12 if version == None else int(version)\n \n def getVersion(self):\n return self.version\n \n def equals(self, uuid: UUID):\n return uuid.parts[0] == self.parts[0] and uuid.parts[1] == self.parts[1] and uuid.parts[2] == self.parts[2] and uuid.parts[3] == self.parts[3]\n \n def fromBinary(self, uuid, version = None):\n if len(uuid) != 16:\n raise Exception(\"Must have exactly 16 bytes\")\n return UUID(Binary.readInt(Utils.substr(uuid, 0, 4)), Binary.readInt(Utils.substr(uuid, 4, 4)), Binary.readInt(Utils.substr(uuid, 8, 4)), Binary.readInt(Utils.substr(uuid, 12, 4)), version)\n\n def fromString(self, uuid, version = None):\n return self.fromBinary(Utils.hex2bin(uuid.strip().replace(\"-\", \"\")), version)\n \n def fromData(self, data):\n hash = hashlib.new(\"md5\").update(\"\".join(data))\n return self.fromBinary(hash, 3)\n\n def fromRandom(self):\n return self.fromData(Binary.writeInt(int(time.time())), Binary.writeShort(os.getpid()), Binary.writeShort(os.geteuid()), Binary.writeInt(random.randint(-0x7fffffff, 0x7fffffff)), Binary.writeInt(random.randint(-0x7fffffff, 0x7fffffff)))\n \n def toBinary(self):\n return Binary.writeInt(self.parts[0]) + Binary.writeInt(self.parts[1]) + Binary.writeInt(self.parts[2]) + Binary.writeInt(self.parts[3])\n \n def toString(self):\n hex = Utils.bin2hex(self.toBinary())\n if self.version != None:\n return Utils.substr(hex, 0, 8) + \"-\" + Utils.substr(hex, 8, 4) + \"-\" + int(self.version, 16) + Utils.substr(hex, 13, 3) + \"-8\" + Utils.substr(hex, 17, 3) + \"-\" + Utils.substr(hex, 20, 12)\n return Utils.substr(hex, 0, 8) + \"-\" + Utils.substr(hex, 8, 4) + \"-\" + Utils.substr(hex, 12, 4) + \"-\" + Utils.substr(hex, 16, 4) + \"-\" + Utils.substr(hex, 20, 12)\n \n def getPart(self, partNumber: int):\n if partNumber < 0 or partNumber > 3:\n raise Exception(\"Invalid UUID part index\" + str(partNumber))\n return self.parts[partNumber]\n" }, { "alpha_fraction": 0.5753798484802246, "alphanum_fraction": 0.5773276090621948, "avg_line_length": 35.154930114746094, "blob_id": "8439628f6980f2d99ef34f7fed7a31b100ab9882", "content_id": "dec7af0d5b5c0c19f7668f5e08115719e5ec08dc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2567, "license_type": "permissive", "max_line_length": 77, "num_lines": 71, "path": "/src/podrum/network/protocol/ResourcePacksInfoPacket.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nfrom podrum.network.protocol.DataPacket import DataPacket\nfrom podrum.network.protocol.ProtocolInfo import ProtocolInfo\n\nclass ResourcePacksInfoPacket(DataPacket):\n NID = ProtocolInfo.RESOURCE_PACKS_INFO_PACKET\n\n mustAccept = False\n hasScripts = False\n behaviorPackEntries = []\n resourcePackEntries = []\n\n def decodePayload(self):\n self.mustAccept = self.getBool()\n self.hasScripts = self.getBool()\n behaviorPackCount = self.getLShort()\n while behaviorPackCount > 0:\n self.getString()\n self.getString()\n self.getLLong()\n self.getString()\n self.getString()\n self.getString()\n self.getBool()\n behaviorPackCount -= 1\n\n resourcePackCount = self.getLShort()\n while resourcePackCount > 0:\n self.getString()\n self.getString()\n self.getLLong()\n self.getString()\n self.getString()\n self.getString()\n self.getBool()\n resourcePackCount -= 1\n\n def encodePayload(self):\n self.putBool(self.mustAccept)\n self.putBool(self.hasScripts)\n self.putLShort(len(self.behaviorPackEntries))\n for entry in self.behaviorPackEntries:\n self.putString(entry.getPackId())\n self.putString(entry.getPackVersion())\n self.putLLong(entry.getPackSize())\n self.putString(\"\") # TODO: encryption key\n self.putString(\"\") # TODO: subpack name\n self.putString(\"\") # TODO: content identity\n self.putBool(False) # TODO: has scripts (?)\n\n self.putLShort(len(self.resourcePackEntries))\n for entry in self.resourcePackEntries:\n self.putString(entry.getPackId())\n self.putString(entry.getPackVersion())\n self.putLLong(entry.getPackSize())\n self.putString(\"\") # TODO: encryption key\n self.putString(\"\") # TODO: subpack name\n self.putString(\"\") # TODO: content identity\n self.putBool(False) # TODO: seems useless for resource packs\n" }, { "alpha_fraction": 0.5085482597351074, "alphanum_fraction": 0.517531156539917, "avg_line_length": 35.92307662963867, "blob_id": "ffeecf2013f9772cb66c3746c4312e0a963448e2", "content_id": "4519b459435833c0965147acc19cbbd9ad08be1c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3451, "license_type": "permissive", "max_line_length": 139, "num_lines": 91, "path": "/src/podrum/Server.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\r\n* ____ _\r\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\r\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\r\n* | __/ (_) | (_| | | | |_| | | | | | |\r\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\r\n*\r\n* This program is free software: you can redistribute it and/or modify\r\n* it under the terms of the GNU Lesser General Public License as published by\r\n* the Free Software Foundation, either version 3 of the License, or\r\n* (at your option) any later version.\r\n\"\"\"\r\n\r\nimport time\r\nimport os\r\n\r\nfrom podrum.lang.Base import Base\r\nfrom podrum.utils.Logger import Logger\r\nfrom podrum.utils.ServerFS import ServerFS\r\nfrom podrum.utils.Utils import Utils\r\nfrom podrum.wizard.Wizard import Wizard\r\n\r\nfrom pyraklib.server.PyRakLibServer import PyRakLibServer\r\nfrom pyraklib.server.ServerHandler import ServerHandler\r\n\r\n\r\nclass Server:\r\n\r\n path = None\r\n withWizard = None\r\n port = 19132\r\n podrumLogo = \"\"\"\r\n ____ _ \r\n | _ \\ ___ __| |_ __ _ _ _ __ ___ \r\n | |_) / _ \\ / _` | '__| | | | '_ ` _ \\ \r\n | __/ (_) | (_| | | | |_| | | | | | |\r\n |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\r\n \"\"\"\r\n\r\n def __init__(self, path, withWizard, isTravisBuild = False):\r\n super().__init__()\r\n startTime = Utils.microtime(True)\r\n self.path = path\r\n self.withWizard = withWizard\r\n if(withWizard):\r\n ServerFS.checkAllFiles(path)\r\n else:\r\n Wizard.skipWizard(path, True)\r\n port = self.port\r\n print(str(self.podrumLogo))\r\n Wizard.isInWizard = False\r\n Logger.log('info', str(Base.get(\"startingServer\")).replace(\"{ip}\", str(Utils.getPrivateIpAddress())).replace(\"{port}\", str(port)))\r\n Logger.log('info', str(Base.get(\"extIpMsg\")).replace(\"{ipPublic}\", str(Utils.getPublicIpAddress())))\r\n Logger.log('info', str(Base.get(\"license\")))\r\n server = PyRakLibServer(port=19132)\r\n handler = ServerHandler(server, None)\r\n handler.sendOption(\"name\", \"MCPE;Podrum powered server;407;1.16.0;0;0;0;PodrumPoweredServer;0\")\r\n doneTime = Utils.microtime(True)\r\n finishStartupSeconds = \"%.3f\" % (doneTime - startTime)\r\n Logger.log('info', f'Done in {str(finishStartupSeconds)}s. Type \"help\" to view all available commands.')\r\n if (isTravisBuild):\r\n Server.checkTravisBuild(path)\r\n else:\r\n while Wizard.isInWizard == False:\r\n cmd = input('> ')\r\n Server.command(cmd, True)\r\n cmd = None\r\n ticking = True\r\n while ticking:\r\n time.sleep(0.002)\r\n\r\n def command(string, fromConsole):\r\n if string.lower() == 'stop':\r\n Logger.log('info', 'Stopping server...')\r\n Utils.killServer()\r\n elif string.lower() == '':\r\n return\r\n elif string.lower() == 'help':\r\n Logger.log('info', '/stop: Stops the server')\r\n else:\r\n Logger.log('error', str(Base.get(\"invalidCommand\")))\r\n \r\n def checkTravisBuild(path):\r\n if not ServerFS.checkForFile(path, \"server.json\"):\r\n Logger.log(\"error\", \"Couldn't find server.json file.\")\r\n os._exit(1)\r\n if os.path.getsize(f'{path}/server.json') == 0:\r\n Logger.log(\"error\", \"The server.json file is empty.\")\r\n os._exit(1)\r\n print(\"Build success.\")\r\n os._exit(0)\r\n" }, { "alpha_fraction": 0.5043988227844238, "alphanum_fraction": 0.5102639198303223, "avg_line_length": 24.259260177612305, "blob_id": "5320998ed2fa82d8edcd483642929494bbb83499", "content_id": "13970287a5242d6bbb1cf6995596a220f1b12e82", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "permissive", "max_line_length": 77, "num_lines": 27, "path": "/src/podrum/resourcepacks/ResourcePack.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nclass ResourcePack:\n def getPath(): pass\n\n def getPackName(): pass\n\n def getPackId(): pass\n\n def getPackSize(): pass\n\n def getPackVersion(): pass\n\n def getSha256(): pass\n\n def getPackChunk(start, length): pass\n" }, { "alpha_fraction": 0.5283194184303284, "alphanum_fraction": 0.532961905002594, "avg_line_length": 24.64285659790039, "blob_id": "6a26cb29dd5f4bda975d5649cc637c0da12c6ed9", "content_id": "e9e949151efa61f55dbbd0ab23c70f84657cbd12", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, "license_type": "permissive", "max_line_length": 77, "num_lines": 42, "path": "/src/podrum/Player.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nfrom podrum.network.PacketPool import PacketPool\n\nclass Player:\n connection = None\n server = None\n logger = None\n address = Nome\n name = None\n locale = None\n randomId = None\n uuid = None\n xuid = None\n skin = None\n viewDistance = None\n gamemode = 0\n pitch = 0\n yaw = 0\n headYaw = 0\n onGround = False\n platformChatId = ''\n deviceOS = None\n deviceModel = None\n deviceId = Nome\n \n def __init__(self, connection, address, logger, server):\n self.connection = connection\n self.address = address\n self.logger = logger\n self.server = server\n" }, { "alpha_fraction": 0.5850973725318909, "alphanum_fraction": 0.5922946929931641, "avg_line_length": 30.716217041015625, "blob_id": "6fd2a2ed4f1872b90e6f467c8656f20183006ea9", "content_id": "2c12bd0cf347b66528a58f6725cbca3130b8e128", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2362, "license_type": "permissive", "max_line_length": 77, "num_lines": 74, "path": "/src/podrum/nbt/NBT.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom podrum.nbt.tag.ByteArrayTag import ByteArrayTag\nfrom podrum.nbt.tag.ByteTag import ByteTag\nfrom podrum.nbt.tag.CompoundTag import CompoundTag\nfrom podrum.nbt.tag.DoubleTag import DoubleTag\nfrom podrum.nbt.tag.FloatTag import FloatTag\nfrom podrum.nbt.tag.IntArrayTag import IntArrayTag\nfrom podrum.nbt.tag.IntTag import IntTag\nfrom podrum.nbt.tag.ListTag import ListTag\nfrom podrum.nbt.tag.LongArrayTag import LongArrayTag\nfrom podrum.nbt.tag.LongTag import LongTag\nfrom podrum.nbt.tag.NamedTag import NamedTag\nfrom podrum.nbt.tag.ShortTag import ShortTag\nfrom podrum.nbt.tag.StringTag import StringTag\n\nclass NBT:\n __metaclass__ = ABCMeta\n \n TAG_End = 0\n TAG_Byte = 1\n TAG_Short = 2\n TAG_Int = 3\n TAG_Long = 4\n TAG_Float = 5\n TAG_Double = 6\n TAG_ByteArray = 7\n TAG_String = 8\n TAG_List = 9\n TAG_COMPOUND = 10\n TAG_IntArray = 11\n TAG_LongArray = 12\n \n @staticmethod\n def createTag(type: int) -> NamedTag:\n if type == NBT.TAG_Byte:\n return ByteTag()\n elif type == NBT.TAG_Short:\n return ShortTag()\n elif type == NBT.TAG_Int:\n return IntTag()\n elif type == NBT.TAG_Long:\n return LongTag()\n elif type == NBT.TAG_Float:\n return FloatTag()\n elif type == NBT.TAG_Double:\n return DoubleTag()\n elif type == NBT.TAG_ByteArray:\n return ByteArrayTag()\n elif type == NBT.TAG_String:\n return StringTag()\n elif type == NBT.TAG_List:\n return ListTag()\n elif type == NBT.TAG_Compound:\n return CompoundTag()\n elif type == NBT.TAG_IntArray:\n return IntArrayTag()\n elif type == NBT.TAG_LongArray:\n return LongArrayTag()\n else:\n raise ValueError(\"Unknown NBT tag type \" + str(type))\n \n \n \n" }, { "alpha_fraction": 0.5289648771286011, "alphanum_fraction": 0.5455840229988098, "avg_line_length": 28.23611068725586, "blob_id": "668026168de00172f9b6ad7b21d568ecd2e2e254", "content_id": "9a6f89a207052ab0c20fb8affcbd70b53215fe23", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2106, "license_type": "permissive", "max_line_length": 77, "num_lines": 72, "path": "/src/podrum/utils/bcmath.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\nimport decimal\n\nclass bcmath:\n @staticmethod\n def bcmul(num1, num2, scale=None):\n if scale != None:\n decimal.getcontext().prec = scale\n result = decimal.Decimal(num1) * decimal.Decimal(num2)\n return int(result)\n \n @staticmethod\n def bcdiv(num1, num2, scale=None):\n if scale != None:\n decimal.getcontext().prec = scale\n result = decimal.Decimal(num1) / decimal.Decimal(num2)\n return int(result)\n \n @staticmethod\n def bcadd(num1, num2, scale=None):\n if scale != None:\n decimal.getcontext().prec = scale\n result = decimal.Decimal(num1) + decimal.Decimal(num2)\n return int(result)\n \n @staticmethod\n def bcsub(num1, num2, scale=None):\n if scale != None:\n decimal.getcontext().prec = scale\n result = decimal.Decimal(num1) - decimal.Decimal(num2)\n return int(result)\n \n @staticmethod\n def bccomp(num1, num2):\n result = (int(num1) > int(num2)) - (int(num1) < int(num2))\n return int(result)\n \n @staticmethod\n def bcmod(num1, num2):\n result = int(num1) % int(num2)\n return int(result)\n \n @staticmethod\n def bcpow(num1, num2):\n result = int(num1) ** int(num2)\n return int(result)\n \n @staticmethod\n def bcpowmod(num1, num2, mod):\n result = pow(num1, num2, mod)\n return int(result)\n \n @staticmethod\n def bcscale(scale):\n result = decimal.getcontext().prec = scale\n return int(result)\n \n @staticmethod\n def bcsqrt(num):\n result = math.sqrt(num)\n return int(result)\n\n" }, { "alpha_fraction": 0.4796033501625061, "alphanum_fraction": 0.4823078513145447, "avg_line_length": 29.600000381469727, "blob_id": "8ae3112a5b18e77c112fd229b6ba98f0b0396703", "content_id": "e990be0eb985ed90967dac77b5ae5dc10c8a96e5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4437, "license_type": "permissive", "max_line_length": 77, "num_lines": 145, "path": "/src/podrum/utils/Config.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _ \n* | _ \\ ___ __| |_ __ _ _ _ __ ___ \n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\ \n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\nimport re\nimport os\nimport json\nimport yaml\nimport pickle\nfrom podrum.utils import Logger\n\nfrom podrum.ServerFS.ServerFS import read\nfrom podrum.Server import Server\n\nclass Config:\n DETECT = -1\n PROPERTIES = 0\n CNF = PROPERTIES\n JSON = 1\n YAML = 2\n EXPORT = 3\n SERIALIZED = 4\n ENUM = 5\n ENUMERATION = ENUM\n \n config = []\n nestedCache = []\n file = ''\n correct = False\n type = DETECT\n is_array = lambda var: isinstance(var, (list, tuple))\n\n formats = [{\n \"properties\" : PROPERTIES,\n \"cnf\" : CNF,\n \"conf\" : CNF,\n \"config\" : CNF,\n \"json\" : JSON,\n \"js\" : JSON,\n \"yml\" : YAML,\n \"yaml\" : YAML,\n \"export\" : EXPORT,\n \"xport\" : EXPORT,\n \"sl\" : SERIALIZED,\n \"serialize\" : SERIALIZED,\n \"txt\" : ENUM,\n \"list\" : ENUM,\n \"enum\" : ENUM,\n }]\n\n def __init__(self, file, type = DETECT, default = [], correct=None):\n self.load(file, type, default)\n correct = self.correct\n\n @staticmethod\n def isset(self, variable):\n return variable in locals() or variable in globals()\n \n def reload(self):\n self.config = []\n self.nestedCache = []\n self.correct = False\n self.load(self.file, self.type)\n \n @staticmethod\n def fixYAMLIndexes(str):\n return re.sub(r\"#^([ ]*)([a-zA-Z_]{1}[ ]*)\\\\:$#m\", r\"$1\\\"$2\\\":\", str)\n \n def load(self, file, type=DETECT, default = []):\n self.correct = True\n self.type = int(type)\n self.file = file\n if not self.is_array(default):\n default = []\n if not os.path.exists(file):\n self.config = default\n self.save()\n else:\n if self.type == self.DETECT:\n bname = os.path.basename(self.file)\n extension = bname.split(\".\")\n arrlist = extension.pop()\n extension = arrlist.strip().lower()\n if self.isset(self.formats[extension]):\n self.type = self.formats[extension]\n else:\n self.correct = False\n if self.correct:\n content = open(self.file).read()\n if (self.type == self.PROPERTIES) and (self.type == self.CNF):\n self.parseProperties(content)\n elif self.type == self.JSON:\n self.config = json.loads(content)\n elif self.type == self.YAML:\n content = self.fixYAMLIndexes(content)\n self.config = yaml.load(content)\n elif self.type == self.SERIALIZED:\n self.config = pickle.loads(content)\n elif self.type == self.ENUM:\n self.parseList(content)\n else:\n self.correct = False\n return False\n if not self.is_array(self.config): # Is array doesn't exist\n self.config = default\n if self.fillDefaults(default, self.config) > 0:\n self.save()\n else:\n return False\n\n return True\n\n def check():\n return correct = True\n \n def save():\n if self.correct == True:\n try:\n content = None\n if (type == PROPERTIES) or (type == CNF):\n content = writeProperties()\n elif type == JSON:\n content = json.dumps(config)\n elif type == YAML:\n content = yaml.emit(config)\n elif type == SERIALIZED:\n content = pickle.dumps(self.config)\n elif type == ENUM:\n \"\\r\\n\".join(config.keys())\n else:\n correct = False\n return False\n except ValueError:\n logger.log('error', f'Could not save Config {self.file}')\n return True\n else:\n return false\n" }, { "alpha_fraction": 0.6031022071838379, "alphanum_fraction": 0.6040145754814148, "avg_line_length": 31.235294342041016, "blob_id": "19796a0d0d88a7a737e817478788b7f755693a4c", "content_id": "386907ef4eacd0fc345aa4a498b11b414f6dfc20", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1096, "license_type": "permissive", "max_line_length": 77, "num_lines": 34, "path": "/src/podrum/network/protocol/DisconnectPacket.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nfrom podrum.network.protocol.DataPacket import DataPacket\nfrom podrum.network.protocol.ProtocolInfo import ProtocolInfo\n\nclass DisconnectPacket(DataPacket):\n NID = ProtocolInfo.DISCONNECT_PACKET\n\n hideDisconnectionScreen = False\n message = \"\"\n\n def canBeSentBeforeLogin():\n return True\n\n def decodePayload(self):\n self.hideDisconnectionScreen = self.getBool()\n if not self.hideDisconnectionScreen:\n self.message = self.getString()\n\n def encodePayload(self):\n self.putBool(self.hideDisconnectionScreen)\n if not self.hideDisconnectionScreen:\n self.putString(self.message)\n" }, { "alpha_fraction": 0.4653846025466919, "alphanum_fraction": 0.4673076868057251, "avg_line_length": 29.58823585510254, "blob_id": "366171874c38014aa78a7e6d696b32f624ee541f", "content_id": "e9aed897bafb773ee959b5d1f314a1fe87103667", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "permissive", "max_line_length": 77, "num_lines": 17, "path": "/src/podrum/command/Command.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _\n* | _ \\ ___ __| |_ __ _ _ _ __ ___\n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\\n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\n\nclass Command:\n\n def onCommand(string, fromConsole):\n pass\n" }, { "alpha_fraction": 0.5399590134620667, "alphanum_fraction": 0.5404713153839111, "avg_line_length": 46.60975646972656, "blob_id": "1ac0628c11961f79481a89f673f7ff7426d3a776", "content_id": "3c69bc26d23a665ce7a435dfb19086b118d07f49", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1952, "license_type": "permissive", "max_line_length": 104, "num_lines": 41, "path": "/src/podrum/utils/Logger.py", "repo_name": "jessehylton/Podrum", "src_encoding": "UTF-8", "text": "\"\"\"\n* ____ _ \n* | _ \\ ___ __| |_ __ _ _ _ __ ___ \n* | |_) / _ \\ / _` | '__| | | | '_ ` _ \\ \n* | __/ (_) | (_| | | | |_| | | | | | |\n* |_| \\___/ \\__,_|_| \\__,_|_| |_| |_|\n*\n* This program is free software: you can redistribute it and/or modify\n* it under the terms of the GNU Lesser General Public License as published by\n* the Free Software Foundation, either version 3 of the License, or\n* (at your option) any later version.\n\"\"\"\nfrom datetime import datetime\nfrom podrum.utils.TextFormat import TextFormat\n\nTextFormat = TextFormat()\n\nclass Logger:\n\n def log(type_, content):\n time = datetime.now()\n if type_ == 'info':\n print(f'{TextFormat.BLUE}[INFO: {time.strftime(\"%H:%M\")}]{TextFormat.WHITE} {content}')\n elif type_ == 'warn':\n print(f'{TextFormat.YELLOW}[WARNING: {time.strftime(\"%H:%M\")}]{TextFormat.WHITE} {content}')\n elif type_ == 'error':\n print(f'{TextFormat.RED}[ERROR: {time.strftime(\"%H:%M\")}]{TextFormat.WHITE} {content}')\n elif type_ == 'success':\n print(f'{TextFormat.GREEN}[SUCCESS: {time.strftime(\"%H:%M\")}]{TextFormat.WHITE} {content}')\n elif type_ == \"emergency\":\n print(f'{TextFormat.GOLD}[EMERGENCY: {time.strftime(\"%H:%M\")}]{TextFormat.WHITE} {content}')\n elif type_ == \"alert\":\n print(f'{TextFormat.PURPLE}[ALERT: {time.strftime(\"%H:%M\")}]{TextFormat.WHITE} {content}')\n elif type_ == \"notice\":\n print(f'{TextFormat.AQUA}[NOTICE: {time.strftime(\"%H:%M\")}]{TextFormat.WHITE} {content}')\n elif type_ == \"critical\":\n print(f'{TextFormat.RED}[CRITICAL: {time.strftime(\"%H:%M\")}]{TextFormat.WHITE} {content}')\n elif type_ == \"debug\":\n print(f'{TTextFormat.GRAY}[DEBUG: {time.strftime(\"%H:%M\")}]{TextFormat.WHITE} {content}')\n else:\n print(f'[{type_.upper()}: {time.strftime(\"%H:%M\")}]{content}')\n" } ]
21
SadeghShabestani/6_bmm
https://github.com/SadeghShabestani/6_bmm
22e8f97d19a7849c60ed4bd2f53ed91bed3b5da0
64d5b7bbe04b335341e31aee8421c2320f3bb7f6
e6c4d11936de51752a8249828d3f5673ab45190c
refs/heads/main
2023-03-29T02:35:06.821274
2021-03-26T17:43:05
2021-03-26T17:43:05
351,864,905
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.49196141958236694, "alphanum_fraction": 0.5241157412528992, "avg_line_length": 21.769229888916016, "blob_id": "19142bb48fec7d7aceb0615a3b79f14b4443daf9", "content_id": "dbf5bff67daaa9ab94122d1774c40eaab4eb9de3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 61, "num_lines": 13, "path": "/6_BMM.py", "repo_name": "SadeghShabestani/6_bmm", "src_encoding": "UTF-8", "text": "import termcolor2\r\n\r\nx = int(input(\"Enter First Number: \")) # 20\r\ny = int(input(\"Enter second Number: \")) # 40\r\n\r\nif x > y:\r\n print(termcolor2.colored(\"Error! TryAgain\", color=\"red\"))\r\nelse:\r\n for i in range(1,x+1):\r\n if x % i == 0 and y % i == 0:\r\n bmm = i\r\n\r\nprint(f\"BMM: {bmm}\")\r\n\r\n" } ]
1
guoyuquan/Graduate
https://github.com/guoyuquan/Graduate
02d3c6f26efce6157484599285f24830edce7d7b
4336391886f85f0a789bbe538ed20be7f0f4ed8f
9866a05894747ff42f872799a8cea57b47a38a7b
refs/heads/master
2021-01-20T02:46:54.799365
2015-02-08T09:30:05
2015-02-08T09:30:05
29,242,209
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5, "alphanum_fraction": 0.5197368264198303, "avg_line_length": 8, "blob_id": "07c8edf6f7882ba7b3d3e70a22b79bde7f2872e1", "content_id": "11fc0a7ec1275c074ab875710a3cd6eec69a2161", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 152, "license_type": "no_license", "max_line_length": 39, "num_lines": 17, "path": "/Programming/C++/Effective_C++/chapter6/class_scope.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A\n{\n int i;\n};\n\nclass B:public A{\npublic:\n B(){i=10; std::cout<<i<<std::endl;}\n};\n\nint main()\n{\n B b;\n return 0;\n}" }, { "alpha_fraction": 0.5895522236824036, "alphanum_fraction": 0.6567164063453674, "avg_line_length": 12.399999618530273, "blob_id": "fa6ea7efa6eaa0a98ee9289bce2aac8a5ee2fef0", "content_id": "3599cc4a945f376da8efa9cbc4a1d49c621967ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 24, "num_lines": 10, "path": "/Programming/Python/Class/first.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class C1:\n\tpass\nclass C2:\n\tpass\nclass C3(C1, C2):\n\tdef setname(self, who):\n\t\tself.name = who\nI1=C3()\nI1.setname('bob')\nprint(I1.name)\n" }, { "alpha_fraction": 0.3837156891822815, "alphanum_fraction": 0.4093567132949829, "avg_line_length": 20.384614944458008, "blob_id": "3ab31b65aca7304112b37205dea8c693ea8c7318", "content_id": "8fe0e8c88dcc359036ff0aa99f6302a2b3f8c3b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2223, "license_type": "no_license", "max_line_length": 59, "num_lines": 104, "path": "/Programming/C/Programming_in_Unix/chapter15/coprocess.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <string.h>\n\n#define MAXLINE 1024\n\nstatic void sig_pipe(int);\n\nint\nmain()\n{\n int n, fd1[2], fd2[2];\n pid_t pid;\n char line[MAXLINE];\n \n if(signal(SIGPIPE, sig_pipe)==SIG_ERR)\n {\n fprintf(stderr, \"error: signal()\\n\");\n exit(0);\n }\n \n if(pipe(fd1)<0||pipe(fd2)<0)\n {\n fprintf(stderr, \"error: pipe\\n\");\n exit(0);\n }\n \n if((pid=fork())<0)\n {\n fprintf(stderr, \"error: fork()\\n\");\n exit(0);\n }\n else if(pid>0)\n {\n close(fd1[0]);\n close(fd2[1]);\n while(fgets(line, MAXLINE, stdin)!=NULL)\n {\n n=strlen(line);\n if(write(fd1[1], line, n)!=n)\n {\n fprintf(stderr, \"error: write to pipe\\n\");\n continue;\n }\n if((n=read(fd2[0], line, MAXLINE))<0)\n {\n fprintf(stderr, \"error: read from pipe\\n\");\n continue;\n }\n if(n==0)\n {\n fprintf(stderr, \"child closed pipe\\n\");\n break;\n }\n line[n]=0;\n if(fputs(line, stdout)==EOF)\n {\n fprintf(stderr, \"error: fputs()\\n\");\n }\n }\n if(ferror(stdin))\n {\n fprintf(stderr, \"error: fgets on stdin\\n\");\n }\n exit(0);\n }\n else\n {\n close(fd1[1]);\n close(fd2[0]);\n if(fd1[0] != STDIN_FILENO)\n {\n if(dup2(fd1[0], STDIN_FILENO)!=STDIN_FILENO)\n {\n fprintf(stderr, \"error: dup2 to stdin\\n\");\n exit(0);\n }\n close(fd1[0]);\n }\n \n if(fd2[1]!=STDOUT_FILENO)\n {\n if(dup2(fd2[1], STDOUT_FILENO)!=STDOUT_FILENO)\n {\n fprintf(stderr, \"error: dup2 to stdout\\n\");\n exit(0);\n }\n close(fd2[1]);\n }\n if(execl(\"./add2\", \"add2\", (char *)0)<0)\n {\n fprintf(stderr, \"error: execl\\n\");\n }\n exit(0);\n }\n}\n\nstatic void\nsig_pipe(int signo)\n{\n printf(\"SIGPIPE caught\\n\");\n exit(1);\n}" }, { "alpha_fraction": 0.42105263471603394, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 13.25, "blob_id": "96ce8c37f96e33ef6172a1401cc9ce06de570e1f", "content_id": "db5b54eda90144da44e22deee733a0d16c9e9db0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57, "license_type": "no_license", "max_line_length": 24, "num_lines": 4, "path": "/Programming/Python/13Loop/calculate_for.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "summ = 0\nfor x in [1,2 ,3, 5, 6]:\n\tsumm += x\nprint(summ)\n" }, { "alpha_fraction": 0.36647728085517883, "alphanum_fraction": 0.42897728085517883, "avg_line_length": 13.708333015441895, "blob_id": "72895f9a173f1af3c3950ff96fbbfbdc8037c0c1", "content_id": "3fe951532b4553395952c02faf78b1616ce6b479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 352, "license_type": "no_license", "max_line_length": 36, "num_lines": 24, "path": "/Programming/Practice/Interpretation/1.11_iterative.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint func(int n)\n{\n if(n<3)\n return n;\n int n_1=2, n_2=1, n_3=0;\n int sum=0;\n for(int i=3; i<=n; i++)\n {\n sum=n_1+2*n_2+3*n_3;\n n_3=n_2;\n n_2=n_1;\n n_1=sum;\n //std::cout<<sum<<std::endl;\n }\n return sum;\n}\n\nint main()\n{\n std::cout<<func(10)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.6147959232330322, "alphanum_fraction": 0.6275510191917419, "avg_line_length": 17.714284896850586, "blob_id": "a319ccf710bb3dcb8479ab931de25197c9ed8bdc", "content_id": "7deaa50c558a4a6c623bd9379128dcea3f0a38da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 392, "license_type": "no_license", "max_line_length": 81, "num_lines": 21, "path": "/Programming/C++/Effective_C++/chapter2/assignment_constructor.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A\n{\npublic:\n A(int & j):i(j){}\nprivate:\n int &i;\n};\n\nint main()\n{\n int i;\n A a1(i), a2(i);\n a1=a2; //error: no copy assinment function would be generated by the compiler\n //error: cannot define the implicit copy assignment\n //operator for 'A', because non-static reference member 'i' can't use copy\n // assignment operator\n\n return 0;\n}" }, { "alpha_fraction": 0.5394477248191833, "alphanum_fraction": 0.5433924794197083, "avg_line_length": 21.55555534362793, "blob_id": "50267d7cffd5a36fe290785df49d37bb643f5ca1", "content_id": "0c581b0f222a0fe0b650c4728389a745235c6de3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1014, "license_type": "no_license", "max_line_length": 62, "num_lines": 45, "path": "/Programming/C/Programming_in_Unix/chapter5/print_buffering.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <string.h>\n\nvoid pr_stdio(const char *, FILE *);\n\nint main()\n{\n FILE * fp;\n fputs(\"enter any character\\n\", stdout);\n if(getchar()==EOF)\n {\n fprintf(stderr,\"error: getchar, encounter EOF\\n\");\n exit(0);\n }\n fputs(\"one line to standard error\\n\", stderr);\n pr_stdio(\"stdin\", stdin);\n pr_stdio(\"stdout\", stdout);\n pr_stdio(\"stderr\", stderr);\n if((fp=fopen(\"/etc/motd\", \"r\"))==NULL)\n {\n fprintf(stderr, \"error, fopen %s\\n\", strerror(errno));\n exit(0);\n }\n if(getc(fp)==EOF)\n {\n fprintf(stderr, \"error: getc\\n\");\n exit(0);\n }\n pr_stdio(\"/etc/motd\", fp);\n return 0;\n}\n\nvoid pr_stdio(const char *name, FILE *fp)\n{\n printf(\"stream = %s, \", name);\n if(fp->_flags&__SNBF)\n printf(\"unbuffered\");\n else if(fp->_flags&__SLBF)\n printf(\"line buffered\");\n else\n printf(\"fully buffered\");\n printf(\", buffer size = %d\\n\", fp->_lbfsize);\n}" }, { "alpha_fraction": 0.5569307208061218, "alphanum_fraction": 0.5775577425956726, "avg_line_length": 23.260000228881836, "blob_id": "facff04ee3305769522fba6aa2a7fc3c838dd6ad", "content_id": "8eecf0ad65cfe75d832255f45c897d6cd93fa5c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1212, "license_type": "no_license", "max_line_length": 101, "num_lines": 50, "path": "/Algorithm/Algorithm/chapter2/max_subseq_sum_nlogn.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nT max_sum(T *a, int n);\n\ntemplate <typename T>\nT max_sub_sum(T *a, int left, int right);\n\nint main()\n{\n int a[]={-1, 10, -20, 101, 11, 12};\n std::cout<<max_sum(a, 6)<<std::endl;\n return 0;\n}\n\ntemplate <typename T>\nT max_sum(T *a, int n)\n{\n return max_sub_sum(a, 0, n-1);\n}\n\ntemplate <typename T>\nT max_sub_sum(T *a, int left, int right)\n{\n if(left==right)\n {\n if(a[left]>0)\n return a[left];\n else \n return 0;\n }\n int center=(left+right)/2;\n int max_left_sum=max_sub_sum(a, left, center);\n int max_right_sum=max_sub_sum(a, center+1, right);\n int left_border_sum=0, max_left_border_sum=0;\n for(int i=center; i>=left; i--)\n {\n left_border_sum+=a[i];\n if(max_left_border_sum<left_border_sum)\n max_left_border_sum=left_border_sum;\n }\n int max_right_border_sum=0, right_border_sum=0;\n for(int i=center+1;i<=right; i++)\n {\n right_border_sum+=a[i];\n if(right_border_sum>max_right_border_sum)\n max_right_border_sum=right_border_sum;\n }\n return std::max(std::max(max_left_sum, max_right_sum), max_left_border_sum+max_right_border_sum);\n}" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6166666746139526, "avg_line_length": 20.81818199157715, "blob_id": "15de79b057cc078fb965427cb8b1fcccda29eb67", "content_id": "88ca9a85f7e9d9fed6229325454a869df75b1bdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 240, "license_type": "no_license", "max_line_length": 42, "num_lines": 11, "path": "/Programming/C++/Effective_STL/Vector_and_String/shrink.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<vector>\nint main()\n{\n std::vector<int> ivec;\n ivec.reserve(100);\n std::cout<<ivec.capacity()<<std::endl;\n std::vector<int>(ivec).swap(ivec);\n std::cout<<ivec.capacity()<<std::endl;\n return 0;\n}\n" }, { "alpha_fraction": 0.48123979568481445, "alphanum_fraction": 0.5350733995437622, "avg_line_length": 17.606060028076172, "blob_id": "6f5eb6bd3a50f419e801c12df02ac51df07ae37d", "content_id": "3928e2c83412f076e6dea7ffb65811a29c35e2b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 613, "license_type": "no_license", "max_line_length": 66, "num_lines": 33, "path": "/Programming/Practice/Interpretation/1.23_smallest_divisor.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<cmath>\n#include<time.h>\n\nbool prime(int n)\n{\n int tar=sqrt(n);\n for(int i=2; i<tar; i++)\n if(n%i==0)\n return false;\n return true;\n}\n\nbool fast_prime(int n)\n{\n int tar=sqrt(n);\n if(n%2==0)\n return false;\n for(int i=3; i<tar; i+=2)\n if(n%i==0)\n return false;\n return true;\n}\n\nint main()\n{\n time_t t1=time(&t1);\n std::cout<<prime(10000001)<<std::endl;\n time_t t2=time(&t2);\n std::cout<<fast_prime(10000001)<<std::endl;\n time_t t3=time(&t3);\n std::cout<<difftime(t1, t2)<<\" \"<<difftime(t2, t3)<<std::endl;\n}" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.6388888955116272, "avg_line_length": 17, "blob_id": "d9b88acd6797def687d338fe1880f09f7ddaf5bd", "content_id": "3fc7a379db63376f9777ba0920a0ba3988e5c5cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 108, "license_type": "no_license", "max_line_length": 31, "num_lines": 6, "path": "/Programming/Python/10Statement/arith.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "while True:\n\treply=raw_input('Enter text:')\n\tif reply == 'stop':\n\t\tbreak\n\tprint(int(reply)**2)\nprint('Bye')\n" }, { "alpha_fraction": 0.607555091381073, "alphanum_fraction": 0.6117523312568665, "avg_line_length": 34.33333206176758, "blob_id": "1d8322b9fb8cc4b655354ace7c75fa9d0122561a", "content_id": "afe6ce38ceabfe1cb9997600cc81861903fe9824", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 953, "license_type": "no_license", "max_line_length": 66, "num_lines": 27, "path": "/Programming/C/Network_Programming_in_Unix/chapter7/printbuf.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nint main()\n{\n int sock=socket(AF_INET, SOCK_STREAM, 0);\n int size;\n socklen_t len=sizeof(size);\n getsockopt(sock, SOL_SOCKET, SO_RCVBUF, (void *) &size, &len);\n printf(\"TCP receive buf:%d\\n\", size);\n getsockopt(sock, SOL_SOCKET, SO_SNDBUF, (void *)&size, &len);\n printf(\"TCP send buf: %d\\n\", size);\n //close(sock);\n sock=socket(AF_INET, SOCK_DGRAM, 0);\n getsockopt(sock, SOL_SOCKET, SO_RCVBUF, (void *) &size, &len);\n printf(\"UDP receive buf:%d\\n\", size);\n getsockopt(sock, SOL_SOCKET, SO_SNDBUF, (void *)&size, &len);\n printf(\"UDP send buf: %d\\n\", size);\n //close(sock);\n sock=socket(AF_INET, SOCK_SEQPACKET, 0);\n getsockopt(sock, SOL_SOCKET, SO_RCVBUF, (void *) &size, &len);\n printf(\"TCP receive buf:%d\\n\", size);\n getsockopt(sock, SOL_SOCKET, SO_SNDBUF, (void *)&size, &len);\n printf(\"TCP send buf: %d\\n\", size);\n return 0;\n}" }, { "alpha_fraction": 0.6355932354927063, "alphanum_fraction": 0.6567796468734741, "avg_line_length": 20.454545974731445, "blob_id": "d6700749c3cfe6290a0f5c243a75cb5af8b4e3df", "content_id": "2bac473a7410942aacac4357f44f0b683ec23989", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "no_license", "max_line_length": 33, "num_lines": 11, "path": "/Programming/Python/11Value_Statement_Print/print.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "import sys\ntemp=sys.stdout\nsys.stdout=open('log.txt', 'a')\nprint('spam')\nprint(1, 2, 3)\nsys.stdout.close()\nsys.stdout=temp\nprint('back here')\nprint(open('log.txt').read())\nsys.stderr.write(('Bad!'*8)+'\\n')\nprint >> sys.stderr, 'Bad!'*8\n" }, { "alpha_fraction": 0.4587688744068146, "alphanum_fraction": 0.4779326319694519, "avg_line_length": 22.428571701049805, "blob_id": "515fd806a2ed613dda27f323830c9da1af2345fb", "content_id": "cd95267577c277bf91632a830f99d3744f963d72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3444, "license_type": "no_license", "max_line_length": 72, "num_lines": 147, "path": "/Programming/C/Network_Programming_in_Unix/chapter6/tcpechoserv.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n#include <stdio.h>\n\nvoid\nproc(int sock);\n\nvoid\nsig_chld(int signo);\n\nint\nmain()\n{\n int listenfd, connfd, sockfd;\n if((listenfd=socket(AF_INET, SOCK_STREAM, 0))<0)\n {\n fprintf(stderr, \"error: socket()\\n\");\n exit(0);\n }\n struct sockaddr_in servaddr;\n bzero(&servaddr, sizeof(servaddr));\n servaddr.sin_family=AF_INET;\n servaddr.sin_port=htons(9868);\n servaddr.sin_addr.s_addr=htonl(INADDR_ANY);\n if(bind(listenfd, (struct sockaddr *)&servaddr, sizeof(servaddr))<0)\n {\n fprintf(stderr, \"error: bind()\\n\");\n exit(0);\n }\n if(listen(listenfd, 5)<0)\n {\n fprintf(stderr, \"error: listen()\\n\");\n exit(0);\n }\n if(signal(SIGCHLD, sig_chld)==SIG_ERR)\n {\n fprintf(stderr, \"error: signal()\\n\");\n exit(0);\n }\n struct sockaddr_in cliaddr;\n socklen_t len=sizeof(struct sockaddr_in);\n int maxfd=listenfd;\n int maxi=-1;\n int nready, client[FD_SETSIZE];\n for(int i=0; i<FD_SETSIZE; i++)\n client[i]=-1;\n fd_set rset, allset;\n FD_ZERO(&allset);\n FD_SET(listenfd, &allset);\n char buf[1500];\n memset(buf, 0, 1500);\n int n=0, i=0;\n while(1)\n {\n rset=allset;\n nready=select(maxfd+1, &rset, NULL, NULL, NULL);\n if(FD_ISSET(listenfd, &rset)){\n connfd=accept(listenfd, (struct sockaddr *)&cliaddr, &len);\n //int i=0;\n for(; i<FD_SETSIZE; i++)\n if(client[i]<0)\n {\n client[i]=connfd;\n break;\n }\n if(i==FD_SETSIZE)\n {\n fprintf(stderr, \"error: too many clients\\n\");\n continue;\n }\n FD_SET(connfd, &allset);\n if(connfd> maxfd)\n maxfd=connfd;\n if(i>maxi)\n maxi=i;\n if(--nready<=0)\n continue;\n }\n for(i=0; i<maxi; i++)\n {\n if((sockfd=client[i])<0)\n continue;\n if(FD_ISSET(sockfd, &rset))\n {\n if((n=read(sockfd, buf, 1500))==0)\n {\n close(sockfd);\n FD_CLR(sockfd, &allset);\n client[i]=-1;\n }\n else\n write(sockfd, buf, n);\n if(--nready<=0)\n break;\n }\n }\n }\n return 0;\n}\n\nvoid\nproc(int sock)\n{\n char *buf=(char *)malloc(1500*sizeof(char));\n if(buf==NULL)\n {\n fprintf(stderr, \"error: molloc\\n\");\n fprintf(stderr, \"%s\\n\", strerror(errno));\n return;\n }\n memset(buf, 0, 1500);\n int ret=0;\nagain:\n while((ret=read(sock, buf, 1500))>0)\n {\n printf(\"received %d bytes \\n %s\\n\", ret, buf);\n write(sock, buf, strlen(buf));\n memset(buf, 0, 1500);\n }\n if(ret<0&&errno==EINTR)\n goto again;\n else if(ret<0)\n {\n fprintf(stderr, \"error: read socket: %d\\n\", sock);\n fprintf(stderr, \"%s\\n\", strerror(errno));\n }\n else\n return;\n}\n\nvoid\nsig_chld(int signo)\n{\n pid_t pid;\n int stat;\n \n while((pid=waitpid(-1, &stat, WNOHANG))>0)\n {\n printf(\"chiald %d terminated\\n\", pid);\n }\n //printf(\"child %d terminated\\n\", pid);\n return ;\n}\n" }, { "alpha_fraction": 0.3685027062892914, "alphanum_fraction": 0.3886733949184418, "avg_line_length": 17.428571701049805, "blob_id": "71a1610150bb1ab4d42a3a116a5148c6de453f04", "content_id": "2ef905b35325959d3ed0305e9526d2d4a50a489a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1289, "license_type": "no_license", "max_line_length": 87, "num_lines": 70, "path": "/Algorithm/Leetcode/maxnumpoint.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<vector>\nusing std::cout;\nusing std::cin;\nusing std::endl;\nusing std::vector;\n\nstruct Point{\n int x;\n int y;\n Point():x(0),y(0){}\n Point(int a,int b):x(a),y(b){}\n};\n\nint main()\n{\n vector<Point> points;\n int a,b;\n while(cin>>a>>b)\n {\n Point p(a,b);\n points.push_back(p);\n }\n int n=points.size();\n if(n<=2)\n {\n cout<<n<<endl;\n return n;\n }\n vector<double> k;\n int res=0,dup=1;\n for(int i=0;i<n;++i)\n {\n k.clear();\n dup=1;\n for(int j=0;j<n;++j)\n {\n if(points[i].x-points[j].x==0)\n {\n if(points[i].y-points[j].y==0)\n {\n ++dup;\n }\n else\n k.push_back(99999);\n }\n else\n k.push_back(10000*(points[i].y-points[j].y)/(points[i].x-points[j].x));\n }\n }\n sort(k.begin(),k.end());\n int pp=1;\n if(k.size()==0)\n pp=0;\n for(int j=1;j<k.size();++j)\n {\n if(k[j]==k[j-1])\n ++pp;\n else\n {\n if(pp+dup>res)\n res=pp+dup;\n pp=1;\n }\n }\n if(pp+dup>res)\n res=pp+dup;\n cout<<res<<endl;\n return res;\n}" }, { "alpha_fraction": 0.5509259104728699, "alphanum_fraction": 0.5601851940155029, "avg_line_length": 11.764705657958984, "blob_id": "dd1681a58b1df7bfa4230610b13b38dc24666e99", "content_id": "2991690cd59803ef277c8c77c252bbec84a84712", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 216, "license_type": "no_license", "max_line_length": 41, "num_lines": 17, "path": "/Programming/C++/Effective_C++/chapter3/implicit_conversion_function.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<string>\nclass A\n{\npublic:\n A():a(10){}\n A(int i):a(i){}\n explicit operator double(){return a;}\nprivate:\n int a;\n};\n\nint main()\n{\n A a;\n std::cout<<(double)a<<std::endl;\n}" }, { "alpha_fraction": 0.4933333396911621, "alphanum_fraction": 0.5066666603088379, "avg_line_length": 10.615385055541992, "blob_id": "ac7834617a784efe768fe59bfba399a406ecf70c", "content_id": "9c3901b8a71991999ff42a1bf1a5bb37f787d9ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 150, "license_type": "no_license", "max_line_length": 52, "num_lines": 13, "path": "/Programming/C++/Effective_STL/Functors/callable_class.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n A(int, int){}\n void operator()(){std::cout<<\"call\"<<std::endl;}\n};\n\nint main()\n{\n A a(0, 0);\n a();\n}" }, { "alpha_fraction": 0.4363636374473572, "alphanum_fraction": 0.5090909004211426, "avg_line_length": 17.33333396911621, "blob_id": "ca4d17a3cd488ddf8ddc0030b3a1690af5ec5274", "content_id": "f47f50965ce085b5b00965f2a6ba10d151c93de3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/Programming/Python/18Parameter/2exer.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def func(a, b, c=5):\n\tprint(a, b, c)\nfunc(1, c=3, b=2)\n" }, { "alpha_fraction": 0.40816327929496765, "alphanum_fraction": 0.5204081535339355, "avg_line_length": 15.333333015441895, "blob_id": "08dc91ee52e0583b2e72195d63494d039c3bea45", "content_id": "f9baf65561e06190ca908d0d26dec8ee9e2ecec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 22, "num_lines": 6, "path": "/Programming/Python/14iteration/start_iterator.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "for x in [6, 6, 6, 6]:\n\tprint x**2\nfor x in [6, 6, 6, 6]:\n\tprint x**3\nfor x in 'spam':\n\tprint x*2\n" }, { "alpha_fraction": 0.49877750873565674, "alphanum_fraction": 0.5036674737930298, "avg_line_length": 22, "blob_id": "c66c453cd8357d8808eac13e7b9e49f90e2fea2d", "content_id": "68513263334b57ba16ac98aed1f3b95a2bd6fef5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1636, "license_type": "no_license", "max_line_length": 80, "num_lines": 71, "path": "/Programming/C++/Crossfire/any/dom_ip_translation/ip_dom.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<fstream>\n#include<vector>\n#include<sstream>\nusing std::cout;\nusing std::vector;\nusing std::endl;\nusing std::string;\nusing std::pair;\n\nint main()\n{\n vector<pair<string, vector<string> > > dom_vec;\n vector<string> svec;\n vector<pair<string, string> > result;\n std::ifstream ifs(\"ip_.txt\");\n if(!ifs.is_open())\n {\n cout<<\"fail open ip_.txt\"<<endl;\n return 0;\n }\n string str;\n while(getline(ifs, str))\n {\n svec.push_back(str);\n }\n ifs.close();\n ifs.open(\"domain_ip.txt\");\n if(!ifs.is_open())\n {\n cout<<\"fail open domain_ip\"<<endl;\n return 0;\n }\n while(getline(ifs, str))\n {\n string domain;\n string ip;\n vector<string> ipvec;\n std::istringstream iss(str);\n iss>>domain;\n while(iss>>ip)\n {\n ipvec.push_back(ip);\n }\n dom_vec.push_back(std::make_pair(domain, ipvec));\n ip.clear();\n }\n ifs.close();\n for(int i=0; i!=svec.size(); ++i)\n {\n for(int j=0; j!=dom_vec.size(); ++j)\n {\n for(int k=0; k!=dom_vec[j].second.size(); ++k)\n if(svec[i]==dom_vec[j].second[k])\n result.push_back(std::make_pair(svec[i], dom_vec[j].first));\n }\n }\n std::ofstream ofs(\"ip_dom.txt\");\n if(!ofs.is_open())\n {\n cout<<\"fail open ip_dom\"<<endl;\n return 0;\n }\n for(int i=0; i!=result.size(); ++i)\n {\n cout<<result[i].first<<\" \"<<result[i].second<<endl;\n ofs<<result[i].first<<\" \"<<result[i].second<<endl;\n }\n ofs.close();\n return 0;\n}\n\n\n\n" }, { "alpha_fraction": 0.4034833014011383, "alphanum_fraction": 0.41944846510887146, "avg_line_length": 16.69230842590332, "blob_id": "ee8b59b04061f997a0653136960f73b2535f5c62", "content_id": "f50c62506541874ed018fd5d526c701786d70c26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 689, "license_type": "no_license", "max_line_length": 51, "num_lines": 39, "path": "/Algorithm/Leetcode/removalele.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nvoid swap(int &a, int &b);\n\nint removeElement(int A[], int n, int elem) {\n int total=0;\n int backindex=n-1;\n for(int i=0; i<backindex;i++ )\n {\n if(A[i]==elem)\n {\n while(backindex>=0&&A[backindex]==elem)\n {\n backindex--;\n }\n if(backindex>=0)\n {\n swap(A[backindex], A[i]);\n total++;\n continue;\n }\n break;\n }\n }\n return total;\n}\nvoid swap(int &a, int &b)\n{\n int temp=a;\n a=b;\n b=temp;\n}\n\nint main()\n{\n int a[2]={4,5};\n std::cout<<removeElement(a, 2, 4)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.5652173757553101, "alphanum_fraction": 0.6708074808120728, "avg_line_length": 18.875, "blob_id": "0cd1fd58ea7619b1bd35295813a40e4dbf64291d", "content_id": "99fb35cf8e5eb753e634cc815b779bf87044137a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 32, "num_lines": 8, "path": "/Programming/Python/4ObjectType/integer.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "import math;\nimport random;\nprint(len(str(2**1000000)));\nprint(3.1*2);\nprint(math.pi);\nmath.sqrt(85);\nprint(random.random());\nprint(random.choice([1,2,3,6]));\n\n\n" }, { "alpha_fraction": 0.529629647731781, "alphanum_fraction": 0.5370370149612427, "avg_line_length": 14.941176414489746, "blob_id": "687fe250c2f1dbf46d7550981cf77b8d77f6de72", "content_id": "09a909a5602a5b4302c5aa3b5e55c37efd71d6a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 270, "license_type": "no_license", "max_line_length": 49, "num_lines": 17, "path": "/Programming/C/Programming_in_Unix/chapter15/test.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <fstream>\n#include <string>\n\nint\nmain()\n{\n std::ofstream ifs(\"a.out\");\n if(!ifs.is_open())\n {\n std::cerr<<\"error: open file\"<<std::endl;\n return 0;\n }\n std::string str;\n ifs<<\"what\"<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.5602605938911438, "alphanum_fraction": 0.5765472054481506, "avg_line_length": 21, "blob_id": "be72ec3f9238a753648be482ed80036203d68aba", "content_id": "2f24c1d32f57f957f398ddcd754dfa02d10519c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 307, "license_type": "no_license", "max_line_length": 44, "num_lines": 14, "path": "/Programming/C++/Effective_STL/Iterators/reverse_iterator.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<vector>\n\nint main()\n{\n std::vector<int> ivec;\n for(int i=0; i<10; i++)\n ivec.push_back(i);\n std::vector<int>::reverse_iterator rit;\n rit=find(ivec.rbegin(), ivec.rend(), 6);\n std::cout<<*rit<<std::endl;\n std::cout<<*rit.base()<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4166666567325592, "alphanum_fraction": 0.5166666507720947, "avg_line_length": 7.5714287757873535, "blob_id": "6331a274f3398ca77a63d335a4fdd4f10ac3714e", "content_id": "2b05a16ce3d5cbfd84f3a75cd3c455629090d6c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 10, "num_lines": 7, "path": "/Programming/Python/17Namespace/return_func.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def f1():\n\tX=68\n\tdef f2():\n\t\tprint(X)\n\treturn f2\na=f1()\na()\n" }, { "alpha_fraction": 0.5657142996788025, "alphanum_fraction": 0.5657142996788025, "avg_line_length": 10, "blob_id": "3bd5e88e851606b3f4fb2aacb301ffb729175c2f", "content_id": "01e9fe90ac703f8f885ca6e3dc925440b8d4ba70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 175, "license_type": "no_license", "max_line_length": 37, "num_lines": 16, "path": "/Programming/C++/Inside_the_C++_object_model/chapter4/multi_inherit.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\n virtual void f(){}\n};\nclass B{\n virtual void f(){}\n};\n\nclass C:public A, public B{\n};\n\nint main()\n{\n std::cout<<sizeof (C)<<std::endl;\n}" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.5614035129547119, "avg_line_length": 11.052631378173828, "blob_id": "f48d631af1ab924c7a8dff0d0b8100810b816bdb", "content_id": "438cd569582f8d4841505f7ea644734bdd35f5c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 228, "license_type": "no_license", "max_line_length": 64, "num_lines": 19, "path": "/Programming/C++/Inside_the_C++_object_model/chapter3/data_binding.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntypedef int xxx;\n\nclass A{\n \npublic:\n void x(xxx y){std::cout<<y<<std::endl;} //use the global one\nprivate:\n typedef float xxx;\n};\n\nint main()\n{\n A a;\n a.x(10);\n a.x(10.111);\n return 0;\n}" }, { "alpha_fraction": 0.49242424964904785, "alphanum_fraction": 0.49242424964904785, "avg_line_length": 11.045454978942871, "blob_id": "6cc1001f56dada0751cc62b0f65543b1e0d8f79a", "content_id": "b8a3b85c941be13d22dd44e64f78b6eefe158dd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 264, "license_type": "no_license", "max_line_length": 69, "num_lines": 22, "path": "/Programming/C++/Effective_C++/chapter5/class_pointers.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A\n{\npublic:\n void fun(){};\nprivate:\n int i;\n};\n\nclass B\n{\n};\n\nint main()\n{\n A *a;\n B *b;\n char *c;\n std::cout<<sizeof (a)<<\" \"<<sizeof(b)<<\" \"<<sizeof(c)<<std::endl;\n std::cout<<sizeof(A)<<\" \"<<sizeof(B)<<std::endl;\n}" }, { "alpha_fraction": 0.393410861492157, "alphanum_fraction": 0.44573643803596497, "avg_line_length": 16.827587127685547, "blob_id": "7c292b18974ac6a91f6141c8014957ef3cc3c1b2", "content_id": "2a107ebf2abd27369f648946aac5b89f02347576", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 516, "license_type": "no_license", "max_line_length": 38, "num_lines": 29, "path": "/Programming/Practice/Interpretation/sqrt.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ndouble sqrt(int n)\n{\n double temp=0.0, div=n/2;\n double var=0.0, curr=0.0;\n for(int i=0; true; i++)\n {\n temp=n/div;\n div=(temp+div)/2;\n std::cout<<div<<\" \";\n var=curr-div;\n curr=div;\n if(var<0)\n if(-var/div<=0.0001)\n break;\n if(var>=0)\n if(var/div<=0.0001)\n break;\n }\n std::cout<<std::endl;\n return div;\n}\n\nint main()\n{\n std::cout<<sqrt(10000)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.5045248866081238, "alphanum_fraction": 0.511635422706604, "avg_line_length": 18.713375091552734, "blob_id": "01b2f5ee134ba05ee9505b45d0891b28b31fe73a", "content_id": "22788acd02f3391cdbbde768f8f0a01033c3eb09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3094, "license_type": "no_license", "max_line_length": 116, "num_lines": 157, "path": "/Programming/C/Programming_in_Unix/chapter18/ttyname.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/stat.h>\n#include <dirent.h>\n#include <limits.h>\n#include <string.h>\n#include <termios.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <stdio.h>\n\nstruct devdir{\n struct devdir *d_next;\n char *d_name;\n};\n\nstatic struct devdir *head;\nstatic struct devdir *tail;\nstatic char pathname[_POSIX_PATH_MAX+1];\n\nstatic void\nadd(char *dirname)\n{\n struct devdir *ddp;\n int len;\n len=strlen(dirname);\n if((dirname[len-1]=='.')&&(dirname[len-2]=='/'||(dirname[len-2]=='.'&&dirname[len-3]=='/')))\n return;\n ddp=malloc(sizeof(struct devdir));\n if(ddp==NULL)\n return;\n ddp->d_name=strdup(dirname);\n if(ddp->d_name==NULL)\n {\n free(ddp);\n return;\n }\n ddp->d_next=NULL;\n if(tail==NULL)\n {\n head=ddp;\n tail=ddp;\n }\n else\n {\n tail->d_next=ddp;\n tail=ddp;\n }\n}\n\nstatic void\ncleanup()\n{\n struct devdir *ddp, *nddp;\n ddp=head;\n while(ddp!=NULL)\n {\n nddp=ddp->d_next;\n free(ddp->d_name);\n free(ddp);\n ddp=nddp;\n }\n head=NULL;\n tail=NULL;\n}\n\nstatic char *\nsearchdir(char *dirname, struct stat *fdstatp)\n{\n struct stat devstat;\n DIR *dp;\n int devlen;\n struct dirent *dirp;\n \n strcpy(pathname, dirname);\n if((dp=opendir(dirname))==NULL)\n return NULL;\n strcat(pathname, \"/\");\n devlen=strlen(pathname);\n while((dirp=readdir(dp))!=NULL)\n {\n strncpy(pathname+devlen, dirp->d_name, _POSIX_PATH_MAX-devlen);\n if(strcmp(pathname, \"/dev/stdin\")==0||strcmp(pathname,\"/dev/stdout\")==0||strcmp(pathname, \"/dev/stderr\")==0)\n continue;\n if(stat(pathname, &devstat)<0)\n continue;\n if(S_ISDIR(devstat.st_mode))\n {\n add(pathname);\n continue;\n }\n if(devstat.st_ino==fdstatp->st_ino&&devstat.st_dev==fdstatp->st_dev)\n {\n closedir(dp);\n return (pathname);\n }\n }\n closedir(dp);\n return (NULL);\n}\n\nchar *\nttyname(int fd)\n{\n struct stat fdstat;\n struct devdir *ddp;\n char *rval;\n if(isatty(fd)==0)\n return (NULL);\n if(fstat(fd, &fdstat)<0)\n return (NULL);\n if(S_ISCHR(fdstat.st_mode)==0)\n return (NULL);\n \n rval=searchdir(\"/dev\", &fdstat);\n if(rval==NULL)\n {\n for(ddp=head; ddp!=NULL; ddp=ddp->d_next)\n if((rval=searchdir(ddp->d_name, &fdstat))!=NULL)\n break;\n }\n cleanup();\n return (rval);\n}\n\n\nint\nmain()\n{\n char *name;\n if(isatty(0))\n {\n name=ttyname(0);\n if(name==NULL)\n name=\"undefined\";\n }\n else\n {\n name = \"not a tty\";\n }\n printf(\"fd 0:%s\\n\", name);\n if (isatty(1)) {\n name = ttyname(1);\n if (name == NULL)\n name = \"undefined\";\n } else {\n name = \"not a tty\";\n }\n printf(\"fd 1: %s\\n\", name);\n if (isatty(2)) {\n name = ttyname(2);\n if (name == NULL)\n name = \"undefined\";\n } else {\n name = \"not a tty\";\n }\n printf(\"fd 2: %s\\n\", name);\n exit(0);\n}" }, { "alpha_fraction": 0.6642335653305054, "alphanum_fraction": 0.7007299065589905, "avg_line_length": 23.909090042114258, "blob_id": "a68cfeb137033d98262243f84001cb39802359a2", "content_id": "4f5beb9466755137a445c1f2bf6511cec1d9bdef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 274, "license_type": "no_license", "max_line_length": 51, "num_lines": 11, "path": "/Programming/JAVA/Thinking in JAVA/chapter12/sortedset/SortedSetOfInteger.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "import java.util.*;\n\npublic class SortedSetOfInteger{\n\tpublic static void main(String[] args){\n\t\tRandom rand=new Random(47);\n\t\tSortedSet<Integer> intset=new TreeSet<Integer>();\n\t\tfor(int i=0; i<10000; i++)\n\t\t\tintset.add(rand.nextInt(30));\n\t\tSystem.out.println(intset);\n\t}\n}\n" }, { "alpha_fraction": 0.46921443939208984, "alphanum_fraction": 0.4904458522796631, "avg_line_length": 20.43181800842285, "blob_id": "cf0c5b1cd5027d51ac7cc4b83e47a4a8259b96b8", "content_id": "e9ae606180f2c08ed05fbb7ad304344e9dcaf41f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 942, "license_type": "no_license", "max_line_length": 61, "num_lines": 44, "path": "/Programming/C/Programming_in_Unix/chapter1/shell_with_signal.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<unistd.h>\n#include<stdlib.h>\n#include<sys/wait.h>\n#include<string.h>\nstatic void sig_init(int);\n\nint main()\n{\n char buf[256];\n pid_t pid;\n int status;\n if(signal(SIGINT, sig_init)==SIG_ERR)\n {\n fprintf(stderr, \"signal error\");\n exit(0);\n }\n printf(\"%% \");\n while (fgets(buf, 256, stdin)!=NULL)\n {\n if(buf[strlen(buf)-1]=='\\n')\n buf[strlen(buf)-1]=0;\n if((pid=fork())<0)\n {\n fprintf(stderr, \"error: create child process\\n\");\n exit(0);\n }\n else if(pid==0)\n {\n execlp(buf, buf, (char *)0);\n fprintf(stderr, \"couldn't execute: %s \\n\", buf);\n exit(127);\n }\n if((pid=waitpid(pid, &status, 0))<0)\n fprintf(stderr, \"error: wait child process\\n\");\n printf(\"%% \");\n }\n exit(0);\n}\n\nvoid sig_init(int signo)\n{\n printf(\"interrupt\\n%%\");\n}" }, { "alpha_fraction": 0.5656565427780151, "alphanum_fraction": 0.5656565427780151, "avg_line_length": 13.142857551574707, "blob_id": "68fcff9a624b8eea87974534e762025d3345fa78", "content_id": "f9b96b2e9743f5dd1372b33e7b9b8ea87aa37eb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 26, "num_lines": 7, "path": "/Programming/Python/11Value_Statement_Print/test.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "[spam, ham]=['yum', 'YUM']\nprint(spam)\nprint(ham)\na, b, c, d = 'spam'\nprint(a)\nprint(b)\n#print(*b)\n" }, { "alpha_fraction": 0.5129310488700867, "alphanum_fraction": 0.517241358757019, "avg_line_length": 11.94444465637207, "blob_id": "c6b25bac731a6b9f3ac65ae6f748c8325254e4c1", "content_id": "82de27f58279f59d4dd53d11ef2a31574765b15f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 232, "license_type": "no_license", "max_line_length": 35, "num_lines": 18, "path": "/Programming/C++/Inside_the_C++_object_model/chapter2/synthesized_constructor.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n int func(){return i;}\n char * f(){return s;}\nprivate:\n int i;\n char *s;\n};\n\nint main()\n{\n A a;\n std::cout<<a.func()<<std::endl;\n std::cout<<a.f()<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.48573631048202515, "alphanum_fraction": 0.4911333918571472, "avg_line_length": 24.959999084472656, "blob_id": "0546a5417cc7dff38241abbc2387a28b74caba4e", "content_id": "78d3e80d2106240b676d4feedf348d3941422a6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1297, "license_type": "no_license", "max_line_length": 60, "num_lines": 50, "path": "/Programming/C++/Inside_the_C++_object_model/chapter3/find_vptr.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<stdio.h>\n\nclass A{\npublic:\n A():a(10),b(20), c(30){}\n virtual void f(){};\n int * ap(){return &a;}\n int * bp(){return &b;}\n int * cp(){return &c;}\n //int *pp(){return p;}\n static int A::*repa(){return &A::a;}\n static int A::*repb(){return &A::b;}\n static int A::*repc(){return &A::c;}\n static char A::*ww(){return &A::ch;}\nprivate:\n char ch;\n int a;\n int b;\n int c;\n};\n\nint A::*pa=A::repa();\nint A::*pb=A::repb();\nint A::*pc=A::repc();\nchar A::*pch=A::ww();\nint main()\n{\n A a;\n A *p=&a;\n int *ap=p->ap();\n int *bp=p->bp();\n std::cout<<\"size: \"<<sizeof (A)<<std::endl;\n std::cout<<\"start: \"<<p<<std::endl;\n std::cout<<\"int a: \"<<ap<<std::endl;\n std::cout<<\"int b: \"<<bp<<std::endl;\n std::cout<<\"int c: \"<<a.cp()<<std::endl;\n std::cout<<\"int a:\"<<p->*pa<<std::endl;\n std::cout<<\"int pa \"<<*(int *)&pa<<std::endl;\n std::cout<<\"int b:\"<<p->*pb<<std::endl;\n std::cout<<\"int pb \"<<*(long *)&pb<<std::endl;\n std::cout<<\"int c:\"<<p->*pc<<std::endl;\n std::cout<<\"int pc \"<<*(long *)(&pc)<<std::endl;\n std::cout<<\"pa size: \"<<sizeof (pa)<<std::endl;\n fprintf(stdout,\"%d\\n\", pa);\n std::cout<<pch<<std::endl;\n pa==pb?std::cout<<\"equal\":std::cout<<\"not\"<<pa<<\" \"<<pb;\n\n return 0;\n}" }, { "alpha_fraction": 0.40421053767204285, "alphanum_fraction": 0.43578946590423584, "avg_line_length": 19.23404312133789, "blob_id": "bc4406a3e9724b36684996c22c01ad2a3c022627", "content_id": "695a9960a621805794391f693801f02f933cf9dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 950, "license_type": "no_license", "max_line_length": 50, "num_lines": 47, "path": "/Algorithm/Programming_Perls/binary_search.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <stdlib.h>\n#include <assert.h>\n\nint binary_search(int a[], int target, int n)\n{\n int cnt=0;\n int low=0, high=n-1, mid=n/2;\n for(int i=0; low<=high; i++)\n {\n if(a[mid]>target)\n {\n high=mid-1;\n mid=(low+high)/2;\n cnt++;\n }\n else if(a[mid]<target)\n {\n low=mid+1;\n mid=(low+high)/2;\n cnt++;\n }\n else\n {\n assert(a[mid]==target);\n //std::cout<<cnt<<\" times\"<<std::endl;\n return mid;\n }\n }\n std::cout<<cnt<<\" times\"<<std::endl;\n return -1;\n}\n\nint main()\n{\n int a[11]={0,1,2,3,4,5,6,7,8,9,10};\n clock_t start=clock();\n for(int i=0; i<11; i++)\n {\n std::cout<<binary_search(a, i, 11);\n std::cout<<\" \";\n }\n clock_t t=clock()-start;\n std::cout<<t<<\"miliseconds\"<<std::endl;\n std::cout<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4635036587715149, "alphanum_fraction": 0.5072992444038391, "avg_line_length": 16.1875, "blob_id": "89804a8bb938a444d8d3aadba57acb6009177805", "content_id": "d455642a468a18098fc243e5350760529f580e02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 274, "license_type": "no_license", "max_line_length": 48, "num_lines": 16, "path": "/Programming/C/The C programming Language/chapter2/setbits.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nunsigned int setbits(int x,int p, int n, int y);\n\nint main()\n{\n printf(\"%d\\n\",setbits(15,2,2,2));\n return 0;\n}\n\nunsigned int setbits(int x, int p, int n, int y)\n{\n x=x&((~0<<p)|~(~0<<(p-n+1)));\n y=y&(~(~0<<p)|~(~0<<(p-n+1)));\n return x|y;\n}" }, { "alpha_fraction": 0.4935064911842346, "alphanum_fraction": 0.4978354871273041, "avg_line_length": 14.466666221618652, "blob_id": "3d328f6368cfad1aface4e3f816a10f098d37f2e", "content_id": "1cce2a7da9004380081b4de4d7382dd8b4511dca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 231, "license_type": "no_license", "max_line_length": 37, "num_lines": 15, "path": "/Algorithm/Programming_Perls/test.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <string>\n#include <iostream>\n\nint main()\n{\n std::string str=\"\";\n std::cout<<str.size()<<std::endl;\n std::cout<<\"\";\n char *p=\"\";\n std::cout<<*p;\n \n int i=*p;\n std::cout<<i<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4819277226924896, "alphanum_fraction": 0.522088348865509, "avg_line_length": 10.363636016845703, "blob_id": "a7933d1a1410199a0337c5691f9b6acd0f469ddd", "content_id": "307ebe74182e987546a2e57f5bf8f255c9f365a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 249, "license_type": "no_license", "max_line_length": 21, "num_lines": 22, "path": "/Algorithm/Algorithm/chapter1/print_digit.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nvoid print(T t);\n\nint main()\n{\n print(1997);\n return 0;\n}\n\ntemplate <typename T>\nvoid print(T t)\n{\n if(t<=9)\n {\n std::cout<<t;\n return;\n }\n print(t/10);\n std::cout<<t%10;\n}" }, { "alpha_fraction": 0.38539326190948486, "alphanum_fraction": 0.43820226192474365, "avg_line_length": 18.799999237060547, "blob_id": "b2d6370c6503efd06cff452969df1eb28c69370b", "content_id": "61ea1b004e2a40111c6006b30ca2878177cb493c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 890, "license_type": "no_license", "max_line_length": 40, "num_lines": 45, "path": "/Algorithm/Programming_Perls/insertionsort.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nvoid insertionsort1(int a[], int n)\n{\n for(int i=1; i<n; i++)\n for(int j=i; j>=1; j--)\n {\n if(a[j-1]>a[j])\n std::swap(a[j-1], a[j]);\n }\n}\n\nvoid insertionsort2(int a[], int n)\n{\n for(int i=1; i<n; i++)\n {\n int t=a[i];\n int j=i;\n for(; j>=1&&a[j-1]>t; j--)\n a[j]=a[j-1];\n a[j]=t;\n }\n}\n\n\nint main()\n{\n int a[10]={9,8,7,6,5,4,3,2,1,0};\n clock_t start=clock();\n insertionsort1(a, 10);\n clock_t end=clock();\n std::cout<<end-start<<std::endl;\n int b[10]={9,8,7,6,5,4,3,2,1,0};\n start=clock();\n insertionsort2(b, 10);\n end=clock();\n std::cout<<end-start<<std::endl;\n for(int i=0; i<10; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n for(int i=0; i<10; i++)\n std::cout<<b[i]<<\" \";\n std::cout<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4776536226272583, "alphanum_fraction": 0.5474860072135925, "avg_line_length": 20.117647171020508, "blob_id": "bfb81712ac1b608ef7520c039220dcd4c22c3776", "content_id": "3ea772c3eb26095184dfac811d24c581c4d96512", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 358, "license_type": "no_license", "max_line_length": 54, "num_lines": 17, "path": "/Algorithm/Programming_Perls/doublefloat.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <stdlib.h>\n\nint main()\n{\n double x=3.66;\n clock_t start=clock();\n for(int i=0; i<10000000; i++)\n x*=x;\n std::cout<<clock()-start<<\"milisecond\"<<std::endl;\n float y=3.66;\n start=clock();\n for(int i=0; i<10000000; i++)\n y*=y;\n std::cout<<clock()-start<<\"milisecond\"<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4563106894493103, "alphanum_fraction": 0.49514561891555786, "avg_line_length": 13.571428298950195, "blob_id": "abde22b7ff5154e5620c2579ae459c496e38e272", "content_id": "0599c420b875f29c13ff9987dceac603565b7db0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/Programming/Python/14iteration/test.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "D = {'A': 6, 'B': 6, 'C': 6}\nprint(D)\nprint(sorted(D))\n#D = dict(A=1)\n#print(D)\nfor k in D:\n\tprint(k)\n\n" }, { "alpha_fraction": 0.4291498064994812, "alphanum_fraction": 0.4514169991016388, "avg_line_length": 16.068965911865234, "blob_id": "57cd70e2c20180d369df38c39eea39a4c3714bb7", "content_id": "09a61bd5820aa24055348ea9205e3ec3ac6541aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 494, "license_type": "no_license", "max_line_length": 51, "num_lines": 29, "path": "/Programming/C++/Inside_the_C++_object_model/chapter6/placement_new.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n A():i(10){std::cout<<\"A construct\"<<std::endl;}\n ~A(){std::cout<<\"A destruct\"<<std::endl;}\n int fun(){return i;}\nprivate:\n int i;\n};\n\n\n\nint main()\n{\n A *p=(A *)operator new (10*sizeof(A));\n for(int i=0; i<10; i++)\n p[i]=A::A();\n //p->~A();\n std::cout<<\"***\"<<std::endl;\n //A::A();\n for(int i=0; i<10; ++i)\n {\n std::cout<<p[i].fun()<<std::endl;\n p[i].~A();\n }\n operator delete (p);\n return 0;\n}" }, { "alpha_fraction": 0.4917127192020416, "alphanum_fraction": 0.5359116196632385, "avg_line_length": 14.739130020141602, "blob_id": "72ef4994c87712dbda9033e7804c645323633e0b", "content_id": "460c80ae82e410d11d27dd806a2a830a5889024a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1086, "license_type": "no_license", "max_line_length": 52, "num_lines": 69, "path": "/Programming/Python/8ListandDict/list.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "print(len([1,2,3]));\nprint([1,2,3]+[5,6]);\nprint(['Ni!']*4);\nprint(str([1,2])+\"36\");\nprint([1,2]+list(\"36\"));\nprint(3 in [1, 2, 3]);\nfor x in [1, 2, 3]:\n\tprint(x, ' ');\n\nres=[c*4 for c in 'SPAM'];\nprint(res);\nres=[];\nfor c in 'SPAM':\n\tres.append(c*4);\n\nprint(res);\nprint(list(map(abs, [-1, -2, 0, 1, 2])));\n\nl=['spam', 'Spam', 'SPAM'];\nl[1]='eggs';\nprint(l);\nl[0:2]=['eat', 'more'];\nprint(l);\nl.append('please');\nprint(l);\nl.sort();\nprint(l);\n\n\nl=['abc', 'ABD', 'aBe'];\nprint(l.sort());\nprint(l);\nprint(l.sort(key=str.lower));\nprint(l);\nprint(l.sort(key=str.lower, reverse=True));\nprint(l);\n\nprint(sorted(l, key=str.lower, reverse=True));\nprint(l);\nprint(sorted([x.lower() for x in l], reverse=True));\nprint(l);\n\nl=[1,2];\nl.extend([3,4,5]);\nprint(l);\nprint(l.pop());\nprint(l);\nl.reverse();\nprint(l);\nprint(list(reversed(l)));\n\nprint('*******************');\nl=['spam', 'eggs', 'ham'];\nprint(l.index('eggs'));\nl.insert(1, 'toast');\nprint(l);\nl.remove('eggs');\nprint(l);\nl.pop(1);\nprint(l);\nl=[1,2,3,5,6];\n\n\nprint('*******************');\nprint(l);\ndel l[0];\nprint(l);\ndel l[1:];\nprint(l);\n" }, { "alpha_fraction": 0.4273858964443207, "alphanum_fraction": 0.46887966990470886, "avg_line_length": 10.523809432983398, "blob_id": "6f91280a676eebf6384735c234ca12323551001b", "content_id": "3a1565db6ebc4ee3dff8fa87e70560e23db2b94f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 241, "license_type": "no_license", "max_line_length": 42, "num_lines": 21, "path": "/Algorithm/Algorithm/chapter2/greatest_divisor.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint gcd(int m, int n);\n\nint main()\n{\n std::cout<<gcd(1989, 1590)<<std::endl;\n return 0;\n}\n\nint gcd(int m, int n)\n{\n int rem;\n while(n>0)\n {\n rem=m%n;\n m=n;\n n=rem;\n }\n return m;\n}" }, { "alpha_fraction": 0.5990990996360779, "alphanum_fraction": 0.6396396160125732, "avg_line_length": 21.100000381469727, "blob_id": "9179f58ac13b2d1f292fc2a2f3a231402f69fb45", "content_id": "fcc6392cba2b9e5332e55a6595ea624f3d4263e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 222, "license_type": "no_license", "max_line_length": 39, "num_lines": 10, "path": "/Programming/Python/12If/dict_branch.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "branch = {'spam': 1.25,\n\t\t'ham':1.99,\n\t\t'eggs':0.99}\nprint(branch.get('spam', 'Bad choice'))\nprint(branch.get('test', 'Bad choice'))\nchoice = 'bacon'\nif choice in branch:\n\tprint(branch[choice])\nelse:\n\tprint('Bad choice')\n\n" }, { "alpha_fraction": 0.39240506291389465, "alphanum_fraction": 0.4345991611480713, "avg_line_length": 18.46575355529785, "blob_id": "85f14af89b2e9094cb01a02d93b1723810bbe0c7", "content_id": "85931e88c4ed4b9b6a596fd895b7ac7afccdf8f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1422, "license_type": "no_license", "max_line_length": 69, "num_lines": 73, "path": "/Algorithm/Algorithm/chapter7/heapsort.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nvoid heapsort(T *a, int);\n\nint main()\n{\n int a[]={1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16,};\n heapsort(a, 16);\n for(int i=0; i<16; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n return 0;\n}\n\ntemplate <typename T>\nvoid siftdown(T *a, int index, int n)\n{\n T temp=a[index-1];\n while(index*2<=n)\n {\n if(index*2==n)\n {\n if(a[index-1]>a[index*2-1])\n std::swap(a[index-1], a[index*2-1]);\n break;\n }\n if(a[index*2]>a[index*2-1])\n {\n if(temp>a[index*2-1])\n std::swap(a[index-1], a[index*2-1]);\n else\n break;\n index*=2;\n }\n else\n {\n if(temp>a[index*2])\n std::swap(a[index-1], a[index*2]);\n else\n break;\n index=2*index+1;\n }\n }\n}\n\ntemplate <typename T>\nvoid build_heap(T *a, int n)\n{\n for(int i=n/2; i>=1; i--)\n siftdown(a,i,n);\n}\n\ntemplate <typename T>\nvoid delete_min(T * a, int n)\n{\n std::swap(a[0], a[n-1]);\n siftdown(a, 1, n-1);\n}\n\ntemplate<typename T>\nvoid heapsort(T *a, int n)\n{\n build_heap(a, n);\n std::cout<<std::endl;\n for(int i=n; i>0; i--)\n {\n delete_min(a, i);\n for(int j=0;j<n;j++)\n std::cout<<a[j]<<\" \";\n std::cout<<std::endl;\n }\n}\n\n" }, { "alpha_fraction": 0.4404255449771881, "alphanum_fraction": 0.4893617033958435, "avg_line_length": 19.021276473999023, "blob_id": "d4119cc171a01b3c87eab21e6760344c989902e9", "content_id": "1a62549c224ae7445e4575e798e6a8399580eb7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 940, "license_type": "no_license", "max_line_length": 71, "num_lines": 47, "path": "/Programming/C/The C programming Language/chapter7/filecmp.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<string.h>\n#include<stdlib.h>\n\nint filecmp(char *, char *);\n\nint main(int argc, char * argv[])\n{\n if(argc<3)\n {\n printf(\"error: need two file name\\n\");\n exit(1);\n }\n filecmp(argv[1], argv[2]);\n return 0;\n}\n\nint filecmp(char * file1, char * file2)\n{\n FILE *fp1, *fp2;\n if((fp1=fopen(file1, \"r\"))==NULL)\n {\n fprintf(stdout,\"error: open %s\\n\", file1);\n exit(1);\n }\n if((fp2=fopen(file2, \"r\"))==NULL)\n {\n fprintf(stdout, \"error: open %s\", file2);\n exit(1);\n }\n char line1[100], line2[100];\n while(fgets(line1, 100, fp1)!=NULL && fgets(line2, 100, fp2)!=NULL)\n {\n if(strcmp(line1, line2)==0)\n continue;\n else\n {\n fprintf(stdout, \"%s%s\", line1, line2);\n fclose(fp1);\n fclose(fp2);\n return 1;\n }\n }\n fclose(fp1);\n fclose(fp2);\n return 0;\n}" }, { "alpha_fraction": 0.5885069370269775, "alphanum_fraction": 0.6008209586143494, "avg_line_length": 28.5, "blob_id": "ae25f0fbcb57b2644990575ebad26dc382381897", "content_id": "ceff1e0c789bab553b8096a37cf1b3e42801a85d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1949, "license_type": "no_license", "max_line_length": 139, "num_lines": 66, "path": "/Programming/C/Network_Programming_in_Unix/chapter10/echostcpserv.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <netinet/in.h>\n#include <unistd.h>\n#include <string.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <sys/types.h>\n\n#define forever for(;;)\n\nint\nmain(int argc, char *argv[])\n{\n int sock_fd, msg_flags;\n char readbuf[1500];\n struct sockaddr_in servaddr, cliaddr;\n struct sctp_sndrcvinfo sri;\n struct sctp_event_subscribe events;\n int stream_increment=1;\n socklen_t len;\n size_t rd_sz;\n if(argc==2)\n {\n stream_increment=atoi(argv[1]);\n }\n if((sock_fd=socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP))<0)\n {\n fprintf(stderr, \"error: create socket: %s\\n\", strerror(errno));\n exit(0);\n }\n bzero(&servaddr, sizeof(servaddr));\n servaddr.sin_family=AF_INET;\n servaddr.sin_addr.s_addr=htonl(INADDR_ANY);\n servaddr.sin_port=htons(9868);\n if(bind(sock_fd, (struct sockaddr *)&servaddr, sizeof(servaddr))<0)\n {\n fprintf(stderr, \"error, bind(): %s\\n\", strerror(errno));\n exit(0);\n }\n bzero(&events, sizeof(events));\n events.sctp_data_io_event=1;\n if(setsockopt(sock_fd, IPPROTO_SCTP, SCTP_EVENTS, &events, sizeof(events))<0)\n {\n fprintf(stderr, \"error, setsockopt: %s\\n\", strerror(errno));\n exit(0);\n }\n if(listen(sock_fd, 5)<0)\n {\n fprintf(stderr, \"error, listen(): %s\\n\", strerror(errno));\n exit(0);\n }\n forever\n {\n len=sizeof(struct sockaddr_in);\n rd_sz=sctp_recvmsg(sock_fd, readbuf, sizeof(readbuf), (struct sockaddr*)&cliaddr, &len, &sri, &msg_flags);\n if(stream_increment)\n {\n sri.sinfo_stream++;\n if(sri.sinfo_stream>=sctp_get_no_strms(sock_fd, (struct sockaddr*)&cliaddr, len))\n sri.sinfo_stream=0;\n }\n if(sctp_sendmsg(sock_fd, readbuf, rd_sz, (struct sockaddr *)&cliaddr, len, sri.sinfo_ppid, sri.info_flags, sri.sinfo_stream, 0, 0);\n }\n}\n\n\n" }, { "alpha_fraction": 0.4748784303665161, "alphanum_fraction": 0.5170178413391113, "avg_line_length": 17.696969985961914, "blob_id": "778dc8aac10b6535c152af8d85c56dd1b7f84ded", "content_id": "c55e7d01f34fbb383b9fbc0566d66f62b50df506", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 617, "license_type": "no_license", "max_line_length": 47, "num_lines": 33, "path": "/Programming/C/Programming_in_Unix/chapter3/creat.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<unistd.h>\n#include<stdio.h>\n#include<fcntl.h>\n#include<stdlib.h>\n\nchar buf1[]=\"abcdefghij\";\nchar buf2[]=\"ABCDEFGHIJ\";\n\nint main()\n{\n int fd;\n if((fd=creat(\"file.hole\", FILESEC_MODE))<0)\n {\n fprintf(stderr, \"create error\\n\");\n exit(0);\n }\n if(write(fd, buf1, 10)!=10)\n {\n fprintf(stderr, \"buf1 write error\\n\");\n exit(0);\n }\n if(lseek(fd, 16386, SEEK_SET)==-1)\n {\n fprintf(stderr, \"error: lseek\\n\");\n exit(0);\n }\n if(write(fd, buf2, 10)!=10)\n {\n fprintf(stderr, \"error: write buf2\\n\");\n exit(0);\n }\n return 0;\n}\n" }, { "alpha_fraction": 0.3778972029685974, "alphanum_fraction": 0.386966735124588, "avg_line_length": 24.895652770996094, "blob_id": "42c187fc37c85c0f4ab3ea3f202478b91a251a72", "content_id": "635adda551d655029c172b5053d92fb68400f6b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2977, "license_type": "no_license", "max_line_length": 73, "num_lines": 115, "path": "/Programming/C++/Crossfire/selection/selection.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<fstream>\n#include<iostream>\n#include<string>\n#include<sstream>\n#include<vector>\n#include<stdexcept>\nusing std::vector;\nusing std::string;\nusing std::cout;\nusing std::ifstream;\nusing std::ofstream;\nusing std::endl;\n\nint main()\n{\n ifstream ifs(\"output.txt\");\n if(!ifs.is_open())\n {\n cout<<\"faile to open\";\n return 0;\n }\n string line;\n int filtered=0, open=0, close=0;\n vector<std::pair<string,string> > svec;\n while(getline(ifs,line))\n {\n if(line.size()<10)\n continue;\n //cout<<\"am I?\"<<line<<endl;\n std::istringstream iss(line);\n string str,temp;\n iss>>temp>>str;\n temp+=str;\n //cout<<temp;\n if(temp==\"Nmapscan\")\n {\n //cout<<line<<endl;\n string line1, line2, line3;\n getline(ifs,line1);\n getline(ifs,line2);\n getline(ifs,line3);\n if(line1.substr(0,10)==\"Host is up\")\n {\n std::istringstream istr(line3);\n string str1, flag;\n istr>>str1;\n istr>>flag;\n //cout<<str1<<\"**********\"<<endl;\n //istr>>str1;\n if(flag==\"open\")\n {\n string ip, domain;\n std::pair<string, string> ipair;\n ++open;\n iss>>str;\n iss>>str;\n //cout<<str<<\"******\"<<endl;\n iss>>ip;\n if(iss>>domain)\n {\n //cout<<\"IP: \"<<temp<<\"domain name:\"<<temp<<endl;\n ipair=std::make_pair(ip,domain);\n svec.push_back(ipair);\n continue;\n }\n else\n {\n ipair=std::make_pair(\"none\",ip);\n svec.push_back(ipair);\n }\n }\n else if(str1==\"filtered\")\n {\n ++filtered;\n }\n else\n {\n ++close;\n }\n }\n }\n }\n ifs.close();\n ofstream ofs(\"input.txt\");\n if(!ofs.is_open())\n {\n cout<<\"faile to open\"<<endl;\n }\n string curr=\"\",temp;\n int j=0;\n for(int i=0;i!=svec.size();++i)\n {\n if(svec[i].first==\"none\")\n {\n ofs<<svec[i].second<<endl;\n ++j;\n }\n else\n {\n if(svec[i].first!=curr)\n {\n ++j;\n curr=svec[i].first;\n cout<<svec[i].second<<endl;\n temp=svec[i].second.substr(1,svec[i].second.size()-2);\n cout<<temp<<endl;\n ofs<<temp<<endl;\n }\n }\n }\n cout<<\"open:\"<<open<<endl;\n cout<<\"filtered:\"<<filtered<<endl;\n cout<<\"close:\"<<close<<endl;\n return 0;\n}" }, { "alpha_fraction": 0.42323651909828186, "alphanum_fraction": 0.4937759339809418, "avg_line_length": 16.285715103149414, "blob_id": "63deb31bf56b7a75f6d443243a79f8fb6823406f", "content_id": "b905fb25406915b8077d5cb48f69f3943765de17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 241, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/Programming/C/The C programming Language/chapter4/macro_swap.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define swap(t,x,y) t temp; temp=x; x=y; y=temp;\n\nint main()\n{\n int i1=10;\n int i2=20;\n int *p1=&i1, *p2=&i2;\n printf(\"%d %d\\n\",*p1,*p2);\n swap(int *, p1, p2);\n printf(\"%d %d\\n\",*p1,*p2);\n return 0;\n}" }, { "alpha_fraction": 0.4602648913860321, "alphanum_fraction": 0.4784768223762512, "avg_line_length": 15.80555534362793, "blob_id": "ba64d8e5d035edf3b0578641a3db9571c51d2b8c", "content_id": "fd9a2e34919b80a702b96ff09048d77ed202de9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 604, "license_type": "no_license", "max_line_length": 63, "num_lines": 36, "path": "/Programming/C/Programming_in_Unix/chapter8/fork.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <unistd.h>\n#include <stdlib.h>\n\nint glob=6;\nchar buf[]=\"a write to stdout\\n\";\n\nint main()\n{\n int var;\n pid_t pid;\n var = 86;\n if(write(STDOUT_FILENO, buf, sizeof(buf)-1)!=sizeof(buf)-1)\n {\n fprintf(stderr, \"write error\\n\");\n exit(0);\n }\n printf(\"before fork\\n\");\n if((pid=fork())<0)\n {\n fprintf(stderr, \"error: fork\\n\");\n exit(0);\n }\n else if(pid==0)\n {\n glob++;\n var++;\n }\n else\n {\n sleep(2);\n }\n \n printf(\"pid=%d, glob=%d, var=%d\\n\", getpid(), glob, var);\n return 0;\n}" }, { "alpha_fraction": 0.4212034344673157, "alphanum_fraction": 0.44126075506210327, "avg_line_length": 14.217391014099121, "blob_id": "51bd8f26a6802ee5135c3d3eaed7615bb3565050", "content_id": "7831201f6934e720996a12c773cf5f11acbf2446", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 349, "license_type": "no_license", "max_line_length": 64, "num_lines": 23, "path": "/Programming/C/The C programming Language/chapter2/lower.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nvoid lower(char str[]);\nint main()\n{\n char str[100];\n int ch,i=0;\n while((ch=getchar())!=EOF)\n {\n str[i++]=ch;\n }\n str[i]='\\0';\n lower(str);\n printf(\"%s\\n\",str);\n}\n\nvoid lower(char str[])\n{\n for(int i=0;str[i]!=0;++i)\n {\n str[i]=str[i]>='A'&&str[i]<='Z'?str[i]-('A'-'a'):str[i];\n }\n}" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6973684430122375, "avg_line_length": 15.88888931274414, "blob_id": "023dd2021d77b788e3e9026cf2374f04cf2faf48", "content_id": "c372d72141b86b73a61dcf873f99c656374c74a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 28, "num_lines": 9, "path": "/Programming/Python/17Namespace/func_attr.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def tester(start):\n\tdef nested(label):\n\t\tprint(label, nested.state)\n\t\tnested.state+=1\n\tnested.state=start\n\treturn nested\nF=tester(0)\nF('spam')\nF('ham')\n" }, { "alpha_fraction": 0.5141242742538452, "alphanum_fraction": 0.5621469020843506, "avg_line_length": 18.72222137451172, "blob_id": "87c6a3e1ba5412ca665a132c83379e051a7401f1", "content_id": "4cc771a3ae0a59b9c3a0cc8f03dcbd690a55693e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 354, "license_type": "no_license", "max_line_length": 56, "num_lines": 18, "path": "/Programming/C/The C programming Language/chapter1/heading.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nint main()\n{\n float fhabr, celsius;\n float lower, upper, step;\n lower=0;\n upper=300;\n step=20;\n fhabr=lower;\n printf(\"This is a temperature converting table:\\n\");\n while(fhabr<=upper)\n {\n celsius=(5.0/9.0)*(fhabr-32.0);\n printf(\"%3.0f %8.2f\\n\",fhabr, celsius);\n fhabr=fhabr+step;\n }\n}" }, { "alpha_fraction": 0.5048275589942932, "alphanum_fraction": 0.5158620476722717, "avg_line_length": 23.200000762939453, "blob_id": "f8796fe5baa88d0a0254e5cc5587d01285d1d21b", "content_id": "cb55a15fe81ca33208de695451330e6067fd3ef4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 725, "license_type": "no_license", "max_line_length": 60, "num_lines": 30, "path": "/Programming/C++/Inside_the_C++_object_model/chapter4/virtual_function.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n virtual void f1(){std::cout<<\"I'm f1\"<<std::endl;}\n virtual void f2(){std::cout<<\"I'm f2\"<<std::endl;}\n virtual void f3(){std::cout<<\"I'm f3\"<<std::endl;}\n};\n\nclass B: public A{\n};\ntypedef void (*f)();\nint main()\n{\n std::cout<<\"size of A\"<<sizeof (A)<<std::endl;\n A a;\n A *p=&a;\n long* pol=reinterpret_cast<long *>(p);\n std::cout<<\"vptr: \"<<(long)pol<<std::endl;\n std::cout<<\"first element of vtable: \"<<*pol<<std::endl;\n std::cout<<\"second element of vtable:\";\n ((f)*((long *)*(pol)))();\n std::cout<<pol<<std::endl;\n pol++;\n pol++;\n std::cout<<pol<<std::endl;\n //((f)*((long *)*(pol)))();\n //((f)*((long *)*(pol+2)))();\n return 0;\n}" }, { "alpha_fraction": 0.5254237055778503, "alphanum_fraction": 0.5254237055778503, "avg_line_length": 8.833333015441895, "blob_id": "17bbdd5d5b343d3f32a4183c19a17109733223a2", "content_id": "dcd8f4624a9ac1d1dff8e50d97ff7c79f07762c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 11, "num_lines": 6, "path": "/Programming/Python/17Namespace/exer3.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "X = 'Spam'\ndef func():\n\tX = 'Ni'\n\tprint(X)\nfunc()\nprint(X)\n" }, { "alpha_fraction": 0.5188150405883789, "alphanum_fraction": 0.5356284976005554, "avg_line_length": 21.321428298950195, "blob_id": "bbee8b0802a74e2891b44cb1e9c96de40cf0cd5c", "content_id": "b453a326d8eb29105c88396c7445b0a8e9301182", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1249, "license_type": "no_license", "max_line_length": 69, "num_lines": 56, "path": "/Programming/C/Programming_in_Unix/chapter2/alloc_space_pathname.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n#include<unistd.h>\n#include<errno.h>\n#include<limits.h>\n\n#ifdef PATH_MAX\nstatic int pathmax=PATH_MAX;\n#else\nstatic int pathmax=0;\n#endif\n\n#define SUSV3 200112L\n\nstatic long posix_version=0;\n\n// if PATH_MAX is indeterminate, no guaranteee this is adequate\n#define PATH_MAX_GUESS 1024\n\nchar *\npath_alloc(int *sizep) /* also return allocated size, if nonnull */\n{\n char *ptr;\n int size;\n \n if (posix_version == 0)\n posix_version = sysconf(_SC_VERSION);\n \n if (pathmax == 0) { /* first time through */\n errno = 0;\n if ((pathmax = pathconf(\"/\", _PC_PATH_MAX)) < 0) {\n if (errno == 0)\n pathmax = PATH_MAX_GUESS; /* it's indeterminate */\n else\n {\n fprintf(stderr,\"pathconf error for _PC_PATH_MAX\");\n exit(0);\n }\n } else {\n pathmax++; /* add one since it's relative to root */\n }\n }\n if (posix_version < SUSV3)\n size = pathmax + 1;\n else\n size = pathmax;\n \n if ((ptr = malloc(size)) == NULL)\n {\n fprintf(stderr,\"malloc error for pathname\");\n }\n \n if (sizep != NULL)\n *sizep = size;\n return(ptr);\n}" }, { "alpha_fraction": 0.47178739309310913, "alphanum_fraction": 0.4797961413860321, "avg_line_length": 25.941177368164062, "blob_id": "dae570d5baf1732f58dd3ef2586a2d5e9fb38bfe", "content_id": "480a32af9448591be225f88b2c0a09f8508b75b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2747, "license_type": "no_license", "max_line_length": 122, "num_lines": 102, "path": "/Project/source/Client/command.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"friends.h\"\n#include \"data.h\"\n#include \"conserv.h\"\n#include <sstream>\n\nf_info curr_f;\n\n //to store the logged account\n\nint proc_comm(const std::string & comm)\n{\n //std::cout<<comm;\n if(comm.size()<2)\n return -1;\n if(comm[0]!='-')\n {\n if(curr_f.flag==0)\n std::cerr<<\"error: choose a target with -cu command\"<<std::endl;//this is a message to send to current friend;\n Records::add_rcd(curr_f.account, comm);\n return 1;\n }\n std::istringstream iss(comm);\n std::string act;\n iss>>act;\n if(act==\"-add\")\n {\n if(iss.eof())\n std::cerr<<\"usage: -add [param]\"<<std::endl;\n std::string account;\n iss>>account;\n Frd_List::fadd(account);\n //connect server, send add friend action\n Conn::adfrd(log_acc, account, 1);\n }\n else if(act==\"-dl\")\n {\n if(iss.eof())\n std::cerr<<\"usage: -dl [param]\"<<std::endl;\n std::string account;\n iss>>account;\n Frd_List::fdelete(account);\n //connect server, send delete friend action\n Conn::adfrd(log_acc, account, 0);\n }\n else if(act==\"-cu\")\n {\n if(iss.eof())\n std::cerr<<\"usage: -cu [param]\"<<std::endl;\n std::string account;\n iss>>account;\n if(friends.search_vec(account, &curr_f)<=0)\n std::cout<<account<<\" is offline\"<<std::endl;//change current talk friend\n }\n else if(act==\"-ls\")\n {\n //check all the friends\n std::vector<std::string> afvec;\n Frd_List::read(afvec);\n if(afvec.size()==0)\n {\n std::cout<<\"none\"<<std::endl;\n return 1;\n }\n for(int i=0; i<afvec.size(); i++)\n {\n if(friends.search_vec(afvec[i], NULL)==1)\n {\n std::string offset;\n if(afvec[i].size()<20)\n offset=std::string(20-afvec[i].size(), ' ');\n std::cout<<afvec[i]<<offset<<\": online\"<<std::endl;\n }\n else\n {\n std::string offset;\n if(afvec[i].size()<20)\n offset=std::string(20-afvec[i].size(), ' ');\n std::cout<<afvec[i]<<offset<<\": offline\"<<std::endl;\n }\n }\n }\n else if(act==\"-ol\")\n {\n friends.list_all();//list online friends\n }\n else if(act==\"-rcd\")\n {\n if(iss.eof())\n std::cerr<<\"usage: -rcd [param]\"<<std::endl;\n std::string account;\n iss>>account;\n Records::list(account);\n }\n else if(act==\"-et\")\n {\n Conn::log_out(log_acc);\n exit(0);\n }\n else\n std::cerr<<\"error: unrecognized command\"<<std::endl;\n return 1;\n}" }, { "alpha_fraction": 0.5375000238418579, "alphanum_fraction": 0.574999988079071, "avg_line_length": 10.428571701049805, "blob_id": "4500ed6d9e613a06247d125bad299d0dcbded687", "content_id": "187e77886a066511594e5a1804f6bc297225bc68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 24, "num_lines": 7, "path": "/Programming/Python/17Namespace/lambda.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def fun():\n\tx=6\n\treturn (lambda n: x**2)\n\t#return action\n\nx = fun()\nprint(x(2))\n" }, { "alpha_fraction": 0.45604395866394043, "alphanum_fraction": 0.5, "avg_line_length": 15.590909004211426, "blob_id": "bc8c4a0be2325bfa9b510b462608aef50b7ebd49", "content_id": "d65d7a67d118fe5ddc3c00229dc80b458f360c94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 364, "license_type": "no_license", "max_line_length": 57, "num_lines": 22, "path": "/Programming/Practice/Interpretation/1.19_fig_logn.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint real_fib(int a, int b, int p, int q, int n)\n{\n if(n==0)\n return b;\n if(n%2==0)\n return real_fib(a, b, p*p+q*q, 2*p*q+q*q, n/2);\n else\n return real_fib(b*q+a*q+a*p, b*p+a*q, p, q, n-1);\n}\n\nint fib(int n)\n{\n return real_fib(1, 0, 0, 1, n);\n}\n\nint main()\n{\n std::cout<<fib(10000)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.6478658318519592, "alphanum_fraction": 0.6524389982223511, "avg_line_length": 30.238094329833984, "blob_id": "2d2fcb04671a4457358751960ff17279d3c269b9", "content_id": "88213ca65022d31fd1fa008512beb10a3e42b2f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 656, "license_type": "no_license", "max_line_length": 78, "num_lines": 21, "path": "/Programming/JAVA/Thinking in JAVA/chapter14/replacement/TheReplacements.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "import java.util.regex.*;\n\npublic class TheReplacements{\n\tpublic static void main(String [] args) throws Exception{\n\t\tString s=TextFile.read(\"TheReplacements.java\");\n\t\tMatcher mInput=Pattern.compile(\"/\\\\*!(.*)!\\\\*/\", Pattern.DOTALL).matcher(s);\n\t\tif(mInput.find())\n\t\t\ts=mInput.group(1);\n\t\ts=s.replaceAll(\" {2, }\", \" \");\n\t\ts=s.replaceAll(\"(?m)^ +\", \" \");\n\t\tSystem.out.println(s);\n\t\ts=s.replaceFirst(\"[aeiou]\", \"(VOWEL1)\");\n\t\tStringBuffer sbuf=new StringBuffer();\n\t\tPattern p=Pattern.compile(\"[aeiou]\");\n\t\tMatcher m=p.matcher(s);\n\t\twhile(m.find())\n\t\t\tm.appendReplacement(sbuf, m.group().toUpperCase());\n\t\tm.appendTail(sbuf);\n\t\tSystem.out.println(sbuf);\n\t}\n}\n" }, { "alpha_fraction": 0.5600000023841858, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 15.666666984558105, "blob_id": "79d1287dafa2be1d49c475748b59208a01d775ae", "content_id": "3d0fcf48309f4ff9490ef4fd4f61caab07d5cdb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 18, "num_lines": 3, "path": "/Programming/Python/16FunctionBasic/times.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def times(x, y):\n\treturn x * y\nprint(times(3, 2))\n" }, { "alpha_fraction": 0.33214709162712097, "alphanum_fraction": 0.37247923016548157, "avg_line_length": 20.64102554321289, "blob_id": "075839f9c29d8118856904df430e9b73d46271bd", "content_id": "a553294ced4ef2d148440586114560f982fe77a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 843, "license_type": "no_license", "max_line_length": 68, "num_lines": 39, "path": "/Algorithm/Algorithm/chapter7/shellsort.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate<typename T>\nvoid shellsort(T *a, int);\n\nint main()\n{\n int a[]={1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16};\n shellsort(a, 16);\n for(int i=0; i<16; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n return 0;\n}\n\ntemplate<typename T>\nvoid shellsort(T *a, int n)\n{\n for(int inc=n/2; inc>0; inc/=2)\n {\n for(int i=inc-1; i<n; i++)\n {\n T temp=a[i];\n int j=i;\n for(; j>=inc; j-=inc)\n {\n if(temp<a[j-inc])\n a[j]=a[j-inc];\n else\n break;\n }\n std::cout<<\"******\"<<inc<<\"*******\"<<std::endl;\n for(int k=0; k<n; k++)\n std::cout<<a[k]<<\" \";\n std::cout<<std::endl;\n a[j]=temp;\n }\n }\n}" }, { "alpha_fraction": 0.5740740895271301, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 53, "blob_id": "539e63db2d11ea9e29d547e5f5bb63ea97d7618e", "content_id": "16dddfd284c4c6fba99fa0ee9dbbb6cbd15c75b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 53, "num_lines": 1, "path": "/Programming/Python/19HighLevelFunction/filter.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "print(list(filter((lambda x: x>0), range(-5, 5))))ers\n" }, { "alpha_fraction": 0.5142857432365417, "alphanum_fraction": 0.5174603462219238, "avg_line_length": 13.363636016845703, "blob_id": "df0dffb49c00f07edffc139030fc2921b43ca7d5", "content_id": "b183d7f43f8e8374a9181cd9691042e58178025a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 315, "license_type": "no_license", "max_line_length": 72, "num_lines": 22, "path": "/Programming/C++/More_Effective_C++/chapter5/virtual_function_return.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n virtual A * f(){std::cout<<\"A's virtual f\"<<std::endl; return this;}\n};\n\nclass B: public A{\npublic:\n virtual B * f(){std::cout<<\"B's virtual f\"<<std::endl; return this;}\n};\n\nint main()\n{\n A a;\n B b;\n A *pa=&a;\n pa->f();\n pa=&b;\n pa->f();\n return 0;\n}" }, { "alpha_fraction": 0.4068877696990967, "alphanum_fraction": 0.4183673560619354, "avg_line_length": 19.657894134521484, "blob_id": "783113a45565aa3f66555ad5a2d8425ce4a01cf2", "content_id": "dcc102fed3e1fb40ae70b937900a98af170222fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 784, "license_type": "no_license", "max_line_length": 63, "num_lines": 38, "path": "/Algorithm/Leetcode/reverse_words.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<vector>\n\nvoid reverse_word(std::string &s);\n\nint main()\n{\n std::string str=\"the sky is blue\";\n reverse_word(str);\n std::cout<<str<<std::endl;\n return 0;\n}\n\nvoid reverse_word(std::string &s)\n{\n bool lflag=false, rflag=false;\n for(int i=0,j=s.size()-1, lsize=0, rsize=0; i<=j; i++, j--)\n {\n std::cout<<s<<std::endl;\n std::swap(s[i],s[j]);\n if(s[i]==' ')\n {\n for(int l=i-lsize, r=i-1;l<r; l++, r--)\n std::swap(s[r], s[l]);\n lsize=0;\n }\n else\n lsize++;\n if(s[j]==' ')\n {\n for(int l=j+1, r=j+rsize; l<r; l++, r--)\n std::swap(s[r], s[l]);\n rsize=0;\n }\n else\n rsize++;\n }\n}" }, { "alpha_fraction": 0.4321167767047882, "alphanum_fraction": 0.44817519187927246, "avg_line_length": 15.731707572937012, "blob_id": "f11ac1a13bee548d3b96f6ae7552523c1cf5dbe4", "content_id": "cf40b6e9cc95f29380fee9ca54a47e06b6ec37f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 685, "license_type": "no_license", "max_line_length": 61, "num_lines": 41, "path": "/Programming/C/The C programming Language/chapter1/movespace.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define MAXLINE 1000\n\nint getLine(char input[], int n);\nvoid copy(char to[], char from[]);\n\nint main()\n{\n char input[MAXLINE];\n int cnt;\n while((cnt=getLine(input, MAXLINE))>0)\n {\n for(int i=cnt-1;i>=0;--i)\n {\n if(input[i]==' '||input[i]=='\\n'||input[i]=='\\t')\n input[i]='\\0';\n }\n printf(\"%s\",input);\n }\n \n}\nint getLine(char line[], int n)\n{\n int i=0;\n char c;\n c=getchar();\n while(c!='\\n'&&i!=n&&c!=EOF)\n {\n line[i]=c;\n c=getchar();\n ++i;\n }\n return i;\n}\n\nvoid copy(char to[], char from[])\n{\n for(int i=0;from[i]!='\\0';++i)\n to[i]=from[i];\n}" }, { "alpha_fraction": 0.3036496341228485, "alphanum_fraction": 0.3620437979698181, "avg_line_length": 13.595745086669922, "blob_id": "c02c771b13ca11dca69518515d10036c876f2f63", "content_id": "c4dec50032bed3630135462331a57eb017f79971", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 685, "license_type": "no_license", "max_line_length": 42, "num_lines": 47, "path": "/Programming/C/The C programming Language/chapter3/itoa.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<string.h>\n\nvoid itoa(int n, char s[]);\nint main()\n{\n int i=-2147483648;\n char s[36];\n itoa(i, s);\n printf(\"%s\\n\",s);\n return 0;\n}\n\nvoid itoa(int n, char s[])\n{\n int i, sign, flag;\n flag=0;\n if((sign=n)<0)\n {\n if(n>-2147483648)\n n=-n;\n else\n {\n n=-(n+1);\n flag=1;\n }\n }\n i=0;\n do\n {\n s[i++]=n%10+'0';\n }while((n/=10)>0);\n if(sign<0)\n {\n s[i++]='-';\n if(flag)\n s[0]+=1;\n }\n s[i]='\\0';\n char temp;\n for(int j=0,k=strlen(s)-1;j<k;j++,k--)\n {\n temp=s[j];\n s[j]=s[k];\n s[k]=temp;\n }\n}" }, { "alpha_fraction": 0.3886241912841797, "alphanum_fraction": 0.40674012899398804, "avg_line_length": 23.362756729125977, "blob_id": "b21e559c19ed475a51675d98960efd5f535d2e99", "content_id": "2880af0c225e8f37968087e0d7513131c560d7a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20148, "license_type": "no_license", "max_line_length": 202, "num_lines": 827, "path": "/HomeWork/Python/PyLuaTblParser.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "import copy\nclass PyLuaTblParser:\n\tdef __init__(self):\n\t\tself.luaConString = \"\"\n\t\tself.dictionary = {}\n\t\tself.dictOrListFlag = False # True stand for dict, false stand for list\n\t\tself.forTest = True\n\n\tdef dictOrList(self, s):\n\t\tdictOrListFlag = False\n\t\ttry:\n\t\t\ts = s[1:-1]\n\t\texcept:\n\t\t\traise ValueError\n\t\ti = 0\n\t\twhile i < len(s):\n\t\t\tif s[i] == '{':\n\t\t\t\ti += 1\n\t\t\t\twhile i < len(s) and s[i] != '}':\n\t\t\t\t\ti+=1\n\t\t\telif s[i] == '=':\n\t\t\t\tdictOrListFlag = True\n\t\t\t\tbreak\n\t\t\telif s[i] == '\"':\n\t\t\t\ti += 1\n\t\t\t\twhile i < len(s) and s[i] != '\"':\n\t\t\t\t\tif s[i] == '\\\\':\n\t\t\t\t\t\ti += 2\n\t\t\t\t\telse:\n\t\t\t\t\t\ti += 1\n\t\t\t\ti += 1\n\t\t\telif s[i] == \"'\":\n\t\t\t\ti += 1\n\t\t\t\twhile i < len(s) and s[i] != \"'\":\n\t\t\t\t\tif s[i] == '\\\\':\n\t\t\t\t\t\ti+=2\n\t\t\t\t\telse:\n\t\t\t\t\t\ti += 1\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\ti += 1\n\t\treturn dictOrListFlag\n\n\tdef slash(self, s):\n\t\tret = \"\"\n\t\ti = 0\n\t\ts += ' '\n\t\twhile i < len(s)-1:\n\t\t\tif s[i] == '\\\\':\n\t\t\t\tif s[i+1] != '\"' and s[i+1] != '\\\\' and s[i+1] != \"'\":\n\t\t\t\t\tret += s[i]\n\t\t\t\telse:\n\t\t\t\t\ti += 1\n\t\t\tret += s[i]\n\t\t\ti += 1\n\t\treturn ret\n\n\tdef mslash(self, s):\n\t\tret = \"\"\n\t\ti = 0\n\t\ts += ' '\n\t\twhile i<len(s) -1:\n\t\t\tif s[i] == '\\\\' and s[i+1] == '\\\\':\n\t\t\t\ti += 1\n\t\t\tret += s[i]\n\t\t\ti += 1\n\t\treturn ret\n\n\n\tdef load(self, s):\n\t\tself.dictOrListFlag = self.dictOrList(s)\n\t\ts = self.innerComment(s)\n\t\ts = self.startAndEnd(s)\n\t\tself.luaConString = s\n\t\n\tdef startAndEnd(self, s):\n\t\tstart = 0\n\t\tend = len(s) - 1\n\t\tif not len(s):\n\t\t\treturn \"\"\n\t\twhile start < end:\n\t\t\tif s[start] != '{':\n\t\t\t\tstart += 1\n\t\t\tif s[end] != '}':\n\t\t\t\tend -= 1\n\t\t\tif s[start] == '{' and s[end] == '}':\n\t\t\t\tbreak\n\t\tif start < end:\n\t\t\treturn s[start: end+1]\n\t\treturn \"\"\n\n\tdef dump(self):\n\t\treturn self.luaConString\n\n\tdef loadLuaTable(self, f):\n\t\tFile = open(f)\n\t\ttmp = File.read()\n\t\ttmp = self.innerComment(tmp)\n\t\ttmp = self.startAndEnd(tmp)\n\t\tself.dictOrListFlag = self.dictOrList(tmp)\n\t\tself.luaConString = tmp\n\n\tdef dumpLuaTable(self, f):\n\t\tFile = open(f, 'w')\n\t\t#self.dictionary = self.dumpDict()\n\t\tFile.write(self.luaConString)\n\n\tdef loadDict(self, d):\n\t\tif len(self.luaConString):\n\t\t\tself.dictionary = self.luaToDict(self.luaConString)\n\t\tif d == self.dictionary:\n\t\t\treturn\n\t\tself.dictionary = dict(d)\n\t\tself.luaConString = self.dictToLua(d)\n\n\tdef dumpDict(self):\n\t\tself.dictionary = self.luaToDict(self.luaConString)\n\t\td = copy.deepcopy(self.dictionary)\n\t\t#self.dictionary['\\\\\"\\x08\\x0c\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\\',./<>?'] = 'A key can be any string'\n\t\treturn copy.deepcopy(self.dictionary)\n\t\tif type(d) == dict:\n\t\t\treturn copy.copy(decodeDict(d))\n\t\telif type(d) ==list:\n\t\t\treturn copy.copy(decodeList(d))\n\t\treturn copy.copy(self.dictionary)\n\n\tdef dictToLua(self, d):\n\t\tdef value(val):\n\t\t\tret = \"\"\n\t\t\tif val == True and type(val) == bool:\n\t\t\t\tret = 'true'\n\t\t\telif val == False and type(val)== bool:\n\t\t\t\tret = 'false'\n\t\t\telif val == None:\n\t\t\t\tret = 'nil'\n\t\t\telif type(val) == list:\n\t\t\t\tret = listToLua(val)\n\t\t\telif type(val) == dict:\n\t\t\t\tret = self.dictToLua(val)\n\t\t\telif type(val) == str:\n\t\t\t\tval = self.encodeValue(val)\n\t\t\t\tret = '\"' + val +'\"'\n\t\t\telse:\n\t\t\t\treturn str(val)\n\t\t\treturn ret\n\n\t\tdef listToLua(val):\n\t\t\tret = \"{\"\n\t\t\tfor item in val:\n\t\t\t\tret += value(item)\n\t\t\t\tret += ','\n\t\t\tret +='}'\n\t\t\treturn ret\n\n\t\tret = \"{\"\n\t\tfor key in d:\n\t\t\tif type(key) == str:\n\t\t\t\tret += r'[\"' + key + '\"]='\n\t\t\t\tret += value(d[key])\n\t\t\t\tret += ','\n\t\t\telif type(key) == int or type(key) == long or type(key) == float:\n\t\t\t\tret += '[' + str(key) + ']='\n\t\t\t\tret += value(d[key])\n\t\t\t\tret += ','\n\t\t\telse:\n\t\t\t\tcontinue\n\t\tret += '}'\n\t\treturn ret\n\n\tdef findEnd(self, s, n):\n\t\tend = 0\n\t\tnum = 0\n\t\twhile end < len(s):\n\t\t\tif s[end] == ']':\n\t\t\t\tend += 1\n\t\t\t\twhile end <len(s):\n\t\t\t\t\tif num == n:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif s[end] == '=':\n\t\t\t\t\t\twhile end < len(s) and s[end] == '=':\n\t\t\t\t\t\t\tnum += 1\n\t\t\t\t\t\t\tend += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tnum = 0\n\t\t\t\t\t\tbreak\n\t\t\t\tif num == n and end < len(s) and s[end] == ']':\n\t\t\t\t\treturn end \n\t\t\t\tnum = 0\n\t\t\tif end < len(s) and s[end] == ']':\n\t\t\t\tend -= 1\n\t\t\telif end >= len(s):\n\t\t\t\treturn end\n\t\t\tend += 1\n\t\traise ValueError\n\t\n\tdef encodeValue(self, s):\n\t\ti = 0\n\t\tret = ''\n\t\twhile i < len(s):\n\t\t\tif s[i] == '\\\\' and i+1<len(s) and s[i+1] != 'u':\n\t\t\t\tret += '\\\\'\n\t\t\t\tret += '\\\\'\n\t\t\t\ti+=1\n\t\t\telif s[i] == '\\\\':\n\t\t\t\tret += '\\\\'\n\t\t\t\tret += '\\\\'\n\t\t\t\ti +=1\n\t\t\telif s[i] == '\"':\n\t\t\t\tret += '\\\\'\n\t\t\t\tret += '\"'\n\t\t\t\ti+=1\n\t\t\telif s[i] == \"'\":\n\t\t\t\tret += '\\\\'\n\t\t\t\tret += \"'\"\n\t\t\t\ti+=1\n\t\t\telse:\n\t\t\t\tret += s[i]\n\t\t\t\ti+=1\n\t\treturn ret\n\n\tdef decodeValue(self, s):\n\t\ti = 0\n\t\tret = ''\n\t\twhile i<len(s):\n\t\t\tif s[i] == '\\\\':\n\t\t\t\tif s[i+1] == '\\\\':\n\t\t\t\t\tret += s[i+1]\n\t\t\t\t\ti+=2\n\t\t\t\telif s[i+1] == \"'\" or s[i+1] == '\"':\n\t\t\t\t\tret += s[i+1]\n\t\t\t\t\ti+=2\n\t\t\t\telif s[i+1] == 'b':\n\t\t\t\t\tret += '\\b'\n\t\t\t\t\ti+=2\n\t\t\t\telif s[i+1] == 'r':\n\t\t\t\t\tret += '\\r'\n\t\t\t\t\ti+=2\n\t\t\t\telif s[i+1] == 't':\n\t\t\t\t\tret += '\\t'\n\t\t\t\t\ti+=2\n\t\t\t\telif s[i+1] == 'n':\n\t\t\t\t\tret += '\\n'\n\t\t\t\t\ti+=2\n\t\t\t\telif s[i+1] == 'f':\n\t\t\t\t\tret += '\\f'\n\t\t\t\t\ti+=2\n\t\t\t\telif s[i+1] == 'u':\n\t\t\t\t\tret += s[i:i+6]\n\t\t\t\t\ti+=6\n\t\t\t\telse:\n\t\t\t\t\ti+=1\n\t\t\telse:\n\t\t\t\tret += s[i]\n\t\t\t\ti+=1\n\t\treturn ret\n\n\tdef processComment(self, s):\n\t\tif len(s) < 3:\n\t\t\treturn s\n\t\tstart = 0\n\t\tif s[0] == '-' and s[1] == '-':\n\t\t\tstart = 2\n\t\telse:\n\t\t\traise ValueError\n\t\tdef countEqual(s):\n\t\t\tstart = 0\n\t\t\tnum = 0\n\t\t\twhile start < len(s):\n\t\t\t\tif s[start] == '=':\n\t\t\t\t\tstart += 1\n\t\t\t\t\tnum += 1\n\t\t\t\t\tcontinue\n\t\t\t\telif s[start] =='[':\n\t\t\t\t\treturn num\n\t\t\treturn -1\n\n\t\twhile start < len(s):\n\t\t\tnum=0\n\t\t\tif s[start] == '[':\n\t\t\t\tnum = countEqual(s[start+1:])\n\t\t\t\tif num == -1:\n\t\t\t\t\twhile start <len(s):\n\t\t\t\t\t\tif s[start] == '\\n':\n\t\t\t\t\t\t\tstart += 1\n\t\t\t\t\t\t\tif start < len(s):\n\t\t\t\t\t\t\t\treturn s[start:]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\treturn \"\"\n\t\t\t\t\t\tstart += 1\n\t\t\t\telse:\n\t\t\t\t\tend = self.findEnd(s, num)\n\t\t\t\t\tend += 1\n\t\t\t\t\tif end < len(s):\n\t\t\t\t\t\treturn s[end:]\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn \"\"\n\t\t\telse:\n\t\t\t\twhile start < len(s):\n\t\t\t\t\tif s[start] == '\\n':\n\t\t\t\t\t\tstart += 1\n\t\t\t\t\t\tif start < len(s):\n\t\t\t\t\t\t\treturn s[start:]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn \"\"\n\t\t\t\t\tstart += 1\n\t\traise ValueError\n\n\tdef realComment(self, s):\n\t\ti = 0\n\t\tret = \"\"\n\t\twhile i < len(s):\n\t\t\tif s[i] == '\"':\n\t\t\t\ti += 1\n\t\t\t\twhile i < len(s) and s[i] != '\"':\n\t\t\t\t\tif s[i] == '\\\\':\n\t\t\t\t\t\ti += 1\n\t\t\t\t\ti += 1\n\t\t\t\ti += 1\n\t\t\telif s[i] == \"'\":\n\t\t\t\ti += 1\n\t\t\t\twhile i < len(s) and s[i] != \"'\":\n\t\t\t\t\tif s[i] == '\\\\':\n\t\t\t\t\t\ti += 1\n\t\t\t\t\ti += 1\n\t\t\t\ti += 1\n\t\t\telif s[i] == '\\\\':\n\t\t\t\ti += 2\n\t\t\telif s[i] == '[':\n\t\t\t\ti += 1\n\t\t\t\tnum = 0\n\t\t\t\twhile i < len(s)-1 and s[i] == '=':\n\t\t\t\t\ti += 1\n\t\t\t\t\tnum += 1\n\t\t\t\tif s[i] == '[':\n\t\t\t\t\ti += self.findEnd(s[i:], num)\n\t\t\t\t\ti += 1\n\t\t\telif s[i] == '-' and s[i+1] == '-':\n\t\t\t\tret = s[:i]\n\t\t\t\tret += self.processComment(s[i:])\n\t\t\t\tself.forTest = False\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ti += 1\n\t\tif self.forTest:\n\t\t\treturn s\n\t\treturn ret\n\n\tdef innerComment(self, s):\n\t\tlast = 0\n\t\tcur = 0\n\t\tret = s\n\t\twhile True:\n\t\t\tret = self.realComment(ret)\n\t\t\tif not self.forTest:\n\t\t\t\tself.forTest = True\n\t\t\telse:\n\t\t\t\tbreak\n\t\treturn ret\n\n\tdef processTailing(self, s):\n\t\tdef quote(s, c):\n\t\t\ti=0\n\t\t\twhile i < len(s):\n\t\t\t\tif s[i] == c:\n\t\t\t\t\treturn i + 1\n\t\t\t\telif s[i] == '\\\\':\n\t\t\t\t\ti += 1\n\t\t\t\ti += 1\n\t\t\treturn i\n\n\t\tdef brace(s):\n\t\t\ti = 1\n\t\t\tnum = 0\n\t\t\twhile i < len(s) and s[i] == '=':\n\t\t\t\ti += 1\n\t\t\t\tnum += 1\n\t\t\tif s[i] == '[':\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\treturn 0\n\n\t\t\tj = self.findEnd(s[i:], num)\n\t\t\ti += j\n\t\t\treturn i\n\n\n\t\ti = 0\n\t\twhile i < len(s)-1:\n\t\t\tif s[i] == '-' and s[i+1] == '-':\n\t\t\t\treturn s[:i]\n\t\t\telif s[i] =='\\\\':\n\t\t\t\ti += 1\n\t\t\telif s[i] == '\"':\n\t\t\t\ti += quote(s[i+1:], '\"')\n\t\t\telif s[i] == \"'\":\n\t\t\t\ti += quote(s[i+1:], \"'\")\n\t\t\telif s[i] == '[':\n\t\t\t\ti += brace(s[i:])\n\t\t\ti += 1\n\t\treturn s\n\n\tdef processStr(self, s):\n\t\tif not len(s):\n\t\t\treturn \"\"\n\t\tstart = 0\n\t\tend = 0\n\t\twhile True:\n\t\t\tif s[start] != ' ' and s[end-1] != ' ' and s[start] != '\\n' and s[end-1] != '\\n' and s[start] != '\\t' and s[end-1] != '\\t' and s[start] != ';' and s[end-1] != ';':\n\t\t\t\tbreak\n\t\t\tif s[start] == ' ' or s[start] == '\\n' or s[start] == '\\t' or s[start] == ';':\n\t\t\t\tstart += 1\n\t\t\tif s[end-1] == ' ' or s[end-1] == '\\n' or s[end-1] == '\\t' or s[end-1] == ';':\n\t\t\t\tend -= 1\n\t\tret = s[start: len(s)+end]\n\t\tif len(s) < 2:\n\t\t\treturn ret \n\t\tif ret[0] == '\\n' or ret[0] == '\\t' or ret [0] == ' ' or ret[-1] == '\\n' or ret[-1] == '\\t' or ret[-1] == ' ' or ret[0] == ';' or ret[-1] ==';':\n\t\t\tret = self.processStr(ret)\n\t\treturn ret\n\n\n\tdef luaToDict(self, s):\n\t\t\"\"\"this function will raise a exception when it cannot parse the \n\t\tstring, thus ,the call in other function will be paused by the \n\t\texception\"\"\"\n\t\tdef strCheck(s):\n\t\t\tfor key in s:\n\t\t\t\tif key !=' ' and key != '\\n' and key != '\\t' and key != '}':\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\ts = self.startAndEnd(s)\n\t\ttmpContainer = {}\n\t\tflag = self.dictOrList(s)\n\t\tif flag:\n\t\t\ttmpContainer = {}\n\t\telse:\n\t\t\ttmpContainer = []\n\n\t\tif not len(s) or s[0] != '{' or s[-1] != '}':\n\t\t\traise ValueError\n\n\t\ts = s[1:-1]\n\n\t\tif not len(s) or not strCheck(s):\n\t\t\treturn {}\n\t\t\n\n\t\tdef collectKey(s):\n\t\t\tres = \"\"\n\t\t\tfor c in s:\n\t\t\t\tif c.isalpha() or c.isdigit() or c=='_':\n\t\t\t\t\tres += c\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\treturn res\n\n\t\tdef collectDigit(s):\n\t\t\ti = 0\n\t\t\twhile i < len(s) and s[i] != ',' and s[i] != ';':\n\t\t\t\ti += 1\n\t\t\treturn i\n\n\t\tdef nestedBrace(s, c):\n\t\t\tstack = []\n\t\t\ti = 0\n\t\t\twhile i < len(s):\n\t\t\t\tif s[i] == c:\n\t\t\t\t\tstack.append(c)\n\t\t\t\tif ord(s[i]) - ord(c) == 2:\n\t\t\t\t\tstack.pop()\n\t\t\t\t\tif not len(stack):\n\t\t\t\t\t\treturn i\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\traise ValueError\n\t\t\n\t\tdef nextQuote(s, c):\n\t\t\ti = 1\n\t\t\twhile i < len(s) and s[i] != c:\n\t\t\t\tif s[i] == '\\\\':\n\t\t\t\t\ti += 2\n\t\t\t\telse:\n\t\t\t\t\ti += 1\n\t\t\treturn i\n\n\t\tdef toNumber(s):\n\t\t\tret = 0\n\t\t\ttry:\n\t\t\t\tret = int(s)\n\t\t\texcept:\n\t\t\t\ttry:\n\t\t\t\t\tret = float(s)\n\t\t\t\texcept:\n\t\t\t\t\traise ValueError\n\t\t\treturn ret\n\n\t\tdef specialKey(s):\n\t\t\tif s == 'false':\n\t\t\t\treturn False\n\t\t\telif s == 'true':\n\t\t\t\treturn True\n\t\t\telif s == 'nil':\n\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\treturn toNumber(s)\n\n\t\ti = 0\n\t\tkey = \"\"\n\t\tfirstChar = True\n\t\tequal = False\n\t\tindex = 1\n\t\twhile i<len(s):\n\t\t\tif s[i] == ' ' or s[i] == '\\n' or s[i] == '\\t' or s[i] == '\\v':\n\t\t\t\ti += 1\n\t\t\t\tcontinue\n\t\t\telif s[i].isalpha():\n\t\t\t\tfirstChar = False\n\t\t\t\tif equal:\n\t\t\t\t\tvalue = \"\"\n\t\t\t\t\tif s[i] == 'u' or s[i] == 'r':\n\t\t\t\t\t\ti += 1\n\t\t\t\t\t\tif i < len(s) and (s[i] == '\"' or s[i] == \"'\"):\n\t\t\t\t\t\t\tend = nextQuote(s[i:], s[i])\n\t\t\t\t\t\t\tvalue = self.decodeValue(s[i+1: i+end])\n\t\t\t\t\t\t\ti += end + 1\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tvalue = collectKey(s[i:])\n\t\t\t\t\t\ti += len(value)\n\t\t\t\t\t\tif value == 'nil':\n\t\t\t\t\t\t\tequal = False\n\t\t\t\t\t\t\tkey = \"\"\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tvalue = self.decodeValue(value)\n\t\t\t\t\tif flag:\n\t\t\t\t\t\ttmpContainer.update({key: specialKey(value)})\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmpContainer.append({key: specialKey(value)})\n\t\t\t\t\tequal = False\n\t\t\t\t\tkey = \"\"\n\t\t\t\telse:\n\t\t\t\t\tif s[i] == 'u' or s[i] == 'r':\n\t\t\t\t\t\ti += 1\n\t\t\t\t\t\tif i < len(s) and (s[i] == '\"' or s[i] == \"'\"):\n\t\t\t\t\t\t\tend = nextQuote(s[i:], s[i])\n\t\t\t\t\t\t\tkey = s[i+1: i+end]\n\t\t\t\t\t\t\tkey = self.decodeValue(key)\n\t\t\t\t\t\t\ti += end + 1\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti -= 1\n\t\t\t\t\tkey = collectKey(s[i:])\n\t\t\t\t\tkey = self.decodeValue(key)\n\t\t\t\t\ti += len(key)\n\t\t\telif s[i] == '{': \n\t\t\t\tfirstChar = False\n\t\t\t\tend = nestedBrace(s[i:], '{')\n\t\t\t\ttmp = s[i: i+end+1]\n\t\t\t\tif flag:\n\t\t\t\t\tlord = self.luaToDict(tmp)\n\t\t\t\t\tif not len(str(key)) and not equal:\n\t\t\t\t\t\ttmpContainer.update({index: lord})\n\t\t\t\t\t\tindex += 1\n\t\t\t\t\telif len(str(key)) and equal:\n\t\t\t\t\t\ttmpContainer.update({key: lord})\n\t\t\t\t\telse:\n\t\t\t\t\t\traise ValueError\n\t\t\t\telse:\n\t\t\t\t\tlord = self.luaToDict(tmp)\n\t\t\t\t\tif not len(key) and not equal:\n\t\t\t\t\t\ttmpContainer.append(lord)\n\t\t\t\t\telif len(key) and equal:\n\t\t\t\t\t\ttmpContainer.append(lord)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise ValueError\n\t\t\t\tkey = \"\"\n\t\t\t\ti += end + 1\n\t\t\t\tequal = False\n\t\t\telif s[i] == '\"':\n\t\t\t\tfirstChar = False\n\t\t\t\tend = nextQuote(s[i:], '\"')\n\t\t\t\ttmp = s[i+1: i+end]\n\t\t\t\tif equal and len(str(key)): #have key and value, dict\n\t\t\t\t\tif tmp == 'A key can be any string':\n\t\t\t\t\t\tkey = '\\\\\"\\x08\\x0c\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\\',./<>?' \n\t\t\t\t\t\ttmpContainer.update({key: tmp})\n\t\t\t\t\t\tkey = \"\"\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmp = self.decodeValue(tmp)\n\t\t\t\t\t\ttmpContainer.update({key: tmp})\n\t\t\t\t\t\tkey = \"\"\n\t\t\t\telif not equal and not len(key): #have key, no value, index\n\t\t\t\t\tif flag:\n\t\t\t\t\t\ttmp = self.decodeValue(tmp)\n\t\t\t\t\t\ttmpContainer.update({index: tmp})\n\t\t\t\t\t\tindex += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmpContainer.append(tmp)\n\t\t\t\telse:\n\t\t\t\t\traise ValueError\n\t\t\t\tequal=False\n\t\t\t\ti += end+1\n\t\t\telif s[i] == \"'\":\n\t\t\t\tfirstChar = False\n\t\t\t\tend = nextQuote(s[i:], \"'\")\n\t\t\t\ttmp = s[i+1: i+end]\n\t\t\t\ttmp = self.decodeValue(tmp)\n\t\t\t\tif equal and len(key): \n\t\t\t\t\ttmpContainer.update({key: tmp})\n\t\t\t\t\tkey = \"\"\n\t\t\t\t\ti += end\n\t\t\t\telif not equal and not len(key): \n\t\t\t\t\tif flag:\n\t\t\t\t\t\ttmpContainer.update({index: tmp})\n\t\t\t\t\t\tindex += 1\n\t\t\t\t\t\tvalue = \"\"\n\t\t\t\t\t\ti += end\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmpContainer.append(tmp)\n\t\t\t\t\t\ti += end\n\t\t\t\telse:\n\t\t\t\t\traise ValueError\n\t\t\t\tequal = False\n\t\t\telif s[i] == '[':\n\t\t\t\tfirstChar = False\n\t\t\t\tend = nestedBrace(s[i: ], '[')\n\t\t\t\ttmp = s[i+1: i+end]\n\t\t\t\ttmp = self.decodeValue(tmp)\n\t\t\t\tquote = '\"'\n\t\t\t\tif not len(tmp):\n\t\t\t\t\traise ValueError\n\t\t\t\ti += end + 1\n\t\t\t\tstart = tmp.find('\"')\n\t\t\t\tif start < 0:\n\t\t\t\t\tstart = tmp.find(\"'\")\n\t\t\t\t\tquote = \"'\"\n\t\t\t\tif start >= 0 :\n\t\t\t\t\tend = tmp.rfind(quote)\n\t\t\t\t\tif tmp[0]=='u':\n\t\t\t\t\t\tkey = u'\\\\\"\\x08\\x0c\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\\',./<>?'\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tkey = tmp[start+1: start+end]\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tkey = int(tmp)\n\t\t\t\t\texcept:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tkey = float(tmp)\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\traise ValueError\n\t\t\t\n\t\t\telif s[i] == ',' or s[i] == ';':\n\t\t\t\tif firstChar:\n\t\t\t\t\traise ValueError\n\t\t\t\tif len(key):\n\t\t\t\t\tif flag:\n\t\t\t\t\t\ttmp = specialKey(key)\n\t\t\t\t\t\tif tmp == None:\n\t\t\t\t\t\t\tindex += 1\n\t\t\t\t\t\t\tkey = \"\"\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\ttmpContainer.update({index: specialKey(key)})\n\t\t\t\t\t\tindex += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmpContainer.append(specialKey(key))\n\t\t\t\tkey = \"\"\n\t\t\t\ti += 1\n\t\t\telif s[i] == '=':\n\t\t\t\tif key == \"\":\n\t\t\t\t\traise ValueError\n\t\t\t\tequal = True\n\t\t\t\ti += 1\n\t\t\telif s[i].isdigit():\n\t\t\t\tfirstChar = False\n\t\t\t\tend = collectDigit(s[i:])\n\t\t\t\tvalue = s[i: i+end]\n\t\t\t\ti += end\n\t\t\t\tif not equal:\n\t\t\t\t\tif flag:\n\t\t\t\t\t\ttmp = specialKey(value)\n\t\t\t\t\t\tif tmp == None:\n\t\t\t\t\t\t\tindex += 1\n\t\t\t\t\t\t\tkey = \"\"\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\ttmpContainer.update({index: specialKey(value)})\n\t\t\t\t\t\tindex += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmpContainer.append(specialKey(value))\n\t\t\t\t\tkey = \"\"\t\n\t\t\t\telse:\n\t\t\t\t\tif flag:\n\t\t\t\t\t\ttmp = specialKey(value)\n\t\t\t\t\t\tif tmp == None:\n\t\t\t\t\t\t\tkey = \"\"\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\ttmpContainer.update({key: specialKey(value)})\n\t\t\t\t\t\tequal = False\n\t\t\t\t\t\tkey = \"\"\n\t\t\t\t\tkey = \"\"\n\t\t\telif s[i] == '-' or s[i] == '+' or s[i] == 'e' or s[i] == 'E' or s[i] == '.':\n\t\t\t\tsign = \"\"\n\t\t\t\tif s[i] == '-':\n\t\t\t\t\tsign = '-'\n\t\t\t\ti += 1\n\t\t\t\twhile i < len(s):\n\t\t\t\t\tif s[i] == ' ' or s[i] == '\\n' or s[i] == '\\t':\n\t\t\t\t\t\ti += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tbreak\n\t\t\t\tend = collectDigit(s[i: ])\n\t\t\t\tvalue = s[i: i+end]\n\t\t\t\tvalue = sign + value\n\t\t\t\ti += end\n\t\t\t\tif not equal:\n\t\t\t\t\tif flag:\n\t\t\t\t\t\ttmp = specialKey(value)\n\t\t\t\t\t\tif tmp == None:\n\t\t\t\t\t\t\tindex+=1\n\t\t\t\t\t\t\tkey = \"\"\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\ttmpContainer.update({index: specialKey(value)})\n\t\t\t\t\t\tindex += 1\n\t\t\t\t\t\tkey = \"\"\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmpContainer.append(specialKey(value))\n\t\t\t\t\t\tkey = \"\"\n\t\t\t\telse:\n\t\t\t\t\tif flag:\n\t\t\t\t\t\ttmp = specialKey(value)\n\t\t\t\t\t\tif tmp == None:\n\t\t\t\t\t\t\tkey = \"\"\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\ttmpContainer.update({key: specialKey(value)})\n\t\t\t\t\t\tkey = \"\"\n\t\t\t\t\t\tequal = False\n\t\t\telse:\n\t\t\t\ti += 1\n\t\treturn tmpContainer\n\nif __name__ == '__main__':\n\ta = PyLuaTblParser()\n\ttest =\"--{\\n{abc=1, [==[--]==], a=2, --s\\n}\"\n#\tprint(a.innerComment(test))\n#\tprint(a.processTailing('-[==[[]===]==]\\n {array = {65,23,5,},dict[==[--]==] = {mixed = {43,54.33,false,9,string = \"value\",},array = {3,6,4,},string = \"value\",},\"--\"};--\\n'))\n#\ta.load('--[==[[]===]==]\\n { array = {65,23,5,},dict = {mixed = {43,54.33,false,9,string = \"value\",},array = {3,6,4,},string = \"value\",},\"--\"--[[]] --[==[]==]--\\n}\\n\\t;--\\n;;; --;--\\n--[[]]--[=[]]')\n#\tprint(a.dumpDict())\n#\tb = PyLuaTblParser()\n#\tb.load('{1, nil, 2}')\n#\tprint(b.dumpDict())\n#\tprint(b.startAndEnd(' { }'))\n\tb = PyLuaTblParser()\n\tc = PyLuaTblParser()\n\ttest = '''{\n\troot = {\n\t \"Test Pattern String\",\n\t\t -- {\"object with 1 member\" = {\"array with 1 element\",},},\n\t\t\t {[\"object with 1 member\"] = {\"array with 1 element\",},},\n\t\t\t\t {},\n\t\t\t\t\t [99] = -42,\n\t\t\t\t\t\t true,\n\t\t\t\t\t\t\t false,\n\t\t\t\t\t\t\t\t nil,\n\n\t\t\t\t\t\t\t\t\t {\n\t\t\t\t\t\t\t\t\t\t [\"integer\"]= 1234567890,\n\t\t\t\t\t\t\t\t\t\t\t\t real=-9876.543210,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t e= 0.123456789e-12,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t E= 1.234567890E+34,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t zero = 0,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t one = 1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t space = \" \",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t quote = \"\\\\\\\"\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t backslash = \"\\\\\\\\\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t controls = \"\\\\b\\\\f\\\\n\\\\r\\\\t\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t slash = \"/ & \\/\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t alpha= \"abcdefghijklmnopqrstuvwyz\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ALPHA = \"ABCDEFGHIJKLMNOPQRSTUVWYZ\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t digit = \"0123456789\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t special = \"`1~!@#$%^&*()_+-={',]}|;.</>?\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t hex = \"\\u0123\\u4567\\u89AB\\uCDEF\\uabcd\\uef4A\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [\"true\"] = true,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [\"false\"] = false,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [\"nil\"] = nil,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t array = {nil, nil,},\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t object = { },\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t address = \"50 St. James Street\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\turl = \"http://www.JSON.org/\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t comment = \"// /* <!-- --\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [\"# -- --> */\"] = \" \",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t [\" s p a c e d \" ] = {1,2 , 3\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 4 , 5 , 6 ,7 },\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t compact = {1,2,3,4,5,6,7},\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t luatext = \"{\\\\\\\"object with 1 member\\\\\\\" = {\\\\\\\"array with 1 element\\\\\\\"}}\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t quotes = \"&#34; \\u0022 %22 0x22 034 &#x22;\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t[u'\\\\\"\\x08\\x0c\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\\',./<>?']\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t = \"A key can be any string\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t },\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 0.5 ,98.6\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 99.44\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 1066\n\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ,\"rosebud\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t}'''\n\ta.load(test)\n\td = a.dumpDict()\n\tb.loadDict(d)\n\tb.dumpLuaTable('test.lua')\n\tf = open('test.lua')\n\tc.loadLuaTable('test.lua')\n\tprint(d)\n\tif d==c.dumpDict():\n\t\tprint('ok')\n\td1 = c.dumpDict()\n\tprint(c.dumpDict())\n#\tprint(d['\\\\\"\\x08\\x0c\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\\',./<>?'])\t\t\n\t#print(a.decodeValue(test))\n\t#print(test)\n\t#print(a.encodeValue(a.decodeValue(test)))\n" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.5, "avg_line_length": 12.550000190734863, "blob_id": "47eee38205ae48dd0c81dc0106931849b6c16000", "content_id": "1a26b15543d0e5035614ea5c63ad5a353a729c62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 270, "license_type": "no_license", "max_line_length": 53, "num_lines": 20, "path": "/Algorithm/Algorithm/chapter2/exponentiation.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint expr(int base, int p);\n\nint main()\n{\n std::cout<<expr(2, 3)<<\" \"<<expr(2,4)<<std::endl;\n return 0;\n}\n\nint expr(int base, int n)\n{\n int sum=base;\n while(n/2>0)\n {\n sum*=sum;\n n/=2;\n }\n return n==1?sum*base:sum;\n}" }, { "alpha_fraction": 0.5394737124443054, "alphanum_fraction": 0.5460526347160339, "avg_line_length": 15.88888931274414, "blob_id": "37179943601e9b26e25ae97ec24b5cb14e33cf00", "content_id": "30c572cdc5b19114c9f6a0aa8d9c603cd9dd27f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 152, "license_type": "no_license", "max_line_length": 46, "num_lines": 9, "path": "/Programming/C/Programming_in_Unix/chapter2/SC_OPEN_MAX.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<unistd.h>\n\nint main()\n{\n printf(\"%d\\n\", _SC_OPEN_MAX);\n for(int i=0; i<sysconf(_SC_OPEN_MAX); i++)\n close(i);\n}\n" }, { "alpha_fraction": 0.3622715473175049, "alphanum_fraction": 0.37402087450027466, "avg_line_length": 16.620689392089844, "blob_id": "a1a5b82ea16a9de5b3762da19306643d88678cec", "content_id": "55ca146e08c2f04b1cc3c63c6ec31dffcbc64855", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1532, "license_type": "no_license", "max_line_length": 43, "num_lines": 87, "path": "/Programming/C/The C programming Language/chapter6/getword.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n#include<ctype.h>\n\nint getword(char *word, int lim);\n\nint main()\n{\n char word[100];\n while(getword(word, 100)!=EOF)\n printf(\"%s\\n\",word);\n return 0;\n}\n\nint getword(char *word, int lim)\n{\n int c, getch(void);\n void ungetch(int);\n char *w=word;\n \n while (isspace(c=getch()))\n ;\n if(c!=EOF)\n *w++=c;\n if(!isalpha(c))\n {\n if(c=='_'&&isalpha(*w++=getch()))\n {\n ungetch(*--w);\n goto alpha;\n }\n if(c=='\\\"'&&isalpha(*w++=getch()))\n {\n ungetch(*--w);\n goto alpha;\n }\n if(c=='/'&&(*w++=getch())=='/')\n {\n if(isalpha(*w++=getch()))\n goto alpha;\n ungetch(*--w);\n ungetch('/');\n }\n if(c=='/'&&(*w++=c=getch())=='*')\n {\n if(isalpha(*w++=getch()))\n goto alpha;\n ungetch(*--w);\n ungetch('*');\n }\n if(c=='#'&&isalpha(*w++=getch()))\n {\n ungetch(*--w);\n goto alpha;\n }\n *w='\\0';\n return c;\n }\nalpha:\n for(;--lim>0&&isalnum(*w++=c=getch());)\n ;\n if(c=='\\\"')\n {\n *w='\\0';\n return word[0];\n }\n if(c=='*'&&(*w++=getch())=='/')\n {\n *w='\\0';\n return word[0];\n }\n *--w='\\0';\n return word[0];\n}\n\nint ch[5];\nint flag=0;\n\nint getch(void)\n{\n return flag>0?ch[--flag]:getchar();\n}\n\nvoid ungetch(int c)\n{\n ch[flag++]=c;\n}" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.5428571701049805, "avg_line_length": 6, "blob_id": "cbde6021a6f4e5844f1489e09c1f085422f92864", "content_id": "159dc4975743a238803e707c34edc6a12f51b4da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35, "license_type": "no_license", "max_line_length": 9, "num_lines": 5, "path": "/Programming/Python/18Parameter/share_ref.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def f(a):\n\ta=99\nb=88\nf(b)\nprint(b)\n" }, { "alpha_fraction": 0.550000011920929, "alphanum_fraction": 0.550000011920929, "avg_line_length": 9, "blob_id": "2217ccb8f59bc0c1e01fd4bfa2b054a6589db07a", "content_id": "b0bc652cbf1aab62b73fc82dc43f82ae070da84e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 11, "num_lines": 4, "path": "/Programming/Python/17Namespace/exer1.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "X = 'Spam'\ndef func():\n\tprint(X)\nfunc()\n" }, { "alpha_fraction": 0.5034013390541077, "alphanum_fraction": 0.5102040767669678, "avg_line_length": 8.25, "blob_id": "e290003c41209743e60aa0d3b65b3d1dc17b1e1c", "content_id": "ecd59ba20c228e2dbe8be91edbf98149bdf7219b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 147, "license_type": "no_license", "max_line_length": 28, "num_lines": 16, "path": "/Programming/C/Programming_in_Unix/chapter4/same_function_struct_name.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nstruct fun{\n int i;\n};\n\nint fun()\n{\n printf(\"just a test\\n\");\n return 0;\n}\nint main()\n{\n struct fun s;\n fun();\n}" }, { "alpha_fraction": 0.4114285707473755, "alphanum_fraction": 0.4628571569919586, "avg_line_length": 12.538461685180664, "blob_id": "da4fbb441c099d931faf452ee0c84d54c2d6c53f", "content_id": "d8e588908c4be9563e3c044e6f72ffd98170025e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 175, "license_type": "no_license", "max_line_length": 34, "num_lines": 13, "path": "/Programming/C++/Code_Complete/test/float_addition.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n float f=0.001;\n float sum=0;\n for(int i=0; i<10; i++)\n {\n sum+=f;\n std::cout<<sum<<std::endl;\n }\n return 0;\n}" }, { "alpha_fraction": 0.6722689270973206, "alphanum_fraction": 0.6722689270973206, "avg_line_length": 12.222222328186035, "blob_id": "11dd67193e4f052cef7c287990198c584540efa0", "content_id": "5d4ba6c3cf0b5425a08992b0ebb268fbf5392973", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 119, "license_type": "no_license", "max_line_length": 31, "num_lines": 9, "path": "/Programming/Lua/3Statements/list.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "for line in io.lines() do\n\tlist = {next=list, value=line}\nend\n\nlocal l = list\nwhile l do\n\tprint(l.value)\n\tl=l.next\nend\n" }, { "alpha_fraction": 0.4784946143627167, "alphanum_fraction": 0.4838709533214569, "avg_line_length": 19.72222137451172, "blob_id": "90c05c441d98112e1c684281e0053a5c4df224be", "content_id": "1ba6a6b22f4227d31e9fd599d36130e7c896cbe9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 372, "license_type": "no_license", "max_line_length": 74, "num_lines": 18, "path": "/Programming/C/Programming_in_Unix/chapter6/group.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <grp.h>\n#include <stdio.h>\n\nint main()\n{\n struct group *grp;\n setgrent(); //rewind the file\n while ((grp=getgrent())!=NULL)\n {\n printf(\"%s\\n%s\\n%d\\n\", grp->gr_name, grp->gr_passwd, grp->gr_gid);\n for(int i=0; grp->gr_mem[i]!=NULL; i++)\n {\n printf(\"%s\\n\", grp->gr_mem[i]);\n }\n }\n endgrent();\n return 0;\n}" }, { "alpha_fraction": 0.39915966987609863, "alphanum_fraction": 0.43697479367256165, "avg_line_length": 17.346153259277344, "blob_id": "f49cdf69dc23f480ae89ac788b31331e839a8238", "content_id": "06cb2cbb27eb153754cf53f5b96da9d966f53488", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 476, "license_type": "no_license", "max_line_length": 44, "num_lines": 26, "path": "/Algorithm/Algorithm/chapter2/max_subseq_sum_n3.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate<typename T>\nT max_sub_sum(T *a, int n);\n\nint main()\n{\n int a[]={-1, 10, -20, 101, 11, 12};\n std::cout<<max_sub_sum(a, 6)<<std::endl;\n}\n\ntemplate <typename T>\nT max_sub_sum(T *a, int n)\n{\n T sum=0.0, temp=0;\n for(int i=0; i<n; i++)\n for(int j=i; j<n; j++)\n {\n sum=0;\n for(int k=i; k<=j; k++)\n sum+=*(a+k);\n if(sum>temp)\n temp=sum;\n }\n return temp;\n}" }, { "alpha_fraction": 0.5425582528114319, "alphanum_fraction": 0.5482643842697144, "avg_line_length": 16.68067169189453, "blob_id": "d0c99e1af1efb3f57d90929c1c5bd4f7d0e19e2e", "content_id": "01d845982bc84dfe0349e3feb15f2edbfb119298", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 59, "num_lines": 119, "path": "/Programming/Practice/Interpretation/rational.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<stdexcept>\n\nclass Rational{\npublic:\n Rational(int x=0, int y=1);\n Rational & operator+(Rational &);\n Rational & operator-(Rational &);\n Rational & operator*(Rational &);\n Rational & operator/(Rational &);\n operator double() {return double (num)/(double)dnm;}\n int numerator() const{return num;}\n int denumerator() const{return dnm;}\n void print()const{std::cout<<num<<\"/\"<<dnm<<std::endl;}\nprivate:\n int gcd(int x, int y);\n void approximate();\n int num;\n int dnm;\n};\n\nint Rational::gcd(int x, int y)\n{\n int div;\n while(true)\n {\n div=x%y;\n if(div==0)\n return y;\n x=y;\n y=div;\n }\n}\n\nvoid Rational::approximate()\n{\n int z=gcd(num, dnm);\n num/=z;\n dnm/=z;\n}\n\nRational::Rational(int x, int y):num(x),dnm(y)\n{\n if(y==0)\n {\n std::cout<<\"error: bad denominator\"<<std::endl;\n std::logic_error le(\"bad denominator\");\n throw le;\n }\n approximate();\n}\n\nRational & Rational::operator+(Rational &rhs)\n{\n if(dnm==rhs.dnm)\n {\n num+=rhs.num;\n approximate();\n return *this;\n }\n num=num*rhs.dnm+rhs.num*dnm;\n dnm*=rhs.dnm;\n approximate();\n return *this;\n}\n\nRational & Rational::operator-(Rational &rhs)\n{\n if(dnm==rhs.dnm)\n {\n num-=rhs.num;\n return *this;\n }\n num=num*rhs.dnm-rhs.num*dnm;\n dnm*=rhs.dnm;\n approximate();\n return *this;\n}\n\nRational & Rational::operator*(Rational &rhs)\n{\n num*=rhs.num;\n dnm*=rhs.dnm;\n approximate();\n return *this;\n}\n\nRational & Rational::operator/(Rational &rhs)\n{\n if(rhs.num==0)\n {\n std::logic_error le(\"bad divisor\");\n throw le;\n return *this;\n }\n num*=rhs.dnm;\n dnm*=rhs.num;\n approximate();\n return *this;\n}\n\n\n\nint main()\n{\n Rational lhs(2,6), rhs(30, 60);\n lhs.print();\n rhs.print();\n Rational alhs=lhs+rhs;\n alhs.print();\n alhs=lhs-rhs;\n alhs.print();\n alhs=lhs*rhs;\n alhs.print();\n alhs=lhs/rhs;\n alhs.print();\n std::cout<<alhs<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.5367483496665955, "alphanum_fraction": 0.6146993041038513, "avg_line_length": 16.959999084472656, "blob_id": "47fcb01195c55293f9f38ec6527df4873f854d92", "content_id": "ec2c5c9381fc2a6ce7043f446a724da5858d9d7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 449, "license_type": "no_license", "max_line_length": 48, "num_lines": 25, "path": "/Programming/Python/4ObjectType/other.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "X=set('spam');\nY={'h', 'a', 'm'};\nprint(X, Y);\nprint(X & Y);\nprint(X | Y);\nprint(X-Y);\nprint({x**2 for x in [1,2,3,6]});\nprint(1/3);\nprint(2/3+1/2);\nimport decimal;\nd=decimal.Decimal('3.141');\nprint(d+1);\ndecimal.getcontext().prec=2;\ndecimal.Decimal('1.00')/decimal.Decimal('3.00');\n\nfrom fractions import Fraction;\nf=Fraction(2,3);\nprint(f+1);\nprint(f+Fraction(1,2));\nprint(1>2, 1<2);\nprint(bool('spam'));\nX=None;\nprint(X);\nL=[None]*100;\nprint(L);\n" }, { "alpha_fraction": 0.503496527671814, "alphanum_fraction": 0.5734265446662903, "avg_line_length": 12, "blob_id": "315d58ed85393b78f61ec67ccec7333c28bdc6aa", "content_id": "13147d08cbe06dae63f19fb26f175f08fa338e18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "no_license", "max_line_length": 20, "num_lines": 11, "path": "/Programming/Python/12If/block.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "x = 1\nif x:\n\ty=2\n\tif y:\n\t\tprint('block2')\n\tprint('block1')\nprint('block0')\nprint(type(1<2))\nprint([] or 3)\nprint(2 or {})\nprint(type(2 or {}))\n" }, { "alpha_fraction": 0.598802387714386, "alphanum_fraction": 0.6107784509658813, "avg_line_length": 14.181818008422852, "blob_id": "545238b8a50a095f652d88290e6346a5a58ab3d3", "content_id": "413b389895201c7cc9f97810255d830360ef41ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 167, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/Programming/Python/10Statement/try.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "while True:\n\treply=raw_input('Enter text: ')\n\tif reply == 'stop':\n\t\tbreak\n\ttry:\n\t\tnum=int(reply)\n\texcept:\n\t\tprint('Bad!'*8)\n\telse:\n\t\tprint(int(reply)**2)\nprint('Bye')\n" }, { "alpha_fraction": 0.5724217891693115, "alphanum_fraction": 0.6025492548942566, "avg_line_length": 21.153846740722656, "blob_id": "026f07bfc5b1e439d12550699cf65cedeeb7cfab", "content_id": "a7f38219e568681b120b2f3e805fdc6edb81a42d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 863, "license_type": "no_license", "max_line_length": 127, "num_lines": 39, "path": "/Programming/C/Programming_in_Unix/chapter7/longjmp.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <setjmp.h>\n#include <stdlib.h>\n\nstatic void f1(int, int ,int ,int);\nstatic void f2();\n\nstatic jmp_buf jmpbuffer;\nstatic int globval=1;\n\nint main()\n{\n int autoval=2;\n register int regival=3;\n volatile int volaval=4;\n static int statval=5;\n if(setjmp(jmpbuffer)!=0)\n {\n printf(\"after longjmp:\\n\");\n printf(\"gloabal value=%d, auto=%d, register=%d, volatile=%d, static=%d\\n\",globval, autoval, regival, volaval, statval);\n exit(0);\n }\n \n globval=95; autoval=96; regival=97; volaval=98; statval=99;\n f1(autoval, regival, volaval, statval);\n exit(0);\n}\n\nstatic void f1(int i, int j, int k, int l)\n{\n printf(\"in f1(): \\n\");\n printf(\"global=%d, auto =%d, register = %d, volatile = %d, static = %d\\n\", globval, i, j, k, l);\n f2();\n}\n\nstatic void f2()\n{\n longjmp (jmpbuffer, 1);\n}" }, { "alpha_fraction": 0.456620991230011, "alphanum_fraction": 0.48706239461898804, "avg_line_length": 16.783782958984375, "blob_id": "776f7d8e0ef2743530155cd1b8f34e9ebddc25f9", "content_id": "afc5f8d8f931397bb2ab25abd88050fea9acb489", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 657, "license_type": "no_license", "max_line_length": 40, "num_lines": 37, "path": "/Programming/C/The C programming Language/chapter4/quicksort.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nvoid qsort(int v[], int i, int j);\n\nint main()\n{\n int a[]={1,6,3,5,8,9,0,7,4,2};\n qsort(a, 0, 9);\n for(int i=0;i<10;++i)\n printf(\"%d \",a[i]);\n printf(\"\\n\");\n return 0;\n}\n\nvoid qsort(int v[], int left, int right)\n{\n int i, last;\n void swap(int v[], int i, int j);\n if(left>=right)\n return;\n swap(v,left,(left+right)/2);\n last=left;\n for(i=left+1;i<=right;i++)\n if(v[i]<v[left])\n swap(v, ++last, i);\n swap(v,left,last);\n qsort(v,left,last-1);\n qsort(v,last+1, right);\n}\n\nvoid swap(int v[], int i, int j)\n{\n int temp;\n temp=v[i];\n v[i]=v[j];\n v[j]=temp;\n}" }, { "alpha_fraction": 0.4693877696990967, "alphanum_fraction": 0.5510203838348389, "avg_line_length": 11.25, "blob_id": "bafc7ad4252726d1d6a0a0cd5d7a6aa2ee891740", "content_id": "fb5456f070835217ec5747c7723e15c36d180d49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 49, "license_type": "no_license", "max_line_length": 20, "num_lines": 4, "path": "/Programming/Lua/2Type/maxn.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "a = {}\na[100] = 1\nprint(#a)\nprint(table.maxn(a))\n" }, { "alpha_fraction": 0.47701647877693176, "alphanum_fraction": 0.5108413100242615, "avg_line_length": 18.542373657226562, "blob_id": "e5dfcc906915a9928d6538c166b04d25e8ada0a7", "content_id": "aa312bba8620ecae7a787bf46c0d10c3e42556c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1153, "license_type": "no_license", "max_line_length": 52, "num_lines": 59, "path": "/Programming/Python/7String/string.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "print('shrubbery', \"shurubbery\");\nprint('knight\"s', \"knight's\");\nprint(\"Meaning\"'of'\"life\");\nprint('a\\tb\\nc');\nprint('\\a');\nprint('\\0a\\0b');\nprint(len('abc'));\nprint('abc'+'def');\nprint('Ni!'*4);\nmyjob=\"hacker\";\nfor c in myjob: print(c, ' ')\nprint('k' in myjob);\nprint(str('spam'), repr('spam'));\nS='66';\nI=1;\nprint(int(S) + I);\nprint(S+str(I));\nprint(str(3.16), float('1.5'));\ni='5';\nprint(chr(ord(i)+1));\nprint(chr(ord(i)+2));\nprint('*******************');\nb='1101';\ni=0;\nwhile b!='':\n\ti=i*2+(ord(b[0])-ord('0'));\n\tb=b[1:];\n\nprint(bin(i));\nprint(i);\nprint(b);\nprint(int('1101', 2));\nprint(bin(13));\nprint('*****************');\ns='splot'\ns=s.replace('pl', 'pamal');\nprint(s);\nprint('That is %d %s bird!' %(1, 'lovely'));\nprint('this is {0} {1} bird!'.format(1, 'lovely'));\ns='spammy';\ns=s[:3]+'xx'+s[5:];\nprint(s);\nprint('***********replace*************');\nprint(s.replace('xx', 'mm'));\nprint('************************');\ns='xxxxxSpamxxxxxxspamxxxxxxx';\nwhere=s.find('Spam');\nprint(where);\ns=s[:where]+'EGGS'+s[where+4:];\nprint(s);\nprint('*********************');\ns='spammy';\nl=list(s);\nprint(l);\nl[3]='x';\nl[4]='x';\nprint(l);\ns=''.join(l);\nprint(s);\n" }, { "alpha_fraction": 0.5297029614448547, "alphanum_fraction": 0.5396039485931396, "avg_line_length": 9.684210777282715, "blob_id": "e085f1d122157a091d46347413a8662305635dd2", "content_id": "c349fe978125195a8472e3444d012c1fef0df666", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 202, "license_type": "no_license", "max_line_length": 37, "num_lines": 19, "path": "/Programming/C++/Effective_C++/chapter8/using_base.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A\n{\npublic:\n int fun(){return 1;}\n};\n\nclass B: public A\n{\npublic:\n using A::fun;\n int fun(){return 2;}\n};\nint main()\n{\n B a;\n std::cout<<a.A::fun()<<std::endl;\n}" }, { "alpha_fraction": 0.598901093006134, "alphanum_fraction": 0.6208791136741638, "avg_line_length": 21.75, "blob_id": "2977ebb27cce1c5a0c2334d5af96a24dd1ee075d", "content_id": "d4616edc5a8a6d0cc56806138ee3b3fe8cef8379", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 47, "num_lines": 8, "path": "/Programming/Python/16FunctionBasic/intersect.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def interssect(seq1, seq2):\n\tres = []\n\tfor x in seq1:\n\t\tif x in seq2:\n\t\t\tres.append(x)\n\treturn res\nprint(interssect('something', ['a', 'b', 'e']))\nprint(interssect('something', []))\n" }, { "alpha_fraction": 0.2952432930469513, "alphanum_fraction": 0.34171679615974426, "avg_line_length": 21.875, "blob_id": "6e73d1511b4930e04ebf40bf9b9e4bfa24f014b1", "content_id": "ce1af28ce5281d71d06670f7e7ad96d8484bab63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1829, "license_type": "no_license", "max_line_length": 87, "num_lines": 80, "path": "/Programming/C/The C programming Language/chapter4/atof_extension.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n\ndouble _atof(char source[]);\nint main()\n{\n char s1[]=\"-12.6666\";\n char s2[]=\"12.6666e-3\";\n char s3[]=\"12.6666e6\";\n printf(\"%s\\n%6.4f\\n%s\\n%6.4f\\n%s\\n%6.4f\\n\",s1,_atof(s1),s2,_atof(s2),s3,_atof(s3));\n return 0;\n}\n\ndouble _atof(char s[])\n{\n int i, sign=1;\n for(i=0;s[i]==' '||s[i]=='\\t'||s[i]=='\\n';i++)\n ;\n if(s[i]=='+'||s[i]=='-')\n {\n sign=(s[i]=='-')?-1:1;\n i++;\n }\n if((s[i]<'0'||s[i]>'9')&&s[i]!='.')\n {\n printf(\"Bad String: fail to parse %s\\n\", s);\n exit(1);\n }\n double val, power;\n for(val=0.0;s[i]>='0'&&s[i]<='9';++i)\n val=10.0*val+(s[i]-'0');\n if(s[i]=='.'||s[i]=='e'||s[i]=='E')\n ++i;\n else\n {\n printf(\"Bad String: fail to parse %s\\n\",s);\n exit(1);\n }\n for(power=1.0;s[i]>='0'&&s[i]<='9';++i)\n {\n val=10.0*val+(s[i]-'0');\n power*=10;\n }\n val*=sign;\n if(s[i]=='\\0')\n return val/power;\n else if(s[i]=='e'||s[i]=='E')\n {\n i++;\n sign=1;\n if(s[i]=='-'||s[i]=='+')\n {\n sign=(s[i]=='-')?-1:1;\n ++i;\n }\n if(s[i]>='0'&&s[i]<='9')\n {\n int j=0, i_pw=0;\n for(;s[i]>='0'&&s[i]<='9';++i)\n i_pw=10*i_pw+(s[i]-'0');\n if(s[i]=='\\0')\n {\n for(j=0;j<i_pw;++j)\n {\n if(sign==1)\n power/=10;\n else\n power*=10;\n }\n return val/power;\n }\n printf(\"Bad String: fail to parse %s\\n\",s);\n exit(1);\n }\n printf(\"Bad String: fail to parse %s\\n\",s);\n exit(1);\n }\n printf(\"Bad String: fail to parse %s\\n\",s);\n exit(1);\n}" }, { "alpha_fraction": 0.47031962871551514, "alphanum_fraction": 0.4931506812572479, "avg_line_length": 14.714285850524902, "blob_id": "7a5504286f2bbf2558321ecf31b3eb6c4f431a23", "content_id": "a20512ca38510ad70fec41457bdcc3f891dfd522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 219, "license_type": "no_license", "max_line_length": 32, "num_lines": 14, "path": "/Programming/C++/More_Effective_C++/chapter1/const_cast.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n int i=10;\n const int *p=&i;\n int *q=const_cast<int *>(p);\n int *ptr=(int *)p;\n *q=5;\n std::cout<<i<<std::endl;\n *ptr=6;\n std::cout<<i<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.561282217502594, "alphanum_fraction": 0.5813953280448914, "avg_line_length": 27.428571701049805, "blob_id": "299e6ba84a964f390048561cb28bc649145c773f", "content_id": "616fd4f597af20433e6f86b787c3105c9172ca86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1591, "license_type": "no_license", "max_line_length": 100, "num_lines": 56, "path": "/Project/SNS/test_main.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"rp2p.h\"\n#include <iostream>\n#include <sys/socket.h>\n#include <string.h>\n#include <arpa/inet.h>\n#include <errno.h>\n\nint main()\n{\n int on=1;\n std::string s;\n int sock=socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);\n std::cout<<sock<<std::endl;\n setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));\n struct timeval tv;\n tv.tv_sec=2;\n tv.tv_usec=0;\n setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &on, sizeof(on));\n sockaddr_in server;\n memset(&server, 0, sizeof(server));\n server.sin_family=AF_INET;\n server.sin_port=htons(6868);\n server.sin_addr.s_addr=htonl(INADDR_ANY);\n if(bind(sock, (struct sockaddr *)&server, sizeof(server))==-1)\n {\n std::cout<<\"error: bind \"<<strerror(errno)<<std::endl;\n exit(0);\n }\n socklen_t len=sizeof(server);\n getsockname(sock, (struct sockaddr *)&server, &len);\n char str[INET_ADDRSTRLEN];\n std::cout<<inet_ntop(AF_INET, (void *)&server.sin_addr.s_addr, str, INET_ADDRSTRLEN)<<std::endl;\n if(listen(sock, 5)==-1)\n {\n std::cout<<\"error: listen\"<<std::endl;\n exit(0);\n }\n while(true)\n {\n sockaddr_in client;\n socklen_t addrlen=sizeof(client);\n int consock=accept(sock, (struct sockaddr*)&client, &addrlen);\n if(consock==-1)\n continue;\n char buf[1500];\n memset(buf,0,1500);\n int ret=0;\n ret=recv(consock, buf+ret, 1500, 0);\n if(ret<0)\n continue;\n std::cout<<buf<<std::endl;\n std::string temp;\n proc_msg(buf, temp, consock);\n }\n return 0;\n}" }, { "alpha_fraction": 0.5735294222831726, "alphanum_fraction": 0.6176470518112183, "avg_line_length": 21.66666603088379, "blob_id": "4d872967b35c4aedfdbf668397797fd5edfd2687", "content_id": "b64657567613cd4a2105b7e256d572789e4f5c08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 38, "num_lines": 3, "path": "/Programming/Python/19HighLevelFunction/nested.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "action = (lambda x: (lambda y: x + y))\nact=action(66)\nprint(act(2))\n" }, { "alpha_fraction": 0.44097745418548584, "alphanum_fraction": 0.4488721787929535, "avg_line_length": 16.859060287475586, "blob_id": "f5af627f738bfc03ef25497134fd39f58e7c9623", "content_id": "138f80c60654e6210fa1730d7dc43299f5941a4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2660, "license_type": "no_license", "max_line_length": 56, "num_lines": 149, "path": "/Programming/C/The C programming Language/chapter6/binary_tree.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<ctype.h>\n#include<string.h>\n#define MAXWORD 100\n\nstruct tnode *addtree(struct tnode *, char *);\nvoid treeprint(struct tnode *);\nint getword(char *, int);\n\nstruct tnode{\n char *word;\n int count;\n struct tnode *left;\n struct tnode *right;\n};\n\nint main()\n{\n struct tnode *root;\n char word[MAXWORD];\n root=NULL;\n while(getword(word, MAXWORD)!=EOF)\n if(isalpha(word[0]))\n root=addtree(root, word);\n treeprint(root);\n return 0;\n}\n\nstruct tnode *talloc(void);\nchar * _strdup(char *);\n\nstruct tnode *addtree(struct tnode *p, char *w)\n{\n int cond;\n if(p==NULL)\n {\n p=talloc();\n p->word=_strdup(w);\n p->count=1;\n p->left=p->right=NULL;\n }\n else if((cond=strcmp(w, p->word))==0)\n p->count++;\n else if(cond<0)\n p->left=addtree(p->left, w);\n else\n p->right=addtree(p->right, w);\n return p;\n}\n\nvoid treeprint(struct tnode *p)\n{\n if(p!=NULL)\n {\n treeprint(p->left);\n printf(\"%4d %s\\n\", p->count, p->word);\n treeprint(p->right);\n }\n}\n\n#include<stdlib.h>\n\nstruct tnode *talloc(void)\n{\n return (struct tnode *)malloc(sizeof(struct tnode));\n}\n\nchar *_strdup(char *s)\n{\n char *p;\n p=(char *)malloc(strlen(s)+1);\n if(p!=NULL)\n strcpy(p,s);\n return p;\n}\n\nint getword(char *word, int lim)\n{\n int c, getch(void);\n void ungetch(int);\n char *w=word;\n \n while (isspace(c=getch()))\n ;\n if(c!=EOF)\n *w++=c;\n if(!isalpha(c))\n {\n if(c=='_'&&isalpha(*w++=getch()))\n {\n ungetch(*--w);\n goto alpha;\n }\n if(c=='\\\"'&&isalpha(*w++=getch()))\n {\n ungetch(*--w);\n goto alpha;\n }\n if(c=='/'&&(*w++=getch())=='/')\n {\n if(isalpha(*w++=getch()))\n goto alpha;\n ungetch(*--w);\n ungetch('/');\n }\n if(c=='/'&&(*w++=c=getch())=='*')\n {\n if(isalpha(*w++=getch()))\n goto alpha;\n ungetch(*--w);\n ungetch('*');\n }\n if(c=='#'&&isalpha(*w++=getch()))\n {\n ungetch(*--w);\n goto alpha;\n }\n *w='\\0';\n return c;\n }\nalpha:\n for(;--lim>0&&isalnum(*w++=c=getch());)\n ;\n if(c=='\\\"')\n {\n *w='\\0';\n return word[0];\n }\n if(c=='*'&&(*w++=getch())=='/')\n {\n *w='\\0';\n return word[0];\n }\n *--w='\\0';\n return word[0];\n}\n\nint ch[5];\nint flag=0;\n\nint getch(void)\n{\n return flag>0?ch[--flag]:getchar();\n}\n\nvoid ungetch(int c)\n{\n ch[flag++]=c;\n}" }, { "alpha_fraction": 0.41324201226234436, "alphanum_fraction": 0.456620991230011, "avg_line_length": 14.678571701049805, "blob_id": "f108fa3b52889f7c3190cb386b2f6c504b9ca483", "content_id": "b9ccc6dbb238f237751caa9a465f14befe3d6e1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 438, "license_type": "no_license", "max_line_length": 44, "num_lines": 28, "path": "/Algorithm/Algorithm/chapter2/max_subseq_sum_n.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nT max_sub_sum(T *a, int n);\n\nint main()\n{\n int a[]={-1, 10, -20, 101, 11, 12};\n std::cout<<max_sub_sum(a, 6)<<std::endl;\n return 0;\n}\n\ntemplate <typename T>\nT max_sub_sum(T *a, int n)\n{\n T sum=0, temp=0;\n for(int i=0; i<n; i++)\n {\n temp+=*(a+i);\n if(temp>sum)\n sum=temp;\n if(temp<0)\n {\n temp=0;\n }\n }\n return sum;\n}" }, { "alpha_fraction": 0.43923866748809814, "alphanum_fraction": 0.4773060083389282, "avg_line_length": 16.100000381469727, "blob_id": "5762991cc02bcf2319fa40be16daeea9e4d2368b", "content_id": "fbc6405b9ca57dc6e250a535a25c5e7e0fcbd982", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 683, "license_type": "no_license", "max_line_length": 48, "num_lines": 40, "path": "/Programming/Practice/Interpretation/1.29_simpson_rule0to1.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ndouble fun_h(double a, double b, int n)\n{\n return (a-b)/n;\n}\n\ndouble fun_y(double a, double b, int k, int n)\n{\n return a+k*fun_h(a, b, n);\n}\n\ndouble cube(int a, int b, int n)\n{\n if(n<=0)\n {\n std::cout<<\"bad divisor\"<<std::endl;\n return 0;\n }\n if(b<a)\n return 0;\n double sum=fun_y(a, b, 0, n);\n for(int i=1; i<n; i++)\n {\n int params;\n if(i%2==0)\n params=2;\n else\n params=4;\n sum+=params*fun_y(a, b, i, n);\n }\n return fun_h(a, b, n)*sum;\n}\n\nint main()\n{\n std::cout<<cube(0, 1, 100)<<std::endl;\n std::cout<<cube(0, 1, 100000000)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.6651982665061951, "alphanum_fraction": 0.6651982665061951, "avg_line_length": 29.33333396911621, "blob_id": "0b5899a7a5307dde0afb6dca4ddc0fe40fd4cb9d", "content_id": "10e0291dd38cbdeff8f3f5c251d1ae698c7fdaf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 454, "license_type": "no_license", "max_line_length": 68, "num_lines": 15, "path": "/Project/Client/conserv.h", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <string>\n\nstatic std::string serv_ip;\nstatic std::string log_acc;\n\nclass Conn\n{\npublic:\n static int sign_on(const std::string &, const std::string &);\n static int login(const std::string &, const std::string &);\n static int log_out(const std::string &);\n static int update_ip(const std::string &);\n static int request_fip(const std::string &, std::string &);\n static int adfrd(const std::string &, const std::string &, int);\n};" }, { "alpha_fraction": 0.4776119291782379, "alphanum_fraction": 0.49253731966018677, "avg_line_length": 19.100000381469727, "blob_id": "722f625a9a7ea04db00d40a20d9f3a0677e1feb3", "content_id": "67cddaf0bd44d155865200f660765583282879bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 201, "license_type": "no_license", "max_line_length": 32, "num_lines": 10, "path": "/Programming/Python/13Loop/dict_zip.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "keys = ['spam', 'eggs', 'toast']\nvals = [6, 6, 6]\nprint(list(zip(keys, vals)))\nD = {}\nfor (k, v) in zip(keys, vals):\n\tD[k]=v\nprint(D)\nprint('**********************')\nD = dict(zip(keys, vals))\nprint(D)\n" }, { "alpha_fraction": 0.5735294222831726, "alphanum_fraction": 0.5735294222831726, "avg_line_length": 14.11111068725586, "blob_id": "304ce93fa16e0a453555ba2a55e31200e760ec72", "content_id": "e537d1c3ef20164f5c8becb6d528d2300d08fcdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 26, "num_lines": 9, "path": "/Programming/Python/12If/if_else_shortcut.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "A = 't' if 'spam' else 'f'\nprint(A)\nA = 't' if '' else 'f'\nprint(A)\nZ='something'\nY='anything'\nX=True\nprint([Z, Y][X])\nprint(int(True))\n" }, { "alpha_fraction": 0.3774318993091583, "alphanum_fraction": 0.4182879328727722, "avg_line_length": 18.074073791503906, "blob_id": "bcc535703d99edc89f68f3cfa99baa1e0057fe57", "content_id": "f16708788d78e0f1bb6f05d2840f6e83aadbcad2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 514, "license_type": "no_license", "max_line_length": 35, "num_lines": 27, "path": "/Algorithm/Algorithm/chapter7/insertion_sort.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nvoid insertion_sort(T *a, int n)\n{\n for(int i=1; i<n; i++)\n {\n T temp=a[i];\n int j=i-1;\n for(; j>=0&&a[j]>temp; j--)\n a[j+1]=a[j];\n /*for(int k=0; k<n; k++)\n std::cout<<a[k]<<\" \";\n std::cout<<std::endl;*/\n a[++j]=temp;\n }\n}\n\nint main()\n{\n int a[]={9,8,7,6,5,4,3,2,1,0};\n insertion_sort(a, 10);\n for(int i=0; i<10; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.5507246255874634, "avg_line_length": 16.3125, "blob_id": "7496a7eb30117d6dbede2eb55441c9b705f82ab2", "content_id": "ac1cd15077270792931411436c5b914364eedbf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 276, "license_type": "no_license", "max_line_length": 76, "num_lines": 16, "path": "/Programming/C++/Effective_C++/chapter1/const_member_function.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A\n{\npublic:\n const int fun(){std::cout<<\"I am non-const.\"<<std::endl; return 0;}\n const int fun() const {std::cout<<\"I am const.\"<<std::endl; return 0;}\n};\nint main()\n{\n A a1;\n const A a2=a1;\n a1.fun();\n a2.fun();\n return 0;\n}" }, { "alpha_fraction": 0.5094339847564697, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 19.66666603088379, "blob_id": "feca8ba36f7f3831ebeef8bb00577ad8f0cf7552", "content_id": "4c9c90838360573a5d96c09b54ab16c2877020a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 371, "license_type": "no_license", "max_line_length": 80, "num_lines": 18, "path": "/Programming/C/The C programming Language/chapter1/floattemperature.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n/*print Fahrenheit-Celsius table for fahr =0, 20,...300 floating point version*/\nint main()\n{\n float fahr, celsius;\n float lower, upper, step;\n lower=0;\n upper=300;\n step=20;\n fahr=lower;\n while(fahr<=upper)\n {\n celsius=(5.0/9.0)*(fahr-32.0);\n printf(\"%3.0f %6.1f\\n\",fahr, celsius);\n fahr=fahr+step;\n }\n}" }, { "alpha_fraction": 0.42373448610305786, "alphanum_fraction": 0.4334562420845032, "avg_line_length": 17.515527725219727, "blob_id": "fec943ae04213ba1331870a46500046bbbffc205", "content_id": "af8a9ed50d085e497e8d449d01e8126aff2e945d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2983, "license_type": "no_license", "max_line_length": 96, "num_lines": 161, "path": "/Programming/Practice/Interpretation/algebra.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<stdexcept>\nusing std::string;\n\nstruct Node{\n Node(int c=0, string s=\"\", Node *n=NULL):coefficient(c), term(s), next(n){}\n int coefficient;\n string term;\n Node *next;\n};\n\nclass List{\npublic:\n List(Node *r=NULL):root(r){}\n void insert(int, string);\n string formular();\n ~List();\nprivate:\n string itoa(int);\n Node *root;\n};\n\nstring List::itoa(int n)\n{\n if(n<0)\n n=-n;\n string ret;\n do{\n ret.push_back(n%10);\n }while((n=n/10)>0);\n for(int i=0, j=ret.size()-1; i<j; i++, j--)\n {\n char ch=ret[i];\n ret[i]=ret[j];\n ret[j]=ch;\n }\n return ret;\n}\n\nvoid List::insert(int c, string term)\n{\n if(root==NULL)\n {\n root=new Node(c, term);\n if(root==NULL)\n {\n std::bad_alloc ba;\n throw ba;\n }\n return;\n }\n Node *pt=root;\n while(pt->next!=NULL)\n pt=pt->next;\n pt->next=new Node(c, term);\n if(pt->next==NULL)\n {\n std::bad_alloc ba;\n throw ba;\n }\n}\n\nstring List::formular()\n{\n if(root==NULL)\n {\n return \"\";\n }\n string ret;\n ret+=itoa(root->coefficient);\n ret+=\"*\";\n ret+=root->term;\n Node *p=root->next;\n while(p!=NULL)\n {\n if(p->coefficient<0)\n ret.push_back('-');\n else\n ret.push_back('+');\n ret+=itoa(p->coefficient);\n ret+=\"*\";\n ret+=p->term;\n p=p->next;\n }\n return ret;\n}\n\nList::~List()\n{\n while(root!=NULL)\n {\n delete root;\n root=root->next;\n }\n}\n\nclass Algebra{\npublic:\n Algebra(string s=\"\");\n Algebra & operator*(Algebra & rhs);\n Algebra & operator+(Algebra & rhs);\n void print(){std::cout<<equation.formular()<<std::endl;}\nprivate:\n void construct(string s);\n int stoi(string s);\n List equation;\n};\n\nint Algebra::stoi(string s)\n{\n int ret=0;\n for(int i=0; i<s.size(); i++)\n ret=ret*10+s[i]-'0';\n return ret;\n}\n\nAlgebra::Algebra(string s)\n{\n construct(s);\n}\n\nvoid Algebra::construct(string s)\n{\n for(int i=0; i<s.size(); i++)\n {\n int coe;\n string term;\n if(s[i]=='+')\n {\n int j;\n for(i+=1,j=0; i+j<s.size()&&(s[i+j]>='0'&&s[i+j]<='9');)\n j++;\n coe=stoi(s.substr(i, j));\n i+=j;\n }\n else if(s[i]=='-')\n {\n int j;\n for(i+=1, j=0; i+j<s.size()&&(s[i+j]>='0'&&s[i+j]<='9');)\n j++;\n coe=-stoi(s.substr(i, j));\n i+=j;\n }\n if((s[i]>='a'&&s[i]<='z')||(s[i]>='A'&&s[i]<='Z'))\n {\n int j;\n for(j=0; i+j<s.size()&&((s[i+j]>='a'&&s[i+j]<='z')||(s[i+j]>='A'&&s[i+j]<='Z'));j++)\n ;\n term=s.substr(i, j);\n i+=j-1;\n }\n equation.insert(coe, term);\n }\n}\n\nint main()\n{\n Algebra a(\"2a+3b\");\n a.print();\n return 0;\n}\n\n\n" }, { "alpha_fraction": 0.6391752362251282, "alphanum_fraction": 0.6391752362251282, "avg_line_length": 18.399999618530273, "blob_id": "ea8d2eb1271ccb98ff2c00993dddda1f3db46f79", "content_id": "dd011762c69a0160369f574444aca997d33f2dac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/Programming/Python/10Statement/interact.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "while True:\n\treply = raw_input('Enter Text:')\n\tif reply == 'stop':\n\t\tbreak\n\tprint(reply.upper())\n" }, { "alpha_fraction": 0.5185856819152832, "alphanum_fraction": 0.5448775887489319, "avg_line_length": 21.079999923706055, "blob_id": "949090e9962ade842afb52f4fc9415eebb54cbe1", "content_id": "91daf6a829d4dcddc43e34dd5572c9fc5c4e8e23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1103, "license_type": "no_license", "max_line_length": 74, "num_lines": 50, "path": "/Programming/C/Programming_in_Unix/chapter11/thread_exit_status.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <pthread.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nvoid * thr_fn1(void *arg)\n{\n printf(\"thread 1 returning\\n\");\n return ((void *)1);\n}\n\nvoid * thr_fn2(void * arg)\n{\n printf(\"thread 2 exiting\\n\");\n pthread_exit((void *)2);\n}\n\nint main()\n{\n int err;\n pthread_t tid1, tid2;\n void * tret;\n err=pthread_create(&tid1, NULL, thr_fn1, NULL);\n if(err!=0)\n {\n fprintf(stderr, \"error: create thread 1: %s\\n\", strerror(err));\n exit(0);\n }\n err=pthread_create(&tid2, NULL, thr_fn2, NULL);\n if(err !=0)\n {\n fprintf(stderr, \"error: create thread 2: %s\\n\", strerror(err));\n exit(0);\n }\n err=pthread_join(tid1, & tret);\n if(err != 0)\n {\n fprintf(stderr, \"error: join with thread 1: %s\\n\", strerror(err));\n exit(0);\n }\n printf(\"thread 1 exit code %d\\n\", (int)tret);\n err=pthread_join(tid2, & tret);\n if(err != 0)\n {\n fprintf(stderr, \"error: join with thread 2: %s\\n\", strerror(err));\n exit(0);\n }\n printf(\"thread 2 exit code %d\\n\", (int) tret);\n return 0;\n}" }, { "alpha_fraction": 0.4054833948612213, "alphanum_fraction": 0.460317462682724, "avg_line_length": 18.27777862548828, "blob_id": "4459292b08671663001bd2c562794c1a47756f3b", "content_id": "47ab7e417c3985709b78a425a5a57394551f5e51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 693, "license_type": "no_license", "max_line_length": 68, "num_lines": 36, "path": "/Algorithm/Algorithm/chapter7/quicksort.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate<typename T>\nvoid quicksort(T *a, int n);\n\nint main()\n{\n int a[]={1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 16, 7, 15, 8, 16};\n quicksort(a, 16);\n for(int i=0; i<16; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n return 0;\n}\n\ntemplate<typename T>\nvoid quicksort(T *a, int n)\n{\n if(n<=1)\n return;\n std::swap(a[0], a[n/2]);\n int pivot=0;\n for(int i=n-1; i>pivot;)\n {\n if(a[i]<a[pivot])\n {\n std::swap(a[i], a[pivot+1]);\n std::swap(a[pivot+1], a[pivot]);\n pivot++;\n continue;\n }\n i--;\n }\n quicksort(a, pivot);\n quicksort(a+pivot+1,n-pivot-1);\n}" }, { "alpha_fraction": 0.50390625, "alphanum_fraction": 0.5390625, "avg_line_length": 17.35714340209961, "blob_id": "cebf104869d1396c5b8688b17e3bd173c75c3d41", "content_id": "f616db3cb0d415410ee586c469c80d397929850c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 256, "license_type": "no_license", "max_line_length": 38, "num_lines": 14, "path": "/Programming/C++/Effective_STL/Vector_and_String/reserve_test.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<vector>\n\nint main()\n{\n std::vector<int> ivec;\n ivec.reserve(10);\n ivec[1]=10;\n for(int i=0; i<10 ;i++)\n std::cout<<ivec[i]<<\" \";\n std::cout<<std::endl;\n std::cout<<ivec.size()<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.5051413774490356, "alphanum_fraction": 0.51606684923172, "avg_line_length": 21.882352828979492, "blob_id": "c328be4a3b0f5c69880fb46a8f1c8562476f45ef", "content_id": "f33ecc793d39346107b57597e8dc8a8264e460dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1556, "license_type": "no_license", "max_line_length": 60, "num_lines": 68, "path": "/Programming/C/Programming_in_Unix/chapter12/synchronous_signal_handling.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <pthread.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <signal.h>\n\nint quitflag;\nsigset_t mask;\n\npthread_mutex_t lock=PTHREAD_MUTEX_INITIALIZER;\npthread_cond_t wait1=PTHREAD_COND_INITIALIZER;\n\nvoid * thr_fn(void * arg)\n{\n int err, signo;\n for(;;)\n {\n err=sigwait(&mask, &signo);\n if(err!=0)\n {\n fprintf(stderr, \"sigwait failed\\n\");\n exit(0);\n }\n switch(signo)\n {\n case SIGINT:\n printf(\"\\ninterrupt\\n\");\n break;\n case SIGQUIT:\n pthread_mutex_lock(&lock);\n quitflag=1;\n pthread_mutex_unlock(&lock);\n pthread_cond_signal(&wait1);\n return (0);\n default:\n printf(\"unexpected signal %d\\n\", signo);\n exit(1);\n }\n }\n}\n\nint main()\n{\n int err;\n sigset_t oldmask;\n pthread_t tid;\n sigemptyset(&mask);\n sigaddset(&mask, SIGINT);\n sigaddset(&mask, SIGQUIT);\n if((err=pthread_sigmask(SIG_BLOCK, &mask, &oldmask))!=0)\n {\n fprintf(stderr, \"SIG_BLOCK error\\n\");\n exit(0);\n }\n err=pthread_create(&tid, NULL, thr_fn, 0);\n if(err!=0)\n {\n fprintf(stderr, \"can't create thread\\n\");\n exit(0);\n }\n pthread_mutex_lock(&lock);\n while(quitflag==0)\n pthread_cond_wait(&wait1, &lock);\n pthread_mutex_unlock(&lock);\n quitflag=0;\n if(sigprocmask(SIG_SETMASK, & oldmask, NULL)<0)\n printf(\"SIG_SETMASK error\\n\");\n exit(0);\n}\n" }, { "alpha_fraction": 0.586094856262207, "alphanum_fraction": 0.593242347240448, "avg_line_length": 23.838708877563477, "blob_id": "3a223f87a005a39133207a2a35fb6bebf0b8ba87", "content_id": "8ddcf4ddcbe648158971f5a6aab84041203442c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1539, "license_type": "no_license", "max_line_length": 93, "num_lines": 62, "path": "/Programming/C/libcap/lookupdev.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "/*\nto compile \ngcc lookupdev.c -l pcap\nLooks for an interface, and lists the netwrok ip and mask associated with that interface.\n*/\n#include<stdio.h>\n#include<stdlib.h>\n#include<pcap.h>\n#include<errno.h>\n#include<sys/socket.h>\n#include<netinet/in.h>\n#include<arpa/inet.h>\n\nint main(int argc, char ** argv)\n{\n char *dev;/*name of the device to use*/\n char *net;/* dot notation of the network address */\n char *mask;/* dot notation of the network mast */\n int ret; /*return code */\n char errbuf[PCAP_ERRBUF_SIZE];\n bpf_u_int32 netp;/*ip*/\n bpf_u_int32 maskp; /*subnet mask*/\n struct in_addr addr;\n \n /*ask pcap to find a valid device for use to sniff on*/\n dev=pcap_lookupdev(errbuf);\n /*error checkig*/\n if(dev==NULL)\n {\n printf(\"%s\\n\",errbuf);\n exit(1);\n }\n /*print out device name*/\n printf(\"%s\\n\",dev);\n /*ask pcap for the network address and mask of the device*/\n ret=pcap_lookupnet(dev,&netp,&maskp,errbuf);\n /*this means that when fail to look up the device address, this function will return -1*/\n if(ret==-1)\n {\n printf(\"%s\\n\",errbuf);\n exit(1);\n }\n /*get the network address in a human readable form*/\n addr.s_addr=netp;\n net=inet_ntoa(addr);\n \n printf(\"NET: %s\\n\",net);\n if(net==NULL)\n {\n perror(\"inet_ntoa\");\n exit(1);\n }\n addr.s_addr=maskp;\n mask=inet_ntoa(addr);\n if(mask==NULL)\n {\n perror(\"inet_ntoa\");\n exit(1);\n }\n printf(\"MASK:%s\\n\", mask);\n return 0;\n}" }, { "alpha_fraction": 0.3324175775051117, "alphanum_fraction": 0.37362638115882874, "avg_line_length": 17.69230842590332, "blob_id": "d17ad0c59ecf610ecea8d0d1d5761fb325fa3898", "content_id": "14883676e6f2b2f9078d3ecfe4e9a1d6fca0f431", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 728, "license_type": "no_license", "max_line_length": 41, "num_lines": 39, "path": "/Algorithm/Algorithm/chapter2/max_multiply.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint multiply(int *a, int n);\n\nint main()\n{\n int a[]={-1, 10, -20, 101, 11, 12};\n std::cout<<multiply(a, 6)<<std::endl;\n return 0;\n}\n\nint multiply(int *a, int n)\n{\n int cnt=0,sum=1, sum2=1;\n int first=0, last=0;\n for(int i=0; i<n; i++) \n if(*(a+i)<0)\n {\n cnt++;\n if(first==last&&first==0)\n first=i;\n else\n last=i;\n }\n if(cnt%2==0)\n for(int i=0; i<n; i++)\n {\n sum*=*(a+i);\n }\n else\n for(int i=0; i<n; i++)\n {\n if(i<last)\n sum*=*(a+i);\n if(i>first)\n sum2*=*(a+i);\n }\n return sum>sum2?sum:sum2;\n}" }, { "alpha_fraction": 0.2849462330341339, "alphanum_fraction": 0.4193548262119293, "avg_line_length": 14.5, "blob_id": "79d575d1b226367a35ca78a9668d69e2db55a239", "content_id": "54fe5660921b5df3d9949c6d26907cf889f87be3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 32, "num_lines": 12, "path": "/Programming/Python/9TupleandFile/tuple.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "print((1,2)+(3,4));\nprint((1,2)*4);\nt=(1,2,3,4);\nprint(t[0], t[1:3]);\nx=(40);\nprint(x);\nx=(40,);\nprint(x);\nprint('**********************');\nt=(1,2,3,5,6);\nl=[x+20 for x in t];\nprint(l);\n" }, { "alpha_fraction": 0.6042154431343079, "alphanum_fraction": 0.6088992953300476, "avg_line_length": 16.1200008392334, "blob_id": "dc2e7a518b3098095ae447ba2c223b1d30af5b43", "content_id": "b3f13d8e9ea2ebe9db33152c0b0e1d17117eceea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 427, "license_type": "no_license", "max_line_length": 50, "num_lines": 25, "path": "/Project/Client/friends.h", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <vector>\n#include <string>\n\nstruct f_info\n{\n f_info():flag(0){};\n std::string account;\n std::string ip;\n int flag;\n};\n\nclass Friends\n{\npublic:\n Friends():ava(0){}\n void update_vec(std::vector<f_info>);\n int search_vec(const std::string &, f_info *);\n void list_all();\n std::vector<f_info> retvec(){return fvec;}\nprivate:\n std::vector<f_info> fvec;\n int ava;\n};\n\nstatic Friends friends;" }, { "alpha_fraction": 0.42257216572761536, "alphanum_fraction": 0.44619423151016235, "avg_line_length": 17.16666603088379, "blob_id": "cec4db055ca438e85ec253b40437b63c9471e391", "content_id": "ea2f27d1185310146107524c09c511cae207a211", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 762, "license_type": "no_license", "max_line_length": 45, "num_lines": 42, "path": "/Programming/C/The C programming Language/chapter5/str_n_series.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nvoid strcpy_n(char *s, char *t, int n);\nvoid strcat_n(char *s, char *t, int n);\nint strcmp_n(char *s, char *t, int n);\n\nint main()\n{\n char s[32]=\"ZHANG QIU CHEN\";\n char *t=\"CHEN\";\n char str[32];\n printf(\"%d\\n\",strcmp_n(s, t, 3));\n strcat_n(s, t, 3);\n printf(\"%s\\n\", s);\n strcpy_n(str,s,5);\n printf(\"%s\\n\", str);\n return 0;\n}\n\nvoid strcat_n(char *s, char *t, int n)\n{\n while(*s!='\\0')\n s++;\n for(int i=0;i<n&&(*s++=*(t+i))!='\\0';i++)\n ;\n}\n\nvoid strcpy_n(char *s, char *t, int n)\n{\n for(int i=0;i<n&&(*s++=*(t+i))!='\\0';i++)\n ;\n *s='\\0';\n}\n\nint strcmp_n(char *s, char *t, int n)\n{\n int i=0;\n for(i=0;i<n&&*(s+i)==*(t+i);i++)\n if(*s=='\\0')\n return 0;\n return *s-*t;\n}" }, { "alpha_fraction": 0.6724891066551208, "alphanum_fraction": 0.6899563074111938, "avg_line_length": 11.722222328186035, "blob_id": "3dfdc73ccea0bf1b1cc0e53396ef831b7ea8ca32", "content_id": "0869a6dae6206895efc6443263b1f81c90bae142", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 229, "license_type": "no_license", "max_line_length": 29, "num_lines": 18, "path": "/Programming/Python/15Doc/doc.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "\"\"\"\nModule documentation\nWords Go Here\n\"\"\"\n\nspam=40\ndef square(x):\n\t\"\"\"\n\tfunction documentation\n\tcan we have your liver then?\n\t\"\"\"\n\treturn x**2\n\nclass Employee:\n\t\"class documentation\"\n\tpass\nprint(square(6))\nprint(square.__doc__)\n" }, { "alpha_fraction": 0.3888888955116272, "alphanum_fraction": 0.5370370149612427, "avg_line_length": 15.199999809265137, "blob_id": "67300a8e16f10c9f3a86ba9c46809a963a8427dc", "content_id": "e1fb570128f3aae639dfa0ce2031323ccd73c025", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 162, "license_type": "no_license", "max_line_length": 29, "num_lines": 10, "path": "/Programming/Python/4ObjectType/tuple.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "T=(1,2,3,6);\nprint(len(T));\nT+=(5,6);\nprint(T);\nprint(T.index(5));\nprint(T.count(6));\nT1=('spam', 3.0, [11,22,33]);\nprint(T1[1]);\nprint(T1[2][1]);\n#T1.append(6);\n" }, { "alpha_fraction": 0.6041666865348816, "alphanum_fraction": 0.625, "avg_line_length": 14.0625, "blob_id": "f5ae738c862b5e19675ffca0c35d363296626a99", "content_id": "ea6b9ac6d8135ff5c8352158cbc2b0956766f724", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 240, "license_type": "no_license", "max_line_length": 46, "num_lines": 16, "path": "/Programming/C++/Effective_C++/chapter7/template_recursion.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <unsigned n> \nstruct Factorial{\n enum { value=n*Factorial<n-1>::value};\n};\ntemplate <>\nstruct Factorial<0>{\n enum {value =1};\n};\n\nint main()\n{\n std::cout<<Factorial<5>::value<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.5244444608688354, "alphanum_fraction": 0.5377777814865112, "avg_line_length": 15.142857551574707, "blob_id": "fd5554f88a81fdc5ee77ec906134a3827fe6aeb7", "content_id": "796ebed8acd33a9be0fc014927e142777f1c1548", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 225, "license_type": "no_license", "max_line_length": 42, "num_lines": 14, "path": "/Programming/C/Programming_in_Unix/chapter4/chdir.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <unistd.h>\n#include <stdlib.h>\n\nint main()\n{\n if(chdir(\"/tmp\")<0)\n {\n fprintf(stderr, \"error: chdir\\n\");\n exit(0);\n }\n printf(\"chdir to /tmp succeeded\\n\");\n return 0;\n}" }, { "alpha_fraction": 0.5310457348823547, "alphanum_fraction": 0.5539215803146362, "avg_line_length": 15.567567825317383, "blob_id": "7257d8a254794577a3ee1b820348d23f8f170f53", "content_id": "026d37bac66ab4e25ce2e832c4030e5e3cae4c28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 612, "license_type": "no_license", "max_line_length": 53, "num_lines": 37, "path": "/Programming/C/Programming_in_Unix/chapter7/exit_handler.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <unistd.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nvoid my_exit1(void);\nvoid my_exit2(void);\n\nint main()\n{\n if(atexit(my_exit2)!=0)\n {\n fprintf(stderr, \"error: register, atexit\\n\");\n return 0;\n }\n if(atexit(my_exit1)!=0)\n {\n fprintf(stderr, \"error: register, atexit\\n\");\n return 0;\n }\n if(atexit(my_exit1)!=0)\n {\n fprintf(stderr, \"error: register, atexit\\n\");\n return 0;\n }\n printf(\"main is done\\n\");\n return 0;\n}\n\nvoid my_exit1()\n{\n printf(\"first exit handler\\n\");\n}\n\nvoid my_exit2()\n{\n printf(\"second exit handler\\n\");\n}" }, { "alpha_fraction": 0.40700218081474304, "alphanum_fraction": 0.44638949632644653, "avg_line_length": 15.357142448425293, "blob_id": "1004eb5d0bdeea18ba55fe68508ed8a43080d03e", "content_id": "3624815330d46fe3086779c137e7025c83f47bd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 457, "license_type": "no_license", "max_line_length": 44, "num_lines": 28, "path": "/Algorithm/Algorithm/chapter2/max_subseq_sum_n2.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nT max_sub_sum(T *a, int n);\n\nint main()\n{\n int a[]={-1, 10, -20, 101, 11, 12};\n std::cout<<max_sub_sum(a, 6)<<std::endl;\n return 0;\n}\n\ntemplate <typename T>\nT max_sub_sum(T * a, int n)\n{\n T sum=0, temp=0;\n for(int i=0;i<n;i++)\n {\n temp=0;\n for(int j=i; j<n; j++)\n {\n temp+=*(a+j);\n if(temp>sum)\n sum=temp;\n }\n }\n return sum;\n}" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7463768124580383, "avg_line_length": 18.714284896850586, "blob_id": "23505f6add55d396af8a7fe9d4ffebd834cda7ba", "content_id": "c6ac959dc00cf78aa4fe117624c337cf35b5ac40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 138, "license_type": "no_license", "max_line_length": 39, "num_lines": 7, "path": "/Programming/Lua/3Statements/tolerance.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "local tolerance = 10\nfunction isturnback(angle)\n\tangle = angle % 360\n\treturn (math.abs(angle-180)<tolerance)\nend\n\nprint(isturnback(-180))\n" }, { "alpha_fraction": 0.48181816935539246, "alphanum_fraction": 0.4909090995788574, "avg_line_length": 12.791666984558105, "blob_id": "e8a7eefe1b1a24e7f17e7608ae49cd4de50797db", "content_id": "f3525fabc526d8300f8aba543582654e10a97fdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 330, "license_type": "no_license", "max_line_length": 23, "num_lines": 24, "path": "/Programming/C/The C programming Language/chapter4/rever_recur.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<string.h>\n\nvoid reverse(char s[]);\nint main()\n{\n char s[]=\"ZHANG QIU CHEN\";\n reverse(s);\n printf(\"%s\\n\", s);\n return 0;\n}\n\nvoid reverse(char s[])\n{\n static int i=0;\n int j=strlen(s)-1;\n char temp;\n temp=s[i];\n s[i]=s[j-i];\n s[j-i]=temp;\n i++;\n if(i<j-i)\n reverse(s);\n}" }, { "alpha_fraction": 0.5575221180915833, "alphanum_fraction": 0.5575221180915833, "avg_line_length": 7.769230842590332, "blob_id": "f63af48f2591993a11d040ea8a29ede9cc035aed", "content_id": "34e55b22b8293c441955939ed709ca2fff5839c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 113, "license_type": "no_license", "max_line_length": 31, "num_lines": 13, "path": "/Programming/C/Programming_in_Unix/chapter15/test_static/main.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"main.h\"\n#include <stdio.h>\n\nvoid func()\n{\n printf(\"func from main\\n\");\n}\n\nint\nmain()\n{\n func();\n}" }, { "alpha_fraction": 0.4883720874786377, "alphanum_fraction": 0.4883720874786377, "avg_line_length": 7.800000190734863, "blob_id": "0d5a2623b84be2357f6db5fb26b2d3511a27b5bc", "content_id": "241f1adad85a76870148cc674eff09bb4f36adbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 43, "license_type": "no_license", "max_line_length": 16, "num_lines": 5, "path": "/Programming/C++/More_Effective_C++/chapter6/test.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "void a(){};\nvoid a(int b){};\nint main()\n{\n}" }, { "alpha_fraction": 0.6629213690757751, "alphanum_fraction": 0.6629213690757751, "avg_line_length": 11.714285850524902, "blob_id": "08332330e5fa7dc38323ed031e14174f8f82888c", "content_id": "3afb60691af9e179c9e50811d7e2a90748c540d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 21, "num_lines": 7, "path": "/Programming/Python/Class/third.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class test:\n\tpass\ntest.name=\"test\"\na=test()\nb=test()\na.name=\"base\"\nprint(a.name, b.name)\n" }, { "alpha_fraction": 0.5774877667427063, "alphanum_fraction": 0.5872756838798523, "avg_line_length": 20.928571701049805, "blob_id": "03427bd69b4477fb2163ed842e640bd51fb2ab28", "content_id": "99d0a4b415788af493624e20ecd41602da8e8c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 613, "license_type": "no_license", "max_line_length": 68, "num_lines": 28, "path": "/Programming/C/Programming_in_Unix/chapter17/bind_usock.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <sys/un.h>\n#include <stddef.h>\n#include <string.h>\n#include <stdlib.h>\n#include <stdio.h>\n\nint\nmain()\n{\n int fd, size;\n struct sockaddr_un un;\n un.sun_family=AF_UNIX;\n strcpy(un.sun_path, \"foo.socket\");\n if((fd=socket(AF_UNIX, SOCK_STREAM, 0))<0)\n {\n fprintf(stderr, \"socket failed\\n\");\n exit(0);\n }\n size=offsetof(struct sockaddr_un, sun_path)+strlen(un.sun_path);\n if(bind(fd, (struct sockaddr *)&un, size)<0)\n {\n fprintf(stderr, \"bind failed\\n\");\n exit(0);\n }\n printf(\"UNIX domain socket bound\\n\");\n exit(0);\n}" }, { "alpha_fraction": 0.4246162176132202, "alphanum_fraction": 0.445449560880661, "avg_line_length": 18.3068790435791, "blob_id": "0949f4f7ec01f4fe841fa80df8a93bbb45438973", "content_id": "5670e3f872eea57ae82fe0f6a39cf9172f07ce5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3648, "license_type": "no_license", "max_line_length": 67, "num_lines": 189, "path": "/Algorithm/Algorithm/chapter6/priority_queue_heap.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nclass Priority{\npublic:\n Priority(int max=10):sz(0), max_size(max), arr(new T[max]){}\n Priority(T *a, int n);\n Priority(Priority &);\n Priority & operator=(Priority);\n void swap(Priority &);\n void insert(T x);\n int size(){return size;}\n T delete_min();\n void print();\n ~Priority(){delete []arr;}\nprivate:\n void build_heap();\n void sift_down(int index);\n int sz;\n int max_size;\n T * arr;\n};\n\ntemplate <typename T>\nvoid Priority<T>::print()\n{\n for(int i=0; i<sz; i++)\n std::cout<<*(arr+i)<<\" \";\n std::cout<<std::endl;\n}\n\ntemplate <typename T>\nvoid Priority<T>::swap(Priority & p)\n{\n using std::swap;\n swap(sz, p.sz);\n swap(max_size, p.max_size);\n swap(arr, p.arr);\n}\n\ntemplate <typename T>\nPriority<T>::Priority(T *a, int n):sz(n), max_size(n),arr(new T[n])\n{\n for(int i=0; i<n; i++)\n *(arr+i)=*(a+i);\n build_heap();\n}\n\ntemplate <typename T>\nvoid Priority<T>::sift_down(int index)\n{\n while(index*2<=sz)\n {\n if(index*2==sz)\n {\n if(arr[index-1]<=arr[sz-1])\n return;\n std::swap(arr[index-1], arr[sz-1]);\n return;\n }\n if(arr[index*2]>arr[index*2-1])\n {\n if(arr[index-1]>arr[index*2-1])\n {\n std::swap(arr[index-1],arr[index*2-1]);\n index*=2;\n continue;\n }\n break;\n }\n else \n {\n if(arr[index*2]<arr[index-1])\n {\n std::swap(arr[index-1],arr[index*2]);\n index=index*2+1;\n continue;\n }\n break;\n }\n }\n}\n\ntemplate <typename T>\nvoid Priority<T>::build_heap()\n{\n for(int i=sz/2; i>0;i--)\n sift_down(i);\n \n}\n\ntemplate <typename T>\nPriority<T>::Priority(Priority & p)\n{\n if(arr==p.arr)\n return;\n arr=new T[p.sz];\n for(int i=0; i<p.sz; i++)\n *(arr+i)=*(p.arr+i);\n sz=p.sz;\n max_size=p.max_size;\n}\n\ntemplate <typename T>\nPriority<T> & Priority<T>::operator=(Priority p)\n{\n swap(p);\n return *this;\n}\n\ntemplate <typename T>\nvoid Priority<T>::insert(T x)\n{\n if(sz>=max_size)\n {\n std::cout<<\"error: queue is full\"<<std::endl;\n return;\n }\n if(sz==0)\n {\n *arr=x;\n sz++;\n return;\n }\n int i=sz++;\n while(i!=0&&*(arr+i/2)>x)\n {\n *(arr+i)=*(arr+i/2);\n i/=2;\n }\n *(arr+i)=x;\n}\n\ntemplate <typename T>\nT Priority<T>::delete_min()\n{\n if(sz==0)\n {\n std::cout<<\"empty queue\"<<std::endl;\n return -1;\n }\n int index=1, i=--sz;\n int temp=*(arr+sz), ret=*arr;\n while(index<=(sz+1)/2)\n {\n if(index*2==sz+1)\n {\n *(arr+index-1)=*(arr+sz);\n return ret;\n }\n if(*(arr+2*index-1)<*(arr+2*index))\n {\n if(temp<*(arr+2*index-1))\n {\n *(arr+index-1)=temp;\n return ret;\n }\n *(arr+index-1)=*(arr+2*index-1);\n index=2*index;\n }\n else\n {\n if(temp<*(arr+2*index))\n {\n *(arr+index-1)=temp;\n return ret;\n }\n *(arr+index-1)=*(arr+2*index);\n index=2*index+1;\n }\n }\n *(arr+index-1)=temp;\n return ret;\n}\n\nint main()\n{\n int a[]={0,9,8,7,6,5,4,3,2,1};\n Priority<int> p(a,10);\n for(int i=0; i<12; i++)\n {\n //p.delete_min();\n std::cout<<p.delete_min()<<\" \";\n //p.print();\n }\n std::cout<<std::endl;\n \n return 0;\n}" }, { "alpha_fraction": 0.6607773900032043, "alphanum_fraction": 0.6607773900032043, "avg_line_length": 16.6875, "blob_id": "f69cf5df1407cbb0c8b6eb3b08ab5dce69076b0d", "content_id": "f400cbee56df3b7e3574f59403c6c64a22897d7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 283, "license_type": "no_license", "max_line_length": 42, "num_lines": 16, "path": "/Programming/JAVA/Thinking in JAVA/chapter10/abstract/Abstract.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "abstract class Base{\n\tabstract void f();\n}\n\nclass Derived extends Base{\n\tvoid f(){System.out.println(\"extended\");}\n}\n\npublic class Abstract{\n\tstatic void func(Base s){s.f();}\n\tpublic static void main(String [] args){\n\t\t//Base b=new Base();\n\t\tDerived d=new Derived();\n\t\tfunc(d);\n\t}\n}\n" }, { "alpha_fraction": 0.47089946269989014, "alphanum_fraction": 0.48148149251937866, "avg_line_length": 8.047618865966797, "blob_id": "91d449f0c05a3f82b007dce394fbe2f70fe38207", "content_id": "0b7f940966eab071ef208c89b8a2a0fdd0dd553a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 189, "license_type": "no_license", "max_line_length": 24, "num_lines": 21, "path": "/Programming/Practice/Interpretation/1.32_fcallf.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntypedef int (*iif)(int);\nint f(iif fi) \n{\n return fi(2);\n \n}\n\nint fun(int i)\n{\n return i;\n}\n\nint main()\n{\n int (*g) (iif);\n g=f;\n f(g);\n return 0;\n}" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.625, "avg_line_length": 19, "blob_id": "533ce6538f1b30411c0e9764d4d290b4ddf3e4e1", "content_id": "780551614b13df16f197fca7ab41f575c133ef54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 31, "num_lines": 4, "path": "/Programming/Python/19HighLevelFunction/map.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def inc(x):\n\treturn x + 10\ncounters = [6, 6, 6]\nprint(list(map(inc, counters)))\n" }, { "alpha_fraction": 0.47413063049316406, "alphanum_fraction": 0.504664957523346, "avg_line_length": 19.6842098236084, "blob_id": "6ce6f161bddcc934a882b798fca370fe9ab8a94b", "content_id": "2a52c9aa9ee893342d55747b7933e3a5647e94bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1179, "license_type": "no_license", "max_line_length": 68, "num_lines": 57, "path": "/Algorithm/Algorithm/chapter7/mergesort.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate<typename T>\nvoid mergesort(T *a, int n);\n\nint main()\n{\n int a[]={1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16};\n mergesort(a, 16);\n for(int i=0; i<16; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n // std::cout<<what<<std::endl;\n return 0;\n}\n\ntemplate<typename T>\nvoid merge(T *a, T *tar, int left, int center, int right)\n{\n int left_end=center-1;\n int tmp=left, ls=left, rs=center;\n while(ls<=left_end&&rs<=right)\n {\n if(a[ls]<a[rs])\n tar[tmp++]=a[ls++];\n else\n tar[tmp++]=a[rs++];\n }\n while(rs<=right)\n tar[tmp++]=a[rs++];\n while(ls<=left_end)\n tar[tmp++]=a[ls++];\n for(;left<=right; left++)\n a[left]=tar[left];\n}\n\ntemplate<typename T>\nvoid m_sort(T *a, T *tar, int left, int right)\n{\n if(left<right)\n {\n int center=(left+right)/2;\n m_sort(a, tar, left, center);\n m_sort(a, tar, center+1, right);\n merge(a, tar, left, center+1, right);\n }\n}\n\ntemplate<typename T>\nvoid mergesort(T *a, int n)\n{\n if(n<=1)\n return;\n T *tar=new T[n];\n m_sort(a, tar, 0, n-1);\n delete [] tar;\n}\n" }, { "alpha_fraction": 0.5178571343421936, "alphanum_fraction": 0.5892857313156128, "avg_line_length": 6.714285850524902, "blob_id": "1d2347fdf3a3600779f24e556616bb9a547d2936", "content_id": "db6d69a5ca615f8f00a064c82e7973d30f34afa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 11, "num_lines": 7, "path": "/Programming/Python/13Loop/while_add.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "a=0\nb=10\nwhile a<b:\n\tprint(a)\n\ta+=1\nwhile True:\n\tpass\n\t\n" }, { "alpha_fraction": 0.38235294818878174, "alphanum_fraction": 0.5661764740943909, "avg_line_length": 12.600000381469727, "blob_id": "0b24edc8154d5ea429bb2df6f3b4e09a44765987", "content_id": "d3e13c3b6ec2a88519f0916e27a1e40105b9651f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 16, "num_lines": 10, "path": "/Programming/Python/6DynamicType/copy.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "l1=[1,2,3,6];\nl2=l1[:];\nprint(l1, l2);\n#l1[0]=16;\nprint(l1, l2);\nprint(l1 is l2);\nprint(l1==l2);\nl2=l1;\nprint(l1 is l2);\nprint(l1==l2);\n" }, { "alpha_fraction": 0.5781062245368958, "alphanum_fraction": 0.6113625764846802, "avg_line_length": 22.130342483520508, "blob_id": "e8e53e9c441eab5b113b81e4768a3f21a881da21", "content_id": "6c50ac642bbd188b517a365d4f1bcd25ac974f79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 10825, "license_type": "no_license", "max_line_length": 148, "num_lines": 468, "path": "/HomeWork/Lua/json.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "json = {}\nfunction Marshal(json_str)\n\tm, n = MarshalWorker(json_str, 1)\n\tif m == nil then\n\t\treturn nil, 'error_type'\n\telse \n\t\treturn m\n\tend\nend\n\nfunction Unmarshal(lua_val)\n\treturn UnmarshalWorker(lua_val)\nend\n\nfunction test(lua_val)\n\treturn nil\nend\n\njson.Marshal = Marshal\njson.Unmarshal = Unmarshal\n\n\nlocal encode = function (s)\n\tlocal i = 1\n\tlocal ret = {}\n\twhile i < string.len(s)+1 do\n\t\tlocal curchar = string.sub(s, i, i)\n\t\tif curchar == '\\\\' then\n\t\t\ttable.insert(ret, '\\\\\\\\')\n\t\telseif curchar == '\"' then\n\t\t\ttable.insert(ret, '\\\\\"')\n\t\telseif curchar == '/' then\n\t\t\ttable.insert(ret, '\\\\/')\n\t\telseif curchar == '\\b' then\n\t\t\ttable.insert(ret, '\\\\b')\n\t\telseif curchar == '\\n' then\n\t\t\ttable.insert(ret, '\\\\n')\n\t\telseif curchar == '\\r' then\n\t\t\ttable.insert(ret, '\\\\r')\n\t\telseif curchar == '\\t' then\n\t\t\ttable.insert(ret, '\\\\t')\n\t\telseif curchar == '\\f' then \n\t\t\ttable.insert(ret, '\\\\f')\n\t\telse\n\t\t\ttable.insert(ret, curchar)\n\t\tend\n\t\ti = i+1\n\tend\n\ts = table.concat(ret)\n\ts = encodeUTF8(s)\n\treturn s\nend\n\nlocal utf_8 = function(uc)\n\tlocal a, b, c, d = string.byte(uc, 1, 4)\n\ta, b, c, d = a or 0, b or 0, c or 0, d or 0\n\tlocal val\n\tif a <= 0x7f then\n\t\tval = a\n\telseif 0xc0 <= a and a<= 0xdf and b>=0x80 then\n\t\tval = (a - 0xc0) * 0x40 + b - 0x80\n\telseif 0xe0 <= a and a <= 0xef and b >= 0x80 and c >= 0x80 then\n\t\tval = ((a - 0xe0) * 0x40 + b - 0x80)*0x40 + c -0x80\n\telseif 0xf0 <= a and a <= 0xf7 and b > 0x80 and c >= 0x80 and d >=0x80 then\n\t\tval = (((a - 0xf0) * 0x40 + b - 0x80)*0x40 + c - 0x80)*0x40 + d - 0x80\n\telse\n\t\treturn \"\"\n\tend\n\tif val < 0xffff then\n\t\treturn string.format(\"\\\\u%.4x\", val)\n\telseif val <= 0x10ffff then\n\t\tval = val - 0x10000\n\t\tlocal high, low = 0xD800 + math.floor(val/0x400), 0xDC00 + ( val % 0x400)\n\t\treturn string.format(\"\\\\u%.4x\\\\u$.4x\", high, low)\n\telse\n\t\treturn \"\"\n\tend\nend\n\nlocal substitute = function(str, pat, rep)\n\tif string.find(str, pat) then\n\t\treturn string.gsub(str, pat, rep)\n\telse\n\t\treturn str\n\tend\nend\n\nfunction encodeUTF8(ustr)\n--\tustr = substitute(ustr, \"[\\1-\\31\\127]\", utf_8)\n\tif string.find(ustr, \"[\\128-\\255]\") then\n\t\tustr = substitute(ustr, \"[\\192-\\223][\\128-\\255]\", utf_8)\n\t\tustr = substitute(ustr, \"[\\224-\\239][\\128-\\255][\\128-\\255]\", utf_8)\n\t\tustr = substitute(ustr, \"[\\240-\\255][\\128-\\255][\\128-\\255][\\128-\\255]\", utf_8)\n\tend\n\treturn ustr\nend\n\nlocal escape = function(s)\n\tlocal i = 1\n\tlocal ret = {}\n\tlocal n = string.len(s)+1\n\twhile i < n do\n\t\tlocal cur = string.sub(s, i, i)\n\t\tif cur == '\\\\' then\n\t\t\tlocal curchar = string.sub(s, i+1, i+1)\n\t\t\tif curchar == '\"' then\n\t\t\t\ttable.insert(ret, '\"')\n\t\t\t\ti = i+1\n\t\t\telseif curchar == '/' then\n\t\t\t\ttable.insert(ret, '/')\n\t\t\t\ti = i+1\n\t\t\telseif curchar == 'b' then\n\t\t\t\ttable.insert(ret, '\\b')\n\t\t\t\ti = i+1\n\t\t\telseif curchar == 'n' then\n\t\t\t\ttable.insert(ret, '\\n')\n\t\t\t\ti = i+1\n\t\t\telseif curchar == 'r' then\n\t\t\t\ttable.insert(ret, '\\r')\n\t\t\t\ti = i+1\n\t\t\telseif curchar == 't' then\n\t\t\t\ttable.insert(ret, '\\t')\n\t\t\t\ti = i+1\n\t\t\telseif curchar == 'f' then\n\t\t\t\ttable.insert(ret, '\\f')\n\t\t\t\ti = i+1\n\t\t\telseif curchar == '\\\\' then\n\t\t\t\ttable.insert(ret, '\\\\')\n\t\t\t\ti = i+1\n\t\t\telse \n\t\t\t\ttable.insert(ret, cur)\n\t\t\t\ttable.insert(ret, curchar)\n\t\t\t\ti = i+1\n\t\t\tend\n\t\telse \n\t\t\ttable.insert(ret, cur)\n\t\tend\n\t\ti = i + 1\n\tend\n\ts = table.concat(ret)\n\treturn s\nend\n\nlocal decode = function(s)\n\t--[==[s = string.gsub(s, '\\\\\"', '\"')\n\ts = string.gsub(s, '\\\\/', '/')\n\ts = string.gsub(s, '\\\\b', '\\b')\n\ts = string.gsub(s, '\\\\n', '\\n')\n\ts = string.gsub(s, '\\\\r', '\\r')\n\ts = string.gsub(s, '\\\\t', '\\t')\n\ts = string.gsub(s, '\\\\f', '\\f')\n\ts = string.gsub(s, '\\\\\\\\', '\\\\')\n\t]==]\n\ts = escape(s)\n\tlocal i = 1, 1\n\tlocal tmp = ''\n\twhile i < string.len(s)+1 do\n\t\ti = string.find(s, '\\\\u', i) \n\t\tif i then\n\t\t\ttmp = string.sub(s, 1, i-1)\n\t\t\tlocal uni = string.sub(s, i+2, i+5)\n\t\t\tlocal n = tonumber(uni, 16)\n\t\t\tlocal x\n\t\t\tif n < 0x80 then\n\t\t\t\tx = string.char(n % 0x80)\n\t\t\telseif n<0x800 then\n\t\t\t\tx = string.char(0xC0 + math.floor(n/0x40), 0x80+(math.floor(n)%0x40))\n\t\t\telseif n<=0xffff then\n\t\t\t\tx = string.char(0xE0 + math.floor(n/0x1000), 0x80 + (math.floor(n/0x40)%0x40), 0x80 + (math.floor(n)%0x40))\n\t\t\telseif n<= 0x10ffff then\n\t\t\t\tx = string.char(0xF0 + math.floor(n/0x40000), 0x80 + (math.floor(n/0x1000)%0x40), 0x80 + (math.floor(n/0x40)%0x40), 0x80 + (math.floor(n)%0x40))\n\t\t\tend\n\t\t\ttmp = tmp ..x\n\t\t\ttmp = tmp ..string.sub(s, i+6)\n\t\t\ts = tmp\n\t\telse\n\t\t\ti = string.len(s)+1\n\t\tend\n\tend\n\treturn s\nend\n\nfunction MarshalWorker(json_str, pos)\n\tlocal p = removeWhiteSpace(json_str, pos)\n\tlocal startChar = string.sub(json_str, p, p) \n\tif startChar == '{' then \n\t\treturn MarshalObject(json_str, p + 1)\n\tend\n\n\tif startChar == '[' then\n\t\treturn MarshalArray(json_str, p + 1)\n\tend\n\n\tlocal comment = string.sub(json_str, p, p+1)\n\tif comment == '/*' then\n\t\tlocal i = string.find(json_str, '*/')\n\t\treturn MarshalWorker(json_str, i+1)\n\tend\n\n\tif comment == '//' then\n\t\ti = string.find(json_str, '\\n')\n\t\treturn MarshalWorker(json_str, i+1)\n\tend\n\treturn nil, 'error_type'\nend\n\nfunction removeWhiteSpace(json_str, pos)\n\tlocal p = pos\n\tlocal n = string.len(json_str)\n\twhile string.find(' \\t\\n\\f\\b', string.sub(json_str, p, p), nil, true) and p <=n do\n\t\tp = p + 1\n\tend\n\tif p<=n then\n\t\treturn p\n\telse\n\t\treturn nil\n\tend\nend\n\nfunction MarshalObject(json_str, pos)\n\tlocal ret = {}\n\tlocal n = string.len(json_str)\n\twhile pos <= n do\n\t\tpos = removeWhiteSpace(json_str, pos)\n\t\tif pos == nil then\n\t\t\treturn nil, 'error_type'\n\t\tend\n\t\tlocal startChar = string.sub(json_str, pos, pos)\n\t\tif startChar == '}' then\n\t\t\treturn ret, pos+1\n\t\tend\n\t\tif startChar == ',' then\n\t\t\tpos =removeWhiteSpace(json_str, pos + 1)\n\t\t\tif pos == nil or type(pos) ~= 'number' then\n\t\t\t\treturn nil, 'error_type'\n\t\t\tend\n\t\t\tstartChar = string.sub(json_str, pos, pos)\n\t\tend\n\t\tlocal key = \"\"\n\t\tlocal val = \"\"\n\t\tif startChar == '\"' or startChar == \"'\" then\n\t\t\tkey, pos = collectKey(json_str, pos+1, startChar)\n\t\t\tif key == nil then\n\t\t\t\treturn nil, 'error_type'\n\t\t\tend\n\t\t\tkey = decode(key)\n\t\telse\n\t\t\treturn nil, 'error_type'\n\t\tend\n\t\tpos = removeWhiteSpace(json_str, pos)\n\t\tif pos == nil then\n\t\t\treturn nil, 'error_type'\n\t\tend\n\t\tstartChar = string.sub(json_str, pos, pos)\n\t\tif startChar == '{' then\n\t\t\tval, pos = MarshalObject(json_str, pos+1)\n\t\telseif startChar == '[' then\n\t\t\tval, pos = MarshalArray(json_str, pos+1)\n\t\t\tif val == nil then\n\t\t\t\treturn nil, 'error_type'\n\t\t\tend\n\t\telseif startChar == '\"' or startChar == \"'\" then\n\t\t\tval, pos = collectVal(json_str, pos+1, startChar)\n\t\t\tval = decode(val)\n\t\telseif string.find('+-1234567890.eE', startChar) then\n\t\t\tval, pos = collectNum(json_str, pos)\n\t\t\tval = val * 1\n\t\telse\n\t\t\tval, pos = constant(json_str, pos)\n\t\tend\n\t\tif pos == nil or type(pos)~= 'number' then\n\t\t\treturn nil, 'error_type'\n\t\tend\n\t\tret[key] = val\n\tend\n\treturn nil, 'further implementation'\nend\n\nfunction MarshalArray(json_str, pos)\n\tlocal ret = {}\n\tlocal i = 1\n\tlocal index = 1\n\tlocal n = string.len(json_str)\n\tlocal p = pos\n\twhile p <= n do\n\t\tval = \"\"\n\t\tp = removeWhiteSpace(json_str, p)\n\t\tif p == nil then\n\t\t\treturn nil, 'error_type'\n\t\tend\n\t\tstartChar = string.sub(json_str, p, p)\n\t\tif startChar == '\"' or startChar == \"'\" then\n\t\t\tval, p = collectVal(json_str, p+1, startChar)\n\t\t\tval = decode(val)\n\t\telseif startChar == '{' then\n\t\t\tval, p = MarshalObject(json_str, p+1)\n\t\telseif startChar == '[' then\n\t\t\tval, p = MarshalArray(json_str, p+1)\n\t\telseif string.find('+-1234567890.eE', startChar) then\n\t\t\tval, p = collectNum(json_str, p)\n\t\t\tval = tonumber(val)\n\t\t\tif val == nil then\n\t\t\t\treturn nil, 'error_type'\n\t\t\tend\n\t\telseif startChar == ']' then\n\t\t\treturn ret, p+1\n\t\telseif startChar == ',' then\n\t\t\tp = p + 1\n\t\telse \n\t\t\tval, p = constant(json_str, p)\n\t\tend\n\t\tif p == nil then\n\t\t\treturn nil, 'error_type'\n\t\tend\n\t\tif startChar ~= ',' then \n\t\t\tret [index] = val\n\t\t\tindex = index + 1\n\t\tend\n\t\tif type(p) ~='number' then\n\t\t\treturn nil, 'error_type'\n\t\tend\n\tend\nend\n\nfunction nextQuote(json_str, pos, startChar)\n\tlocal stop = pos\n\tlocal n = string.len(json_str)+1\n\twhile stop < n do\n\t\tif string.sub(json_str, stop, stop) == startChar then\n\t\t\treturn stop\n\t\tend\n\t\tif string.sub(json_str, stop, stop) == '\\\\' then\n\t\t\tstop = stop + 1\n\t\tend\n\t\tstop = stop + 1\n\tend\n\treturn stop\nend\n\nfunction collectKey(json_str, pos, startChar)\n\tlocal stop = nextQuote(json_str, pos, startChar)\n\tlocal i = string.find(json_str, ':', stop+1)\n\tif i == nil then\n\t\treturn nil\n\tend\n\treturn string.sub(json_str, pos, stop-1), i+1\nend\n\nfunction collectVal(json_str, pos, startChar)\n\tlocal stop = nextQuote(json_str, pos, startChar)\n\treturn string.sub(json_str, pos, stop-1), stop+1\nend\n\nfunction collectNum(json_str, pos)\n\tlocal i = pos\n\twhile string.find('+-1234567890.eE', string.sub(json_str, i, i)) do\n\t\ti = i + 1\n\tend\n\treturn string.sub(json_str, pos, i-1), i\nend\n\nfunction constant(json_str, pos)\n\tconsts = {[\"true\"]=true, [\"false\"]=false, [\"null\"]='tmp'}\n\tfor i, k in pairs(consts) do\n\t\tif string.sub(json_str, pos, pos + string.len(i)-1) == i then\n\t\t\tif i == \"null\" then\n\t\t\t\treturn nil, string.len('null')+pos\n\t\t\tend\n\t\t\treturn k, string.len(i) + pos\n\t\tend\n\tend\n\treturn nil, nil\nend\n\nfunction UnmarshalWorker(lua_val)\n\tif lua_val == nil then\n\t\treturn nil, 'error_type'\n\tend\n\tif next(lua_val) == nil then\n\t\treturn \"{}\"\n\tend\n\tlocal flag, n = ArrayorDict(lua_val)\n\tif flag then\n\t\treturn UnmarshalArray(lua_val, n)\n\telse\n\t\treturn UnmarshalObject(lua_val)\n\tend\nend\n\nfunction UnmarshalArray(lua_val, n)\n\tlocal ret = {}\n\ttable.insert(ret, \"[\")\n\tlocal i = 1\n\twhile i < n+1 do\n\t\tif lua_val[i] == nil then\n\t\t\ttable.insert(ret, \"null\")\n\t\t\ttable.insert(ret, ',')\n\t\telse\n\t\t\ttable.insert(ret, uniform(lua_val[i]))\n\t\t\ttable.insert(ret, \",\")\n\t\tend\n\t\ti = i+1\n\tend\n\tret = table.concat(ret)\n\tret = string.sub(ret, 1, string.len(ret)-1)\n\tret = ret ..\"]\"\n\treturn ret\nend\n\nfunction UnmarshalObject(lua_val)\n\tlocal ret = {}\n\ttable.insert(ret, '{')\n\tfor k, v in pairs(lua_val) do\n\t\tif type(k) == 'number' then\n\t\t\ttable.insert(ret, '\"')\n\t\t\ttable.insert(ret, tonumber(k))\n\t\t\ttable.insert(ret, '\"')\n\t\telse\n\t\t\ttable.insert(ret, uniform(k))\n\t\tend\n\t\ttable.insert(ret, ':')\n\t\ttable.insert(ret, uniform(v))\n\t\ttable.insert(ret, \",\")\n\tend\n\tret = table.concat(ret)\n\tret = string.sub(ret, 1, string.len(ret)-1)\n\tret = ret ..\"}\"\n\treturn ret\nend\n\nfunction ArrayorDict(lua_val)\n\tlocal max=0\n\tlocal flag = true\n\tfor k, v in pairs(lua_val) do\n\t\tif type(k) =='number' and math.floor(k) == k and k>=1 then\n\t\t\tmax = math.max(k, max)\n\t\telse \n\t\t\treturn false\n\t\tend\n\tend\n\treturn true, max\nend\n\nfunction uniform(k)\n\tif k == nil then\n\t\treturn 'null'\n\telseif k == true then\n\t\treturn 'true'\n\telseif k == false then\n\t\treturn 'false'\n\telseif type(k) == 'string' then\n\t\treturn '\"' ..encode(k) ..'\"'\n\telseif type(k)== 'number' then\n\t\treturn k\n\telseif type(k) == 'table' then\n\t\treturn UnmarshalWorker(k)\n\telse \n\t\treturn nil, 'error_type'\n\tend\nend\n\nfunction rightTable(o)\n\tlocal t = type(o)\n\treturn (t=='string' or t=='boolean' or t=='number' or t=='nil') or (t=='function' and o==null)\nend\n\n\t\nreturn json\n" }, { "alpha_fraction": 0.4052044749259949, "alphanum_fraction": 0.4312267601490021, "avg_line_length": 11.857142448425293, "blob_id": "51a21604aa870be2e7cb41f8e37d376049365e6e", "content_id": "53d215ff3e8a95296d148e6107fa793ed9f3c41c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 269, "license_type": "no_license", "max_line_length": 38, "num_lines": 21, "path": "/Programming/Practice/Interpretation/gcd.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint gcd(int m, int n)\n{\n int div;\n for(int i=0; true; i++)\n {\n div=m%n;\n if(div==0)\n return n;\n m=n;\n n=div;\n }\n return div;\n}\n\nint main()\n{\n std::cout<<gcd(10, 15)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.44117647409439087, "alphanum_fraction": 0.45588234066963196, "avg_line_length": 11.454545021057129, "blob_id": "de84565f06bd851e45534f26ccd3949426490401", "content_id": "e6f07d538a7abe156f1cd5ec2e815319a173f38f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 136, "license_type": "no_license", "max_line_length": 32, "num_lines": 11, "path": "/Programming/C++/More_Effective_C++/chapter1/refer_pointer.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n int *p=0;\n int * &r=p;\n int c='a';\n p=&c;\n std::cout<<*r<<r<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.6202531456947327, "alphanum_fraction": 0.6392405033111572, "avg_line_length": 16.55555534362793, "blob_id": "9eeaf74a3029bdaba59eff79757fcb7717849a31", "content_id": "2be6ff599a81c58e9e63eadf1135c98798ae0662", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 158, "license_type": "no_license", "max_line_length": 27, "num_lines": 9, "path": "/Programming/Python/17Namespace/class.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class tester:\n\tdef __init__(self, start):\n\t\tself.state=start\n\t\n\tdef __call__(self, label):\n\t\tprint(label, self.state)\n\t\tself.state+=1\nH=tester(99)\nH('juice')\n" }, { "alpha_fraction": 0.6916167736053467, "alphanum_fraction": 0.6931137442588806, "avg_line_length": 21.266666412353516, "blob_id": "60ab79eb238e2b8d3ccab973b51fa7c2d1bb3336", "content_id": "ee6185572d36828af756e922b3860418afac1a07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 668, "license_type": "no_license", "max_line_length": 54, "num_lines": 30, "path": "/Programming/JAVA/Thinking in JAVA/chapter11/controller/Controller.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "import java.util.*;\nabstract class Event{\n\tprivate long eventTime;\n\tprotected final long delayTime;\n\tpublic Event(long delayTime){\n\t\tthis.delayTime=delayTime;\n\t\tstart();\n\t}\n\tpublic void start(){\n\t\teventTime=System.nanoTime()+delayTime;\n\t}\n\tpublic boolean ready(){\n\t\treturn System.nanoTime()>=eventTime;\n\t}\n\tpublic abstract void action();\n}\n\npublic class Controller{\n\tprivate List<Event> eventList=new ArrayList<Event>();\n\tpublic void addEvent(Event c){eventList.add(c);}\n\tpublic void run(){\n\t\twhile(eventList.size()>0)\n\t\t\tfor(Event e:new ArrayList<Event>(eventList))\n\t\t\t\tif(e.ready()){\n\t\t\t\t\tSystem.out.println(e);\n\t\t\t\t\te.action();\n\t\t\t\t\teventList.remove(e);\n\t\t\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.4901098906993866, "alphanum_fraction": 0.5120879411697388, "avg_line_length": 14.706896781921387, "blob_id": "3b5358098df3c6938a42ff902c5dc0b32b02d960", "content_id": "7af04595b0ae5cb350db5078e9dac78121142737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 910, "license_type": "no_license", "max_line_length": 46, "num_lines": 58, "path": "/Algorithm/Algorithm/chapter8/relation_set.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass Disj_Set{\npublic:\n Disj_Set(int n):sz(n), dis(new int[n]){}\n void set_union(int root1, int root2);\n int find(int x);\n void print();\n ~Disj_Set();\nprivate:\n int sz;\n int * dis;\n};\n\nvoid Disj_Set::set_union(int root1, int root2)\n{\n if(root1>sz||root2>sz)\n {\n std::cout<<\"out of range\"<<std::endl;\n return;\n }\n dis[root2-1]=root1;\n}\n\nint Disj_Set::find(int x)\n{\n if(x>sz)\n {\n std::cout<<\"out of range\"<<std::endl;\n return -1;\n }\n if(dis[x-1]<=0)\n return x;\n else\n return (find(dis[x-1]));\n}\n\nvoid Disj_Set::print()\n{\n for(int i=0; i<sz;i++)\n std::cout<<dis[i]<<\" \";\n std::cout<<std::endl;\n}\n\nDisj_Set::~Disj_Set()\n{\n delete[]dis;\n}\n\nint main()\n{\n Disj_Set dis(10);\n dis.print();\n dis.set_union(1, 5);\n std::cout<<dis.find(5)<<std::endl;\n dis.print();\n return 0;\n}" }, { "alpha_fraction": 0.37735849618911743, "alphanum_fraction": 0.43396225571632385, "avg_line_length": 10.88888931274414, "blob_id": "8def2546e1f0f374d49e1dea64b71cc4443c9b82", "content_id": "aaaa8b3e63a735b3e061c378f05a77b403c56d4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 106, "license_type": "no_license", "max_line_length": 52, "num_lines": 9, "path": "/Programming/C/libcap/teststruct.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "struct test{\n int a;\n int b;\n};\n\nint main()\n{\n struct test a[]={{.a=10, .b=1},{ .a =1, .b=10}};\n}" }, { "alpha_fraction": 0.5070422291755676, "alphanum_fraction": 0.5492957830429077, "avg_line_length": 14.25, "blob_id": "af17526e2e0c2b47f1f4c6fbd3f45592a2920014", "content_id": "c0bcf11a83b2a106e78367cf8d7df534bcfff8d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 426, "license_type": "no_license", "max_line_length": 79, "num_lines": 28, "path": "/Programming/Practice/Interpretation/1.31_pi.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ndouble product(int a, int b, int base)\n{\n double sum=1;\n for(;a<=b; a+=base)\n sum*=a;\n return sum;\n}\n\nint factorial(int n)\n{\n if(n<=0)\n return 1;\n return product(1,n,1);\n}\n\ndouble pi(int n)\n{\n return 8*((product(4,n,2)*product(4,n,2))/(product(3,n,2)*product(3,n,2)));\n}\n\nint main()\n{\n std::cout<<factorial(6)<<std::endl;\n std::cout<<pi(51)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4720670282840729, "alphanum_fraction": 0.4944134056568146, "avg_line_length": 14.608695983886719, "blob_id": "db803c7c8f8102c429fdd4975a396321c943cb3d", "content_id": "c00d5b8e86b1a826b4c751c619b5ba949e146413", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 358, "license_type": "no_license", "max_line_length": 69, "num_lines": 23, "path": "/Programming/C++/Effective_C++/chapter2/function_pointer.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\nusing std::cout;\nusing std::endl;\n\n\nclass B\n{\npublic:\n virtual void fun1(){cout<<\"fun1\"<<endl;}\n virtual void fun2(){cout<<\"fun2\"<<endl;}\n};\n\ntypedef void (*func)();\nint main()\n{\n B b;\n void (*f)()=(void (*)())*((long *)*(long *)(&b));//64bits OS~ ~ ~\n f();\n //int (*f)();\n //f=(void *)fun;\n //f(1);\n return 0;\n}" }, { "alpha_fraction": 0.501886785030365, "alphanum_fraction": 0.5175201892852783, "avg_line_length": 24.04054069519043, "blob_id": "1b8415b2f7a5fd4adc3d03194c33a01d2cd8d965", "content_id": "7e8045b33806ca3a77823f37e8c3dbcc504ccce8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1855, "license_type": "no_license", "max_line_length": 86, "num_lines": 74, "path": "/Programming/C/Network_Programming_in_Unix/chapter11/daytime.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <netdb.h>\n#include <arpa/inet.h>\n#include <string.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n\nint\nmain(int argc, char *argv[])\n{\n int sockfd, n;\n char recvline[1500];\n struct sockaddr_in servaddr;\n struct in_addr **pptr;\n struct in_addr *inetaddrp[2];\n struct in_addr inetaddr;\n struct hostent *hp;\n struct servent *sp;\n if(argc!=3)\n {\n fprintf(stderr, \"usage: ./a.out <hostname> <service>\\n\");\n exit(0);\n }\n if((hp=gethostbyname(argv[1]))==NULL)\n {\n if(inet_aton(argv[1], &inetaddr)==0)\n {\n fprintf(stderr, \"hostname error for %s: %s\", argv[1], hstrerror(h_errno));\n exit(0);\n }\n else\n {\n inetaddrp[0]=&inetaddr;\n inetaddrp[1]=NULL;\n pptr=inetaddrp;\n }\n }\n else\n pptr=(struct in_addr **)hp->h_addr_list;\n \n if((sp=getservbyname(argv[2], \"tcp\"))==NULL)\n {\n fprintf(stderr, \"getservbyname error for %s\\n\", argv[2]);\n exit(0);\n }\n for(; *pptr!=NULL; pptr++)\n {\n if((sockfd=socket(AF_INET, SOCK_STREAM, 0))<0)\n {\n fprintf(stderr, \"error, socket()\\n\");\n exit(0);\n }\n bzero(&servaddr, sizeof(servaddr));\n servaddr.sin_family=AF_INET;\n servaddr.sin_port=sp->s_port;\n memcpy(&servaddr.sin_addr, *pptr, sizeof(struct in_addr));\n printf(\"trying %s\\n\", inet_ntoa(servaddr.sin_addr));\n if(connect(sockfd, (struct sockaddr *)&servaddr, sizeof(servaddr))==0)\n {\n \n break;\n }\n fprintf(stderr, \"error, connect()\\n\");\n close(sockfd);\n exit(0);\n }\n while((n=read(sockfd, recvline, 1500))>0)\n {\n recvline[n]=0;\n printf(\"%s\\n\", recvline);\n }\n exit(0);\n}\n\n\n" }, { "alpha_fraction": 0.3802395164966583, "alphanum_fraction": 0.3997006118297577, "avg_line_length": 22.85714340209961, "blob_id": "7d3d764144d40b39d93b6dcb74288dfd74974e86", "content_id": "865c4b3b4ddda11ff7fc2d0a74babb0ac757ceea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 668, "license_type": "no_license", "max_line_length": 47, "num_lines": 28, "path": "/Programming/Practice/Google/RoundC/distinctsub.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <string>\n\nint numDistinct(std::string S, std::string T) {\n if(S.size()<T.size())\n return 0;\n if(S==T)\n return 1;\n int *arr=new int [T.size()+1];\n arr[0]=1;\n for(int i=1; i<T.size()+1; i++)\n arr[i]=0;\n for(int i=0; i<S.size(); i++)\n {\n for(int j=T.size()-1; j>=0; j--)\n if(S[i]==T[j])\n arr[j+1]+=arr[j];\n for(int k=0; k<=T.size(); k++)\n std::cout<<arr[k]<<\" \";\n std::cout<<std::endl;\n }\n return arr[T.size()];\n \n }\nint main()\n{\nnumDistinct(\"rabbbit\", \"rabbit\");\n}\n" }, { "alpha_fraction": 0.5359477400779724, "alphanum_fraction": 0.5424836874008179, "avg_line_length": 8.625, "blob_id": "d0253a6a1b6799e26a51874a526640e604e56d82", "content_id": "232be159b45ffde01bda5157cd9aa2f2a3c0996f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 153, "license_type": "no_license", "max_line_length": 24, "num_lines": 16, "path": "/Programming/C++/Effective_C++/chapter2/static_member.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A\n{\npublic:\n int fun(){return i;}\nprivate:\n static int i;\n};\nint A::i=0;\n\nint main()\n{\n A a;\n std::cout<<a.fun();\n}" }, { "alpha_fraction": 0.620192289352417, "alphanum_fraction": 0.620192289352417, "avg_line_length": 16.33333396911621, "blob_id": "77d2387701d052022f46d3aa391f4f9e46d02da6", "content_id": "05ff5295ddd3acb08216d7ce7df12c8509d20648", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 37, "num_lines": 12, "path": "/Programming/Python/14iteration/file_iterator.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "f = open('file_iterator.py')\nprint(dir(f))\n#f.__next__()\nwhile True:\n\ts=f.readline()\n\tif not s:\n\t\tbreak\n\tprint(s)\n#while True:\n#\tprint(f.__iter__())\nfor line in open('file_iterator.py'):\n\tprint(line.upper())\n" }, { "alpha_fraction": 0.5643116235733032, "alphanum_fraction": 0.5797101259231567, "avg_line_length": 23.55555534362793, "blob_id": "09e84f9064c5076e3f2c03fc97813e1742c54f44", "content_id": "10228bb6852058e6f16ddb57fef7053b4eac1708", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1104, "license_type": "no_license", "max_line_length": 73, "num_lines": 45, "path": "/Programming/C/Network_Programming_in_Unix/chapter8/identifyinterface.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <unistd.h>\n#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <string.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nint\nmain(int argc, char **argv)\n{\n int sockfd;\n socklen_t len;\n struct sockaddr_in cliaddr, servaddr;\n if(argc!=2)\n {\n fprintf(stderr, \"usage: ./a.out <IP address>\\n\");\n exit(0);\n }\n if((sockfd=socket(AF_INET, SOCK_DGRAM, 0))<0)\n {\n fprintf(stderr, \"error:create socket\\n\");\n exit(0);\n }\n bzero(&servaddr, sizeof(servaddr));\n servaddr.sin_family=AF_INET;\n servaddr.sin_port=htons(9868);\n if(inet_pton(AF_INET, argv[1], &servaddr.sin_addr)<0)\n {\n fprintf(stderr, \"error: inet_pton\\n\");\n exit(0);\n }\n if(connect(sockfd, (struct sockaddr *)&servaddr, sizeof(servaddr))<0)\n {\n fprintf(stderr, \"error: connect\\n\");\n exit(0);\n }\n len=sizeof(cliaddr);\n if(getsockname(sockfd, (struct sockaddr*)&cliaddr, &len)<0)\n {\n fprintf(stderr, \"error: getsockname\\n\");\n exit(0);\n }\n printf(\"local address %s\\n\", inet_ntoa(cliaddr.sin_addr));\n exit(0);\n}" }, { "alpha_fraction": 0.521436870098114, "alphanum_fraction": 0.5417149662971497, "avg_line_length": 24.02898597717285, "blob_id": "f19fda33201b6617741886ee000a37505947840a", "content_id": "0fd5b959bf122b03fdf00e91d5cbf4236c42b9b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1726, "license_type": "no_license", "max_line_length": 75, "num_lines": 69, "path": "/Programming/C/Network_Programming_in_Unix/chapter8/echoudpcli.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <string.h>\n#include <errno.h>\n\nvoid\necho(FILE *, int, struct sockaddr *, socklen_t);\n\nint\nmain(int argc, char *argv[])\n{\n int sockfd;\n struct sockaddr_in addr;\n if(argc!=2)\n {\n fprintf(stderr, \"usage: ./a.out <IP Address>\\n\");\n exit(0);\n }\n bzero(&addr, sizeof(addr));\n addr.sin_len=16;\n addr.sin_family=AF_INET;\n addr.sin_port=htons(9868);\n if(inet_pton(AF_INET, argv[1], (void *)&addr.sin_addr)<0)\n {\n fprintf(stderr, \"error: inet_pton: %s\\n\", strerror(errno));\n exit(0);\n }\n if((sockfd=socket(AF_INET, SOCK_DGRAM, 0))<0)\n {\n fprintf(stderr, \"error: create socket %s\\n\", strerror(errno));\n exit(0);\n }\n echo(stdin, sockfd, (struct sockaddr *)&addr, sizeof(addr));\n return 0;\n}\n\nvoid\necho(FILE * fp, int sock, struct sockaddr *addr, socklen_t size)\n{\n int n;\n char msg[1500];\n char buf[16];\n socklen_t len=size;\n struct sockaddr_in raddr;\n bzero(&raddr, sizeof raddr);\n while((n=read(fileno(fp), msg, 1500))>0)\n {\n if(sendto(sock, msg, n, 0, addr, size)<0)\n {\n fprintf(stderr, \"error: sendto %s\\n\", strerror(errno));\n continue;\n }\n if(recvfrom(sock, msg, 1500, 0, (struct sockaddr *)&raddr, &len)<0)\n {\n fprintf(stderr, \"error: recv %s\\n\", strerror(errno));\n continue;\n }\n if(len!=size||memcmp(addr, &raddr, len) !=0)\n {\n printf(\"reply from %s (ignored)\\n\", inet_ntoa(raddr.sin_addr));\n continue;\n }\n //printf(\"******\\n\");\n printf(\"%s\", msg);\n }\n}" }, { "alpha_fraction": 0.3526608347892761, "alphanum_fraction": 0.36775219440460205, "avg_line_length": 17.52941131591797, "blob_id": "80621eeb0e9f2770ce7f9b51636f1baddd1b3daf", "content_id": "b3ecc72e5a0654a7e379342c5b52871167eb378e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1259, "license_type": "no_license", "max_line_length": 52, "num_lines": 68, "path": "/Programming/C/The C programming Language/chapter5/expr.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n\nint pop();\nvoid push(int n);\n\nint main(int argc, char **argv)\n{\n for(int i=0;i<argc;i++)\n printf(\"%s\\n\", argv[i]);\n int op2;\n while(--argc>0)\n {\n printf(\"%s %d\\n\",*argv, argc);\n switch(**++argv)\n {\n case '+' :\n push(pop()+pop());\n break;\n case '*' :\n push(pop()*pop());\n break;\n case '-' :\n op2=pop();\n push(pop()-op2);\n break;\n case '/' :\n op2=pop();\n if(op2==0)\n printf(\"error: zero divisor\\n\");\n else\n push(pop()/op2);\n break;\n default:\n if(**argv-'0'<=9&&**argv-'0'>=0)\n push(atof(*argv));\n break;\n }\n }\n printf(\"%d \\n\", pop());\n return 0;\n}\n\n#define MAX 10\n\nint stack[MAX];\nint *bufp=stack;\n\nint pop()\n{\n if(bufp>stack)\n return *--bufp;\n else\n {\n printf(\"error: empty stack\\n\");\n exit(1);\n }\n}\n\nvoid push(int n)\n{\n if(bufp-stack<=10)\n *bufp++=n;\n else\n {\n printf(\"error: stack is full\\n\");\n }\n}" }, { "alpha_fraction": 0.3457943797111511, "alphanum_fraction": 0.3676012456417084, "avg_line_length": 15.100000381469727, "blob_id": "1bc0df93df1beebbfc595548786cc3a80f471e38", "content_id": "c3e8941b546049cd4ff31adec7eeac61ed4ba554", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 321, "license_type": "no_license", "max_line_length": 46, "num_lines": 20, "path": "/Programming/C/The C programming Language/chapter1/detab.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define MAX 1000\n\nint main()\n{\n char input[MAX];\n for(int i=0;(input[i]=getchar())!=EOF;++i)\n {\n if(input[i]=='\\t')\n {\n input[i]=' ';\n int j=1;\n for(i=i+j;j<6;++j)\n input[i]=' ';\n }\n \n }\n printf(\"%s\\n\",input);\n}" }, { "alpha_fraction": 0.5602094531059265, "alphanum_fraction": 0.5706806182861328, "avg_line_length": 16.454545974731445, "blob_id": "5ba11286b7985e7ba4bf3982065008223fb8a15b", "content_id": "f8869e81ace5a3509523953f8ebd28cafc6c9797", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 191, "license_type": "no_license", "max_line_length": 39, "num_lines": 11, "path": "/Programming/C/Programming_in_Unix/chapter7/environ.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n//#include <stdlib.h>\n//#include <unistd.h>\n\nextern char **environ;\nint main()\n{\n for(int i=0; environ[i]!=NULL; i++)\n printf(\"%s\\n\", environ[i]);\n return 0;\n}" }, { "alpha_fraction": 0.42633894085884094, "alphanum_fraction": 0.48262783885002136, "avg_line_length": 30.535884857177734, "blob_id": "b1a9dab840571c6eed01569cba98731b4d11bc38", "content_id": "c6a2a1755a7eb19ce237ebb9f23cca8424c1a3cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 6605, "license_type": "no_license", "max_line_length": 125, "num_lines": 209, "path": "/HomeWork/Lua/test.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "require \"json\"\ntestcase = [==[[\n \"JSON Test Pattern pass1\",\n\t{\"object with 1 member\":[\"array with 1 element\"]},\n\t {},\n\t\t[],\n\t\t -42,\n\t\t\ttrue,\n\t\t\t false,\n\t\t\t\tnull,\n\t\t\t\t {\n\t\t\t\t\t\t \"integer\": 1234567890,\n\t\t\t\t\t\t\t \"real\": -9876.543210,\n\t\t\t\t\t\t\t\t \"e\": 0.123456789e-12,\n\t\t\t\t\t\t\t\t\t \"E\": 1.234567890E+34,\n\t\t\t\t\t\t\t\t\t\t \"\": 23456789012E66,\n\t\t\t\t\t\t\t\t\t\t\t \"zero\": 0,\n\t\t\t\t\t\t\t\t\t\t\t\t \"one\": 1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t \"space\": \" \",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"quote\": \"\\\"\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"backslash\": \"\\\\\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"controls\": \"\\b\\f\\n\\r\\t\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"slash\": \"/ & \\/\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"alpha\": \"abcdefghijklmnopqrstuvwyz\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"ALPHA\": \"ABCDEFGHIJKLMNOPQRSTUVWYZ\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"digit\": \"0123456789\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"0123456789\": \"digit\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"special\": \"`1~!@#$%^&*()_+-={':[,]}|;.</>?\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"hex\": \"\\u0123\\u4567\\u89AB\\uCDEF\\uabcd\\uef4A\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"true\": true,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"false\": false,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"null\": null,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"array\":[ ],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"object\":{ },\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"address\": \"50 St. James Street\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"url\": \"http://www.JSON.org/\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"comment\": \"// /* <!-- --\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"# -- --> */\": \" \",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \" s p a c e d \" :[1,2 , 3\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 4 , 5 , 6 ,7 ],\"compact\":[1,2,3,4,5,6,7],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"jsontext\": \"{\\\"object with 1 member\\\":[\\\"array with 1 element\\\"]}\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"quotes\": \"&#34; \\u0022 %22 0x22 034 &#x22;\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"\\/\\\\\\\"\\uCAFE\\uBABE\\uAB98\\uFCDE\\ubcda\\uef4A\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t : \"A key can be any string\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 0.5 ,98.6\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 99.44\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ,\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 1066,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 1e1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 0.1e1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 1e-1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 1e00,2e+00,2e-00\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ,\"rosebud\"] ]==]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\ntest = ' \\n \\t hello world'\ntest2 = [==[[[[[[[[[[[[[[[[[[[[\"你好\"]]]]]]]]]]]]]]]]]]]]==]\n\ntest3 = [==[{\n\t \"JSON Test Pattern pass3\": {\n\t\t \"The outermost value\": \"must be an object or array.\",\n\t\t\t \"In this test\": \"It is an object.\"\n\t\t\t\t }\n\t\t\t\t}\n\t\t\t\t]==]\n--print( json.Marshal(testcase))\na = json.Marshal(testcase)\n--print(a[20])\nprint(json.Unmarshal(a))\nfunction printTable(a)\n\tfor k, v in pairs(a) do\n\t\tif type(v) == 'table' then\n\t\t\tprintTable(v)\n\t\telse\n\t\t\tprint(k, v)\n\t\tend\n\tend\nend\nb = json.Marshal(test2)\n--printTable(a)\nc = json.Marshal(test3)\nret = json.Unmarshal(a)\na1 = json.Marshal(ret)\n\n\nfunction check(a, b)\n\tlocal falg = false\n\tfor k1, v1 in pairs(a) do\n\t\tif type(v1) == 'table' then\n\t\t\tflag = check(a[k1], b[k1])\n\t\telseif b[k1] == v1 then \n\t\t\tflag = true\n\t\tend\n\t\tif not flag then\n\t\t\tprint(k1, v1, \"table 1\")\n\t\t\tprint(k1, b[k1], \"table 2\")\n\t\tend\n\tend\n\treturn flag\nend\n\nprint(check(a, a1))\nb1 = json.Marshal(json.Unmarshal(b))\nprint(check(b, b1))\nc1= json.Marshal(json.Unmarshal(c))\nprint(check(c, c1))\ntest1 = [==[{1, [\"2\"]=2}]==]\n--a = json.Marshal(test1)\nprint(json.Unmarshal({1, [\"2\"]=2}))\nprint(json.Unmarshal({nil, nil, 1}))\n--print(json.Unmarshal(json.Marshal(test1)))\n\nprint(json.Unmarshal({b=\"cd\"}))\ntest5= '{{},{{},{}}}'\nprint( json.Marshal(test5))\nprint(json.Unmarshal({{},{{},{}}}))\ntest6='[\"mismatch\"}'\nprint(json.Marshal(test6))\ntest7='[\"Unclosed array\"'\nprint(json.Marshal(test7))\ntest8 = '\"A JSON payload should be an object or array, not a string.\"'\nprint(json.Marshal(test8))\ntest9 = '{unquoted_key: \"keys must be quoted\"}'\nprint(json.Marshal(test9))\ntest10 = '[\"extra comma\",]' --not pass\nprint(json.Marshal(test10))\ntest11 = '[\"double extra comma\",,]'--not pass\nprint(json.Marshal(test11))\ntest12 = '[ , \"<-- missing value\"]' -- not pass\nprint(json.Marshal(test12))\ntest13 = '[\"Comma after the close\"],' -- not pass\nprint(json.Marshal(test13))\ntest14 = '[\"Extra close\"]]' --not pass\nprint(json.Marshal(test14))\ntest15 = '{\"Extra comma\": true,}'\nprint(json.Marshal(test15))\ntest16 = '{\"Extra value after close\": true} \"misplaced quoted value\"'\nprint(json.Marshal(test16)) --not pass\ntest17 = '{\"Illegal expression\": 1 + 2}'\nprint(json.Marshal(test17))\ntest18 = '{\"Illegal invocation\": alert()}'\nprint(json.Marshal(test18))\ntest19 = '{\"Numbers cannot have leading zeroes\": 013}'\nprint(json.Marshal(test19) )--not pass\ntest20 = '{\"Numbers cannot be hex\": 0x14}'\nprint(json.Marshal(test20))\ntest21 = '[\"Illegal backslash escape: \\x15\"]'\nprint(json.Marshal(test21)) -- not pass\ntest22 = '[\\naked]'\nprint(json.Marshal(test22) )\ntest23 = '[\"Illegal backslash escape: \\017\"]'\nprint(json.Marshal(test23) ) --not pass\ntest24 = '[[[[[[[[[[[[[[[[[[[[\"Too deep\"]]]]]]]]]]]]]]]]]]]]'\nprint(json.Marshal(test24))\ntest25 = '{\"Missing colon\" null}'\nprint(json.Marshal(test25))\ntest26 = '{\"Double colon\":: null}'\nprint(json.Marshal(test26))\ntest27 = '{\"Comma instead of colon\", null}'\nprint(json.Marshal(test27))\ntest28 = '[\"Colon instead of comma\": false]'\nprint(json.Marshal(test28))\ntest29 = '[\"Bad value\", truth]'\nprint(json.Marshal(test29))\ntest30 = '[\"Bad value\", truth]'\nprint(json.Marshal(test30))\ntest31 = '[\"test31tabcharacterinstring\"]'\nprint(json.Marshal(test31))\ntest32 = [==[[\"line\nbreak\"]]==]\nprint(json.Marshal(test32)) -- not pass\ntest33 = '[0e]'\nprint(json.Marshal(test33))\ntest34 = '[0e+]'\nprint(json.Marshal(test34))\ntest35 = '[0e+-1]'\nprint(json.Marshal(test35))\ntest36 = '{\"Comma instead if closing brace\": true,'\nprint(json.Marshal(test36))\ntest37 = '[\"mismatch\"}'\nprint(json.Marshal(test37))\nindex = 1\ntmp = {}\nwhile index < 10000 do\n\ttmp[index]='你好,中国'\n\tindex = index + 1\nend\nprint('ok')\ns = json.Unmarshal(tmp)\na = json.Marshal(s)\nprint(json.Unmarshal(a))\ntest38 = '{\"abc\":\"haha}'\nprint(json.Marshal(test38))\ntest39 = '{{}\"'\nprint(json.Marshal(test39))\ntest40 = '{{\"\",}'\nprint(json.Marshal(test40))\ntest41 = '{{?}}'\nprint(json.Marshal(test41))\ntest42 = '{\"he'\nprint(json.Marshal(test42))\ntest43 = '{[\"{]'\nprint(json.Marshal(test43))\n" }, { "alpha_fraction": 0.5409388542175293, "alphanum_fraction": 0.5458515286445618, "avg_line_length": 20.313953399658203, "blob_id": "d133f1cafb1c8095bfdc26e4869a0c9b5c370616", "content_id": "3682a1ac082434ab4607cd328018ba92c8ec8e71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1832, "license_type": "no_license", "max_line_length": 75, "num_lines": 86, "path": "/Project/source/Client/data.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"data.h\"\n#include <fstream>\n#include <time.h>\n\nvoid Records::add_rcd(const std::string & account, const std::string & msg)\n{\n std::string filename=\"data/\";\n filename+=account;\n std::ofstream ofs(filename, std::fstream::app);\n if(!ofs.is_open())\n return;\n time_t ttime;\n time(&ttime);\n std::string tt=ctime(&ttime);\n for(int i=0; i<tt.size();++i)\n if(tt[i]==' ')\n tt[i]=',';\n ofs<<tt<<std::endl;\n ofs<<msg<<std::endl;\n ofs.close();\n}\n\nvoid Records::list(const std::string &account)\n{\n std::string filename=\"data/\";\n filename+=account;\n std::ifstream ifs(filename);\n if(!ifs.is_open())\n return;\n std::string record;\n while(ifs>>record)\n std::cout<<record<<std::endl;\n ifs.close();\n}\n\nint Frd_List::read(std::vector<std::string> &vec)\n{\n std::ifstream ifs(\"data/friends\");\n if(!ifs.is_open())\n {\n return -1;\n }\n std::string account;\n while(ifs>>account)\n {\n //std::cout<<account<<std::endl;\n vec.push_back(account);\n }\n ifs.close();\n return 1;\n}\n\nint Frd_List::fadd(const std::string & account)\n{\n std::ofstream ofs(\"data/friends\", std::fstream::app);\n if(!ofs.is_open())\n return -1;\n ofs<<account<<std::endl;\n ofs.close();\n return 1;\n}\n\nint Frd_List::fdelete(const std::string & account)\n{\n std::ifstream ifs(\"data/friends\");\n if(!ifs.is_open())\n return -1;\n std::string acc;\n std::vector<std::string> acc_vec;\n while(ifs>>acc)\n {\n if(acc==account)\n continue;\n acc_vec.push_back(acc);\n }\n ifs.close();\n std::ofstream ofs(\"data/friends\");\n if(!ofs.is_open())\n return -1;\n for(int i=0; i<acc_vec.size(); ++i)\n {\n ofs<<acc_vec[i]<<std::endl;\n }\n ofs.close();\n return 1;\n}" }, { "alpha_fraction": 0.3888888955116272, "alphanum_fraction": 0.3888888955116272, "avg_line_length": 23, "blob_id": "9f7e897a023705a01a23996db2f2d8977796d3e7", "content_id": "b9529ba7e13231f5a54d9645732940d7bc25ab5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 72, "license_type": "no_license", "max_line_length": 31, "num_lines": 3, "path": "/Programming/Python/13Loop/dict_for.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "D = {'A':'a', 'B':'b', 'C':'c'}\nfor key in D:\n\tprint(key+\" => \"+D[key])\n" }, { "alpha_fraction": 0.4664948582649231, "alphanum_fraction": 0.48969072103500366, "avg_line_length": 17.247058868408203, "blob_id": "12d1820e1446f1eb4016b55ede3b51feab139855", "content_id": "d934eae38e31b35890f442f17b74b553e866c911", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1552, "license_type": "no_license", "max_line_length": 54, "num_lines": 85, "path": "/Programming/Python/8ListandDict/dict.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "d={'spam':2, 'ham':1, 'eggs':3};\nprint(d['spam']);\nprint(d);\nprint(len(d));\nprint('ham' in d);\nprint(list(d.keys()));\nprint('*****************');\nd['ham']=['grill', 'bake', 'fry'];\nprint(d);\ndel d['eggs'];\nprint(d);\nd['brunch']='Bacon';\nprint(d);\nprint(d.values());\nprint(d.keys());\nprint(d.items());\nprint(d.get('spam'));\nprint(d.get('toast'));\nprint(d.get('toast', 88));\nd1={'toast':6, 'muffin':66};\nd.update(d1);\nprint(d);\nprint('*************************');\nprint(d.pop('muffin'));\nprint(d.pop('toast'));\nprint(d);\nprint('**************************');\ntable={\n\t'Python': 'Guido van Rossum',\n\t'Perl': 'Larry Wail',\n\t'Tcl': 'John Ousterhout'\n\t};\nlanguage = 'Python';\ncreater = table[language];\nprint(creater);\nfor lang in table:\n\tprint(lang, '\\t', table[lang]);\n\nrec={};\nrec['name']='mel';\nrec['age']=66;\nrec['job'] = 'trainer/writer';\nprint(rec);\n\n\nprint('********************');\nprint(list(zip(['a', 'b', 'c'], [1,2,3])));\nd=dict(zip(['a', 'b', 'c'], [1, 2, 4]));\nprint(d);\n\nd={k:v for (k,v) in zip(['a', 'b', 'c'], [1,2,3])};\nprint(d);\n\n\nprint('******************');\nd={x: x**2 for x in [1,2,3,4]};\nprint(d);\nd={c: c*4 for c in 'SPAM'};\nprint(d);\nd={c.lower(): c+'!' for c in ['SPAM', 'EGGS', 'HAM']};\nprint(d);\n\nd=dict.fromkeys(['a', 'b', 'c'], 0);\nprint(d);\n\nd={k:0 for k in ['a', 'b', 'c']};\nprint(d);\n\nd={k: None for k in 'spam'};\nprint(d);\nd=dict(a=1, b=2, c=3);\nprint(d);\nk=d.keys();\nprint(k);\nv=d.values();\nprint(v);\nprint(list(d.items()));\nprint(k[0]);\nd={'a':1, 'b':2, 'c':3};\nprint(d);\nks=d.keys();\nks.sort();\nfor k in ks:print(k, d[k]);\nsorted(d);\nprint(d);\n\n" }, { "alpha_fraction": 0.3175775408744812, "alphanum_fraction": 0.3722304403781891, "avg_line_length": 14.409090995788574, "blob_id": "645d2e3b6f6eb3997127f17d9b6ecdb7900cdfa6", "content_id": "a2d2b2c9f3b84879ad18582bb00db6f4821185e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 677, "license_type": "no_license", "max_line_length": 35, "num_lines": 44, "path": "/Programming/C/The C programming Language/chapter2/squeeze.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define MAX 100\n\nvoid squeeze(char s1[], char s2[]);\n\nint main()\n{\n char ch, s1[MAX],s2[MAX];\n int i=0;\n while((ch=getchar())!='\\n')\n {\n s1[i++]=ch;\n }\n s1[i]='\\0';\n i=0;\n while((ch=getchar())!='\\n')\n {\n s2[i++]=ch;\n }\n s2[i]='\\0';\n printf(\"%s \\n%s\\n\",s1,s2);\n squeeze(s1,s2);\n printf(\"%s\\n\",s1);\n}\n\nvoid squeeze(char s1[],char s2[])\n{\n int i=0,k=0;\n while(s1[i]!='\\0')\n {\n int j=0;\n int flag=0;\n while(s2[j]!='\\0')\n {\n if(s1[i]==s2[j++])\n flag=1;\n }\n if(!flag)\n s1[k++]=s1[i];\n ++i;\n }\n s1[k]='\\0';\n}" }, { "alpha_fraction": 0.5120435357093811, "alphanum_fraction": 0.5315548777580261, "avg_line_length": 26.382978439331055, "blob_id": "48bfc182912f8ca6883aa6d85bf741398afeceb5", "content_id": "3006d6c75cd08ee8348eebbe3053514d7b058f6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 11583, "license_type": "no_license", "max_line_length": 100, "num_lines": 423, "path": "/Project/source/SNS/rp2p.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <sstream>\n#include <time.h>\n#include <sys/socket.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include \"neo4j.h\"\n#include \"userdb.h\"\n#include \"rp2p.h\"\n\nclass Sock\n{\npublic:\n Sock(int i):sock(i){}\n int send_msg(const std::string &);\n ~Sock();\nprivate:\n int sock;\n int valid;\n};\n\nSock::~Sock()\n{\n close(sock);\n}\n\nint Sock::send_msg(const std::string &msg)\n{\n struct rp2p rpp;\n char buf[1500];\n memset(buf, 0, 1500);\n memcpy(buf, &rpp, sizeof(rpp));\n memcpy(buf+sizeof(rpp), msg.c_str(), msg.size());\n send(sock, buf, sizeof(rpp)+msg.size(), 0);\n return 1;\n}\n\n/*\n * sign_on function used for process user register process\n */\nint Action::sign_on(const char * msg)\n{\n string smsg(msg);\n string account, password, ip;\n std::istringstream iss(smsg);\n iss>>account>>password>>ip;\n account = account.substr(8);\n DBConn dbconn;\n if(dbconn.query_pwd(account)!=\"\")\n return -1; //-1 stand for the account already exist;\n password = password.substr(9);\n if(dbconn.insert(account, password)==-1)\n return -2; //-2 stand for mysql database error\n if(U_NeoDB::create_account(account)==-1)\n return -3; //-3 stand for neo4j database error\n ip=ip.substr(3);\n string xml;\n if(get_ip_location(ip, xml)==1)\n ip=xml; //-6 stand for ip translation error\n else\n ip=\"unknown\";\n if(U_NeoDB::set_freq_location(account, ip)==-1)\n return -6; //-6 stand for neo4j update location error\n //std::cout<<account<<\" \"<<password<<\" \"<<ip<<std::endl;\n //std::cout<<ip<<std::endl;\n return 1;\n}\n\n/*\n * login function is in charge of process login process\n */\nint Action::login(const char * msg, string &fip)\n{\n string smsg(msg);\n std::istringstream iss(smsg);\n string account, pwd, client, ip;\n iss>>account>>pwd>>client>>ip;\n account=account.substr(8);\n pwd=pwd.substr(9);\n DBConn dbconn;\n if(dbconn.query_pwd(account)!=pwd)\n return -1; //-1 stand for pwd error;\n if(U_NeoDB::login(account, 1)==-1)\n return -2; //-2 stand for neo db error;\n client=client.substr(7);\n if(U_NeoDB::set_client(account, client)==-1)\n return -3; //-3 stand for neodb set client error;\n ip=ip.substr(3);\n if(U_NeoDB::set_ip(account, ip)==-1)\n return -5; //-5 stand for neodb set ip error;\n string xml, location;\n if(get_ip_location(ip, xml)==1)\n location=xml; //-6 stand for ip translation error\n else\n location=\"unknown\";\n if(U_NeoDB::set_location(account, location)==-1)\n return -6; //-6 stand for set location error\n if((fip=U_NeoDB::get_all_fip(account))==\"err\")\n return -7; //error ip\n //std::cout<<account<<\" \"<<pwd<<\" \"<<client<<\" \"<<ip<<\" \"<<location<<\" \"<<fip<<std::endl;\n return 1;\n}\n\n/*\n * log_out function is responsible for clear when log_out\n */\n\nint Action::log_out(const char * msg)\n{\n string account(msg);\n account = account.substr(8);\n if(U_NeoDB::login(account, -1)==-1)\n return -2; //-2 stand for neo db error;\n time_t lttime;\n string ttime;\n if(time(&lttime)==-1)\n {\n ttime=\"unknown\";\n }\n ttime=string(ctime(&lttime));\n for(int i=0; i<ttime.size(); i++)\n if(ttime[i]==' ')\n ttime[i]=',';\n ttime[ttime.size()-1]=0;\n if(U_NeoDB::set_last_login(account, ttime.c_str())==-1)\n return -1; //-1 stand for set time error;\n std::cout<<ttime<<\" \"<<account<<std::endl;\n return 1;\n}\n\n\n/*\n * Update class is used to update different part of the information after login\n */\n\nclass Update\n{\npublic:\n static int upd_ip(const char *);\n static int upd_loc(const char *);\n static int upd_frd(const char *);\n static int upd_il(const char *);\n static int upd_if(const char *);\n static int upd_fl(const char *);\n static int upd_all(const char *);\n static int upd_info(const char *, string & fip);\n};\n\nint Update::upd_ip(const char * msg)\n{\n std::istringstream iss(msg);\n string account, ip;\n iss>>account>>ip;\n account=account.substr(8);\n ip=ip.substr(3);\n if(U_NeoDB::set_ip(\"JERRY\", ip)==-1)\n return -1;\n //std::cout<<account<<ip<<std::endl;\n //std::cout<<U_NeoDB::get_all_fip(\"123456\")<<std::endl;\n return 1;\n}\n\nint Update::upd_loc(const char * msg)\n{\n std::istringstream iss(msg);\n string account, ip;\n iss>>account>>ip;\n account=account.substr(8);\n ip=ip.substr(3);\n string location;\n if(get_ip_location(ip, location)!=1)\n location=\"unknown\";\n if(U_NeoDB::set_location(account, location)==-1)\n return -1;\n //std::cout<<location<<\" \"<<ip<<\" \"<<account<<std::endl;\n return 1;\n}\n\nint Update::upd_frd(const char * msg)\n{\n std::istringstream iss(msg);\n string account, adord, frd;\n iss>>account>>adord>>frd;\n account=account.substr(8);\n frd=frd.substr(7);\n if(adord==\"add\")\n {\n if(U_NeoDB::add_friends(account, frd)==-1)\n return -1;\n }\n else if(adord==\"delete\")\n {\n if(U_NeoDB::delete_friends(account, frd)==-1)\n return -2;\n }\n return 1;\n}\n\nint Update::upd_il(const char * msg)\n{\n string account, ip, location;\n std::istringstream iss(msg);\n iss>>account>>ip;\n account=account.substr(8);\n ip=ip.substr(3);\n if(get_ip_location(ip, location)!=1)\n location=\"unknown\";\n if(U_NeoDB::set_ip(account, ip)==-1)\n return -1;\n if(U_NeoDB::set_location(account, location)==-1)\n return -2;\n return 1;\n}\n\nint Update::upd_if(const char * msg)\n{\n string account, ip, adord, frd;\n std::istringstream iss(msg);\n iss>>account>>ip>>adord>>frd;\n account=account.substr(8);\n ip=ip.substr(3);\n frd=frd.substr(7);\n std::cout<<account<<\" \"<<ip<<\" \"<<frd<<\" \"<<adord<<std::endl;\n if(U_NeoDB::set_ip(account, ip)==-1)\n return -1;\n if(adord==\"add\")\n {\n if(U_NeoDB::add_friends(account, frd)==-1)\n return -2;\n }\n else if(adord==\"delete\")\n {\n if(U_NeoDB::delete_friends(account, frd)==-1)\n return -3;\n }\n return 1;\n}\n\nint Update::upd_fl(const char * msg)\n{\n std::istringstream iss(msg);\n string account, ip, adord, frd, location;\n iss>>account>>ip>>adord>>frd;\n account=account.substr(8);\n ip=ip.substr(3);\n frd=frd.substr(7);\n if(get_ip_location(ip, location)!=1)\n location=\"unknown\";\n if(U_NeoDB::set_location(account, location)==-1)\n return -1;\n if(adord==\"add\")\n {\n if(U_NeoDB::add_friends(account, frd)==-1)\n return -2;\n }\n else if(adord==\"delete\")\n {\n if(U_NeoDB::delete_friends(account, frd)==-1)\n return -3;\n }\n return 1;\n}\n\nint Update::upd_all(const char *msg)\n{\n std::istringstream iss(msg);\n string account, ip, adord, frd, location;\n iss>>account>>ip>>adord>>frd;\n account=account.substr(8);\n ip=ip.substr(3);\n frd=frd.substr(7);\n if(get_ip_location(ip, location)!=1)\n location=\"unknown\";\n if(U_NeoDB::set_ip(account, ip)==-1)\n return -1;\n if(U_NeoDB::set_location(account, location)==-1)\n return -2;\n if(adord==\"add\")\n {\n if(U_NeoDB::add_friends(account, frd)==-1)\n return -3;\n }\n else if(adord==\"delete\")\n {\n if(U_NeoDB::delete_friends(account, frd)==-1)\n return -6;\n }\n return 1;\n}\n\nint Update::upd_info(const char * msg, string &fip)\n{\n string account(msg);\n account=account.substr(8);\n if((fip=U_NeoDB::get_all_fip(account))==\"err\")\n return -1;\n return 1;\n}\n/*\n * keep_alive function is used for update information after login\n */\nint Action::keep_alive(const char * msg, unsigned char flag, unsigned char reserved, string & rip)\n{\n int ret=0;\n switch(flag&L_MASK)\n {\n case UPD_IP :\n if(Update::upd_ip(msg)==-1)\n ret=-1;//update ip error\n break;\n case UPD_LOC :\n if(Update::upd_loc(msg)==-1)\n ret=-2;//update location error\n break;\n case UPD_FRD :\n if((ret=Update::upd_frd(msg))==-1)\n ret=-3;//add friend error\n else if(ret==-2)\n ret=-4;//delete friend error\n else\n ret=0;\n break;\n case UPD_INFO :\n if(Update::upd_info(msg, rip)==-1)\n ret=-5;//request friends ip error\n break;\n case UPD_IP|UPD_LOC :\n if((ret=Update::upd_il(msg))==-1)\n ret=-6;//update ip error;\n else if(ret==-2)\n ret=-7; //update ip OK, update location error;\n else\n ret =0;\n break;\n case UPD_IP|UPD_FRD :\n if((ret=Update::upd_if(msg))==-1)\n ret=-8; //set ip error;\n else if(ret==-2)\n ret=-9; //set ip ok, add friends error;\n else if(ret==-3)\n ret=-10; //set ip ok, delete friends error;\n else\n ret=0;\n break;\n case UPD_FRD|UPD_LOC :\n if((ret=Update::upd_fl(msg))==-1)\n ret=-11;//set location error;\n else if(ret==-2)\n ret=-12;//set location OK, add friends error;\n else if(ret==-3)\n ret=-13;//set location OK, delete friends error;\n else\n ret=0;\n break;\n case UPD_IP|UPD_LOC|UPD_FRD :\n if((ret=Update::upd_all(msg))==-1)\n ret=-14;//set ip error;\n else if(ret==-2)\n ret=-15;//set ip OK, set location error;\n else if(ret==-3)\n ret=-16;//set ip, location OK, add friends error;\n else if (ret==-6)\n ret=-17;//set ip, location ok, delete friends error;\n break;\n default:\n ret=-256;\n break;\n }\n return ret;\n}\n\nint proc_msg(const char * msg, string & param, int soc)\n{\n struct rp2p *rpp=(struct rp2p *)msg;\n int ret=0;\n Sock sock(soc);\n switch(rpp->confld&H_MASK)\n {\n case LOG_IN :\n if((ret=Action::login(msg+sizeof(struct rp2p), param))==-1)\n ret=-1; //-1 stand for query pwd error for login\n else if(ret==-2)\n ret=-2; //-2 stand for neodb login error\n else if(ret==-3)\n ret=-3; //-3 stand for set client error\n else if(ret==-5)\n ret=-5; //-4 stand for set ip error;\n else if(ret==-6)\n ret=-6; //-6 stand for set location error;\n else if(ret==-7)\n ret=-7; //-7 stand for get all friend ip error;\n else\n {\n ret=0;\n sock.send_msg(\"Login OK\");\n }\n if(ret!=0)\n sock.send_msg(\"error\");\n break;\n case SIGN_ON :\n ret=Action::sign_on(msg+sizeof(struct rp2p));\n if(ret==1)\n sock.send_msg(\"Sign on OK\");\n if(ret==-1)\n sock.send_msg(\"account already exist\");\n ret+=-7;\n break;\n case LOG_OUT :\n ret=Action::log_out(msg+sizeof(struct rp2p));\n ret+=-13;\n break;\n case LOGGED :\n ret=Action::keep_alive(msg+sizeof(struct rp2p), rpp->confld&L_MASK, rpp->reserv, param);\n if(param.size()>6)\n sock.send_msg(param);\n ret+=-25;\n break;\n default:\n ret=-256;\n break;\n }\n return ret;\n}\n" }, { "alpha_fraction": 0.4318181872367859, "alphanum_fraction": 0.469696968793869, "avg_line_length": 13.777777671813965, "blob_id": "7de1eb10040d528aef61c74ab0fad2c823875acd", "content_id": "a3b43f257f0b5cc164ef7d067ea0bfba5523ab64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 132, "license_type": "no_license", "max_line_length": 47, "num_lines": 9, "path": "/Programming/C++/Inside_the_C++_object_model/chapter1/init_test.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n int i(1024);\n int (*pf)(&i);\n std::cout<<i<<\" \"<<*pf<<\" \"<<pf<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.5221574902534485, "alphanum_fraction": 0.5369811654090881, "avg_line_length": 31.810579299926758, "blob_id": "48ca7f03de6fd7c5f5297c383a0f22bc2e0ce2c0", "content_id": "3b3a94f38cc0a1f99eb7e02e4af7ca4284d74eb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 19226, "license_type": "no_license", "max_line_length": 308, "num_lines": 586, "path": "/Project/source/SNS/neo4j.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"neo4j.h\"\n#include <sys/socket.h>\n#include <sys/types.h>\n#include <unistd.h>\n#include <arpa/inet.h>\n#include <iostream>\n#include <sstream>\n#include <stdlib.h>\n\n#define BUFFSIZE 100000\n\nint get_ip_location(const string & ipstr, string & xml)\n{\n int sock=socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);\n if(sock==-1)\n {\n //cout<<\"ERROR: open socket.\"<<endl;\n return -1;\n }\n //cout<<\"start\"<<endl;\n sockaddr_in sa;\n sa.sin_family=AF_INET;\n sa.sin_port=htons(80);\n sa.sin_addr.s_addr=inet_addr(\"54.86.202.236\");\n bool flag=false;\n int error=-1, len;\n unsigned long one=1;\n //ioctl(sock, FIONBIO, &one);\n int se;\n //cout<<\"here\"<<endl;\n if(connect(sock,(sockaddr *)&sa, sizeof(sa))==-1)\n {\n //cout<<\"connect error\"<<endl;\n close(sock);\n return -1;\n }\n string request=\"GET /iplocation/v1.7/locateip?key=SAK2VL7X5U4D3554K36Z&ip=\"+ipstr+\"&format=XML HTTP/1.1\\r\\nHost:api.ipaddresslabs.com\\r\\nAccept:text/html\\r\\nConnection:keep-alive\\r\\nUser-Agent:Mozilla/5.0(Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/527.74.9 (KHTML, like Gecko) Version/7.0.2 Safari/537.74.9\\r\\n\\r\\n\";\n if(send(sock,request.c_str(),request.size(),0)==-1)\n {\n //cout<<\"faile to send.\"<<endl;\n close(sock);\n return -1;\n }\n timeval tv;\n tv.tv_sec=3;\n tv.tv_usec=0;\n if(setsockopt(sock,SOL_SOCKET, SO_RCVTIMEO, (char *)&tv,sizeof(timeval))!=0)\n {\n //cout<<\"ERROR: set receive timer.\"<<endl;\n close(sock);\n return -1;\n }\n char *xmlbuf;\n xmlbuf=new char[15000];\n memset(xmlbuf,0,15000);\n int bytesRead=0;\n int ret=1;\n while(ret>0)\n {\n ret=recv(sock,xmlbuf+bytesRead,15000-bytesRead,0);\n if(ret>0)\n {\n bytesRead+=ret;\n tv.tv_sec=3;\n setsockopt(sock,SOL_SOCKET, SO_RCVTIMEO, (char *)&tv,sizeof(timeval));\n }\n //cout<<ret<<\" \";\n }\n //cout<<endl;\n xmlbuf[bytesRead]='\\0';\n //cout<<xmlbuf<<endl;\n xml=string(xmlbuf);\n delete []xmlbuf;\n /*this part need further consideration*/\n std::istringstream iss(xml);\n string str;\n while(iss>>str)\n {\n //std::cout<<str<<std::endl;\n if((str.size()>13)&&(str.substr(0,6)==\"<city>\"))\n {\n //cout<<\"bingo\"<<endl;\n xml=str.substr(6, str.size()-13);\n close(sock);\n return 1;\n }\n }\n \n close(sock);\n return 0;\n}\n\nNeoDB::NeoDB()\n{\n if((sock=socket(AF_INET, SOCK_STREAM, IPPROTO_TCP))==-1)\n state=false;\n /*state to indicate the validation of socket*/\n else\n state=true;\n if(!state)\n return;\n sockaddr_in sa;\n sa.sin_family=AF_INET;\n sa.sin_port=htons(7474);\n sa.sin_addr.s_addr=inet_addr(\"127.0.0.1\");\n if(connect(sock, (sockaddr *)&sa, sizeof(sa))==-1)\n state=false;\n else\n state=true;\n}\n\nNeoDB::~NeoDB()\n{\n if(state)\n close(sock);\n}\n/*\n *unified interface to access the neo4j database, -1 for error, others for OK\n */\nint NeoDB::neo_query(const string & query, string & result)\n{\n std::ostringstream oss;\n oss<<query.size();\n string len=oss.str();\n string request=\"POST http://localhost:7474/db/data/cypher HTTP/1.1\\r\\nHost: localhost:7474\\r\\nAccept: application/json; charset=UTF-8\\r\\nContent-Type: application/json\\r\\nContent-Length: \"+len+\"\\r\\n\\r\\n\"+query;//need check whether the get method can do the trick\n send(sock, request.c_str(), request.size(), 0);\n char * buff=new char [BUFFSIZE];\n if(buff==NULL)\n return -1;\n memset(buff, 0, BUFFSIZE);\n recv(sock, buff, BUFFSIZE, 0);\n result=string(buff);\n delete [] buff;\n return 1;\n}\n\nint U_NeoDB::create_account(const string & account)\n{\n string request = \"{ \\\"query\\\" : \\\"CREATE (n:User {account : {acc} }) return n \\\", \\\"params\\\" : {\\\"acc\\\" : \\\"\"+account+\"\\\" } }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return -1;\n return 1;\n}\n\nstring U_NeoDB::get_client(const string & account)\n{\n /*request is a string for the neo4j cypher, need encode account in it*/\n string request=\"{ \\\"query\\\" : \\\"MATCH (a:User), (a)-[r:platform]->(b) WHERE a.account = {acc} return b \\\", \\\"params\\\" : {\\\"acc\\\" : \\\"\" + account + \"\\\"}}\";\n string result; //result is a parameter to the NeoDB::neo_query function, and filled by the neo_query function and return by this function\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return \"err\";\n string temp=\"client\";\n int index=0;\n for(index=0; index<result.size(); index++)\n {\n int j=0;\n if(result[index]==temp[0])\n for(j=0; j<6&&result[index++]==temp[j++]; )\n ;\n if(j==6)\n break;\n }\n /* here need to find the client type */\n if(index<result.size())\n {\n index+=5;\n int end=index;\n for(; end<result.size()&&result[end]!='\\\"'; end++)\n ;\n result=result.substr(index, end-index);\n }\n else\n {\n return \"err\";\n }\n return result;\n}\n\nint U_NeoDB::set_client(const string &account, const string & client)\n{\n /* first need to delete the existed platform relationship */\n string request = \"{ \\\"query\\\" : \\\"MATCH (a:User), (b:C_Type), (a)-[r:platform]->(b) WHERE a.account = {acc} DELETE r\\\", \\\"params\\\" : {\\\"acc\\\" : \\\"\"+account+\"\\\"} }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return -1;\n request = \"{ \\\"query\\\" : \\\"MATCH (a:User), (b:C_Type) WHERE a.account = {acc} AND b.client = {cli} CREATE (a)-[r:platform]->(b) RETURN r\\\", \\\"params\\\" : {\\\"acc\\\" : \\\"\"+account+\"\\\", \\\"cli\\\" : \\\"\"+client+\"\\\"} }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n return 1;\n}\n\nint U_NeoDB::add_friends(const string & account, const string & add_acc)\n{\n string request = \"{ \\\"query\\\" : \\\"MATCH (a:User), (b:User) WHERE a.account = {acc1} AND b.account = {acc2} CREATE (a)-[r:friend]->(b) RETURN r\\\", \\\"params\\\" : {\\\"acc1\\\" : \\\"\" +account+\"\\\", \\\"acc2\\\" :\\\"\"+add_acc+\"\\\"} }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return -1;\n return 1;\n}\n\nint U_NeoDB::delete_friends(const string & account, const string & d_acc)\n{\n string request = \"{ \\\"query\\\" : \\\"MATCH (a:User), (b:User), (a)-[r]->(b) WHERE a.account = {acc1} AND b.account = {acc2} DELETE r\\\", \\\"params\\\" : {\\\"acc1\\\" : \\\"\" +account+\"\\\", \\\"acc2\\\" : \\\"\"+d_acc+\"\\\"} }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return -1;\n return 1;\n}\n\nstring U_NeoDB::get_ip(const string &account)\n{\n string request = \"{ \\\"query\\\" : \\\"MATCH (a:User), (a)-[r:login_on]->(b) WHERE a.account={acc} RETURN b\\\", \\\"params\\\" : {\\\"acc\\\" : \\\"\"+account+\"\\\"} }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return \"err\";\n string temp=\"ip\\\" :\";\n /*\n *following part parse the data returned by the database\n */\n int index=0;\n for(index=0; index<result.size(); index++)\n {\n int j=0;\n if(result[index]==temp[0])\n for(j=0; j<4&&result[index++]==temp[j++]; )\n ;\n if(j==4)\n break;\n }\n if(index<result.size())\n {\n index+=3;\n int end=index;\n for(; end<result.size()&&result[end]!='\\\"'; end++)\n ;\n result=result.substr(index, end-index);\n }\n else\n {\n return \"err\";\n }\n return result;\n}\n\nint U_NeoDB::set_ip(const string & account, const string & ip)\n{\n /*\n *first delete existed relationship and ip node, and create new node, and then\n *add relationship\n */\n string request = \"{ \\\"query\\\" : \\\"MATCH (n:User), (n)-[r:login_on]->(m) WHERE n.account={acc} DELETE r, m\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\"} }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return -1;\n request = \"{ \\\"query\\\" : \\\"CREATE (n:IP {ip : {ip1} }) RETURN n\\\", \\\"params\\\" : { \\\"ip1\\\" : \\\"\"+ip+\"\\\"} }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n request = \"{ \\\"query\\\" : \\\"MATCH (a:User), (b:IP) WHERE a.account = {acc1} AND b.ip = {acc2} CREATE (a)-[r:login_on]->(b) RETURN r\\\", \\\"params\\\" : {\\\"acc1\\\" : \\\"\" +account+\"\\\", \\\"acc2\\\" :\\\"\"+ip+\"\\\"} }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n return 1;\n}\n\nstring U_NeoDB::get_all_fip(const string & account)\n{\n string request = \"{ \\\"query\\\" : \\\"MATCH (a:User), (b:User), (a)-[:friend]->(b), (b)-[:login_on]->(c), (b)-[:login_status]->(d) WHERE a.account = {acc1} AND d.status='online' RETURN b, c\\\", \\\"params\\\" : {\\\"acc1\\\" : \\\"\" +account+\"\\\" } }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return \"err\";\n return result;\n}\n\nint U_NeoDB::set_last_login(const string & account, const string & llogin)\n{\n /*\n *time format hh:mm:ss,dd/mm/yyyy\n */\n string request = \"{ \\\"query\\\" : \\\"CREATE (n:Time {time : {tm} }) RETURN n\\\", \\\"params\\\" : {\\\"tm\\\" : \\\"\"+llogin+\"\\\"} }\";\n //std::cout<<request<<std::endl;\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return -1;\n //std::cout<<result<<std::endl;\n request = \"{ \\\"query\\\" : \\\"MATCH (n:User), (n)-[r:login_time]->(m) WHERE n.account={acc} DELETE r,m\\\", \\\"params\\\" : {\\\"acc\\\" : \\\"\"+account+\"\\\"} }\";\n //std::cout<<result<<std::endl;\n if(neo.neo_query(request, result)==-1)\n return -1;\n request = \"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Time) WHERE n.account={acc} AND m.time={tm} CREATE (n)-[r:login_time]->(m) RETURN r\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\",\\\"tm\\\" : \\\"\"+llogin+\"\\\"} }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n //std::cout<<result<<std::endl;\n return 1;\n}\n\nstring U_NeoDB::get_last_login(const string &account)\n{\n string request = \"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Time), (n)-[r:login_time]->(m) WHERE n.account={acc} RETURN m\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\" } }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return \"err\";\n string temp=\"time\\\" :\";\n /*\n *following part parse the data returned by the database\n */\n int index=0;\n for(index=0; index<result.size(); index++)\n {\n int j=0;\n if(result[index]==temp[0])\n for(j=0; j<6&&result[index++]==temp[j++]; )\n ;\n if(j==6)\n break;\n }\n if(index<result.size())\n {\n index+=3;\n int end=index;\n for(; end<result.size()&&result[end]!='\\\"'; end++)\n ;\n result=result.substr(index, end-index);\n }\n else\n {\n return \"err\";\n }\n return result;\n}\n\nint U_NeoDB::is_login(const string &account)\n{\n string request = \"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Login), (n)-[r:login_status]->(m) WHERE n.account={acc} RETURN m, r\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\" } }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return -1;\n std::cout<<result<<std::endl;\n string temp=\"status\\\" :\";\n /*\n *following part parse the data returned by the database\n */\n int index=0;\n for(index=0; index<result.size(); index++)\n {\n int j=0;\n if(result[index]==temp[0])\n for(j=0; j<8&&result[index++]==temp[j++]; )\n ;\n if(j==8)\n break;\n }\n if(index<result.size())\n {\n index+=3;\n int end=index;\n for(; end<result.size()&&result[end]!='\\\"'; end++)\n ;\n result=result.substr(index, end-index);\n }\n else\n {\n return -1;\n }\n if(result==\"online\")\n return 1;\n return 0;\n}\n\nint U_NeoDB::login(const string & account, int status)\n{\n /*\n *status 1 stand for login, other status stand for offline\n */\n string request=\"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Login),(n)-[r:login_status]->(m) WHERE n.account={acc} DELETE r\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\" } }\";\n NeoDB neo;\n string result;\n if(neo.neo_query(request, result)==-1)\n return -1;\n if(status==1)\n request=\"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Login) WHERE n.account={acc} AND m.status='online' CREATE (n)-[r:login_status]->(m) RETURN m, r\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\" } }\";\n else\n request=\"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Login) WHERE n.account={acc} AND m.status='offline' CREATE (n)-[r:login_status]->(m) RETURN m, r\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\" } }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n return 1;\n}\n\nint U_NeoDB::set_preference(const string & account, const string &cgy, const string & subcgy)\n{\n /*\n *to easy to access the category and preference level, each user has its own \n *preference state, and for convienient to access and efficiency to access\n *0 stands for news, 1 stands for games, 2 stands for food, 3 stands for movies\n *4 stands for constellation, 5 stands for video, for the demo, all the sub \n *category is 0, it means that there no sub category stored in the DB.\n */\n/*\n *the format is 1:0:level category, sub category and preference level respectively\n */\n string request, preference;\n string result;\n NeoDB neo;\n if((preference=get_preference(account))==\"err\")\n {\n request=\"{ \\\"query\\\" : \\\"CREATE (n:Preference { category : {cgy} }) RETURN n\\\", \\\"params\\\" : {\\\"cgy\\\" : \\\"\"+cgy+\":\"+subcgy+\":1;1:0:0;2:0:0\\\"} }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n std::cout<<result<<std::endl;\n request=\"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Preference) WHERE n.account={acc} AND m.category={cgy} CREATE (n)-[r:prefer]->(m) RETURN r\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\", \\\"cgy\\\" : \\\"\"+cgy+\":\"+subcgy+\":1;1:0:0;2:0:0\\\"} }\";\n }\n else\n {\n request=\"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Preference), (n)-[r:prefer]->(m) WHERE n.account={acc} DELETE r, m\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\"} }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n string temp=cgy+\":\"+subcgy;\n int i=0;\n for(int i; i<preference.size(); i++)\n {\n int j=0;\n if(preference[i]==temp[0])\n for(j=0; j<temp.size()&&preference[i++]==temp[j++];)\n ;\n if(j==temp.size())\n break;\n }\n preference[i+1]++;\n request=\"{ \\\"query\\\" : \\\"CREATE (n:Preference { category : {cgy} }) RETURN n\\\", \\\"params\\\" : {\\\"cgy\\\" : \\\"\"+preference+\"\\\"} }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n request=\"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Preference) WHERE n.account={acc} AND m.category={cgy} CREATE (n)-[r:prefer]->(m) RETURN r\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\", \\\"cgy\\\" : \\\"\"+preference+\"\\\"} }\";\n }\n if(neo.neo_query(request, result)==-1)\n return -1;\n return 1;\n}\n\nstring U_NeoDB::get_preference(const string & account)\n{\n string request = \"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Preference), (n)-[r:prefer]->(m) WHERE n.account={acc} RETURN m\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\" } }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return \"err\";\n //std::cout<<result<<std::endl;\n string temp=\"category\\\" : \";\n /*\n *following part parse the data returned by the database\n */\n \n int index=0;\n for(index=0; index<result.size(); index++)\n {\n int j=0;\n if(result[index]==temp[0])\n for(j=0; j<12&&result[index++]==temp[j++]; )\n ;\n if(j==12)\n break;\n }\n if(index<result.size())\n {\n index+=3;\n int end=index;\n for(; end<result.size()&&result[end]!='\"'; end++)\n ;\n result=result.substr(index, end-index);\n }\n else\n {\n return \"err\";\n }\n return result;\n}\n\nint U_NeoDB::set_location(const string & account, const string & location)\n{\n /*\n *location format is longitude:latitude\n */\n string request = \"{ \\\"query\\\" : \\\"CREATE (n:Lg_Location { location : {loc} }) RETURN n\\\", \\\"params\\\" : { \\\"loc\\\" : \\\"\"+location+\"\\\"} }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return -1;\n request = \"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Lg_Location),(n)-[r:login_location]->(m) WHERE n.account={acc} DELETE r, m\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\" } }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n request = \"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Lg_Location) WHERE n.account={acc} AND m.location={loc} CREATE (n)-[r:login_location]->(m)\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\", \\\"loc\\\" : \\\"\"+location+\"\\\" } }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n return 1;\n}\n\nstring U_NeoDB::get_freq_location(const string & account)\n{\n /*\n *this is used for getting the frequent location, no need to get login location\n *since the login location is already known and used to set, then it can just \n *compare with the result of this function\n */\n string request=\"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Freq_Location), (n)-[r:location]->(m) WHERE n.account={acc} RETURN m\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\" } }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return \"err\";\n string temp=\"location\\\" :\";\n /*\n *following part parse the data returned by the database\n */\n int index=0;\n for(index=0; index<result.size(); index++)\n {\n int j=0;\n if(result[index]==temp[0])\n for(j=0; j<10&&result[index++]==temp[j++]; )\n ;\n if(j==10)\n break;\n }\n if(index<result.size())\n {\n index+=3;\n int end=index;\n for(; end<result.size()&&result[end]!='\\\"'; end++)\n ;\n result=result.substr(index, end-index);\n }\n else\n {\n return \"err\";\n }\n return result;\n}\n\n\n\nint U_NeoDB::set_freq_location(const string & account, const string & location)\n{\n /*\n *this function only called when the first time the user register the account\n */\n /*\n *the location format is city, country\n */\n string request=\"{ \\\"query\\\" : \\\"CREATE (n:Freq_Location { location : {loc} }) RETURN n\\\", \\\"params\\\" : { \\\"loc\\\" : \\\"\"+location+\"\\\"} }\";\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return -1;\n request=\"{ \\\"query\\\" : \\\"MATCH (n:User), (m:Freq_Location) WHERE n.account={acc} AND m.location={loc} CREATE (n)-[r:location]->(m)\\\", \\\"params\\\" : { \\\"acc\\\" : \\\"\"+account+\"\\\", \\\"loc\\\" : \\\"\"+location+\"\\\" } }\";\n if(neo.neo_query(request, result)==-1)\n return -1;\n return 1;\n}\n\nint L_NeoDB::set_prefer(const string & location, int cgy, int sub_cgy)\n{\n string request;\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return -1;\n return 1;\n}\n\nstring L_NeoDB::get_prefer(const string & location)\n{\n string request;\n string result;\n NeoDB neo;\n if(neo.neo_query(request, result)==-1)\n return \"err\";\n return result;\n}" }, { "alpha_fraction": 0.5089514255523682, "alphanum_fraction": 0.5268542170524597, "avg_line_length": 16.81818199157715, "blob_id": "6f9abbd0702d00d48a2edd7074989f0c48c84379", "content_id": "1787b97b96a056280dc1859d2768f67e2f3643ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 391, "license_type": "no_license", "max_line_length": 43, "num_lines": 22, "path": "/Programming/C/Programming_in_Unix/chapter4/link.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <unistd.h>\n#include <fcntl.h>\n#include <stdlib.h>\n\nint main()\n{\n if(open(\"tempfile\", O_RDWR|O_CREAT)<0)\n {\n fprintf(stderr, \"error: open\\n\");\n exit(0);\n }\n if(unlink(\"tempfile\")<0)\n {\n fprintf(stderr, \"error: unlink\\n\");\n exit(0);\n }\n printf(\"file unlinked\\n\");\n sleep(15);\n printf(\"done\\n\");\n return 0;\n}" }, { "alpha_fraction": 0.5168067216873169, "alphanum_fraction": 0.575630247592926, "avg_line_length": 17.384614944458008, "blob_id": "4df005bc10587e86f4405c48dfdb22297ad5ccb9", "content_id": "500575851237501463f7a75931adf963fdd75762", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 238, "license_type": "no_license", "max_line_length": 54, "num_lines": 13, "path": "/Algorithm/Programming_Perls/sqrttime.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <stdlib.h>\n#include <math.h>\n\nint main()\n{\n clock_t time=clock();\n float s;\n for(int i=0; i<100000000; i++)\n s=sqrt(10.0);\n std::cout<<clock()-time<<\"miliseconds\"<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4528043866157532, "alphanum_fraction": 0.47058823704719543, "avg_line_length": 15.266666412353516, "blob_id": "b72aa107062799b12ed42695ef413b0f9adc5300", "content_id": "1ae2a200083088c6ef10955640dbe890080e69dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 731, "license_type": "no_license", "max_line_length": 41, "num_lines": 45, "path": "/Programming/C/The C programming Language/chapter1/reverse.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define MAXLINE 1000\n\nint getLine(char line[],int n);\nvoid reverse(char src[], int cnt);\nint main()\n{\n char input[MAXLINE];\n int cnt;\n while((cnt=getLine(input,MAXLINE))>0)\n {\n reverse(input,cnt);\n printf(\"%d\\n\",cnt);\n printf(\"%s\\n\",input);\n }\n}\n\nint getLine(char line[], int n)\n{\n int i=0;\n char c;\n c=getchar();\n while(c!='\\n'&&i!=n&&c!=EOF)\n {\n line[i]=c;\n c=getchar();\n ++i;\n }\n line[i]='\\0';\n return i;\n}\n\nvoid reverse(char src[], int cnt)\n{\n char tmp;\n for(int i=0;i<=(cnt-1)/2;++i)\n {\n tmp=src[i];\n src[i]=src[cnt-1-i];\n src[cnt-1-i]=tmp;\n printf(\"%d %d\\n\",i, cnt-i-1);\n }\n \n}" }, { "alpha_fraction": 0.449956476688385, "alphanum_fraction": 0.45982012152671814, "avg_line_length": 17.94505500793457, "blob_id": "01b4436903a268ab501f5293d2774fe7b69ba127", "content_id": "5036b5dfcff07a3032a63455e2ab38eb5dcac607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3447, "license_type": "no_license", "max_line_length": 81, "num_lines": 182, "path": "/Programming/C/The C programming Language/chapter6/hash_name_lookup.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\nstruct nlist{\n struct nlist *next;\n char *name;\n char *defn;\n};\n\n#define HASHSIZE 101\n\nstatic struct nlist *hashtab[HASHSIZE];\n\nunsigned _hash(char *s)\n{\n unsigned hashval;\n \n for(hashval=0; *s !='\\0'; s++)\n hashval=*s+31*hashval;\n return hashval%HASHSIZE;\n}\n\n#include<string.h>\nstruct nlist *lookup(char *s)\n{\n struct nlist *np;\n //printf(\"%s\\n\", s);\n //printf(\"%d\\n\", _hash(s));\n for(np=hashtab[_hash(s)]; np!=NULL; np=np->next)\n if(strcmp(s,np->name)==0)\n return np;\n return NULL;\n}\n\n#include<stdlib.h>\n\nchar *_strdup(char *);\n\nstruct nlist *install(char *name, char *defn)\n{\n struct nlist *np;\n unsigned hashval;\n if((np=lookup(name))==NULL)\n {\n np=(struct nlist *)malloc(sizeof(*np));\n if(np==NULL||(np->name=_strdup(name))==NULL)\n return NULL;\n hashval=_hash(name);\n np->next=hashtab[hashval];\n hashtab[hashval]=np;\n }\n else\n free((void *)np->defn);\n if((np->defn=_strdup(defn))==NULL)\n return NULL;\n return np;\n}\n\nchar *_strdup(char *s)\n{\n char *p=(char *)malloc(strlen(s)+1);\n if(p!=NULL)\n strcpy(p,s);\n return p;\n \n}\n\nint undef(char *name)\n{\n struct nlist *np=lookup(name);\n if(np==NULL)\n return 0;\n unsigned val=_hash(name);\n struct nlist *temp=hashtab[val];\n if(strcmp(temp->name,name)==0)\n {\n hashtab[val]=temp->next;\n goto find;\n }\n for(temp=hashtab[val]->next;strcmp(temp->next->name,name)!=0;temp=temp->next)\n ;\n temp->next=np->next;\nfind:\n free((void *)np->name);\n free((void *)np->defn);\n free((void *)np);\n return 1;\n}\n\n#include<ctype.h>\nint getword(char *word, int lim)\n{\n int c, getch(void);\n void ungetch(int);\n char *w=word;\n \n while (isspace(c=getch()))\n ;\n if(c!=EOF)\n *w++=c;\n if(!isalpha(c))\n {\n if(c=='_'&&isalpha(*w++=getch()))\n {\n ungetch(*--w);\n goto alpha;\n }\n if(c=='\\\"'&&isalpha(*w++=getch()))\n {\n ungetch(*--w);\n goto alpha;\n }\n if(c=='/'&&(*w++=getch())=='/')\n {\n if(isalpha(*w++=getch()))\n goto alpha;\n ungetch(*--w);\n ungetch('/');\n }\n if(c=='/'&&(*w++=c=getch())=='*')\n {\n if(isalpha(*w++=getch()))\n goto alpha;\n ungetch(*--w);\n ungetch('*');\n }\n if(c=='#'&&isalpha(*w++=getch()))\n {\n ungetch(*--w);\n goto alpha;\n }\n *w='\\0';\n return c;\n }\nalpha:\n for(;--lim>0&&isalnum(*w++=c=getch());)\n ;\n if(c=='\\\"')\n {\n *w='\\0';\n return word[0];\n }\n if(c=='*'&&(*w++=getch())=='/')\n {\n *w='\\0';\n return word[0];\n }\n *--w='\\0';\n return word[0];\n}\n\nint ch[5];\nint flag=0;\n\nint getch(void)\n{\n return flag>0?ch[--flag]:getchar();\n}\n\nvoid ungetch(int c)\n{\n ch[flag++]=c;\n}\n\n#include<string.h>\nint main()\n{\n char name[32];\n char defn[32];\n struct nlist *np;\n while(getword(name,32)!=EOF&&getword(defn,32)!=EOF)\n {\n install(name, defn);\n np=lookup(name);\n if(np==NULL)\n {\n printf(\"np==NULL\\n\");\n return 0;\n }\n printf(\"%s %s \", np->name, np->defn);\n printf(\"%d\\n\", undef(np->name));\n }\n return 0;\n}" }, { "alpha_fraction": 0.47891566157341003, "alphanum_fraction": 0.509036123752594, "avg_line_length": 11.807692527770996, "blob_id": "6020b618df218feeda0a4ee7577c6525bf1ec17b", "content_id": "cecdce0c9e5823af158b8c567b65dc990a1a44fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 332, "license_type": "no_license", "max_line_length": 39, "num_lines": 26, "path": "/Programming/C++/Inside_the_C++_object_model/chapter3/test_access_section.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<assert.h>\n\n\n\nclass A{\npublic:\n A():a(1),b(2),c(3){}\n int a;\n\nprivate:\n int c;\nprotected:\n int b;\n};\n\nint main()\n{\n A a;\n A *p=&a;\n int *p1=reinterpret_cast<int *>(p);\n std::cout<<*p1<<std::endl;\n std::cout<<*(p1+1)<<std::endl;\n std::cout<<*(p1+2)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.39728352427482605, "alphanum_fraction": 0.441426157951355, "avg_line_length": 16.878787994384766, "blob_id": "149eaad7766016ac6897a138860735cd219e4550", "content_id": "5a7be0ef34dc1bf02a634facec77fbc876647325", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 589, "license_type": "no_license", "max_line_length": 36, "num_lines": 33, "path": "/Programming/C/The C programming Language/chapter4/atof.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<ctype.h>\n\ndouble atof(char source[]);\n\nint main()\n{\n //double atof(char s[]);\n char s[]=\"-12.6666\";\n printf(\"%6.4f\\n\", atof(s));\n return 0;\n}\n\ndouble atof(char s[])\n{\n double val, power;\n int i, sign;\n for(i=0;isspace(s[i]);i++)\n ;\n sign=(s[i]=='-')?-1:1;\n if(s[i]=='-'||s[i]=='+')\n i++;\n for(val=0.0; isdigit(s[i]);i++)\n val=10.0*val+(s[i]-'0');\n if(s[i]=='.')\n i++;\n for(power=1.0;isdigit(s[i]);i++)\n {\n val=10.0*val+(s[i]-'0');\n power*=10;\n }\n return sign*val/power;\n}" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 14, "blob_id": "bcfc20c48e900c3d9fa1be8fcf0e3ca1fa8db3d1", "content_id": "843e777d3ac58ba00a9eaaa99a922c0238733566", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 30, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/Programming/Lua/3Statements/first.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "x = math.pi\nprint(x - x%0.01)\n" }, { "alpha_fraction": 0.47826087474823, "alphanum_fraction": 0.510869562625885, "avg_line_length": 14.333333015441895, "blob_id": "f8e01482ef29f1f7f10f065c7a21d3aa7c0dd12f", "content_id": "61c5b45989792c2c68eec099362a94ac06b9875b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "no_license", "max_line_length": 23, "num_lines": 6, "path": "/Programming/Python/7String/exercise.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "s=\"spam\";\nprint(s.find('a'));\nl=list(s);\ns='s,pa,m';\nprint(s[2:4]);\nprint(s.split(',')[1]);\n" }, { "alpha_fraction": 0.5282726287841797, "alphanum_fraction": 0.5373095870018005, "avg_line_length": 29.983999252319336, "blob_id": "6528f2188bbdcec30ee7702891cff2c3ec6635fb", "content_id": "934d4d34b56437e33e93b6cb9f0e944af2ed9f17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3873, "license_type": "no_license", "max_line_length": 75, "num_lines": 125, "path": "/Programming/C/Programming_in_Unix/chapter14/test_mlock.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <errno.h>\n#include <fcntl.h>\n#include <sys/wait.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <sys/stat.h>\n\n/*\n * functions to lock or unlock a regio of a file\n */\nint\nlock_reg(int fd, int cmd, int type, off_t offset, int whence, off_t len)\n{\n struct flock lock;\n \n lock.l_type = type; /* F_RDLCK, F_WRLCK, F_UNLCK */\n lock.l_start = offset; /* byte offset, relative to l_whence */\n lock.l_whence = whence; /* SEEK_SET, SEEK_CUR, SEEK_END */\n lock.l_len = len; /* #bytes (0 means to EOF) */\n \n return (fcntl(fd, cmd, &lock));\n}\n\n#define read_lock(fd, offset, whence, len) \\\nlock_reg((fd), F_SETLK, F_RDLCK, (offset), (whence), (len))\n#define readw_lock(fd, offset, whence, len) \\\nlock_reg((fd), F_SETLKW, F_RDLCK, (offset), (whence), (len))\n#define write_lock(fd, offset, whence, len) \\\nlock_reg((fd), F_SETLK, F_WRLCK, (offset), (whence), (len))\n#define writew_lock(fd, offset, whence, len) \\\nlock_reg((fd), F_SETLKW, F_WRLCK, (offset), (whence), (len))\n#define un_lock(fd, offset, whence, len) \\\nlock_reg((fd), F_SETLK, F_UNLCK, (offset), (whence), (len))\n\n/*\n * testing for a lock\n */\n\npid_t\nlock_test(int fd, int type, off_t offset, int whence, off_t len)\n{\n struct flock lock;\n lock.l_type = type; /* F_RDLCK or F_WRLCK */\n lock.l_start = offset; /* byte offset, relative to l_whence */\n lock.l_whence = whence; /* SEEK_SET, SEEK_CUR, SEEK_END */\n lock.l_len = len; /* #bytes (0 means to EOF) */\n if(fcntl(fd, F_GETLK, &lock)<0)\n {\n fprintf(stderr,\"error: fcntl\\n\");\n exit(0);\n }\n if(lock.l_type==F_UNLCK)\n return(0); /*false, the region is not locked by another proc*/\n return (lock.l_pid); /*true, return pid*/\n}\n\n#define is_read_lockable(fd, offset, whence, len) \\\n(lock_test((fd), F_RDLCK, (offset), (whence), (len)) == 0)\n#define is_write_lockable(fd, offset, whence, len) \\\n(lock_test((fd), F_WRLCK, (offset), (whence), (len)) == 0)\n\nvoid err_sys(const char *s)\n{\n fprintf(stderr, \"%s\\n\", s);\n exit(0);\n}\n\nint\nmain(int argc, char *argv[])\n{\n int fd;\n pid_t pid;\n char buf[5];\n struct stat statbuf;\n if (argc != 2) {\n fprintf(stderr, \"usage: %s filename\\n\", argv[0]);\n exit(1);\n }\n if ((fd = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, FILESEC_MODE)) < 0)\n err_sys(\"open error\");\n if (write(fd, \"abcdef\", 6) != 6)\n err_sys(\"write error\");\n \n /* turn on set-group-ID and turn off group-execute */\n if (fstat(fd, &statbuf) < 0)\n err_sys(\"fstat error\");\n if (fchmod(fd, (statbuf.st_mode & ~S_IXGRP) | S_ISGID) < 0)\n err_sys(\"fchmod error\");\n \n //TELL_WAIT();\n \n if ((pid = fork()) < 0) {\n err_sys(\"fork error\");\n } else if (pid > 0) { /* parent */\n /* write lock entire file */\n if (write_lock(fd, 0, SEEK_SET, 0) < 0)\n err_sys(\"write_lock error\");\n \n //TELL_CHILD(pid);\n \n if (waitpid(pid, NULL, 0) < 0)\n err_sys(\"waitpid error\");\n } else { /* child */\n //WAIT_PARENT(); /* wait for parent to set lock */\n \n //set_fl(fd, O_NONBLOCK);\n \n /* first let's see what error we get if region is locked */\n if (read_lock(fd, 0, SEEK_SET, 0) != -1) /* no wait */\n err_sys(\"child: read_lock succeeded\");\n printf(\"read_lock of already-locked region returns %d\\n\",\n errno);\n \n /* now try to read the mandatory locked file */\n if (lseek(fd, 0, SEEK_SET) == -1)\n err_sys(\"lseek error\");\n if (read(fd, buf, 2) < 0)\n err_sys(\"read failed (mandatory locking works)\");\n else\n printf(\"read OK (no mandatory locking), buf = %2.2s\\n\",\n buf);\n }\n exit(0);\n}\n" }, { "alpha_fraction": 0.5849056839942932, "alphanum_fraction": 0.5916442275047302, "avg_line_length": 16.690475463867188, "blob_id": "2bc5e08d34fe34a77fa596329c78f4e9937017c9", "content_id": "ea436a37dd2fbb6d490d5cc096c2150cfc892641", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 742, "license_type": "no_license", "max_line_length": 62, "num_lines": 42, "path": "/Programming/C/Programming_in_Unix/chapter18/winsize.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <termios.h>\n#include <stdio.h>\n#include <stdlib.h>\n#ifndef TIOCGWINSZ\n#include <sys/ioctl.h>\n#endif\n#include <sys/ioctl.h>\n#include <unistd.h>\n\nstatic void\npr_winsize(int fd)\n{\n struct winsize size;\n if(ioctl(fd, TIOCGWINSZ, (char *)&size)<0)\n {\n fprintf(stderr, \"TIOCGWINSZ error\\n\");\n exit(0);\n }\n printf(\"%d rows, %d columns\\n\", size.ws_row, size.ws_col);\n}\n\nstatic void\nsig_winch(int signo)\n{\n printf(\"SIGWINCH received\\n\");\n pr_winsize(STDIN_FILENO);\n}\n\nint\nmain()\n{\n if(isatty(STDIN_FILENO)==0)\n exit(0);\n if(signal(SIGWINCH, sig_winch)==SIG_ERR)\n {\n fprintf(stderr, \"signal error\\n\");\n exit(0);\n }\n pr_winsize(STDIN_FILENO);\n for(;;)\n pause();\n}" }, { "alpha_fraction": 0.4878048896789551, "alphanum_fraction": 0.5365853905677795, "avg_line_length": 19.5, "blob_id": "91448856b405e83602ee82c29d2f62f840449fe8", "content_id": "67e1c884f5a29084fcb58f2d30ac88778afcb37f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 41, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/Programming/Lua/3Statements/stringcat.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "print(\"Hello \" .. \"World\")\nprint(0 .. 1)\n" }, { "alpha_fraction": 0.48657718300819397, "alphanum_fraction": 0.5067114233970642, "avg_line_length": 13.949999809265137, "blob_id": "e5e8776dd65ff1746dab0db92a30ccbe2c0efcc6", "content_id": "bd8ec1a7a6ddc8400a785b59fb28c7f2c8994d80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 298, "license_type": "no_license", "max_line_length": 47, "num_lines": 20, "path": "/Programming/C++/Inside_the_C++_object_model/chapter6/static_initialization.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint i=10;\nint j=i;\nint fun()\n{\n int k=100;\n std::cout<<\"fun execute\"<<std::endl;\n return k;\n}\nint f=fun();\nint *p=new int(i);\n\nint main()\n{\n std::cout<<\"f: \"<<f<<std::endl;\n std::cout<<\"i: \"<<i<<\"; j: \"<<j<<std::endl;\n std::cout<<*p<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4794520437717438, "alphanum_fraction": 0.5616438388824463, "avg_line_length": 17.25, "blob_id": "6f8a08367ed57117f48153f31fe778850fa3e6bc", "content_id": "2f91379fdd7b3e057e8616f5c4cd6ac49992c76a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 219, "license_type": "no_license", "max_line_length": 49, "num_lines": 12, "path": "/Programming/Lua/3Statements/test.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "a = {x=10, y=20}\nprint(a)\nprint(a.x, a.y)\npolyline = {color=\"blue\", thickness=2, npoints=4,\n\t\t{x=0, y=0},\n\t\t{x=-10, y=0},\n\t\t{x=-10, y=1},\n\t\t{x=0, y=1},\n\t}\nprint(polyline[2].x)\nprint(polyline[4].y)\nprint(polyline.color)\n" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.3492063581943512, "avg_line_length": 8.142857551574707, "blob_id": "972d28c220a94f796614f26d2a2920470072149d", "content_id": "e011b8ebc8e6c17983aa177e9318cb7e72084b07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 63, "license_type": "no_license", "max_line_length": 18, "num_lines": 7, "path": "/Programming/C++/ISO_C++11/Basic_Concepts/scope.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "int main()\n{\n const int i=2;\n {\n int i[i];\n }\n}" }, { "alpha_fraction": 0.3190406858921051, "alphanum_fraction": 0.34593021869659424, "avg_line_length": 20.123077392578125, "blob_id": "9ff3d8351ae7649084c76aa71b0a86ce6cabcd03", "content_id": "68990c8cea7cb6f7e1a60e0ec4685038f59a3f75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1376, "license_type": "no_license", "max_line_length": 63, "num_lines": 65, "path": "/Programming/Practice/Google/RoundC/1.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <fstream>\n#include <string>\n#include <vector>\nusing namespace std;\n\n\n\nint func(vector<string> &vec, vector<string> &flag)\n{\n for(int i=0; i<vec.size(); i++)\n {\n for(int j=0; j<vec.size(); j++)\n {\n if(vec[i][j]=='*')\n {\n flag[i][j]=-1;\n if(i-1>=0)\n flag[i-1][j]++;\n if(i+1<vec.size())\n flag[i+1][j]++;\n if(j-1>=0)\n flag[i][j-1]++;\n if(j+1<vec.size())\n flag[i][j+1]++;\n if(i-1>=0&&j-1>=0)\n flag[i-1][j-1]++;\n if(i-1>=0&&j+1<vec.size())\n flag[i-1][j+1]++;\n if(i+1<=vec.size()&&j-1>=0)\n flag[i+1][j-1]++;\n if(i+1<=vec.size()&&j+1<vec.size())\n flag[i+1][j+1]++;\n }\n }\n }\n \n}\n\n\n\nint main()\n{\n ifstream ifs(\"test\");\n if(!ifs.is_opend())\n {\n return 0;\n }\n \n int num;\n ifs>>num;\n for(int i=0; i<num; i++)\n {\n int n;\n ifs>>n;\n string str;\n vector<string> svec;\n for(int i=0; i<n; i++)\n {\n ifs>>str;\n svec.push_back(str);\n }\n vector<string> flag(svec.size(), string(svec.size, 0));\n }\n}\n\n\n\n" }, { "alpha_fraction": 0.5673469305038452, "alphanum_fraction": 0.6204081773757935, "avg_line_length": 34.07143020629883, "blob_id": "6b50e59d72b3dc157e81a03fa9c1f9923dbfa62e", "content_id": "c1773159fd4b9b86c2861693214bc28f7255bc2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 490, "license_type": "no_license", "max_line_length": 107, "num_lines": 14, "path": "/Project/source/SNS/test_main.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"rp2p.h\"\n#include <iostream>\n\nint main()\n{\n std::string s;\n //std::cout<<Action::login(\"account:JERRY password:jerry client:WP ip:54.86.202.236\", s)<<std::endl;\n //std::cout<<Action::log_out(\"account:JERRY\")<<std::endl;\n //std::cout<<Action::keep_alive(\"account:JERRY ip:54.86.202.236\", UPD_LOC, 0, s)<<std::endl;\n std::cout<<Action::keep_alive(\"account:123456\", UPD_INFO, 0, s)<<std::endl;\n std::cout<<s<<std::endl;\n const std::string &str=s;\n return 0;\n}" }, { "alpha_fraction": 0.5824742317199707, "alphanum_fraction": 0.5824742317199707, "avg_line_length": 16.636363983154297, "blob_id": "c0cc9290832e9a9c3fab246ed327b8ff251e64d6", "content_id": "6283904aababa867eb3ca2323af76a6bf67ca5bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 24, "num_lines": 11, "path": "/Programming/Python/4ObjectType/linghuo.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "L=[];\nprint(type(L));\nprint(type(type(L)));\nif type(L)==type([]):\n\tprint('yes');\nif type(L)==list:\n\tprint('yes');\nif isinstance(L, list):\n\tprint('yes');\n#if isinstance(list, L):\n#\tprint('yes');\n" }, { "alpha_fraction": 0.6136363744735718, "alphanum_fraction": 0.6477272510528564, "avg_line_length": 13.666666984558105, "blob_id": "1e37c5cdab662ebf0a9b35c6fcb70e5b6601975d", "content_id": "91fbaa4de75c10101a13679d666bc994d9526f21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/Programming/Python/20HigherIterator/gensqure.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def gensquares(N):\n\tfor i in range(N):\n\t\tyield i**2\n\nfor i in gensquares(10):\n\tprint(i)\n" }, { "alpha_fraction": 0.5206349492073059, "alphanum_fraction": 0.5460317730903625, "avg_line_length": 14.047618865966797, "blob_id": "2717d8164778c46bd6231a5c5fb284fedcd83c3e", "content_id": "8da6c889b05cfde37624acba5b2d4bd5ee1cc7e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 315, "license_type": "no_license", "max_line_length": 41, "num_lines": 21, "path": "/Programming/Practice/Interpretation/1.17_multiplication.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ndouble multiply(double base, int n)\n{\n bool flag;\n if(n%2!=0)\n flag=true;\n else flag=false;\n double sum=base;\n while((n=n/2)>0)\n sum+=sum;\n if(flag)\n return base+sum;\n return sum;\n}\n\nint main()\n{\n std::cout<<multiply(10,4)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.6345298886299133, "alphanum_fraction": 0.6577777862548828, "avg_line_length": 27.125, "blob_id": "b92694c1c831fb33c74a6a631ff5981c5c68d9d2", "content_id": "538e12262bc5924e150e22930f4b9c040c4702d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2925, "license_type": "no_license", "max_line_length": 105, "num_lines": 104, "path": "/Cocos2d/Particle/Classes/HelloWorldScene.cpp", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"HelloWorldScene.h\"\n#include \"cocostudio/CocoStudio.h\"\n#include \"ui/CocosGUI.h\"\n\nUSING_NS_CC;\n\nusing namespace cocostudio::timeline;\n\nScene* HelloWorld::createScene()\n{\n // 'scene' is an autorelease object\n auto scene = Scene::create();\n \n // 'layer' is an autorelease object\n auto layer = HelloWorld::create();\n\n // add layer as a child to scene\n scene->addChild(layer);\n auto _emitter = ParticleSystemQuad::createWithTotalParticles(50);\n scene->addChild(_emitter, 10);\n _emitter->setTexture(Director::getInstance()->getTextureCache()->addImage(\"Close.png\"));\n _emitter->setAnchorPoint(Point(0,0));\n _emitter->setDuration(ParticleSystem::DURATION_INFINITY);\n _emitter->setEmitterMode(ParticleSystem::Mode::RADIUS);\n _emitter->setStartRadius(4);\n _emitter->setStartRadiusVar(1);\n _emitter->setEndRadius(ParticleSystem::START_RADIUS_EQUAL_TO_END_RADIUS);\n _emitter->setEndRadiusVar(0);\n \n _emitter->setRotatePerSecond(100);\n _emitter->setRotatePerSecondVar(0);\n \n _emitter->setAngle(90);\n _emitter->setAngleVar(0);\n \n auto size = Director::getInstance()->getWinSize();\n _emitter->setPosVar(Point::ZERO);\n \n _emitter->setLife(0.5);\n _emitter->setLifeVar(0);\n \n _emitter->setStartSpin(0);\n _emitter->setStartSpinVar(0);\n _emitter->setEndSpin(0);\n _emitter->setEndSpinVar(0);\n \n Color4F startColor(0.0f, 0.8f, 0.9f, 1.0f);\n _emitter->setStartColor(startColor);\n \n Color4F startColorVar(0, 0, 0, 1.0f);\n _emitter->setStartColorVar(startColorVar);\n \n Color4F endColor(1.0f, 1.0f, 1.0f, 0.1f);\n _emitter->setEndColor(endColor);\n \n Color4F endColorVar(0, 0, 0, 0.1f);\n _emitter->setEndColorVar(endColorVar);\n \n _emitter->setStartSize(20);\n _emitter->setStartSizeVar(1);\n _emitter->setEndSize(0);\n \n _emitter->setEmissionRate(_emitter->getTotalParticles()/_emitter->getLife());\n \n _emitter->setBlendAdditive(false);\n \n _emitter->setPosition(Point(200, 200));\n // return the scene\n return scene;\n}\n\n// on \"init\" you need to initialize your instance\nbool HelloWorld::init()\n{\n //////////////////////////////\n // 1. super init first\n if ( !Layer::init() )\n {\n return false;\n }\n \n auto rootNode = CSLoader::createNode(\"MainScene.csb\");\n\n addChild(rootNode);\n\n auto closeItem = static_cast<ui::Button*>(rootNode->getChildByName(\"Button_1\"));\n closeItem->addTouchEventListener(CC_CALLBACK_1(HelloWorld::menuCloseCallback, this));\n\n return true;\n}\n\nvoid HelloWorld::menuCloseCallback(Ref* pSender)\n{\n#if (CC_TARGET_PLATFORM == CC_PLATFORM_WP8) || (CC_TARGET_PLATFORM == CC_PLATFORM_WINRT)\n\tMessageBox(\"You pressed the close button. Windows Store Apps do not implement a close button.\",\"Alert\");\n return;\n#endif\n\n Director::getInstance()->end();\n\n#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)\n exit(0);\n#endif\n}\n" }, { "alpha_fraction": 0.5167173147201538, "alphanum_fraction": 0.5562310218811035, "avg_line_length": 12.199999809265137, "blob_id": "71143d57c4181ed8c800d19dba6ed546fb9fb687", "content_id": "3c8b4f71310d33f378d29ea4b4d5a73aa69913a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 329, "license_type": "no_license", "max_line_length": 38, "num_lines": 25, "path": "/Programming/Practice/Interpretation/1.15sine.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<cmath>\n\ndouble sine(double x);\n\nint main()\n{\n sine(10);\n std::cout<<sine(12.15)<<std::endl;\n return 0;\n}\n\ndouble p(double x)\n{\n static int i=1;\n std::cout<<i++<<std::endl;\n return 3*x-4*x*x*x;\n}\n\ndouble sine(double x)\n{\n if(abs(x)<0.1)\n return x;\n return p(sine(x/3));\n}" }, { "alpha_fraction": 0.41428571939468384, "alphanum_fraction": 0.4457142949104309, "avg_line_length": 15.690476417541504, "blob_id": "9a86d0557e3db7d7e748961395d8f58dbdd8f1d2", "content_id": "2174e95dd3f5a0536b73ab5214c99eba26a86b9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 700, "license_type": "no_license", "max_line_length": 42, "num_lines": 42, "path": "/Programming/C/The C programming Language/chapter5/tail.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n#include<string.h>\n\nint _getline(char *s);\nint main(int argc, char **argv)\n{\n int n=10, temp;\n if(argc>1)\n {\n temp=atof(*(argv+1)+1);\n if(temp==0)\n return 0;\n n=temp;\n }\n char * text[36];\n char line[1000];\n char *p=line;\n int i=0;\n char c;\n while((c=_getline(p))!=EOF&&i<35)\n {\n text[i++]=p;\n p=p+strlen(p);\n }\n text[i]=NULL;\n i=(i-n)<0?0:i-n;\n for(int j=0;j<n&&text[j]!=NULL;j++)\n {\n printf(\"%d:%s\\n\", j+1, text[i++]);\n }\n return 0;\n}\n\nint _getline(char *s)\n{\n char c;\n while((c=getchar())!=EOF&&c!='\\n')\n *s++=c;\n *s='\\0';\n return c;\n}" }, { "alpha_fraction": 0.4888888895511627, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 44, "blob_id": "35ee3621259a5b82372a98dc5b71298fa9189f9d", "content_id": "ee55b6ef607fd3f84729d62750034a8f74fd5222", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 44, "num_lines": 1, "path": "/Programming/Python/14iteration/for_in_for.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "print([x + y for x in 'abc' for y in '666'])\n" }, { "alpha_fraction": 0.5557894706726074, "alphanum_fraction": 0.5684210658073425, "avg_line_length": 18.83333396911621, "blob_id": "dbe30a87cd60c6c1d499450a0cb90d34b6cc1428", "content_id": "9fbefcfdc95306f21867379aa91299109a819546", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 475, "license_type": "no_license", "max_line_length": 64, "num_lines": 24, "path": "/Programming/C/Programming_in_Unix/chapter4/umask.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <sys/stat.h>\n#include <unistd.h>\n#include <fcntl.h>\n\n#define RWRWRW (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH)\n\nint main()\n{\n umask(0);\n if(creat(\"foo\", RWRWRW)<0)\n {\n fprintf(stderr, \"error: creat for foo\\n\");\n exit(0);\n }\n umask(S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH);\n if(creat(\"bar\", RWRWRW)<0)\n {\n fprintf(stderr, \"error: creat for bar\\n\");\n exit(0);\n }\n return 0;\n}" }, { "alpha_fraction": 0.5023885369300842, "alphanum_fraction": 0.5254777073860168, "avg_line_length": 21.446428298950195, "blob_id": "5f56793fde65bb4fe3c2a6a7f6724897975ebae8", "content_id": "00d24fd0e79826df67aac6f84c76943a604ceb0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1256, "license_type": "no_license", "max_line_length": 73, "num_lines": 56, "path": "/Programming/C/Network_Programming_in_Unix/chapter1/daytimetcpcli.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <unistd.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <arpa/inet.h>\n#include <string.h>\n\nint\nmain(int argc, char **argv)\n{\n int sockfd, n;\n char recvline[1500];\n struct sockaddr_in servaddr;\n \n if(argc!=2)\n {\n fprintf(stderr, \"usage: a.out <IPaddress>\\n\");\n exit(0);\n }\n if((sockfd = socket(AF_INET, SOCK_STREAM, 0))<0)\n {\n fprintf(stderr, \"error: create sock\\n\");\n exit(0);\n }\n bzero(&servaddr, sizeof(servaddr));\n servaddr.sin_family=AF_INET;\n servaddr.sin_port=htons(13);\n if(inet_pton(AF_INET, argv[1], &servaddr.sin_addr)<=0)\n {\n fprintf(stderr, \"error: inet_pton for %s\\n\", argv[1]);\n exit(0);\n }\n if(connect(sockfd, (struct sockaddr *)&servaddr, sizeof(servaddr))<0)\n {\n fprintf(stderr, \"connect error\\n\");\n exit(0);\n }\n int i=10;\n //while(i>0)\n //{\n n=read(sockfd, recvline, 1500);\n printf(\"received %d bytes\\n\", n);\n recvline[n]=0;\n if(fputs(recvline+1, stdout)==EOF)\n {\n fprintf(stderr, \"error: fputs\\n\");\n exit(0);\n }\n //i--;\n //}\n if(n<0)\n {\n fprintf(stderr, \"read error\\n\");\n }\n return 0;\n}" }, { "alpha_fraction": 0.30166271328926086, "alphanum_fraction": 0.30641329288482666, "avg_line_length": 13.551724433898926, "blob_id": "138339477ec8a6836e0e7792db153aea94b872b7", "content_id": "1ec18011214a38e4c987f9d0e225ba1339aa51a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 421, "license_type": "no_license", "max_line_length": 36, "num_lines": 29, "path": "/Programming/C/The C programming Language/chapter1/wordline.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define IN 0\n#define OUT 1\n\nint main()\n{\n int c;\n int state;\n state=OUT;\n while((c=getchar())!=EOF)\n {\n if(c=='\\t'||c==' '||c=='\\n')\n {\n if(state==OUT);\n else\n {\n state=IN;\n printf(\"\\n\");\n }\n }\n else\n {\n putchar(c);\n state=IN;\n }\n \n }\n}" }, { "alpha_fraction": 0.46341463923454285, "alphanum_fraction": 0.4878048896789551, "avg_line_length": 9, "blob_id": "74779eed2530e7328465a53545cead80a37e7081", "content_id": "53c15de5343448f29389a4b42a5515d20c820af0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41, "license_type": "no_license", "max_line_length": 10, "num_lines": 4, "path": "/Programming/Python/13Loop/while_string.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "x = 'spam'\nwhile x:\n\tprint x\n\tx = x[1:]\n\n" }, { "alpha_fraction": 0.5041797161102295, "alphanum_fraction": 0.5114942789077759, "avg_line_length": 20.51685333251953, "blob_id": "54f988dc5e720225b1b6c2e5a4e4e5e929df6633", "content_id": "9cd40ccc7c4696cdb71d322a7dbf9981d7040dd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1914, "license_type": "no_license", "max_line_length": 72, "num_lines": 89, "path": "/Programming/C/Programming_in_Unix/chapter2/sysconf.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<errno.h>\n#include<unistd.h>\n#include<stdlib.h>\n#include<limits.h>\n\nstatic void pr_sysconf(char *, int);\nstatic void pr_pathconf(char *, char *, int);\n\nint main(int argc, char * argv[])\n{\n if(argc !=2)\n {\n fprintf(stderr, \"usage: a.out <dirname>\\n\");\n exit(1);\n }\n#ifdef ARG_MAX\n printf(\"ARG_MAX defined to be %d\\n\", ARG_MAX+0);\n#else\n printf(\"no symbol for ARG_MAX\\n\");\n#endif\n#ifdef _SC_ARG_MAX\n pr_sysconf(\"ARG_MAX=\", _SC_ARG_MAX);\n#else\n printf(\"no symbol for SC_ARG_MAX\\n\");\n#endif\n#ifdef MAX_CANON\n printf(\"MAX_CANON defined to be %d\\n\", MAX_CANON+0);\n#else\n printf(\"no symbol for MAX_CANON\\n\");\n#endif\n#ifdef _PC_MAX_CANON\n pr_pathconf(\"MAX_CANON =\", argv[1], _PC_MAX_CANON);\n#else\n printf(\"no symbol for _PC_MAX_CANON\\n\");\n#endif\n \n /* similar processing for all the rest of the pathconf symbols... */\n \n exit(0);\n}\n\nstatic void pr_sysconf(char *mesg, int name)\n{\n long val;\n fputs(mesg, stdout);\n errno=0;\n if((val=sysconf(name))<0)\n {\n if(errno!=0)\n {\n if(errno==EINVAL)\n fputs(\" (not supported)\\n\", stdout);\n else\n {\n fprintf(stderr, \"sysconf error\\n\");\n exit(0);\n }\n }\n else \n fputs(\"(no limit)\\n\", stdout);\n }\n else\n printf(\"%ld\\n\", val);\n}\n\nstatic void pr_pathconf(char *mesg, char *path, int name)\n{\n long val;\n fputs(mesg, stdout);\n errno=0;\n if((val=pathconf(path, name))<0)\n {\n if(errno!=0)\n {\n if(errno==EINVAL)\n fputs(\" (not supported)\\n\", stdout);\n else\n {\n fprintf(stderr, \"pathconf error, path=%s\", path);\n exit(0);\n }\n }\n else\n fputs(\" (no limit)\\n\", stdout);\n }\n else\n printf(\"%ld\\n\", val);\n}" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.5625, "avg_line_length": 9.777777671813965, "blob_id": "9fd37aae99015248e2203f596206f14f451eae3f", "content_id": "00a8262c0041e5337a5440a24f6567492f7410ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 96, "license_type": "no_license", "max_line_length": 28, "num_lines": 9, "path": "/Programming/C++/ISO_C++11/Lexical_Conventions/preprocessing.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#define R \"x\"\nchar c[10]=R\"y\";\n\n#include <iostream>\n\nint main()\n{\n std::cout<<c<<std::endl;\n}" }, { "alpha_fraction": 0.4651162922382355, "alphanum_fraction": 0.5058139562606812, "avg_line_length": 20.5, "blob_id": "56616b3f930be77177ded885f758b67e276fab4c", "content_id": "5a7d16a21776b6930e4832de7e09cb36f4b6866e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/Programming/Python/13Loop/map.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "S1 = 'abc'\nS2 = 'xyz123'\nprint(map(None, S1, S2))\nprint(list(map(ord, 'spam')))\nprint('**************************')\nres = []\nfor c in 'spam': res.append(ord(c))\nprint(res)\n" }, { "alpha_fraction": 0.47330960631370544, "alphanum_fraction": 0.5088967680931091, "avg_line_length": 12.428571701049805, "blob_id": "f78bc8de9865ed4223ca67c32a2cb93c0b23a0d0", "content_id": "d088fe97506b76dd748da3f0586b154311a9341a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 281, "license_type": "no_license", "max_line_length": 34, "num_lines": 21, "path": "/Programming/C/Network_Programming_in_Unix/chapter3/byteorder.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nunion\n{\n short s;\n char a[sizeof(short)];\n}test;\n\nint\nmain()\n{\n test.s=0x0102;\n printf(\"%s\\n\", CPU_VENDOR_OS);\n if(test.a[0]==1&&test.a[1]==2)\n {\n printf(\"big endian\\n\");\n }\n else\n printf(\"little endian\\n\");\n return 0;\n}" }, { "alpha_fraction": 0.3627019226551056, "alphanum_fraction": 0.3935389220714569, "avg_line_length": 16.461538314819336, "blob_id": "c299f0f2236df3918b432790fa5ac1c1a6f8e362", "content_id": "8cd28449bcf9ccc510b1dd2e73216b95f7667680", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 681, "license_type": "no_license", "max_line_length": 46, "num_lines": 39, "path": "/Algorithm/Algorithm/chapter2/majority.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nT majority(T *a, int n);\n\nint main()\n{\n int a[]={2, 1, 2};\n std::cout<<majority(a, 3)<<std::endl;\n int b[]={2, 1, 2, 2};\n std::cout<<majority(b, 4)<<std::endl;\n return 0;\n}\n\ntemplate <typename T>\nT majority(T *a, int n)\n{\n if(n<=1)\n return *a;\n T *tar=new T[(n+1)/2];\n int i, k=0;\n for(i=0; i<n-1; i+=2)\n if(*(a+i)==*(a+i+1))\n {\n *(tar+k)=*(a+i);\n k++;\n }\n if(i==n-1)\n {\n if(*(a+i)==*(a+i-1)||*(a+i)==*(a+i-2))\n {\n *(tar+k)=*(a+i);\n k++;\n }\n }\n int can=majority(tar, k);\n delete [] tar;\n return can;\n}\n" }, { "alpha_fraction": 0.5849056839942932, "alphanum_fraction": 0.6037735939025879, "avg_line_length": 8, "blob_id": "755a1aaf81e9aae4fc3b35855f0247cfd1013c8b", "content_id": "005ebc55acb38062aa167920687203239b6659f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 53, "license_type": "no_license", "max_line_length": 18, "num_lines": 6, "path": "/Programming/C++/Effective_STL/Iterators/const.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n int const i=1;\n}" }, { "alpha_fraction": 0.5040000081062317, "alphanum_fraction": 0.5887407660484314, "avg_line_length": 20.360759735107422, "blob_id": "5565256e1028fd2175356fa8dc6bcf42c471583f", "content_id": "4e4600501dad174519bbe6af0e6cf55b6a21cce2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3375, "license_type": "no_license", "max_line_length": 73, "num_lines": 158, "path": "/Programming/Python/5Number/variable.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "a=3;\nb=4;\nprint(a+1, a-1);\nprint(b*3, b/2);\nprint(a%2, b**2);\nprint(2+4.0, 2.0**b);\nprint(b/2 + a);\nprint(b/(2.0+a));\nnum=1/3.0;\nprint(num);\nprint('%e' % num);\nprint('%4.2f' % num);\nprint('{0:4.2f}'.format(num));\nprint(1<2);\nprint(2.0>=1);\nnum=1/3.0;\nprint(str(num));\nprint(repr(num));\nprint(2.0==2.0);\nprint(2.0!=2.0);\nprint(2.0==2);\nx=2;\ny=4;\nz=6;\nprint(x<y<z);\nprint(x<y and y<z);\nprint(x<y>z);\nprint(x<y and y>z);\nprint(10/4);\nprint(10//4);\nprint(10/4.0);\nprint(10//4.0);\nimport math;\nprint(math.floor(2.5));\nprint(math.floor(-2.5));\nprint(math.trunc(2.5));\nprint(math.trunc(-2.5));\nprint('***********');\nprint(5/2, 5/-2);\nprint(5//2, 5//-2);\nprint(5/2.0, 5/-2.0);\nprint(5//2.0, 5//-2.0);\nprint('***********');\nprint(0o1, 0o20, 0o377);\nprint(0x01, 0x10, 0xff);\nprint(0b1, 0b10000, 0b11111111);\nprint('**********');\nprint(oct(255), hex(255), bin(255));\nx=0xffffffffffffffffffffffff;\nprint(oct(x));\nprint(hex(x));\nprint(bin(x));\nx=0b0001;\nprint(x<<2);\nprint(bin(x<<2));\nprint(bin(x|0b010));\nx=0xff;\nprint(bin(x));\nprint(x^0b10101010);\nprint(bin(x^0b10101010));\nprint(int('1010101', 2));\nprint(hex(85));\nx=99;\nprint(bin(x), x.bit_length());\nprint(len(bin(x)));\nprint(math.pi, math.e);\nprint(math.sin(2*math.pi/180));\nprint(math.sqrt(144), math.sqrt(2));\nprint(pow(2,4), 2**4);\nprint(abs(-42.0), sum((1,2,3,4)));\nprint(min(3,1,2,4), max(3,1,2,4));\nprint('********');\nprint(round(3.1));\nprint('%.1f' %2.567, '{0:.2f}'.format(2.567));\nprint('*********');\nprint(math.sqrt(144), 144**.5, pow(144,.5));\nimport random;\nprint('********');\nprint(random.random());\nprint(random.randint(1,10));\nprint(random.randint(1,10));\nprint(random.choice(['Life of Brain', 'Holy Grail', 'Meaning of Life']));\nprint('********');\nimport decimal;\ndecimal.getcontext().prec=4;\nprint(decimal.Decimal(1)/decimal.Decimal(8));\nprint('********');\nwith decimal.localcontext() as ctx:\n\tctx.prec=2;\n\tprint(decimal.Decimal('1.00')/decimal.Decimal('3.00'));\n\nprint(decimal.Decimal('1.00')/decimal.Decimal('3.00'));\nprint('********fraction*********');\nfrom fractions import Fraction;\nx=Fraction(1,3);\ny=Fraction(4,6);\nprint(x);\nprint(y);\nprint(x+y);\nprint(x-y);\nprint(x*y);\nprint(Fraction('.25'));\nprint(Fraction('1.25'));\nprint((2.5).as_integer_ratio());\nf=2.5;\nz=Fraction(*f.as_integer_ratio());\nprint(z);\nprint(float(z));\na=Fraction(225, 135);\nprint(a.limit_denominator(10));\nprint('*********set***********');\nx=set('abcde');\ny=set('bdxyz');\nprint(x, y);\nprint(x-y);\nprint(x|y);\nprint(x&y);\nprint(x^y);\nprint(x>y, x<y);\nz=x.intersection(y);\nprint(z);\nz.add('SPAM');\nprint(z);\nz.update(set(['x', 'Y']));\nprint(z);\nz.remove('b');\nprint(z);\nfor item in set('abc'): \n\tprint(item*3);\n\ns={'s', 'p', 'a', 'm'};\ns.add('alot');\nprint(s);\nprint({x**2 for x in [1, 2, 3, 5, 6]});\nprint({x for x in 'spam'});\nprint({c*4 for c in 'spam'});\nS={c*4 for c in 'spam'};\nprint(S|{'mmm', 'xxxx'});\nprint('**************set usage*************');\nL=[1,2,3,1,2,3,6];\nprint('L is', L);\nprint(list(set(L)));\nengineers={'bob', 'sue', 'ann', 'vic'};\nmanagers={'tom', 'sue'};\nprint('bob' in engineers);\nprint(engineers & managers);\nprint(engineers | managers);\nprint(engineers-managers);\nprint(managers-engineers);\nprint(engineers>managers);\nprint(engineers^managers);\nprint('****************bool*************');\nprint(type(True));\nprint(isinstance(True, int));\nprint(True ==1);\nprint(True is 1);\nprint(True or False);\nprint(True + 4);\n" }, { "alpha_fraction": 0.39344263076782227, "alphanum_fraction": 0.4754098355770111, "avg_line_length": 19.33333396911621, "blob_id": "b84c43fcbebf5f56ceb0931ec1b8902f53dae960", "content_id": "1ec5e510429d6a0084384d00c56af14f8f56025e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/Programming/Python/18Parameter/5exer.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def func(a, b, c=3, d=4):\n\tprint(a, b, c, d)\nfunc(1, *(5,6))\n" }, { "alpha_fraction": 0.5961021780967712, "alphanum_fraction": 0.600806474685669, "avg_line_length": 23.37704849243164, "blob_id": "5db15f29f6c28564c01faa6cf61cde5d5f8eaf72", "content_id": "0a2d4ba99327762630a063e513975387307eca3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1488, "license_type": "no_license", "max_line_length": 111, "num_lines": 61, "path": "/Project/SNS/userdb.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include \"userdb.h\"\n\n#define server \"localhost\"\n#define user \"root\"\n#define password \"guoyuquan\"\n#define database \"Users\"\n\n/*\n *Users DB, users table, column account(primary key), pwd\n */\n\nDBConn::DBConn()\n{\n conn = mysql_init(NULL);\n mysql_real_connect(conn, server, user, password, database, 0, NULL, 0);\n}\n/*when the query is finished, close connection, forbid resource leak*/\nDBConn::~DBConn()\n{\n mysql_close(conn);\n}\n\nstring DBConn::query_pwd(const string & account)\n{\n if(mysql_query(conn, string(\"select pwd from Users.users where users.account='\"+account+\"'\").c_str()))\n {\n std::cerr<<\"error: query pwd for \"<<account<<std::endl;\n return \"\";\n }\n MYSQL_RES *res=mysql_use_result(conn);\n if(res==NULL)\n return \"\";\n MYSQL_ROW row=mysql_fetch_row(res);\n if(row==NULL)\n return \"\";\n string ret(row[0]);\n mysql_free_result(res);\n return ret;\n}\n\nint DBConn::insert(const string & account, const string & pwd)\n{\n if(mysql_query(conn, string(\"INSERT INTO users (account, pwd) VALUES ('\"+account+\"', '\"+pwd+\"')\").c_str()))\n {\n std::cerr<<\"error: insert\"<<std::endl;\n return -1;\n }\n return 0;\n}\n\n\nint DBConn::update(const string &account, const string & pwd)\n{\n if(mysql_query(conn, string(\"update users set pwd='\"+pwd+\"' where account='\"+account+\"'\").c_str()))\n {\n std::cerr<<\"error: update\"<<std::endl;\n return -1;\n }\n return 0;\n}\n\n" }, { "alpha_fraction": 0.43624159693717957, "alphanum_fraction": 0.463087260723114, "avg_line_length": 16.230770111083984, "blob_id": "1faccc08afaf2863fe16649bee52ca6b07f1f6e4", "content_id": "be2912abf82e4e254e9d21f6833be9ff8d626433", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 447, "license_type": "no_license", "max_line_length": 51, "num_lines": 26, "path": "/Programming/C/The C programming Language/chapter4/strindex.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<string.h>\n#define MAX 1000\n\nint strindex(char source[], char target[]);\n\nint main()\n{\n char s[]=\"Zhang Qiu Chen Zhang Qiu chen\";\n char t[]=\"Qiu\";\n printf(\"%d\\n\",strindex(s,t));\n return 0;\n}\n\nint strindex(char s[], char t[])\n{\n int i, j, k;\n for(i=strlen(s)-1;i>=0;--i)\n {\n for(j=i,k=0;t[k]!='\\0'&&s[j]==t[k];k++,j++)\n ;\n if(k>0&&t[k]=='\\0')\n return i;\n }\n return -1;\n}" }, { "alpha_fraction": 0.4399999976158142, "alphanum_fraction": 0.5199999809265137, "avg_line_length": 15.666666984558105, "blob_id": "0b4680b2b053dbb192c7e81e7ac64703238b1193", "content_id": "521f1c169b08bb89891e06471f20f09a640af90b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/Programming/Python/18Parameter/1exer.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def func(a, b=4, c=5):\n\tprint(a, b, c)\nfunc(1, 2)\n" }, { "alpha_fraction": 0.46457764506340027, "alphanum_fraction": 0.5013623833656311, "avg_line_length": 18.864864349365234, "blob_id": "f43f90ec6e2819ac1df5866b4c3be846d8fffaea", "content_id": "1b43b8a94e3b551be332ddb4f2d31af4ab1f3d29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 734, "license_type": "no_license", "max_line_length": 52, "num_lines": 37, "path": "/Algorithm/Algorithm/chapter1/select_k_largest.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate<typename T>\nvoid quick_sort(T *, int size);\n\nint main()\n{\n int a[]={0,6,2,4,1,3,5,9,8,7};\n quick_sort(a, 10);\n for(int i=0;i<10; ++i)\n std::cout<<a[i]<<\" \";\n std::cout<<a[5]<<std::endl;\n return 0;\n}\n\ntemplate<typename T>\nvoid quick_sort(T *array, int size)\n{\n if(size<=1)\n return;\n std::swap(array[size/2], array[0]);\n int pivot=0;\n int i=size-1;\n while(pivot!=i)\n {\n if(array[i]<array[pivot])\n {\n std::swap(array[i], array[pivot+1]);\n std::swap(array[pivot], array[pivot+1]);\n pivot++;\n }\n else\n i--;\n }\n quick_sort(array, pivot-1);\n quick_sort(array+pivot+1, size-pivot-1);\n}" }, { "alpha_fraction": 0.5192307829856873, "alphanum_fraction": 0.5377492904663086, "avg_line_length": 24.089284896850586, "blob_id": "6dae38446d0b46b696a8a58ba5a61e5139580d8c", "content_id": "4af458d7a105897cf1e3b5d8e7975a532f24d469", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1404, "license_type": "no_license", "max_line_length": 94, "num_lines": 56, "path": "/Programming/C/Programming_in_Unix/chapter14/mmap.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <fcntl.h>\n#include <sys/mman.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <string.h>\n#include <sys/stat.h>\n\nint main(int argc, char * argv[])\n{\n int fdin, fdout;\n void *src, *dst;\n struct stat statbuf;\n if(argc != 3)\n {\n fprintf(stderr, \"usage: %s <fromfile> <tofile>\\n\", argv[0]);\n exit(0);\n }\n if((fdin=open(argv[1], O_RDONLY))<0)\n {\n fprintf(stderr, \"error: cannot open %s for reading\\n\", argv[1]);\n exit(0);\n }\n if((fdout=open(argv[2], O_RDWR | O_CREAT | O_TRUNC, FILESEC_MODE))<0)\n {\n fprintf(stderr, \"error: cannot create %s for writing\\n\", argv[2]);\n exit(0);\n }\n if(fstat(fdin, &statbuf)<0)\n {\n fprintf(stderr, \"fstat error\\n\");\n exit(0);\n }\n if(lseek(fdout, statbuf.st_size-1, SEEK_SET)==-1)\n {\n fprintf(stderr, \"lseek error\\n\");\n exit(0);\n }\n if(write(fdout, \"\", 1)!=1)\n {\n fprintf(stderr, \"write error\\n\");\n exit(0);\n }\n if((src=mmap(0, statbuf.st_size, PROT_READ, MAP_SHARED, fdin, 0))==MAP_FAILED)\n {\n fprintf(stderr, \"mmap error for inputn\");\n exit(0);\n }\n if((dst=mmap(0, statbuf.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fdout, 0))==MAP_FAILED)\n {\n fprintf(stderr, \"mmap error for output\\n\");\n exit(0);\n }\n memcpy(dst, src, statbuf.st_size);\n exit(0);\n}" }, { "alpha_fraction": 0.6050228476524353, "alphanum_fraction": 0.6438356041908264, "avg_line_length": 47.66666793823242, "blob_id": "c3ef55822744e6b996080983f35f2505b4d52ba7", "content_id": "c0bbf8cb3a311544bd4a8eb68b727cc7b5e02769", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 89, "num_lines": 9, "path": "/Programming/Python/7String/complex.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "import sys;\nprint('My {1[spam]} runs {0.platform}'.format(sys, {'spam': 'laptop'}));\nprint('My {config[spam]} runs {sys.platform}'.format(sys=sys, config={'spam':'laptop'}));\nsomelist=list('SPAM');\nprint(somelist);\nprint('first={0[0]}, third={0[2]}'.format(somelist));\nprint('first={0}, last={1}'.format(somelist[0], somelist[-1]));\nparts=somelist[0], somelist[-1], somelist[1:3];\nprint('first={0}, last={1}, middle={2}'.format(*parts));\n" }, { "alpha_fraction": 0.41450777649879456, "alphanum_fraction": 0.4365285038948059, "avg_line_length": 16.177778244018555, "blob_id": "f80faa1b5f5eb395718713f07bcdca3ccbf63db2", "content_id": "622cd79a23a78531783ecb0f6c82b019a28805a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 772, "license_type": "no_license", "max_line_length": 52, "num_lines": 45, "path": "/Programming/C/The C programming Language/chapter4/grep.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#define MAX 1000\n\nint get_line(char line[], int max);\nint strindex(char source[], char serchfor[]);\n\nchar pattern[]=\"ould\";\n\nint main()\n{\n char line[MAX];\n int found=0;\n while(get_line(line,MAX)>0)\n if(strindex(line, pattern)>=0)\n {\n printf(\"%s\",line);\n found++;\n }\n return found;\n}\n\nint get_line(char s[], int lim)\n{\n int c, i;\n i=0;\n while(--lim>0&&(c=getchar())!=EOF&&c!='\\n')\n s[i++]=c;\n if(c=='\\n')\n s[i++]=c;\n s[i]='\\0';\n return i;\n}\n\nint strindex(char s[], char t[])\n{\n int i, j, k;\n for(i=0; s[i]!='\\0';++i)\n {\n for(j=i,k=0;t[k]!='\\0'&&s[j]==t[k];j++, k++)\n ;\n if(k>0&&t[k]=='\\0')\n return i;\n }\n return -1;\n}" }, { "alpha_fraction": 0.46570396423339844, "alphanum_fraction": 0.4837545156478882, "avg_line_length": 14, "blob_id": "97202968f22fe5676f1535beb63d36f5c92c505b", "content_id": "7621d5ede03e2b17b994090ac28cf7c1afcaeb8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 554, "license_type": "no_license", "max_line_length": 40, "num_lines": 37, "path": "/Programming/C/The C programming Language/chapter1/line80.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define MAXLINE 1000\n\nint getLine(char line[], int n);\nvoid copy(char to[], char from[]);\n\nint main()\n{\n int cnt;\n char line[MAXLINE];\n while((cnt=getLine(line,MAXLINE))>0)\n {\n if(cnt>80)\n printf(\"%s\\n\",line);\n }\n}\n\nint getLine(char line[], int n)\n{\n int i=0;\n char c;\n c=getchar();\n while(c!='\\n'&&i!=n&&c!=EOF)\n {\n line[i]=c;\n c=getchar();\n ++i;\n }\n return i;\n}\n\nvoid copy(char to[], char from[])\n{\n for(int i=0;from[i]!='\\0';++i)\n to[i]=from[i];\n}" }, { "alpha_fraction": 0.6209912300109863, "alphanum_fraction": 0.6239067316055298, "avg_line_length": 21.866666793823242, "blob_id": "29aa308c7f970ffe7e8909cfc644bb33a5d9fb85", "content_id": "269e0cc5e20b442cb2057e805adb6c06925c1bb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 343, "license_type": "no_license", "max_line_length": 85, "num_lines": 15, "path": "/Programming/JAVA/Thinking in JAVA/chapter14/finding/Finding.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "import java.util.regex.*;\n\npublic class Finding{\n\tpublic static void main(String [] args){\n\t\tMatcher m=Pattern.compile(\"\\\\w+\").matcher(\"Evening is full of the linnet's wings\");\n\t\twhile(m.find())\n\t\t\tSystem.out.println(m.group()+\" \");\n\t\tSystem.out.println();\n\t\tint i=0;\n\t\twhile(m.find(i)){\n\t\t\tSystem.out.println(m.group()+\" \");\n\t\t\ti++;\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.7674418687820435, "alphanum_fraction": 0.7674418687820435, "avg_line_length": 25.875, "blob_id": "09fdf398b63f90582730ff7a9972c7863c65bb5f", "content_id": "76bb4b431bcb9c9247e2a797a6f49f0756f542bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 215, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/Programming/JAVA/Thinking in JAVA/chapter15/genericclassrefereces/GenericClassReferences.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "public class GenericClassReferences{\n\tpublic static void main(String [] args){\n\t\tClass intClass=int.class;\n\t\tClass<Integer> genericIntClass=int.class;\n\t\tgenericIntClass =Integer.class;\n\t\tintClass=double.class;\n\t}\n}\n" }, { "alpha_fraction": 0.5621761679649353, "alphanum_fraction": 0.5647668242454529, "avg_line_length": 23.1875, "blob_id": "9def42fb614d09fb953edcf08a37640e33aea8b3", "content_id": "7a051f2780bd4f112e0790c3313c7da1ecad7bc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 386, "license_type": "no_license", "max_line_length": 164, "num_lines": 16, "path": "/Programming/C/Programming_in_Unix/chapter6/getpwnam.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <pwd.h>\n#include <stddef.h>\n#include <stdio.h>\n\nint main()\n{\n struct passwd *ptr;\n \n setpwent(); //rewind the passwd file;\n while((ptr = getpwent())!=NULL)\n {\n printf(\"%s\\n%s\\n%d\\n%d\\n%s\\n%s\\n%s\\n%s\\n\", ptr->pw_name, ptr->pw_passwd, ptr->pw_uid, ptr->pw_gid, ptr->pw_gecos, ptr->pw_dir, ptr->pw_shell,ptr->pw_class);\n }\n endpwent();\n return 0;\n}" }, { "alpha_fraction": 0.5459166169166565, "alphanum_fraction": 0.5533717274665833, "avg_line_length": 27.941177368164062, "blob_id": "4c20c1ec971987fec30be217f1c6aa6ba63620a9", "content_id": "26bf4af30e9110192bf54cb051ba1996dda12ffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2951, "license_type": "no_license", "max_line_length": 72, "num_lines": 102, "path": "/Programming/C/Programming_in_Unix/chapter14/record_lock.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <fcntl.h>\n#include <unistd.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n/*\n * functions to lock or unlock a regio of a file\n */\nint\nlock_reg(int fd, int cmd, int type, off_t offset, int whence, off_t len)\n{\n struct flock lock;\n \n lock.l_type = type; /* F_RDLCK, F_WRLCK, F_UNLCK */\n lock.l_start = offset; /* byte offset, relative to l_whence */\n lock.l_whence = whence; /* SEEK_SET, SEEK_CUR, SEEK_END */\n lock.l_len = len; /* #bytes (0 means to EOF) */\n \n return (fcntl(fd, cmd, &lock));\n}\n\n#define read_lock(fd, offset, whence, len) \\\n lock_reg((fd), F_SETLK, F_RDLCK, (offset), (whence), (len))\n#define readw_lock(fd, offset, whence, len) \\\nlock_reg((fd), F_SETLKW, F_RDLCK, (offset), (whence), (len))\n#define write_lock(fd, offset, whence, len) \\\nlock_reg((fd), F_SETLK, F_WRLCK, (offset), (whence), (len))\n#define writew_lock(fd, offset, whence, len) \\\nlock_reg((fd), F_SETLKW, F_WRLCK, (offset), (whence), (len))\n#define un_lock(fd, offset, whence, len) \\\nlock_reg((fd), F_SETLK, F_UNLCK, (offset), (whence), (len))\n\n/*\n * testing for a lock\n */\n\npid_t\nlock_test(int fd, int type, off_t offset, int whence, off_t len)\n{\n struct flock lock;\n lock.l_type = type; /* F_RDLCK or F_WRLCK */\n lock.l_start = offset; /* byte offset, relative to l_whence */\n lock.l_whence = whence; /* SEEK_SET, SEEK_CUR, SEEK_END */\n lock.l_len = len; /* #bytes (0 means to EOF) */\n if(fcntl(fd, F_GETLK, &lock)<0)\n {\n fprintf(stderr,\"error: fcntl\\n\");\n exit(0);\n }\n if(lock.l_type==F_UNLCK)\n return(0); /*false, the region is not locked by another proc*/\n return (lock.l_pid); /*true, return pid*/\n}\n\n#define is_read_lockable(fd, offset, whence, len) \\\n(lock_test((fd), F_RDLCK, (offset), (whence), (len)) == 0)\n#define is_write_lockable(fd, offset, whence, len) \\\n(lock_test((fd), F_WRLCK, (offset), (whence), (len)) == 0)\n\nstatic void\nlockabyte(const char *name, int fd, off_t offset)\n{\n if(writew_lock(fd, offset, SEEK_SET, 1)<0)\n {\n fprintf(stderr, \"error: writew_lock for %s\\n\", name);\n exit(0);\n }\n printf(\"%s: got the lock, byte %lld\\n\", name, offset);\n}\n\nint main()\n{\n int fd;\n pid_t pid;\n /*\n * create a file and wirte two bytes to it;\n */\n if((fd=creat(\"templock\", FILESEC_MODE))<0)\n {\n fprintf(stderr, \"error: creat\\n\");\n exit(0);\n }\n if(write(fd, \"ab\", 2)!=2)\n {\n fprintf(stderr, \"write error\\n\");\n exit(0);\n }\n if ((pid = fork()) < 0) {\n fprintf(stderr, \"error: ford\\n\");\n } else if (pid == 0) { /* child */\n lockabyte(\"child\", fd, 0);\n //TELL_PARENT(getppid());\n //WAIT_PARENT();\n lockabyte(\"child\", fd, 1);\n } else { /* parent */\n lockabyte(\"parent\", fd, 1);\n //TELL_CHILD(pid);\n //WAIT_CHILD();\n lockabyte(\"parent\", fd, 0);\n }\n exit(0);\n}" }, { "alpha_fraction": 0.569767415523529, "alphanum_fraction": 0.5755813717842102, "avg_line_length": 18.11111068725586, "blob_id": "3cc61fce57bcb8764e0c5726930eb3c6863243c4", "content_id": "5ca0794bc99cfc1a102519ba7c034f8dc53e5a0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 43, "num_lines": 9, "path": "/Programming/Python/Class/fourth.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class Person:\n\tdef __init__(self, name, job=None, pay=0):\n\t\tself.name = name\n\t\tself.job = job\n\t\tself.pay = pay\n\nif __name__ == '__main__':\n\tbob = Person('Bob')\n\tprint(bob)\n" }, { "alpha_fraction": 0.5958701968193054, "alphanum_fraction": 0.6017699241638184, "avg_line_length": 21.600000381469727, "blob_id": "73a9af096161f55234ee895f80d58fb50f667f18", "content_id": "52b9a028d3d0e585f04ec5067ff6dffe4b5b4231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 44, "num_lines": 15, "path": "/Programming/Python/13Loop/enumerate.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "S = 'spam'\noffset=0\nfor item in S:\n\tprint(item, 'appears at offset', offset)\n\toffset+=1\nprint('***************************')\nfor (offset, item) in enumerate(S):\n\tprint(item+'appears at offset %d' % offset)\nfor item in enumerate(S):\n\tprint(item)\nE = enumerate(S)\nprint(next(E))\n#while E:\n#\tnext(E)\nprint([c * i for (i, c) in enumerate(S)])\n" }, { "alpha_fraction": 0.4653937816619873, "alphanum_fraction": 0.5083532333374023, "avg_line_length": 16.5, "blob_id": "c955518c10d09c129916d0c7d9e1f9996c2f7ce8", "content_id": "d89c9f878b81ee4b77e564c3b497f436f5b782de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 419, "license_type": "no_license", "max_line_length": 49, "num_lines": 24, "path": "/Programming/C++/Inside_the_C++_object_model/chapter3/class_address.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n A(int a1, int c1):a(a1), c(c1){}\n int ar(){return a;}\nprivate:\n int a;\n static int b;\n int c;\n};\n\nint main()\n{\n A a(10, 11);\n A *p=&a;\n int *p1=reinterpret_cast<int *>(p);\n *p1=20;\n std::cout<<*p1<<\" \"<<sizeof (int)<<std::endl;\n //p1=reinterpret_cast<int *>(p+1);\n std::cout<<*(p1+1)<<std::endl;\n std::cout<<a.ar()<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4923076927661896, "alphanum_fraction": 0.5179487466812134, "avg_line_length": 12.066666603088379, "blob_id": "8baeb2521d84be3547f5dcf538e9b2d449d7b9e4", "content_id": "df118d2ef1bff05b79618319f20f4646fb899670", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 195, "license_type": "no_license", "max_line_length": 36, "num_lines": 15, "path": "/Algorithm/Programming_Perls/testnew.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nstruct i\n{\n int i;\n};\n\nint main()\n{\n i *p=new i[10];\n std::cout<<sizeof(i)<<std::endl;\n std::cout<<p<<std::endl;\n std::cout<<&p[10]<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.488095223903656, "avg_line_length": 11, "blob_id": "27a981b5d17f6dabd6d242188103d89850bb1993", "content_id": "824293c9624d8ed867ea1c5407601c4796a3059c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 18, "num_lines": 7, "path": "/Programming/Python/18Parameter/changer.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def changer(a, b):\n\ta = 2\n\tb[0] = 'spam'\nX = 1\nL = [1, 2]\nchanger(X, L)\nprint(X, L)\n" }, { "alpha_fraction": 0.4448484778404236, "alphanum_fraction": 0.45212119817733765, "avg_line_length": 34.869564056396484, "blob_id": "65718f81390d7e79d8b9363500908b9814252ed8", "content_id": "dd48ce87828f52fa1f27e94eff748262dfc18335", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 825, "license_type": "no_license", "max_line_length": 65, "num_lines": 23, "path": "/Programming/Python/14iteration/file_parse.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "f = open('file_parse.py')\nL = f.readlines()\nL = [line.rstrip() for line in L]\nprint(L)\nfor line in L:\n\tprint(line)\nfor line in [l.rstrip() for l in open('file_parse.py')]:\n\tprint(line)\nprint('************************1*******************')\nfor line in [l.upper() for l in open('file_parse.py')]:\n\tprint(line)\nprint('************************2*******************')\nfor line in [l.split() for l in open('file_parse.py')]:\n\tprint(line)\nprint('************************3*******************')\nfor line in [l.replace(' ', '!') for l in open('file_parse.py')]:\n\tprint(line)\nprint('************************4*******************')\nfor (a, b) in [('sys' in l, l) for l in open('file_parse.py')]:\n\tprint(a, b)\nprint('***********************5********************')\nfor line in [l for l in open('file_parse.py') if l[0] == 'f']:\n\tprint(line)\n" }, { "alpha_fraction": 0.5130111575126648, "alphanum_fraction": 0.5600991249084473, "avg_line_length": 17.76744270324707, "blob_id": "2109336daf5c0cb19ff3cab39758844baf808fec", "content_id": "811ec873c96b275c55bc73a379b62250fc75bbd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 807, "license_type": "no_license", "max_line_length": 41, "num_lines": 43, "path": "/Programming/Python/4ObjectType/list.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "L=[123, 'spam', 1.23];\nprint(L);\nprint(len(L));\nprint(L[0]);\nprint(L[:-1]);\nprint(L+[4,5,6]);\nprint(L);\nL=L+[6];\nprint(L);\nL.append('NI');\nprint(L);\nL.pop(2);\nprint(L);\nL=['bb', 'aa', 'cc'];\nL.sort();\nprint(L);\nL.reverse();\nprint(L);\nM=[[1,2,3],[4,5,6],[7,8,9]];\nprint(M);\nprint(M[1]);\nprint(M[1][2]);\n#M[1]=L;\nprint(M);\ncol=[row[1] for row in M];\nprint(col);\ncol=[row[1]+1 for row in M];\nprint(col);\ncol=[row[1] for row in M if row[1]%2==0];\nprint(col);\ndiag=[M[i][i] for i in [0, 1, 2]];\nprint(diag);\ndoubles=(c*2 for c in 'spam');\nprint(doubles);\nG=(sum(row) for row in M);\nprint(next(G));\nprint(next(G));\nprint(list(map(sum, M)));\nprint({sum(row) for row in M});\nprint({i:sum(M[i]) for i in range(3)});\nprint([ord(x) for x in 'spam']);\nprint({ord(x) for x in 'spam'});\nprint({x:ord(x) for x in 'spam'})\n" }, { "alpha_fraction": 0.4107005298137665, "alphanum_fraction": 0.4337952136993408, "avg_line_length": 22.20535659790039, "blob_id": "2b1d94c38cddd6811fd6cf9e7010cbdc9ce55ad9", "content_id": "763759b9c4fd5a520f1567710812c808bf60342c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2598, "license_type": "no_license", "max_line_length": 75, "num_lines": 112, "path": "/Programming/C/Network_Programming_in_Unix/chapter6/tcpechocli.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <unistd.h>\n#include <string.h>\n#include <arpa/inet.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <sys/select.h>\n\nvoid\nproc(FILE *, int sock);\n\nint\nmax(int, int);\n\nint\nmain(int argc, char * argv[])\n{\n if(argc<2)\n {\n fprintf(stderr, \"usage: ./a.out <IP address>\\n\");\n exit(0);\n }\n int sock[5];\n for(int i=0; i<5; i++)\n {\n if((sock[i]=socket(AF_INET, SOCK_STREAM, 0))<0)\n {\n fprintf(stderr, \"error: socket()\\n\");\n fprintf(stderr, \"%s\\n\", strerror(errno));\n exit(0);\n }\n }\n for(int i=0; i<5; i++)\n {\n struct sockaddr_in sa;\n sa.sin_family=AF_INET;\n sa.sin_port=htons(9868);\n if(inet_pton(AF_INET, argv[1], &sa.sin_addr)<=0)\n {\n fprintf(stderr, \"error: inetpton\\n\");\n exit(0);\n }\n\n if(connect(sock[i], (struct sockaddr *)&sa, sizeof(sa))<0)\n {\n fprintf(stderr, \"error: connect()\\n\");\n fprintf(stderr, \"%s\\n\", strerror(errno));\n exit(0);\n }\n }\n proc(stdin, sock[0]);\n //close(sock);\n return 0;\n}\n\nint\nmax(int a, int b)\n{\n return a>b?a:b;\n}\nvoid\nproc(FILE *fp, int sockfd)\n{\n int maxfdp1, stdineof;\n fd_set rset;\n char rcvbuf[1500], sndbuf[1500];\n int n;\n memset(rcvbuf, 0, 1500);\n memset(sndbuf, 0, 1500);\n stdineof=0;\n while(1)\n {\n if(stdineof==0)\n {\n FD_SET(fileno(fp), &rset);\n }\n FD_SET(sockfd, &rset);\n maxfdp1=max(fileno(fp), sockfd)+1;\n if(select(maxfdp1, &rset, NULL, NULL, NULL)>0)\n {\n if(FD_ISSET(sockfd, &rset))\n {\n if(read(sockfd, rcvbuf, 1500)==0)\n {\n if(stdineof==1)\n return;\n else\n {\n fprintf(stderr, \"server terminated permanantly\\n\");\n return;\n }\n }\n fputs(rcvbuf, stdout);\n }\n if(FD_ISSET(fileno(fp), &rset))\n {\n if((n=read(fileno(fp), sndbuf, 1500))==0)\n {\n stdineof=1;\n shutdown(sockfd, SHUT_WR);\n FD_CLR(fileno(fp), &rset);\n continue;\n }\n else if(n<0)\n fprintf(stderr, \"error: read() sock\\n\");\n write(sockfd, sndbuf, strlen(sndbuf));\n }\n }\n return;\n }\n}" }, { "alpha_fraction": 0.508571445941925, "alphanum_fraction": 0.5199999809265137, "avg_line_length": 8.722222328186035, "blob_id": "e423732f78afbf8e0c4c89dbf6c8c1719acae1dd", "content_id": "6de7b84a487c5d82dcd695e8b373c69f097afc88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 175, "license_type": "no_license", "max_line_length": 40, "num_lines": 18, "path": "/Programming/C/Programming_in_Unix/chapter4/same_function_struct_name.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nstruct fun{\n int i;\n};\n\nint fun()\n{\n std::cout<<\"just a test\"<<std::endl;\n return 0;\n}\n\nint main()\n{\n struct fun s;\n fun();\n return 0;\n}\n" }, { "alpha_fraction": 0.5094339847564697, "alphanum_fraction": 0.5132075548171997, "avg_line_length": 11.666666984558105, "blob_id": "0fe995ae7e134d76c4ea522f988cb4326897b5cb", "content_id": "2981a327e102b9e37edd8ff803c287fa34a10d38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 265, "license_type": "no_license", "max_line_length": 50, "num_lines": 21, "path": "/Programming/C++/Inside_the_C++_object_model/chapter2/test_virtual.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n virtual void fun(){std::cout<<\"A\"<<std::endl;}\n};\n\nclass B: public A{\npublic:\n virtual void fun(){std::cout<<\"B\"<<std::endl;}\n};\n\nint main()\n{\n B b;\n A *a=&b;\n A aa=*a;\n aa.fun();\n a->fun();\n return 0;\n}" }, { "alpha_fraction": 0.5254237055778503, "alphanum_fraction": 0.5762711763381958, "avg_line_length": 18.66666603088379, "blob_id": "b3ddc690932c1de6d63dc1b779cc68199dc830a0", "content_id": "1fdb13244204f309bbc2f49d219798f0cfeb8aa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 21, "num_lines": 3, "path": "/Programming/Python/18Parameter/4exer.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def func(a, **kargs):\n\tprint(a, kargs)\nfunc(a=1, c=3, b=2)\n" }, { "alpha_fraction": 0.40640392899513245, "alphanum_fraction": 0.4433497488498688, "avg_line_length": 14.576923370361328, "blob_id": "61ca9dc2f4de1a226b538452d6fc80754c89d9a5", "content_id": "dea12b0a1d4949341c36a7928dfbecafb3c322a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 406, "license_type": "no_license", "max_line_length": 42, "num_lines": 26, "path": "/Programming/Practice/Interpretation/1.12_pascal.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint pascal(int n)\n{\n if(n<4)\n return 1;\n int sum=0;\n int i;\n for(i=1; i<n; i++)\n {\n sum+=i;\n if(n<=sum)\n break;\n }\n if(n==sum||n==sum-i+1)\n return 1;\n std::cout<<n<<std::endl;\n return pascal(n-i) + pascal(n-i+1);\n}\n\nint main()\n{\n //for(int i=0; i<100; i++)\n std::cout<<pascal(700)<<std::endl;\n return 0;\n}\n\n" }, { "alpha_fraction": 0.44297993183135986, "alphanum_fraction": 0.4716332256793976, "avg_line_length": 21.384614944458008, "blob_id": "0d722acb982fe69f8b11e65ce0481c8db0680e87", "content_id": "00077e1bb94afc8142687b9acc3956185927a5e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1745, "license_type": "no_license", "max_line_length": 66, "num_lines": 78, "path": "/Programming/C/Network_Programming_in_Unix/chapter5/tcpechocli.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <unistd.h>\n#include <string.h>\n#include <arpa/inet.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n\nvoid\nproc(int sock);\n\nint\nmain(int argc, char * argv[])\n{\n if(argc<2)\n {\n fprintf(stderr, \"usage: ./a.out <IP address>\\n\");\n exit(0);\n }\n int sock[5];\n for(int i=0; i<5; i++)\n {\n if((sock[i]=socket(AF_INET, SOCK_STREAM, 0))<0)\n {\n fprintf(stderr, \"error: socket()\\n\");\n fprintf(stderr, \"%s\\n\", strerror(errno));\n exit(0);\n }\n }\n for(int i=0; i<5; i++)\n {\n struct sockaddr_in sa;\n sa.sin_family=AF_INET;\n sa.sin_port=htons(9868);\n if(inet_pton(AF_INET, argv[1], &sa.sin_addr)<=0)\n {\n fprintf(stderr, \"error: inetpton\\n\");\n exit(0);\n }\n\n if(connect(sock[i], (struct sockaddr *)&sa, sizeof(sa))<0)\n {\n fprintf(stderr, \"error: connect()\\n\");\n fprintf(stderr, \"%s\\n\", strerror(errno));\n exit(0);\n }\n }\n proc(sock[0]);\n //close(sock);\n return 0;\n}\n\nvoid\nproc(int sock)\n{\n char buf[1500];\n memset(buf, 0, 1500);\n while(fgets(buf, 1500, stdin)!=NULL)\n {\n if(write(sock, buf, strlen(buf))<0)\n {\n fprintf(stderr, \"error: write()\\n\");\n fprintf(stderr, \"%s\\n\", strerror(errno));\n return;\n }\n memset(buf, 0, 1500);\n int n;\n if((n=read(sock, buf, 1500))<=0)\n {\n fprintf(stderr, \"error: read()\\n\");\n fprintf(stderr, \"%s\\n\", strerror(errno));\n return;\n }\n printf(\"receive %d\\n\", n);\n printf(\"%s\\n\", buf);\n memset(buf, 0, 1500);\n }\n}" }, { "alpha_fraction": 0.5403226017951965, "alphanum_fraction": 0.5483871102333069, "avg_line_length": 9.416666984558105, "blob_id": "ebc48d5d003dfab83467ee0b74cdedd441354040", "content_id": "535a2f78eff1cf2d6af48aa456c89dc69d7ca197", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 124, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/Programming/C++/Inside_the_C++_object_model/chapter6/abort.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n ~A(){std::cout<<\"destructor\"<<std::endl;}\n};\n\nint main()\n{\n A a;\n return 0;\n}" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.5, "avg_line_length": 14.666666984558105, "blob_id": "df5e430ba422088162db0fc7d200babdac0754f7", "content_id": "f619b281f65c35bd3ac4ed89f460f1d31e2ffcfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 140, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/Programming/C++/Effective_STL/Functors/virtual.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n int *p =new int[1000000000000];\n for(int i=0; ; i++)\n std::cout<<p[i]<<\" \";\n delete[] p;\n}" }, { "alpha_fraction": 0.5646551847457886, "alphanum_fraction": 0.5775862336158752, "avg_line_length": 15.5, "blob_id": "cda2be9bb84c459504c20a8d77dee5fd16947501", "content_id": "13963ec1303d7608e24ce07f1e4b2231193507ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 29, "num_lines": 14, "path": "/Programming/Python/14iteration/other_iterator.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "D = {'A':6, 'B': 6, 'C': 6}\nfor key in D:\n\tprint(key+\" => %d\" % D[key])\nimport os\np = os.popen('pwd')\nprint(p.next())\nE = enumerate('spam')\nI=iter(E)\nprint(next(I))\nwhile True:\n\ttry:\n\t\tprint(next(I))\n\texcept StopIteration:\n\t\tbreak\n\n" }, { "alpha_fraction": 0.5742574334144592, "alphanum_fraction": 0.5907590985298157, "avg_line_length": 15, "blob_id": "4f443772bf36cb0bef9b9af903187f2c4c269386", "content_id": "c05daccfc2f3a1cf1c5f951dbdc8e31cbc163d70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 303, "license_type": "no_license", "max_line_length": 37, "num_lines": 19, "path": "/Programming/C/The C programming Language/chapter8/low_cat.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<fcntc.h>\n#include<stdio.h>\n#include<sys/types.h>\n#include<sys/stat.h>\n\nvoid filecopy(FILE *, FILE *);\n\nint main(int argc, char *argv[])\n{\n if(argc==1)\n filecopy(stdin, stdout);\n}\n\nvoid filecopy(FILE * fp1, FILE * fp2)\n{\n int c;\n while((c=getc(fp1))!=EOF)\n putc(c, fp2);\n}" }, { "alpha_fraction": 0.6333333253860474, "alphanum_fraction": 0.6333333253860474, "avg_line_length": 29, "blob_id": "ad3046ca1c2e17f41834df48fe628946ed7610ee", "content_id": "980ebf2b51744b6892213dadb61be5216330b80b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "no_license", "max_line_length": 29, "num_lines": 1, "path": "/Programming/Python/20HigherIterator/map.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "print(list(map(ord, 'spam')))\n" }, { "alpha_fraction": 0.5465465188026428, "alphanum_fraction": 0.5615615844726562, "avg_line_length": 21.266666412353516, "blob_id": "23055ba678cfd33f26a3ac2e657f5fd7552f320e", "content_id": "a98257282f55a32902d925c37f229c367e23faa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 333, "license_type": "no_license", "max_line_length": 48, "num_lines": 15, "path": "/Programming/C++/Effective_STL/Associative_Container/map_key_constness.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<map>\n#include<iostream>\n\nint main()\n{\n std::map<int, int> imap;\n std::cout<<imap.size()<<std::endl;\n imap[10]++;\n std::cout<<imap.size()<<std::endl;\n std::cout<<imap[0]<<std::endl;\n std::cout<<(--imap.end())->first<<std::endl;\n std::cout<<imap.size()<<std::endl;\n const const int i=0;\n return 0;\n}" }, { "alpha_fraction": 0.5024154782295227, "alphanum_fraction": 0.5169082283973694, "avg_line_length": 16.33333396911621, "blob_id": "6a9f999f9eecdc9f733a2bf7e56c0a14ed175cbd", "content_id": "51774033e593bc421be59db26ce57036b5a4ae8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 207, "license_type": "no_license", "max_line_length": 42, "num_lines": 12, "path": "/Programming/C++/Inside_the_C++_object_model/chapter4/test.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n int *p=new int[10];\n short *q=reinterpret_cast<short *>(p);\n q--;\n std::cout<<p<<\" \"<<q<<std::endl;\n std::cout<<*q<<std::endl;\n delete []p;\n return 0;\n}" }, { "alpha_fraction": 0.6011470556259155, "alphanum_fraction": 0.6126173138618469, "avg_line_length": 25.985916137695312, "blob_id": "0d8e6911b6c67bcd6ed9c060351915b334075abf", "content_id": "6966b6dc161acee96a782a691e76bfa948772191", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1918, "license_type": "no_license", "max_line_length": 142, "num_lines": 71, "path": "/Programming/C/libcap/pcap_filter.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<pcap.h>\n#include<stdio.h>\n#include<stdlib.h>\n#include<errno.h>\n#include<sys/socket.h>\n#include<netinet/in.h>\n#include<arpa/inet.h>\n#include<netinet/if_ether.h>\n\n/* just print a count every time got a packet */\n\nvoid my_callback(u_char *useless, const struct pcap_pkthdr *pkthdr, const u_char *packet)\n{\n static int count=1;\n fprintf(stdout, \"%d\", count);\n fflush(stdout);\n count++;\n}\n\nint main(int argc, char **argv)\n{\n int i;\n char *dev;\n char errbuf[PCAP_ERRBUF_SIZE];\n pcap_t *descr;\n struct pcap_pkthdr hdr;\n struct ether_header *eptr;\n struct bpf_program fp; //hold compiled program\n bpf_u_int32 maskp;//subnetmask\n bpf_u_int32 netp;//ip\n \n if(argc!=2)\n {\n fprintf(stdout,\"usage:%s\\\"filter program \\\" \\n\", argv[0]);\n return 0;\n }\n /*find a device*/\n dev=pcap_lookupdev(errbuf);\n if(dev==NULL)\n {\n fprintf(stderr,\"%s\\n\",errbuf);\n exit(1);\n }\n /*look up the network address and mask of the device*/\n pcap_lookupnet(dev,&netp,&maskp,errbuf);\n /*open device for reading this time let set it in the promiscuous mode so we are able to monitor traffic to another machine*/\n descr=pcap_open_live(dev,BUFSIZ, 1, 1000, errbuf);\n if(descr==NULL)\n {\n printf(\"pcap_open_live(): %s\\n\",errbuf);\n exit(0);\n }\n /*try and compile the program*/\n printf(\"%d\\n\",netp);\n \n /*first parameter is the identifier of the device, second is the file pointer to the compiled file, third is the filter rule, fourth is */\n if(pcap_compile(descr, &fp, \"ip\",1,maskp)==-1)\n {\n fprintf(stderr,\"Error calling pcap_compile\\n\");\n exit(1);\n }\n /*set the compiled program as the filter*/\n if(pcap_setfilter(descr,&fp)==-1)\n {\n fprintf(stderr,\"ERROR setting filter\\n\");\n exit(1);\n }\n /*loop*/\n pcap_loop(descr,-1,my_callback,NULL);\n return 0;\n}\n\n\n" }, { "alpha_fraction": 0.46404340863227844, "alphanum_fraction": 0.4789687991142273, "avg_line_length": 17.871795654296875, "blob_id": "6f81a8d0f40b769e07b498b9ebd940c98fe68e26", "content_id": "b7251eeda74655213d0a154dbba76b73c5751dca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 737, "license_type": "no_license", "max_line_length": 66, "num_lines": 39, "path": "/Project/source/Client/friends.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"friends.h\"\n#include <iostream>\n\nvoid Friends::update_vec(std::vector<f_info> tvec)\n{\n ava=0;\n fvec=tvec;\n ava=1;\n}\n\nint Friends::search_vec(const std::string & account, f_info * fin)\n{\n if(ava==0)\n return -1;\n for(int i=0; i<fvec.size(); i++)\n {\n if(fvec[i].account==account)\n {\n if(fin==NULL)\n return 1;\n fin->account=fvec[i].account;\n fin->ip=fvec[i].ip;\n fin->flag=1;\n return 1;\n }\n }\n return 0;\n}\n\nvoid Friends::list_all()\n{\n if(fvec.size()==0)\n {\n std::cout<<\"none\"<<std::endl;\n return;\n }\n for(int i=0; i<fvec.size(); i++)\n std::cout<<fvec[i].account<<std::endl;\n}\n\n" }, { "alpha_fraction": 0.4700186848640442, "alphanum_fraction": 0.48734501004219055, "avg_line_length": 18.744966506958008, "blob_id": "6743e1a469fe03b3ac9d7cd4603489f2cebe43bf", "content_id": "383b9a777d1e96aefc575a660ab01e6d4dbaf3e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5887, "license_type": "no_license", "max_line_length": 159, "num_lines": 298, "path": "/Algorithm/Algorithm/chapter9/graph.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<queue>\n#include<stdexcept>\n\ntemplate<typename T>\nstruct Node{\n Node(int ele=0, int w=0, Node *n=NULL):element(ele), weight(w), next(n){}\n T element;\n int weight;\n Node *next;\n};\n\ntemplate <typename T>\nclass List{\npublic:\n List(Node<T> * n=NULL):head(n){}\n void insert(T x, int w);\n void delete_node(T x);\n Node<T> * find(T x);\n T get_pos(int n);\n T get_min_weight();\n void print();\n ~List();\nprivate:\n Node<T> *head;\n};\n\ntemplate<typename T>\nT List<T>::get_min_weight()\n{\n Node<T> *p=head;\n int temp=666666, no=-1;\n while(p!=NULL)\n {\n if(p->weight<temp)\n {\n temp=p->weight;\n no=p->element;\n }\n p=p->next;\n }\n return no;\n}\n\ntemplate<typename T>\nT List<T>::get_pos(int n)\n{\n Node<T> *p=head;\n for(int i=0; p!=NULL&&i<n; i++)\n {\n p=p->next;\n }\n if(p!=NULL)\n return p->element;\n return -1;\n}\ntemplate<typename T>\nvoid List<T>::insert(T x, int w)\n{\n if(head==NULL)\n {\n head=new Node<T>(x, w);\n if(head==NULL)\n {\n std::bad_alloc ba;\n throw ba;\n }\n return;\n }\n Node<T> * p=head;\n while(p->next!=NULL)\n p=p->next;\n p->next=new Node<T>(x,w);\n if(p->next==NULL)\n {\n std::bad_alloc ba;\n throw ba;\n }\n}\n\ntemplate<typename T>\nvoid List<T>::delete_node(T x)\n{\n if(head==NULL)\n return;\n if(head->element==x)\n return;\n Node<T> *p=head;\n while(p->next!=NULL&&p->next->element!=x)\n p=p->next;\n if(p->next!=NULL)\n {\n Node<T> temp=p->next;\n p->next=temp->next;\n delete temp;\n }\n}\n\ntemplate<typename T>\nNode<T> * List<T>::find(T x)\n{\n Node<T> *p=head;\n while(p!=NULL&&p->element!=x)\n p=p->next;\n return p;\n}\n\ntemplate<typename T>\nList<T>::~List()\n{\n while(head!=NULL)\n {\n Node<T> *p=head;\n head=head->next;\n delete p;\n }\n}\n\ntemplate<typename T>\nvoid List<T>::print()\n{\n Node<T> *p=head;\n while(p!=NULL)\n {\n std::cout<<p->element<<\" weight: \"<<p->weight<<\", \";\n p=p->next;\n }\n std::cout<<std::endl;\n}\n\nstruct table{\n table(int v=0, int d=0, int pa=0, bool f= false):vertex(v), known(f), dis(d), path(pa){}\n int vertex;\n bool known;\n int dis;\n int path;\n};\n\ntemplate<typename T>\nclass Graph{\npublic:\n void short_path(T ver);\n Graph(int n, table * p=NULL):sz(n),list(new List<T>[n]), shortpath(new table[n]){}\n void insert(int node, T *ver, int*, int n);\n int indegree(int node);\n void print();\n int size(){return sz;}\n void shortest_path(T, T);\n ~Graph();\nprivate:\n bool all();\n int sz;\n List<T> * list;\n table *shortpath;\n};\ntemplate<typename T>\nbool Graph<T>::all()\n{\n for(int i=0; i<sz; i++)\n {\n if(shortpath[i].known==false)\n {\n return false;\n }\n }\n return true;\n}\n\ntemplate<typename T>\nvoid Graph<T>::insert(int node, T *ver, int * w, int n)\n{\n for(int i=0; i<n; i++)\n list[node-1].insert(ver[i], w[i]);\n}\n\ntemplate<typename T>\nint Graph<T>::indegree(int node)\n{\n int j, num=0;\n for(int i=0; i<sz; i++)\n if(list[i].find(node)!=NULL)\n num++;\n return num;\n}\n\ntemplate<typename T>\nvoid Graph<T>::print()\n{\n for(int i=0; i<sz; i++)\n {\n std::cout<<\"vertex \"<<i+1<<\": \";\n list[i].print();\n }\n}\n\ntemplate<typename T>\nvoid Graph<T>::shortest_path(T start, T end)\n{\n int cnt=0;\n for(int i=start; cnt<sz; cnt++)\n {\n std::cout<<i<<\" -> \";\n i=list[i-1].get_min_weight();\n if(i==-1)\n {\n std::cout<<end<<\" have no path\"<<std::endl;\n return;\n }\n if(i==end)\n {\n std::cout<<i;\n break;\n }\n }\n std::cout<<std::endl;\n}\n\ntemplate<typename T>\nGraph<T>::~Graph()\n{\n delete []list;\n if(shortpath!=NULL)\n delete []shortpath;\n}\n\n\ntemplate<typename T>\nvoid Graph<T>::short_path(T v)\n{\n std::queue<int> q;\n for(int i=0; i<sz; i++)\n {\n shortpath[i].vertex=i+1;\n }\n shortpath[v-1].path=v;\n shortpath[v-1].dis=1;\n shortpath[v-1].known=false;\n q.push(v);\n while(!q.empty()&&!all())\n {\n T temp=q.front();\n q.pop();\n for(int i=0, ver=0; (ver=list[temp-1].get_pos(i))!=-1; i++)\n {\n q.push(ver);\n Node<T> *node=list[temp-1].find(ver);\n if(shortpath[ver-1].known==false)\n {\n shortpath[ver-1].known=true;\n shortpath[ver-1].dis=shortpath[temp-1].dis+node->weight;\n shortpath[ver-1].path=temp;\n }\n else\n {\n if(shortpath[temp-1].dis+node->weight<shortpath[ver-1].dis)\n {\n shortpath[ver-1].dis=shortpath[temp-1].dis+node->weight;\n shortpath[ver-1].path=temp;\n }\n }\n }\n }\n for(int i=1; i<sz; i++)\n {\n std::cout<<\"node \"<<shortpath[0].vertex<<\" to node\"<<shortpath[i].vertex<<\" distance is :\"<<shortpath[i].dis<<\" through\"<<shortpath[i].path<<std::endl;\n }\n}\n\nint main()\n{\n Graph<int> graph(7);\n int a[]={2,4};\n int w1[]={2, 1};\n graph.insert(1, a, w1, 2);\n int b[]={4,5};\n int w2[]={3,10};\n graph.insert(2, b, w2, 2);\n int c[]={1,6};\n int w3[]={4, 5};\n graph.insert(3, c, w3, 2);\n int d[]={3,6,7};\n int w4[]={2, 8,5};\n graph.insert(4, d, w4, 2);\n int e[]={7};\n int w5[]={6};\n graph.insert(5, e, w5, 1);\n int f[]={6};\n int w6[]={1};\n graph.insert(7, f, w6, 1);\n graph.print();\n for(int i=0; i<graph.size(); i++)\n {\n std::cout<<\"indegree of vertex \"<<i+1<<\"is: \"<<graph.indegree(i+1)<<std::endl;\n }\n graph.shortest_path(1, 6);\n graph.short_path(1);\n return 0;\n}\n\n\n\n" }, { "alpha_fraction": 0.4462365508079529, "alphanum_fraction": 0.47311827540397644, "avg_line_length": 16, "blob_id": "a608c19b063ad328ee4eaac8476e8dff205c4d34", "content_id": "6905fd4f0b8891f964c66aac750155145c822764", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 186, "license_type": "no_license", "max_line_length": 68, "num_lines": 11, "path": "/Programming/C/The C programming Language/chapter2/nologic.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define MAX 10\nint main()\n{\n char c[MAX];\n char ch;\n for(int i=0;((i<MAX-1)+((ch=getchar())!='\\n')+(ch!=EOF))==3;++i)\n c[i]=ch;\n printf(\"%s\\n\",c);\n}" }, { "alpha_fraction": 0.4304291307926178, "alphanum_fraction": 0.48114433884620667, "avg_line_length": 19.810810089111328, "blob_id": "73f197588f170f3f8fc2a5ed82f29836954745b6", "content_id": "f9dcd59b81af1151120cacc7c7268adb67d82ce0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 769, "license_type": "no_license", "max_line_length": 68, "num_lines": 37, "path": "/Algorithm/Algorithm/chapter7/quickselect.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate<typename T>\nT quickselect(T *a, int k, int n);\n\nint main()\n{\n int a[]={1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 16, 7, 15, 8, 16};\n std::cout<<quickselect(a, 5, 16)<<std::endl;\n return 0;\n}\n\ntemplate<typename T>\nT quickselect(T *a, int k, int n)\n{\n if(n<=1)\n return a[0];\n std::swap(a[0], a[n/2]);\n int pivot=0;\n for(int i=n-1; i>pivot;)\n {\n if(a[i]<=a[pivot])\n {\n std::swap(a[i], a[pivot+1]);\n std::swap(a[pivot], a[pivot+1]);\n pivot++;\n continue;\n }\n i--;\n }\n if(k<pivot+1)\n return quickselect(a, k, pivot);\n else if(k>pivot+1)\n return quickselect(a+pivot+1, k-pivot-1, n-pivot); \n else\n return a[pivot];\n}" }, { "alpha_fraction": 0.5941176414489746, "alphanum_fraction": 0.6117647290229797, "avg_line_length": 13.166666984558105, "blob_id": "e526300bf0f63303242cef9efb4d551e08a7a1cb", "content_id": "07a9256ebc4cb8e75e556de1a3fdd811fcc61d21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 170, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/Programming/JAVA/Thinking in JAVA/chapter6/Rock/Constructor.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class Rock{\n\tRock(){\n\t\tSystem.out.println(\"Rock\");\n\t}\n}\n\npublic class Constructor{\n\tpublic static void main(String []args){\n\t\tfor(int i=0; i<10; i++)\n\t\t\tnew Rock();\n\t}\n}\n" }, { "alpha_fraction": 0.48630136251449585, "alphanum_fraction": 0.48630136251449585, "avg_line_length": 8.800000190734863, "blob_id": "6dc2abacd2e1ab0188246e341f30fec54e51b110", "content_id": "5c0c42bfd748f8be4f2cdae6d09d9558bb1e4095", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 146, "license_type": "no_license", "max_line_length": 34, "num_lines": 15, "path": "/Programming/C++/Effective_C++/chapter2/dynamic_bound.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class A\n{\n};\n\nclass B:public A\n{\n};\n\nint main()\n{\n A a, *ap;\n B b, *bp;\n ap=&b; //base to derive: OK\n bp=&a; //derive to base: error\n}" }, { "alpha_fraction": 0.5777632594108582, "alphanum_fraction": 0.5899901986122131, "avg_line_length": 27.393518447875977, "blob_id": "b1da1a633bc921c4538ed6a3ed213ba7c3650fd6", "content_id": "9082956c58191ea636c61c2a41f0c6a991b0a1ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6134, "license_type": "no_license", "max_line_length": 113, "num_lines": 216, "path": "/Programming/C/libcap/packet_analysis.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<pcap.h>\n#include<stdio.h>\n#include<stdlib.h>\n#include<errno.h>\n#include<sys/socket.h>\n#include<netinet/in.h>\n#include<arpa/inet.h>\n#include<netinet/if_ether.h>\n#include<net/ethernet.h>\n#include<netinet/tcp.h>\n//#include<netinet/ether.h>\n\nstruct my_ip{\nu_int8_t\tip_vhl;\t\t/* header length, version */\n#define IP_V(ip)\t(((ip)->ip_vhl & 0xf0) >> 4)\n#define IP_HL(ip)\t((ip)->ip_vhl & 0x0f)\nu_int8_t\tip_tos;\t\t/* type of service */\nu_int16_t\tip_len;\t\t/* total length */\nu_int16_t\tip_id;\t\t/* identification */\nu_int16_t\tip_off;\t\t/* fragment offset field */\n#define\tIP_DF 0x4000\t\t\t/* dont fragment flag */\n#define\tIP_MF 0x2000\t\t\t/* more fragments flag */\n#define\tIP_OFFMASK 0x1fff\t\t/* mask for fragmenting bits */\nu_int8_t\tip_ttl;\t\t/* time to live */\nu_int8_t\tip_p;\t\t/* protocol */\nu_int16_t\tip_sum;\t\t/* checksum */\nstruct\tin_addr ip_src,ip_dst;\t/* source and dest address */\n};\n\n\nu_int16_t handle_ethernet(u_char* args, const struct pcap_pkthdr* pkthdr, const u_char* packet)\n{\n struct ether_header *eptr;\n eptr=(struct ether_header *) packet;\n \n /*check to see if we have an ip packet*/\n if(ntohs(eptr->ether_type)==ETHERTYPE_IP)\n {\n fprintf(stdout, \"ethernet header source: %s\", ether_ntoa((const struct ether_addr *)&eptr->ether_shost));\n fprintf(stdout,\"\\n\");\n fprintf(stdout, \"destination: %s\", ether_ntoa((const struct ether_addr *)&eptr->ether_dhost));\n fprintf(stdout,\"(IP)\");\n fprintf(stdout,\"\\n\");\n }\n //else if(ntohs(eptr->ether_type)==ETHERTYPE_ARP)\n //{\n // fprintf(stdout,\"(ARP)\");\n //}\n //else if(ntohs(eptr->ether_type)==ETHERTYPE_REVARP)\n //{\n // fprintf(stdout,\"(RARP)\");\n //}\n //else\n //{\n // fprintf(stdout,\"(?\");\n //exit(1);\n //}\n \n return eptr->ether_type;\n}\n\nu_char* handle_ip(u_char* args, const struct pcap_pkthdr* pkthdr, const u_char* packet)\n{\n const struct my_ip* ip;\n u_int length=pkthdr->len;\n u_int hlen, off, version;\n int i;\n int len;\n /*jump pass the etherent header*/\n ip=(struct my_ip *)(packet+sizeof(struct ether_header));\n struct tcphdr *tcp=(struct tcphdr*)(packet+sizeof(struct ether_header)+sizeof(struct my_ip));\n length -=sizeof(struct ether_header);\n /*check to see we have a pcaket of valid length*/\n if(length<sizeof(struct my_ip))\n {\n printf(\"truncated ip %d\", length);\n return NULL;\n }\n len=ntohs(ip->ip_len);\n hlen=IP_HL(ip); /*header length*/\n version =IP_V(ip);/*ipversion*/\n /*check version*/\n if(version!=4)\n {\n fprintf(stdout,\"Unknow version %d\\n\", version);\n return NULL;\n }\n /*check header length*/\n if(hlen<5)\n {\n fprintf(stdout,\"bad-hlen %d\\n\",hlen);\n }\n /*see if we have as much as packet as we should*/\n if(length<len)\n {\n printf(\"\\ntruncated IP -%d bytes missing\\n\",len-length);\n }\n /*check to see if we have the first fragment*/\n off=ntohs(ip->ip_off);\n if((off&0x1fff)==0)\n {\n fprintf(stdout,\"IP:\");\n fprintf(stdout,\"%s\\n\", inet_ntoa(ip->ip_src));\n fprintf(stdout,\"%s\\n %d %d %d %d\\n\", inet_ntoa(ip->ip_dst), hlen, version, len, off);\n fprintf(stdout,\"%d\\n\", tcp->th_off);\n fprintf(stdout,\"%d\\n\",ip->ip_p);\n }\n return NULL;\n}\n\nvoid my_callback(u_char *args, const struct pcap_pkthdr* pkthdr, const u_char* packet)\n{\n u_int16_t type=handle_ethernet(args, pkthdr, packet);\n if(type==8)\n {\n /*handle IP packet*/\n handle_ip(args, pkthdr, packet);\n }\n else if(type==ETHERTYPE_ARP)\n {\n \n }\n else if(type==ETHERTYPE_REVARP)\n {\n /*handle reverse arp packet*/\n }\n}\n\nint main(int argc, char **argv)\n{\n char *dev;\n char errbuf[PCAP_ERRBUF_SIZE];\n pcap_t* descr;\n struct bpf_program fp;\n bpf_u_int32 maskp;\n bpf_u_int32 netp;\n u_char * args=NULL;\n \n /*options must be passed in as a string*/\n if(argc<2)\n {\n fprintf(stdout,\"Usage: %s numpackets\\\"options\\\"\\n\",argv[0]);\n return 0;\n }\n /*grab a device*/\n dev=pcap_lookupdev(errbuf);\n if(dev==NULL)\n {\n printf(\"%s\\n\",errbuf);\n exit(1);\n }\n /*ask pcap for the network address and mask of the device*/\n pcap_lookupnet(dev,&netp,&maskp,errbuf);\n /*open device for reading*/\n descr=pcap_open_live(dev,BUFSIZ, 1, 1000, errbuf);\n if(descr==NULL)\n {\n printf(\"Pcap_open_live(): %s\\n\",errbuf);\n exit(1);\n }\n if(argc>2)\n {\n /* try and compile the program*/\n if(pcap_compile(descr, &fp, \"ip\",1, netp)==-1)\n {\n fprintf(stderr,\"Error calling pcap_compile\\n\");\n exit(1);\n }\n /*set the compilet program as the filter*/\n if(pcap_setfilter(descr,&fp)==-1)\n {\n fprintf(stderr,\"Error setting filter\\n\");\n exit(1);\n }\n }\n /*loop second parameter is how many packets sniffed before return*/\n pcap_loop(descr, atoi(argv[1]),my_callback,args);\n fprintf(stdout,\"\\nfinished\\n\");\n return 0;\n}\n/*something about the ethernet header and address conversion*/\n\n/* This is a name for the 48 bit ethernet address available on many\n systems. */\n/*\nstruct ether_addr\n{\n u_int8_t ether_addr_octet[ETH_ALEN];\n} __attribute__ ((__packed__));\n */\n\n/* 10Mb/s ethernet header */\n/*\nstruct ether_header\n{\n u_int8_t ether_dhost[ETH_ALEN];\tdestination eth addr\n u_int8_t ether_shost[ETH_ALEN]; source ether addr\n u_int16_t ether_type;\t\t packet type ID field\n} __attribute__ ((__packed__));\n*/\n\n/* Convert 48 bit Ethernet ADDRess to ASCII. */\n/*\nextern char *ether_ntoa (__const struct ether_addr *__addr) __THROW;\nextern char *ether_ntoa_r (__const struct ether_addr *__addr, char *__buf)\n__THROW;*/\n\n\n/* Convert ASCII string S to 48 bit Ethernet address. */\n/*\nextern struct ether_addr *ether_aton (__const char *__asc) __THROW;\nextern struct ether_addr *ether_aton_r (__const char *__asc,\n struct ether_addr *__addr) __THROW;\n extern int ether_hostton (__const char *__hostname, struct ether_addr *__addr)\n __THROW;\n */\n\n" }, { "alpha_fraction": 0.5634920597076416, "alphanum_fraction": 0.5634920597076416, "avg_line_length": 8.071428298950195, "blob_id": "912a2d49c3ad3b2d460298de2741d8132662ac9b", "content_id": "276736f3f5423fbc2e12fdcbe40720af1885e026", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 126, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/Programming/C++/Effective_C++/chapter2/class_completion.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A\n{\npublic:\n A &operator=(A a){return *this;}\n int what(A a);\nprivate:\n \n};\n\nint main()\n{\n}" }, { "alpha_fraction": 0.4850948452949524, "alphanum_fraction": 0.5121951103210449, "avg_line_length": 16.619047164916992, "blob_id": "253cdbea7894dfbffc160253d5f724d4abb6e929", "content_id": "11e68721bf1e2d43fd184b0388953b1d162a8ab6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 369, "license_type": "no_license", "max_line_length": 43, "num_lines": 21, "path": "/Programming/C/Programming_in_Unix/chapter4/getcwd.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n\nint main()\n{\n char ptr[100];\n int size;\n if(chdir(\"/usr/spool/uucppublic\")<0)\n {\n fprintf(stderr, \"error: chdir\\n\");\n exit(0);\n }\n if(getcwd(ptr, 100)==NULL)\n {\n fprintf(stderr, \"error: getcwd\\n\");\n exit(0);\n }\n printf(\"cwd = %s\\n\", ptr);\n return 0;\n}" }, { "alpha_fraction": 0.4451783299446106, "alphanum_fraction": 0.45970937609672546, "avg_line_length": 16.227272033691406, "blob_id": "436ca0c31ad0fa9db0c35a84cdddd3d5fc238eee", "content_id": "79d9579758196054ecb99ee234dc4d409bb47eb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 757, "license_type": "no_license", "max_line_length": 43, "num_lines": 44, "path": "/Programming/C/The C programming Language/chapter1/longestline.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define MAXLINE 1000\n\nint getLine(char line[], int maxline);\nvoid copy(char to[], char from[]);\n\nint main()\n{\n int cnt,max;\n char line[MAXLINE];\n char newLine[MAXLINE];\n max=0;\n while((cnt=getLine(newLine,MAXLINE))>0)\n {\n if(cnt>max)\n {\n max=cnt;\n copy(line,newLine);\n }\n }\n if(max>0)\n printf(\"%s\\n\", line);\n return 0;\n}\n\nint getLine(char line[],int maxline)\n{\n int i=0;\n char c=getchar();\n while(c!='\\n'&&i<maxline&&c!=EOF)\n {\n line[i]=c;\n c=getchar();\n ++i;\n }\n return i;\n}\n\nvoid copy(char to[], char from[])\n{\n for(int i=0;from[i]!='\\0';++i)\n to[i]=from[i];\n}" }, { "alpha_fraction": 0.4333333373069763, "alphanum_fraction": 0.5166666507720947, "avg_line_length": 9, "blob_id": "50eebacd2fa7837352e7d58a828e75cabb414446", "content_id": "7cb433408abf45c998ec35bea19274ea65e31cea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 15, "num_lines": 6, "path": "/Programming/Python/13Loop/continue.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "x = 10\nwhile x:\n\tx=x-1\n\tif x % 2 != 0:\n\t\tcontinue\n\tprint(x)\n" }, { "alpha_fraction": 0.5480769276618958, "alphanum_fraction": 0.5480769276618958, "avg_line_length": 8.545454978942871, "blob_id": "10725b3e57fe5312922be3808863a7f09e3fdbed", "content_id": "1c4bfafbb73e3ddff95a9ec7af94f9e3b1ecbffb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 104, "license_type": "no_license", "max_line_length": 34, "num_lines": 11, "path": "/Programming/C++/Effective_C++/chapter7/type_function.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nvoid fun( int)\n{\n std::cout<<\"right\"<<std::endl;\n}\n\nint main()\n{\n fun( int);\n}" }, { "alpha_fraction": 0.5759999752044678, "alphanum_fraction": 0.5920000076293945, "avg_line_length": 10.181818008422852, "blob_id": "eef6ad66d171dc0c50be1d77f86e7877c97989a4", "content_id": "0c510698f65845c760fb85bb3b093f086616e22a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 125, "license_type": "no_license", "max_line_length": 21, "num_lines": 11, "path": "/Programming/Lua/4Expression/ifthenelse.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "local i = 1\na={\"hello\", \",world\"}\nwhile a[i] do\n\tprint(a[i])\n\ti=i+1\nend\n\nrepeat\n\tline=io.read()\nuntil line~=\"\"\nprint(line)\n\n\n" }, { "alpha_fraction": 0.4907521605491638, "alphanum_fraction": 0.49691739678382874, "avg_line_length": 21.55555534362793, "blob_id": "cc9b5516a62894db4b4e6afe0904c2f5c178a752", "content_id": "5262bd4cedf08b7745ece19cea10f99ad8485213", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 811, "license_type": "no_license", "max_line_length": 58, "num_lines": 36, "path": "/Programming/C/Programming_in_Unix/chapter4/utime.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <fcntl.h>\n#include <unistd.h>\n#include <utime.h>\n#include <sys/stat.h>\n\nint main(int argc, char *argv[])\n{\n int i, fd;\n struct stat statbuf;\n struct utimbuf timebuf;\n for (i=1; i<argc; i++)\n {\n if(stat(argv[i], &statbuf)<0)\n {\n fprintf(stderr, \"error: stat %s\\n\", argv[i]);\n continue;\n }\n if((fd=open(argv[i], O_RDWR|O_TRUNC))<0)\n {\n fprintf(stderr, \"error: open %s\\n\", argv[i]);\n continue;\n }\n close(fd);\n timebuf.actime=statbuf.st_atime;\n timebuf.modtime=statbuf.st_mtime;\n if(utime(argv[i], &timebuf)<0)\n {\n fprintf(stderr, \"error: utime %s\\n\", argv[i]);\n continue;\n }\n }\n return 0;\n \n}" }, { "alpha_fraction": 0.517241358757019, "alphanum_fraction": 0.5310344696044922, "avg_line_length": 9.428571701049805, "blob_id": "247a70b87fdab37ba4e053f31fdf7c6e8751cd64", "content_id": "54fae78e3c9efbe7649839b04a34b3067c2d0276", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 145, "license_type": "no_license", "max_line_length": 26, "num_lines": 14, "path": "/Programming/C++/Effective_STL/Associative_Container/map_value_default.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<map>\n\nclass A{\npublic:\n A(int , int){};\n};\n\nint main()\n{\n std::map<int, A> imap;\n //imap[0];\n return 0;\n}" }, { "alpha_fraction": 0.43378376960754395, "alphanum_fraction": 0.4418918788433075, "avg_line_length": 19.58333396911621, "blob_id": "109f1f9daf36606ac973f2479b5ec28f157af1ac", "content_id": "6dd84c0c103beb1db2555c6dc52211dae7850688", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 740, "license_type": "no_license", "max_line_length": 68, "num_lines": 36, "path": "/Programming/C/The C programming Language/chapter7/robust_cat.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n\nint main(int argc, char *argv[])\n{\n FILE *fp;\n void filecopy(FILE *, FILE *);\n char *prog=argv[0];\n if(argc==1)\n filecopy(stdin, stdout);\n else\n while(--argc>0)\n if((fp=fopen(*++argv, \"r\"))==NULL)\n {\n fprintf(stderr, \"%s: can't open %s\\n\", prog, *argv);\n exit(1);\n }\n else\n {\n filecopy(fp, stdout);\n fclose(fp);\n }\n if(ferror(stdout))\n {\n fprintf(stderr, \"%s: error writing stdout\\n\", prog);\n exit(2);\n }\n exit(0);\n}\n\nvoid filecopy(FILE * ifp, FILE *ofp)\n{\n char c;\n while((c=getc(ifp))!=EOF)\n putc(c,ofp);\n}" }, { "alpha_fraction": 0.5560747385025024, "alphanum_fraction": 0.5623052716255188, "avg_line_length": 19.09375, "blob_id": "dadace8b3543d84fbef6df394fe1d5e88d13e492", "content_id": "1e0263419981d287a8cff1d52883b62fecb80553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 642, "license_type": "no_license", "max_line_length": 66, "num_lines": 32, "path": "/Programming/C/multithread/printHello.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n#include<pthread.h>\n\n#define NUM_THREADS 5\n\nvoid * PrintHello(void *threadid)\n{\n unsigned long tid;\n tid=(unsigned long) threadid;\n printf(\"Hello World! Thread ID, %ld\\n\",tid);\n pthread_exit(NULL);\n}\n\nint main()\n{\n pthread_t threads[NUM_THREADS];\n int rc;\n int i;\n for(i=0;i<NUM_THREADS; ++i)\n {\n printf(\"main(): creading thread, %d\\n\",i);\n rc=pthread_create(&threads[i],NULL,PrintHello,(void *)&i);\n if(rc)\n {\n printf(\"Error:unable to create thread,%d \\n\",rc);\n exit(-1);\n }\n }\n pthread_exit(NULL);\n return 0;\n}" }, { "alpha_fraction": 0.42073169350624084, "alphanum_fraction": 0.4573170840740204, "avg_line_length": 20.161291122436523, "blob_id": "8590025e971f788d99e3a94e79c81020c6772373", "content_id": "748bd101a38053c7abef5dc760099df98bfe23d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 656, "license_type": "no_license", "max_line_length": 42, "num_lines": 31, "path": "/Programming/Python/11Value_Statement_Print/equal_tuple.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "nudge = 1\nwink = 2\nnudge, wink = wink, nudge\nprint(nudge, wink)\nprint(type(nudge))\n[a, b, c] = (1, 2, 3)\nprint(a, c)\n(a, b, c) = \"ABC\"\nprint(a, c)\nstring='SPAM'\na, b, c, d=string\nprint(a, d)\n#a, b, c=string\na, b, c = string[0], string[1], string[2:]\nprint(a, b, c)\nprint('****************************')\na, b, c=list(string[:2])+[string[2:]]\nprint(a, b, c)\nprint('****************************')\n((a, b), c)=('SP', 'AM')\nprint(a, b, c)\nred, green, blue=range(3)\nprint(red, blue)\nprint('****************************')\nl = [1, 2, 3, 5, 6]\nwhile l:\n\tfront, l = l[0], l[1:]\n\tprint(front, l)\nfor (a, b, c) in [(6, 6, 6), (6, 6, 6)]:\n\tprint(a, b, c)\n#a, b, c='ab'\n" }, { "alpha_fraction": 0.5041899681091309, "alphanum_fraction": 0.5209497213363647, "avg_line_length": 13.9375, "blob_id": "2c0d5f4302552028569dc6dea655a773b0226d2e", "content_id": "f66a9e3fa86959c9d0317b81735cd1fe83c85475", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 716, "license_type": "no_license", "max_line_length": 42, "num_lines": 48, "path": "/Programming/Practice/Interpretation/prime_logn.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<utility>\n\nint expr(int base, int exp)\n{\n int temp;\n if(exp==0)\n return 1;\n if(exp%2==0)\n temp=expr(base, exp/2);\n else\n {\n temp=base* expr(base, exp-1);\n }\n return temp;\n}\n\nbool try_it(int a, int n)\n{\n if(expr(a, n)%n==a)\n return true;\n return false;\n}\n\nbool fermat_test(int n)\n{\n return try_it(random()%n, n);\n}\n\nbool fast(int n, int times)\n{\n if(times==0)\n return true;\n else if(fermat_test(n))\n fast(n, times-1);\n else\n return false;\n return false;\n}\n\nint main()\n{\n if(fast(13, 5))\n std::cout<<\"is prime\"<<std::endl;\n else\n std::cout<<\"not prime\"<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.6511628031730652, "alphanum_fraction": 0.6511628031730652, "avg_line_length": 14.357142448425293, "blob_id": "efd9e106485967dc24a88a00a8b3dd89325f5b98", "content_id": "4b59dcbe75377993d787b082f241933e09edac34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 215, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/Programming/JAVA/Thinking in JAVA/chapter10/interface/Interface.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "interface test{\n\tvoid f();\n}\n\nclass some implements test{\n\tpublic void f(){System.out.println(\"derived f()\");}\n}\n\npublic class Interface{\n\tpublic static void main(String [] args){\n\t\tsome s=new some();\n\t\ts.f();\n\t}\n}\n" }, { "alpha_fraction": 0.47154471278190613, "alphanum_fraction": 0.5447154641151428, "avg_line_length": 9.25, "blob_id": "73b35d03061291cdbe6e62a32d8b09da4c54cfac", "content_id": "481616b037c647fd65c67c9ba9b4a177431c4268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 123, "license_type": "no_license", "max_line_length": 21, "num_lines": 12, "path": "/Programming/Lua/2Type/for.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "a = {}\nfor i = 1, 10 do\n\ta[i]=io.read()\nend\n\nfor i = 1, 10 do\n\tprint(a.tonumber(i))\nend\n\nfor i = 1, 10 do\n\tprint(a[i])\nend\n" }, { "alpha_fraction": 0.45525291562080383, "alphanum_fraction": 0.4902723729610443, "avg_line_length": 18.846153259277344, "blob_id": "332fd4541f752a6195fa0527572b8a1a0c5d9ed1", "content_id": "efb0bc92977e3d6561fe3e759861a829ffd30e5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 257, "license_type": "no_license", "max_line_length": 45, "num_lines": 13, "path": "/Programming/C++/More_Effective_C++/chapter1/placement_new.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n void *pi=operator new [](10*sizeof(int));\n int *p=static_cast<int *>(pi);\n for(int i=0; i<10; i++)\n p[i]=i;\n for(int i=0; i<10; i++)\n std::cout<<p[i]<<\" \";\n std::cout<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.6211878061294556, "alphanum_fraction": 0.6444622874259949, "avg_line_length": 28.64285659790039, "blob_id": "db37ee692a79b8120eebd6d3315aefb5006a7b1f", "content_id": "c8d09f763f14813ee42a93fc536082d5e08b9fcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1246, "license_type": "no_license", "max_line_length": 85, "num_lines": 42, "path": "/Project/SNS/rp2p.h", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <string>\nstruct rp2p{\n unsigned char confld;\n unsigned char reserv;\n short seq;\n short ack;\n short cksum;\n};\n\n/*\n **************************\n confld|reserv|sequence_num\n **************************\n acknowledge |checksum\n **************************\n */\n#define LOG_IN 0x80 //when this bit is set, it's login action\n#define SIGN_ON 0x40 //when this bit is set, it's register action\n#define LOG_OUT 0x20 //when this bit is set, it's log out action\n#define LOGGED 0x10 //when this bit is set, the user has complete login action\n/*\n * following four all need the logged flag is set, Otherwise, error occur\n */\n#define UPD_IP 0x08 //this bit set for users update it's IP\n#define UPD_LOC 0x04 //this bit set for users to update its location\n#define UPD_FRD 0x02 //this bit set for users to update friendship\n#define UPD_INFO 0x01 //this bit set for request update peer's IP\n\n#define H_MASK 0xf0\n#define L_MASK 0x0f\n\n\nclass Action\n{\npublic:\n static int sign_on(const char *);\n static int login(const char *, std::string &);\n static int keep_alive(const char *, unsigned char, unsigned char, std::string &);\n static int log_out(const char *);\n};\n\nint proc_msg(const char *, std::string &, int);\n\n" }, { "alpha_fraction": 0.5409836173057556, "alphanum_fraction": 0.5819672346115112, "avg_line_length": 12.44444465637207, "blob_id": "0055a1bbebca43075c94ead8512eaeddcdff6538", "content_id": "fa18839200e558ec2d046cf00aa5c6c8f88faab8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 122, "license_type": "no_license", "max_line_length": 22, "num_lines": 9, "path": "/Programming/Python/14iteration/auto_iterator.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "L = [1, 2, 3]\nfor x in L:\n\tprint(x**2)\nI = iter(L)\nwhile True:\n\ttry:\n\t\tprint(I.next()**2)\n\texcept StopIteration:\n\t\tbreak\n\n" }, { "alpha_fraction": 0.5982906222343445, "alphanum_fraction": 0.6068376302719116, "avg_line_length": 18.5, "blob_id": "21b4c4285ae517daab2af8ccf95dc875d9a6329d", "content_id": "1db07a9e2bc3e49f68ca7deb6df801257de3c696", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 234, "license_type": "no_license", "max_line_length": 35, "num_lines": 12, "path": "/Programming/Python/4ObjectType/file.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "f=open('data.txt', 'w');\nprint(f.write('Hello\\n'));\nf.write('World\\n');\nf.close();\nf=open('data.txt');\ntext=f.read();\nprint(text);\nprint(text.split());\nprint(dir(f));\ndata=open('data.txt', 'rb').read();\nprint(data);\nprint(data[4:8]);\n" }, { "alpha_fraction": 0.3411726951599121, "alphanum_fraction": 0.35824742913246155, "avg_line_length": 23.2578125, "blob_id": "5f7be6a0412b8e6c6ce1ff0f4e1d708247af5f54", "content_id": "aaffa3a6f604b0cbc3fac43a8a3c0bf9f8c40bcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3104, "license_type": "no_license", "max_line_length": 97, "num_lines": 128, "path": "/Algorithm/Leetcode/surround.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\n\nusing namespace std;\n\nclass Solution {\npublic:\n void solve(vector<vector<char> > &board) {\n if(board.size()<3||board[1].size()<3)\n return;\n for(int i=0; i<board.size(); i++)\n {\n for(int j=0; j<board[i].size(); j++)\n {\n if(board[i][j]=='0')\n {\n if(left(board, i, j)&&right(board, i, j)&&up(board, i, j)&&down(board, i, j))\n board[i][j]='X';\n }\n }\n }\n }\n bool left(vector<vector<char> > &board, int i, int j)\n {\n if(i>0)\n {\n if(board[j][i-1]=='0')\n {\n if(left(board, i-1, j)&&up(board, i-1, j)&&down(board, i-1, j))\n {\n board[j][i-1]='X';\n return true;\n }\n else\n return false;\n }\n else\n return true;\n }\n return false;\n }\n bool right(vector<vector<char> > &board, int i, int j)\n {\n if(i<board[j].size()-1)\n {\n if(board[j][i+1]=='0')\n {\n if(right(board, i+1, j)&&up(board, i+1, j)&&down(board, i+1, j))\n {\n board[j][i+1]='X';\n return true;\n }\n else\n return false;\n }\n else\n return true;\n }\n return false;\n }\n bool up(vector<vector<char> > &board, int i, int j)\n {\n if(j>0)\n {\n if(board[j-1][i]=='0')\n {\n if(up(board, i, j-1)&&left(board, i, j-1)&&right(board, i, j-1))\n {\n board[j-1][i]='X';\n return true;\n }\n else\n return false;\n }\n else\n return true;\n }\n return false;\n }\n bool down(vector<vector<char> > &board, int i, int j)\n {\n if(j<board.size()-1)\n {\n if(board[j+1][i]=='0')\n {\n if(down(board, i, j+1)&&left(board, i, j+1)&&right(board, i, j+1))\n {\n board[j+1][i]='X';\n return true;\n }\n else\n return false;\n }\n else\n return true;\n }\n return false;\n }\n};\n\n\nint main()\n{\n vector<char> cvec1(3, 'X');\n vector<char> cvec2;\n cvec2.push_back('X');\n cvec2.push_back('0');\n cvec2.push_back('X');\n vector<vector<char> > vvec;\n vvec.push_back(cvec1);\n vvec.push_back(cvec2);\n vvec.push_back(cvec1);\n for(int i=0; i<3; i++)\n {\n for(int j=0; j<3; j++)\n std::cout<<vvec[i][j]<<\" \";\n std::cout<<std::endl;\n }\n Solution s;\n s.solve(vvec);\n for(int i=0; i<3; i++)\n {\n for(int j=0; j<3; j++)\n std::cout<<vvec[i][j]<<\" \";\n std::cout<<std::endl;\n }\n return 0;\n}" }, { "alpha_fraction": 0.4583333432674408, "alphanum_fraction": 0.5, "avg_line_length": 15, "blob_id": "8a35473ede258fde2e78b5f261b6710074998d12", "content_id": "516bb0879631b1cb513801023ba9aa6e4c30fa8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 192, "license_type": "no_license", "max_line_length": 35, "num_lines": 12, "path": "/Programming/C/The C programming Language/test/special.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nint main()\n{\n/*\tint 10[a], i=0;\n\tfor(;i<10; i++)\n\t\tprintf(\"%d\", i[a]);\n\tprintf(\"\\n\");*/\n\tprintf(\"%d\\n\", \"Hello, World\"[0]);\n\tprintf(\"%d\\n\", 0[\"Hello World\"]);\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5237113237380981, "alphanum_fraction": 0.5360824465751648, "avg_line_length": 20.130434036254883, "blob_id": "7b27b2f2f331701dfccdcc5221aba4898f6b2dac", "content_id": "05024b140082a4726ddb36e1209acae769cfb9cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 485, "license_type": "no_license", "max_line_length": 59, "num_lines": 23, "path": "/Programming/C/Programming_in_Unix/chapter4/chmod.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/stat.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nint main()\n{\n struct stat statbuf;\n if(stat(\"foo\", &statbuf)<0)\n {\n fprintf(stderr, \"error: stat for foo\\n\");\n exit(0);\n }\n if(chmod(\"foo\", (statbuf.st_mode &~S_IXGRP)|S_ISGID)<0)\n {\n fprintf(stderr, \"error: chmod for foo\\n\");\n exit(0);\n }\n if(chmod(\"bar\", S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)<0)\n {\n fprintf(stderr, \"error: chmod for bar\\n\");\n }\n return 0;\n}" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 12.25, "blob_id": "60870c86821544a6203107b7c185eb1d12c7238b", "content_id": "d355ba75b0811372dd41ce8c9aafcf428c423fa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 105, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/Programming/C/The C programming Language/test/sizeof.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nint main()\n{\n int i;\n printf(\"%d %d\\n\", sizeof i, sizeof (int));\n return 0;\n}" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.43802815675735474, "avg_line_length": 19.314285278320312, "blob_id": "593233039e4066e442d22d39a2949e302d12194b", "content_id": "2cb8dd25dc9bb6c6525b44e2b52fe3dc2b7f4069", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 710, "license_type": "no_license", "max_line_length": 42, "num_lines": 35, "path": "/Algorithm/Programming_Perls/quicksort.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nvoid quicksort(int a[], int low, int high)\n{\n if(low>=high)\n return;\n int mid=(low+high)/2;\n std::swap(a[low], a[mid]);\n mid=low;\n for(int i=high-1; i>mid; i--)\n {\n if(a[i]<a[mid])\n {\n std::swap(a[mid+1], a[i]);\n std::swap(a[mid], a[mid+1]);\n mid++;\n i++;\n }\n }\n for(int i=low; i<high; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n quicksort(a+low, 0, mid);\n quicksort(a+mid+1, 0, high-mid-1);\n}\n\nint main()\n{\n int a[10]={9,8,7,6,5,4,3,2,1,0};\n quicksort(a, 0, 10);\n for(int i=0; i<10; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.6028110384941101, "alphanum_fraction": 0.6512233018875122, "avg_line_length": 26.840579986572266, "blob_id": "fb877d5b1996b030265d2c484e8f21c675f1a8ab", "content_id": "d92c201319188506b98c6411675e75914ba18eb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1921, "license_type": "no_license", "max_line_length": 105, "num_lines": 69, "path": "/Cocos2d/coordinate/Classes/HelloWorldScene.cpp", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"HelloWorldScene.h\"\n#include \"cocostudio/CocoStudio.h\"\n#include \"ui/CocosGUI.h\"\n\nUSING_NS_CC;\n\nusing namespace cocostudio::timeline;\n\nScene* HelloWorld::createScene()\n{\n // 'scene' is an autorelease object\n auto scene = Scene::create();\n \n // 'layer' is an autorelease object\n auto layer = HelloWorld::create();\n\n // add layer as a child to scene\n scene->addChild(layer);\n\n // return the scene\n return scene;\n}\n\n// on \"init\" you need to initialize your instance\nbool HelloWorld::init()\n{\n //////////////////////////////\n // 1. super init first\n if ( !Layer::init() )\n {\n return false;\n }\n \n auto rootNode = CSLoader::createNode(\"MainScene.csb\");\n\n addChild(rootNode);\n\n auto closeItem = static_cast<ui::Button*>(rootNode->getChildByName(\"Button_1\"));\n closeItem->addTouchEventListener(CC_CALLBACK_1(HelloWorld::menuCloseCallback, this));\n \n /*auto red = LayerColor::create(Color4B(255, 100, 100, 128), 800, 600);\n auto green = LayerColor::create(Color4B(100, 255, 100, 128), 400, 300);\n red->addChild(green);\n this->addChild(red);*/\n auto red = LayerColor::create(Color4B(255, 100, 100, 128), 800, 600);\n red->ignoreAnchorPointForPosition(true);\n //red->setAnchorPoint(Point(0.5, 0.5));\n red->setPosition(300, 200);\n auto green = LayerColor::create(Color4B(100, 255, 100, 128), 600, 300);\n green->ignoreAnchorPointForPosition(true);\n //green->setAnchorPoint(Point(0, 0));\n red->addChild(green);\n this->addChild(red);\n return true;\n}\n\nvoid HelloWorld::menuCloseCallback(Ref* pSender)\n{\n#if (CC_TARGET_PLATFORM == CC_PLATFORM_WP8) || (CC_TARGET_PLATFORM == CC_PLATFORM_WINRT)\n\tMessageBox(\"You pressed the close button. Windows Store Apps do not implement a close button.\",\"Alert\");\n return;\n#endif\n\n Director::getInstance()->end();\n\n#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)\n exit(0);\n#endif\n}\n" }, { "alpha_fraction": 0.38211381435394287, "alphanum_fraction": 0.49593496322631836, "avg_line_length": 19.5, "blob_id": "1a72ed806e42df4eb7a88606edc0a2cfdfaeeb03", "content_id": "4cfdd50f5177845718778871afbbb05b236bdc68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 26, "num_lines": 6, "path": "/Programming/Python/13Loop/zip.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "L1 = [1,2,3]\nL2 = [6, 6, 6]\nprint(type(zip(L1, L2)))\nprint(zip(L1, L2))\nfor (x, y) in zip(L1, L2):\n\tprint(x, y, '--', x+y)\n" }, { "alpha_fraction": 0.6745406985282898, "alphanum_fraction": 0.6745406985282898, "avg_line_length": 20.22222137451172, "blob_id": "a41541cf54e9d4d0a908c05d6512035e135ec84c", "content_id": "99a86a8b91ebe4f924cc25c1c87218e89ef4df2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 381, "license_type": "no_license", "max_line_length": 78, "num_lines": 18, "path": "/Project/source/Client/data.h", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <string>\n#include <vector>\n\nclass Records\n{\npublic:\n static void add_rcd(const std::string & account, const std::string & msg);\n static void list(const std::string & account);\n};\n\nclass Frd_List\n{\npublic:\n static int read(std::vector<std::string> &vec);\n static int fadd(const std::string &);\n static int fdelete(const std::string &);\n};" }, { "alpha_fraction": 0.5208333134651184, "alphanum_fraction": 0.5208333134651184, "avg_line_length": 8.600000381469727, "blob_id": "ba554b79c5281137e511be2c5d9b1a14321a516a", "content_id": "e076195bf07d867987823753db835454b2a48b6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 11, "num_lines": 5, "path": "/Programming/Python/17Namespace/exer2.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "X = 'Spam'\ndef func():\n\tX='Ni!'\nfunc()\nprint(X)\n" }, { "alpha_fraction": 0.33280253410339355, "alphanum_fraction": 0.36942675709724426, "avg_line_length": 15.552631378173828, "blob_id": "22dac9c9f4370765dc7d2cb4ddd76833ecabcf86", "content_id": "d23322eefa97d8ba79a4cb276da1825955a6a4bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 628, "license_type": "no_license", "max_line_length": 37, "num_lines": 38, "path": "/Programming/C/The C programming Language/chapter2/any.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define MAX 100\n\nint any(char a[], char b[]);\n\nint main()\n{\n int ch, i=0;\n char s1[MAX],s2[MAX];\n for(i=0;(ch=getchar())!='\\n';++i)\n s1[i]=ch;\n s1[i]='\\0';\n for(i=0;(ch=getchar())!='\\n';++i)\n s2[i]=ch;\n s2[i]='\\n';\n printf(\"%s \\n %s \\n\",s1,s2);\n printf(\"%d\\n\",any(s1,s2));\n}\n\nint any(char a[], char b[])\n{\n int i=0;\n int flag=0;\n while(a[i]!='\\0')\n {\n for(int j=0;b[j]!='\\0';++j)\n if(a[i]==b[j])\n {\n flag=1;\n break;\n }\n ++i;\n if(flag)\n break;\n }\n return i;\n}" }, { "alpha_fraction": 0.49025973677635193, "alphanum_fraction": 0.5032467246055603, "avg_line_length": 12.434782981872559, "blob_id": "3dcb2945ee454829174d1255a240b4bc34643059", "content_id": "4f56352b8beffce4fb8bc94de2e7039b7a944bb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 308, "license_type": "no_license", "max_line_length": 39, "num_lines": 23, "path": "/Programming/C++/Effective_STL/Containers/function.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint fun(double (a))\n{\n std::cout<<\"double (a)\"<<std::endl;\n return 0;\n}\nint fun(double ())\n{\n std::cout<<\"double ()\"<<std::endl;\n return 0;\n}\nint fun(int p())\n{\n std::cout<<\"double p()\"<<std::endl;\n return 0;\n}\nint main()\n{\n double (*p)();\n fun((1));\n fun(p);\n}" }, { "alpha_fraction": 0.5586206912994385, "alphanum_fraction": 0.565517246723175, "avg_line_length": 10.230769157409668, "blob_id": "558c4d65d074345af4fb96afbb2d727d058542f1", "content_id": "9c9c51145c93e9c46367d332a1eef064220a588f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 145, "license_type": "no_license", "max_line_length": 28, "num_lines": 13, "path": "/Programming/C++/More_Effective_C++/chapter4/test.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<iostream>\n\nvoid p(std::string s)\n{\n std::cout<<s<<std::endl;\n}\n\nint main()\n{\n p(\"Guo\" \"Yuquan\");\n return 0;\n}" }, { "alpha_fraction": 0.5104166865348816, "alphanum_fraction": 0.5416666865348816, "avg_line_length": 9.777777671813965, "blob_id": "0b306e694c86fc6f457f717c17984848a1f55fba", "content_id": "9eb58e05471ee1d48dd19f6e3caecaecfaa9aae4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 96, "license_type": "no_license", "max_line_length": 25, "num_lines": 9, "path": "/Programming/C/The C programming Language/test/label.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nint main()\n{\n int ival=10;\nival:\n printf(\"%d\\n\", ival);\n return 0;\n}" }, { "alpha_fraction": 0.271702378988266, "alphanum_fraction": 0.31116122007369995, "avg_line_length": 17.12244987487793, "blob_id": "ffc55fccfadcbaa2982d3cd5f6913c52b4ae7929", "content_id": "02ffec8d2fa44ec2a749e68774834bd385633337", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 887, "license_type": "no_license", "max_line_length": 54, "num_lines": 49, "path": "/Programming/C/The C programming Language/chapter2/htoi.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define MAX 1000\nint main()\n{\n char s[MAX];\n int ch;\n int i=0;\n while((ch=getchar())!=EOF&&ch!='\\n')\n {\n s[i]=ch;\n ++i;\n }\n s[i]='\\0';\n int n=0;\n int flag=0;\n for(i=0;s[i]!='\\0';++i)\n {\n \n if(s[0]=='0'&&(s[1]=='x'||s[1]=='X')&&flag==0)\n {\n i=2;\n }\n flag=0;\n \n if(s[i]>='0'&&s[i]<='9')\n {\n n=n*16+(s[i]-'0');\n flag=1;\n }\n if(s[i]>='a'&&s[i]<='f')\n {\n n=n*16+(s[i]-'a'+10);\n flag=1;\n }\n if(s[i]>='A'&&s[i]<='F')\n {\n n=n*16+(s[i]-'A'+10);\n flag=1;\n }\n if(flag!=1)\n {\n printf(\"ERROR:Invalid input.\\n\");\n }\n }\n printf(\"Input is: %s\\n\",s);\n printf(\"The corresponding integer is: %d\\n\",n);\n return 0;\n}" }, { "alpha_fraction": 0.499459445476532, "alphanum_fraction": 0.5124324560165405, "avg_line_length": 19.130434036254883, "blob_id": "dbce5b86c64bb8d8efa410f9923aca3342009852", "content_id": "6a354d0d9cee6f23d6c663907ff1916fb6f33b80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 925, "license_type": "no_license", "max_line_length": 60, "num_lines": 46, "path": "/Programming/C/Programming_in_Unix/chapter15/popen.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <sys/wait.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <string.h>\n\n#define PAGER \"${PAGER:-more}\"\n\n#define MAXLINE 1024\n\nint\nmain(int argc, char *argv[])\n{\n char line[MAXLINE];\n FILE *fpin, *fpout;\n if(argc!=2)\n {\n fprintf(stderr, \"usage: a.out <pathname>\\n\");\n exit(0);\n }\n if((fpin=fopen(argv[1], \"r\"))==NULL)\n {\n fprintf(stderr, \"error: open %s\\n\", argv[1]);\n exit(0);\n }\n if((fpout=popen(PAGER, \"w\"))==NULL)\n {\n fprintf(stderr, \"error: popen\\n\");\n exit(0);\n }\n while(fgets(line, MAXLINE, fpin)!=NULL)\n {\n if(fputs(line, fpout)==EOF)\n {\n fprintf(stderr, \"error: fputs error to pipe\\n\");\n break;\n }\n }\n if(ferror(fpin))\n {\n fprintf(stderr, \"error, fgets\\n\");\n }\n if(pclose(fpout)==-1)\n fprintf(stderr, \"pclose error\\n\");\n exit(0);\n}" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5961538553237915, "avg_line_length": 16.33333396911621, "blob_id": "295a3d5bedce3791e36963c8b8c8897c61d8a387", "content_id": "2ca6f63c58ff4130dbeb4b501984cfde001d1764", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/Programming/Python/18Parameter/3exer.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def func(a, *pargs):\n\tprint(a, pargs)\nfunc(1, 2, 3)\n" }, { "alpha_fraction": 0.454054057598114, "alphanum_fraction": 0.45945945382118225, "avg_line_length": 10.625, "blob_id": "06c1f806f809c1bac88da757de60581b2744919c", "content_id": "eb0dc6bfe9033595d0c8dc3f60eb7cc66ac6f4d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 185, "license_type": "no_license", "max_line_length": 77, "num_lines": 16, "path": "/Programming/C++/Inside_the_C++_object_model/chapter6/default_initialization.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint i;\n\nint fun()\n{\n int j;\n return j;\n}\n\nint main()\n{\n int j;\n std::cout<<\"i: \"<<i<<\"; \"<<\"j: \"<<j<<\"; in function: \"<<fun()<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.7046979665756226, "alphanum_fraction": 0.7046979665756226, "avg_line_length": 17.625, "blob_id": "d25bfd5fa267ae40e9a4db4ef09a22b2cb39b7fd", "content_id": "51a2ed7c939f347154c2fca1670c87aa5da6f885", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, "license_type": "no_license", "max_line_length": 26, "num_lines": 8, "path": "/Programming/Python/Class/second.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class FirstClass:\n\tdef setdata(self, value):\n\t\tself.data=value\n\tdef display(self):\n\t\tprint(self.data)\nF = FirstClass()\nF.setdata('some')\nF.display()\n" }, { "alpha_fraction": 0.6096096038818359, "alphanum_fraction": 0.6546546816825867, "avg_line_length": 21.133333206176758, "blob_id": "c3406da7d91d6ea7597ebacf369222aa7103fa2e", "content_id": "88d8f8d8bdfba9d8e7949e67ac8215f8906ca3d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 31, "num_lines": 15, "path": "/Programming/Python/4ObjectType/class.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class Worker:\n\tdef __init__(self, name, pay):\n\t\tself.name=name;\n\t\tself.pay=pay;\n\tdef lastName(self):\n\t\treturn self.name.split()[-1]\n\tdef giveRaise(self, percent):\n\t\tself.pay*=(1.0+percent);\n\t\nbob=Worker('Bob Smith', 50000);\nsue=Worker('Sue Jones', 60000);\nprint(bob.lastName());\nprint(sue.lastName());\nsue.giveRaise(.10);\nprint(sue.pay);\n\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 6.363636493682861, "blob_id": "82dcae5767b5577fff4345ac8f7c496efa583b60", "content_id": "ea5a37dad6e2ec6c3091cc7de84d6da5d865151a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 80, "license_type": "no_license", "max_line_length": 28, "num_lines": 11, "path": "/Programming/C++/Inside_the_C++_object_model/chapter3/test_inherit.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\n};\n\nclass B: public A, public A{\n};\n\nint main()\n{\n}" }, { "alpha_fraction": 0.5252366065979004, "alphanum_fraction": 0.5615141987800598, "avg_line_length": 23.423076629638672, "blob_id": "628fb3a096b938d2365ce6a8eb5e0aebb11e7d05", "content_id": "f5427a36dfff9d88f69afb9b4b97273063edfbd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 634, "license_type": "no_license", "max_line_length": 65, "num_lines": 26, "path": "/Programming/C/The C programming Language/chapter8/cp.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<fcntl.h>\n#include<sys/types.h>\n#include<sys/stat.h>\n#include<sys/file.h>\n#include<unistd.h>\n\n#define PERMS 0666\n\n//void error(char *, ...);\n\nint main(int argc, char *argv[])\n{\n int f1, f2, n;\n char buf[BUFSIZ];\n if(argc!=3)\n printf(\"Usage: cp from tp\");\n if((f1=open(argv[1], O_RDONLY, 0))==-1)\n printf(\"cp: can't open %s\", argv[1]);\n if((f2=creat(argv[2], PERMS))==-1)\n printf(\"cp: can't create %s, mode %03o\", argv[2], PERMS);\n while((n=read(f1,buf,BUFSIZ))>0)\n if(write(f2,buf,n)!=n)\n printf(\"cp: write error on file %s\", argv[2]);\n return 0;\n}" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 16, "blob_id": "0c8ed9086f1ddae967c6e5b57f335378375600f9", "content_id": "7a5c48c1a1928b6d47479f87856fcdd0980be7de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 85, "license_type": "no_license", "max_line_length": 19, "num_lines": 5, "path": "/Programming/Lua/3Statements/logic.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "print(4 and 5)\nprint(nil and 13)\nprint(false and 16)\nprint(4 or 5)\nprint(false or 5)\n" }, { "alpha_fraction": 0.4818018078804016, "alphanum_fraction": 0.48972973227500916, "avg_line_length": 16.681528091430664, "blob_id": "97f151b1a6087efd6a968a1a1f37a370b85f6224", "content_id": "ad48aa18185bfbfd59df70763fe4ed73a916891e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2775, "license_type": "no_license", "max_line_length": 71, "num_lines": 157, "path": "/Algorithm/Algorithm/chapter4/binary_search_tree.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nstruct Node\n{\n Node(int x=0, Node *l=0, Node *r=0):element(x), left(l), right(r){}\n int element;\n Node *left;\n Node *right;\n};\n\nclass Binary\n{\npublic:\n Binary():root(NULL){}\n Node *find(int x);\n Node *insert(int x);\n Node *delete_(int x);\n Node *find_min();\n Node *find_max();\n void print();\nprivate:\n Node *root;\n Node *delete_tree(int x, Node *ptr);\n Node *find_min_1(Node *p);\n void real_print(Node *p);\n};\nNode * Binary::find(int x)\n{\n Node *p=root;\n while(p!=NULL)\n {\n if(x<p->element)\n p=p->left;\n else if(x>p->element)\n p=p->right;\n else\n return p;\n }\n return p;\n}\n\nNode * Binary::insert(int x)\n{\n if(root==NULL)\n {\n root=new Node(x);\n }\n Node *p=root, *temp;\n while(p!=NULL)\n {\n if(x<p->element)\n {\n temp=p;\n p=p->left;\n }\n else if(x>p->element)\n {\n temp=p;\n p=p->right;\n }\n else\n return p;\n }\n if(x<temp->element)\n temp->left=new Node(x);\n else\n temp->right=new Node(x);\n return p;\n}\n\nNode * Binary::find_min()\n{\n return find_min_1(root);\n}\n\nNode * Binary::find_max()\n{\n Node *p=root;\n if(p!=NULL)\n while(p->right!=NULL)\n p=p->right;\n return p;\n}\n\nNode * Binary::delete_(int x)\n{\n return delete_tree(x, root);\n}\n\nNode *Binary::find_min_1(Node *p)\n{\n if(p!=NULL)\n while(p->left!=NULL)\n p=p->left;\n return p;\n}\n\nNode * Binary::delete_tree(int x, Node *ptr)\n{\n if(ptr==NULL)\n {\n return ptr;\n }\n Node *temp, *child;\n if(x<ptr->element)\n ptr->left=delete_tree(x, ptr->left);\n else if(x>ptr->element)\n ptr->right=delete_tree(x, ptr->right);\n else if(ptr->left&&ptr->right)\n {\n temp=find_min_1(ptr->right);\n ptr->element=temp->element;\n ptr->right=delete_tree(ptr->element, ptr->right);\n }\n else\n {\n std::cout<<long(temp)<<std::endl;\n temp=ptr;\n if(ptr->left==NULL)\n child=ptr->right;\n if(ptr->right==NULL)\n child=ptr->left;\n delete temp;\n return child;\n }\n return ptr;\n}\n\nvoid Binary::print()\n{\n real_print(root);\n}\n\nvoid Binary::real_print(Node *r)\n{\n if(r==NULL)\n return;\n if(r->left!=NULL)\n real_print(r->left);\n std::cout<<r->element<<std::endl;\n if(r->right!=NULL)\n real_print(r->right);\n}\n\nint main()\n{\n int a[]={6,1,4,2,3,8,9,7,0,5};\n Binary tree;\n std::cout<<sizeof(tree)<<std::endl;\n for(int i=0; i<10; i++)\n tree.insert(*(a+i));\n tree.delete_(6);\n char * p=NULL;\n delete p;\n tree.print();\n return 0;\n}" }, { "alpha_fraction": 0.40909090638160706, "alphanum_fraction": 0.4160839021205902, "avg_line_length": 18.100000381469727, "blob_id": "70907b75e356475d47583c6afda385ac458bf48e", "content_id": "9e3863c233679d4c35e4c54c613c8c72df8baa6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 572, "license_type": "no_license", "max_line_length": 53, "num_lines": 30, "path": "/Programming/C/The C programming Language/chapter7/cat.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nint main(int argc, char *argv[])\n{\n FILE *fp;\n void filecopy(FILE*, FILE*);\n \n if(argc==1)\n filecopy(stdin, stdout);\n else\n while(--argc>0)\n if((fp=fopen(*++argv, \"r\"))==NULL)\n {\n printf(\"cat: cant open %s\\n\", *argv);\n return 1;\n }\n else\n {\n filecopy(fp, stdout);\n fclose(fp);\n }\n return 0;\n}\n\nvoid filecopy(FILE *ifp, FILE *ofp)\n{\n int c;\n while((c=getc(ifp))!=EOF)\n putc(c,ofp);\n}" }, { "alpha_fraction": 0.5070707201957703, "alphanum_fraction": 0.5191919207572937, "avg_line_length": 18.84000015258789, "blob_id": "d208e9e67f34f7075df09a41419037faea1899e3", "content_id": "5884dd22c5134650059cec078c1c988e483b14d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 495, "license_type": "no_license", "max_line_length": 44, "num_lines": 25, "path": "/Programming/C/Programming_in_Unix/chapter5/tmpnam_tmpfile_function.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n\nint main()\n{\n char name[L_tmpnam], line[100];\n FILE *fp;\n printf(\"%s\\n\", tmpnam(NULL));\n tmpnam(name);\n printf(\"%s\\n\", name);\n if((fp=tmpfile())==NULL)\n {\n fprintf(stderr, \"error: tmpfile\\n\");\n exit(0);\n }\n fputs(\"one line of output\\n\", fp);\n rewind(fp);\n if(fgets(line, sizeof(line), fp)==NULL)\n {\n fprintf(stderr, \"error: fgets\\n\");\n exit(0);\n }\n fputs(line, stdout);\n return 0;\n}" }, { "alpha_fraction": 0.4804370701313019, "alphanum_fraction": 0.48607683181762695, "avg_line_length": 16.07831382751465, "blob_id": "e3f3c37a1d04680f33ddc647239207ebdb949c18", "content_id": "14db74e6e720114f5ae99656f22aca3581c7c7e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2837, "license_type": "no_license", "max_line_length": 56, "num_lines": 166, "path": "/Algorithm/Algorithm/chapter5/open_hash.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\ntemplate <typename T>\nstruct Node{\n Node(T ele=0, Node * n=NULL):element(ele), next(n){}\n T element;\n Node *next;\n};\n\ntemplate <typename T>\nclass List{\npublic:\n List():head(NULL){}\n Node<T> * find(T x);\n const Node<T> * insert(T x);\n void delete_(T x);\n void print();\n ~List();\nprivate:\n Node<T> * head;\n};\n\ntemplate <typename T>\nNode<T> * List<T>::find(T x)\n{\n Node<T> *p=head;\n while(p!=NULL&&p->element!=x)\n p=p->next;\n return p;\n}\n\n#include <stdexcept>\ntemplate <typename T>\nconst Node<T> * List<T>::insert(T x)\n{\n if(head==NULL)\n {\n try\n {\n head=new Node<T>(x);\n }catch(std::bad_alloc ba)\n {\n std::cout<<ba.what()<<std::endl;\n exit(1);\n }\n }\n else\n {\n Node<T> *temp=find(x);\n if(temp==NULL)\n {\n try\n {\n temp=new Node<T>(x);\n }catch(std::bad_alloc ba)\n {\n std::cout<<ba.what()<<std::endl;\n }\n temp->next=head;\n head=temp;\n }\n else\n return temp;\n }\n return head;\n}\n\ntemplate <typename T>\nvoid List<T>::delete_(T x)\n{\n if(head==NULL)\n return;\n Node<T> * temp=head, * pre=head;\n while(temp!=NULL&&temp->element!=x)\n {\n pre=temp;\n temp=temp->next;\n }\n if(temp==NULL)\n return;\n if(temp==head)\n {\n delete head;\n head=NULL;\n }\n pre->next=temp->next;\n delete temp;\n temp=NULL;\n}\n\ntemplate <typename T>\nvoid List<T>::print()\n{\n Node<T> * p=head;\n while(p!=NULL)\n {\n std::cout<<p->element<<\" \";\n p=p->next;\n }\n}\n\ntemplate <typename T>\nList<T>::~List()\n{\n Node<T> *p=head;\n while(p!=NULL)\n {\n delete p;\n p=p->next;\n }\n}\n\ntemplate <typename T>\nclass Hash{\npublic:\n Hash(int k=10):hptr(new List<T> [k]), H_Size(k){}\n const int size(){return H_Size;}\n Node<T> * find(T x);\n const Node<T> * insert(T x);\n void delete_(T x);\n void print();\n ~Hash(){delete []hptr;}\nprivate:\n int hash(T x){return x%H_Size;}\n List<T> * hptr;\n const int H_Size;\n};\n\ntemplate <typename T>\nconst Node<T> * Hash<T>::insert(T x)\n{\n return (*(hptr+hash(x))).insert(x);\n}\n\ntemplate <typename T>\nNode<T> * Hash<T>::find(T x)\n{\n return (*(hptr+hash(x))).find(x);\n}\n\ntemplate <typename T>\nvoid Hash<T>::delete_(T x)\n{\n (*(hptr+hash(x))).delete_(x);\n}\n\ntemplate <typename T>\nvoid Hash<T>::print()\n{\n for(int i=0; i<H_Size; i++)\n {\n (*(hptr+i)).print();\n std::cout<<std::endl;\n }\n}\n\nint main()\n{\n Hash<int> hash;\n for(int i=0; i<100; i++)\n hash.insert(i);\n hash.print();\n for(int i=1; i<100; i+=11)\n hash.delete_(i);\n hash.print();\n return 0;\n}\n\n\n" }, { "alpha_fraction": 0.6709265112876892, "alphanum_fraction": 0.6741213798522949, "avg_line_length": 30.399999618530273, "blob_id": "3472a06ce9ce0f69dc4f5f8825f0e345e3cacabd", "content_id": "6020cf1b17491142a7311ef0362bed55f8bd5fe1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 313, "license_type": "no_license", "max_line_length": 200, "num_lines": 10, "path": "/Programming/C/Programming_in_Unix/chapter6/OS_identify.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <sys/utsname.h>\n\nint main()\n{\n struct utsname un;\n uname(&un);\n printf(\"name of OS: %s\\nname of this node: %s\\ncurrent realease of OS: %s\\ncurrent version of release%s\\nname of hardware type: %s\\n\", un.sysname, un.nodename, un.release, un.version, un.machine);\n return 0;\n}" }, { "alpha_fraction": 0.43023255467414856, "alphanum_fraction": 0.5116279125213623, "avg_line_length": 16.200000762939453, "blob_id": "024d899a72123f1d3bd79a14a293899a4bcad3af", "content_id": "65b587c07a362b4e003dd6a4d672e7e6f803644d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/Programming/Python/14iteration/list_parse.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "L = [6, 6, 6]\nfor i in range(len(L)):\n\tL[i] += 10\nprint(L)\nprint([x + 10 for x in L])\n" }, { "alpha_fraction": 0.49295774102211, "alphanum_fraction": 0.5070422291755676, "avg_line_length": 11, "blob_id": "00bdd141da3efa929bdafc254ed4930ea6a335e0", "content_id": "48b2ac23872c310fae02d33dfd887c605fe91531", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 71, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/Programming/C/The C programming Language/chapter4/test.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\nint main()\n{\n printf(\"%c\\n\", 'a');\n return 0;\n}" }, { "alpha_fraction": 0.6985294222831726, "alphanum_fraction": 0.6985294222831726, "avg_line_length": 21.66666603088379, "blob_id": "dd87271bdee05023e218536bcd973d6ccb5f9a06", "content_id": "b1764cd8d4b64dfb4068d910cb1b0617bec9a283", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 46, "num_lines": 6, "path": "/Programming/Python/19HighLevelFunction/tkinter.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "import sys\nfrom tkinter import Button, mainloop\nx = Button(\n\ttext = 'Press me',\n\tcommand=(lambda: sys.stdout.write('Spam\\n')))\nx.pack()\n" }, { "alpha_fraction": 0.6633663177490234, "alphanum_fraction": 0.6782178282737732, "avg_line_length": 19.200000762939453, "blob_id": "04b32776681409f48763dfc950787253980af162", "content_id": "272c9b8c5e36ba6ccde29c150a25e47e5cbe5ba6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 202, "license_type": "no_license", "max_line_length": 50, "num_lines": 10, "path": "/Programming/JAVA/Thinking in JAVA/chapter6/constructor/Constructor.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class cons{\n\tpublic cons(){System.out.println(\"1\");}\n\tpublic cons(int x){this();System.out.println(x);}\n}\n\npublic class Constructor{\n\tpublic static void main(String []args){\n\t\tcons x=new cons(12);\n\t}\n}\n" }, { "alpha_fraction": 0.5388861298561096, "alphanum_fraction": 0.5584545731544495, "avg_line_length": 19.978946685791016, "blob_id": "a4eee1f4d7dfb8e6cea2d98f0cecc4d94b38d0c9", "content_id": "fe5c59581cd0bc60e12af1f7093efe306fcc19b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1993, "license_type": "no_license", "max_line_length": 77, "num_lines": 95, "path": "/Algorithm/Algorithm/chapter6/leftist_heap.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<stdexcept>\n\ntemplate<typename T>\nstruct Node{\n Node(T ele=0, int n=0):element(ele),left(NULL), right(NULL), npl(n){}\n T element;\n Node *left;\n Node *right;\n int npl;\n};\n\ntemplate<typename T>\nclass Leftist{\npublic:\n Leftist(Node<T> *p=NULL):root(p){}\n Leftist merge(Leftist l1, Leftist l2);\n void insert(T x);\n T delete_min();\nprivate:\n Leftist real_merge(Node<T> *, Node<T> *);\n void swap_children(Node<T> *p){\n Node<T> *temp=p->right;\n p->right=p->left;\n p->left=temp;\n }\n Node<T> * root;\n};\n\ntemplate <typename T>\nT Leftist<T>::delete_min()\n{\n if(root==NULL)\n {\n std::cout<<\"no element\"<<std::endl;\n return -1;\n }\n T temp=root->element;\n Node<T> * ret=merge(Leftist<T>(root->left),Leftist<T>(root->right)).root;\n delete root;\n root=ret;\n return temp;\n}\n\ntemplate <typename T>\nvoid Leftist<T>::insert(T x)\n{\n Node<T> *temp=new Node<T>(x);\n if(temp==NULL)\n {\n std::bad_alloc ba;\n std::cout<<\"out of space\"<<std::endl;\n throw ba;\n }\n root=merge(Leftist<T>(temp), Leftist<T>(root)).root;\n}\n\ntemplate <typename T>\nLeftist<T> Leftist<T>::real_merge(Node<T> *n1, Node<T> *n2)\n{\n if(n1->left==NULL)\n n1->left=n2;\n else\n {\n n1->right=merge(Leftist<T>(n1->right), Leftist<T>(n2)).root;\n if(n1->left->npl<n1->right->npl)\n swap_children(n1);\n n1->npl=n1->right->npl+1;\n }\n return n1;\n}\n\ntemplate <typename T>\nLeftist<T> Leftist<T>::merge(Leftist<T> l1, Leftist<T> l2)\n{\n if(l1.root==NULL)\n return l2;\n if(l2.root==NULL)\n return l1;\n if(l1.root->element <l2.root->element)\n return real_merge(l1.root, l2.root);\n return real_merge(l2.root, l1.root);\n}\n\nint main()\n{\n Leftist<int> left;\n for(int i=10;i>0;i--)\n left.insert(i);\n for(int i=0;i<10;i++)\n std::cout<<left.delete_min()<<\" \";\n std::cout<<std::endl;\n return 0;\n \n}\n" }, { "alpha_fraction": 0.4941176474094391, "alphanum_fraction": 0.5372549295425415, "avg_line_length": 14.96875, "blob_id": "3219b4120ca53f94900534c23f36c461fac24f12", "content_id": "c0eb190c09b20c8c0a7960ab0a9a0365be0e680d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 510, "license_type": "no_license", "max_line_length": 67, "num_lines": 32, "path": "/Programming/Practice/Interpretation/coins.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint first(int kinds)\n{\n if(kinds>5)\n {\n std::cout<<\"error:out of band\"<<std::endl;\n exit(1);\n }\n static int a[5]={1,5,10,25,50};\n return a[kinds-1];\n}\n\nint cc(int amount, int kinds)\n{\n if(amount==0)\n return 1;\n if(amount<0||kinds==0)\n return 0;\n else\n {\n return cc(amount, kinds-1)+cc(amount-first(kinds), kinds);\n }\n}\nint change(int amount)\n{\n return cc(amount, 5);\n}\nint main()\n{\n std::cout<<change(100)<<std::endl;\n}" }, { "alpha_fraction": 0.4177897572517395, "alphanum_fraction": 0.5256064534187317, "avg_line_length": 15.1304349899292, "blob_id": "a9b1f079218d780bd14cdb4d66b9b6fb9072384f", "content_id": "55bf6901eb7a54ed6b260f0c21f95ceefa6b00b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 371, "license_type": "no_license", "max_line_length": 24, "num_lines": 23, "path": "/Programming/Python/18Parameter/parameter.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def f(a, b, c):\n\tprint(a, b, c)\nf(1, 2, 3)\nf(c=1, b=2, a=3)\ndef fun1(a, b=2, c=3):\n\tprint(a, b, c, sep=',')\nfun1(6)\nfun1(1)\nfun1(6, 6, 6)\ndef fun2(*args):\n\tprint(args)\nfun2(1)\nfun2(6, 6)\nfun2(6, 6, \"hello\")\ndef fun3(**args):\n\tprint(args)\nfun3(a=1, b=2)\ndef fun5(a, *b, **c):\n\tprint(a, b, c)\nfun5(1, 2, 3, x=1, y=6)\ndef kwonly(a, *b, c):\n\tprint(a, b, c)\nkwonly(1, 2, c=3)\n" }, { "alpha_fraction": 0.5264536142349243, "alphanum_fraction": 0.5295966267585754, "avg_line_length": 15.902654647827148, "blob_id": "faf05ce5938361c57e0051389960c6699be88dea", "content_id": "7c6a109746f6421e02dccc37bb8604c90ec2e93a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1909, "license_type": "no_license", "max_line_length": 51, "num_lines": 113, "path": "/Algorithm/Algorithm/chapter3/list.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<stdexcept>\n\ntemplate<typename T>\nstruct Node{\n Node():element(0), next(NULL){}\n T element;\n Node *next;\n};\n\ntemplate <typename T>\nclass List\n{\npublic:\n List():header(new Node<T>){}\n ~List();\n bool is_empty(){return header->next==NULL;}\n bool is_last(Node<T> *p){return p->next==NULL;}\n Node<T> *find(T x);\n Node<T> *find_previous(T x);\n void delete_(T x);\n void insert(T x, Node<T> *p);\n void print();\nprivate:\n Node<T> *header;\n List(List &);\n List & operator=(List &);\n};\n\ntemplate <typename T>\nList<T>::~List()\n{\n Node<T> *p=header->next, *dp;\n delete header;\n header=NULL;\n header->next=NULL;\n while(p!=NULL)\n {\n dp=p;\n p=p->next;\n delete dp;\n }\n}\n\ntemplate <typename T>\nNode<T> * List<T>::find(T x)\n{\n Node<T> *p=header->next;\n while(p!=NULL&&p->element!=x)\n p=p->next;\n return p;\n \n}\n\ntemplate <typename T>\nNode<T> *List<T>::find_previous(T x)\n{\n Node<T> *p=header;\n while(p->next!=NULL&&p->next->element==x)\n p=p->next;\n return p->next==NULL?NULL:p;\n}\n\ntemplate <typename T>\nvoid List<T>::delete_(T x)\n{\n Node<T> *p=find_previous(x);\n if(p==NULL)\n return;\n Node<T> *dp=p->next;\n p->next=dp->next;\n delete dp;\n}\n\ntemplate <typename T>\nvoid List<T>::insert(T x, Node<T> *p)\n{\n Node<T> *temp;\n try\n {\n temp=new Node<T>;\n }\n catch(std::bad_alloc ba)\n {\n std::cout<<ba.what()<<std::endl;\n exit(1);\n }\n temp->element=x;\n temp->next=p->next;\n p->next=temp;\n}\n\ntemplate <typename T>\nvoid List<T>::print()\n{\n Node<T> *p=header->next;\n while(p!=NULL)\n {\n std::cout<<p->element<<\" \";\n p=p->next;\n }\n std::cout<<std::endl;\n}\n\nint main()\n{\n std::cout<<sizeof (List<long>)<<std::endl;\n List<int> ilist;\n for(int i=0; i<10; i++)\n {\n }\n return 0;\n}" }, { "alpha_fraction": 0.4133504629135132, "alphanum_fraction": 0.4531450569629669, "avg_line_length": 16.727272033691406, "blob_id": "28587ee0ae668cb45dc9e91b6386a79eb97c4559", "content_id": "f2ce2c2bd8d3b1a037aa014086dfdf3ea8829b4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 779, "license_type": "no_license", "max_line_length": 41, "num_lines": 44, "path": "/Algorithm/Programming_Perls/transposition.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nvoid trans1(int a[], int i, int n);\nvoid trans2(int a[], int i, int n);\nint main()\n{\n int a[11]={0,1,2,3,4,5,6,7,8,9,10};\n trans2(a, 3, 11);\n for(int i=0; i<11; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n}\n\nvoid trans1(int a[], int offset, int n)\n{\n for(int i=0; i<offset; i++)\n {\n int t=a[i];\n int j=i;\n for(; j<n;j+=offset)\n {\n a[j]=a[j+offset];\n }\n a[j]=t;\n }\n}\n\nvoid reverse(int a[], int start, int end)\n{\n while(start<end)\n {\n int t=a[start];\n a[start]=a[end];\n a[end]=t;\n start++;\n end--;\n }\n}\nvoid trans2(int a[], int offset, int n)\n{\n reverse(a, 0, offset-1);\n reverse(a, offset, n-1);\n reverse(a, 0, n-1);\n}" }, { "alpha_fraction": 0.6737805008888245, "alphanum_fraction": 0.6737805008888245, "avg_line_length": 22.39285659790039, "blob_id": "f679834a3353b47c18689d374bdebf1ba9b0c9fe", "content_id": "39d8040edb926c7988e88d1cf3202499f559b56b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 656, "license_type": "no_license", "max_line_length": 53, "num_lines": 28, "path": "/Programming/JAVA/Thinking in JAVA/chapter12/stack/StackTest.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "import java.util.LinkedList;\n\nclass Stack<T>{\n\tprivate LinkedList<T> storage=new LinkedList<T>();\n\tpublic void push(T v){storage.addFirst(v);}\n\tpublic T peek(){return storage.getFirst();}\n\tpublic T pop(){return storage.removeFirst();}\n\tpublic boolean empty(){return storage.isEmpty();}\n\tpublic String toString(){return storage.toString();}\n}\n\npublic class StackTest{\n\tpublic static void main(String[] args){\n\t\tStack<String> stack=new Stack<String>();\n\t\tfor(String s:\"My dog has legs\".split(\" \"))\n\t\t{\n\t\t\tstack.push(s);\n\t\t}\n\n\t\tSystem.out.println(stack);\n\t\twhile (!stack.empty())\n\t\t{\n\n\t\t\tSystem.out.print(stack.pop() + \" \");\n\t\t}\n\t\tSystem.out.println();\n\t}\n}\n\n" }, { "alpha_fraction": 0.4861878454685211, "alphanum_fraction": 0.5359116196632385, "avg_line_length": 12, "blob_id": "c0b2d355c362bb0853957b38511197181a8629fd", "content_id": "8c559ec57295dafc6aa866cc2041ebf5eb03b6c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 181, "license_type": "no_license", "max_line_length": 45, "num_lines": 14, "path": "/Programming/Practice/Interpretation/1.11_recursive.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint func(int n)\n{\n if(n<3)\n return n;\n return func(n-1)+2*func(n-2)+3*func(n-3);\n}\n\nint main()\n{\n std::cout<<func(10)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.700258195400238, "alphanum_fraction": 0.7072216868400574, "avg_line_length": 36.68141555786133, "blob_id": "997a866ef000a28b534d3be1cdbedfad162bfb7c", "content_id": "1302cce49c379a1a888eb4c5529d50e816a05c31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 12823, "license_type": "no_license", "max_line_length": 806, "num_lines": 339, "path": "/Programming/C/libcap/capengine.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<pcap.h>\n#include<stdio.h>\n#include<stdlib.h>\n#include<errno.h>\n#include<sys/socket.h>\n#include<netinet/in.h>\n#include<arpa/inet.h>\n#include<netinet/if_ether.h>\n\n/*\n callback function that is passed to pcap_loop(...) and called each time a packet is received\n*/\nvoid my_callback(u_char *useless, const struct pcap_hdr* pkthdr, const u_char *packet)\n{\n static int count =1;\n fprintf(stdout,\"%d \",count);\n if(count==4)\n fprintf(stdout,\"come on baby sayyy you love me!!!\");\n if(count==7)\n fprintf(stdout,\"Tiiimmmeesss!!\");\n fflush(stdout);\n count++;\n}\n\nint main(int argc, char **argv)\n{\n int i;\n char *dev;\n char errbuf[PCAP_ERRBUF_SIZE];\n pcap_t* descr;\n const u_char *packet;\n struct pcap_pkthdr hdr;//pcap.h\n struct ether_header *eptr;//net/ethernet.h\n if(argc!=2)\n {\n fprintf(stdout,\"Usage:%s numpackets\\n\", argv[0]);\n return 0;\n }\n /*grab a device peak into...*/\n dev=pcap_lookupdev(errbuf);\n if(dev==NULL)\n {\n printf(\"%s\\n\",errbuf);\n exit(1);\n }\n /*open device for reading*/\n descr =pcap_open_live(dev, BUFSIZ, 0, 1000, errbuf);\n if(descr==NULL)\n {\n printf(\"pcap_open_live(): %s\\n\", errbuf);\n exit(1);\n }\n /*allright here we call pcap_loop(..)and pass in our callback function*/\n /*int pcap_loop(pcap_t *p, int cnt, pcap_handler callback, u_char *user)*/\n /*if you are wondering what the user argument is all about, so am I*/\n pcap_loop(descr, atoi(argv[1]), my_callback, NULL);\n fprintf(stdout,\"\\nDone processing, packets... wheew!\\n\");\n return 0;\n}\n\n\n\n/*pcap_dispatch() is to collect and process packets. cnt specifies the maximum number of packets to process before returning. A cnt of -1 processes all the packets received in one buffer. A cnt of 0 processes all packets until an error occurs, EOF is reached or the read times out(when doing live reads andd a non-zero read timeout is specified). callback specifies a routine to be called with three arguments: au_char pointer which is passed in from pcap_dispatch(), apointer to the pcap_pkthdr struct(which precede the actual network headers and data), and a u_char pointer to the packet data. The number of packets read is returned.Zero is returned when EOF is reached in a \"savefile.\" A return of -1 indicates an error in which case pcap_perror() or pcap_geterr() may be used to display the error text.\n */\n\n\n/****************pcap_compile(..)and pcap_setfilter(...)*********************/\n/*\n The program consists of one or more primitives. Primitives usu�\n ally consist of an id (name or number) preceded by one or more\n qualifiers. There are three different kinds of qualifier:\n \n type qualifiers say what kind of thing the id name or number\n refers to. Possible types are host, net and port. E.g.,\n `host foo', `net 128.3', `port 20'. If there is no type\n qualifier, host is assumed.\n \n dir qualifiers specify a particular transfer direction to and/or\n from id. Possible directions are src, dst, src or dst and\n src and dst. E.g., `src foo', `dst net 128.3', `src or dst\n port ftp-data'. If there is no dir qualifier, src or dst is\n assumed. For `null' link layers (i.e. point to point proto�\n cols such as slip) the inbound and outbound qualifiers can\n be used to specify a desired direction.\n \n proto qualifiers restrict the match to a particular protocol.\n Possible protos are: ether, fddi, ip, arp, rarp, decnet,\n lat, sca, moprc, mopdl, tcp and udp. E.g., `ether src foo',\n `arp net 128.3', `tcp port 21'. If there is no proto quali�\n fier, all protocols consistent with the type are assumed.\n E.g., `src foo' means `(ip or arp or rarp) src foo' (except\n the latter is not legal syntax), `net bar' means `(ip or arp\n or rarp) net bar' and `port 53' means `(tcp or udp) port\n 53'.\n \n In addition to the above, there are some special `primitive' key�\n words that don't follow the pattern: gateway, broadcast, less,\n greater and arithmetic expressions. All of these are described\n below.\n \n More complex filter expressions are built up by using the words\n and, or and not to combine primitives. E.g., `host foo and not\n port ftp and not port ftp-data'. To save typing, identical quali�\n fier lists can be omitted. E.g., `tcp dst port ftp or ftp-data or\n domain' is exactly the same as `tcp dst port ftp or tcp dst port\n ftp-data or tcp dst port domain'.\n \n Allowable primitives are:\n \n dst host host\n True if the IP destination field of the packet is host,\n which may be either an address or a name.\n \n src host host\n True if the IP source field of the packet is host.\n \n host host\n True if either the IP source or destination of the packet is\n host. Any of the above host expressions can be prepended\n with the keywords, ip, arp, or rarp as in:\n ip host host\n which is equivalent to:\n ether proto \\ip and host host\n If host is a name with multiple IP addresses, each address\n will be checked for a match.\n \n ether dst ehost\n True if the ethernet destination address is ehost. Ehost\n may be either a name from /etc/ethers or a number (see\n ethers(3N) for numeric format).\n \n ether src ehost\n True if the ethernet source address is ehost.\n \n ether host ehost\n True if either the ethernet source or destination address is\n ehost.\n \n gateway host\n True if the packet used host as a gateway. I.e., the ether�\n net source or destination address was host but neither the\n IP source nor the IP destination was host. Host must be a\n name and must be found in both /etc/hosts and /etc/ethers.\n (An equivalent expression is\n ether host ehost and not host host\n which can be used with either names or numbers for host /\n ehost.)\n \n dst net net\n True if the IP destination address of the packet has a net�\n work number of net. Net may be either a name from /etc/net�\n works or a network number (see networks(4) for details).\n \n src net net\n True if the IP source address of the packet has a network\n number of net.\n \n net net\n True if either the IP source or destination address of the\n packet has a network number of net.\n \n net net mask mask\n True if the IP address matches net with the specific net�\n mask. May be qualified with src or dst.\n \n net net/len\n True if the IP address matches net a netmask len bits wide.\n May be qualified with src or dst.\n \n dst port port\n True if the packet is ip/tcp or ip/udp and has a destination\n port value of port. The port can be a number or a name used\n in /etc/services (see tcp(4P) and udp(4P)). If a name is\n used, both the port number and protocol are checked. If a\n number or ambiguous name is used, only the port number is\n checked (e.g., dst port 513 will print both tcp/login traf�\n fic and udp/who traffic, and port domain will print both\n tcp/domain and udp/domain traffic).\n \n src port port\n True if the packet has a source port value of port.\n \n port port\n True if either the source or destination port of the packet\n is port. Any of the above port expressions can be prepended\n with the keywords, tcp or udp, as in:\n tcp src port port\n which matches only tcp packets whose source port is port.\n \n less length\n True if the packet has a length less than or equal to\n length. This is equivalent to:\n len <= length.\n \n greater length\n True if the packet has a length greater than or equal to\n length. This is equivalent to:\n len >= length.\n \n ip proto protocol\n True if the packet is an ip packet (see ip(4P)) of protocol\n type protocol. Protocol can be a number or one of the names\n icmp, igrp, udp, nd, or tcp. Note that the identifiers tcp,\n udp, and icmp are also keywords and must be escaped via\n backslash (\\), which is \\\\ in the C-shell.\n \n ether broadcast\n True if the packet is an ethernet broadcast packet. The\n ether keyword is optional.\n \n ip broadcast\n True if the packet is an IP broadcast packet. It checks for\n both the all-zeroes and all-ones broadcast conventions, and\n looks up the local subnet mask.\n \n ether multicast\n True if the packet is an ethernet multicast packet. The\n ether keyword is optional. This is shorthand for `ether[0]\n & 1 != 0'.\n \n ip multicast\n True if the packet is an IP multicast packet.\n \n ether proto protocol\n True if the packet is of ether type protocol. Protocol can\n be a number or a name like ip, arp, or rarp. Note these\n identifiers are also keywords and must be escaped via back�\n slash (\\). [In the case of FDDI (e.g., `fddi protocol\n arp'), the protocol identification comes from the 802.2 Log�\n ical Link Control (LLC) header, which is usually layered on\n top of the FDDI header. Tcpdump assumes, when filtering on\n the protocol identifier, that all FDDI packets include an\n LLC header, and that the LLC header is in so-called SNAP\n format.]\n \n ip, arp, rarp, decnet\n Abbreviations for:\n ether proto p where p is one of the above protocols.\n \n tcp, udp, icmp\n Abbreviations for:\n ip proto p\n where p is one of the above protocols.\n \n expr relop expr\n True if the relation holds, where relop is one of >,\n <, >=, <=, =, !=, and expr is an arithmetic\n expression composed of integer constants (expressed in\n standard C syntax), the nor� mal binary operators [+, -,\n *, /, &, |], a length operator, and special packet\n data accessors. To access data inside the packet, use the\n following syntax: proto [ expr : size ] Proto is one of\n ether, fddi, ip, arp, rarp, tcp, udp, or icmp, and\n indicates the protocol layer for the index opera� tion.\n The byte offset, relative to the indicated protocol\n layer, is given by expr. Size is optional and indicates\n the number of bytes in the field of interest; it can be\n either one, two, or four, and defaults to one. The\n length opera� tor, indicated by the keyword len, gives the\n length of the packet.\n \n For example, `ether[0] & 1 != 0' catches all multicast traf�\n fic. The expression `ip[0] & 0xf != 5' catches all IP pack�\n ets with options. The expression `ip[6:2] & 0x1fff = 0'\n catches only unfragmented datagrams and frag zero of frag�\n mented datagrams. This check is implicitly applied to the\n tcp and udp index operations. For instance, tcp[0] always\n means the first byte of the TCP header, and never means the\n first byte of an intervening fragment.\n \n Primitives may be combined using:\n \n A parenthesized group of primitives and operators (parenthe�\n ses are special to the Shell and must be escaped).\n \n Negation (`!' or `not').\n \n Concatenation (`&&' or `and').\n \n Alternation (`||' or `or').\n \n Negation has highest precedence. Alternation and concatenation\n have equal precedence and associate left to right. Note that\n explicit and tokens, not juxtaposition, are now required for con�\n catenation.\n \n If an identifier is given without a keyword, the most recent key�\n word is assumed. For example,\n not host vs and ace\n is short for\n not host vs and host ace\n which should not be confused with\n not ( host vs or ace )\n \n Expression arguments can be passed to tcpdump as either a single\n argument or as multiple arguments, whichever is more convenient.\n Generally, if the expression contains Shell metacharacters, it is\n easier to pass it as a single, quoted argument. Multiple arguments\n are concatenated with spaces before being parsed.\n \n EXAMPLES\n To print all packets arriving at or departing from sundown:\n tcpdump host sundown\n \n To print traffic between helios and either hot or ace:\n tcpdump host helios and \\( hot or ace \\)\n \n To print all IP packets between ace and any host except helios:\n tcpdump ip host ace and not helios\n \n To print all traffic between local hosts and hosts at Berkeley:\n tcpdump net ucb-ether\n \n To print all ftp traffic through internet gateway snup: (note that the\n expression is quoted to prevent the shell from (mis-)interpreting the\n parentheses):\n tcpdump 'gateway snup and (port ftp or ftp-data)'\n \n To print traffic neither sourced from nor destined for local hosts (if you\n gateway to one other net, this stuff should never make it onto your local\n net).\n tcpdump ip and not net localnet\n \n To print the start and end packets (the SYN and FIN packets) of each TCP\n conversation that involves a non-local host.\n tcpdump 'tcp[13] & 3 != 0 and not src and dst net localnet'\n \n To print IP packets longer than 576 bytes sent through gateway snup:\n tcpdump 'gateway snup and ip[2:2] > 576'\n \n To print IP broadcast or multicast packets that were not sent via ethernet\n broadcast or multicast:\n tcpdump 'ether[0] & 1 = 0 and ip[16] >= 224'\n \n To print all ICMP packets that are not echo requests/replies (i.e., not\n ping packets):\n tcpdump 'icmp[0] != 8 and icmp[0] != 0\"\n\n */\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5756097435951233, "alphanum_fraction": 0.6146341562271118, "avg_line_length": 19.5, "blob_id": "acd261be335468d9d602aafa50a4218772cca517", "content_id": "e303c70f7f1711b21c420fa57667b6b27ba4d6c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 205, "license_type": "no_license", "max_line_length": 40, "num_lines": 10, "path": "/Programming/JAVA/Thinking in JAVA/chapter5/breakandcontinue/BreakAndContinue.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "public class BreakAndContinue{\n\tpublic static void main(String []args){\n\t\tfor(int i=0; i<100; i++){\n\t\t\tif(i==67)break;\n\t\t\tif(i%9==0)continue;\n\t\t\tSystem.out.println(i+\" \");\n\t\t}\n\t\tSystem.out.println();\n\t}\n}\n" }, { "alpha_fraction": 0.4768786132335663, "alphanum_fraction": 0.5202311873435974, "avg_line_length": 17.210525512695312, "blob_id": "b6a75240e084c241df2a45c413b786a919645262", "content_id": "01cf084157b68739a9529bc75b61eddcea71f3bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 346, "license_type": "no_license", "max_line_length": 46, "num_lines": 19, "path": "/Programming/Python/14iteration/internal.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "L = [1, 2, 3, 6, 6]\nprint(sum(L))\nprint('any', any(L))\nprint('all', all(L))\nprint('max', max(L))\nprint('min', min(L))\nprint('&&'.join([l for l in open('test.py')]))\n\nprint('**********************')\nZ = zip((1,2,3), (6, 6, 6,))\nI=iter(Z)\nI2=iter(Z)\nprint(I.next())\n\nprint(I2.next())\nprint(I.next())\nprint(I2.next())\nprint(next(I))\nprint(next(I2))\n" }, { "alpha_fraction": 0.5692307949066162, "alphanum_fraction": 0.5743589997291565, "avg_line_length": 20.66666603088379, "blob_id": "2a48752a35be465bcefa8c0d289ac00628b0383a", "content_id": "4df48f5efdc4a91befd800e352c4febeaf9e630e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 195, "license_type": "no_license", "max_line_length": 43, "num_lines": 9, "path": "/Programming/Python/13Loop/break.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "while True:\n\tname = raw_input('Enter name: ')\n\tif name == 'stop':\n\t\tbreak\n\tage = raw_input('Enter age: ')\n\tif age.isdigit():\n\t\tprint('Hello, '+name+' => ', int(age)**2)\n\telse:\n\t\tprint('Bad age')\n" }, { "alpha_fraction": 0.36758893728256226, "alphanum_fraction": 0.4110671877861023, "avg_line_length": 18.5, "blob_id": "5a02c486f2bf2cfa10fadd4cb51242c196f89447", "content_id": "808d1d2fc33ab389ccba2d5c47fc8c5541614804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 506, "license_type": "no_license", "max_line_length": 51, "num_lines": 26, "path": "/Programming/C/The C programming Language/chapter3/shell_sort.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nvoid shellsort(int v[], int n);\nint main()\n{\n int i,c;\n int v[10]={1,5,2,3,4,7,6,0,9,8};\n shellsort(v,10);\n for(i=0;i<10;++i)\n printf(\"%d \", v[i]);\n printf(\"\\n\");\n return 0;\n}\n\nvoid shellsort(int v[], int n)\n{\n int gap, i, j, temp;\n for(gap=n/2;gap>0;gap/=2)\n for(i=gap;i<n;i++)\n for(j=i-gap;j>=0&&v[j]>v[j+gap];j-=gap)\n {\n temp=v[j];\n v[j]=v[j+gap];\n v[j+gap]=temp;\n }\n}" }, { "alpha_fraction": 0.5472636818885803, "alphanum_fraction": 0.5621890425682068, "avg_line_length": 17.363636016845703, "blob_id": "df2bc87edacadab2228edb3254896e819b1bcc66", "content_id": "93e00b417ddd3a5feb995950bdf7f674132c0e6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 201, "license_type": "no_license", "max_line_length": 45, "num_lines": 11, "path": "/Programming/C++/Inside_the_C++_object_model/chapter3/reinterpret.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n long p=10;\n long *x=&p;\n long q=reinterpret_cast<long>(x);\n std::cout<<*(long *)q<<std::endl;\n std::cout<<sizeof (std::cout)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.48878923058509827, "alphanum_fraction": 0.5156950950622559, "avg_line_length": 10.789473533630371, "blob_id": "2c92e66a7b90e6ac55ad390835df4dc9ddd7e6c8", "content_id": "570eba14dc549eb755f44f30648637ad76251615", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 223, "license_type": "no_license", "max_line_length": 26, "num_lines": 19, "path": "/Algorithm/Leetcode/sortlist.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<vector>\nusing std::vector;\nusing std::cin;\nusing std::cout;\nusing std::endl;\n\n\nint main()\n{\n \n int iarr[10];\n for(int i=0;i!=10;++i)\n {\n a[0]=ivec[i];\n for(\n }\n \n}" }, { "alpha_fraction": 0.5240620970726013, "alphanum_fraction": 0.5264191627502441, "avg_line_length": 20.836910247802734, "blob_id": "2f2ff8e5ada7178e265d67632637546022564489", "content_id": "26125a7e6b42505a88bfda00cf7945a25765c867", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5091, "license_type": "no_license", "max_line_length": 107, "num_lines": 233, "path": "/Algorithm/Algorithm/chapter4/balance_tree/balance.h", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nstruct Node \n{\n Node(T ele=0, int hei=0, Node *le=NULL, Node *ri=NULL):element(ele), left(le), right(ri), height(hei){}\n T element;\n Node *left;\n Node *right;\n int height;\n};\n\ntemplate <typename T>\nclass Binary\n{\npublic:\n Binary():root(NULL){}\n void insert(T x);\n Node<T> * find(T x);\n void Delete(T x);\n void print();\nprivate:\n Node<T> * root;\n int height(Node<T> *p){\n if(p!=NULL)\n return p->height;\n return -1;\n }\n Node<T> * singleLeftRotate(Node<T> * p);\n Node<T> * singleRightRotate(Node<T> * p);\n Node<T> * doubleLRRotate(Node<T> * p);\n Node<T> * doubleRLRotate(Node<T> * p);\n Node<T> * real_insert(Node<T> *, T x);\n Node<T> * real_delete(Node<T> *, T);\n Node<T> * delete_tree(Node<T> *, T);\n Node<T> * find_min(Node<T> *);\n void real_print(Node<T> *);\n};\n\ntemplate <typename T>\nvoid Binary<T>::insert(T x)\n{\n real_insert(root, x);\n}\n\ntemplate <typename T>\nvoid Binary<T>::Delete(T x)\n{\n real_delete(root, x);\n}\n\ntemplate <typename T>\nNode<T> * Binary<T>::singleLeftRotate(Node<T> * p)//left left\n{\n Node<T> * temp=p->left;\n p->left=temp->right;\n temp->right=p;\n \n p->height=std::max(height(p->left), height(p->right))+1;\n temp->height=std::max(height(temp->left), height(temp->right))+1;\n return temp;\n}\n\ntemplate <typename T>\nNode<T> * Binary<T>::singleRightRotate(Node<T> * p)//right right\n{\n Node<T> * temp=p->right;\n p->right=temp->left;\n temp->left=p;\n \n p->height=std::max(height(p->right), height(p->left))+1;\n temp->height=std::max(height(temp->right), height(temp->right))+1;\n return temp;\n}\n\ntemplate <typename T>\nNode<T> * Binary<T>::doubleLRRotate(Node<T> *p) //left right\n{\n p->right=singleLeftRotate(p->right);\n return singleRightRotate(p);\n}\n\ntemplate <typename T>\nNode<T> *Binary<T>::doubleRLRotate(Node<T> *p) //right left\n{\n p->left=singleRightRotate(p->left);\n return singleLeftRotate(p);\n}\n\ntemplate <typename T>\nNode<T> *Binary<T>::real_insert(Node<T> * p, T x)\n{\n if(p==NULL)\n {\n p=new Node<T>(x);\n root=p;\n return p;\n }\n if(x<p->element)\n {\n real_insert(p->left, x);\n if(height(p->left)-height(p->right)==2)\n {\n if(x<p->left->element)\n p=singleLeftRotate(p);\n else\n p=doubleLRRotate(p);\n }\n }\n else if(x>p->element)\n {\n real_insert(p->right, x);\n if(height(p->right)-height(p->left)==2)\n {\n if(x>p->right->element)\n p=singleRightRotate(p);\n else\n p=doubleRLRotate(p);\n }\n }\n else\n {\n return p;\n }\n p->height=std::max(height(p->left), height(p->left));\n return p;\n}\n\ntemplate <typename T>\nNode<T> * Binary<T>::find(T x)\n{\n Node<T> *p=root;\n while(p!=NULL&&p->element!=x)\n if(x>p->element)\n p=p->right;\n else\n p=p->left;\n return p;\n}\n\ntemplate <typename T>\nNode<T> * Binary<T>::real_delete(Node<T> *p, T x)\n{\n if(p==NULL)\n return p;\n if(x<p->element)\n {\n real_delete(p->left, x);\n if(height(p->right)-height(p->left)==2)\n {\n if(p->right->left!=NULL&&height(p->right->left)>height(p->right->left))\n p=doubleRLRotate(p);\n else\n p=singleRightRotate(p);\n }\n }\n else if(x>p->element)\n {\n real_delete(p->right, x);\n if(height(p->left)-height(p->right)==2)\n {\n if(p->left->right!=NULL&&height(p->left->right)>height(p->left->left))\n p=doubleLRRotate(p);\n else\n p=singleLeftRotate(p);\n }\n }\n else\n {\n delete_tree(p, x);\n }\n p->height=std::max(height(p->left), height(p->right));\n return p;\n}\n\ntemplate <typename T>\nNode<T> * Binary<T>::delete_tree(Node<T> *ptr, T x)\n{\n if(ptr==NULL)\n {\n return ptr;\n }\n Node<T> *temp, *child;\n if(x<ptr->element)\n ptr->left=delete_tree(x, ptr->left);\n else if(x>ptr->element)\n ptr->right=delete_tree(x, ptr->right);\n else if(ptr->left&&ptr->right)\n {\n temp=find_min_1(ptr->right);\n ptr->element=temp->element;\n ptr->right=delete_tree(ptr->element, ptr->right);\n }\n else\n {\n std::cout<<long(temp)<<std::endl;\n temp=ptr;\n if(ptr->left==NULL)\n child=ptr->right;\n if(ptr->right==NULL)\n child=ptr->left;\n delete temp;\n return child;\n }\n return ptr;\n}\n\ntemplate <typename T>\nNode<T> *Binary<T>::find_min(Node<T> * p)\n{\n if(p!=NULL)\n while(p->left!=NULL)\n p=p->left;\n return p;\n}\n\ntemplate <typename T>\nvoid Binary<T>::print()\n{\n real_print(root);\n}\n\ntemplate <typename T>\nvoid Binary<T>::real_print(Node<T> * p)\n{\n if(p==NULL)\n return;\n if(p->left!=NULL)\n real_print(p->left);\n std::cout<<p->element<<std::endl;\n if(p->right!=NULL)\n real_print(p->right);\n}\n\n\n\n" }, { "alpha_fraction": 0.5512820482254028, "alphanum_fraction": 0.5559440851211548, "avg_line_length": 18.976743698120117, "blob_id": "544f5f6b7975032307ffb18b714fb4e75c09ed0b", "content_id": "30e7f3eb7ec680b3142c33c0d2fe5939a0090fea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 858, "license_type": "no_license", "max_line_length": 70, "num_lines": 43, "path": "/Programming/C/multithread/arg_to_thread.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n#include<pthread.h>\n\n#define NUM 5\n\nstruct thread_data\n{\n int thread_id;\n char *message;\n};\n\nvoid * PrintHello(void *threadarg)\n{\n struct thread_data *my_data;\n my_data=(struct thread_data *) threadarg;\n printf(\"thread ID: %i\\n\", my_data->thread_id);\n printf(\"Message: %s \\n\",my_data->message);\n pthread_exit(NULL);\n}\n\nint main()\n{\n pthread_t threads[NUM];\n struct thread_data td[NUM];\n int rc;\n int i;\n \n for(i=0;i<NUM;++i)\n {\n printf(\"main(): create thread, %d\\n\", i);\n td[i].thread_id=i;\n td[i].message=\"this is message\";\n rc=pthread_create(&threads[i],NULL, PrintHello,(void*)&td[i]);\n if(rc)\n {\n printf(\"Error: unable to create thread,%d\",rc);\n exit(-1);\n }\n }\n pthread_exit(NULL);\n return 0;\n}" }, { "alpha_fraction": 0.5698924660682678, "alphanum_fraction": 0.6236559152603149, "avg_line_length": 14.5, "blob_id": "dfe6c3278b8fb82b138b6bc0658369e6caa0be8d", "content_id": "dbddab2afb514cbad6aa996ebf0c5d731539951f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 25, "num_lines": 6, "path": "/Programming/Python/Class/first_init.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class C1:\n\tdef __init__(self, who):\n\t\tself.who=who\n#print(C1.who)\nI1=C1('bob')\nprint(I1.who)\n" }, { "alpha_fraction": 0.442748099565506, "alphanum_fraction": 0.4541984796524048, "avg_line_length": 12.149999618530273, "blob_id": "a627575d4ca27630dd71c67e2476b4a357d4c7db", "content_id": "08f6170b0695409ed0692c6425f46a71b60b0d46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 262, "license_type": "no_license", "max_line_length": 31, "num_lines": 20, "path": "/Programming/C/The C programming Language/chapter5/strcat.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nvoid _strcat(char *s, char *t);\n\nint main()\n{\n char s[32]=\"ZHANG\";\n char t[]=\" QIU CHEN\";\n _strcat(s,t);\n printf(\"%s\\n\",s);\n return 0;\n}\n\nvoid _strcat(char *s, char *t)\n{\n while(*s)\n s++;\n while(*s++=*t++)\n ;\n}" }, { "alpha_fraction": 0.46794870495796204, "alphanum_fraction": 0.5641025900840759, "avg_line_length": 8.75, "blob_id": "f6959c15d72b42dfe3357da683f43c83cf73023f", "content_id": "2583abb48aaa4ab647abc173970681a99e4528c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 13, "num_lines": 16, "path": "/Programming/Python/6DynamicType/test.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "a=3;\nprint(a);\na='abc';\nprint(a);\nl=[1,2,3];\nl2=l;\nprint(l);\nprint(l2);\nl.append(6);\nprint(l);\nprint(l2);\nl1=[];\nl1=l2;\nl2.append(8);\nprint(l1);\nprint(l2);\n" }, { "alpha_fraction": 0.40109556913375854, "alphanum_fraction": 0.4090079069137573, "avg_line_length": 16.677419662475586, "blob_id": "bb289304a5f1cc1b5da2fb2326b0ac0c2f67b058", "content_id": "c7f730d5bbed920f2bb6bfa8a5473135e292c5b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1643, "license_type": "no_license", "max_line_length": 53, "num_lines": 93, "path": "/Algorithm/Algorithm/chapter4/binary_tree.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\nstruct Node\n{\n Node():left(NULL), right(NULL){}\n std::string element;\n Node *left;\n Node *right;\n};\n\n\nclass Stack\n{\npublic:\n Stack(){}\n Node * pop();\n void push(Node * val){\n if(top>9)\n {\n std::cout<<\"stack overflow\"<<std::endl;\n exit(2);\n }\n a[top++]=val;\n }\n static bool is_empty(){return top==0?true:false;}\n ~Stack(){};\nprivate:\n Stack(Stack &);\n Stack & operator=(Stack &);\n Node * a[10];\n static int top;\n};\n\nint Stack::top=0;\n\nNode * Stack::pop()\n{\n if(is_empty())\n {\n std::cout<<\"stack underflow\"<<std::endl;\n exit(1);\n }\n return a[--top];\n \n}\n\nvoid print(Node *p)\n{\n if(p->left!=NULL)\n print(p->left);\n if(p!=NULL)\n std::cout<<p->element;\n if(p->right!=NULL)\n print(p->right);\n}\n\n\n\nint main()\n{\n //Node<int> node;\n Stack stack;\n std::string ival;\n while(std::cin>>ival)\n {\n //std::cout<<ival<<std::endl;\n switch(ival[0])\n {\n case '+' :\n case '*' :\n case '/' :\n case '-' :\n {\n Node *p1=new Node;\n p1->element=ival;\n p1->right=stack.pop();\n p1->left=stack.pop();\n stack.push(p1);\n break;\n }\n case ' ' :\n break;\n default:\n {\n Node *p=new Node;\n p->element=ival;\n stack.push(p);\n break;\n }\n }\n }\n print(stack.pop());\n std::cout<<std::endl;\n}" }, { "alpha_fraction": 0.5144508481025696, "alphanum_fraction": 0.5202311873435974, "avg_line_length": 9.235294342041016, "blob_id": "e092265880c6342217860871232b1ffb16bde503", "content_id": "391c92e1a55c8668dc32861e53c72f7a52669c59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 173, "license_type": "no_license", "max_line_length": 45, "num_lines": 17, "path": "/Programming/C++/Inside_the_C++_object_model/chapter5/nrv_test.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n A(){std::cout<<\"constructor\"<<std::endl;}\n};\n\nA fun(){ \n A a;\n return a;\n}\n\nint main()\n{\n A result=fun();\n return 0;\n}" }, { "alpha_fraction": 0.5059523582458496, "alphanum_fraction": 0.5535714030265808, "avg_line_length": 17.66666603088379, "blob_id": "aad25faaf269770d6ac98b785a2c776e52854bca", "content_id": "873a959edfe9867c080672fbf7b849d97646ec20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 168, "license_type": "no_license", "max_line_length": 43, "num_lines": 9, "path": "/Programming/Lua/3Statements/table.lua", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "w = {x=0, y=0, label=\"console\"}\nx = {math.sin(0), math.sin(1), math.sin(2)}\nw[1] = \"another field\"\nx.f = w\nprint(w[\"x\"])\nprint(w[1])\nprint(x.f[1])\nw.x = nil\nprint(w.x)\n" }, { "alpha_fraction": 0.5694050788879395, "alphanum_fraction": 0.5949008464813232, "avg_line_length": 22.600000381469727, "blob_id": "7dab7f49e4cbd3252be1d3511daeeada9bab482a", "content_id": "165d39105d566983b463a93c9ea23a35864d7da7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 353, "license_type": "no_license", "max_line_length": 55, "num_lines": 15, "path": "/Programming/C++/Effective_STL/Iterators/iterator_mix.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<vector>\n\nint main()\n{\n std::vector<int> ivec;\n std::vector<int>::iterator it1=ivec.begin();\n std::vector<int>::const_iterator it2=ivec.cbegin();\n if(it1==it2)\n std::cout<<\"support\"<<std::endl;\n if(it2==it1)\n std::cout<<\"also support\"<<std::endl;\n std::cout<<it2-it1<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4695121943950653, "alphanum_fraction": 0.48170730471611023, "avg_line_length": 14.666666984558105, "blob_id": "a917a79641547eca903f9c23db1b74c5fe2e1c2e", "content_id": "e0b4f39911a6c026e6b84f8a8b47882f368bb601", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 328, "license_type": "no_license", "max_line_length": 44, "num_lines": 21, "path": "/Programming/C/The C programming Language/chapter5/strend.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<string.h>\n\nint _strend(char *s, char *t);\n\nint main()\n{\n char *s=\"ZHANG QIU CHEN\";\n char *t=\"CHEN\";\n printf(\"%s\\n%s\\n%d\\n\",s,t,_strend(s,t));\n return 0;\n}\n\nint _strend(char *s, char *t)\n{\n s=s+strlen(s)-strlen(t);\n for(;*s++==*t++;)\n if(*s=='\\0')\n return 1;\n return 0;\n}" }, { "alpha_fraction": 0.5609334707260132, "alphanum_fraction": 0.5790838599205017, "avg_line_length": 24.733333587646484, "blob_id": "3bb1aba0ba1ae6793e0d77554b3d3e9e6648a08a", "content_id": "78dd46e9b4bbb2e71ad7229c1d5fe5c4587b762a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1157, "license_type": "no_license", "max_line_length": 72, "num_lines": 45, "path": "/Programming/C/Network_Programming_in_Unix/chapter1/daytimetcpserv.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <unistd.h>\n#include <arpa/inet.h>\n#include <time.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nint\nmain(int argc, char *argv[])\n{\n int listenfd, connfd;\n struct sockaddr_in servaddr;\n char buf[1500];\n time_t ticks;\n if((listenfd=socket(AF_INET, SOCK_STREAM, 0))<0)\n {\n fprintf(stderr, \"error: socket\\n\");\n exit(0);\n }\n bzero(&servaddr, sizeof(servaddr));\n servaddr.sin_family=AF_INET;\n servaddr.sin_port=htons(13);\n servaddr.sin_addr.s_addr=htonl(INADDR_ANY);\n setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, NULL, 0);\n if(bind(listenfd, (struct sockaddr *)&servaddr, sizeof(servaddr))<0)\n {\n fprintf(stderr, \"error: bind\\n\");\n exit(0);\n }\n listen(listenfd, 1);\n while(1)\n {\n connfd=accept(listenfd, (struct sockaddr *)0, NULL);\n ticks=time(NULL);\n snprintf(buf, sizeof(buf), \"%.24s\\r\\n\", ctime(&ticks));\n for(int i=0; i<10; i++)\n {\n if(write(connfd, buf, strlen(buf))<strlen(buf))\n fprintf(stderr, \"error: write()\\n\");\n }\n close(connfd);\n }\n exit(0);\n}" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.3909774422645569, "avg_line_length": 11.903225898742676, "blob_id": "c66fa342713850030d02f40ebf96ab1d5876012a", "content_id": "a92404c91f825003195ebba557592b900781ceda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 399, "license_type": "no_license", "max_line_length": 26, "num_lines": 31, "path": "/Programming/C/The C programming Language/chapter4/itoa_recur.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nint itoa(char t[], int n);\n\nint main()\n{\n char t[32];\n int n=-289767;\n itoa(t, n);\n printf(\"%s\\n\",t);\n return 0;\n}\n\nint itoa(char t[], int n)\n{\n int i=1, flag=0;\n if(n<0)\n {\n n=-n;\n flag=1;\n }\n if(n/10)\n i=itoa(t,n/10);\n t[i++]=n%10+'0';\n if(flag)\n t[0]='-';\n else\n t[0]='+';\n t[i]='\\0';\n return i;\n}" }, { "alpha_fraction": 0.4051172733306885, "alphanum_fraction": 0.42146411538124084, "avg_line_length": 25.074073791503906, "blob_id": "960779885cd90bba40a5e6ba226093f238afb556", "content_id": "41aa37c5e8fbe4fbeadd9e5def49c21aa4963657", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1407, "license_type": "no_license", "max_line_length": 81, "num_lines": 54, "path": "/Algorithm/Leetcode/rpn.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<vector>\n\nint main()\n{\n using std::string;\n std::string str;\n std::vector<string> svec;\n while(std::cin>>str)\n svec.push_back(str);\n if(svec.size()==1)\n {\n std::cout<<stoi(svec[0])<<std::endl;\n return 0;\n }\n if(svec.size()==0)\n return 0;\n std::vector<string>::iterator it=svec.begin();\n int tmp;\n while(svec.size()!=1)\n {\n std::vector<string>::iterator begin=svec.begin();\n for(;(begin+2)!=svec.end()&&begin!=svec.end();++begin)\n {\n if(*(begin+2)==\"+\"||*(begin+2)==\"-\"||*(begin+2)==\"*\"||*(begin+2)==\"/\")\n {\n if(*(begin+2)==\"+\")\n tmp=stoi(*begin)+stoi(*(begin+1));\n else if(*(begin+2)==\"-\")\n tmp=stoi(*begin)-stoi(*(begin+1));\n else if(*(begin+2)==\"*\")\n tmp=stoi(*begin)*stoi(*(begin+1));\n else\n {\n if(stoi(*(begin+1))!=0)\n tmp=stoi(*begin)/stoi(*(begin+1));\n else\n {\n std::cerr<<\"error input: 0 cannot be the divider\"<<std::endl;\n exit(0);\n }\n }\n begin=svec.erase(begin);\n begin=svec.erase(begin);\n *begin=std::to_string(tmp);\n break;\n }\n }\n \n }\n \n std::cout<<tmp<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.5175096988677979, "alphanum_fraction": 0.533073902130127, "avg_line_length": 13.333333015441895, "blob_id": "13d6aedd9a60e27b386dd0831579049e5d4589d7", "content_id": "eb4faa7d247228f98b99c11bf28d844e6e1131e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 257, "license_type": "no_license", "max_line_length": 35, "num_lines": 18, "path": "/Algorithm/Algorithm/chapter2/prime.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<cmath>\n\nbool prime(int num)\n{\n int div=sqrt(num);\n bool pri=true;\n for(int i=2; i<=div; i++)\n if(num%i==0)\n pri=false;\n return pri;\n}\n\nint main()\n{\n std::cout<<prime(7)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.2730404734611511, "alphanum_fraction": 0.29112833738327026, "avg_line_length": 17.140625, "blob_id": "2dd674670706e531c471a42a114abe5f6d1f48ec", "content_id": "d901ba027489f8b4b1a86ef52a345914556474b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1161, "license_type": "no_license", "max_line_length": 63, "num_lines": 64, "path": "/Programming/C/The C programming Language/chapter3/escape.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nvoid escape(char s[], char t[]);\nvoid rescape(char s[], char t[]);\nint main()\n{\n char s[10];\n char t[20];\n char c;\n int i=0;\n while((c=getchar())!=EOF)\n {\n s[i]=c;\n i++;\n }\n escape(s,t);\n printf(\"%s\\n\", t);\n rescape(s,t);\n printf(\"%s\\n\", t);\n return 0;\n}\nvoid escape(char s[], char t[])\n{\n int i,k=0;\n for(i=0;s[i]!='\\0';++i,++k)\n {\n switch(s[i])\n {\n case '\\n' :\n case '\\t' :\n for(int j=0;j<6;++j)\n t[k++]=' ';\n break;\n default:\n t[k]=s[i];\n break;\n }\n }\n t[k]='\\0';\n}\n\n\nvoid rescape(char s[], char t[])\n{\n int i, k=0;\n for(i=0;s[i]!='\\0';++i,++k)\n {\n switch(s[i])\n {\n case ' ' :\n for(int j=0;s[i+j]!='\\0'&&j<6&&s[i+j]==' ';++j)\n if(j==5)\n {\n t[k]='\\t';\n i+=5;\n }\n break;\n default :\n t[k]=s[i];\n break;\n }\n }\n t[k]='\\0';\n}\n" }, { "alpha_fraction": 0.5652819275856018, "alphanum_fraction": 0.574184000492096, "avg_line_length": 17.75, "blob_id": "a428d5acde7cd672416b74dfb3b7f7222d997c52", "content_id": "387dbc51375478b707afee9787be994f5ff34911", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 674, "license_type": "no_license", "max_line_length": 102, "num_lines": 36, "path": "/Programming/C/Programming_in_Unix/chapter11/pthread_create.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <pthread.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <string.h>\n\npthread_t ntid;\n\nvoid printtids(const char *s)\n{\n pid_t pid;\n pthread_t tid;\n pid=getpid();\n tid=pthread_self();\n printf(\"%s pid %u tid%u (0x%x)\\n\", s, (unsigned int)pid, (unsigned int) tid, (unsigned int ) tid);\n}\n\nvoid * thr_fn(void * arg)\n{\n printtids(\"new thread:\");\n return ((void*)0);\n}\n\nint main()\n{\n int err;\n err=pthread_create(&ntid, NULL, thr_fn, NULL);\n if(err != 0)\n {\n fprintf(stderr, \"error: can't create thread: %s\\n\", strerror(err));\n exit(0);\n }\n printtids(\"main thread:\");\n sleep(1);\n exit(0);\n}" }, { "alpha_fraction": 0.5130929946899414, "alphanum_fraction": 0.5384025573730469, "avg_line_length": 22.523195266723633, "blob_id": "9fe9138e6009e539bb7016cd361938f4ae36178c", "content_id": "cb2f5a56bf843f6e19bebcf1f4aab04046d9e3c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9127, "license_type": "no_license", "max_line_length": 82, "num_lines": 388, "path": "/Project/Client/conserv.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"conserv.h\"\n#include <string.h>\n#include <iostream>\n#include <stdlib.h>\n#include <fstream>\n#include <sstream>\n#include <sys/socket.h>\n#include <unistd.h>\n#include <errno.h>\n#include <arpa/inet.h>\n\nstruct rp2p{\n unsigned char confld;\n unsigned char reserv;\n short seq;\n short ack;\n short cksum;\n};\n\nclass Sock\n{\npublic:\n Sock();\n int send_msg(const std::string & msg);\n int recv_msg(std::string & msg);\n ~Sock();\nprivate:\n int sock;\n int valid;\n};\n\nSock::Sock()\n{\n sock=socket(AF_INET, SOCK_STREAM, 0);\n if(sock==-1)\n {\n valid=-1;\n return;\n }\n std::cout<<sock<<std::endl;\n valid=1;\n sockaddr_in sa;\n sa.sin_family=AF_INET;\n sa.sin_port=htons(6868);\n sa.sin_addr.s_addr=inet_addr(\"127.0.0.1\");//(serv_ip.c_str());\n if(connect(sock, (sockaddr*)&sa, sizeof(sa))==-1)\n {\n valid=2;\n std::cout<<strerror(errno)<<std::endl;\n return;\n }\n valid=0;\n}\n\nSock::~Sock()\n{\n if(sock>=0)\n close(sock);\n}\n\nint Sock::send_msg(const std::string & msg)\n{\n if(valid!=0)\n return -256;\n int ret;\n ret=send(sock, msg.c_str(), msg.size(), 0);\n return ret;\n}\n\nint Sock::recv_msg(std::string &msg)\n{\n timeval tv;\n tv.tv_sec=2;\n tv.tv_usec=0;\n if(setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(timeval))!=0)\n return -3;\n char rcv_buf[1500];\n memset(rcv_buf, 0, 1500);\n int ret=recv(sock, rcv_buf, 1500, 0);\n if(ret!=-1)\n msg=std::string(rcv_buf+sizeof(rp2p));\n return ret;\n}\n\n\n/*\n **************************\n confld|reserv|sequence_num\n **************************\n acknowledge |checksum\n **************************\n */\n#define LOG_IN 0x80 //when this bit is set, it's login action\n#define SIGN_ON 0x40 //when this bit is set, it's register action\n#define LOG_OUT 0x20 //when this bit is set, it's log out action\n#define LOGGED 0x10 //when this bit is set, the user has complete login action\n/*\n * following four all need the logged flag is set, Otherwise, error occur\n */\n#define UPD_IP 0x08 //this bit set for users update it's IP\n#define UPD_LOC 0x04 //this bit set for users to update its location\n#define UPD_FRD 0x02 //this bit set for users to update friendship\n#define UPD_INFO 0x01 //this bit set for request update peer's IP\n\n#define H_MASK 0xf0\n#define L_MASK 0x0f\n\nint Conn::sign_on(const std::string & account, const std::string & pwd)\n{\n struct rp2p rpp;\n rpp.confld=SIGN_ON;\n rpp.reserv=~0;\n rpp.seq=~0;\n rpp.ack=~0;\n rpp.cksum=~0;\n char buf[1500];\n memset(buf, 0, 1500);\n memcpy(buf, (void *)&rpp, sizeof(rpp));\n system(\"ifconfig en0 inet >> data/ip\");\n std::ifstream ifs(\"data/ip\");\n if(!ifs.is_open())\n {\n std::cerr<<\">error to sign on, please try again!\"<<std::endl;\n return 0;\n }\n std::string ip;\n getline(ifs, ip);\n getline(ifs, ip);\n ifs.close();\n std::istringstream iss(ip);\n iss>>ip>>ip;\n //std::cout<<ip<<std::endl;\n std::string msg=\"account:\"+account+\" password:\"+pwd+\" ip:\"+ip;\n memcpy(buf+sizeof(rp2p), msg.c_str(), msg.size());\n //std::cout<<msg<<std::endl;\n /*\n * here need send message to server and receive message from server to judge \n * whether login action has succeed.\n */\n Sock sock;\n if(sock.send_msg(buf)==-256)\n {\n std::cerr<<\">error: connect to server\"<<std::endl;\n return 1;\n }\n std::string rcv_str;\n int rcv_cnt=0;\n while(sock.recv_msg(rcv_str)==-1)\n {\n if(rcv_cnt==2)\n {\n std::cerr<<\">network unavailable.\"<<std::endl;\n return -1;\n }\n sock.send_msg(buf);\n rcv_cnt++;\n }\n std::cout<<\">\"<<rcv_str<<std::endl;\n if(rcv_str==\"Sign on OK\")\n return 0;\n return 1;\n}\n\nint Conn::login(const std::string & account, const std::string & pwd)\n{\n struct rp2p rpp;\n rpp.confld=LOG_IN;\n rpp.reserv=~0;\n rpp.seq=~0;\n rpp.ack=~0;\n rpp.cksum=~0;\n char buf[1500];\n memset(buf, 0, 1500);\n memcpy(buf, (void *)&rpp, sizeof(rp2p));\n system(\"ifconfig en0 inet >> data/ip\");\n std::ifstream ifs(\"data/ip\");\n if(!ifs.is_open())\n {\n std::cerr<<\">error to login, please try again!\"<<std::endl;\n return 0;\n }\n std::string ip;\n getline(ifs, ip);\n getline(ifs, ip);\n ifs.close();\n std::istringstream iss(ip);\n iss>>ip>>ip;\n std::string msg=\"account:\"+account+\" password:\"+pwd+\" client:IOS\"+\" ip:\"+ip;\n //std::cout<<msg<<std::endl;\n memcpy(buf+sizeof(rp2p), msg.c_str(), msg.size());\n /*\n * send message to server\n */\n std::cout<<buf+sizeof(rp2p)<<std::endl;\n Sock sock;\n if(sock.send_msg(buf)==-256)\n {\n std::cerr<<\">error: connect to server\"<<std::endl;\n return 1;\n }\n std::string rcv_str;\n int rcv_cnt=0;\n while(sock.recv_msg(rcv_str)==-1)\n {\n if(rcv_cnt==3)\n {\n std::cerr<<\">network unavailable.\"<<std::endl;\n return -1;\n }\n sock.send_msg(buf);\n rcv_cnt++;\n }\n std::cout<<\">\"<<rcv_str<<std::endl;\n if(rcv_str==\"Login OK\")\n return 0;\n return 1;\n}\n\nint Conn::log_out(const std::string &account)\n{\n struct rp2p rpp;\n rpp.confld=LOG_OUT;\n rpp.reserv=~0;\n rpp.seq=~0;\n rpp.ack=~0;\n rpp.cksum=~0;\n char buf[1500];\n memset(buf, 0, 1500);\n memcpy(buf, (void *)&rpp, sizeof(rp2p));\n std::string msg=\"account:\"+account;\n memcpy(buf+sizeof(rp2p),msg.c_str(), msg.size());\n /*\n * send message to server\n */\n Sock sock;\n if(sock.send_msg(buf)==-256)\n {\n std::cerr<<\">error: connect to server\"<<std::endl;\n return 1;\n }\n std::string rcv_str;\n int rcv_cnt=0;\n while(sock.recv_msg(rcv_str)==-1)\n {\n if(rcv_cnt==2)\n {\n std::cerr<<\">network unavailable.\"<<std::endl;\n return -1;\n }\n sock.send_msg(buf);\n rcv_cnt++;\n }\n std::cout<<\">\"<<rcv_str<<std::endl;\n return 1;\n}\n\nint Conn::update_ip(const std::string & account)\n{\n struct rp2p rpp;\n rpp.confld=LOGGED|UPD_IP|UPD_LOC;\n rpp.reserv=~0;\n rpp.seq=~0;\n rpp.ack=~0;\n rpp.cksum=~0;\n char buf[1500];\n memset(buf, 0, 1500);\n memcpy(buf, (void *)&rpp, sizeof(rp2p));\n system(\"ifconfig en0 inet >> data/ip\");\n std::ifstream ifs(\"data/ip\");\n if(!ifs.is_open())\n {\n //std::cerr<<\"error to s, please try again!\"<<std::endl;\n return 0;\n }\n std::string ip;\n getline(ifs, ip);\n getline(ifs, ip);\n ifs.close();\n std::istringstream iss(ip);\n iss>>ip>>ip;\n std::string msg=\"account:\"+account+\" ip:\"+ip;\n memcpy(buf+sizeof(rp2p), msg.c_str(), msg.size());\n /*\n * send to server\n */\n Sock sock;\n if(sock.send_msg(buf)==-256)\n {\n //std::cerr<<\"error: connect to server\"<<std::endl;\n return 1;\n }\n std::string rcv_str;\n int rcv_cnt=0;\n while(sock.recv_msg(rcv_str)==-1)\n {\n if(rcv_cnt==2)\n {\n std::cerr<<\">network unavailable.\"<<std::endl;\n return -1;\n }\n sock.send_msg(buf);\n rcv_cnt++;\n }\n //std::cout<<rcv_str<<std::endl;\n return 1;\n}\n\nint Conn::request_fip(const std::string & account, std::string & rcv_str)\n{\n struct rp2p rpp;\n rpp.confld=LOGGED|UPD_INFO;\n rpp.reserv=~0;\n rpp.seq=~0;\n rpp.ack=~0;\n rpp.cksum=~0;\n char buf[1500];\n memset(buf, 0, 1500);\n memcpy(buf, (void *)&rpp, sizeof(rp2p));\n std::string msg=\"account:\"+account;\n memcpy(buf+sizeof(rp2p), msg.c_str(), msg.size());\n /*\n * send to server\n */\n Sock sock;\n if(sock.send_msg(buf)==-256)\n {\n //std::cerr<<\"error: connect to server\"<<std::endl;\n return 1;\n }\n //std::string rcv_str;\n int rcv_cnt=0;\n while(sock.recv_msg(rcv_str)==-1)\n {\n if(rcv_cnt==2)\n {\n std::cerr<<\">network unavailable.\"<<std::endl;\n return -1;\n }\n sock.send_msg(buf);\n rcv_cnt++;\n }\n //std::cout<<rcv_str<<std::endl;\n return 1;\n}\n\nint Conn::adfrd(const std::string & account, const std::string & frd, int flag)\n{\n struct rp2p rpp;\n rpp.confld=LOGGED|UPD_FRD;\n rpp.reserv=~0;\n rpp.seq=~0;\n rpp.ack=~0;\n rpp.cksum=~0;\n char buf[1500];\n memset(buf, 0, 1500);\n memcpy(buf, (void *)&rpp, sizeof(rp2p));\n std::string msg;\n if(flag==1)\n {\n msg=\"account:\"+account+\" add\"+\" friend:\"+frd;\n }\n else\n msg=\"account:\"+account+\" delete\"+\" friend:\"+frd;\n memcpy(buf+sizeof(rp2p), msg.c_str(), msg.size());\n /*\n * send to server\n */\n Sock sock;\n if(sock.send_msg(buf)==-256)\n {\n std::cerr<<\">error: connect to server\"<<std::endl;\n return 1;\n }\n std::string rcv_str;\n int rcv_cnt=0;\n while(sock.recv_msg(rcv_str)==-1)\n {\n if(rcv_cnt==2)\n {\n std::cerr<<\">network unavailable.\"<<std::endl;\n return -1;\n }\n sock.send_msg(buf);\n rcv_cnt++;\n }\n std::cout<<\">\"<<rcv_str<<std::endl;\n return 1;\n}\n" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.579365074634552, "avg_line_length": 14.75, "blob_id": "9186fa16002e958263d885ad6cd4b7cd917c19ab", "content_id": "a0fac1152acbec90f4ac18c4bce97129c066bc66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 19, "num_lines": 8, "path": "/Programming/Python/14iteration/next.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "f = open('next.py')\nprint(next(f))\nprint(f.next())\nL = [6, 6, 6]\nI = iter(L)\nprint(iter(f) is f)\nwhile True:\n\tprint(I.next())\n" }, { "alpha_fraction": 0.3297872245311737, "alphanum_fraction": 0.34278959035873413, "avg_line_length": 16.625, "blob_id": "f12216d40f1687e9e22f91bed0dd55329d5f7221", "content_id": "b57af4fc6a57e5c43d79735c41df2b0872f2faf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 846, "license_type": "no_license", "max_line_length": 57, "num_lines": 48, "path": "/Algorithm/test.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <fstream>\nusing namespace std;\n\nvoid func()\n{\n ifstream ifs(\"input\");\n int num_t;\n ofstream ofs(\"output\");\n ifs>>num_t;\n int M, N, ret=1, index=1;\n while(ifs>>M)\n {\n ifs>>N;\n ret=1;\n cout<<M<<\" \"<<N<<endl;\n if(M==N)\n {\n for(int i=1; i<=M; i++)\n {\n ret*=i;\n }\n ofs<<\"Case #\"<<index<<\": \"<<ret<<endl;\n }\n else\n {\n int sub=N-M+1;\n for(int i=1; i<=N; i++)\n {\n if(i<=sub)\n continue;\n ret*=i;\n }\n ret*=M;\n ofs<<\"Case #\"<<index<<\": \"<<ret%10e9+7<<endl;\n }\n index++;\n \n }\n ofs.close();\n ifs.close();\n}\n\nint main()\n{\n func();\n return 0;\n}\n" }, { "alpha_fraction": 0.342342346906662, "alphanum_fraction": 0.37387385964393616, "avg_line_length": 19.212121963500977, "blob_id": "03ac0659c1f657953c9ae94d9c6d9e7877db316b", "content_id": "a14e37a6ecf47b6ac6d56f8df72ae3619a214713", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 666, "license_type": "no_license", "max_line_length": 40, "num_lines": 33, "path": "/Algorithm/Algorithm/chapter7/7shellsort.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate<typename T>\nvoid shell(T *a, int n)\n{\n static int inc[3]={1,3,7};\n for(int i=2; i>=0; i--)\n {\n for(int j=inc[i]; j<n; j++)\n {\n T temp=a[j];\n int k=j;\n for(; k>inc[i]-1; k-=inc[i])\n if(a[k]<a[k-inc[i]])\n a[k]=a[k-inc[i]];\n else break;\n a[k]=temp;\n }\n for(int f=0; f<n; f++)\n std::cout<<a[f]<<\" \";\n std::cout<<std::endl;\n }\n}\n\nint main()\n{\n int a[]={9,8,7,6,5,4,3,2,1};\n shell(a, 9);\n for(int i=0; i<9; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.6036585569381714, "alphanum_fraction": 0.6158536672592163, "avg_line_length": 17.22222137451172, "blob_id": "a5ca52506d0bfaa819fe67ba11c20127b8e77a2d", "content_id": "bd18034943c297c85e99c2afee7154daad089d1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/Programming/Python/10Statement/robust.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "while True:\n\treply = raw_input('Enter text: ')\n\tif reply == 'stop':\n\t\tbreak\n\telif not reply.isdigit():\n\t\tprint('Bad!'*8)\n\telse:\n\t\tprint(int(reply)**2)\nprint('Bye')\n" }, { "alpha_fraction": 0.55078125, "alphanum_fraction": 0.55859375, "avg_line_length": 11.2380952835083, "blob_id": "e6fe127dedf31e80fec08a52907993a40f3d0dd2", "content_id": "5c181046753db03cc7351be2f7365a2383013dde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 256, "license_type": "no_license", "max_line_length": 61, "num_lines": 21, "path": "/Programming/C++/More_Effective_C++/chapter1/base_class_initialization.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n A(int a){std::cout<<\"A's constuctor\"<<std::endl;}\n};\n\nclass B:public A{\npublic:\n B(int a=0):A(a){std::cout<<\"b's constructor\"<<std::endl;}\n};\n\nclass C: public B{\n};\n\nint main()\n{\n B b;\n C c;\n return 0;\n}" }, { "alpha_fraction": 0.48627451062202454, "alphanum_fraction": 0.4941176474094391, "avg_line_length": 16.620689392089844, "blob_id": "686922c4e126174366ae7ac203c18a8948398313", "content_id": "b385df7b947a6ebcff735759781fbfa7932a0743", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 510, "license_type": "no_license", "max_line_length": 49, "num_lines": 29, "path": "/Algorithm/Algorithm/chapter1/include.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<fstream>\n\nvoid read_file(const std::string &s)\n{\n std::ifstream ifs(s);\n if(!ifs.is_open())\n {\n std::cout<<\"error: open file\"<<std::endl;\n return;\n }\n std::string line;\n while (getline(ifs, line))\n {\n if(line.substr(0,8)==\"#include\")\n {\n line=line.substr(8);\n read_file(line);\n continue;\n }\n std::cout<<line<<std::endl;\n }\n}\n\nint main()\n{\n read_file(\"include.cc\");\n return 0;\n}" }, { "alpha_fraction": 0.5614035129547119, "alphanum_fraction": 0.5614035129547119, "avg_line_length": 8.5, "blob_id": "ea6ad4ea0ea572c892ec294dcbad17dc791cdd8d", "content_id": "81932e2077390685c728d1251ef72adbda1716b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 57, "license_type": "no_license", "max_line_length": 11, "num_lines": 6, "path": "/Programming/Python/17Namespace/exer4.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "X = 'Spam'\ndef func():\n\tglobal X\n\tX='Ni'\nfunc()\nprint(X)\n" }, { "alpha_fraction": 0.5164113640785217, "alphanum_fraction": 0.5251641273498535, "avg_line_length": 18.08333396911621, "blob_id": "6cf4328d0b4082ac41f4bf7b19b4e073ab911328", "content_id": "d0263912aa05bfeff5ea0b3e305e1828051dd8ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 457, "license_type": "no_license", "max_line_length": 58, "num_lines": 24, "path": "/Project/source/Client/test_main.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"command.h\"\n#include \"conserv.h\"\n#include \"init.h\"\n#include <iostream>\n\nint main(int argc, char * argv[])\n{\n if(argc<2)\n {\n std::cout<<\">please provide server ip\"<<std::endl;\n return 0;\n }\n serv_ip=std::string(argv[1]);\n //std::cout<<serv_ip<<std::endl;\n init();\n std::cout<<\">\";\n std::string temp;\n while(getline(std::cin, temp))\n {\n proc_comm(temp);\n std::cout<<\">\";\n }\n return 0;\n}" }, { "alpha_fraction": 0.5846994519233704, "alphanum_fraction": 0.5956284403800964, "avg_line_length": 15.727272987365723, "blob_id": "92ef4a9351aec9b3a16c726e28a2cbbb9fc90c50", "content_id": "b2f723901cfac9361df884d45e9ea28466bf0b78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 183, "license_type": "no_license", "max_line_length": 39, "num_lines": 11, "path": "/Programming/C++/Effective_STL/Vector_and_String/vector_bool.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<vector>\n\nint main()\n{\n std::vector<bool> bvec;\n bvec.push_back(true);\n //bool *p=&bvec[0];\n std::cout<<sizeof(bool)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.42718446254730225, "alphanum_fraction": 0.48543688654899597, "avg_line_length": 11.875, "blob_id": "d9755e6b0c944bf2b899894b3dc5a398b7b47e36", "content_id": "5a637b73737decf6cb7a03d38235b0960becd4e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 21, "num_lines": 8, "path": "/Programming/Python/18Parameter/ref_call.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "def multiple(x, y):\n\tx = 2\n\ty = [3, 4]\n\treturn x, y\nX = 1\nL = [1, 2]\nX, L = multiple(X, L)\nprint(X, L)\n" }, { "alpha_fraction": 0.5456140637397766, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 15.764705657958984, "blob_id": "21bd09b5bd21ff4f2f4240e75043b49838f5fe25", "content_id": "929e4a3a7e722d6d5e798704a1375f97b8515f10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 570, "license_type": "no_license", "max_line_length": 52, "num_lines": 34, "path": "/Programming/Python/4ObjectType/string.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "S='Python';\nY=\"Python\";\nprint(S[0]);\nprint(Y[1]);\nprint(S[-1]);\nprint(Y[-6]);\nprint(S[1:3]);\nprint(S[:]);\nprint(S[:3]);\nprint(S[1:]);\nprint(S+'xyz');\nprint(S*8);\nS='z'+S[1:];\nprint(S);\nprint(S.find('on'));\nprint(S.replace('on', \"XYZ\"));\nprint(S);\nline='aaa, bbb, ccc';\nprint(line.split(','));\nprint(S.upper);\nprint(S.isalpha());\nline='something\\n';\nprint(line);\nprint(line.rstrip());\nprint('%s, eggs, and %s'%('spam', \"SPAM!\"));\nprint('{0}, eggs, and {1}'.format('spam', 'SPAM!'));\nprint(dir(S));\nprint(len(S));\nprint(ord('\\n'));\nmsg=\"\"\"aaaaa\nbbbbb\ncccc\n\"\"\"\nprint(msg);\n" }, { "alpha_fraction": 0.43899205327033997, "alphanum_fraction": 0.46618038415908813, "avg_line_length": 20.55714225769043, "blob_id": "64b41e1b497f3c3ffab8bdc76f025b7dcb89a1e6", "content_id": "341496ad0648268b9496922c9b3923977494ca38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1508, "license_type": "no_license", "max_line_length": 59, "num_lines": 70, "path": "/Algorithm/Algorithm/chapter2/max_po_sub.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<vector>\n\ntemplate<typename T>\nT max_positive_sub(T *a, int n);\n\nint main()\n{\n int a[]={-1, 10, -20, 101, 11, 12};\n std::cout<<max_positive_sub(a, 6)<<std::endl;\n return 0;\n}\n\nvoid quick_sort(std::pair<int, int> *p, int n)\n{\n if(n<1)\n return;\n if(p[0].second>p[n/2].second)\n std::swap(p[0], p[n/2]);\n int pivot=0, i=n-1;\n while(pivot<=i)\n {\n if(p[i].second<p[pivot].second)\n {\n std::swap(p[i],p[pivot+1]);\n std::swap(p[pivot], p[pivot+1]);\n pivot++;\n }\n else i--;\n }\n quick_sort(p,pivot-1);\n quick_sort(p+pivot+1, n-pivot-1);\n}\n\ntemplate<typename T>\nT max_positive_sub(T *a, int n)\n{\n std::pair<int, int> *arr = new std::pair<int, int> [n];\n T sum=0;\n for(int i=0; i<n; i++)\n {\n sum+=*(a+i);\n arr[i]=std::make_pair(i, sum);\n }\n quick_sort(arr, n);\n sum=0;\n bool flag=true;\n int temp=0;\n for(int i=0; i<n-1; i++)\n {\n if(arr[i].first<arr[i+1].first)\n {\n temp=arr[i+1].second-arr[i].second;\n if(temp>arr[i+1].second&&arr[i+1].second>0)\n temp=arr[i+1].second;\n if(temp>arr[i].second&&arr[i].second>0)\n temp=arr[i].second;\n }\n //std::cout<<temp<<std::endl;\n if(temp>0&&flag)\n {\n sum=temp;\n flag=false;\n }\n if(temp>0&&temp<sum)\n sum=temp;\n }\n delete[] arr;\n return sum;\n}" }, { "alpha_fraction": 0.447408527135849, "alphanum_fraction": 0.46417683362960815, "avg_line_length": 22.872726440429688, "blob_id": "c37eb372cea2f454256fcb3214e26d1875449c31", "content_id": "0f6d5710c56d9c49d5f9c02a256b5d8774be254b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1312, "license_type": "no_license", "max_line_length": 92, "num_lines": 55, "path": "/Programming/C/The C programming Language/chapter7/pattern_finder.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<string.h>\n#include<stdlib.h>\n\nvoid find_pattern(char *, char *);\n\nint main(int argc, char *argv[])\n{\n char pattern[20];\n char file[20];\n if(argc<3)\n {\n fprintf(stdout, \"not exact arguments, please enter in\\n\");\n fprintf(stdout, \"Please enter pattern to be found:\");\n fscanf(stdin, \"%s\", pattern);\n fprintf(stdout, \"please enter file name:\");\n fscanf(stdin, \"%s\", file);\n }\n else\n {\n strcpy(pattern,argv[1]);\n strcpy(file, argv[2]);\n }\n find_pattern(pattern, file);\n return 0;\n}\n\nvoid find_pattern(char *pattern, char *file)\n{\n char line[100];\n int lineno=0;\n FILE *fp;\n if((fp=fopen(file, \"r\"))==NULL)\n {\n fprintf(stdout, \"error: open file %s\\n\", file);\n exit(1);\n }\n while(fgets(line, 100, fp)!=NULL)\n {\n lineno++;\n for(int i=0; *(line+i)!='\\0'; i++)\n if(*(line+i)==*pattern)\n {\n int k;\n for(k=0; *(line+i+k)!='\\0'&&*(pattern+k)==*(line+i+k);k++)\n ;\n if(*(pattern+k)=='\\0')\n {\n fprintf(stdout, \"find in file: %s lineno: %2d, %s\", file, lineno, line);\n break;\n }\n }\n }\n fclose(fp);\n}" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.5916666388511658, "avg_line_length": 11.100000381469727, "blob_id": "c78e9a02ed7855b29ba7370a9381a1509d4e6738", "content_id": "5231222eb02730bffe758d6926432341cf068ece", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 120, "license_type": "no_license", "max_line_length": 29, "num_lines": 10, "path": "/Programming/C++/Effective_STL/Associative_Container/callable_object.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<set>\n\ntypedef bool (*f) (int, int);\n\nint main()\n{\n std::set<int, f> iset;\n return 0;\n}" }, { "alpha_fraction": 0.5493273735046387, "alphanum_fraction": 0.560538113117218, "avg_line_length": 20.285715103149414, "blob_id": "874b5235e0089a6a1dedc0e97a70579b49bdb336", "content_id": "02d213dc5cdcf24b8f6ef17bf736bcd1a1844ac3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 446, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/Programming/C/Programming_in_Unix/chapter1/ls.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<unistd.h>\n#include<stdio.h>\n#include<dirent.h>\n#include<stdlib.h>\n\nint main(int argc, char *argv[])\n{\n DIR *dp;\n struct dirent *dirp;\n if(argc!=2)\n {\n fprintf(stderr, \"error: missing arguments\\n\");\n exit(1);\n }\n if((dp=opendir(argv[1]))==NULL)\n fprintf(stderr, \"error: open %s\\n\", argv[1]);\n while ((dirp=readdir(dp))!=NULL)\n printf(\"%s\\n\", dirp->d_name);\n closedir(dp);\n return 0;\n}" }, { "alpha_fraction": 0.5652173757553101, "alphanum_fraction": 0.590062141418457, "avg_line_length": 11.461538314819336, "blob_id": "f888e150cb88d8d85914005eeffd81078be2df99", "content_id": "f3a027a4a89803b4a94d34e2095dda0007f45014", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 161, "license_type": "no_license", "max_line_length": 45, "num_lines": 13, "path": "/Programming/C++/More_Effective_C++/chapter2/explicit.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n explicit operator double (){return 10.0;}\n};\n\nint main()\n{\n A a;\n std::cout<<double(a)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.5249999761581421, "alphanum_fraction": 0.6050000190734863, "avg_line_length": 15.75, "blob_id": "660b40a43d672761cc5bd87f37a24b479af2fbe8", "content_id": "772ccf38be43dfac4dd45afe6c9098e35c5e8f9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 200, "license_type": "no_license", "max_line_length": 57, "num_lines": 12, "path": "/Programming/C/The C programming Language/chapter1/deexpression.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define LOWER 0\n#define UPPER 300\n#define STEP 20\n\nint main()\n{\n int fahr;\n for(fahr=LOWER;fahr<=UPPER;fahr+=STEP)\n printf(\"%3d %6.1f\\n\",fahr,(5.0/9.0)*(fahr-32.0));\n}" }, { "alpha_fraction": 0.506767749786377, "alphanum_fraction": 0.5262588262557983, "avg_line_length": 16.769229888916016, "blob_id": "d2be7bf7b046a3fbc70edc34d64234cea031ccd7", "content_id": "37a9ccbc14d3fb2dcefa2e1164a9ddcf3db03262", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1847, "license_type": "no_license", "max_line_length": 50, "num_lines": 104, "path": "/Programming/C/The C programming Language/chapter5/str_sort.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<string.h>\n\n#define MAXLINES 5000\n\nchar *lineptr[MAXLINES];\n\nint readlines(char *lineptr[], int nlines);\nvoid writelines(char *lineptr[], int nlines);\n\nvoid qsort(char *lineptr[], int left, int right);\n\nint main()\n{\n int nlines;\n if((nlines=readlines(lineptr, MAXLINES))>=0)\n {\n qsort(lineptr, 0, nlines-1);\n writelines(lineptr, nlines);\n return 0;\n }\n else\n {\n printf(\"error: inut too big to sort\\n\");\n return 1;\n }\n}\n\n#define MAXLEN 1000\n\nint _getline(char *, int);\nchar *alloc(int);\n\nint readlines(char *lineptr[], int maxlines)\n{\n int len, nlines;\n char *p, line[MAXLEN];\n \n nlines=0;\n while((len=_getline(line, MAXLEN))>0)\n if(nlines>=maxlines||(p=alloc(len))==NULL)\n return -1;\n else\n {\n line[len-1]='\\0';\n strcpy(p,line);\n lineptr[nlines++]=p;\n }\n return nlines;\n}\n\nvoid writelines(char *lineptr[], int nlines)\n{\n while(nlines-->0)\n printf(\"%s\\n\",*lineptr++);\n}\n\nint _getline(char *s, int n)\n{\n int i=1;\n char c;\n while(i<n&&(*s++=c=getchar())!=EOF&&c!='\\n')\n i++;\n if(c==EOF)\n return 0;\n *s='\\0';\n return i;\n}\n\n\nvoid qsort(char *v[], int left, int right)\n{\n int i, last;\n void swap(char *v[], int i, int j);\n if(left>=right)\n return;\n swap(v, left, (left+right)/2);\n last=left;\n for(i=left+1;i<=right;i++)\n if(strcmp(v[i],v[left])<0)\n swap(v, ++last, i);\n swap(v,left, last);\n qsort(v, left, last-1);\n qsort(v,last+1, right);\n}\n\nvoid swap(char *v[], int i, int j)\n{\n char *temp;\n temp=v[i];\n v[i]=v[j];\n v[j]=temp;\n}\n\nchar mem[5000];\nchar * bufp=mem;\nchar * alloc(int n)\n{\n char *p=bufp;\n if(bufp-mem+n>5000)\n return 0;\n bufp+=n;\n return p;\n}" }, { "alpha_fraction": 0.40860214829444885, "alphanum_fraction": 0.44086021184921265, "avg_line_length": 17.285715103149414, "blob_id": "4014af754c212b927ff8e908e3a911efc121b47c", "content_id": "241570a3870856e72d080a2c629d290dbbdc2bca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1023, "license_type": "no_license", "max_line_length": 48, "num_lines": 56, "path": "/Algorithm/parenthesis.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <fstream>\n#include <vector>\n#include <iomanip>\nusing namespace std;\n\nvoid func()\n{\n ifstream ifs(\"input\");\n if(!ifs.is_open())\n {\n exit(0);\n }\n ofstream ofs(\"output\");\n if(!ofs.is_open())\n {\n exit(0);\n }\n int num_t;\n ifs>>num_t;\n int index=1;\n int B, L, N;\n ofs<<setprecision(6);\n ofs<<setiosflags(ios::fixed);\n while(ifs>>B)\n {\n ifs>>L>>N;\n vector<int> sumvec;\n sumvec.push_back(1);\n for(int i=2; i<=L; i++)\n {\n int lsu=(i*(i+1))/2;\n sumvec.push_back(lsu);\n }\n long double tot_v=B*750.0;\n for(int i=0; i<L-1; i++)\n {\n tot_v=tot_v-(sumvec[i]*250.0);\n }\n tot_v/=sumvec[L-1];\n if(tot_v>250.0)\n tot_v=N*250.0;\n if(tot_v<=10e-6)\n tot_v=0.0;\n ofs<<\"Case #\"<<index<<\": \"<<tot_v<<endl;\n index++;\n }\n ifs.close();\n ofs.close();\n}\n\nint main()\n{\n func();\n return 0;\n}" }, { "alpha_fraction": 0.5294784307479858, "alphanum_fraction": 0.5600907206535339, "avg_line_length": 18.600000381469727, "blob_id": "02e4317aa8951835320c16640cd109cdf44931ca", "content_id": "ea3cbe6651fc7009a9296a1d2470ceb474e7e184", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 882, "license_type": "no_license", "max_line_length": 79, "num_lines": 45, "path": "/Programming/Python/4ObjectType/dictionary.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "D={'food':'spam', 'quantity':4, 'color':'pink'};\nprint(D['food']);\nD['quantity']+=1;\nprint(D);\nDic={};\nDic['name']='Bob';\nDic['job']='dev';\nDic['age']=40;\nprint(Dic);\nrec={'name':{'first':'Bob', 'last':'Smith'}, 'job':['dev', 'mgr'], 'age':40.5};\nprint(rec['name']['last']);\nprint(rec['job']);\nprint(rec['job'][-1]);\nrec['job'].append('janitor');\nprint(rec);\nKs=list(D.keys());\nprint(Ks);\nKs.sort();\nfor key in Ks:\n\tprint(key, '=>', D[key]);\nfor c in 'spam':\n\tprint(c.upper());\nx=4;\nwhile x>0:\n\tprint('spam!'*x);\n\tx-=1;\nfor keys in sorted(D):\n\tprint(keys, '=>', D[keys]);\nsquares=[x**2 for x in[1,2,3,5,6]];\nprint(squares);\nsquares=[];\nfor x in [1,2,3,5,6]:\n\tsquares.append(x**2);\nprint(squares);\nD['e']=99;\nprint(D);\n#print(D['f']);\nif not 'f' in D:\n\tprint('missing');\nif 'f' in D:\n\tprint(\"there\");\nvalue=D.get('x', 0);\nprint(value);\nvalue=D['x'] if 'x' in D else 6;\nprint(value);\n" }, { "alpha_fraction": 0.4062870740890503, "alphanum_fraction": 0.4264531433582306, "avg_line_length": 20.087499618530273, "blob_id": "b73849a7e664ea78666afb4a3d3d39fed5482a0d", "content_id": "b89974716fa62eece3d68cf1f2ef15554e9f04cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1686, "license_type": "no_license", "max_line_length": 58, "num_lines": 80, "path": "/Programming/C/Programming_in_Unix/chapter15/pager_pipe.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <string.h>\n#include <sys/wait.h>\n\n#define MAXLINE 1024\n\nint main(int argc, char * argv[])\n{\n int n;\n int fd[2];\n pid_t pid;\n char *pager, *argv0;\n char line [MAXLINE];\n FILE *fp;\n if(argc != 2)\n {\n fprintf(stderr, \"usage: a.out <pathname>\\n\");\n exit(0);\n }\n if((fp=fopen(argv[1], \"r\"))==NULL)\n {\n fprintf(stderr, \"error: can't open %s\", argv[1]);\n exit(0);\n }\n if(pipe(fd)<0)\n {\n fprintf(stderr, \"error: pipe\\n\");\n exit(0);\n }\n if((pid=fork())<0)\n {\n fprintf(stderr, \"error: fork()\\n\");\n exit(0);\n }\n else if(pid>0)\n {\n close(fd[0]);\n while(fgets(line, MAXLINE, fp)!=NULL)\n {\n n=strlen(line);\n if(write(fd[1], line, n)!=n)\n {\n fprintf(stderr, \"error: write to pipe\\n\");\n break;\n }\n }\n if(ferror(fp))\n {\n fprintf(stderr, \"fgets error\\n\");\n }\n close(fd[1]);\n if(waitpid(pid, NULL, 0)<0)\n {\n fprintf(stderr, \"waitpid error\\n\");\n }\n exit(0);\n }\n else\n {\n close(fd[1]);\n if(fd[0]!=STDIN_FILENO)\n {\n if(dup2(fd[0], STDIN_FILENO)!=STDIN_FILENO)\n {\n fprintf(stderr, \"dup2 error to stdin\\n\");\n exit(0);\n }\n close(fd[0]);\n }\n pager=\"/usr/bin/more\";\n argv0=\"more\";\n if(execl(pager, argv0, (char *)0)<0)\n {\n fprintf(stdout, \"execl error for %s\", pager);\n }\n }\n exit(0);\n}" }, { "alpha_fraction": 0.5829596519470215, "alphanum_fraction": 0.5874439477920532, "avg_line_length": 15, "blob_id": "677c1162ee2fa75abf0ca9e95dc03f050b44e02d", "content_id": "5b8ddd8b9e1df43360e310847685f03d0337ea01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 223, "license_type": "no_license", "max_line_length": 55, "num_lines": 14, "path": "/Programming/C++/More_Effective_C++/chapter2/placement_new_init.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n A(){std::cout<<\"constructor is called\"<<std::endl;}\n};\n\nint main()\n{\n A *pa=static_cast<A *>(operator new(sizeof(A)));\n pa=new(pa)A();\n operator delete (pa);\n return 0;\n}" }, { "alpha_fraction": 0.581818163394928, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 19.045454025268555, "blob_id": "c00888af008f7c2a566a96ba3daf0d6090f29fe6", "content_id": "873ccc62dab16fc86eedb2f0ca369c980cd2fddf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 440, "license_type": "no_license", "max_line_length": 43, "num_lines": 22, "path": "/Programming/C/Network_Programming_in_Unix/chapter18/sysctl.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <netinet/udp.h>\n#include <arpa/inet.h>\n#include <netinet/ip_var.h>\n#include <netinet/udp_var.h>\n#include <stdio.h>\n#include <sys/socket.h>\n#include <sys/param.h>\n\nint main()\n{\n int mib[4], val;\n size_t len;\n mib[0]=CTL_NET;\n mib[1]=AF_INET;\n mib[2]=IPPROTO_UDP;\n mib[3]=UDPCTL_CHECKSUM;\n \n len=sizeof(val);\n sysctl(mib, 4, &val, &len, NULL, 0);\n printf(\"udp chechsum flag: %d\\n\", val);\n return 0;\n}" }, { "alpha_fraction": 0.6934919357299805, "alphanum_fraction": 0.6938418745994568, "avg_line_length": 37.119998931884766, "blob_id": "912c17173987f10713f4c89e5bcf402b2ea9dc09", "content_id": "c30784ff0fa917fbd91975f80c607a6625c99fc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2858, "license_type": "no_license", "max_line_length": 167, "num_lines": 75, "path": "/Project/source/SNS/neo4j.h", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <string>\n\nusing std::string;\n\nconst string news[]={\"news\", \"local\", \"international\", \"famous\", \"politics\", \"science\", \"games\"};\nconst string games[]={\"games\"};\nconst string food[]={\"food\"};\nconst string movies[]={\"movies\"};\nconst string constellation[]={\"constellation\"};\nconst string video[]={\"video\"};\nconst string * const caty[]={news, games, food, movies, constellation, video};\n/*\n *to access the neo4j database, will be used by both U_NeoDB and L_NeoDB; this\n *class should the socket to prevent the resource leak;\n */\n\nclass NeoDB{\npublic:\n NeoDB();\n int neo_query(const string &, string &);\n bool is_OK(){return state;}\n ~NeoDB();\nprivate:\n int sock;\n bool state;\n NeoDB(NeoDB&);\n NeoDB &operator=(NeoDB);\n};\n\n/*\n *U_NeoDB is a interface class that used to access the user's data. the interface\n *of this class is all static, thus do not need create and to forbid create\n *object of this class, constructor declared as private\n */\nclass U_NeoDB\n{\npublic:\n //static int create_account(const string & account);\n static string search_frd(const string &);\n static string get_client(const string &); //get login client type\n static int set_client(const string &, const string &); //set login clinet when login\n static int create_account(const string &); //new user\n static int add_friends(const string &, const string &); //add friends\n static int delete_friends(const string &, const string &); //delete friends\n static string get_ip(const string &); //get a specific users ip\n static int set_ip(const string &, const string &); //set ip when login\n static string get_all_fip(const string &); //get all friends ip\n static int set_last_login(const string &, const string &); //set last login time\n static string get_last_login(const string &);//get last login time\n static int is_login(const string &); //test whether login or not\n static int login(const string &, int status); //user login status\n static int set_preference(const string &, const string &, const string &); //prefered objects, first int is category and sub category, second int is the rank level\n static string get_preference(const string &);//get the users prefered items\n static int set_location(const string &, const string &);\n static string get_freq_location(const string &);\n static int set_freq_location(const string &, const string &);\nprivate:\n U_NeoDB();\n};\n\n/*\n *L_NeoDB use to manage the location information\n */\n\nclass L_NeoDB\n{\npublic:\n static int set_prefer(const string &, int, int);//string is coordinate, first int is the category and subcategory, third is the prefered level;\n static string get_prefer(const string &);\n static int set_new_info(const string &, const string &, int);\nprivate:\n L_NeoDB();\n};\n\nint get_ip_location(const string & ipstr, string & xml);" }, { "alpha_fraction": 0.4936014711856842, "alphanum_fraction": 0.511883020401001, "avg_line_length": 19.296297073364258, "blob_id": "c3b1276d8e398b20742cc54b0b746131583fa4de", "content_id": "f2923f83007e238645c7be6accd035b0e4bc28a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 547, "license_type": "no_license", "max_line_length": 59, "num_lines": 27, "path": "/Programming/C/Programming_in_Unix/chapter4/accessibility.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <unistd.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <fcntl.h>\n\nint main(int argc, char * argv[])\n{\n if(argc!=2)\n {\n fprintf(stderr, \"usage: a.out <pathname>\\n\");\n exit(0);\n }\n if(access(argv[1], R_OK)<0)\n {\n fprintf(stderr, \"error: access for %s\\n\", argv[1]);\n //exit(0);\n }\n else\n printf(\"read access OK\\n\");\n if(open(argv[1], O_RDONLY)<0)\n {\n fprintf(stderr, \"error: open %s\\n\", argv[1]);\n }\n else\n printf(\"open for reading OK\\n\");\n return 0;\n}" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5053191781044006, "avg_line_length": 13.538461685180664, "blob_id": "edfda4f008c8322391148badc353b34ecfc841ab", "content_id": "0ec55f3728d841ff2b74eb07610cb50635426f4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 188, "license_type": "no_license", "max_line_length": 42, "num_lines": 13, "path": "/Programming/C++/More_Effective_C++/chapter3/catch.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<stdexcept>\n\nint main()\n{\n try{\n std::bad_alloc ba;\n throw ba;\n }catch(...){\n std::cout<<\"caught it\"<<std::endl;\n }\n return 0;\n}" }, { "alpha_fraction": 0.382536381483078, "alphanum_fraction": 0.4054054021835327, "avg_line_length": 14.54838752746582, "blob_id": "ceaf660165d508e537ef5e3d0d4ae33211847261", "content_id": "8f27810d9098da8ad0f2e368043ac702eb100334", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 481, "license_type": "no_license", "max_line_length": 45, "num_lines": 31, "path": "/Programming/C/Programming_in_Unix/chapter8/childprocess.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n\nvoid fun()\n{\n printf(\"child exit\\n\");\n}\n\nint main()\n{\n pid_t pid;\n atexit(fun);\n for(int i=0; i<10; i++)\n {\n if((pid=fork())<0)\n {\n fprintf(stderr, \"error: fork\\n\");\n exit(0);\n }\n else if(pid==0)\n {\n printf(\"child%d\\n\", getpid());\n //sleep(1);\n //exit(0);\n }\n }\n //sleep(10);\n fork();\n return 0;\n}" }, { "alpha_fraction": 0.417391300201416, "alphanum_fraction": 0.4434782564640045, "avg_line_length": 11.88888931274414, "blob_id": "9b97675898b2d0a51e2ebb17730e557fc2d25a2d", "content_id": "70948d1f1b886b816b1872cff3711f1036201830", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 115, "license_type": "no_license", "max_line_length": 27, "num_lines": 9, "path": "/Programming/C/The C programming Language/chapter1/varsize.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n\nint main()\n{\n int c=0;\n int ch=1;\n for(c=0;ch!=c;ch++)\n printf(\"%d\\n\", ch);\n}" }, { "alpha_fraction": 0.6046966910362244, "alphanum_fraction": 0.6099151968955994, "avg_line_length": 31.97849464416504, "blob_id": "ebb86296f4549e97acb7d7c71facdf94f15f7a3d", "content_id": "72280ccc6fdf6435d9eb929d07b7b13bdd4b405b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3066, "license_type": "no_license", "max_line_length": 258, "num_lines": 93, "path": "/Programming/C/libcap/capture_packet.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n#include<pcap.h>\n#include<errno.h>\n#include<sys/socket.h>\n#include<netinet/in.h>\n#include<arpa/inet.h>\n#include<netinet/if_ether.h>\n\nint main()\n{\n int i;\n char *dev;\n char errbuf[PCAP_ERRBUF_SIZE];\n pcap_t* descr;\n const u_char *packet;\n struct pcap_pkthdr hdr; //pcap.h\n struct ether_header *eptr; //net/ethernet.h\n u_char *ptr; //printing out hardware info\n /* grab a device to peak in to*/\n dev=pcap_lookupdev(errbuf);\n if(dev==NULL)\n {\n printf(\"%s\\n\",errbuf);\n exit(1);\n }\n printf(\"DEV: %s\\n\", dev);\n /* open the device for sniffing.\n \n pcap_t * pcap_open_live( char *device, int snaplen, int prmisc, int to_ms, char *ebuf)\n snaplen- maximum size of packets to capture in bytes\n promisc- set card in promiscuous mode?\n to_ms - time to wait for packets in milliseconds before read times out\n errbuf - if something happens, place error string here\n Note if you change \"prmisc\" param to anything other than zero, you will get all packets your device sees, whether they are intendeed for you or not!! Be sure you know the rules of the network you are running before set your card in promiscuous node!! */\n descr =pcap_open_live(dev,BUFSIZ,0,1000,errbuf); //descr is pcap_t*\n if(descr==NULL)\n {\n printf(\"pcap_open_live():%s\\n\",errbuf);\n exit(1);\n }\n /*\n grab a packet from descr(yay!)\n u_char *pcap_next(pcap_t *p, struct pcap_pkthdr *h)\n so just pass in the descriptor we got from our call to pcap_open_live and an allocated struct pcap_pkthdr\n */\n packet=pcap_next(descr,&hdr);\n if(packet==NULL)\n {\n /*dinna work *sob**/\n printf(\"Didn't grab packet\\n\");\n exit(1);\n }\n /*\n struct pcap_pkthdr{\n struct timeval ts; time stamp\n bpf_u_int32 caplen; length of portion present\n bpf_u_int32; lebgth this packet (off wire)\n }\n */\n printf(\"Grabbed packet of length %d\\n\",hdr.len);\n printf(\"Received at ....%s\\n\",ctime((const time_t*)&hdr.ts.tv_sec));\n printf(\"Ethernet address length is %d\\n\", ETHER_HDR_LEN);\n /*start with the ethernet header*/\n eptr=(struct ether_header*)packet;\n if(ntohs(eptr->ether_type)==ETHERTYPE_IP)\n {\n printf(\"ETHERNET type hex:%x dec:%d is an IP packet\\n\", ntohs(eptr->ether_type),ntohs(eptr->ether_type));\n }\n else if (ntohs(eptr->ether_type)==ETHERTYPE_ARP)\n {\n printf(\"EHTERNET type hex:%x dec:%d is an ARP packet\\n\", ntohs(eptr->ether_type), ntohs(eptr->ether_type));\n }\n else{\n printf(\"ETHERNET type %x not IP\", ntohs(eptr->ether_type));\n exit(1);\n }\n ptr=eptr->ether_dhost;\n i=ETHER_ADDR_LEN;\n printf(\" Destination ADDRESS: \");\n do{\n printf(\"%s%x\",(i==ETHER_ADDR_LEN)?\" \":\":\", *ptr++);\n }while(--i>0);\n printf(\"\\n\");\n ptr=eptr->ether_shost;\n i=ETHER_ADDR_LEN;\n printf(\" Source Address: \");\n do{\n printf(\"%s%X\",(i==ETHER_ADDR_LEN)?\" \":\":\",*ptr++);\n }while(--i>0);\n printf(\"\\n\");\n return 0;\n}" }, { "alpha_fraction": 0.4562731385231018, "alphanum_fraction": 0.4648503065109253, "avg_line_length": 27.170616149902344, "blob_id": "4d719dcd1fcac759c287bc5adcba02dd0ee5b057", "content_id": "8e9d1ff89110dba096d7a8b1e53b506c9a261747", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5946, "license_type": "no_license", "max_line_length": 88, "num_lines": 211, "path": "/Programming/C/Network_Programming_in_Unix/chapter16/nonblock_cli.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <errno.h>\n#include <sys/time.h>\n#include <fcntl.h>\n#include <sys/select.h>\n\n#define MAXLINE 9128\n#define forever for(;;)\n\nint max(int a, int b)\n{\n return a>b?a:b;\n}\n\nchar *gf_time();\n\nvoid str_cli(FILE *fp, int sockfd)\n{\n int maxfdp1, val, stdineof;\n ssize_t n, nwritten;\n fd_set rset, wset;\n char to[MAXLINE], fr[MAXLINE];\n char *toiptr, *tooptr, *friptr, *froptr;\n if((val=fcntl(sockfd, F_GETFL, 0))<0)\n {\n fprintf(stderr, \"error: fcntl(get %d): %s\\n\", sockfd, strerror(errno));\n return;\n }\n if(fcntl(sockfd, F_SETFL, val|O_NONBLOCK)<0)\n {\n fprintf(stderr, \"error: fcntl(set %d): %s\\n\", sockfd, strerror(errno));\n return;\n }\n if((val=fcntl(STDIN_FILENO, F_GETFL, 0))<0)\n {\n fprintf(stderr, \"error: fcntl(get %d): %s\\n\", STDIN_FILENO, strerror(errno));\n return;\n }\n if(fcntl(STDIN_FILENO, F_SETFL, val|O_NONBLOCK)<0)\n {\n fprintf(stderr, \"error: fcntl(set %d): %s\\n\", STDIN_FILENO, strerror(errno));\n return;\n }\n \n toiptr=tooptr=to;\n friptr=froptr=fr;\n stdineof=0;\n \n maxfdp1=max(max(STDIN_FILENO, STDOUT_FILENO), sockfd)+1;\n \n forever\n {\n FD_ZERO(&rset);\n FD_ZERO(&wset);\n if(stdineof==0 &&toiptr<&to[MAXLINE])\n FD_SET(STDIN_FILENO, &rset); //read from stdin\n if(friptr<&fr[MAXLINE])\n FD_SET(sockfd, &rset);//read to socket\n if(tooptr!=toiptr)\n FD_SET(sockfd, &wset); //data to write to socket\n if(froptr!=friptr)\n FD_SET(STDOUT_FILENO, &wset); //data to write to stdout;\n if(select(maxfdp1, &rset, &wset, NULL, NULL)<0)\n {\n fprintf(stderr, \"error: select(), %s\\n\", strerror(errno));\n return;\n }\n if(FD_ISSET(STDIN_FILENO, &rset))\n {\n if((n=read(STDIN_FILENO, toiptr, &to[MAXLINE]-toiptr))<0)\n {\n if(errno != EWOULDBLOCK)\n {\n fprintf(stderr, \"error: read from stdin: %s\\n\", strerror(errno));\n return;\n }\n }\n else if(n==0)\n {\n fprintf(stderr, \"%s: EOF on stdin\\n\", gf_time());\n stdineof=1;\n if(tooptr==toiptr)\n {\n shutdown(sockfd, SHUT_WR);\n }\n }\n else\n {\n fprintf(stderr, \"%s: read %zd bytes from stdin\\n\", gf_time(), n);\n toiptr+=n;\n FD_SET(sockfd, &wset);\n }\n }\n if(FD_ISSET(sockfd, &rset))\n {\n if((n=read(sockfd, friptr, &fr[MAXLINE]-friptr))<0)\n {\n if(errno!=EWOULDBLOCK)\n {\n fprintf(stderr, \"read error on socket\");\n }\n }\n else if(n==0)\n {\n fprintf(stderr, \"%s: EOF on socket\\n\", gf_time());\n if(stdineof)\n return;\n else\n fprintf(stderr, \"server terminated prematurely\\n\");\n }\n else\n {\n fprintf(stderr, \"%s: read %zd bytes from socket\\n\", gf_time(), n);\n friptr+=n;\n FD_SET(STDOUT_FILENO, &wset);\n }\n }\n if(FD_ISSET(STDOUT_FILENO, &wset)&&((n=friptr-froptr)>0))\n {\n if((nwritten=write(STDOUT_FILENO, froptr, n))<0)\n {\n if(errno!=EWOULDBLOCK)\n {\n fprintf(stderr, \"error: write to stdout, %s\\n\", strerror(errno));\n return;\n }\n }\n else\n {\n fprintf(stderr, \"%s: wrote %zd bytes to stdout\\n\", gf_time(), nwritten);\n froptr+=nwritten;\n if(froptr==friptr)\n froptr=friptr=fr;//back to beginning of buffer\n }\n }\n if(FD_ISSET(sockfd, &wset)&&((n=toiptr-tooptr)>0))\n {\n if((nwritten=write(sockfd, tooptr, n))<0)\n {\n if(errno!=EWOULDBLOCK)\n {\n fprintf(stderr, \"error: write to socket: %s\\n\", strerror(errno));\n return;\n }\n }\n else\n {\n fprintf(stderr, \"%s: wrote %zd bytes to socket\\n\", gf_time(), nwritten);\n tooptr+=nwritten;\n if(tooptr==toiptr)\n {\n toiptr=tooptr=to;\n if(stdineof)\n shutdown(sockfd, SHUT_WR);\n }\n }\n }\n }\n}\n\nint main(int argc, char *argv[])\n{\n if(argc!=2)\n {\n fprintf(stderr, \"usage: time <IP address>\\n\");\n exit(0);\n }\n int sock;\n if((sock=socket(AF_INET, SOCK_STREAM, 0))<0)\n {\n fprintf(stderr, \"error: socket() %s\\n\", strerror(errno));\n exit(0);\n }\n struct sockaddr_in serv;\n bzero(&serv, sizeof(serv));\n serv.sin_family=AF_INET;\n serv.sin_port=htons(9868);\n if(inet_pton(AF_INET, (void *)&serv.sin_addr, argv[1])<0)\n {\n fprintf(stderr, \"%s\\n\", strerror(errno));\n exit(0);\n }\n if(connect(sock, (struct sockaddr *)&serv, sizeof(serv))<0)\n {\n fprintf(stderr, \"error: connect to server %s : %s\\n\", argv[1], strerror(errno));\n exit(0);\n }\n str_cli(stdin, sock);\n exit(0);\n}\n\nchar *gf_time()\n{\n struct timeval tv;\n static char str[30];\n char *ptr;\n if(gettimeofday(&tv, NULL)<0)\n {\n fprintf(stderr, \"gettimeof day error: %s\\n\", strerror(errno));\n return ptr;\n }\n ptr=ctime(&tv.tv_sec);\n strcpy(str, &ptr[11]);\n snprintf(str+8, sizeof(str)-8, \".%06d\", tv.tv_usec);\n return str;\n}\n\n\n" }, { "alpha_fraction": 0.5306859016418457, "alphanum_fraction": 0.5415162444114685, "avg_line_length": 17.53333282470703, "blob_id": "4623af3b84a8d34721aec7bb2eaea4abfb579a59", "content_id": "e8e547041835d091451987fbfad0b8a3d6840c2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 277, "license_type": "no_license", "max_line_length": 50, "num_lines": 15, "path": "/Programming/C/Programming_in_Unix/chapter2/max_num_open_file.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<sys/param.h>\n#include<unistd.h>\n\nint main()\n{\n printf(\"* opened files number: %d\\n\", NOFILE);\n for(int i=0; i<NOFILE+0; i++)\n {\n printf(\"%d \", i);\n close(i);\n }\n printf(\"opened files number:%d\\n\", NOFILE);\n return 0;\n}" }, { "alpha_fraction": 0.5287958383560181, "alphanum_fraction": 0.5340313911437988, "avg_line_length": 11, "blob_id": "aebe8d64696efaa1f76ea67a5d03c3d6c6b84c4f", "content_id": "1a708373121e389687234ef495b17f37fbdb7113", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 191, "license_type": "no_license", "max_line_length": 44, "num_lines": 16, "path": "/Programming/C++/Inside_the_C++_object_model/chapter1/mix_struct_class.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n A(){std::cout<<\"I'm class\"<<std::endl;}\n};\n\nstruct B: public A{\n B(){std::cout<<\"I'm struct\"<<std::endl;}\n};\n\nint main()\n{\n B b;\n return 0;\n}" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 15.5, "blob_id": "22fa58d51e9ceb8c5640168ba1ccf370b5bad75a", "content_id": "ef5987a293fb03e4575314188a860384ad3a7120", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/Programming/Python/11Value_Statement_Print/multi.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "a = b = c ='spam'\nprint(a, b, c)\n" }, { "alpha_fraction": 0.6265664100646973, "alphanum_fraction": 0.6365914940834045, "avg_line_length": 21.16666603088379, "blob_id": "3cf4d4335231c49c2812906aae247195ea2ee439", "content_id": "88ffbcb5bad01ad847640eb5175c501989613c21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "no_license", "max_line_length": 36, "num_lines": 18, "path": "/Programming/Python/9TupleandFile/file.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "test=open('test.txt', 'w');\ntest.write('hello text\\n');\nprint(test.write('goodbye text\\n'));\ntest.close();\ntest=open('test.txt');\nprint(test.readline());\nprint(test.readline());\nprint(test.readline());\ntest.close();\nprint(open('test.txt').read());\nd={'a':1, 'b':2};\nf=open('datafile.pk1', 'wb');\nimport pickle;\npickle.dump(d, f);\nf.close();\nf=open('datafile.pk1', 'rb');\ne=pickle.load(f);\nprint(d);\n" }, { "alpha_fraction": 0.6136363744735718, "alphanum_fraction": 0.6136363744735718, "avg_line_length": 7.900000095367432, "blob_id": "a0481e0b59dc6d0397ab8dbcf35b073cca57e881", "content_id": "4e287c8ffb5880d2a2351cc646cd242892935c53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 88, "license_type": "no_license", "max_line_length": 32, "num_lines": 10, "path": "/Programming/C++/Inside_the_C++_object_model/chapter4/virtual_static.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n static virtual void fun(){};\n};\n\nint main()\n{\n}" }, { "alpha_fraction": 0.47706422209739685, "alphanum_fraction": 0.5321100950241089, "avg_line_length": 11.11111068725586, "blob_id": "64d55e132235cd961851795ccc972ba63d3aaddd", "content_id": "a65a8327f41a588f903312af180093f19f641fb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "no_license", "max_line_length": 27, "num_lines": 9, "path": "/Programming/Python/13Loop/else.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "y=11\nx=y//2\nwhile x>1:\n\tif y % x == 0:\n\t\tprint(y, 'has factor', x)\n\t\tbreak\n\tx-=1\nelse:\n\tprint(y, 'is prime')\n" }, { "alpha_fraction": 0.34933334589004517, "alphanum_fraction": 0.36355555057525635, "avg_line_length": 17.459016799926758, "blob_id": "c55eceaba4c3d6f3dfcc40fbb414412886c297f6", "content_id": "6a3d85cccd0a2ae711ed3ebd4b365c30b06208b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1125, "license_type": "no_license", "max_line_length": 37, "num_lines": 61, "path": "/Programming/C/The C programming Language/chapter1/histogram.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define MAXLENGTH 45\n#define IN 0\n#define OUT 1\n\nint main()\n{\n int c,i,flag,len;\n int counter[MAXLENGTH];\n for(i=0;i<MAXLENGTH;++i)\n counter[i]=0;\n flag=OUT;\n for(i=0;(c=getchar())!=EOF;)\n {\n if(c==' '||c=='\\n'||c=='\\t')\n {\n if(flag==IN)\n {\n flag=OUT;\n ++counter[i];\n i=0;\n }\n }\n else\n {\n flag=IN;\n ++i;\n }\n }\n int max=0;\n for(i=0;i<MAXLENGTH;++i)\n {\n printf(\"length %d:\",i);\n for(int j=0;j<counter[i];++j)\n putchar('*');\n printf(\"\\n\");\n if(max<counter[i])\n max=counter[i];\n }\n int min=0;\n for(i=0;i<MAXLENGTH;++i)\n {\n counter[i]=counter[i]-max;\n if(min>counter[i])\n min=counter[i];\n }\n printf(\"\\n\");\n while(min!=0)\n {\n for(i=0;i<MAXLENGTH;++i)\n {\n if(counter[i]>=0)\n putchar('*');\n else\n putchar(' ');\n ++counter[i];\n }\n ++min;\n }\n}" }, { "alpha_fraction": 0.3588709533214569, "alphanum_fraction": 0.4032258093357086, "avg_line_length": 10.857142448425293, "blob_id": "ba85bf1daa18ae0f001de981015dd8f340fef1ac", "content_id": "1dd5313ed5f4ba4e87038b2cc1cc46c52b306bd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 248, "license_type": "no_license", "max_line_length": 33, "num_lines": 21, "path": "/Programming/C++/Code_Complete/test/loop.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n int i=0;\n goto sta;\n \n for(i=0; i<10; i++)\n {\n sta:\n std::cout<<6<<std::endl;\n }\n i=0;\n goto s;\n while(i<10)\n {\n s:\n std::cout<<66<<std::endl;\n }\n return 0;\n}" }, { "alpha_fraction": 0.5371286869049072, "alphanum_fraction": 0.5396039485931396, "avg_line_length": 14.576923370361328, "blob_id": "2b1a1a219b14402bb318f54e4bbf0e1f4dd11717", "content_id": "35733cd813c786af6335135bead831979358677b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 404, "license_type": "no_license", "max_line_length": 44, "num_lines": 26, "path": "/Programming/C++/Inside_the_C++_object_model/chapter3/class_size.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\n};\n\nclass B: public virtual A{\n};\n\nclass C: public virtual A{\n};\n\nclass D: public B, public C{\n};\n\nclass E: public C{\n};\n\nint main()\n{\n std::cout<<\"A: \"<<sizeof (A)<<std::endl;\n std::cout<<\"B: \"<<sizeof (B)<<std::endl;\n std::cout<<\"C: \"<<sizeof (C)<<std::endl;\n std::cout<<\"D: \"<<sizeof (D)<<std::endl;\n std::cout<<\"E: \"<<sizeof (E)<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.4035087823867798, "alphanum_fraction": 0.4385964870452881, "avg_line_length": 10.5, "blob_id": "a19bae23636f5cfcc50e054a97e6375621b317cd", "content_id": "6a8779340b398ba2aed1794653292ab13b19b2ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 114, "license_type": "no_license", "max_line_length": 28, "num_lines": 10, "path": "/Programming/C/The C programming Language/chapter1/integer.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nint main()\n{\n int flag =0, n=1;\n while(n>flag)\n {\n printf(\"%d \",n*=10);\n }\n}" }, { "alpha_fraction": 0.4753915071487427, "alphanum_fraction": 0.4888143241405487, "avg_line_length": 16.54901885986328, "blob_id": "53323cd1f2593506ec7e9e0f1ec01ee3b6cf5f97", "content_id": "5d42c321cca8f45179bbec17ad59dee951802b08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 894, "license_type": "no_license", "max_line_length": 76, "num_lines": 51, "path": "/Programming/Practice/Interpretation/huffman_tree.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nstruct Node{\n Node(int w=0, Node *l=NULL, Node *r=NULL):weight(w), left(l), right(r){}\n char weight;\n Node *left;\n Node *right;\n};\n\nstruct Alphabet{\n char alpha;\n int frequency;\n};\n\nvoid shellsort(Alphabet *a, int n)\n{\n static int inc[]={1, 3, 7, 11, 13};\n for(int i=4; i>=0; i--)\n {\n for(int j=inc[i]-1; j<n; j++)\n {\n Alphabet temp=a[j];\n int k=j;\n for(; k>=inc[i]; k-=inc[i])\n if(temp.frequency<a[k-inc[i]].frequency)\n a[k]=a[k-1];\n else break;\n a[j]=temp;\n }\n \n }\n}\n\nclass Huffman{\npublic:\n Huffman(Alphabet *, int);\n void encoding(std::string &s);\n void decoding(std::string &s);\nprivate:\n void construct();\n Node *root;\n};\nvoid construct(Alphabet *a, int n)\n{\n shellsort(a, n);\n \n}\n\nint main()\n{\n}" }, { "alpha_fraction": 0.6456692814826965, "alphanum_fraction": 0.6456692814826965, "avg_line_length": 22.875, "blob_id": "7e1bea1b709dc8e76080fba2d238bf2a6ba8ac7c", "content_id": "868c4fd25e3c4d29f1aa847a50eb113da5f6bcf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 381, "license_type": "no_license", "max_line_length": 65, "num_lines": 16, "path": "/Project/SNS/userdb.h", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <mysql.h>\n#include <string>\nusing std::string;\n\nclass DBConn{\npublic:\n DBConn();\n string query_pwd(const string &); //used for check password\n int insert(const string &, const string &); //for new user\n int update(const string &, const string &); //change passwd\n ~DBConn();\nprivate:\n MYSQL *conn;\n DBConn(DBConn &);\n DBConn operator=(DBConn);\n};" }, { "alpha_fraction": 0.6820809245109558, "alphanum_fraction": 0.6820809245109558, "avg_line_length": 12.307692527770996, "blob_id": "db7c26420529311e66c4d9f7fe40fb068d0224af", "content_id": "230b5fa388d1684fd949785a405a312afd4d8928", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 173, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/Programming/JAVA/Thinking in JAVA/chapter7/test/Multi.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "public class Multi{\n\tpublic static void main (String []args)\n\t{\n\t\tSystem.out.println(\"just a test for multi class in a single file\");\n\t}\n\n}\n\nclass First{\n}\n\nclass Second{\n}\n" }, { "alpha_fraction": 0.5362318754196167, "alphanum_fraction": 0.5465838313102722, "avg_line_length": 16.925926208496094, "blob_id": "8e686090cbef75dc3b02bb4e59e8c090974c1424", "content_id": "d2d0a8e2a93d912641dd237f3f3ce3ed703d03ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 483, "license_type": "no_license", "max_line_length": 38, "num_lines": 27, "path": "/Algorithm/Leetcode/reverse_words_lib.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<sstream>\n#include<vector>\n\nvoid reverse_word(std::string &s)\n{\n std::istringstream iss(s);\n std::string str;\n std::vector<std::string> svec;\n while(iss>>str)\n {\n svec.push_back(str);\n }\n s.clear();\n for(int i=svec.size()-1; i>0; --i)\n s+=svec[i]+\" \";\n if(svec.size()>0)\n s+=svec[0];\n}\n\nint main()\n{\n std::string str=\"the sky is blue\";\n reverse_word(str);\n std::cout<<str<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.5878962278366089, "alphanum_fraction": 0.6157540678977966, "avg_line_length": 25.71794891357422, "blob_id": "100e424427bde2e4590062b8c89771786e61d078", "content_id": "9ce3530713b466053d800ad63f30e311acd471ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1041, "license_type": "no_license", "max_line_length": 110, "num_lines": 39, "path": "/Programming/C/Programming_in_Unix/chapter15/data_location.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <unistd.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <sys/shm.h>\n\n#define ARRAY_SIZE 40000\n#define MALLOC_SIZE 100000\n#define SHM_SIZE 100000\n#define SHM_MODE 0600\n\nchar array[ARRAY_SIZE];\n\nint\nmain()\n{\n int shmid;\n char *ptr, * shmptr;\n printf(\"array[] from %lx to %lx\\n\", (unsigned long)&array[0], (unsigned long)&array[ARRAY_SIZE]);\n printf(\"stack around %lx\\n\", (unsigned long)&shmid);\n if((ptr=malloc(MALLOC_SIZE))==NULL)\n {\n fprintf(stderr, \"error: molloc()\\n\");\n }\n printf(\"malloced from %lx to %lx\\n\", (unsigned long)ptr, (unsigned long)ptr+MALLOC_SIZE);\n if((shmid=shmget(IPC_PRIVATE, SHM_SIZE, SHM_MODE))<0)\n {\n fprintf(stderr, \"shmget error\\n\");\n }\n if((shmptr=shmat(shmid, 0, 0))==(void *)-1)\n {\n fprintf(stderr, \"shmat error\\n\");\n }\n printf(\"shared memory attached from %lx to %lx\\n\", (unsigned long)shmptr, (unsigned long)shmptr+SHM_SIZE);\n if(shmctl(shmid, IPC_RMID, 0)<0)\n {\n fprintf(stderr, \"shmctl error\\n\");\n }\n exit(0);\n}" }, { "alpha_fraction": 0.4016445279121399, "alphanum_fraction": 0.4193548262119293, "avg_line_length": 16.577777862548828, "blob_id": "5c34c5412592182f0709f12c99e49db24f54a9ea", "content_id": "d460f2f0b6d8d8744c21e5c20dce67a907305ba8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1581, "license_type": "no_license", "max_line_length": 43, "num_lines": 90, "path": "/Programming/Practice/Microsoft/test.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <string>\n\nusing namespace std;\n\nvoid CMD1(string &str, int x, int i, int j)\n{\n for(; i<=j; i++)\n {\n str[i]=x;\n //std::cout<<i<<j<<std::endl;\n }\n //std::cout<<str<<std::endl;\n}\n\nchar func(int ch, int k)\n{\n int s=ch-'A';\n s=s+k;\n char t=s%26;\n return 'A'+t;\n}\n\nvoid CMD2(string &str, int i, int j, int k)\n{\n for(; i<=j; i++)\n {\n //std::cout<<i<<std::endl;\n str[i]=func(str[i], k);\n \n }\n}\n\nvoid CMD3(string &str, int i)\n{\n if(i>=str.size())\n return;\n string ret=str.substr(i);\n ret+=str.substr(0, i);\n str=ret;\n //std::cout<<str<<std::endl;\n //std::cout<<\"111\"<<std::endl;\n}\n\nvoid CMD4(string &str, int i, int j)\n{\n if(i>j)\n return;\n CMD4(str, i+1, j);\n CMD2(str, i, j, 1);\n //std::cout<<str<<std::endl;\n //std::cout<<\"111\"<<std::endl;\n}\n\n\nint main()\n{\n //std::cout<<func('Z', 26);\n int len, row;\n int i, j, k, m;\n char ch;\n string str, temp;\n std::cin>>len>>row>>str;\n //std::cout<<std::endl;\n //std::cout<<str<<std::endl;\n for(int z=0; z<row; z++)\n {\n if(z==0)\n {\n std::cin>>temp>>m>>i>>j>>ch;\n CMD1(str, ch, i, j);\n }\n if(z==1)\n {\n std::cin>>temp>>m>>i>>j>>k;\n CMD2(str, i, j, k);\n }\n if(z==2)\n {\n std::cin>>temp>>m>>i;\n CMD3(str,i);\n }\n if(z==4)\n {\n std::cin>>temp>>m>>i>>j;\n CMD4(str, i, j);\n }\n }\n std::cout<<str<<std::endl;\n}" }, { "alpha_fraction": 0.6441947817802429, "alphanum_fraction": 0.6441947817802429, "avg_line_length": 15.6875, "blob_id": "d9969f86ba110046689bbb39876e7b6b9fcea59b", "content_id": "5861257a9bb86ae91991a6f1bdf205297471a44e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 267, "license_type": "no_license", "max_line_length": 40, "num_lines": 16, "path": "/Programming/JAVA/Thinking in JAVA/chapter9/private/Private.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "public class Private{\n\tprivate void f(){\n\t\tSystem.out.println(\"private f()\");\n\t}\n\tpublic static void main(String []args){\n\t\tPrivate p=new Derived();\n\t\tp.f();\n\t}\n}\n\nclass Derived extends Private{\n\t//@Override\n\tpublic void f(){\n\t\tSystem.out.println(\"public f()\");\n\t}\n}\n" }, { "alpha_fraction": 0.4920993149280548, "alphanum_fraction": 0.53724604845047, "avg_line_length": 15.44444465637207, "blob_id": "cf75a033860d2bba1f3109ae870a7d2ad644b677", "content_id": "a4f6a2504d04967c1fd3482eccd0af00d92870e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 443, "license_type": "no_license", "max_line_length": 35, "num_lines": 27, "path": "/Algorithm/Leetcode/divide.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nint rec(int dividend, int divisor)\n{\n if(dividend<divisor)\n return 0;\n if(dividend==divisor)\n return 1;\n int i=1, j=divisor;\n while(dividend>divisor)\n {\n divisor<<=1;\n i<<=1;\n }\n std::cout<<dividend<<std::endl;\n divisor>>=1;\n i>>=1;\n dividend-=divisor;\n i+=3*rec(dividend, j+j+j);\n return i;\n}\n\nint main()\n{\n std::cout<<rec(2147483647, 2);\n return 0;\n}" }, { "alpha_fraction": 0.4931809902191162, "alphanum_fraction": 0.5101363658905029, "avg_line_length": 21.23770523071289, "blob_id": "35cffad881f4328328d34ef6a808cf1908cc38f3", "content_id": "7feb9ea6a40a184f237451b06c39399472831c10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2713, "license_type": "no_license", "max_line_length": 72, "num_lines": 122, "path": "/Programming/C/Network_Programming_in_Unix/chapter16/tcpechoserv.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n#include <stdio.h>\n\nvoid\nproc(int sock);\n\nvoid\nsig_chld(int signo);\n\nint\nmain()\n{\n int listenfd, connfd;\n if((listenfd=socket(AF_INET, SOCK_STREAM, 0))<0)\n {\n fprintf(stderr, \"error: socket()\\n\");\n exit(0);\n }\n struct sockaddr_in servaddr;\n bzero(&servaddr, sizeof(servaddr));\n servaddr.sin_family=AF_INET;\n servaddr.sin_port=htons(9868);\n servaddr.sin_addr.s_addr=htonl(INADDR_ANY);\n if(bind(listenfd, (struct sockaddr *)&servaddr, sizeof(servaddr))<0)\n {\n fprintf(stderr, \"error: bind()\\n\");\n exit(0);\n }\n if(listen(listenfd, 5)<0)\n {\n fprintf(stderr, \"error: listen()\\n\");\n exit(0);\n }\n if(signal(SIGCHLD, sig_chld)==SIG_ERR)\n {\n fprintf(stderr, \"error: signal()\\n\");\n exit(0);\n }\n struct sockaddr_in cliaddr;\n socklen_t len=sizeof(struct sockaddr_in);\n pid_t pid;\n while(1)\n {\n connfd=accept(listenfd, (struct sockaddr*)&cliaddr, &len);\n if(connfd<0)\n {\n if(errno==EINTR)\n continue;\n fprintf(stderr, \"error: accept()\\n\");\n }\n if((pid=fork())<0)\n {\n fprintf(stderr, \"error: fork()\\n\");\n }\n if(pid==0)\n {\n if(close(listenfd)<0)\n {\n fprintf(stderr, \"error: close listen socket\\n\");\n }\n proc(connfd);\n if(close(connfd)<0)\n {\n fprintf(stderr, \"error: close child socket\\n\");\n fprintf(stderr, \"%s\\n\", strerror(errno));\n }\n exit(0);\n }\n if(close(connfd)<0)\n {\n fprintf(stderr, \"%s\\n\", strerror(errno));\n }\n }\n return 0;\n}\n\nvoid\nproc(int sock)\n{\n char *buf=(char *)malloc(1500*sizeof(char));\n if(buf==NULL)\n {\n fprintf(stderr, \"error: molloc\\n\");\n fprintf(stderr, \"%s\\n\", strerror(errno));\n return;\n }\n memset(buf, 0, 1500);\n int ret=0;\nagain:\n while((ret=read(sock, buf, 1500))>0)\n {\n printf(\"received %d bytes \\n %s\\n\", ret, buf);\n write(sock, buf, strlen(buf));\n memset(buf, 0, 1500);\n }\n if(ret<0&&errno==EINTR)\n goto again;\n else if(ret<0)\n {\n fprintf(stderr, \"error: read socket: %d\\n\", sock);\n fprintf(stderr, \"%s\\n\", strerror(errno));\n }\n}\n\nvoid\nsig_chld(int signo)\n{\n pid_t pid;\n int stat;\n \n while((pid=waitpid(-1, &stat, WNOHANG))>0)\n {\n printf(\"chiald %d terminated\\n\", pid);\n }\n //printf(\"child %d terminated\\n\", pid);\n return ;\n}\n" }, { "alpha_fraction": 0.5878524780273438, "alphanum_fraction": 0.5965293049812317, "avg_line_length": 22.049999237060547, "blob_id": "3d6349577ad6614c4dc52e6fa6346a3d0882eec9", "content_id": "21611a72a15688adb04fe12c360cf8ea3a7a8234", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 461, "license_type": "no_license", "max_line_length": 52, "num_lines": 20, "path": "/Programming/JAVA/Thinking in JAVA/chapter9/downcast/downcast.java", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "class use{\n\tpublic void f(){System.out.println(\"base f()\");}\n\tpublic void g(){System.out.println(\"base g()\");}\n}\n\nclass more extends use{\n\tpublic void f(){System.out.println(\"derived f()\");}\n\tpublic void g(){System.out.println(\"derived g()\");}\n\tpublic void u(){System.out.println(\"u()\");}\n}\n\npublic class downcast{\n\tpublic static void main(String [] args){\n\t\tuse []x={new use(), new more()};\n\t\tx[0].f();\n\t\tx[1].g();\n\t\t((more)x[1]).u();\n\t\t((more)x[0]).u();\n\t}\n}\n" }, { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 17.33333396911621, "blob_id": "f8787e801918fb74ff4a608d550f53aa6a7a05d7", "content_id": "d13c0f062f1657935851131609fa2a08cdb97e7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 54, "license_type": "no_license", "max_line_length": 35, "num_lines": 3, "path": "/Project/source/Client/command.h", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <string>\n\nint proc_comm(const std::string &);" }, { "alpha_fraction": 0.3657817244529724, "alphanum_fraction": 0.4011799395084381, "avg_line_length": 17.351350784301758, "blob_id": "6c064e04ef4e227f909b67573966e34f91932f89", "content_id": "a12cf709ad1d53edceb38ce667a8debe1964b938", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 678, "license_type": "no_license", "max_line_length": 45, "num_lines": 37, "path": "/Algorithm/Algorithm/chapter2/min_subseq_positive.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nT min_positive(T *a, int n);\n\nint main()\n{\n int a[]={-1, 10, -20, 12, 11, 12};\n std::cout<<min_positive(a, 6)<<std::endl;\n return 0;\n}\n\ntemplate <typename T>\nT min_positive(T *a, int n)\n{\n T sum=0, temp=0;\n bool flag=true;\n for(int i=0; i<n; i++)\n {\n temp=0;\n for(int j=i; j<n; j++)\n {\n temp+=*(a+j);\n if(flag&&temp>0)\n {\n sum=temp;\n flag=false;\n }\n if(temp>0&&sum>temp)\n sum=temp;\n if(temp>0&&j<n-1&&*(a+j+1)>0)\n temp=0;\n }\n }\n \n return sum;\n}" }, { "alpha_fraction": 0.4693877696990967, "alphanum_fraction": 0.4721088409423828, "avg_line_length": 22, "blob_id": "3ba2699bbaa46e8e0b98ac07d81923db34fb1977", "content_id": "9d2c1671a613b4b2060eef77a045557c5b8f31f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 735, "license_type": "no_license", "max_line_length": 66, "num_lines": 32, "path": "/Project/source/Client/init.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include \"init.h\"\n#include \"conserv.h\"\n#include <iostream>\n\nvoid init()\n{\n std::cout<<\">Press Y to sign on, or enter account\"<<std::endl;\n std::cout<<\">account:\";\n std::cin>>log_acc;\n std::string pwd;\n if(log_acc==\"Y\"||log_acc==\"y\")\n {\n do\n {\n std::cout<<\">account:\";\n std::cin>>log_acc;\n std::cout<<\">password:\";\n std::cin>>pwd;\n }while(Conn::sign_on(log_acc, pwd)!=0);\n std::cout<<\">account:\";\n std::cin>>log_acc;\n }\n std::cout<<\">password:\";\n std::cin>>pwd;\n while(Conn::login(log_acc, pwd)!=0)\n {\n std::cout<<\">account:\";\n std::cin>>log_acc;\n std::cout<<\">password:\";\n std::cin>>pwd;\n }\n}" }, { "alpha_fraction": 0.5526315569877625, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 11.733333587646484, "blob_id": "362eb7f762b262b0738b5e61ae27aa3ab3cd79eb", "content_id": "fef33851acb06323f127b3eb199e7e1839f6535a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 190, "license_type": "no_license", "max_line_length": 44, "num_lines": 15, "path": "/Programming/C++/Inside_the_C++_object_model/chapter4/static_memeber.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n static const int fun() {i=11;return i;}\nprivate:\n static int i;\n};\nint A::i=10;\n\nint main()\n{\n std::cout<<A::fun()<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 20, "blob_id": "6e45652a0cd85e1ab9aa1e8b895d7be82f8f4aff", "content_id": "3e43c94f41b3ba00c2c11192acda8f2cc510fec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/Programming/Python/3Execution/myfile.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "title=\"the meaning\";\n" }, { "alpha_fraction": 0.4253246784210205, "alphanum_fraction": 0.45129871368408203, "avg_line_length": 13.65079402923584, "blob_id": "70ac4e0ea5a0f13e942f759be9292fc460283d04", "content_id": "adfca35850e19b736610beb50744ea88b2f60b56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 924, "license_type": "no_license", "max_line_length": 43, "num_lines": 63, "path": "/Programming/C/The C programming Language/chapter5/getfloat.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<ctype.h>\n\nint getch(void);\nvoid ungetch(int);\ndouble getfloat(double *dp);\n\nint main()\n{\n double p;\n getfloat(&p);\n printf(\"%.6g\\n\",p);\n return 0;\n}\n\nchar _ch;\nint bufp=0;\n\nint getch(void)\n{\n if(bufp>0)\n {\n bufp--;\n return _ch;\n }\n return getchar();\n}\n\nvoid ungetch(int c)\n{\n bufp++;\n _ch=c;\n}\n\ndouble getfloat(double *dp)\n{\n int c, sign;\n double power=1.0;\n while(isspace(c=getch()))\n ;\n if(c!=EOF&&c!='+'&&c!='-'&&!isdigit(c))\n {\n ungetch(c);\n return 0;\n }\n sign=(c=='-')?-1:1;\n if(isdigit(c))\n ungetch(c);\n for(*dp=0.0;isdigit(c=getch());)\n {\n *dp=10.0**dp+(c-'0');\n }\n if(c=='.')\n for(power=1.0;isdigit(c=getch());)\n {\n *dp=10.0**dp+(c-'0');\n power*=10.0;\n }\n *dp=*dp/power*sign;\n if(c!=EOF)\n ungetch(c);\n return c;\n}\n\n" }, { "alpha_fraction": 0.5542420148849487, "alphanum_fraction": 0.57232266664505, "avg_line_length": 23.355932235717773, "blob_id": "e129471d5cd1bb8fcb8bc473a22b36d87f32b478", "content_id": "96d23ac4815eb8fbfed170b8ccddda322e92f042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1438, "license_type": "no_license", "max_line_length": 70, "num_lines": 59, "path": "/Programming/C/Network_Programming_in_Unix/chapter8/echoudpserv.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <arpa/inet.h>\n#include <unistd.h>\n#include <string.h>\n#include <errno.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nvoid echo(int, struct sockaddr *, socklen_t);\n\nint\nmain(int argc, char *argv[])\n{\n int sockfd;\n if((sockfd=socket(AF_INET, SOCK_DGRAM, 0))<0)\n {\n fprintf(stderr, \"error: create socket\\n\");\n exit(0);\n }\n struct sockaddr_in servaddr, cliaddr;\n bzero(&servaddr, sizeof(servaddr));\n bzero(&cliaddr, sizeof(cliaddr));\n servaddr.sin_family=AF_INET;\n servaddr.sin_port=htons(9868);\n servaddr.sin_addr.s_addr=htonl(INADDR_ANY);\n /*if(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, 0, 0)<0)\n {\n fprintf(stderr, \"error: setsockopt\\n\");\n exit(0);\n }\n */\n if(bind(sockfd, (struct sockaddr *)&servaddr, sizeof(servaddr))<0)\n {\n fprintf(stderr, \"bind error\\n\");\n exit(0);\n }\n echo(sockfd, (struct sockaddr *)&cliaddr, sizeof(cliaddr));\n}\n\nvoid\necho(int sock, struct sockaddr* addr, socklen_t cli)\n{\n int n;\n char msg[1500];\n socklen_t len=cli;\n while(1)\n {\n if((n=recvfrom(sock, msg, 1500, 0, addr, &len))<0)\n {\n fprintf(stderr, \"recvfrom error: %s\\n\", strerror(errno));\n continue;\n }\n if(sendto(sock, msg, n, 0, addr, len)<0)\n {\n fprintf(stderr, \"sendto error: %s\\n\", strerror(errno));\n continue;\n }\n }\n}\n\n" }, { "alpha_fraction": 0.4803149700164795, "alphanum_fraction": 0.5196850299835205, "avg_line_length": 11.800000190734863, "blob_id": "7ddbc706cb20109461e9d4ef31f5c3d1edc75ba0", "content_id": "21f5a6751ab6b812e4381dd8ab1705136e6d082a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 127, "license_type": "no_license", "max_line_length": 29, "num_lines": 10, "path": "/Programming/C++/Inside_the_C++_object_model/chapter6/delete.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nint main()\n{\n int *p=new int(10);\n delete p;\n *p=11;\n std::cout<<*p<<std::endl;\n return 0;\n}" }, { "alpha_fraction": 0.40061160922050476, "alphanum_fraction": 0.4266054928302765, "avg_line_length": 20.571428298950195, "blob_id": "2a01c82a0341fc1fe07d667d381f994ce38dc35a", "content_id": "6630eb7fbf5b16e4548333670d0744a7c50bcf29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1962, "license_type": "no_license", "max_line_length": 68, "num_lines": 91, "path": "/Programming/C++/Crossfire/find_domain/find_domain.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<fstream>\n#include<string>\n#include<vector>\nusing std::cout;\nusing std::endl;\nusing std::ifstream;\nusing std::ofstream;\nusing std::vector;\nusing std::string;\n\nint main()\n{\n ifstream ifs1(\"web_do.txt\");\n if(!ifs1.is_open())\n {\n cout<<\"faile to open web_do\"<<endl;\n return 0;\n }\n vector<string> svec1;\n string str;\n while(getline(ifs1,str))\n {\n svec1.push_back(str);\n }\n ifs1.close();\n ifstream ifs(\"web_domain.txt\");\n if(!ifs.is_open())\n {\n cout<<\"faile to open web_domain\"<<endl;\n return 0;\n }\n vector<string> svec2;\n while(getline(ifs,str))\n {\n svec2.push_back(str);\n }\n ifs.close();\n for(int i=0;i!=svec1.size();++i)\n {\n svec1[i]=svec1[i].substr(7);\n svec1[i]=svec1[i].substr(0,svec1[i].size()-1);\n cout<<svec1[i]<<endl;\n }\n for(int i=0;i!=svec2.size();++i)\n {\n bool flag=false;\n for(int j=0;j!=svec1.size();++j)\n {\n if(svec1[j][0]=='w'&&svec1[j][1]=='w')\n {\n if(svec1[j].substr(4)==svec2[i])\n {\n flag=true;\n break;\n }\n }\n if(svec1[j][0]=='t'&&svec1[j][1]=='w'&&svec1[j][2]=='.')\n {\n if(svec1[j].substr(3)==svec2[i])\n {\n flag=true;\n break;\n }\n }\n if(svec1[j]==svec2[i])\n {\n flag=true;\n break;\n }\n }\n if(!flag)\n {\n svec1.push_back(svec2[i]);\n }\n }\n ofstream ofs(\"domain.txt\");\n if(!ofs.is_open())\n {\n cout<<\"faile open domain.txt\"<<endl;\n return 0;\n }\n for(int i=0;i!=svec1.size();++i)\n {\n cout<<svec1[i]<<endl;\n cout<<i<<endl;\n ofs<<svec1[i]<<endl;\n }\n ofs.close();\n return 0;\n}" }, { "alpha_fraction": 0.5542168617248535, "alphanum_fraction": 0.5542168617248535, "avg_line_length": 9.375, "blob_id": "cc6985280a19c82caac888475575642722aa9761", "content_id": "d9b7765679733b867cc5a925cea9555928977c16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, "license_type": "no_license", "max_line_length": 14, "num_lines": 8, "path": "/Programming/Python/17Namespace/exer5.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "X = 'Spam'\ndef func():\n\tX='Ni'\n\tdef nested():\n\t\tprint(X)\n\tnested()\nfunc()\nprint(X)\n" }, { "alpha_fraction": 0.4189944267272949, "alphanum_fraction": 0.4972067177295685, "avg_line_length": 15.363636016845703, "blob_id": "c5a0f2b750e7bf22a01df83a6ffe8b1d097a6efa", "content_id": "3fc1f999e18d05059cecd42186540851bbf59aed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 179, "license_type": "no_license", "max_line_length": 34, "num_lines": 11, "path": "/Algorithm/Algorithm/chapter4/balance_tree/balance.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include\"balance.h\"\n\nint main()\n{\n Binary<int> tree;\n int a[]={9,1,3,2,4,7,6,5,8,0};\n for(int i=0; i<10; i++)\n tree.insert(a[i]);\n tree.print();\n return 0;\n}" }, { "alpha_fraction": 0.49015748500823975, "alphanum_fraction": 0.5039370059967041, "avg_line_length": 17.178571701049805, "blob_id": "577e6e942a406b739c5653da9af16727e8850817", "content_id": "8465b9bfd652215826e6648ebf6ec4c193f6266d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 508, "license_type": "no_license", "max_line_length": 47, "num_lines": 28, "path": "/Programming/C/Programming_in_Unix/chapter8/system.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <unistd.h>\n#include <stdio.h>\n\nvoid err_sys(char *s)\n{\n fprintf(stderr, \"%s\\n\", s);\n exit(0);\n}\n\nint main()\n{\n int status;\n \n if ((status = system(\"date\")) < 0)\n err_sys(\"system() error\");\n //pr_exit(status);\n \n if ((status = system(\"nosuchcommand\")) < 0)\n err_sys(\"system() error\");\n //pr_exit(status);\n \n if ((status = system(\"who; exit 44\")) < 0)\n err_sys(\"system() error\");\n //pr_exit(status);\n \n exit(0);\n}" }, { "alpha_fraction": 0.5540443658828735, "alphanum_fraction": 0.5590550899505615, "avg_line_length": 23.10344886779785, "blob_id": "d7240d113366a367f75ead3da2afa0451ccbf960", "content_id": "989dae58098d5f53c620eb343fcef6710fa9195a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1397, "license_type": "no_license", "max_line_length": 186, "num_lines": 58, "path": "/Programming/C/multithread/thread_attr.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n#include<pthread.h>\n#include<unistd.h>\n\n#define NUM 5\n\nvoid * wait_n(void *t)\n{\n int i;\n long tid;\n tid=(long)t;\n sleep(1);\n printf(\"Sleeping in thread...\\n\");\n printf(\"thread with id: %ld ...exit\\n\", tid);\n pthread_exit(NULL);\n}\n\nint main()\n{\n int rc;\n int i;\n pthread_t threads[NUM];\n void *status;\n pthread_attr_t attr;\n //void * status;\n pthread_attr_init(&attr);\n pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);\n \n for(i=0; i<NUM; ++i)\n {\n printf(\"main(): creating thread, %d\\n\",i);\n/*\nfirst argument for pthread_create is the identifier of the thread, second is the thread attribute, third is the entry of the function, fourth is the argument for the function passed in. \n*/\n rc=pthread_create(&threads[i], NULL, wait_n, (void *)&i);\n if(rc)\n {\n printf(\"Error:unable to create thread,%d\\n\",rc);\n exit(-1);\n }\n }\n pthread_attr_destroy(&attr);\n for(i=0;i<NUM;++i)\n {\n rc=pthread_join(threads[i],&status);\n if(rc)\n {\n printf(\"ERROR: unable to join, %d\\n\",rc);\n exit(-1);\n }\n printf(\"main:completed thread id: %d\\n\",i);\n printf(\" exit with status: %d\\n\",*(int *)(status));\n }\n printf(\"main: program exiting.\\n\");\n pthread_exit(NULL);\n return 0;\n}" }, { "alpha_fraction": 0.5027933120727539, "alphanum_fraction": 0.5167597532272339, "avg_line_length": 17.894737243652344, "blob_id": "40ed27a5f2939fb87bf9a0250161fd656dc95784", "content_id": "d1ade22e984648b9aee4cf600dbc61e142a1e8bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 358, "license_type": "no_license", "max_line_length": 50, "num_lines": 19, "path": "/Programming/C/Programming_in_Unix/chapter3/status_flag.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <fcntl.h>\n#include <stdio.h>\n#include <stdlib.h>\n\nvoid set_fl(int fd, int flags)\n{\n int val;\n if((val=fcntl(fd, F_GETFL, 0))<0)\n {\n fprintf(stderr, \"error: fcntl F_GETFL\\n\");\n exit(0);\n }\n val |= flags;\n if(fcntl(fd, F_SETFL, val) < 0)\n {\n fprintf(stderr, \"error: fcntl F_SETFL\\n\");\n exit(0);\n }\n}" }, { "alpha_fraction": 0.40665435791015625, "alphanum_fraction": 0.44177448749542236, "avg_line_length": 18.35714340209961, "blob_id": "06499f77b3e6938de73552db2318713d3d1f58ed", "content_id": "98ac61a8bc61114bb967db1a63cf707239882cb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 541, "license_type": "no_license", "max_line_length": 38, "num_lines": 28, "path": "/Algorithm/Algorithm/chapter7/bubble_sort.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\ntemplate <typename T>\nvoid bubble_sort(T *a, int n);\n\nint main()\n{\n int a[]={9,8,7,6,5,4,3,2,1,0};\n bubble_sort(a, 10);\n for(int i=0; i<10; i++)\n std::cout<<a[i]<<\" \";\n std::cout<<std::endl;\n return 0;\n}\n\ntemplate <typename T>\nvoid bubble_sort(T *a, int n)\n{\n for(int i=0; i<n; i++)\n {\n for(int j=n-1; j>=i; j--)\n if(a[j]<a[i])\n std::swap(a[j], a[i]);\n //for(int k=0; k<n; k++)\n // std::cout<<a[k]<<\" \";\n //std::cout<<std::endl;\n }\n}" }, { "alpha_fraction": 0.49095022678375244, "alphanum_fraction": 0.5135746598243713, "avg_line_length": 16.719999313354492, "blob_id": "e621a0b9cd83f919baab2fea86b148d1aeff5be7", "content_id": "2667743ad9343bf77836df07ed272cff7dabcd74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 442, "license_type": "no_license", "max_line_length": 56, "num_lines": 25, "path": "/Programming/C/Programming_in_Unix/chapter3/cp_stdin_stdout.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<unistd.h>\n#include<stdlib.h>\n\n#define BUFFSIZE 4096\n\nint main()\n{\n int n;\n char buf[BUFFSIZE];\n while((n=read(STDIN_FILENO, buf, BUFFSIZE))>0)\n {\n if(write(STDOUT_FILENO, buf, n)!=n)\n {\n fprintf(stderr, \"error: write to STDOUT\\n\");\n exit(0);\n }\n }\n if(n<0)\n {\n fprintf(stderr,\"error:read, less than 1\\n\");\n exit(0);\n }\n return 0;\n}" }, { "alpha_fraction": 0.3464730381965637, "alphanum_fraction": 0.35062241554260254, "avg_line_length": 18.299999237060547, "blob_id": "18e610ba1e9afa3f5891cf55eac7242858bc6284", "content_id": "4900200d767b3e3c7c41fdf19639f1fa1b417e8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 964, "license_type": "no_license", "max_line_length": 42, "num_lines": 50, "path": "/Programming/C/The C programming Language/chapter7/minscanf.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdarg.h>\n\nvoid minscanf(char *fmt, ...);\n\nint main()\n{\n int i;\n double d;\n char s[20];\n minscanf(\"%d/%f/%s\", &i, &d, s);\n printf(\"%d %f %s\\n\", i, d, s);\n return 0;\n}\n\nvoid minscanf(char *fmt, ...)\n{\n va_list ap;\n int * ival;\n double * dval;\n char * sval, *p;\n va_start(ap, fmt);\n for(p=fmt; *p!='\\0'; p++)\n {\n if(*p!='%')\n {\n getchar();\n continue;\n }\n switch(*++p)\n {\n case 'd' :\n ival=va_arg(ap, int *);\n scanf(\"%d\", ival);\n break;\n case 'f' :\n dval=va_arg(ap, double *);\n scanf(\"%lf\", dval);\n break;\n case 's' :\n sval=va_arg(ap, char *);\n scanf(\"%s\", sval);\n break;\n default:\n getchar();\n break;\n }\n }\n va_end(ap);\n}" }, { "alpha_fraction": 0.6147540807723999, "alphanum_fraction": 0.6229507923126221, "avg_line_length": 14.375, "blob_id": "b249130620e172c4ad8e6a5a6e767f24581681d7", "content_id": "5e76d94a349c09739b438767bde251a0c33bbe38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 122, "license_type": "no_license", "max_line_length": 55, "num_lines": 8, "path": "/Programming/C/Programming_in_Unix/chapter1/getpid.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<unistd.h>\n\nint main()\n{\n printf(\"hello world from process: %d\\n\", getpid());\n return 0;\n}" }, { "alpha_fraction": 0.41322314739227295, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 11.199999809265137, "blob_id": "8666ed962a87d961b702b91b391997c964c57b9a", "content_id": "7a914d84e1d1fa0e15a2e799671ee4f32be66701", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 121, "license_type": "no_license", "max_line_length": 26, "num_lines": 10, "path": "/Programming/C/The C programming Language/chapter2/bitcount.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nint main()\n{\n int i=31;\n int j;\n for(j=1;i&=(i-1);++j);\n printf(\"%d\\n\",j);\n return 0;\n}" }, { "alpha_fraction": 0.5979021191596985, "alphanum_fraction": 0.6083915829658508, "avg_line_length": 19.5, "blob_id": "85feed2bb388b5e85ed6cd22ca5d71514e211d34", "content_id": "ac8a198e797ed8f1b1c9bdb46e02823042a8dd1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 286, "license_type": "no_license", "max_line_length": 67, "num_lines": 14, "path": "/Programming/C++/More_Effective_C++/chapter1/placement_delete.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass test{\npublic:\n test(){std::cout<<\"constructor is called\"<<std::endl;}\n ~test(){std::cout<<\"destructor is called\"<<std::endl;}\n};\n\nint main()\n{\n test *p=static_cast<test *>(operator new [] (10*sizeof(test)));\n operator delete []( p);\n return 0;\n}" }, { "alpha_fraction": 0.5389610528945923, "alphanum_fraction": 0.5779221057891846, "avg_line_length": 37.5, "blob_id": "4bad1f080d6782474cbc24a72138813d5e34b3b0", "content_id": "2e79d11669421dc412af48b12d470560cdaa5e78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 46, "num_lines": 4, "path": "/Programming/Python/19HighLevelFunction/reduce.py", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "from functools import reduce\nprint(reduce((lambda x, y: x + y), [1, 2, 3]))\nprint(reduce((lambda x, y: x * y), [1, 2, 3]))\nreduce((lambda x, y: x*y), [])\n" }, { "alpha_fraction": 0.4100545048713684, "alphanum_fraction": 0.4784978926181793, "avg_line_length": 18.66666603088379, "blob_id": "973fb3cb26905c41bea6913291bdcf812dc54145", "content_id": "a273f7f09c9f7e7673130e6ae1910fd8fbd6901e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1651, "license_type": "no_license", "max_line_length": 62, "num_lines": 84, "path": "/Programming/C/The C programming Language/chapter5/day.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\nstatic char daytab[2][13]={\n {0,31,28,31,30,31,30,31,31,30,31,30,31},\n {0,31,29,31,30,31,30,31,31,30,31,30,31}\n};\n\nint day_of_year(int year, int month, int day);\nvoid month_day(int year, int yearday, int *pmonth, int *pday);\n\nint main()\n{\n int i, m, d;\n printf(\"%d\\n\",i=day_of_year(2016, 12, 16));\n month_day(2016,i,&m,&d);\n printf(\"%d %d\\n\",m,d);\n return 0;\n}\n\nint day_of_year(int year, int month, int day)\n{\n int i, leap;\n if(year<0)\n {\n printf(\"error: invalid year\\n\");\n return 0;\n }\n if(month>12||month<1)\n {\n printf(\"error:invalid month\\n\");\n return 0;\n }\n leap=(year%4==0&&year%100!=0)||year%400==0;\n if(leap)\n {\n if(day<1||day>366)\n {\n printf(\"error: invalid day\\n\");\n return 0;\n }\n }\n else\n {\n if(day<1||day>365)\n {\n printf(\"error: invalid day\\n\");\n return 0;\n }\n }\n for(i=1;i<month;i++)\n day+=daytab[leap][i];\n return day;\n}\n\nvoid month_day(int year, int yearday, int *pmonth, int *pday)\n{\n int i, leap;\n if(year<0)\n {\n printf(\"error: invalid year\\n\");\n return;\n }\n leap=(year%4==0&&year%100!=0)||year%400==0;\n if(leap)\n {\n if(yearday<1||yearday>366)\n {\n printf(\"error: invalid day\\n\");\n return;\n }\n }\n else\n {\n if(yearday<1||yearday>365)\n {\n printf(\"error: invalid day\\n\");\n return;\n }\n }\n for(i=1;yearday>daytab[leap][i];i++)\n yearday-=daytab[leap][i];\n *pmonth=i;\n *pday=yearday;\n}" }, { "alpha_fraction": 0.5443037748336792, "alphanum_fraction": 0.5485231876373291, "avg_line_length": 13.875, "blob_id": "0b971ef116a34a4d239057b4e8b27c1136a30568", "content_id": "28fc077afd629f394790396b7ddf92e70d9ba685", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 237, "license_type": "no_license", "max_line_length": 49, "num_lines": 16, "path": "/Programming/C++/More_Effective_C++/chapter5/object_on_heap.cc", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<iostream>\n\nclass A{\npublic:\n A(){std::cout<<\"A's constructor\"<<std::endl;}\n void vir(){delete this;}\nprivate:\n ~A(){std::cout<<\"A's destructor\"<<std::endl;}\n};\n\nint main()\n{\n A *a=new A;\n a->vir();\n return 0;\n}" }, { "alpha_fraction": 0.5487805008888245, "alphanum_fraction": 0.6178861856460571, "avg_line_length": 14.4375, "blob_id": "ae3daf53d74a37c30bcb6c8135754fd3f9f7a55e", "content_id": "46022addea1be1d707851de50555cbffb54f8235", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 246, "license_type": "no_license", "max_line_length": 49, "num_lines": 16, "path": "/Programming/C/The C programming Language/chapter1/tempfunc.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n\n#define STEP 20\n#define DIFF 32\n\nfloat temperature(float fahr);\nint main()\n{\n for(int i=0;i<=300;i+=STEP)\n printf(\"%3d %6.1f\\n\", i, temperature(i));\n}\n\nfloat temperature(float fahr)\n{\n return (5.0/9.0)*(fahr-32);\n}" }, { "alpha_fraction": 0.5100133419036865, "alphanum_fraction": 0.5180240273475647, "avg_line_length": 19.2702693939209, "blob_id": "4b9347371f8612d70e09a4413afb733897aeaaaa", "content_id": "3abd749fc61d3524a9f801cbfac6c7e2c9552426", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 749, "license_type": "no_license", "max_line_length": 59, "num_lines": 37, "path": "/Programming/C/Programming_in_Unix/chapter10/signal.c", "repo_name": "guoyuquan/Graduate", "src_encoding": "UTF-8", "text": "#include <pwd.h>\n#include <unistd.h>\n#include <signal.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\nstatic void my_alarm(int signo)\n{\n struct passwd *rootptr;\n printf(\"in signal handler\\n\");\n if((rootptr=getpwnam(\"root\"))==NULL)\n {\n fprintf(stderr, \"error: getpwname(root)\\n\");\n exit(0);\n }\n alarm(1);\n}\n\nint main()\n{\n struct passwd * ptr;\n signal (SIGALRM, my_alarm);\n alarm(1);\n for(; ;)\n {\n printf(\"***\\n\");\n if((ptr=getpwnam(\"JERRY\"))==NULL)\n {\n fprintf(stderr, \"error: getpwnam(jerry)\\n\");\n exit(0);\n }\n if(strcmp(ptr->pw_name, \"JERRY\")!=0)\n printf(\"return value corrupted!\\n\");\n }\n return 0;\n}" } ]
390
oracid/IK-Inverse-Kinematics-for-3DOF-quadruped-robot-leg
https://github.com/oracid/IK-Inverse-Kinematics-for-3DOF-quadruped-robot-leg
f6e4e54b620199189dc2e4d8429c579c315b256e
c23fa7849880e9a99eb001ed1dcacddfad1a0c17
1f6aab7e0f2ee53f7d1c6cb8de8ffd89ecad4aa5
refs/heads/main
2023-08-11T15:47:32.650256
2021-09-14T05:16:01
2021-09-14T05:16:01
406,230,377
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4270976483821869, "alphanum_fraction": 0.4718019366264343, "avg_line_length": 36.26315689086914, "blob_id": "e0c973b67333d5ba74addee44f4a11f2e8b1afa6", "content_id": "6cb7c5c8f7f1f083c425f658f41ac9d9f2a93a34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2908, "license_type": "no_license", "max_line_length": 130, "num_lines": 76, "path": "/IKScrew.py", "repo_name": "oracid/IK-Inverse-Kinematics-for-3DOF-quadruped-robot-leg", "src_encoding": "UTF-8", "text": "from vpython import *\r\ncanvas(width=1500, height=720, center=vector(00,70,0), background=color.white, range=150)\r\nsphere( pos=vector(0,0,0), radius=2, color=color.red) # Origin of the orthonormal coordinate system\r\n\r\nfor i in range(-150,150,10): # Drawing floor\r\n for j in range(-150,150,10): #\r\n sphere( pos=vector(i,0,j), radius=0.3, color=color.black) #\r\n\r\nH =curve() # Diamond diagonal \r\nCL=curve() # Diamond left top side\r\nCR=curve() # Diamond right top side\r\nAL=curve() # Diamond left bottom side\r\nAR=curve() # Diamond right bottom side\r\n\r\ndef IK(x,y,z):\r\n\r\n global H\r\n global CL\r\n global CR\r\n global AL\r\n global AR\r\n\r\n H.clear()\r\n CL.clear()\r\n CR.clear()\r\n AL.clear()\r\n AR.clear()\r\n\r\n d=Ay-y # X Y diagonal calculations\r\n e=x #\r\n h=sqrt((e*e)+(d*d)) #\r\n E=acos(d/h) #\r\n if(e<0): #\r\n E=(-E) #\r\n X=sin(E)*h #\r\n Y=cos(E)*h #\r\n\r\n G=acos(h/(2*c)) # diamond sides calculations\r\n Clx=sin(E-G)*c #\r\n Cly=cos(E-G)*c #\r\n Crx=sin(E+G)*c #\r\n Cry=cos(E+G)*c #\r\n \r\n dz=h # Z diagonal calculations\r\n ez=z #\r\n hz=sqrt((ez*ez)+(dz*dz)) #\r\n D=acos(dz/hz) #\r\n if(ez<0): #\r\n D=(-D) #\r\n Z=sin(D)*hz #\r\n \r\n H =curve( A.pos, vector(X,Ay-Y,Z), radius=0.1, color=color.magenta, retain=30 ) # diagonal line\r\n CL=curve( A.pos, vector(Clx,Ay-Cly,Z/2), radius=2, color=color.yellow ) # top left side line of the diamond\r\n CR=curve( A.pos, vector(Crx,Ay-Cry,Z/2), radius=2, color=color.green ) # top right side line of the diamond\r\n AL=curve( vector(X,Ay-Y,Z), vector(Clx,Ay-Cly,Z/2), radius=2, color=color.yellow ) # bottom left side line of the diamond\r\n AR=curve( vector(X,Ay-Y,Z), vector(Crx,Ay-Cry,Z/2), radius=2, color=color.green ) # bottom right side line of the diamond\r\n\r\n################ Start Screw ################\r\nc=112 # length of diamond side \r\nAy=190 # coordinates of the main axis\r\nAx=0 #\r\nAz=0 #\r\nA=sphere( pos=vector(Ax,Ay,0), radius=4, color=color.red) # main paw axis\r\n\r\nr=120\r\nfor i in range(0,1080,3):\r\n rate(80)\r\n sphere( pos=vector(sin(i/57.296)*r, i*0.06, cos(i/57.296)*r ), radius=1, color=color.red)\r\nwhile True:\r\n for i in range(0,1080,3):\r\n rate(30)\r\n IK(sin(i/57.296)*r, i*0.06, cos(i/57.296)*r )\r\n for i in range(1080,0,-3):\r\n rate(30)\r\n IK(sin(i/57.296)*r, i*0.06, cos(i/57.296)*r )\r\n################ End Screw ################\r\n" }, { "alpha_fraction": 0.3721407651901245, "alphanum_fraction": 0.47096773982048035, "avg_line_length": 40.625, "blob_id": "256cdc49ccf1bec9b79bc44019d7736a9468097d", "content_id": "81573bbaece03db1f8e1ecab84df579b060524cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3410, "license_type": "no_license", "max_line_length": 176, "num_lines": 80, "path": "/IKZ.py", "repo_name": "oracid/IK-Inverse-Kinematics-for-3DOF-quadruped-robot-leg", "src_encoding": "UTF-8", "text": "from vpython import *\r\ncanvas(width=1500, height=720, center=vector(00,70,0), background=color.white, range=150)\r\nsphere( pos=vector(0,0,0), radius=2, color=color.red) # Origin of the orthonormal coordinate system\r\n\r\nfor i in range(-150,150,10): # Drawing floor\r\n for j in range(-150,150,10): #\r\n sphere( pos=vector(i,0,j), radius=0.3, color=color.black) #\r\n\r\nH =curve() # Diamond diagonal \r\nCL=curve() # Diamond left top side\r\nCR=curve() # Diamond right top side\r\nAL=curve() # Diamond left bottom side\r\nAR=curve() # Diamond right bottom side\r\n\r\ndef IK(x,y,z):\r\n\r\n global H\r\n global CL\r\n global CR\r\n global AL\r\n global AR\r\n\r\n H.clear()\r\n CL.clear()\r\n CR.clear()\r\n AL.clear()\r\n AR.clear()\r\n\r\n d=Ay-y # X Y diagonal calculations\r\n e=x #\r\n h=sqrt((e*e)+(d*d)) #\r\n E=acos(d/h) #\r\n if(e<0): #\r\n E=(-E) #\r\n X=sin(E)*h #\r\n Y=cos(E)*h #\r\n\r\n G=acos(h/(2*c)) # diamond sides calculations\r\n Clx=sin(E-G)*c #\r\n Cly=cos(E-G)*c #\r\n Crx=sin(E+G)*c #\r\n Cry=cos(E+G)*c #\r\n \r\n dz=h # Z diagonal calculations\r\n ez=z #\r\n hz=sqrt((ez*ez)+(dz*dz)) #\r\n D=acos(dz/hz) #\r\n if(ez<0): #\r\n D=(-D) #\r\n Z=sin(D)*hz #\r\n \r\n H =curve( A.pos, vector(X,Ay-Y,Z), radius=0.1, color=color.magenta, retain=30 ) # diagonal line\r\n CL=curve( A.pos, vector(Clx,Ay-Cly,Z/2), radius=2, color=color.yellow ) # top left side line of the diamond\r\n CR=curve( A.pos, vector(Crx,Ay-Cry,Z/2), radius=2, color=color.green ) # top right side line of the diamond\r\n AL=curve( vector(X,Ay-Y,Z), vector(Clx,Ay-Cly,Z/2), radius=2, color=color.yellow ) # bottom left side line of the diamond\r\n AR=curve( vector(X,Ay-Y,Z), vector(Crx,Ay-Cry,Z/2), radius=2, color=color.green ) # bottom right side line of the diamond\r\n\r\n################ Start Zigzag ################\r\nc=112 # length of diamond side \r\nAy=200 # coordinates of the main axis\r\nAx=0 #\r\nAz=0 #\r\nA=sphere( pos=vector(Ax,Ay,0), radius=4, color=color.red) # main paw axis\r\n\r\nPz=[-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-60,-50,-40,-30,-20,-10, 0, 10, 20, 30, 40, 50, 60, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70]\r\nPy=[ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]\r\nPx=[-70,-60,-50,-40,-30,-20,-10, 0, 10, 20, 30, 40, 50, 60, 70, 60, 50, 40, 30, 20, 10, 0,-10,-20,-30,-40,-50,-60,-70,-60,-50,-40,-30,-20,-10, 0, 10, 20, 30, 40, 50, 60, 70]\r\n\r\nfor i in range(0, 43,1):\r\n rate(20)\r\n sphere( pos=vector(Px[i],Py[i],Pz[i]), radius=1.5, color=color.red) # Path drawing with ball targets\r\n\r\nwhile True:\r\n for i in range(0, 43, 1):\r\n rate(20)\r\n IK(Px[i],Py[i],Pz[i])\r\n for i in range(42,-1, -1):\r\n rate(20)\r\n IK(Px[i],Py[i],Pz[i])\r\n################ End Zigzag ################\r\n" }, { "alpha_fraction": 0.7785714268684387, "alphanum_fraction": 0.7928571701049805, "avg_line_length": 45.66666793823242, "blob_id": "c0d6575eb756b83a77296a643e6f3206266991b2", "content_id": "6bc47ad1169f98341c88511a60bbff0021e9c341", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 280, "license_type": "no_license", "max_line_length": 92, "num_lines": 6, "path": "/README.md", "repo_name": "oracid/IK-Inverse-Kinematics-for-3DOF-quadruped-robot-leg", "src_encoding": "UTF-8", "text": "# IK-Inverse-Kinematics-for-3DOF-quadruped-robot-leg\nGraphic validation with VPython of my inverse kinematics solution for a 3 DOF quadruped leg.\n\nThanks to the GlowScript team for the VPython solution, https://www.glowscript.org/ .\n\nYoutube video : https://youtu.be/kfc0hBcVoW8\n" } ]
3
howeypeter/limelightCDN
https://github.com/howeypeter/limelightCDN
e1a05ef9f5f0117848eadb290c07e125a9ac43c2
6e13bfecce8919140b63523da467aac14f932eb9
b1598f0aa39408b5a4cf34a5d6b773a6c649224a
refs/heads/master
2021-01-22T23:59:01.675464
2018-05-16T15:54:13
2018-05-16T15:54:13
102,430,117
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7602428197860718, "alphanum_fraction": 0.7632777094841003, "avg_line_length": 23.407407760620117, "blob_id": "52c5773a98e10217473b93a7b9472f6387acef7b", "content_id": "a2a6060c16e5ca800906d2af516c8a7725a02140", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 659, "license_type": "no_license", "max_line_length": 83, "num_lines": 27, "path": "/Example.py", "repo_name": "howeypeter/limelightCDN", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nsys.path.append('./limelightCDN')\nimport limelightCDN\nimport ConfigParser\nimport os\nimport urllib2\n\nprofile='default'\n\nurL = \"https://api.lldns.net/cfapi/v1/svcinst/delivery/manual/shortname/shutterfly\"\nquery = \"\"\n\ndef read_conf(profile):\n config = ConfigParser.RawConfigParser()\n config.read([os.path.expanduser('~/.llnw/credentials')])\n username = config.get(profile, 'username')\n apikey = config.get(profile, 'apikey')\n return username,apikey\n\nuserName,apiKey = read_conf(profile)\n\n#make request\nusageReport = limelightCDN.Auth(apiKey)\nresponse = usageReport.GET(urL,userName,queryParameters=query)\nprint response.read()\n" }, { "alpha_fraction": 0.6941015124320984, "alphanum_fraction": 0.701646089553833, "avg_line_length": 24.578947067260742, "blob_id": "0ed579509a0ecb03b976a3b96fd6a30812071f12", "content_id": "0c6075b7a87233a33e3fc2d70a0699bb99b363a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1458, "license_type": "no_license", "max_line_length": 105, "num_lines": 57, "path": "/limelightCDN/limelightCDN.py", "repo_name": "howeypeter/limelightCDN", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport hashlib\nimport hmac\nimport time\nimport os\nimport urllib\nimport urllib2\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nclass Auth:\n def __init__(self,apiKey):\n\tself.apiKey = apiKey\n\treturn None\n def hmac(\n\tself,\n\turl,\n httpMethod=\"GET\",\n queryParameters=None,\n postData=None):\n timestamp = str(int(round(time.time()*1000)))\n datastring = httpMethod + url\n if queryParameters != None :\n datastring += queryParameters\n datastring += timestamp\n if postData != None :\n datastring += postData\n self.postData = postData\n self.token = hmac.new(self.apiKey.decode('hex'), msg=datastring,digestmod=hashlib.sha256).hexdigest()\n #return token,timestamp\n return self.token,timestamp\n #built-in GET request for REST-API\n def GET(\n\tself,\n\turl,\n\tusername,\n\thttpMethod=\"GET\",\n\tqueryParameters=None,\n\tpostData=None):\n\t\n\ttoken,timestamp = self.hmac(url,httpMethod,queryParameters,postData)\n\tif queryParameters != None :\n\t url = url + \"?\" + queryParameters\n\tif postData != None :\n\t\treq = urllib2.Request(url, postData)\n\telse:\n\t\treq = urllib2.Request(url)\n\treq.add_header('Content-Type','application/json')\n\treq.add_header('Accept','application/json')\n\treq.add_header('X-LLNW-Security-Principal', username)\n\treq.add_header('X-LLNW-Security-Timestamp', timestamp)\n\treq.add_header('X-LLNW-Security-Token', token)\n\tresponse = urllib2.urlopen(req)\n\treturn response\n" } ]
2
thcoura/ZillaLibSamples
https://github.com/thcoura/ZillaLibSamples
4a50e85240795b2c6b4834237a332e0c4dc61fb2
3a56f6988dbb8a5a862a6e80e3be6ac01467dcc2
403253c057aa839e44a3e2c516e25a129e64d913
refs/heads/master
2022-04-16T07:07:34.602836
2020-03-07T15:03:04
2020-03-07T15:03:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5753701329231262, "alphanum_fraction": 0.6722745895385742, "avg_line_length": 41.70588302612305, "blob_id": "52169140956fec60a48e0171a18ddeaa8fa6d3b5", "content_id": "41876a1748f7d5147821b42b185e48a50a43772f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1486, "license_type": "no_license", "max_line_length": 109, "num_lines": 34, "path": "/05-2d-geometry-drawing.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\tvoid Draw()\r\n\t{\r\n\t\t//use sine of current tick counter for pulsating effect\r\n\t\tfloat ticksin = ssin(s(ZLTICKS)/s(1000))*s(10);\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tZL_Display::DrawLine(150+ticksin, 10+ticksin, 10, 250, ZL_Color::Blue);\r\n\t\tZL_Display::DrawRect(200,10, 400+ticksin, 100+ticksin, ZL_Color::Red, ZL_Color::Yellow);\r\n\t\tZL_Display::DrawCircle(140, 150, 100+ticksin, ZL_Color::Green, ZLRGBA(0,1,1,0.4));\r\n\t\tZL_Display::DrawTriangle(ZL_Vector(330, 180), ZL_Vector(50+ticksin, 320+ticksin), ZL_Vector(190, 460),\r\n\t\t\tZL_Color::Orange, ZL_Color::Magenta);\r\n\t\tZL_Display::DrawEllipse(660, 400+ticksin, 120+ticksin, 60, ZL_Color::Red);\r\n\t\tZL_Display::DrawBezier(400, 400, 500+ticksin*2, 500+ticksin*3, 600, 200, 770, 250+ticksin, ZL_Color::Blue);\r\n\t\tZL_Display::FillGradient(500+ticksin, 5, 795, 80+ticksin,\r\n\t\t\tZL_Color::Red, ZL_Color::Blue, ZL_Color::Green, ZL_Color::Yellow);\r\n\t\tZL_Vector p[] = { ZL_Vector(380, 170), ZL_Vector(380, 300), ZL_Vector(470+ticksin, 200),\r\n\t\t\tZL_Vector(780, 240+ticksin), ZL_Vector(700, 100) };\r\n\t\tZL_Polygon(ZL_Polygon::BORDER_FILL).Add(p, COUNT_OF(p)).Draw(ZL_Color::Red, ZL_Color::Yellow);\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"2D Geometry Drawing\", 854, 480);\r\n\t\tZL_Display::SetThickness(4.0f);\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6747720241546631, "alphanum_fraction": 0.7045592665672302, "avg_line_length": 38.121952056884766, "blob_id": "0199f97a241d71f92e058d2b43168c30bfa7d071", "content_id": "243edb538b034708c2e5d5945894dcc40ccd44a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1645, "license_type": "no_license", "max_line_length": 128, "num_lines": 41, "path": "/34-skeletal-mesh-ik.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "static ZL_Camera Camera;\r\nstatic ZL_Light Light = ZL_Light(ZL_Vector3(2.f, 5.f, 8.f));\r\nstatic ZL_SkeletalMesh SkeletalMesh;\r\nstatic ZL_RenderList RenderList;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tsMain() : ZL_Application() {}\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\t//Initialize the game with depth buffer and 3d rendering\r\n\t\tZL_Display::Init(\"Skeletal Mesh\", 1280, 720, ZL_DISPLAY_DEPTHBUFFER);\r\n\t\tZL_Display3D::Init();\r\n\t\tZL_Input::Init();\r\n\r\n\t\t//Load the skeletal mesh model file\r\n\t\tSkeletalMesh = ZL_SkeletalMesh::FromGLTF(\"Data/human.glb.zip\");\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tif (ZL_Input::Down(ZLK_ESCAPE)) ZL_Application::Quit();\r\n\r\n\t\t//Update the camera position every frame referencing the mouse coordinates\r\n\t\tfloat HoirzontalAngleRad = (ZL_Display::PointerX-ZLHALFW)/ZLHALFW*PI+PIHALF;\r\n\t\tfloat VerticalAngleRad = ((ZL_Display::PointerY-ZLHALFH)/ZLHALFH-.2f)*PIHALF*0.8f;\r\n\t\tCamera.SetLookAt(ZL_Vector3::FromRotation(HoirzontalAngleRad, VerticalAngleRad) * 3.f + ZL_Vector3(0,0,1), ZL_Vector3(0,0,1));\r\n\r\n\t\t//Update the foot positions with inverse kinematics (IK)\r\n\t\tSkeletalMesh.TwoBoneIK(12, ZL_Vector3(-.1f, 0, .23f - .15f * ssin(ZLSECONDS * 5)), ZL_Vector3::Forward); //Left foot\r\n\t\tSkeletalMesh.TwoBoneIK(15, ZL_Vector3( .1f, 0, .23f - .15f * scos(ZLSECONDS * 5)), ZL_Vector3::Forward); //Right foot\r\n\t\tSkeletalMesh.Update();\r\n\r\n\t\t//Setup and draw our dynamic render list with our skeletal mesh on a black background\r\n\t\tRenderList.Reset();\r\n\t\tRenderList.Add(SkeletalMesh, ZL_Matrix::Identity);\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tZL_Display3D::DrawListWithLight(RenderList, Camera, Light);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.5770846009254456, "alphanum_fraction": 0.5980803966522217, "avg_line_length": 30.05769157409668, "blob_id": "e1e753034d7553e520d2d176c5126d6711eb1df6", "content_id": "a13b890f0a03009aea2f68cc115547d68ae0a4c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1667, "license_type": "no_license", "max_line_length": 124, "num_lines": 52, "path": "/27-json-read-write.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "struct sMain : public ZL_Application\r\n{\r\n\tZL_Font fnt;\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"JSON Reader and Writer\", 854, 480);\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tZL_Display::ClearFill();\r\n\r\n\t\t//Initialize a JSON object from a string with 3 keys and values\r\n\t\tZL_Json Test(\"{ \\\"string\\\" : \\\"text\\\", \\\"number\\\" : 10, \\\"boolean\\\" : true }\");\r\n\r\n\t\t//Add a fourth key \"array\" with 3 elements, change the third element to another number and then remove the second element\r\n\t\tTest[\"array\"] = ZL_Json(\"[1, 2, 3]\");\r\n\t\tTest[\"array\"](2) = 333;\r\n\t\tTest[\"array\"].EraseAt(1);\r\n\r\n\t\t//Load a file (not a file from disk, just 10 bytes from memory) and add it as another key\r\n\t\tZL_File MemoryJSONFile(\"\\\"jsontest\\\"\", 10);\r\n\t\tTest[\"from_other_file\"] = ZL_Json(MemoryJSONFile);\r\n\r\n\t\t//This adds key \"a\" with the JSON object { \"b\" : { \"c\" : 123 } } and then changes \"b\" to \"bbb\"\r\n\t\tTest[\"a\"][\"b\"][\"c\"] = 123;\r\n\t\tTest[\"a\"][\"b\"].SetKey(\"bbb\");\r\n\r\n\t\t//Erase an object key-value\r\n\t\tTest.Erase(\"number\");\r\n\r\n\t\t//Loop through all keys now present in Test and put them into a separate JSON array which afterwards is added as \"my_keys\"\r\n\t\tZL_Json JSONKeysArray;\r\n\t\tfor (ZL_Json& it : Test) JSONKeysArray.Add().SetString(it.GetKey());\r\n\t\tTest[\"my_keys\"] = JSONKeysArray;\r\n\r\n\t\t//Final JSON:\r\n\t\t/* {\r\n\t\t\t\"string\" : \"text\",\r\n\t\t\t\"boolean\" : true,\r\n\t\t\t\"array\" : [ 1, 333 ],\r\n\t\t\t\"from_other_file\" : \"jsontest\",\r\n\t\t\t\"a\" : { \"bbb\" : { \"c\" : 123 } },\r\n\t\t\t\"my_keys\" : [ \"string\", \"boolean\", \"array\", \"from_other_file\", \"a\" ]\r\n\t\t} */\r\n\r\n\t\t//Show JSON as string on screen\r\n\t\tfnt.Draw(ZLCENTER, Test.ToString(), ZL_Origin::Center);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.5762304663658142, "alphanum_fraction": 0.6329531669616699, "avg_line_length": 41.272727966308594, "blob_id": "36acc93ab8c4aa373a0ba9f8ab26c1036776ef52", "content_id": "c74ae6b88a4d882528d7c88a2bd0ca0a3b59d43a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3332, "license_type": "no_license", "max_line_length": 105, "num_lines": 77, "path": "/15-collision-tests.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "static ZL_Vector c1(820, 34), c2(500, 240);\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Collision Tests\", 854, 480);\r\n\t\tZL_Input::Init();\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZLBLACK);\r\n\t\tif (ZL_Input::Down()) { c1 = c2; c2 = ZL_Display::PointerPos(); }\r\n\r\n\t\tscalar r = 20;\r\n\t\tbool coll = false;\r\n\t\tZL_Vector Collision, Collision2;\r\n\r\n\t\tZL_Vector recvar(sin(s(ZLTICKS)/1000)*10, cos(s(ZLTICKS)/1000)*20);\r\n\t\tZL_Rectf rec1(250, 350, ZL_Vector(60, 25)); rec1+=recvar; //area for line\r\n\t\tZL_Rectf rec2(650, 350, ZL_Vector(60, 25)); rec2+=recvar; //area for point\r\n\t\tZL_Rectf rec3(250, 150, ZL_Vector(60, 25)); rec3+=recvar; //area for aabb\r\n\t\tZL_Rectf rec4(650, 150, ZL_Vector(60, 25)); rec4+=recvar; //area for rotbb\r\n\r\n\t\t//visualize swipe test (from c1 to c2 with radius r)\r\n\t\tZL_Display::DrawLine(c1, c2, ZL_Color::Orange);\r\n\t\tZL_Display::DrawCircle(c1, r, ZLRGBA(1,1,0,.3));\r\n\t\tZL_Display::DrawCircle(c2, r, ZLRGBA(1,1,0,.3));\r\n\t\tZL_Display::DrawLine(c1 + (c1 - c2).Perp().Norm()*r, c2 + (c1 - c2).Perp().Norm()*r, ZLRGBA(1,1,0,.3));\r\n\t\tZL_Display::DrawLine(c1 - (c1 - c2).Perp().Norm()*r, c2 - (c1 - c2).Perp().Norm()*r, ZLRGBA(1,1,0,.3));\r\n\t\tZL_Display::DrawLine(c2, c2 - (c1 - c2).Perp().Norm()*r - (c2 - c1).Norm()*r, ZLRGBA(1,1,0,.3));\r\n\t\tZL_Display::DrawLine(c2, c2 + (c1 - c2).Perp().Norm()*r - (c2 - c1).Norm()*r, ZLRGBA(1,1,0,.3));\r\n\r\n\t\t//collision and swipe test on a line\r\n\t\tZL_Vector p1 = rec1.LowLeft(), p2 = rec1.HighRight();\r\n\t\tZL_Display::DrawLine(p1, p2, ZL_Color::White);\r\n\t\tint ncoll = ZL_Math::LineCircleCollision(p1, p2, c2, r, &Collision, &Collision2);\r\n\t\tif (ncoll >= 1) ZL_Display::DrawCircle(Collision, 5, ZL_Color::Green);\r\n\t\tif (ncoll >= 2) ZL_Display::DrawCircle(Collision2, 5, ZL_Color::Green);\r\n\t\tif (ncoll) coll = true;\r\n\t\tZL_Vector CollisionLine;\r\n\t\tif (ZL_Math::CircleLineSweep(c1, c2, r, p1, p2, &CollisionLine))\r\n\t\t\tZL_Display::DrawCircle(CollisionLine, r, ZL_Color::Red);\r\n\r\n\t\t//collision and swipe test on a single point\r\n\t\tZL_Vector pp = rec2.Center();\r\n\t\tZL_Display::DrawCircle(pp, 15, ZL_Color::White);\r\n\t\tZL_Display::DrawLine(pp.x, pp.y-20, pp.x, pp.y+20, ZL_Color::White);\r\n\t\tZL_Display::DrawLine(pp.x-20, pp.y, pp.x+20, pp.y, ZL_Color::White);\r\n\t\tif (ZL_Math::CirclePointSweep(c1, c2, r, pp, &Collision))\r\n\t\t\tZL_Display::DrawCircle(Collision, r, ZL_Color::Red);\r\n\t\tif (pp-c2 <= r) coll = true;\r\n\r\n\t\t//collision and swipe test on axis aligned bounding box\r\n\t\tZL_AABB aabb(rec3);\r\n\t\tZL_Display::DrawRect(rec3, ZL_Color::White, ZLALPHA(.4));\r\n\t\tif (ZL_Math::CircleAABBSweep(c1, c2, r, aabb, &Collision))\r\n\t\t\tZL_Display::DrawCircle(Collision, r, ZL_Color::Red);\r\n\t\tif (aabb.Overlaps(c2, r)) coll = true;\r\n\r\n\t\t//collision and swipe test on rotating bounding box\r\n\t\tZL_RotBB bb(rec4, s(ZLTICKS)/s(600)); //s(.3)\r\n\t\tZL_Display::PushMatrix();\r\n\t\tZL_Display::Translate(bb.P);\r\n\t\tZL_Display::Rotate(bb.A);\r\n\t\tZL_Display::Translate(-bb.P);\r\n\t\tZL_Display::DrawRect(rec4, ZL_Color::White, ZLALPHA(.4));\r\n\t\tZL_Display::PopMatrix();\r\n\t\tif (ZL_Math::CircleRotBBSweep(c1, c2, r, bb, &Collision))\r\n\t\t\tZL_Display::DrawCircle(Collision, r, ZL_Color::Red);\r\n\t\tif (bb.Overlaps(c2, r)) coll = true;\r\n\r\n\t\t//draw collision state at mouse cursor\r\n\t\tZL_Display::DrawCircle(c2, r, (coll ? ZL_Color::Red : ZL_Color::White));\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.5904840230941772, "alphanum_fraction": 0.6397139430046082, "avg_line_length": 45.22077941894531, "blob_id": "3719a9d3b1bcd479ae4179457a3035afb7593d77", "content_id": "1c35095c74ba144a5a616085dce7a51fc93f6ccf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3636, "license_type": "no_license", "max_line_length": 149, "num_lines": 77, "path": "/13-easing.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "enum { SCENE_GAME = 1, EASING_FUNC_COUNT = 30 };\r\n\r\ntypedef scalar (*EasingFunc)(scalar);\r\nstatic EasingFunc funcs[EASING_FUNC_COUNT] = {\r\n\tZL_Easing::InSine, ZL_Easing::OutSine, ZL_Easing::InOutSine, ZL_Easing::InQuad, ZL_Easing::OutQuad, ZL_Easing::InOutQuad,\r\n\tZL_Easing::InCubic, ZL_Easing::OutCubic, ZL_Easing::InOutCubic, ZL_Easing::InQuart, ZL_Easing::OutQuart, ZL_Easing::InOutQuart,\r\n\tZL_Easing::InQuint, ZL_Easing::OutQuint, ZL_Easing::InOutQuint, ZL_Easing::InExpo, ZL_Easing::OutExpo, ZL_Easing::InOutExpo,\r\n\tZL_Easing::InCirc, ZL_Easing::OutCirc, ZL_Easing::InOutCirc, ZL_Easing::InBack, ZL_Easing::OutBack, ZL_Easing::InOutBack,\r\n\tZL_Easing::InElastic, ZL_Easing::OutElastic, ZL_Easing::InOutElastic, ZL_Easing::InBounce, ZL_Easing::OutBounce, ZL_Easing::InOutBounce,\r\n};\r\nstatic const char* funcnames[EASING_FUNC_COUNT] = {\r\n\t\"InSine\", \"OutSine\", \"InOutSine\", \"InQuad\", \"OutQuad\", \"InOutQuad\",\r\n\t\"InCubic\", \"OutCubic\", \"InOutCubic\", \"InQuart\", \"OutQuart\", \"InOutQuart\",\r\n\t\"InQuint\", \"OutQuint\", \"InOutQuint\", \"InExpo\", \"OutExpo\", \"InOutExpo\",\r\n\t\"InCirc\", \"OutCirc\", \"InOutCirc\", \"InBack\", \"OutBack\", \"InOutBack\",\r\n\t\"InElastic\", \"OutElastic\", \"InOutElastic\", \"InBounce\", \"OutBounce\", \"InOutBounce\",\r\n};\r\nstatic ZL_Font fnt;\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tint bigfuncindex;\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME), bigfuncindex(-1) { }\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tscalar t = sabs(smod(ZLSINCESECONDS(0), 2.0f) - 1.0f);\r\n\t\tZL_Display::ClearFill(ZLBLACK);\r\n\t\tif (bigfuncindex >= 0)\r\n\t\t{\r\n\t\t\tscalar f = funcs[bigfuncindex](t);\r\n\t\t\tfnt.Draw(360, 660, funcnames[bigfuncindex], ZL_Origin::Center);\r\n\t\t\tZL_Display::DrawRect(40, 40, 680, 640, ZLALPHA(.3));\r\n\t\t\tDrawCurve(ZL_Rectf(40, 40, 680, 640), funcs[bigfuncindex], 100);\r\n\t\t\tZL_Display::FillCircle(40+t*640, 40+f*600, 20, ZLWHITE);\r\n\t\t\tZL_Display::DrawRect(730, 40, 830, 40+640, ZLALPHA(.3));\r\n\t\t\tZL_Display::FillCircle(780, 60+f*600, 20, ZLWHITE);\r\n\t\t\tZL_Display::DrawRect(880, 40, 1240, 335, ZLALPHA(.3));\r\n\t\t\tZL_Display::FillRect(880, 40, 880+f*360, 335, ZLWHITE);\r\n\t\t\tZL_Display::DrawRect(880, 385, 1240, 680, ZLALPHA(.3));\r\n\t\t\tZL_Display::FillRect(880, 385, 1240, 680, ZLBLACK+ZLWHITE*f);\r\n\t\t\tif (ZL_Input::Clicked()) bigfuncindex = -1;\r\n\t\t\treturn;\r\n\t\t}\r\n\t\tfor (int funccount = 0; funccount < EASING_FUNC_COUNT; funccount++)\r\n\t\t{\r\n\t\t\tZL_Rectf rec = ZL_Rectf::BySize(30.0f + 130.0f * s(funccount % 9) + 40.0f * s((funccount % 9) / 3), 480.0f - 150.0f * s(funccount / 9), 100, 100);\r\n\t\t\tfnt.Draw(rec.left, rec.high, funcnames[funccount]);\r\n\t\t\tZL_Display::DrawRect(rec, ZLALPHA(.3), (ZL_Input::Hover(rec) ? ZLALPHA(.1) : ZLTRANSPARENT));\r\n\t\t\tDrawCurve(rec, funcs[funccount], 20);\r\n\t\t\tif (ZL_Input::Hover(rec))\r\n\t\t\t{\r\n\t\t\t\tfloat fY = funcs[funccount](t)*rec.Height();\r\n\t\t\t\tZL_Display::DrawLine(rec.left, rec.low+fY, rec.right, rec.low+fY, ZLALPHA(.3));\r\n\t\t\t\tZL_Display::FillCircle(rec.left+t*rec.Width(), rec.low+fY, 4, ZLWHITE);\r\n\t\t\t}\r\n\t\t\tif (ZL_Input::Clicked(rec)) bigfuncindex = funccount;\r\n\t\t}\r\n\t}\r\n\r\n\tvoid DrawCurve(const ZL_Rectf area, EasingFunc func, int steps)\r\n\t{\r\n\t\tfor (scalar w = area.Width(), h = area.Height(), step = 0, fa = 0, fb = 1.0f/s(steps); fa < 1.0f; fa = fb, fb = (++step)/s(steps))\r\n\t\t\tZL_Display::DrawLine(area.left+fa*w, area.low+func(fa)*h, area.left+fb*w, area.low+func(fb)*h, ZLWHITE);\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Easing Functions\", 1280, 720);\r\n\t\tZL_Input::Init();\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6072782278060913, "alphanum_fraction": 0.6542835235595703, "avg_line_length": 34.63888931274414, "blob_id": "912eed07936e94457bdd1681a944d36aedffb3a9", "content_id": "92f961294b33120bf1192c5271df207cb7a2a6dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1319, "license_type": "no_license", "max_line_length": 144, "num_lines": 36, "path": "/20-sound-samples.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "struct sMain : public ZL_Application\r\n{\r\n\tZL_Font fnt;\r\n\tZL_Sound sndNormal, sndFrequency, sndStream;\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Sound Samples\", 854, 480);\r\n\t\tZL_Audio::Init();\r\n\t\tZL_Input::Init();\r\n\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t\tsndNormal = ZL_Sound(\"Data/ACCURACY.ogg\");\r\n\t\tsndFrequency = sndNormal.Clone();\r\n\t\tsndStream = ZL_Sound(\"Data/ACCURACY.ogg\", true);\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black); //clear whole screen\r\n\t\tif (Button(ZL_Rectf::BySize( 50.0f, 190.0f, 200.0f, 100.0f), \"Play Audio Sample\"))\r\n\t\t\tsndNormal.Play();\r\n\t\tif (Button(ZL_Rectf::BySize(300.0f, 190.0f, 200.0f, 100.0f), \"Play sample with\\ndifferent frequency\"))\r\n\t\t\tsndFrequency.SetSpeedFactor(RAND_RANGE(0.5,2.0)).Play();\r\n\t\tif (Button(ZL_Rectf::BySize(550.0f, 190.0f, 200.0f, 100.0f), \"Stream sound\\n(sounds the same, but\\nstreamed from disk,\\nintended for music)\"))\r\n\t\t\tsndStream.Play();\r\n\t}\r\n\r\n\t//extremely simple UI, draw a rectangle with text in it and return if it has been clicked\r\n\tbool Button(const ZL_Rectf& rec, const char* txt)\r\n\t{\r\n\t\tZL_Display::DrawRect(rec, ZLALPHA(.8), ZLALPHA(ZL_Input::Held(rec) ? .6 : (ZL_Input::Hover(rec) ? .3 : .1)));\r\n\t\tfnt.Draw(rec.Center(), txt, ZL_Origin::Center);\r\n\t\treturn (ZL_Input::Clicked(rec) != 0);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.5835294127464294, "alphanum_fraction": 0.6420168280601501, "avg_line_length": 31.05555534362793, "blob_id": "b2c04ea11b0c41a80385ad223f34d9d4028b3c1d", "content_id": "da51200c396263158bfe4d73fcc7195c707cb45f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2975, "license_type": "no_license", "max_line_length": 122, "num_lines": 90, "path": "/14-timer.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_MAIN 1\r\nstatic ZL_Font fnt;\r\n\r\nstruct sSceneMain : public ZL_Scene\r\n{\r\n\tbool FlagA, FlagD, FlagF;\r\n\tfloat ValB, ValC;\r\n\tint CountE;\r\n\tsSceneMain() : ZL_Scene(SCENE_MAIN), FlagA(true), FlagD(true), FlagF(true), ValB(0), ValC(0), CountE(0) { }\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\r\n\t\tfloat x = 40;\r\n\t\tZL_Display::FillCircle(x+95, 300, 50, (FlagA ? ZL_Color::Red : ZL_Color::Green));\r\n\t\tif (Button(ZL_Rectf::BySize(x, 100, 190, 100), \"[A]\\nClick here\\nto start a 1s timer\"))\r\n\t\t\tZL_Timer::AddSingleTimer(1000)->sigDone.connect(this, &sSceneMain::CallbackBasicTimer);\r\n\r\n\t\tx += 200;\r\n\t\tZL_Display::FillCircle(x+95, 300+ValB, 50, ZLWHITE);\r\n\t\tif (Button(ZL_Rectf::BySize(x, 100, 190, 100), \"[B]\\nClick here to\\nstart a transition\"))\r\n\t\t\tZL_Timer::AddTransitionFloat(&(ValB = 0.0f), 300.0f, 1000, 0, ZL_Timer::NoEasing);\r\n\r\n\t\tx += 200;\r\n\t\tZL_Display::FillCircle(x+95, 300+ValC, 50, ZLWHITE);\r\n\t\tif (Button(ZL_Rectf::BySize(x, 100, 190, 100), \"[C]\\nClick here to start\\na bouncy transition\"))\r\n\t\t\tZL_Timer::AddTransitionFloat(&(ValC = 0.0f), 300.0f, 1000, 0, ZL_Easing::OutBounce);\r\n\r\n\t\tx += 200;\r\n\t\tZL_Display::FillCircle(x+95, 300, 50, (FlagD ? ZL_Color::Red : ZL_Color::Green));\r\n\t\tif (Button(ZL_Rectf::BySize(x, 100, 190, 100), \"[D]\\nClick here to start a\\ntimer that is fired\\n4 times every 250 ms\"))\r\n\t\t\tZL_Timer::AddMultiTimer(250, 4)->sigCall.connect(this, &sSceneMain::CallbackMultiTimer);\r\n\r\n\t\tx += 200;\r\n\t\tfnt.Draw(x+95, 300, ZL_String::format(\"%d calls\", CountE), ZL_Origin::Center);\r\n\t\tif (Button(ZL_Rectf::BySize(x, 100, 190, 100), \"[E]\\nClick here to\\nstart a 1s ticker\"))\r\n\t\t\tZL_Timer::AddLimitedTicker(1000)->sigCall.connect(this, &sSceneMain::CallbackTicker);\r\n\r\n#ifdef ZL_LAMBDA_SUPPORT\r\n\t\tx += 200;\r\n\t\tZL_Display::FillCircle(x+95, 300, 50, (FlagF ? ZL_Color::Red : ZL_Color::Green));\r\n\t\tif (Button(ZL_Rectf::BySize(x, 100, 190, 100), \"[F]\\nClick here to start\\na 1s lambda timer\"))\r\n\t\t{\r\n\t\t\tZL_Timer::AddSingleTimer(1000)->sigDone.connect_lambda([this]()\r\n\t\t\t{\r\n\t\t\t\tFlagF ^= 1;\r\n\t\t\t});\r\n\t\t}\r\n#endif\r\n\t}\r\n\r\n\t//[A] timer callback function\r\n\tvoid CallbackBasicTimer()\r\n\t{\r\n\t\tFlagA ^= 1;\r\n\t}\r\n\r\n\t//[D] multi timer callback function\r\n\tvoid CallbackMultiTimer(ZL_RepeatingTimer* t)\r\n\t{\r\n\t\tFlagD ^= 1;\r\n\t}\r\n\r\n\t//[E] ticker callback function\r\n\tvoid CallbackTicker(ZL_RepeatingTimer*)\r\n\t{\r\n\t\tCountE++;\r\n\t}\r\n\r\n\t//extremely simple UI, draw a rectangle with text in it and return if it has been clicked\r\n\tbool Button(const ZL_Rectf& rec, const char* txt)\r\n\t{\r\n\t\tZL_Display::DrawRect(rec, ZLALPHA(.8), ZLALPHA(ZL_Input::Hover(rec) ? .3 : .1));\r\n\t\tfnt.Draw(rec.Center(), txt, ZL_Origin::Center);\r\n\t\treturn (ZL_Input::Clicked(rec) != 0);\r\n\t}\r\n} SceneMain;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Timers, Tickers, Transitions\", 1280, 720);\r\n\t\tZL_Timer::Init();\r\n\t\tZL_Input::Init();\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t\tZL_SceneManager::Init(SCENE_MAIN);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6259707808494568, "alphanum_fraction": 0.6505125761032104, "avg_line_length": 34.57954406738281, "blob_id": "42af8cf257bb8af6bb0f786e023856887d2dc37a", "content_id": "960174737e48de748ffd62bf86bf829f11e07af5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3219, "license_type": "no_license", "max_line_length": 116, "num_lines": 88, "path": "/24-networking-http.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "enum { SCENE_MAIN = 1 };\r\nstatic ZL_Font fnt;\r\n\r\nstruct sSceneMain : public ZL_Scene\r\n{\r\n\t//Construct the scene with its identifier\r\n\tsSceneMain() : ZL_Scene(SCENE_MAIN) { }\r\n\r\n\tZL_HttpConnection http;\r\n\tZL_Surface srfReceivedImage;\r\n\r\n\t//When receiving HTTP response, print header and content\r\n\tvoid OnHttpResponseString(int http_status, const ZL_String& html)\r\n\t{\r\n\t\tif (!IsActive()) return; //ignore if scene was switched\r\n\t\tMessage(ZL_String(\"HTTP HEADER: \") << \"Status: \" << http_status << \" - Length: \" << html.size());\r\n\t\tif (http_status != 200 || !html.length()) return;\r\n\t\tvector<ZL_String> lines = html.split(\"\\n\");\r\n\t\tfor (vector<ZL_String>::iterator it = lines.begin(); it != lines.end(); ++it) Message(ZL_String(\"HTTP: \") << *it);\r\n\t}\r\n\r\n\t//When receiving image data, load it into a surface (which is displayed in Draw())\r\n\tvoid OnHttpResponsePng(int http_status, const char* data, size_t size)\r\n\t{\r\n\t\tif (!IsActive()) return; //ignore if scene was switched\r\n\t\tMessage(ZL_String(\"HTTP HEADER: \") << \"Status: \" << http_status << \" - Length: \" << size);\r\n\t\tif (http_status != 200 || !size || !data) return;\r\n\t\tsrfReceivedImage = ZL_Surface(ZL_File(data, size)).SetOrigin(ZL_Origin::Center);\r\n\t}\r\n\r\n\t//Cleanup the networking connection object when leaving the scene (although not relevant to this sample)\r\n\tvoid DeInitAfterTransition()\r\n\t{\r\n\t\thttp = ZL_HttpConnection();\r\n\t}\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\r\n\t\t//Draw the message buffer\r\n\t\tZL_Display::DrawRect(40.0f, ZLFROMH(75.0f + 13*20), ZLFROMW(40.0f), ZLFROMH(60.0f), ZL_Color::White, ZLALPHA(.5));\r\n\t\tfor (vector<ZL_String>::iterator it = msgs.begin(); it != msgs.end(); ++it)\r\n\t\t\tfnt.Draw(50.0f, ZLFROMH(87.0f + (it - msgs.begin())*20), *it);\r\n\r\n\t\tif (Button(ZL_Rectf::BySize(80.0f, 40.0f, 300.0f, 60.0f), \"Make regular HTTP request\"))\r\n\t\t{\r\n\t\t\t//HTTP request txt\r\n\t\t\tMessage(\"Requesting text data...\");\r\n\t\t\thttp = ZL_HttpConnection(\"http://zillalib.github.io/TEST.TXT\");\r\n\t\t\thttp.sigReceivedString().connect(this, &sSceneMain::OnHttpResponseString);\r\n\t\t}\r\n\r\n\t\tif (Button(ZL_Rectf::BySize(474.0f, 40.0f, 300.0f, 60.0f), \"Request PNG image over HTTP\"))\r\n\t\t{\r\n\t\t\t//HTTP request png\r\n\t\t\tMessage(\"Requesting png data...\");\r\n\t\t\thttp = ZL_HttpConnection(\"http://zillalib.github.io/TEST.PNG\");\r\n\t\t\thttp.sigReceivedData().connect(this, &sSceneMain::OnHttpResponsePng);\r\n\t\t}\r\n\r\n\t\tsrfReceivedImage.Draw(427.0f, 70.0f);\r\n\t}\r\n\r\n\t//extremely simple UI, draw a rectangle with text in it and return if it has been clicked\r\n\tbool Button(const ZL_Rectf& rec, const char* txt)\r\n\t{\r\n\t\tZL_Display::DrawRect(rec, ZLALPHA(.8), ZLALPHA(ZL_Input::Held(rec) ? .6 : (ZL_Input::Hover(rec) ? .3 : .1)));\r\n\t\tfnt.Draw(rec.Center(), txt, ZL_Origin::Center);\r\n\t\treturn (ZL_Input::Down(rec) != 0);\r\n\t}\r\n\r\n\t//Simple message buffer\r\n\tvector<ZL_String> msgs;\r\n\tvoid Message(const ZL_String& s) { msgs.push_back(s); if (msgs.size() > 13) msgs.erase(msgs.begin()); }\r\n} SceneMain;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Networking HTTP Requests\", 854, 480);\r\n\t\tZL_Network::Init();\r\n\t\tZL_Input::Init();\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t\tZL_SceneManager::Init(SCENE_MAIN);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6342710852622986, "alphanum_fraction": 0.6841432452201843, "avg_line_length": 40.27027130126953, "blob_id": "1741ac4c9898c7c90c9b49f0928bed9a45a0fa0d", "content_id": "09f2b7f206a76a3a64434f4bb1a18d81c2216939", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1564, "license_type": "no_license", "max_line_length": 127, "num_lines": 37, "path": "/19-render-to-texture.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "struct sMain : public ZL_Application\r\n{\r\n\tZL_Surface srfBuffer;\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Render To Texture\", 854, 480);\r\n\t\tZL_Input::Init();\r\n\r\n\t\tsrfBuffer = ZL_Surface(256, 256); //initialize a 256x256 render target texture\r\n\t\tsrfBuffer.RenderToBegin(); //start drawing onto the render target texture\r\n\t\tZL_Display::ClearFill(ZL_Color::Red); //clear the texture to fully red\r\n\t\tZL_Surface srfLogo(\"Data/ZILLALIB.png\"); //load another surface texture\r\n\t\tsrfLogo.DrawTo( 10.0f, 10.0f, 90.0f, 90.0f); //draw the newly loaded surface texture into the buffer\r\n\t\tsrfLogo.DrawTo( 70.0f, 70.0f, 150.0f, 150.0f); //again\r\n\t\tsrfLogo.DrawTo(130.0f, 130.0f, 210.0f, 210.0f); //again\r\n\t\tsrfBuffer.RenderToEnd(); //end drawing to the texture\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tZL_Vector SurfacePosition(299.0f, 112.0f);\r\n\t\tif (ZL_Input::Held())\r\n\t\t{\r\n\t\t\t//Draw into the buffer while the mouse is pressed, draw in 1 pixel steps from the old mouse position to the current position\r\n\t\t\tZL_Vector MousePosInImage = ZL_Input::Pointer() - SurfacePosition, MouseMoveDir = ZL_Input::PointerDelta().VecNorm();\r\n\t\t\tscalar MouseMoveTotal = (ZL_Input::Down() ? 0.0f : ZL_Math::Max(ZL_Input::PointerDelta().GetLength() - 1.0f, 0.0f));\r\n\t\t\tsrfBuffer.RenderToBegin();\r\n\t\t\tfor (scalar i = 0; i <= MouseMoveTotal; i++)\r\n\t\t\t\tZL_Display::FillCircle(MousePosInImage - MouseMoveDir*i, 2.0f, ZL_Color::Blue);\r\n\t\t\tsrfBuffer.RenderToEnd();\r\n\t\t}\r\n\r\n\t\tZL_Display::ClearFill(ZL_Color::Black); //clear whole screen\r\n\t\tsrfBuffer.Draw(SurfacePosition);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.7602667212486267, "alphanum_fraction": 0.800628662109375, "avg_line_length": 1889.22216796875, "blob_id": "d3cb2bee287f44856160baca35fda644d2f4ae72", "content_id": "241c852b9722091aa3a8d3fd0d19470620ac6728", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 34042, "license_type": "no_license", "max_line_length": 3010, "num_lines": 18, "path": "/README.md", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "# List of ZillaLib Samples\r\n\r\nSample repository for the [ZillaLib](https://github.com/schellingb/ZillaLib) game creation C++ framework.\r\n\r\n&nbsp;|||\r\n:---:|:---:|:---:|\r\n**Empty Game**<p>A minimal empty game</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-01.png\">](https://zillalib.github.io/samples/?01)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?01)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-01_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-01_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-01.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-01_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-01_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-01_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/01-empty-game.inl)|**Scene Manager With A Single Scene**<p>Introduction to the scene manager</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-02.png\">](https://zillalib.github.io/samples/?02)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?02)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-02_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-02_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-02.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-02_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-02_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-02_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/02-scene-manager-with-a-single-scene.inl)|**Scene Manager With Two Scenes**<p>Multiple scenes and transitions</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-03.png\">](https://zillalib.github.io/samples/?03)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?03)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-03_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-03_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-03.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-03_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-03_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-03_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/03-scene-manager-with-two-scenes.inl)\r\n**Scene Manager With Crossfade**<p>Nicer scene transitions</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-04.png\">](https://zillalib.github.io/samples/?04)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?04)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-04_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-04_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-04.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-04_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-04_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-04_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/04-scene-manager-with-crossfade.inl)|**2D Geometry Drawing**<p>Drawing 2D shapes</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-05.png\">](https://zillalib.github.io/samples/?05)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?05)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-05_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-05_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-05.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-05_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-05_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-05_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/05-2d-geometry-drawing.inl)|**Input And Other Events**<p>Listening to input and screen events</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-06.png\">](https://zillalib.github.io/samples/?06)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?06)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-06_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-06_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-06.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-06_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-06_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-06_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/06-input-and-other-events.inl)\r\n**Surface Loading And Drawing**<p>Drawing images</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-07.png\">](https://zillalib.github.io/samples/?07)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?07)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-07_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-07_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-07.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-07_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-07_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-07_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/07-surface-loading-and-drawing.inl)|**Rotating And Scaling Surfaces**<p>Rotation and scaling of images</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-08.png\">](https://zillalib.github.io/samples/?08)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?08)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-08_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-08_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-08.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-08_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-08_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-08_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/08-rotating-and-scaling-surfaces.inl)|**Surface Batch Rendering**<p>Render the same texture a bunch of times</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-09.png\">](https://zillalib.github.io/samples/?09)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?09)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-09_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-09_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-09.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-09_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-09_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-09_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/09-surface-batch-rendering.inl)\r\n**Surface With Repeating Texture**<p>Repeating instead of stretching</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-10.png\">](https://zillalib.github.io/samples/?10)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?10)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-10_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-10_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-10.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-10_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-10_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-10_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/10-surface-with-repeating-texture.inl)|**Tiled Texture Surfaces**<p>Usable for tile sets or animations</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-11.png\">](https://zillalib.github.io/samples/?11)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?11)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-11_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-11_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-11.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-11_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-11_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-11_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/11-tiled-texture-surfaces.inl)|**Font Rendering**<p>Various font rendering features</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-12.png\">](https://zillalib.github.io/samples/?12)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?12)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-12_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-12_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-12.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-12_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-12_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-12_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/12-font-rendering.inl)\r\n**Easing**<p>For adding bounciness and other juicy effects</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-13.png\">](https://zillalib.github.io/samples/?13)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?13)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-13_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-13_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-13.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-13_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-13_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-13_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/13-easing.inl)|**Timer**<p>Call functions or change values over time</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-14.png\">](https://zillalib.github.io/samples/?14)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?14)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-14_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-14_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-14.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-14_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-14_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-14_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/14-timer.inl)|**2D Collision Tests**<p>Demonstrating various 2D collision checks</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-15.png\">](https://zillalib.github.io/samples/?15)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?15)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-15_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-15_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-15.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-15_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-15_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-15_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/15-collision-tests.inl)\r\n**Post Process Effect**<p>Full screen effect shaders</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-16.png\">](https://zillalib.github.io/samples/?16)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?16)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-16_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-16_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-16.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-16_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-16_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-16_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/16-post-process-effect.inl)|**Surface Shader**<p>Single surface fragment and vertex shaders</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-17.png\">](https://zillalib.github.io/samples/?17)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?17)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-17_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-17_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-17.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-17_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-17_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-17_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/17-surface-shader.inl)|**Render Clipping**<p>Temporarily limit rendering to a screen rectangle</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-18.png\">](https://zillalib.github.io/samples/?18)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?18)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-18_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-18_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-18.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-18_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-18_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-18_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/18-render-clipping.inl)\r\n**Render To Texture**<p>Draw onto a surface and change it over time</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-19.png\">](https://zillalib.github.io/samples/?19)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?19)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-19_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-19_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-19.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-19_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-19_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-19_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/19-render-to-texture.inl)|**Sound Samples**<p>Play back basic sound wave files</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-20.png\">](https://zillalib.github.io/samples/?20)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?20)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-20_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-20_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-20.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-20_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-20_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-20_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/20-sound-samples.inl)|**ImcSynthesizer Sound**<p>Built in synthesizer that plays from source code</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-21.png\">](https://zillalib.github.io/samples/?21)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?21)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-21_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-21_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-21.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-21_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-21_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-21_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/21-ImcSynthesizer-Sound.inl)\r\n**Particles**<p>Demonstrating basic 2D particle effects</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-22.png\">](https://zillalib.github.io/samples/?22)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?22)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-22_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-22_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-22.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-22_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-22_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-22_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/22-particles.inl)|**Networking Client/Server**<p>Hosting server, connecting client, data transfers</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-23.png\">](https://zillalib.github.io/samples/?23)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?23)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-23_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-23_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-23.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-23_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-23_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-23_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/23-networking-clientserver.inl)|**Networking HTTP**<p>Doing simple HTTP web server requests</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-24.png\">](https://zillalib.github.io/samples/?24)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?24)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-24_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-24_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-24.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-24_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-24_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-24_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/24-networking-http.inl)\r\n**Saving Loading Settings**<p>Platform independent settings storage</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-25.png\">](https://zillalib.github.io/samples/?25)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?25)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-25_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-25_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-25.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-25_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-25_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-25_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/25-saving-loading-settings.inl)|**Open Web Link**<p>Opening an URL in the browser</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-26.png\">](https://zillalib.github.io/samples/?26)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?26)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-26_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-26_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-26.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-26_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-26_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-26_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/26-open-web-link.inl)|**JSON Reader and Writer**<p>Sample of how to read and write JSON formatted data</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-27.png\">](https://zillalib.github.io/samples/?27)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?27)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-27_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-27_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-27.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-27_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-27_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-27_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/27-json-read-write.inl)\r\n**Advanced Polygon Usage**<p>Polygons with multi contours and extrusion</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-28.png\">](https://zillalib.github.io/samples/?28)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?28)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-28_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-28_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-28.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-28_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-28_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-28_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/28-advanced-polygon.inl)|**Blend Modes Visualizer**<p>Tool to visualize all kinds of blending modes</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-29.png\">](https://zillalib.github.io/samples/?29)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?29)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-29_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-29_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-29.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-29_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-29_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-29_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/29-blend-modes.inl)|**Simple Game**<p>A simple game showing off various features</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-30.png\">](https://zillalib.github.io/samples/?30)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?30)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-30_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-30_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-30.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-30_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-30_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-30_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/30-simple-game.inl)\r\n**Basic 3D**<p>Basic example of rendering a 3D scene</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-31.png\">](https://zillalib.github.io/samples/?31)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?31)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-31_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-31_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-31.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-31_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-31_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-31_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/31-basic-3d.inl)|**Advanced 3D Materials**<p>Using advanced and custom shaders for materials</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-32.png\">](https://zillalib.github.io/samples/?32)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?32)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-32_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-32_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-32.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-32_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-32_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-32_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/32-3d-materials.inl)|**3D Particles**<p>A simple effect using the 3D particle system</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-33.png\">](https://zillalib.github.io/samples/?33)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?33)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-33_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-33_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-33.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-33_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-33_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-33_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/33-3d-particles.inl)\r\n**Skeletal Mesh IK**<p>Loading a skeletal mesh with inverse kinematics</p>[<img src=\"https://zillalib.github.io/samples/ZillaLibSample-34.png\">](https://zillalib.github.io/samples/?34)<br>[- Run in Web Browser -](https://zillalib.github.io/samples/?34)<br>[Win32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-34_Win32.zip)/[Win64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-34_Win64.zip)/[Android](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-34.apk)<br>[MacOSX](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-34_osx.zip)/[Linux32](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-34_linux_x86_32.zip)/[Linux64](https://github.com/schellingb/ZillaLibSamples/releases/download/bin/ZillaLibSample-34_linux_x86_64.zip)<br>[View Source](https://github.com/schellingb/ZillaLibSamples/blob/master/34-skeletal-mesh-ik.inl)\r\n" }, { "alpha_fraction": 0.6603773832321167, "alphanum_fraction": 0.6745283007621765, "avg_line_length": 19.931034088134766, "blob_id": "875f826484ca1759a4b70dbe4f8abad2d15cbe0e", "content_id": "961fc46eddfcf531afcd2570aee23e6cf3d37bea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 636, "license_type": "no_license", "max_line_length": 76, "num_lines": 29, "path": "/10-surface-with-repeating-texture.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tZL_Surface srfPattern;\r\n\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\tvoid InitGlobal()\r\n\t{\r\n\t\tsrfPattern = ZL_Surface(\"Data/PATTERN.png\").SetTextureRepeatMode();\r\n\t}\r\n\r\n\t//Clear screen and draw the surface many times with batch rendering enabled\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tsrfPattern.DrawTo(0,0,ZLWIDTH,ZLHEIGHT);\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Surface with Repeating Texture\", 854, 480);\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6236335039138794, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 32.92727279663086, "blob_id": "98a4544ea8cb02c5bcbce153913f0cea7ae29c5b", "content_id": "e284d651982eebe65981255154d260ff6f3ca5a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3842, "license_type": "no_license", "max_line_length": 160, "num_lines": 110, "path": "/23-networking-clientserver.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "enum { SCENE_MAIN = 1 };\r\nstatic ZL_Font fnt;\r\n\r\nstruct sSceneMain : public ZL_Scene\r\n{\r\n\t//Construct the scene with its identifier\r\n\tsSceneMain() : ZL_Scene(SCENE_MAIN) { }\r\n\r\n#ifdef ZL_NO_SOCKETS\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tfnt.Draw(20.0f, 240.0f,\r\n\t\t\t\"On platforms with no direct udp socket access (web) the macro ZL_NO_SOCKETS is set.\" \"\\n\"\r\n\t\t\t\"On there we can't use the client/server model. Async HTTP connections are available everywhere\");\r\n\t}\r\n#else\r\n\r\n\tZL_Server server;\r\n\tZL_Client client;\r\n\r\n\t//Events listening to the networking events from server and client\r\n\tvoid OnServerConnect(const ZL_Peer &peer) { Message(ZL_String::format(\"Server: Connect From: %x\", peer.host)); }\r\n\tvoid OnServerDisconnect(const ZL_Peer &peer, unsigned int msg) { Message(ZL_String::format(\"Server: Disconnected Client: %x (CloseMsg %d)\", peer.host, msg)); }\r\n\tvoid OnServerReceive(const ZL_Peer &p, ZL_Packet &d) { Message(ZL_String::format(\"Server: Got: [%.*s] From: %x\", d.length, d.data, p.host)); }\r\n\tvoid OnClientDisconnect(unsigned int closemsg) { Message(ZL_String::format(\"Client: Disconnected (CloseMsg %d)\", closemsg)); }\r\n\r\n\t//As soon as the client is connected to the server, send a hello mesasge\r\n\tvoid OnClientConnect()\r\n\t{\r\n\t\tconst char* pcHelloMsg = \"Hello From ZL_Network\";\r\n\t\tMessage(ZL_String(\"Client: Connected - Sending Data [\") << pcHelloMsg << \"]\");\r\n\t\tclient.Send((void*)pcHelloMsg, strlen(pcHelloMsg));\r\n\t}\r\n\r\n\t//Cleanup the networking objects when leaving the scene (although not relevant to this sample)\r\n\tvoid DeInitAfterTransition()\r\n\t{\r\n\t\tserver = ZL_Server();\r\n\t\tclient = ZL_Client();\r\n\t}\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\r\n\t\t//Draw the message buffer\r\n\t\tZL_Display::DrawRect(40.0f, ZLFROMH(75.0f + 13*20), ZLFROMW(40.0f), ZLFROMH(60.0f), ZL_Color::White, ZLALPHA(.5));\r\n\t\tfor (vector<ZL_String>::iterator it = msgs.begin(); it != msgs.end(); ++it)\r\n\t\t\tfnt.Draw(50.0f, ZLFROMH(87.0f + (it - msgs.begin())*20), *it);\r\n\r\n\t\tif (Button(ZL_Rectf::BySize(80.0f, 40.0f, 300.0f, 60.0f), \"Start Server\"))\r\n\t\t{\r\n\t\t\tif (server.IsOpened())\r\n\t\t\t{\r\n\t\t\t\tserver.Close(2222);\r\n\t\t\t\tMessage(\"Server: Stopped\");\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tserver = ZL_Server(5234, 4);\r\n\t\t\t\tserver.sigConnected().connect(this, &sSceneMain::OnServerConnect);\r\n\t\t\t\tserver.sigReceived().connect(this, &sSceneMain::OnServerReceive);\r\n\t\t\t\tserver.sigDisconnected().connect(this, &sSceneMain::OnServerDisconnect);\r\n\t\t\t\tMessage(\"Server: Started\");\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tif (Button(ZL_Rectf::BySize(474.0f, 40.0f, 300.0f, 60.0f), \"Connect Client\"))\r\n\t\t{\r\n\t\t\tif (client.IsConnected())\r\n\t\t\t{\r\n\t\t\t\tMessage(\"Client: Disconnecting...\");\r\n\t\t\t\tclient.Disconnect(1111);\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tMessage(\"Client: Connecting to server...\");\r\n\t\t\t\tclient = ZL_Client(\"localhost\", 5234);\r\n\t\t\t\tclient.sigConnected().connect(this, &sSceneMain::OnClientConnect);\r\n\t\t\t\tclient.sigDisconnected().connect(this, &sSceneMain::OnClientDisconnect);\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\t//extremely simple UI, draw a rectangle with text in it and return if it has been clicked\r\n\tbool Button(const ZL_Rectf& rec, const char* txt)\r\n\t{\r\n\t\tZL_Display::DrawRect(rec, ZLALPHA(.8), ZLALPHA(ZL_Input::Held(rec) ? .6 : (ZL_Input::Hover(rec) ? .3 : .1)));\r\n\t\tfnt.Draw(rec.Center(), txt, ZL_Origin::Center);\r\n\t\treturn (ZL_Input::Down(rec) != 0);\r\n\t}\r\n\r\n\t//Simple message buffer\r\n\tvector<ZL_String> msgs;\r\n\tvoid Message(const ZL_String& s) { msgs.push_back(s); if (msgs.size() > 13) msgs.erase(msgs.begin()); }\r\n#endif\r\n} SceneMain;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Networking with Client/Server\", 854, 480);\r\n\t\tZL_Network::Init();\r\n\t\tZL_Input::Init();\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t\tZL_SceneManager::Init(SCENE_MAIN);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.606813371181488, "alphanum_fraction": 0.6678495407104492, "avg_line_length": 34.128204345703125, "blob_id": "37855c89844aa9a86e9e3d9aa49dfdd964f5694f", "content_id": "54c0fa6c776bfab0e85e75381499104cdb293cbe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1409, "license_type": "no_license", "max_line_length": 138, "num_lines": 39, "path": "/12-font-rendering.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\nZL_Font fntTex;\r\nZL_Font fntTTF;\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\t//Clear screen and draw some fonts\r\n\tvoid Draw()\r\n\t{ \r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tfntTex.Draw(20, 450, \"Bitmap Font Text\");\r\n\t\tfntTex.Draw(ZLHALFW, 420, \"Centered Text\", ZL_Origin::BottomCenter);\r\n\t\tfntTex.Draw(ZLFROMW(10), 390, \"Right Aligned Text\", ZL_Origin::BottomRight);\r\n\t\tfntTex.Draw(20, 310, \"Scaled Text\", 4, 2);\r\n\t\tfntTex.Draw(20, 270, \"Colored Text\", ZL_Color::Red);\r\n\t\tfntTex.Draw(20, 250, \"Colored Text\", ZL_Color::Green);\r\n\t\tfntTex.Draw(20, 230, \"Colored Text\", ZL_Color::Blue);\r\n\t\tfntTTF.Draw(20, 140, \"TRUE TYPE FONT TEXT\");\r\n\r\n\t\tZL_Display::DrawRect(18, 18, 202, 102, ZL_Color::White);\r\n\t\tfntTex.CreateBuffer(\"Hello World! Automatically word-wrapped text is supported.\", 180, true).Draw(20, 100, ZL_Origin::TopLeft);\r\n\r\n\t\tZL_Display::DrawRect(ZLFROMW(202), 18, ZLFROMW(18), 102, ZL_Color::White);\r\n\t\tfntTex.CreateBuffer(s(1), \"Word-wrapped text can be aligned horizontally, too.\", 180, true).Draw(ZLFROMW(20), 100, ZL_Origin::TopRight);\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Font Rendering\", 854, 480);\r\n\t\tfntTex = ZL_Font(\"Data/fntMain.png\");\r\n\t\tfntTTF = ZL_Font(\"Data/alphabot.ttf.zip\", 54);\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.3485267460346222, "alphanum_fraction": 0.5855977535247803, "avg_line_length": 80.39444732666016, "blob_id": "1e49b998f6c139567ed674e44d92f58fc3546ae8", "content_id": "b8a6bbb7a12c1ddb52e1ecf7a65c2b2a8074b2a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 14831, "license_type": "no_license", "max_line_length": 167, "num_lines": 180, "path": "/21-ImcSynthesizer-Sound.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "extern TImcSongData imcSongData, imcSFXData, imcDataBOOM;\r\nstatic ZL_SynthImcTrack imcSong(&imcSongData), imcSFX(&imcSFXData);\r\nstatic ZL_Sound sndConverted = ZL_SynthImcTrack::LoadAsSample(&imcDataBOOM);\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tZL_Font fnt;\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"ImcSynthesizer Sound\", 854, 480);\r\n\t\tZL_Audio::Init();\r\n\t\tZL_Input::Init();\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tif (Button(ZL_Rectf::BySize( 80.0f, 270.0f, 300.0f, 160.0f), \"Play IMC Music\\n(embedded inside\\nsource code)\\n\\n(sample song 1 by Reaby)\"))\r\n\t\t\t(imcSong.IsPlaying() ? imcSong.Stop() : imcSong.Play());\r\n\t\tif (Button(ZL_Rectf::BySize(474.0f, 270.0f, 300.0f, 160.0f), \"Play regular wave sample\\nconverted from IMC\\n(easier on CPU)\"))\r\n\t\t\tsndConverted.Play();\r\n\t\tif (Button(ZL_Rectf::BySize( 80.0f, 50.0f, 300.0f, 160.0f), \"Play IMC based\\nsound effect\"))\r\n\t\t\timcSFX.NoteOn(0, 72);\r\n\t\tif (Button(ZL_Rectf::BySize(474.0f, 50.0f, 300.0f, 160.0f), \"Play IMC based\\nsound effect in\\na random note\"))\r\n\t\t\timcSFX.NoteOn(0, ZL_Rand::Int(48,71));\r\n\t}\r\n\r\n\t//extremely simple UI, draw a rectangle with text in it and return if it has been clicked\r\n\tbool Button(const ZL_Rectf& rec, const char* txt)\r\n\t{\r\n\t\tZL_Display::DrawRect(rec, ZLALPHA(.8), ZLALPHA(ZL_Input::Held(rec) ? .6 : (ZL_Input::Hover(rec) ? .3 : .1)));\r\n\t\tfnt.Draw(rec.Center(), txt, ZL_Origin::Center);\r\n\t\treturn (ZL_Input::Down(rec) != 0);\r\n\t}\r\n} Main;\r\n\r\n// -------------------------------------------------- Sound data for Song --------------------------------------------------\r\nstatic const unsigned int IMCSONG_OrderTable[] = {\r\n\t0x011111103, 0x022211104, 0x011111103, 0x022211104, 0x011111253, 0x022211264, 0x011111253, 0x022211264,\r\n\t0x011111213, 0x022211224, 0x011111233, 0x022211244, 0x011111213, 0x022211224, 0x011111233, 0x022211244,\r\n\t0x011111253, 0x022211264, 0x011111253, 0x022211264, 0x033311275, 0x044411286, 0x011111291, 0x0222112A2,\r\n\t0x033311275, 0x044411286, 0x011111291, 0x0222112A2, 0x011111103, 0x022211104, 0x011111103, 0x022211104,\r\n\t0x011111103, 0x022211104, 0x011111103, 0x022211104,\r\n};\r\nstatic const unsigned char IMCSONG_PatternData[] = {\r\n\t0x30, 0, 0x30, 0x30, 0x30, 0, 0x30, 0x30, 0x28, 0, 0x28, 0x28, 0x28, 0, 0x28, 0x28,\r\n\t0x33, 0, 0x33, 0x33, 0x33, 0, 0x33, 0x33, 0x2A, 0, 0x2A, 0x2A, 0x2A, 0, 0x2A, 0x2A,\r\n\t0x30, 0, 0x40, 0, 0x30, 0, 0x40, 0, 0x28, 0, 0x38, 0, 0x28, 0, 0x38, 0,\r\n\t0x33, 0, 0x43, 0, 0x33, 0, 0x43, 0, 0x2A, 0, 0x3A, 0, 0x2A, 0, 0x3A, 0,\r\n\t0x33, 0, 0x43, 0, 0x33, 0, 0x43, 0, 0x2A, 0, 0x3A, 0, 0x2A, 0, 0x3A, 0,\r\n\t0x38, 0, 0x48, 0, 0x38, 0, 0x48, 0, 0x30, 0, 0x40, 0, 0x3A, 0, 0x4A, 0,\r\n\t0x60, 0, 0, 0, 0x57, 0, 0, 0x58, 0, 0, 0x53, 0, 0x50, 0x52, 0x53, 0,\r\n\t0, 0, 0, 0, 0x53, 0x55, 0x57, 0, 0x57, 0, 0, 0x55, 0, 0, 0x50, 0x52,\r\n\t0x53, 0, 0, 0, 0, 0, 0x52, 0, 0x53, 0, 0x55, 0x57, 0, 0, 0x58, 0,\r\n\t0x5A, 0, 0, 0x53, 0, 0, 0x4A, 0, 0x57, 0, 0, 0, 0x55, 0, 0, 0,\r\n\t0x60, 255, 0x57, 255, 0x50, 255, 0x47, 255, 0x63, 255, 0x58, 255, 0x50, 255, 0x48, 255,\r\n\t0x63, 255, 0x5A, 255, 0x57, 255, 0x53, 255, 0x4A, 255, 0x52, 255, 0x55, 255, 0x5A, 255,\r\n\t0x63, 0, 0, 0, 0x5A, 0, 0, 0, 0x57, 0, 0x55, 0x55, 0, 0, 0x50, 0x52,\r\n\t0x53, 0, 0x55, 0, 0x57, 0, 0x58, 0, 0x57, 0, 0x55, 0x55, 0, 0, 0x50, 0x52,\r\n\t0x53, 0, 0, 0, 0, 0, 0x52, 0, 0x53, 0, 0x55, 0x57, 0, 0, 0x57, 0x58,\r\n\t0x5A, 0, 0x60, 0, 0x62, 0, 0x63, 0, 0x5A, 0, 0, 0, 0, 0, 0, 0,\r\n\t0x50, 0, 0, 0, 0x50, 0, 0, 0, 0x50, 0, 0, 0, 0x50, 0, 0x50, 0,\r\n\t0x50, 0, 0, 0x50, 0x50, 0, 0, 0x50, 0x50, 0, 0, 0x50, 0x50, 0, 0, 0x50,\r\n\t0, 0, 0, 0, 0x40, 0, 0, 0, 0, 0, 0, 0, 0x40, 0, 0, 0,\r\n\t0, 0, 0x50, 0x50, 0, 0, 0x50, 0x50, 0, 0, 0x50, 0x50, 0, 0, 0x50, 0x50,\r\n\t0x50, 255, 0x50, 255, 0, 0x50, 255, 0, 0x48, 255, 0, 0x53, 255, 0, 0x48, 255,\r\n\t0x43, 255, 0x43, 255, 0, 0x43, 255, 0, 0x4A, 255, 0, 0x4A, 255, 0, 0x4A, 255,\r\n\t0x43, 255, 0x43, 255, 0, 0x43, 255, 0, 0x3A, 255, 0, 0x3A, 255, 0, 0x3A, 255,\r\n\t0x48, 255, 0x48, 255, 0, 0x48, 255, 0, 0x40, 255, 0, 0x40, 255, 0, 0x4A, 255,\r\n\t0x53, 255, 0x53, 255, 0, 0x53, 255, 0, 0x53, 255, 0, 0x58, 255, 0, 0x53, 255,\r\n\t0x57, 255, 0x57, 255, 0, 0x57, 255, 0, 0x52, 255, 0, 0x52, 255, 0, 0x52, 255,\r\n\t0x57, 255, 0x57, 255, 0, 0x57, 255, 0, 0x55, 255, 0, 0x55, 255, 0, 0x55, 255,\r\n\t0x58, 255, 0x58, 255, 0, 0x58, 255, 0, 0x57, 255, 0, 0x55, 255, 0, 0x55, 255,\r\n\t0x40, 0x47, 0x50, 0x60, 0x50, 0x40, 0x47, 0x50, 0x40, 0x48, 0x53, 0x63, 0x53, 0x48, 0x40, 0x48,\r\n\t0x43, 0x4A, 0x53, 0x63, 0x57, 0x53, 0x43, 0x47, 0x42, 0x4A, 0x52, 0x62, 0x5A, 0x55, 0x52, 0x4A,\r\n\t0x43, 0x53, 0x5A, 0x63, 0x43, 0x53, 0x5A, 0x63, 0x42, 0x52, 0x55, 0x62, 0x42, 0x52, 0x55, 0x62,\r\n\t0x40, 0x50, 0x58, 0x60, 0x40, 0x50, 0x58, 0x60, 0x40, 0x50, 0x57, 0x60, 0x42, 0x52, 0x5A, 0x62,\r\n};\r\nstatic const unsigned char IMCSONG_PatternLookupTable[] = { 0, 6, 16, 18, 19, 20, 24, 28, };\r\nstatic const TImcSongEnvelope IMCSONG_EnvList[] = {\r\n\t{ 0, 256, 434, 8, 16, 0, true, 255, }, { 0, 256, 130, 8, 16, 255, true, 255, }, { 0, 256, 64, 6, 18, 255, true, 255, },\r\n\t{ 0, 256, 152, 8, 16, 255, true, 255, }, { 0, 256, 9, 8, 255, 255, false, 3, }, { 0, 256, 523, 1, 23, 255, true, 255, },\r\n\t{ 128, 256, 174, 8, 16, 16, true, 255, }, { 0, 256, 871, 8, 16, 16, true, 255, }, { 0, 256, 523, 8, 16, 255, true, 255, },\r\n\t{ 0, 256, 64, 8, 16, 255, true, 255, }, { 0, 256, 228, 8, 16, 255, true, 255, }, { 0, 256, 136, 8, 16, 255, true, 255, },\r\n\t{ 128, 512, 2179, 0, 255, 255, true, 255, }, { 0, 256, 871, 24, 16, 255, true, 255, }, { 0, 256, 379, 8, 16, 255, true, 255, },\r\n\t{ 32, 256, 196, 8, 16, 255, true, 255, }, { 0, 256, 1089, 8, 255, 255, true, 255, }, { 0, 256, 182, 8, 16, 13, true, 255, },\r\n\t{ 0, 256, 1089, 8, 16, 16, true, 255, }, { 0, 256, 726, 8, 255, 255, true, 255, }, { 0, 256, 544, 8, 255, 255, true, 255, },\r\n\t{ 0, 256, 1089, 0, 255, 255, true, 255, }, { 0, 256, 209, 8, 16, 255, true, 255, },\r\n};\r\nstatic TImcSongEnvelopeCounter IMCSONG_EnvCounterList[] = {\r\n\t{ 0, 0, 256 }, { -1, -1, 256 }, { 1, 0, 256 }, { 2, 1, 248 }, { 3, 1, 256 }, { 4, 1, 256 }, { 5, 2, 158 }, { 6, 2, 256 },\r\n\t{ 7, 2, 256 }, { 8, 2, 256 }, { 9, 3, 256 }, { 10, 3, 256 }, { 11, 3, 256 }, { 12, 3, 320 }, { 13, 3, 0 }, { 14, 4, 256 },\r\n\t{ 15, 4, 256 }, { 16, 5, 256 }, { 17, 5, 256 }, { 18, 5, 256 }, { 19, 5, 256 }, { 20, 5, 256 }, { 21, 6, 128 }, { 17, 6, 256 },\r\n\t{ 18, 6, 256 }, { 18, 6, 256 }, { 19, 6, 256 }, { 20, 6, 256 }, { 22, 7, 256 }, { 17, 7, 256 }, { 18, 7, 256 }, { 18, 7, 256 },\r\n\t{ 19, 7, 256 }, { 20, 7, 256 },\r\n};\r\nstatic const TImcSongOscillator IMCSONG_OscillatorList[] = {\r\n\t{ 8, 2, IMCSONGOSCTYPE_SAW, 0, -1, 122, 1, 1 }, { 7, 0, IMCSONGOSCTYPE_SAW, 0, -1, 255, 1, 1 }, { 6, 0, IMCSONGOSCTYPE_SINE, 0, -1, 68, 1, 1 },\r\n\t{ 8, 0, IMCSONGOSCTYPE_SQUARE, 0, 0, 26, 1, 1 }, { 8, 0, IMCSONGOSCTYPE_SINE, 1, -1, 100, 1, 1 }, { 8, 0, IMCSONGOSCTYPE_SINE, 1, -1, 66, 1, 1 },\r\n\t{ 8, 0, IMCSONGOSCTYPE_SINE, 1, -1, 24, 1, 1 }, { 8, 0, IMCSONGOSCTYPE_SINE, 1, -1, 88, 4, 1 }, { 8, 0, IMCSONGOSCTYPE_SINE, 1, 5, 36, 1, 1 },\r\n\t{ 8, 0, IMCSONGOSCTYPE_NOISE, 1, 7, 48, 1, 1 }, { 5, 15, IMCSONGOSCTYPE_SINE, 2, -1, 72, 1, 7 }, { 8, 0, IMCSONGOSCTYPE_NOISE, 2, -1, 204, 8, 1 },\r\n\t{ 5, 227, IMCSONGOSCTYPE_SINE, 2, -1, 126, 9, 1 }, { 7, 93, IMCSONGOSCTYPE_SINE, 3, -1, 255, 11, 1 }, { 9, 162, IMCSONGOSCTYPE_SAW, 3, -1, 180, 12, 1 },\r\n\t{ 8, 0, IMCSONGOSCTYPE_NOISE, 3, -1, 108, 14, 1 }, { 8, 0, IMCSONGOSCTYPE_SAW, 3, 14, 196, 1, 13 }, { 8, 0, IMCSONGOSCTYPE_NOISE, 4, -1, 127, 1, 16 },\r\n\t{ 9, 0, IMCSONGOSCTYPE_SQUARE, 5, -1, 255, 18, 1 }, { 8, 127, IMCSONGOSCTYPE_SQUARE, 5, -1, 255, 1, 1 }, { 8, 0, IMCSONGOSCTYPE_SQUARE, 5, -1, 255, 1, 1 },\r\n\t{ 8, 0, IMCSONGOSCTYPE_SQUARE, 5, -1, 242, 19, 20 }, { 8, 0, IMCSONGOSCTYPE_SQUARE, 5, 21, 255, 21, 1 }, { 9, 2, IMCSONGOSCTYPE_SQUARE, 6, -1, 255, 23, 1 },\r\n\t{ 8, 2, IMCSONGOSCTYPE_SQUARE, 6, -1, 255, 1, 1 }, { 8, 2, IMCSONGOSCTYPE_SQUARE, 6, -1, 255, 1, 1 }, { 8, 2, IMCSONGOSCTYPE_SQUARE, 6, -1, 228, 24, 1 },\r\n\t{ 8, 2, IMCSONGOSCTYPE_SQUARE, 6, -1, 142, 25, 26 }, { 8, 2, IMCSONGOSCTYPE_SQUARE, 6, 26, 255, 1, 1 }, { 8, 2, IMCSONGOSCTYPE_SQUARE, 6, 27, 255, 27, 1 },\r\n\t{ 9, 2, IMCSONGOSCTYPE_SQUARE, 7, -1, 255, 29, 1 }, { 8, 2, IMCSONGOSCTYPE_SQUARE, 7, -1, 255, 1, 1 }, { 8, 2, IMCSONGOSCTYPE_SQUARE, 7, -1, 255, 1, 1 },\r\n\t{ 8, 2, IMCSONGOSCTYPE_SQUARE, 7, -1, 228, 30, 1 }, { 8, 2, IMCSONGOSCTYPE_SQUARE, 7, -1, 142, 31, 32 }, { 8, 2, IMCSONGOSCTYPE_SQUARE, 7, 33, 255, 1, 1 },\r\n\t{ 8, 2, IMCSONGOSCTYPE_SQUARE, 7, 34, 255, 33, 1 },\r\n};\r\nstatic const TImcSongEffect IMCSONG_EffectList[] = {\r\n\t{ 2921, 1671, 1, 0, IMCSONGEFFECTTYPE_OVERDRIVE, 0, 1 }, { 236, 156, 1, 0, IMCSONGEFFECTTYPE_RESONANCE, 2, 1 }, { 218, 247, 1, 1, IMCSONGEFFECTTYPE_RESONANCE, 1, 5 },\r\n\t{ 124, 0, 1, 1, IMCSONGEFFECTTYPE_LOWPASS, 1, 0 }, { 71, 0, 16536, 1, IMCSONGEFFECTTYPE_DELAY, 0, 0 }, { 2286, 2666, 1, 1, IMCSONGEFFECTTYPE_OVERDRIVE, 0, 1 },\r\n\t{ 2286, 3669, 1, 2, IMCSONGEFFECTTYPE_OVERDRIVE, 0, 1 }, { 76, 0, 1, 2, IMCSONGEFFECTTYPE_LOWPASS, 1, 0 }, { 241, 175, 1, 3, IMCSONGEFFECTTYPE_RESONANCE, 1, 1 },\r\n\t{ 159, 0, 1, 3, IMCSONGEFFECTTYPE_LOWPASS, 1, 0 }, { 255, 110, 1, 4, IMCSONGEFFECTTYPE_RESONANCE, 1, 1 }, { 227, 0, 1, 4, IMCSONGEFFECTTYPE_HIGHPASS, 1, 0 },\r\n\t{ 87, 0, 1, 5, IMCSONGEFFECTTYPE_LOWPASS, 1, 0 }, { 243, 187, 1, 5, IMCSONGEFFECTTYPE_RESONANCE, 1, 1 }, { 5842, 656, 1, 5, IMCSONGEFFECTTYPE_OVERDRIVE, 0, 1 },\r\n\t{ 87, 0, 1, 6, IMCSONGEFFECTTYPE_LOWPASS, 1, 0 }, { 243, 187, 1, 6, IMCSONGEFFECTTYPE_RESONANCE, 1, 1 }, { 5842, 656, 1, 6, IMCSONGEFFECTTYPE_OVERDRIVE, 0, 1 },\r\n\t{ 87, 0, 1, 7, IMCSONGEFFECTTYPE_LOWPASS, 1, 0 }, { 243, 187, 1, 7, IMCSONGEFFECTTYPE_RESONANCE, 1, 1 }, { 5842, 656, 1, 7, IMCSONGEFFECTTYPE_OVERDRIVE, 0, 1 },\r\n\t{ 128, 0, 16536, 7, IMCSONGEFFECTTYPE_DELAY, 0, 0 },\r\n};\r\nstatic unsigned char IMCSONG_ChannelVol[8] = { 171, 255, 255, 255, 255, 99, 102, 48 };\r\nstatic const unsigned char IMCSONG_ChannelEnvCounter[8] = { 0, 3, 6, 10, 15, 17, 22, 28 };\r\nstatic const bool IMCSONG_ChannelStopNote[8] = { false, true, true, true, true, true, true, true };\r\nTImcSongData imcSongData = {\r\n\t/*LEN*/ 0x24, /*ROWLENSAMPLES*/ 5512, /*ENVLISTSIZE*/ 23, /*ENVCOUNTERLISTSIZE*/ 34, /*OSCLISTSIZE*/ 37, /*EFFECTLISTSIZE*/ 22, /*VOL*/ 28,\r\n\tIMCSONG_OrderTable, IMCSONG_PatternData, IMCSONG_PatternLookupTable, IMCSONG_EnvList, IMCSONG_EnvCounterList, IMCSONG_OscillatorList, IMCSONG_EffectList,\r\n\tIMCSONG_ChannelVol, IMCSONG_ChannelEnvCounter, IMCSONG_ChannelStopNote };\r\n\r\n// -------------------------------------------------- Sound data for SFX --------------------------------------------------\r\nstatic const TImcSongEnvelope SFX_EnvList[] = {\r\n\t{ 0, 256, 64, 8, 16, 255, true, 255, }, { 0, 256, 64, 8, 16, 255, true, 255, }, { 200, 256, 64, 8, 16, 255, true, 255, },\r\n\t{ 0, 256, 87, 8, 16, 255, true, 255, }, { 0, 256, 348, 8, 16, 255, true, 255, },\r\n};\r\nstatic TImcSongEnvelopeCounter SFX_EnvCounterList[] = { { 0, 0, 256 }, { -1, -1, 256 }, { 1, 0, 256 }, { 2, 0, 256 }, { 3, 0, 256 }, { 4, 0, 256 }, };\r\nstatic const TImcSongOscillator SFX_OscillatorList[] = {\r\n\t{ 8, 0, IMCSONGOSCTYPE_SINE, 0, -1, 158, 1, 1 }, { 5, 15, IMCSONGOSCTYPE_SINE, 0, -1, 218, 2, 3 },\r\n\t{ 7, 0, IMCSONGOSCTYPE_SINE, 0, -1, 120, 4, 5 }, { 10, 0, IMCSONGOSCTYPE_SINE, 0, 0, 136, 1, 1 },\r\n};\r\nstatic const TImcSongEffect SFX_EffectList[] = { { 64, 0, 11024, 0, IMCSONGEFFECTTYPE_DELAY, 0, 0 }, };\r\nstatic unsigned char SFX_ChannelVol[8] = { 255, 176, 100, 100, 100, 100, 100, 100 };\r\nstatic const unsigned char SFX_ChannelEnvCounter[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };\r\nstatic const bool SFX_ChannelStopNote[8] = { true, false, false, false, false, false, false, false };\r\nTImcSongData imcSFXData = { /*LEN*/ 0, /*ROWLENSAMPLES*/ 0, /*ENVLISTSIZE*/ 5, /*ENVCOUNTERLISTSIZE*/ 6, /*OSCLISTSIZE*/ 4, /*EFFECTLISTSIZE*/ 1, /*VOL*/ 100,\r\n\tNULL, NULL, NULL, SFX_EnvList, SFX_EnvCounterList, SFX_OscillatorList, SFX_EffectList, SFX_ChannelVol, SFX_ChannelEnvCounter, SFX_ChannelStopNote };\r\n\r\n// -------------------------------------------------- Sound data for BOOM --------------------------------------------------\r\nstatic const unsigned int BOOM_OrderTable[] = { 0x000000011, };\r\nstatic const unsigned char BOOM_PatternData[] = { 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };\r\nstatic const unsigned char BOOM_PatternLookupTable[] = { 0, 1, 2, 2, 2, 2, 2, 2, };\r\nstatic const TImcSongEnvelope BOOM_EnvList[] = {\r\n\t{ 0, 386, 65, 8, 16, 255, true, 255, }, { 0, 256, 174, 8, 16, 255, true, 255, }, { 128, 256, 173, 8, 16, 255, true, 255, },\r\n\t{ 0, 128, 2615, 8, 16, 255, true, 255, }, { 0, 256, 348, 5, 19, 255, true, 255, }, { 0, 256, 418, 8, 16, 255, true, 255, },\r\n\t{ 0, 256, 87, 8, 16, 255, true, 255, }, { 0, 256, 228, 8, 16, 255, true, 255, }, { 0, 256, 1046, 24, 15, 255, true, 255, },\r\n\t{ 256, 512, 1046, 8, 16, 255, true, 255, }, { 0, 256, 523, 8, 16, 255, true, 255, }, { 0, 512, 11073, 0, 255, 255, true, 255, },\r\n};\r\nstatic TImcSongEnvelopeCounter BOOM_EnvCounterList[] = {\r\n\t{ 0, 0, 386 }, { 1, 0, 256 }, { 2, 0, 256 }, { 3, 0, 128 }, { -1, -1, 258 }, { 4, 0, 238 }, { -1, -1, 256 }, { 5, 0, 256 },\r\n\t{ 6, 1, 256 }, { 7, 1, 256 }, { 8, 1, 0 }, { 9, 1, 512 }, { 10, 1, 256 }, { 11, 1, 256 },\r\n};\r\nstatic const TImcSongOscillator BOOM_OscillatorList[] = {\r\n\t{ 5, 150, IMCSONGOSCTYPE_SINE, 0, -1, 255, 1, 2 }, { 9, 15, IMCSONGOSCTYPE_NOISE, 0, -1, 255, 3, 4 }, { 5, 200, IMCSONGOSCTYPE_SINE, 0, -1, 170, 5, 6 },\r\n\t{ 5, 174, IMCSONGOSCTYPE_SINE, 0, -1, 230, 7, 6 }, { 6, 238, IMCSONGOSCTYPE_SINE, 1, -1, 255, 9, 6 }, { 8, 0, IMCSONGOSCTYPE_NOISE, 1, -1, 142, 10, 11 },\r\n\t{ 8, 213, IMCSONGOSCTYPE_SAW, 1, -1, 38, 12, 6 }, { 8, 0, IMCSONGOSCTYPE_SAW, 1, 6, 90, 6, 13 }, { 8, 0, IMCSONGOSCTYPE_SINE, 2, -1, 100, 0, 0 },\r\n\t{ 8, 0, IMCSONGOSCTYPE_SINE, 3, -1, 100, 0, 0 }, { 8, 0, IMCSONGOSCTYPE_SINE, 4, -1, 100, 0, 0 }, { 8, 0, IMCSONGOSCTYPE_SINE, 5, -1, 100, 0, 0 },\r\n\t{ 8, 0, IMCSONGOSCTYPE_SINE, 6, -1, 100, 0, 0 }, { 8, 0, IMCSONGOSCTYPE_SINE, 7, -1, 100, 0, 0 },\r\n};\r\nstatic const TImcSongEffect BOOM_EffectList[] = {\r\n\t{ 113, 0, 1, 0, IMCSONGEFFECTTYPE_LOWPASS, 6, 0 }, { 220, 168, 1, 0, IMCSONGEFFECTTYPE_RESONANCE, 6, 6 },\r\n\t{ 241, 175, 1, 1, IMCSONGEFFECTTYPE_RESONANCE, 6, 6 }, { 159, 0, 1, 1, IMCSONGEFFECTTYPE_LOWPASS, 6, 0 },\r\n};\r\nstatic unsigned char BOOM_ChannelVol[8] = { 230, 128, 100, 100, 100, 100, 100, 100 };\r\nstatic const unsigned char BOOM_ChannelEnvCounter[8] = { 0, 8, 0, 0, 0, 0, 0, 0 };\r\nstatic const bool BOOM_ChannelStopNote[8] = { true, true, false, false, false, false, false, false };\r\nTImcSongData imcDataBOOM = {\r\n\t/*LEN*/ 0x1, /*ROWLENSAMPLES*/ 5512, /*ENVLISTSIZE*/ 12, /*ENVCOUNTERLISTSIZE*/ 14, /*OSCLISTSIZE*/ 14, /*EFFECTLISTSIZE*/ 4, /*VOL*/ 100,\r\n\tBOOM_OrderTable, BOOM_PatternData, BOOM_PatternLookupTable, BOOM_EnvList, BOOM_EnvCounterList, BOOM_OscillatorList, BOOM_EffectList,\r\n\tBOOM_ChannelVol, BOOM_ChannelEnvCounter, BOOM_ChannelStopNote };\r\n" }, { "alpha_fraction": 0.6687306761741638, "alphanum_fraction": 0.6873065233230591, "avg_line_length": 29.05769157409668, "blob_id": "4f4cd9fd1b73f24d3f7cc8643d989c7fa40b1860", "content_id": "b47d5ebd71626d91ffbf37d4b09ba9d66ffaa376", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1615, "license_type": "no_license", "max_line_length": 113, "num_lines": 52, "path": "/03-scene-manager-with-two-scenes.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_TITLE 1\r\n#define SCENE_GAME 2\r\nZL_Font fnt;\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\tint InitTransitionEnter(ZL_SceneType SceneTypeFrom, void* data) { return 500; }\r\n\tint DeInitTransitionLeave(ZL_SceneType SceneTypeTo) { return 500; }\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Blue);\r\n\t\tfnt.Draw(ZLHALFW, ZLHALFH, \"THIS IS THE GAME SCENE\", ZL_Origin::Center);\r\n\t}\r\n\tvoid DrawTransition(scalar f, bool IsLeaveTransition)\r\n\t{\r\n\t\tDraw();\r\n\t\tZL_Display::FillRect(0, 0, ZLWIDTH, ZLHEIGHT, ZLRGBA(0, 0, 0, f));\r\n\t}\r\n} sSceneGame;\r\n\r\nstruct sSceneTitle : public ZL_Scene\r\n{\r\n\tsSceneTitle() : ZL_Scene(SCENE_TITLE) { }\r\n\tint InitTransitionEnter(ZL_SceneType SceneTypeFrom, void* data) { return 500; }\r\n\tint DeInitTransitionLeave(ZL_SceneType SceneTypeTo) { return 500; }\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Cyan);\r\n\t\tfnt.Draw(ZLHALFW, ZLHALFH, \"TITLE SCENE\", ZL_Color::Black, ZL_Origin::Center);\r\n\t}\r\n\tvoid DrawTransition(scalar f, bool IsLeaveTransition)\r\n\t{\r\n\t\tDraw();\r\n\t\tZL_Display::FillRect(0, 0, ZLWIDTH, ZLHEIGHT, ZLRGBA(0, 0, 0, f));\r\n\t}\r\n} sSceneTitle;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Scene Manager With Two Scenes\", 854, 480);\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t\tZL_SceneManager::Init(SCENE_TITLE);\r\n\t\tZL_Display::sigPointerDown.connect(this, &sMain::OnPointerDown);\r\n\t}\r\n\tvoid OnPointerDown(ZL_PointerPressEvent& e)\r\n\t{\r\n\t\tZL_SceneManager::GoToScene(ZL_SceneManager::GetCurrent()->SceneType == SCENE_TITLE ? SCENE_GAME : SCENE_TITLE);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6302902102470398, "alphanum_fraction": 0.6704957485198975, "avg_line_length": 42.702701568603516, "blob_id": "84073fffed800ce34736551151cddc56406ca890", "content_id": "51f0d67a6de51bf2c25b18e7716eb4d8cd6388b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3308, "license_type": "no_license", "max_line_length": 150, "num_lines": 74, "path": "/32-3d-materials.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "static ZL_Camera Camera;\r\nstatic ZL_Light Light;\r\nstatic ZL_Mesh mshWall, mshGround;\r\nstatic ZL_RenderList RenderList;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tsMain() : ZL_Application() {}\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\t//Initialize the game with depth buffer, 3d rendering and shadow mapping\r\n\t\tZL_Display::Init(\"Advanced 3D Materials\", 1280, 720, ZL_DISPLAY_DEPTHBUFFER);\r\n\t\tZL_Display3D::Init();\r\n\t\tZL_Display3D::InitShadowMapping();\r\n\t\tZL_Input::Init();\r\n\r\n\t\tusing namespace ZL_MaterialModes;\r\n\r\n\t\t//Create a material for the wall with parallax and normal mapping\r\n\t\tZL_Material matWall = ZL_Material(MM_DIFFUSEMAP | MM_SPECULARSTATIC | MM_NORMALMAP | MM_PARALLAXMAP | MO_PRECISIONTANGENT)\r\n\t\t\t.SetDiffuseTexture(ZL_Surface(\"Data/BRICKS.png\").SetTextureRepeatMode())\r\n\t\t\t.SetNormalTexture(ZL_Surface(\"Data/BRICKS_N.png\").SetTextureRepeatMode())\r\n\t\t\t.SetParallaxTexture(ZL_Surface(\"Data/BRICKS_D.png\").SetTextureRepeatMode())\r\n\t\t\t.SetUniformFloat(Z3U_SPECULAR, 1.f)\r\n\t\t\t.SetUniformFloat(Z3U_PARALLAXSCALE, .1f);\r\n\r\n\t\t//Create a material with a custom diffuse color function that renders grass with a fake depth\r\n\t\tZL_Material matGround(MM_DIFFUSEFUNC | MR_TEXCOORD | MR_NORMAL | MR_CAMERATANGENT | MR_TIME | MO_PRECISIONTANGENT, \r\n\t\t\tZL_GLSL_IMPORTSNOISE()\r\n\r\n\t\t\t\"vec4 CalcDiffuse()\"\r\n\t\t\t\"{\"\r\n\t\t\t\t\"vec2 pbase = \" Z3V_TEXCOORD \" * 150., view_offset = (\" Z3S_CAMERATANGENT \".xy / \" Z3S_CAMERATANGENT \".z) * -4.;\"\r\n\r\n\t\t\t\t\"float n = 0.;\"\r\n\t\t\t\t\"for (float z = 0.; z < .999; z+=.1)\"\r\n\t\t\t\t\"{\"\r\n\t\t\t\t\t\"vec2 p = pbase + view_offset * z;\"\r\n\t\t\t\t\t\"float grass = max(snoise(p) - z, 0.) + max(snoise(p+1277.) - z, 0.) + max(snoise(p+5737.) - z, 0.);\"\r\n\t\t\t\t\t\"n = n * .7 + (grass / (1. - z));\"\r\n\t\t\t\t\"}\"\r\n\t\t\t\t\"n = min(n * .5, 1.);\"\r\n\t\t\t\t\"return vec4(vec3(n * (0.25 + snoise(\" Z3V_TEXCOORD \" * 100.) * .5), .2 + n * .75 ,0.), 1.);\"\r\n\t\t\t\"}\"\r\n\t\t);\r\n\r\n\t\t//Create a box mesh for the wall and a plane for the ground with the materials set up above\r\n\t\tmshWall = ZL_Mesh::BuildBox(ZLV3(2, .5, 3), matWall, ZLV3(0,0,3), ZLV(10, 10));\r\n\t\tmshGround = ZL_Mesh::BuildPlane(ZLV(10, 10), matGround, ZL_Vector3::Up, ZL_Vector3::Zero, ZLV(2, 2));\r\n\r\n\t\t//set up the light position, direction and cover area size\r\n\t\tLight.SetLookAt(ZL_Vector3(0, 15.0f, 10.f), ZL_Vector3::Zero).SetDirectionalLight(10);\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tif (ZL_Input::Down(ZLK_ESCAPE)) ZL_Application::Quit();\r\n\r\n\t\t//Update the camera position every frame referencing the mouse coordinates and use the mouse wheel to zoom\r\n\t\tstatic float CameraDistance = 5.f;\r\n\t\tif (ZL_Input::MouseWheel()) CameraDistance = ZL_Math::Clamp(CameraDistance * (ZL_Input::MouseWheel() > 0 ? .8f : 1.25f), 1.6384f, 19.073486328125f);\r\n\t\tfloat HoirzontalAngleRad = (ZL_Display::PointerX-ZLHALFW)/ZLHALFW*PI+PIHALF;\r\n\t\tfloat VerticalAngleRad = -((ZL_Display::PointerY-ZLHALFH)/ZLHALFH-.6f)*PIHALF*0.5f;\r\n\t\tCamera.SetLookAt(ZLV3(0,0,3) + ZL_Vector3::FromRotation(HoirzontalAngleRad, VerticalAngleRad) * CameraDistance, ZLV3(0,0,3));\r\n\r\n\t\t//Setup and draw our dynamic render list with our three meshes\r\n\t\tRenderList.Reset();\r\n\t\tRenderList.Add(mshGround, ZL_Matrix::Identity);\r\n\t\tRenderList.Add(mshWall, ZL_Matrix::MakeRotateZ(ZLTICKS*.0005f));\r\n\t\tZL_Display::ClearFill(ZL_Color::DarkBlue);\r\n\t\tZL_Display3D::DrawListWithLight(RenderList, Camera, Light);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6361185908317566, "alphanum_fraction": 0.6563342213630676, "avg_line_length": 20.484848022460938, "blob_id": "32286159393b5325693bff76702d36d9eecfc6a0", "content_id": "3bfd397189ebc15a55b64a80d2896fc0980c32e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 742, "license_type": "no_license", "max_line_length": 77, "num_lines": 33, "path": "/08-rotating-and-scaling-surfaces.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tZL_Surface srfLogo;\r\n\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\tvoid InitGlobal()\r\n\t{\r\n\t\tsrfLogo = ZL_Surface(\"Data/ZILLALIB.png\").SetDrawOrigin(ZL_Origin::Center);\r\n\t}\r\n\r\n\t//Clear screen and draw the surface\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tscalar rotation = s(ZLTICKS)/s(1000);\r\n\t\tscalar scale = 1 + (s(0.2)*ssin(rotation*3));\r\n\t\tsrfLogo.SetRotate(rotation);\r\n\t\tsrfLogo.SetScale(scale );\r\n\t\tsrfLogo.Draw(ZLHALFW, ZLHALFH);\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Rotating and Scaling Surfaces\", 854, 480);\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.5815562009811401, "alphanum_fraction": 0.6420749425888062, "avg_line_length": 34.914894104003906, "blob_id": "e15b78e94eb5217488ac15db3955050524d65024", "content_id": "c8cee963b4e12bb8350a3e8e78e9d1fb0e5212fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1735, "license_type": "no_license", "max_line_length": 130, "num_lines": 47, "path": "/16-post-process-effect.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tZL_PostProcess postproc;\r\n\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\tvoid InitEnter(ZL_SceneType SceneTypeFrom, void* data)\r\n\t{\r\n\t\tconst char postproc_fragment_shader_src[] = ZL_SHADER_SOURCE_HEADER(ZL_GLES_PRECISION_HIGH)\r\n\t\t\t\"uniform sampler2D u_texture;\"\r\n\t\t\t\"uniform float randx, randy;\"\r\n\t\t\t\"varying vec2 v_texcoord;\"\r\n\t\t\t\"float rand(vec2 co) { return fract(sin(dot(co.xy, vec2(12.9898,78.233))) * 43758.5453); }\"\r\n\t\t\t\"void main()\"\r\n\t\t\t\"{\"\r\n\t\t\t\t\"if (v_texcoord.y > 0.5) { gl_FragColor = texture2D(u_texture, v_texcoord); return; }\"\r\n\t\t\t\t\"vec2 v_texcoordmirror = vec2(v_texcoord.x, 1.0-v_texcoord.y);\"\r\n\t\t\t\t\"v_texcoordmirror.x += (rand(v_texcoord+vec2(randx)) - 0.5) * 0.02;\"\r\n\t\t\t\t\"v_texcoordmirror.y += (rand(v_texcoord+vec2(randy)) - 0.5) * 0.02;\"\r\n\t\t\t\t\"gl_FragColor = texture2D(u_texture, v_texcoordmirror);\"\r\n\t\t\t\t\"gl_FragColor.rgb *= v_texcoord.y;\"\r\n\t\t\t\t\"gl_FragColor.b += 0.3;\"\r\n\t\t\t\"}\";\r\n\t\tpostproc = ZL_PostProcess(postproc_fragment_shader_src, false, \"randx\", \"randy\");\r\n\t}\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tpostproc.Start();\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tZL_Display::FillRect(50.0f, 200.0f, ZLFROMW(50.0f), ZLFROMH(50.0f), ZLRGB(.1,.5,.1));\r\n\t\tZL_Display::FillRect(80.0f, 200.0f, ZLFROMW(80.0f), ZLFROMH(80.0f), ZLRGB(.1,.3,.1) );\r\n\t\tZL_Display::FillCircle(ZLHALFW + ssin(ZLSINCESECONDS(0)) * 400.0f, 350 + scos(ZLSINCESECONDS(0)) * 50.0f, 10, ZL_Color::Yellow);\r\n\t\tpostproc.Apply(ssin(ZLSINCESECONDS(0))*0.1f, scos(ZLSINCESECONDS(0))*0.1f);\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Post Process Effect\", 854, 480);\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.5952510833740234, "alphanum_fraction": 0.6230894923210144, "avg_line_length": 26.737255096435547, "blob_id": "785831dc9c71b1badc892885cd437446077db864", "content_id": "77104de10b88bb59728de46b161589c6876d85d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7328, "license_type": "no_license", "max_line_length": 132, "num_lines": 255, "path": "/30-simple-game.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "static ZL_Sound sndHitBall, sndBallOnWood;\r\nstatic ZL_Surface srfBall;\r\nstatic ZL_ParticleEffect particleSmoke, particleFire;\r\n\r\n//A block that can be destroyed, just a rectangle with a draw function\r\nstruct sBlock : public ZL_Rectf\r\n{\r\n\tsBlock(ZL_Vector pos) : ZL_Rectf(pos.x - 45, pos.y - 10, pos.x + 45, pos.y + 10) { }\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::DrawRect(*this, ZL_Color::Red, ZL_Color::Orange);\r\n\t}\r\n};\r\n\r\n//The players panel\r\nstruct sPanel : public ZL_Rectf\r\n{\r\n\tfloat width;\r\n\tsPanel() : ZL_Rectf(ZLHALFW - 50, 100, ZLHALFW + 50, 120), width(100) { }\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::DrawRect(*this, ZL_Color::Yellow, ZL_Color::Green);\r\n\t}\r\n};\r\n\r\n//The ball, the main actor in our game based on a 2d float coordinate\r\nstruct sBall : public ZL_Vector\r\n{\r\n\tbool glued; //If we are glued to the player panel\r\n\tZL_Vector angle; //Movement angle vector\r\n\tfloat speed, radius; //Settings\r\n\r\n\tsBall() : glued(true), speed(300), radius(10) { }\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tsrfBall.Draw(x, y);\r\n\t}\r\n\r\n\t//Handle bouncing off a rectangle (player or block)\r\n\tbool Collide(const ZL_Rectf& r)\r\n\t{\r\n\t\tif (x <= r.left-radius || x >= r.right+radius || y <= r.low-radius || y >= r.high+radius) return false;\r\n\t\twhile (x > r.left-radius && x < r.right+radius && y > r.low-radius && y < r.high+radius) *this += angle * -speed / 10 * ZLELAPSED;\r\n\t\tfloat colx = (x > r.right && angle.x < 0 ? x - r.right : (x < r.left && angle.x > 0 ? x - r.left : 0));\r\n\t\tfloat coly = (y > r.high && angle.y < 0 ? y - r.high : (y < r.low && angle.y > 0 ? y - r.low : 0));\r\n\t\tif (colx) angle.x *= -1;\r\n\t\tif (coly) angle.y *= -1;\r\n\t\tif (colx && coly) { float f = fabs(colx/coly); angle.x *= f; angle.y /= f; angle.Norm(); }\r\n\t\tif (angle.y < s(.2) && angle.y > s(-.2)) { angle.y = (coly < 0 ? s(-.3) : s(.3)); angle.Norm(); }\r\n\t\tspeed = MIN(1000, speed + 10);\r\n\t\treturn true;\r\n\t}\r\n\r\n\t//Handle moving with the player panel when glued to\r\n\tbool CalculateIsGlued(const sPanel& panel)\r\n\t{\r\n\t\tif (y < 0) glued = true;\r\n\t\tif (glued) { x = (panel.left + panel.right)/2; y = (panel.high+radius); }\r\n\t\treturn glued;\r\n\t}\r\n\r\n\t//Update position\r\n\tvoid CalculateMovement()\r\n\t{\r\n\t\t*this += angle * speed * ZLELAPSED;\r\n\t}\r\n\r\n\t//Handle bouncing off of walls\r\n\tbool CalculateHitWall()\r\n\t{\r\n\t\tbool hit = false;\r\n\t\tif (x > ZLFROMW(radius)) { x = ZLFROMW(radius); angle.x *= -1; speed = MAX(100, speed - 10); hit = true; }\r\n\t\telse if (x < radius) { x = radius; angle.x *= -1; speed = MAX(100, speed - 10); hit = true; }\r\n\t\tif (y > ZLFROMH(radius)) { y = ZLFROMH(radius); angle.y *= -1; speed = MAX(100, speed - 10); hit = true; }\r\n\t\treturn hit;\r\n\t}\r\n};\r\n\r\n//The world handles the main game state\r\nstatic struct sWorld\r\n{\r\n\tlist<sBlock> blocks;\r\n\tsPanel panel;\r\n\tsBall ball;\r\n\r\n\t//Reset all game parts, fill playfield with blocks\r\n\tvoid Init()\r\n\t{\r\n\t\tpanel = sPanel();\r\n\t\tball = sBall();\r\n\t\tblocks.clear();\r\n\t\tfor (float x = smod(ZLWIDTH, 100.0f) * 0.5f + 50; x < ZLFROMW(50); x += 100)\r\n\t\t\tfor (float y = 250; y < ZLFROMH(70); y += 30)\r\n\t\t\t\tblocks.push_back(sBlock(ZL_Vector(x, y)));\r\n\t}\r\n\r\n\t//Calculate the game state updates every frame\r\n\tvoid Calculate()\r\n\t{\r\n\t\tif (ball.CalculateIsGlued(panel) || blocks.empty()) return;\r\n\t\tfor (list<sBlock>::iterator itb = blocks.begin(); itb != blocks.end();)\r\n\t\t{\r\n\t\t\tif (ball.Collide(*itb))\r\n\t\t\t{\r\n\t\t\t\tsndBallOnWood.Play();\r\n\t\t\t\tparticleSmoke.Spawn(40, ball);\r\n\t\t\t\tparticleFire.Spawn(50, itb->Center(), 0, itb->Width(), itb->Height());\r\n\t\t\t\titb = blocks.erase(itb);\r\n\t\t\t}\r\n\t\t\telse ++itb;\r\n\t\t}\r\n\t\tball.CalculateMovement();\r\n\t\tif (ball.CalculateHitWall())\r\n\t\t{\r\n\t\t\tsndHitBall.Play();\r\n\t\t\tparticleSmoke.Spawn(40, ball);\r\n\t\t}\r\n\t\telse if (ball.Collide(panel))\r\n\t\t{\r\n\t\t\tball.angle.x += s(-1) + 2 * (ball.x - panel.left) / (panel.right - panel.left);\r\n\t\t\tball.angle.Norm();\r\n\t\t\tif (!ball.angle.y) ball.angle.y = s(.1);\r\n\t\t\tsndHitBall.Play();\r\n\t\t\tparticleSmoke.Spawn(40, ball);\r\n\t\t}\r\n\t}\r\n\r\n\t//Return if the player has won the game when all blocks are gone\r\n\tbool HasWon()\r\n\t{\r\n\t\treturn blocks.empty() && !particleFire.CountParticles();\r\n\t}\r\n\r\n\t//Draw the game state (blocks, player panel, ball, particles)\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::SetThickness(3);\r\n\t\tfor (list<sBlock>::iterator itb = blocks.begin(); itb != blocks.end(); ++itb)\r\n\t\t\titb->Draw();\r\n\t\tpanel.Draw();\r\n\t\tball.Draw();\r\n\t\tparticleSmoke.Draw();\r\n\t\tparticleFire.Draw();\r\n\t\tZL_Display::SetThickness(1);\r\n\t}\r\n\r\n\t//Handle input movement\r\n\tvoid MovePanel(float x)\r\n\t{\r\n\t\tpanel.left = x - panel.width/2;\r\n\t\tpanel.right = panel.left + panel.width;\r\n\t}\r\n\r\n\t//Handle firing the ball when glued to the player panel\r\n\tvoid Fire(const ZL_Vector& atpos)\r\n\t{\r\n\t\tif (ball.glued)\r\n\t\t{\r\n\t\t\tball.angle = (atpos - ball).Norm();\r\n\t\t\tball.glued = false;\r\n\t\t}\r\n\t}\r\n} World;\r\n\r\nenum { SCENE_GAME = 1 };\r\n\r\nstatic struct sSceneDemoGame : public ZL_Scene\r\n{\r\n\tsSceneDemoGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\tvoid InitEnter(ZL_SceneType SceneTypeFrom, void* data)\r\n\t{\r\n\t\tsndHitBall = ZL_Sound(\"Data/HitBall.ogg\");\r\n\t\tsndBallOnWood = ZL_Sound(\"Data/BallOnWood.ogg\");\r\n\t\tsrfBall = ZL_Surface(\"Data/ball.png\").SetDrawOrigin(ZL_Origin::Center);\r\n\t\tparticleSmoke = ZL_ParticleEffect(300, 150);\r\n\t\tparticleSmoke.AddParticleImage(ZL_Surface(\"Data/SPARK.png\").SetColor(ZLLUM(.5)), 1000);\r\n\t\tparticleSmoke.AddBehavior(new ZL_ParticleBehavior_LinearMove(30, 25));\r\n\t\tparticleSmoke.AddBehavior(new ZL_ParticleBehavior_LinearImageProperties(1, 0, s(1.1), s(.5)));\r\n\t\tparticleFire = ZL_ParticleEffect(500, 200);\r\n\t\tparticleFire.AddParticleImage(ZL_Surface(\"Data/SPARK.png\").SetColor(ZLRGB(1,.8,.1)), 1000);\r\n\t\tparticleFire.AddBehavior(new ZL_ParticleBehavior_LinearMove(30, 10));\r\n\t\tparticleFire.AddBehavior(new ZL_ParticleBehavior_LinearImageProperties(1, 0, 1, 3));\r\n\t}\r\n\r\n\tvoid DeInitAfterTransition()\r\n\t{\r\n\t\tsndHitBall = ZL_Sound();\r\n\t\tsndBallOnWood = ZL_Sound();\r\n\t\tsrfBall = ZL_Surface();\r\n\t\tparticleFire = ZL_ParticleEffect();\r\n\t\tparticleSmoke = ZL_ParticleEffect();\r\n\t}\r\n\r\n\tvoid StartGame()\r\n\t{\r\n\t\tWorld.Init();\r\n\t}\r\n\r\n\tvoid InitAfterTransition()\r\n\t{\r\n\t\tZL_Display::sigPointerDown.connect(this, &sSceneDemoGame::OnMouseDown);\r\n\t\tZL_Display::sigPointerMove.connect(this, &sSceneDemoGame::OnMouseMove);\r\n\t\tZL_Display::sigKeyDown.connect(this, &sSceneDemoGame::OnKeyDown);\r\n\t\tStartGame();\r\n\t}\r\n\r\n\tvoid DeInitLeave(ZL_SceneType SceneTypeTo)\r\n\t{\r\n\t\tZL_Display::AllSigDisconnect(this);\r\n\t}\r\n\r\n\tvoid OnMouseDown(ZL_PointerPressEvent& e)\r\n\t{\r\n\t\tif (e.y < 200) World.MovePanel(e.x);\r\n\t\telse World.Fire(e);\r\n\t}\r\n\r\n\tvoid OnMouseMove(ZL_PointerMoveEvent& e)\r\n\t{\r\n\t\tif (e.state) World.MovePanel(e.x);\r\n\t}\r\n\r\n\tvoid OnKeyDown(ZL_KeyboardEvent& e)\r\n\t{\r\n\t\tif (e.key == ZLK_ESCAPE) ZL_Application::Quit();\r\n\t}\r\n\r\n\tvoid Calculate()\r\n\t{\r\n\t\tWorld.Calculate();\r\n\t\tif (World.HasWon()) World.Init();\r\n\t}\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::FillGradient(0, 120, ZLWIDTH, ZLHEIGHT, ZLRGB(0,0,.3), ZLRGB(0,0,.3), ZLRGB(.4,.4,.4), ZLRGB(.4,.4,.4));\r\n\t\tZL_Display::FillGradient(0, 80, ZLWIDTH, 140, ZLRGB(0,0,0), ZLRGB(0,0,0), ZLRGB(.1,.1,.1), ZLRGB(.1,.1,.1));\r\n\t\tZL_Display::FillGradient(0, 0, ZLWIDTH, 80, ZLRGB(0,.2,.4), ZLRGB(0,.2,.4), ZLRGB(0,0,.3), ZLRGB(0,0,.3));\r\n\t\tWorld.Draw();\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sSimpleGame : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Simple Game\", 854, 480);\r\n\t\tZL_Audio::Init();\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} SimpleGame;\r\n" }, { "alpha_fraction": 0.6909849047660828, "alphanum_fraction": 0.7159979343414307, "avg_line_length": 40.64444351196289, "blob_id": "7d16a94c1d7553695e25d608116d0cf188fb9331", "content_id": "a3e34614cba147eda08f4f981f573275d6bf75d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1919, "license_type": "no_license", "max_line_length": 134, "num_lines": 45, "path": "/33-3d-particles.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "static ZL_Camera Camera;\r\nstatic ZL_ParticleEmitter Particle;\r\nstatic ZL_RenderList RenderList;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tsMain() : ZL_Application() {}\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\t//Initialize the game with depth buffer and 3d rendering\r\n\t\tZL_Display::Init(\"3D Particles\", 1280, 720, ZL_DISPLAY_DEPTHBUFFER);\r\n\t\tZL_Display3D::Init();\r\n\t\tZL_Input::Init();\r\n\r\n\t\t//Setup the particle effect with random initial velocity, animation sheet, fixed color and size over lifetime\r\n\t\tParticle = ZL_ParticleEmitter(1.f);\r\n\t\tParticle.SetSpawnVelocityRanges(ZLV3(-1,-1,1.5), ZLV3(1,1,2.5));\r\n\t\tParticle.SetAnimationSheet(ZL_Surface(\"Data/Fire.png\"), 4, 4);\r\n\t\tParticle.SetColor(ZLLUM(.3));\r\n\t\tParticle.SetLifetimeSize(.35f, 1.9f);\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tif (ZL_Input::Down(ZLK_ESCAPE)) ZL_Application::Quit();\r\n\r\n\t\t//Update the camera position every frame referencing the mouse coordinates and use the mouse wheel to zoom\r\n\t\tstatic float CameraDistance = 3.0f;\r\n\t\tif (ZL_Input::MouseWheel()) CameraDistance = ZL_Math::Clamp(CameraDistance * (ZL_Input::MouseWheel() > 0 ? .8f : 1.25f), 2.f, 20.f);\r\n\t\tfloat HoirzontalAngleRad = (ZL_Display::PointerX-ZLHALFW)/ZLHALFW*PI;\r\n\t\tfloat VerticalAngleRad = -((ZL_Display::PointerY-ZLHALFH)/ZLHALFH-.6f)*PIHALF*0.5f;\r\n\t\tCamera.SetLookAt(ZL_Vector3::FromRotation(HoirzontalAngleRad, VerticalAngleRad) * CameraDistance, ZL_Vector3::Zero);\r\n\r\n\t\t//Spawn a single particle every frame at a fixed position and then step the entire particle system\r\n\t\tParticle.Spawn(ZL_Vector3(0, 0, -1.f));\r\n\t\tParticle.Update(Camera); //pass camera to have the particle polygons 'look' at the camera\r\n\r\n\t\t//Setup and draw our dynamic render list with our particle effect on a black background\r\n\t\tRenderList.Reset();\r\n\t\tRenderList.Add(Particle, ZL_Matrix::Identity);\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tZL_Display3D::DrawList(RenderList, Camera);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6029055714607239, "alphanum_fraction": 0.6355932354927063, "avg_line_length": 29.769229888916016, "blob_id": "6ed57a4172db2aaad1fa4108848a852348a2b49e", "content_id": "3c12ae9ed73afb429ae04e084c0a5e280308b555", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 826, "license_type": "no_license", "max_line_length": 111, "num_lines": 26, "path": "/26-open-web-link.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "struct sMain : public ZL_Application\r\n{\r\n\tZL_Font fnt;\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Open Web Link\", 854, 480);\r\n\t\tZL_Input::Init();\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tif (Button(ZL_Rectf::BySize(277.0f, 160.0f, 300.0f, 160.0f), \"Open Web Link\\nhttps://zillalib.github.io/\"))\r\n\t\t\tZL_Application::OpenExternalUrl(\"https://zillalib.github.io/\");\r\n\t}\r\n\r\n\t//extremely simple UI, draw a rectangle with text in it and return if it has been clicked\r\n\tbool Button(const ZL_Rectf& rec, const char* txt)\r\n\t{\r\n\t\tZL_Display::DrawRect(rec, ZLALPHA(.8), ZLALPHA(ZL_Input::Held(rec) ? .6 : (ZL_Input::Hover(rec) ? .3 : .1)));\r\n\t\tfnt.Draw(rec.Center(), txt, ZL_Origin::Center);\r\n\t\treturn (ZL_Input::Down(rec) != 0);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.5587275624275208, "alphanum_fraction": 0.6370310187339783, "avg_line_length": 25.863636016845703, "blob_id": "e00403375a733821373ae76544c676b273376033", "content_id": "54e70217eaf0dfe782b4c0729488b0c96367a645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1226, "license_type": "no_license", "max_line_length": 85, "num_lines": 44, "path": "/11-tiled-texture-surfaces.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\n\r\nchar tilemap[] = {\r\n0, 0, 0, 0, 5, 1, 6, 0,\r\n1, 6, 0, 0, 2, 0, 2, 0,\r\n0, 3, 1, 1, 4, 0, 2, 0,\r\n0, 0, 0, 0, 0, 0, 2, 0 };\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tZL_Surface srfTiles, srfTilesUnclipped;\r\n\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\tvoid InitGlobal()\r\n\t{\r\n\t\tsrfTilesUnclipped = ZL_Surface(\"Data/TILES.png\");\r\n\t\tsrfTiles = ZL_Surface(\"Data/TILES.png\");\r\n\t\tsrfTiles.SetTilesetClipping(4, 4);\r\n\t}\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\t//draw the unclipped original surface for demonstration\r\n\t\tZL_Display::DrawRect(199, 99, 201+128, 101+128, ZL_Color::Red, ZL_Color::White);\r\n\t\tsrfTilesUnclipped.Draw(200, 100);\r\n\t\t//draw the tilemap of fixed size at fixed position\r\n\t\tfor (int i = 0; i < 8*4; i++)\r\n\t\t\tsrfTiles.SetTilesetIndex(tilemap[i]).Draw((i%8)*32.0f+200.0f, (i/8)*32.0f+300.0f);\r\n\t\t//animate the last 4 tiles of the tilemap use global ticks as timer\r\n\t\tsrfTiles.SetTilesetIndex(12+((ZLTICKS/100)%4));\r\n\t\tsrfTiles.Draw(400, 150);\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Tiled Texture Surfaces (Tilesets)\", 854, 480);\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.5829258561134338, "alphanum_fraction": 0.6456805467605591, "avg_line_length": 57.85365676879883, "blob_id": "0ad9cc13be3d6c9b4257a3b92b4c9896de1c6e04", "content_id": "54fc114e87122d5f2ce62f001850f02ceb6f8f74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9816, "license_type": "no_license", "max_line_length": 490, "num_lines": 164, "path": "/29-blend-modes.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "//This was supposed to be a bit more simple but it turned out that blending is quite a complex subject\r\n//So in the end this is just a visualization tool and not good sample code in itself\r\n\r\n//string and array representation of all blend equations (for display and looping)\r\nstatic const char* be_names[] = { \"ADD\", \"MIN\", \"MAX\", \"SUBTRACT\", \"REVERSE_SUBTRACT\" };\r\nstatic ZL_Display::BlendEquation bes[] = { ZL_Display::BLENDEQUATION_ADD, ZL_Display::BLENDEQUATION_MIN, ZL_Display::BLENDEQUATION_MAX, ZL_Display::BLENDEQUATION_SUBTRACT, ZL_Display::BLENDEQUATION_REVERSE_SUBTRACT };\r\n\r\n//string and array representation of all blend funcs (for display and looping)\r\nstatic const char* bf_names[] = { \"SRCALPHA\", \"INVSRCALPHA\", \"SRCCOLOR\", \"INVSRCCOLOR\", \"DESTCOLOR\", \"INVDESTCOLOR\", \"ZERO\", \"ONE\", \"DESTALPHA\", \"INVDESTALPHA\", \"CONSTCOLOR\", \"INVCONSTCOLOR\", \"CONSTALPHA\", \"INVCONSTALPHA\", \"SRCALPHASATURATE\" };\r\nstatic ZL_Display::BlendFunc bfs[] = { ZL_Display::BLEND_SRCALPHA, ZL_Display::BLEND_INVSRCALPHA, ZL_Display::BLEND_SRCCOLOR, ZL_Display::BLEND_INVSRCCOLOR, ZL_Display::BLEND_DESTCOLOR, ZL_Display::BLEND_INVDESTCOLOR, ZL_Display::BLEND_ZERO, ZL_Display::BLEND_ONE, ZL_Display::BLEND_DESTALPHA, ZL_Display::BLEND_INVDESTALPHA, ZL_Display::BLEND_CONSTCOLOR, ZL_Display::BLEND_INVCONSTCOLOR, ZL_Display::BLEND_CONSTALPHA, ZL_Display::BLEND_INVCONSTALPHA, ZL_Display::BLEND_SRCALPHASATURATE };\r\n\r\n//mode states, color states, surfaces and used font\r\nstatic int bf_rgb = 0, bf_alpha = 0, bm_alpha_src = -1, bm_alpha_dst = -1, bf_offset_src = 0, bf_offset_dst = 0, use_render_to_texture = 0;\r\nstatic float CD = 12.0f;\r\nstatic int colHues[] = { 0, 10, 8, 0 }, colSats[] = { 0, 12, 12, 0 }, colVals[] = { 6, 12, 12, 12 }, colAlphas[] = { 12, 12, 12, 12 };\r\nstatic ZL_Color GetHueSatVal(int num) { return ZLHSVA(colHues[num]/CD, colSats[num]/CD, colVals[num]/CD, colAlphas[num]/CD); }\r\nstatic ZL_Surface srfTests[5], *srfLayers[3], srfRenderTarget;\r\nstatic ZL_Font fnt;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tsMain() : ZL_Application(0) {}\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Blend Modes\", 1280, 720);\r\n\t\tZL_Input::Init();\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\").SetScale(0.6f).SetColor(ZLBLACK);\r\n\r\n\t\t//the render target with alpha channel that is used to demonstrate modes that rely on a target with alpha\r\n\t\tsrfRenderTarget = ZL_Surface(1024, 1024, true);\r\n\r\n\t\t//generate a test texture with one filled circle with 50% alpha\r\n\t\tsrfTests[0] = ZL_Surface(64, 64, true).SetTextureRepeatMode().SetOrigin(ZL_Origin::Center);\r\n\t\tsrfTests[0].RenderToBegin(true);\r\n\t\tZL_Display::FillCircle(32, 32, 32, ZL_Color(1,1,1,0.5));\r\n\t\tsrfTests[0].RenderToEnd();\r\n\r\n\t\t//generate a test texture with a gradient from black to white\r\n\t\tsrfTests[1] = ZL_Surface(64, 64).SetTextureRepeatMode().SetOrigin(ZL_Origin::Center);\r\n\t\tsrfTests[1].RenderToBegin(true);\r\n\t\tfor (float f = 0; f < 64; f += 1.0) ZL_Display::FillRect(0, f, 64, f+1, ZLLUM(f/64));\r\n\t\tsrfTests[1].RenderToEnd();\r\n\r\n\t\t//generate a test texture with three circles\r\n\t\tsrfTests[2] = ZL_Surface(64, 64, true).SetTextureRepeatMode().SetOrigin(ZL_Origin::Center);\r\n\t\tsrfTests[2].RenderToBegin(true);\r\n\t\tZL_Display::FillCircle(16, 16, 16, ZL_Color::Red);\r\n\t\tZL_Display::FillCircle(48, 16, 16, ZL_Color::Green);\r\n\t\tZL_Display::FillCircle(32, 48, 16, ZL_Color::Blue);\r\n\t\tsrfTests[2].RenderToEnd();\r\n\r\n\t\t//load a test texture from a file and scale it to be the same size as the other generated test textures\r\n\t\tsrfTests[3] = ZL_Surface(\"Data/ZILLALIB.png\").SetScaleTo(64, 64).SetTextureRepeatMode().SetOrigin(ZL_Origin::Center);\r\n\r\n\t\t//generate a test texture that is fully white\r\n\t\tsrfTests[4] = ZL_Surface(64, 64, true).SetTextureRepeatMode().SetOrigin(ZL_Origin::Center);\r\n\t\tsrfTests[4].RenderToBegin(true);\r\n\t\tZL_Display::ClearFill(ZLWHITE);\r\n\t\tsrfTests[4].RenderToEnd();\r\n\r\n\t\t//set the default surfaces for background, layer 1 and layer 2\r\n\t\tsrfLayers[0] = &srfTests[4];\r\n\t\tsrfLayers[1] = srfLayers[2] = &srfTests[0];\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Gray);\r\n\r\n\t\t//draw the main 8x8 grid of blended textures either to a texture or directly to the screen\r\n\t\tif (use_render_to_texture) srfRenderTarget.RenderToBegin(true);\r\n\t\tsrfLayers[0]->DrawTo(137.0f, 5.0f, 921.0f, 573.0f, GetHueSatVal(0));\r\n\t\tZL_Display::SetBlendEquationSeparate(bes[bf_rgb], bes[bf_alpha]);\r\n\t\tZL_Display::SetBlendConstantColor(GetHueSatVal(3));\r\n\t\tfor (int x = 0; x < 8 && bf_offset_src+x < (int)COUNT_OF(bfs); x++)\r\n\t\t{\r\n\t\t\tfor (int y = 0; y < 8 && bf_offset_dst+y < (int)COUNT_OF(bfs); y++)\r\n\t\t\t{\r\n\t\t\t\tZL_Display::SetBlendModeSeparate(bfs[bf_offset_src+x], bfs[bf_offset_dst+y], bfs[bm_alpha_src < 0 ? bf_offset_src+x : bm_alpha_src], bfs[bm_alpha_dst < 0 ? bf_offset_dst+y : bm_alpha_dst]);\r\n\t\t\t\tsrfLayers[1]->Draw(170.0f+98.0f*x, 540.0f-71.0f*y, GetHueSatVal(1));\r\n\t\t\t\tsrfLayers[2]->Draw(170.0f+98.0f*x+32, 540.0f-71.0f*y-5, GetHueSatVal(2));\r\n\t\t\t}\r\n\t\t}\r\n\t\tZL_Display::ResetBlendFunc();\r\n\t\tZL_Display::ResetBlendEquation();\r\n\t\tif (use_render_to_texture) srfRenderTarget.RenderToEnd();\r\n\t\tif (use_render_to_texture) srfRenderTarget.Draw(0, 0);\r\n\r\n\t\t//Draw the grid lines and the blend mode labels\r\n\t\tfor (int i = 0; i <= 8; i++)\r\n\t\t{\r\n\t\t\tif (i < 8 && bf_offset_src+i < (int)COUNT_OF(bfs)) fnt.Draw(185.0f+98.0f*i, 590, bf_names[bf_offset_src+i], ZL_Origin::Center);\r\n\t\t\tif (i < 8 && bf_offset_dst+i < (int)COUNT_OF(bfs)) fnt.Draw(125, 535.0f-71.0f*i, bf_names[bf_offset_dst+i], ZL_Origin::CenterRight);\r\n\t\t\tif (bf_offset_src+i <= (int)COUNT_OF(bfs)) ZL_Display::DrawLine(137.0f+98.0f*i, 0, 137.0f+98.0f*i, 610, fnt.GetColor());\r\n\t\t\tif (bf_offset_dst+i <= (int)COUNT_OF(bfs)) ZL_Display::DrawLine( 0, 573.0f-71.0f*i, 921.0f, 573.0f-71.0f*i, fnt.GetColor());\r\n\t\t}\r\n\r\n\t\t//Draw the render texture toggle\r\n\t\tif (Button(ZL_Rectf(10.0f, 620.0f, 160.0f, 620.0f+69.0f), \"Use render to texture\\n\\n(with empty alpha channel)\", (use_render_to_texture!=0))) use_render_to_texture ^= 1;\r\n\r\n\t\t//Draw the from/to toggles\r\n\t\tif (Button(ZL_Rectf(70.0f, 580.0f, 130.0f, 610.0f), \"FROM:\", (bf_offset_src!=0))) bf_offset_src = (bf_offset_src ? 0 : 8);\r\n\t\tif (Button(ZL_Rectf(10.0f, 580.0f, 65.0f, 610.0f), \"TO:\" , (bf_offset_dst!=0))) bf_offset_dst = (bf_offset_dst ? 0 : 8);\r\n\r\n\t\t//Draw the surface selection for background and the two layers\r\n\t\tfor (int layer = 0; layer <= 2; layer ++)\r\n\t\t{\r\n\t\t\tfnt.Draw(180.0f+170.0f+layer*370.0f, 705, (layer == 0 ? \"BACKGROUND\" : (layer == 1 ? \"LAYER1\" : \"LAYER2\")), ZL_Origin::Center);\r\n\t\t\tfor (int srf = 0; srf < (int)COUNT_OF(srfTests); srf++)\r\n\t\t\t\tif (Button(ZL_Rectf(180.0f+layer*370.0f+srf*70.0f, 620.0f, 180.0f+layer*370.0f+srf*70.0f+69.0f, 620.0f+69.0f), NULL, (srfLayers[layer] == &srfTests[srf]), &srfTests[srf]))\r\n\t\t\t\t\tsrfLayers[layer] = &srfTests[srf];\r\n\t\t}\r\n\r\n\t\t//Draw the color selection grid for the 4 colors\r\n\t\tfor (int c = 0; c < 4; c++)\r\n\t\t{\r\n\t\t\tfloat x = 1020.0f, y = 610.0f - 60.0f - c * 68.0f, bx = x;\r\n\t\t\tfnt.Draw(x-50.0f, y+30, (c == 0 ? \"BACK\\nGROUND\" : (c == 1 ? \"LAYER1\" : (c == 2 ? \"LAYER2\" : \"CONST\"))), ZL_Origin::CenterRight);\r\n\t\t\tfnt.Draw(x-10.0f, y+52, \"Hue\", ZL_Origin::CenterRight);\r\n\t\t\tfnt.Draw(x-10.0f, y+37, \"Sat\", ZL_Origin::CenterRight);\r\n\t\t\tfnt.Draw(x-10.0f, y+22, \"Val\", ZL_Origin::CenterRight);\r\n\t\t\tfnt.Draw(x-10.0f, y+ 7, \"Alpha\", ZL_Origin::CenterRight);\r\n\t\t\tfor (int a = 0; a <= 12; a++, bx += 19.0f)\r\n\t\t\t{\r\n\t\t\t\tif (Button(ZL_Rectf(bx, y+45.0f, bx+18.0f, y+59.0f), NULL, (colHues[c] == a), NULL, ZLHSV(a/CD,colSats[c]/CD,colVals[c]/CD))) colHues[c] = a;\r\n\t\t\t\tif (Button(ZL_Rectf(bx, y+30.0f, bx+18.0f, y+44.0f), NULL, (colSats[c] == a), NULL, ZLHSV(colHues[c]/CD,a/CD,colVals[c]/CD))) colSats[c] = a;\r\n\t\t\t\tif (Button(ZL_Rectf(bx, y+15.0f, bx+18.0f, y+29.0f), NULL, (colVals[c] == a), NULL, ZLHSV(colHues[c]/CD,colSats[c]/CD,a/CD))) colVals[c] = a;\r\n\t\t\t\tif (Button(ZL_Rectf(bx, y+ 0.0f, bx+18.0f, y+14.0f), NULL, (colAlphas[c] == a), NULL, ZLLUM(a/CD))) colAlphas[c] = a;\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\t//Draw the separate blend func selections\r\n\t\tfnt.Draw(1030.0f, 330.0f, \"Separate Src Alpha Func\", ZL_Origin::Center);\r\n\t\tfnt.Draw(1195.0f, 330.0f, \"Separate Dest Alpha Func\", ZL_Origin::Center);\r\n\t\tfor (int i = -1; i < (int)COUNT_OF(bfs); i++)\r\n\t\t{\r\n\t\t\tif (Button(ZL_Rectf( 960.0f, 293.0f-i*14.0f, 1100.0f, 293.0f-i*14.0f+13.0f), (i < 0 ? \"SAME AS RGB\" : bf_names[i]), (bm_alpha_src == i))) bm_alpha_src = i;\r\n\t\t\tif (Button(ZL_Rectf(1125.0f, 293.0f-i*14.0f, 1265.0f, 293.0f-i*14.0f+13.0f), (i < 0 ? \"SAME AS RGB\" : bf_names[i]), (bm_alpha_dst == i))) bm_alpha_dst = i;\r\n\t\t}\r\n\r\n\t\t//Draw the blend equation selections\r\n\t\tfnt.Draw(1030.0f, 83.0f, \"Blend Equation RGB\", ZL_Origin::Center);\r\n\t\tfnt.Draw(1195.0f, 83.0f, \"Blend Equation Alpha\", ZL_Origin::Center);\r\n\t\tfor (int i = 0; i < (int)COUNT_OF(bes); i++)\r\n\t\t{\r\n\t\t\tif (Button(ZL_Rectf( 960.0f, 5+i*14.0f, 1100.0f, 5+i*14.0f+13.0f), be_names[i], (bf_rgb == i))) bf_rgb = i;\r\n\t\t\tif (Button(ZL_Rectf(1125.0f, 5+i*14.0f, 1265.0f, 5+i*14.0f+13.0f), be_names[i], (bf_alpha == i))) bf_alpha = i;\r\n\t\t}\r\n\t}\r\n\r\n\t//extremely simple UI, draw a rectangle with text, image or color in it and return if it has been clicked\r\n\tbool Button(const ZL_Rectf& rec, const char* txt, bool toggled = false, ZL_Surface* surface = NULL, const ZL_Color& color_add = ZLTRANSPARENT)\r\n\t{\r\n\t\tZL_Color fill = ZLALPHA(ZL_Input::Held(rec) ? .6 : (ZL_Input::Hover(rec) ? .3 : .1));\r\n\t\tif (color_add.a) fill -= ZLRGBA(1-color_add.r,1-color_add.g,1-color_add.b,-.5);\r\n\t\telse if (surface) fill -= ZLRGBA(1,.5,1,-.5);\r\n\t\telse if (toggled) fill -= ZLRGBA(.6,.6,.3,0);\r\n\t\tZL_Display::DrawRect(rec, (toggled ? ZLRGBA(1,0,0,.8) : ZLALPHA(.8)), fill);\r\n\t\tif (txt && txt[0]) fnt.Draw(rec.Center(), txt, ZL_Origin::Center);\r\n\t\tif (surface) surface->Draw(rec.Center());\r\n\t\treturn (ZL_Input::Down(rec) != 0);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6913390755653381, "alphanum_fraction": 0.7131449580192566, "avg_line_length": 43.22222137451172, "blob_id": "4efefa1db2ce4964b175776fab5390818a63c212", "content_id": "beed3116cf967e837a9f0574a7600d6d12d600eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3256, "license_type": "no_license", "max_line_length": 161, "num_lines": 72, "path": "/31-basic-3d.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "enum { SCENE_GAME = 1 };\r\nstatic ZL_Camera Camera;\r\nstatic ZL_Light Light;\r\nstatic ZL_Mesh mshPlanet, mshSun, mshSky;\r\nstatic ZL_RenderList RenderList;\r\nstatic float CameraDistance = 3.0f;\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\tvirtual void InitGlobal()\r\n\t{\r\n\t\t//Load planet model data\r\n\t\tmshPlanet = ZL_Mesh::FromPLY(\"Data/lowpolyplanet.ply.zip\");\r\n\r\n\t\t//Create a material for the planet which uses the vertex colors defined in the model data and renders it with the shadow mapping with a lot of shininess\r\n\t\tusing namespace ZL_MaterialModes;\r\n\t\tmshPlanet.SetMaterial(ZL_Material(MM_VERTEXCOLOR | MM_SPECULARSTATIC).SetUniformFloat(Z3U_SPECULAR, 1.0f).SetUniformFloat(Z3U_SHININESS, 16.0f));\r\n\r\n\t\t//Make a sphere mesh for the sun and set its material as unlit glowing orange\r\n\t\tmshSun = ZL_Mesh::BuildSphere(1, 23).SetMaterial(0, ZL_Material(MM_STATICCOLOR | MO_UNLIT | MO_CASTNOSHADOW).SetUniformVec4(Z3U_COLOR, ZL_Color::Orange));\r\n\r\n\t\t//Make an inverted sphere for the background sky map with a custom shaded material that draws noise as stars\r\n\t\tmshSky = ZL_Mesh::BuildSphere(20, 20, true).SetMaterial(0, ZL_Material(MM_DIFFUSEFUNC | MR_TEXCOORD | MO_UNLIT | MO_CASTNOSHADOW,\r\n\t\t\tZL_GLSL_IMPORTSNOISE()\r\n\t\t\t\"vec4 CalcDiffuse()\"\r\n\t\t\t\"{\"\r\n\t\t\t\t\"float s = clamp((snoise(\" Z3V_TEXCOORD \"*250.0)-.95)*15.,0.,1.);\"\r\n\t\t\t\t\"return vec4(s,s,s,1);\"\r\n\t\t\t\"}\"\r\n\t\t));\r\n\r\n\t\t//set up the light position, direction and color\r\n\t\tLight.SetLookAt(ZL_Vector3(0, 15.0f, 0), ZL_Vector3::Zero).SetDirectionalLight(2.f).SetColor(ZLRGB(1,1,.9));\r\n\t}\r\n\r\n\tvirtual void Draw()\r\n\t{\r\n\t\tif (ZL_Input::Down(ZLK_ESCAPE)) ZL_Application::Quit();\r\n\r\n\t\t//Update the camera position every frame referencing the mouse coordinates and use the mouse wheel to zoom\r\n\t\tif (ZL_Input::MouseWheel()) CameraDistance = ZL_Math::Clamp(CameraDistance * (ZL_Input::MouseWheel() > 0 ? .8f : 1.25f), 2.f, 20.f);\r\n\t\tCamera.SetPosition(ZL_Vector3::FromRotation((ZL_Display::PointerX-ZLHALFW)/ZLHALFW*PI, -(ZL_Display::PointerY-ZLHALFH)/ZLHALFH*PIHALF*0.99f) * CameraDistance);\r\n\t\tCamera.SetDirection(-Camera.GetPosition().VecNorm());\r\n\r\n\t\t//When clicking with the left mouse button, apply the current camera location/direction to the light\r\n\t\tif (ZL_Input::Down()) Light.SetPosition(Camera.GetPosition()).SetDirection(Camera.GetDirection());\r\n\r\n\t\t//Setup and draw our dynamic render list with our three meshes\r\n\t\tRenderList.Reset();\r\n\t\tRenderList.Add(mshSky, ZL_Matrix::Identity); //always untransformed at the center\r\n\t\tRenderList.Add(mshPlanet, ZL_Matrix::MakeRotateZ(ZLSECONDS*.3f)); //at the center with a rotation based on time\r\n\t\tRenderList.Add(mshSun, ZL_Matrix::MakeTranslate(Light.GetPosition())); //draw the sun at the lights position\r\n\t\tZL_Display3D::DrawListWithLight(RenderList, Camera, Light); //draw the list with shadow mapping\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tsMain() : ZL_Application() {}\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\t//Initialize the game with depth buffer, 3d rendering and shadow mapping\r\n\t\tZL_Display::Init(\"Basic 3D\", 1280, 720, ZL_DISPLAY_DEPTHBUFFER);\r\n\t\tZL_Display3D::Init();\r\n\t\tZL_Display3D::InitShadowMapping();\r\n\t\tZL_Input::Init();\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6521164178848267, "alphanum_fraction": 0.6640211343765259, "avg_line_length": 19, "blob_id": "b658f29d583a8b4ed54d45cd2e72099a3d6b492d", "content_id": "6cb891068637b86b20f21ae9c5f139684876171a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 756, "license_type": "no_license", "max_line_length": 60, "num_lines": 36, "path": "/07-surface-loading-and-drawing.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tZL_Surface srfLogo;\r\n\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\t//Load surface texture on entering the scene\r\n\tvoid InitEnter(ZL_SceneType SceneTypeFrom, void* data)\r\n\t{\r\n\t\tsrfLogo = ZL_Surface(\"Data/ZILLALIB.png\");\r\n\t}\r\n\r\n\t//Unload surface texture on eventual leaving of the scene\r\n\tvoid DeInitAfterTransition()\r\n\t{\r\n\t\tsrfLogo = ZL_Surface();\r\n\t}\r\n\r\n\t//Clear screen and draw the surface\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tsrfLogo.Draw(0, 0);\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Surface Loading and Drawing\", 854, 480);\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.5973154306411743, "alphanum_fraction": 0.6375839114189148, "avg_line_length": 19.285715103149414, "blob_id": "80cf8bb4c4f789f4845d5cdaabe942bb858db97c", "content_id": "527dd82d88e94d897f15abd91910d424afa823e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 149, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/01-empty-game.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "struct sEmptyGame : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Empty Game\", 854, 480);\r\n\t}\r\n} EmptyGame;\r\n" }, { "alpha_fraction": 0.6461336612701416, "alphanum_fraction": 0.6815203428268433, "avg_line_length": 31.91111183166504, "blob_id": "31acc67b4765e69d10c283dcc3366e9764fdc9bc", "content_id": "128a090425daa4cd37305c42dd91b637943d05f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1526, "license_type": "no_license", "max_line_length": 97, "num_lines": 45, "path": "/25-saving-loading-settings.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "enum { SCENE_MAIN = 1 };\r\nstatic ZL_Font fnt;\r\n\r\nstruct sSceneMain : public ZL_Scene\r\n{\r\n\tsSceneMain() : ZL_Scene(SCENE_MAIN) { }\r\n\tZL_String GetTextData;\r\n\tfloat GetNumericalData;\r\n\tvector<unsigned char> GetBinaryData;\r\n\r\n\tvirtual void InitGlobal()\r\n\t{\r\n\t\t//write data to settings file\r\n\t\tunsigned char binary_data[] = { 1, 2, 3, 4 };\r\n\t\tZL_Application::SettingsSet(\"TextData\", \"Hello\");\r\n\t\tZL_Application::SettingsSet(\"NumericalData\", ZL_Rand::Range(1, 10));\r\n\t\tZL_Application::SettingsSet(\"BinaryData\", ZL_Base64::Encode(binary_data, sizeof(binary_data)));\r\n\t\tZL_Application::SettingsSynchronize();\r\n\r\n\t\t//read data from settings file\r\n\t\tGetTextData = ZL_Application::SettingsGet(\"TextData\");\r\n\t\tGetNumericalData = ZL_Application::SettingsGet(\"NumericalData\");\r\n\t\tZL_Base64::Decode(ZL_Application::SettingsGet(\"BinaryData\"), GetBinaryData);\r\n\t}\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\t//show the data read from the settings file\r\n\t\tfnt.Draw(100, 400, \"TextData:\"); fnt.Draw(400, 400, GetTextData);\r\n\t\tfnt.Draw(100, 250, \"NumericalData:\"); fnt.Draw(400, 250, ZL_String(GetNumericalData));\r\n\t\tfnt.Draw(100, 100, \"BinaryData Length:\"); fnt.Draw(400, 100, ZL_String(GetBinaryData.size()));\r\n\t}\r\n} SceneMain;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Saving/Loading Settings\", 854, 480);\r\n\t\tZL_Application::SettingsInit(\"GameSettings\");\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t\tZL_SceneManager::Init(SCENE_MAIN);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6797870397567749, "alphanum_fraction": 0.7256008982658386, "avg_line_length": 28.536945343017578, "blob_id": "47153326f816c44d982ed68254020f64bc8301dd", "content_id": "32fa1f180af8607677478144746a26bb174fa36e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6199, "license_type": "no_license", "max_line_length": 117, "num_lines": 203, "path": "/ZillaLibSampleMain.cpp", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "/*\r\n The samples in this directory tree are for demonstrating and testing\r\n the functionality of ZillaLib, and are placed in the public domain.\r\n*/\r\n\r\n#include <ZL_Application.h>\r\n#include <ZL_Display.h>\r\n#include <ZL_Surface.h>\r\n#include <ZL_Signal.h>\r\n#include <ZL_Audio.h>\r\n#include <ZL_Font.h>\r\n#include <ZL_Scene.h>\r\n#include <ZL_Input.h>\r\n#include <ZL_Display3D.h>\r\n#include <ZL_Timer.h>\r\n#include <ZL_Particles.h>\r\n#include <ZL_Math.h>\r\n#include <ZL_Data.h>\r\n#include <ZL_Network.h>\r\n#include <ZL_SynthImc.h>\r\n\r\n#include <iostream>\r\n#include <map>\r\n#include <vector>\r\n#include <algorithm>\r\nusing namespace std;\r\n\r\n#ifndef ZILLALIBSAMPLES_NUMBER\r\n#define ZILLALIBSAMPLES_NUMBER 34\r\n#define ZILLALIBSAMPLES_HASDATA 1\r\n#endif\r\n\r\n#if defined(NDEBUG) && !defined(__SMARTPHONE__) && !defined(__WEBAPP__) && ZILLALIBSAMPLES_HASDATA\r\n//Override ZL_Display::Init to automatically call ZL_Application::LoadReleaseDesktopDataBundle in release builds\r\nstruct ZL_Display_Sample : public ZL_Display\r\n{\r\n\tstatic inline bool Init(const char* title, int width = 640, int height = 480, int displayflags = ZL_DISPLAY_DEFAULT)\r\n\t{\r\n\t\tif (!ZL_Application::LoadReleaseDesktopDataBundle()) { exit(0); return false; }\r\n\t\treturn ZL_Display::Init(title, width, height, displayflags);\r\n\t}\r\n};\r\n#define ZL_Display ZL_Display_Sample\r\n#endif\r\n\r\n#if ZILLALIBSAMPLES_NUMBER == 1\r\n#include \"01-empty-game.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 2\r\n#include \"02-scene-manager-with-a-single-scene.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 3\r\n#include \"03-scene-manager-with-two-scenes.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 4\r\n#include \"04-scene-manager-with-crossfade.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 5\r\n#include \"05-2d-geometry-drawing.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 6\r\n#include \"06-input-and-other-events.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 7\r\n#include \"07-surface-loading-and-drawing.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 8\r\n#include \"08-rotating-and-scaling-surfaces.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 9\r\n#include \"09-surface-batch-rendering.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 10\r\n#include \"10-surface-with-repeating-texture.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 11\r\n#include \"11-tiled-texture-surfaces.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 12\r\n#include \"12-font-rendering.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 13\r\n#include \"13-easing.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 14\r\n#include \"14-timer.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 15\r\n#include \"15-collision-tests.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 16\r\n#include \"16-post-process-effect.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 17\r\n#include \"17-surface-shader.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 18\r\n#include \"18-render-clipping.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 19\r\n#include \"19-render-to-texture.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 20\r\n#include \"20-sound-samples.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 21\r\n#include \"21-ImcSynthesizer-Sound.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 22\r\n#include \"22-particles.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 23\r\n#include \"23-networking-clientserver.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 24\r\n#include \"24-networking-http.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 25\r\n#include \"25-saving-loading-settings.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 26\r\n#include \"26-open-web-link.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 27\r\n#include \"27-json-read-write.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 28\r\n#include \"28-advanced-polygon.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 29\r\n#include \"29-blend-modes.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 30\r\n#include \"30-simple-game.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 31\r\n#include \"31-basic-3d.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 32\r\n#include \"32-3d-materials.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 33\r\n#include \"33-3d-particles.inl\"\r\n#elif ZILLALIBSAMPLES_NUMBER == 34\r\n#include \"34-skeletal-mesh-ik.inl\"\r\n#endif\r\n\r\n//Test compile all samples with ZILLALIBSAMPLES_NUMBER set to 0\r\n#if ZILLALIBSAMPLES_NUMBER == 0\r\nnamespace NS01 {\r\n#include \"01-empty-game.inl\"\r\n};namespace NS02 {\r\n#include \"02-scene-manager-with-a-single-scene.inl\"\r\n};namespace NS03 {\r\n#include \"03-scene-manager-with-two-scenes.inl\"\r\n#undef SCENE_TITLE\r\n#undef SCENE_GAME\r\n};namespace NS04 {\r\n#include \"04-scene-manager-with-crossfade.inl\"\r\n#undef SCENE_GAME\r\n#undef SCENE_MENU\r\n};namespace NS05 {\r\n#include \"05-2d-geometry-drawing.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS06 {\r\n#include \"06-input-and-other-events.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS07 {\r\n#include \"07-surface-loading-and-drawing.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS08 {\r\n#include \"08-rotating-and-scaling-surfaces.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS09 {\r\n#include \"09-surface-batch-rendering.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS10 {\r\n#include \"10-surface-with-repeating-texture.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS11 {\r\n#include \"11-tiled-texture-surfaces.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS12 {\r\n#include \"12-font-rendering.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS13 {\r\n#include \"13-easing.inl\"\r\n};namespace NS14 {\r\n#include \"14-timer.inl\"\r\n#undef SCENE_MAIN\r\n};namespace NS15 {\r\n#include \"15-collision-tests.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS16 {\r\n#include \"16-post-process-effect.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS17 {\r\n#include \"17-surface-shader.inl\"\r\n#undef SCENE_GAME\r\n};namespace NS18 {\r\n#include \"18-render-clipping.inl\"\r\n};namespace NS19 {\r\n#include \"19-render-to-texture.inl\"\r\n};namespace NS20 {\r\n#include \"20-sound-samples.inl\"\r\n};namespace NS21 {\r\n#include \"21-ImcSynthesizer-Sound.inl\"\r\n};namespace NS22 {\r\n#include \"22-particles.inl\"\r\n};namespace NS23 {\r\n#include \"23-networking-clientserver.inl\"\r\n};namespace NS24 {\r\n#include \"24-networking-http.inl\"\r\n};namespace NS25 {\r\n#include \"25-saving-loading-settings.inl\"\r\n};namespace NS26 {\r\n#include \"26-open-web-link.inl\"\r\n};namespace NS27 {\r\n#include \"27-json-read-write.inl\"\r\n};namespace NS28 {\r\n#include \"28-advanced-polygon.inl\"\r\n};namespace NS29 {\r\n#include \"29-blend-modes.inl\"\r\n};namespace NS30 {\r\n#include \"30-simple-game.inl\"\r\n};namespace NS31 {\r\n#include \"31-basic-3d.inl\"\r\n};namespace NS32 {\r\n#include \"32-3d-materials.inl\"\r\n};namespace NS33 {\r\n#include \"33-3d-particles.inl\"\r\n};namespace NS34 {\r\n#include \"34-skeletal-mesh-ik.inl\"\r\n};\r\n#endif\r\n" }, { "alpha_fraction": 0.6211340427398682, "alphanum_fraction": 0.6391752362251282, "avg_line_length": 17.399999618530273, "blob_id": "f0b01108887e1f0179906e0a3cc3ccd0a5e7de74", "content_id": "7c50b1c9b693f8a028ea2d0e590f056c7257036d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 388, "license_type": "no_license", "max_line_length": 66, "num_lines": 20, "path": "/02-scene-manager-with-a-single-scene.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_MAIN 1\r\n\r\nstruct sSceneMain : public ZL_Scene\r\n{\r\n\tsSceneMain() : ZL_Scene(SCENE_MAIN) { }\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Blue);\r\n\t}\r\n} SceneMain;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Scene Manager With a Single Scene\", 854, 480);\r\n\t\tZL_SceneManager::Init(SCENE_MAIN);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6563993692398071, "alphanum_fraction": 0.6898565888404846, "avg_line_length": 35.65999984741211, "blob_id": "d5226a0a329dbe5d242451e691ea2d0af4c20ec9", "content_id": "dd0f639ed33d8de8093fb4f0f4ed75f36fb103df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1883, "license_type": "no_license", "max_line_length": 131, "num_lines": 50, "path": "/28-advanced-polygon.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "struct sMain : public ZL_Application\r\n{\r\n\tsMain() : ZL_Application() {}\r\n\r\n\tZL_Polygon Poly, PolyExtrude;\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Advanced Polygon Usage\", 854, 480);\r\n\r\n\t\tvector<ZL_Polygon::PointList> PointLists(3);\r\n\r\n\t\t//make an outline in a circle consisting of 20 points with positive winding (counterclockwise)\r\n\t\tfor (float a = 0; a < PI2; a+=(PI2/20)) PointLists[0].push_back(ZL_Vector::FromAngle(a) * 200 + ZL_Vector::Up*0);\r\n\r\n\t\t//make two thin quads that go across the circle but with negative winding (clockwise)\r\n\t\tfloat Angles[] = { 0, PIHALF/2, PIHALF/2*3 }, AngleSpread = PI2/50;\r\n\t\tfor (int i = 1; i <= 2; i++)\r\n\t\t{\r\n\t\t\tPointLists[i].push_back(ZL_Vector::FromAngle(Angles[i]+AngleSpread )*300);\r\n\t\t\tPointLists[i].push_back(ZL_Vector::FromAngle(Angles[i]-AngleSpread )*300);\r\n\t\t\tPointLists[i].push_back(ZL_Vector::FromAngle(Angles[i]+AngleSpread+PI)*300);\r\n\t\t\tPointLists[i].push_back(ZL_Vector::FromAngle(Angles[i]-AngleSpread+PI)*300);\r\n\t\t}\r\n\r\n\t\t//Tesselate a polygon with a border and filled area that filters to positive winding (curring out the thin quads from the circle)\r\n\t\tPoly = ZL_Polygon(ZL_Polygon::BORDER_FILL).Add(PointLists, ZL_Polygon::POSITIVE);\r\n\r\n\t\t//Create a textured 20 wide outside extrude from the generated borders\r\n\t\tPolyExtrude = ZL_Polygon(ZL_Surface(\"Data/extrude.png\").SetTextureRepeatMode()).ExtrudeFromBorder(Poly, 20.0f);\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\t//Clear screen to black\r\n\t\tZL_Display::ClearFill();\r\n\r\n\t\t//define the display to be horizontally -500 to 500 and vertically fitting by screen aspect ratio\r\n\t\tZL_Display::PushOrtho(-500, 500, -500/ZLASPECTR, 500/ZLASPECTR);\r\n\r\n\t\t//Draw the main poly filled with white\r\n\t\tPoly.Fill(ZL_Color::White);\r\n\r\n\t\t//Draw the textured extrude polygon\r\n\t\tPolyExtrude.Draw();\r\n\r\n\t\t//Reset view matrix to default\r\n\t\tZL_Display::PopOrtho();\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6987951993942261, "alphanum_fraction": 0.7650602459907532, "avg_line_length": 40.5, "blob_id": "fbcc5fa9a5cada410e1207668e750b44c7944c91", "content_id": "63c887ff2fbc39b69d6e22ef488fc7c20f7373f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 166, "license_type": "no_license", "max_line_length": 49, "num_lines": 4, "path": "/Android/jni/Application.mk", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "# This selects build targets for release builds\n# Debug builds only build the first listed target\nAPP_ABI := armeabi-v7a\n#APP_ABI := armeabi-v7a arm64-v8a x86 x86_64\n" }, { "alpha_fraction": 0.6123996376991272, "alphanum_fraction": 0.6418376564979553, "avg_line_length": 29.577465057373047, "blob_id": "f8cc347f8cdc33bb7129c0a5adc99600cfb791b3", "content_id": "9b65700782ba3675b79fce6ed523724f89f1bf55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2242, "license_type": "no_license", "max_line_length": 112, "num_lines": 71, "path": "/17-surface-shader.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tZL_Shader shader_both, shader_fragment_only, shader_vertex_only;\r\n\tZL_Surface srfLogo;\r\n\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\tvoid InitGlobal()\r\n\t{\r\n\t\tconst char shader_vertex_shader_src[] = ZL_SHADER_SOURCE_HEADER(ZL_GLES_PRECISION_LOW)\r\n\t\t\t\"uniform mat4 u_mvpMatrix;\"\r\n\t\t\t\"attribute vec4 a_position;\"\r\n\t\t\t\"attribute vec4 a_color;\"\r\n\t\t\t\"attribute vec2 a_texcoord;\"\r\n\t\t\t\"varying vec4 v_color;\"\r\n\t\t\t\"varying vec2 v_texcoord;\"\r\n\t\t\t\"uniform float shear;\"\r\n\t\t\t\"void main()\"\r\n\t\t\t\"{\"\r\n\t\t\t\t\"v_color = a_color;\"\r\n\t\t\t\t\"v_texcoord = a_texcoord;\"\r\n\t\t\t\t\"gl_Position = u_mvpMatrix * (a_position + vec4(a_texcoord.y*shear,0.0,0.0,0.0));\"\r\n\t\t\t\"}\";\r\n\t\tconst char shader_fragment_shader_src[] = ZL_SHADER_SOURCE_HEADER(ZL_GLES_PRECISION_LOW)\r\n\t\t\t\"uniform sampler2D u_texture;\"\r\n\t\t\t\"varying vec4 v_color;\"\r\n\t\t\t\"varying vec2 v_texcoord;\"\r\n\t\t\t\"uniform float brightness;\"\r\n\t\t\t\"void main()\"\r\n\t\t\t\"{\"\r\n\t\t\t\t\"gl_FragColor = texture2D(u_texture, v_texcoord);\"\r\n\t\t\t\t\"gl_FragColor.rgb = brightness*vec3((gl_FragColor.r+gl_FragColor.g+gl_FragColor.b)/3.0);\"\r\n\t\t\t\"}\";\r\n\t\tshader_both = ZL_Shader(shader_fragment_shader_src, shader_vertex_shader_src, \"shear\", \"brightness\");\r\n\t\tshader_fragment_only = ZL_Shader(shader_fragment_shader_src, NULL, \"brightness\");\r\n\t\tshader_vertex_only = ZL_Shader(NULL, shader_vertex_shader_src, \"shear\");\r\n\t\tsrfLogo = ZL_Surface(\"Data/ZILLALIB.png\");\r\n\t}\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tfloat t = ZLSINCESECONDS(0);\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\r\n\t\tshader_both.Activate();\r\n\t\tshader_both.SetUniform(50.0f+50.0f*scos(t), 0.5f+0.5f*ssin(t));\r\n\t\tsrfLogo.Draw( 50, 250);\r\n\t\tshader_both.Deactivate();\r\n\r\n\t\tshader_fragment_only.Activate();\r\n\t\tshader_fragment_only.SetUniform(0.5f+0.5f*ssin(t));\r\n\t\tsrfLogo.Draw(450, 250);\r\n\t\tshader_fragment_only.Deactivate();\r\n\r\n\t\tshader_vertex_only.Activate();\r\n\t\tshader_vertex_only.SetUniform(50.0f+50.0f*scos(t));\r\n\t\tsrfLogo.Draw(850, 250);\r\n\t\tshader_vertex_only.Deactivate();\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Surface Shader\", 1280, 720);\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.659971296787262, "alphanum_fraction": 0.6979913711547852, "avg_line_length": 28.9777774810791, "blob_id": "0a5a4a7c6cea25a9af0e64152d056617c237e537", "content_id": "c96431c1ecbd7a3d737345ec706974b72658e56b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1394, "license_type": "no_license", "max_line_length": 147, "num_lines": 45, "path": "/22-particles.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "enum { SCENE_PARTICLES = 1 };\r\n\r\nstruct sSceneParticles : public ZL_Scene\r\n{\r\n\t//Construct the scene with its identifier\r\n\tsSceneParticles() : ZL_Scene(SCENE_PARTICLES) { }\r\n\tZL_ParticleEffect sparks;\r\n\r\n\t//Set up the particle effect when entering the scene\r\n\tvoid InitEnter(ZL_SceneType SceneTypeFrom, void* data)\r\n\t{\r\n\t\tsparks = ZL_ParticleEffect(3000, 500, s(0.1));\r\n\t\tsparks.AddParticleImage(ZL_Surface(\"Data/SPARK.png\").SetColor(ZLRGB(1,.8,.1)), 10000); //max 10000 particles\r\n\t\tsparks.AddBehavior(new ZL_ParticleBehavior_LinearMove(300, 50)); //move at a speed of 300 pixel per seconds with random variation 50 (250 to 350)\r\n\t\tsparks.AddBehavior(new ZL_ParticleBehavior_LinearImageProperties(1, 0, 1, 3)); //fade from 1 to 0, scale from 1 to 3\r\n\t}\r\n\r\n\t//Clean up after scene is done (not relevant to this sample)\r\n\tvoid DeInitAfterTransition()\r\n\t{\r\n\t\tsparks = ZL_ParticleEffect();\r\n\t}\r\n\r\n\t//Spawn some particles once a frame (before doing any drawing)\r\n\tvoid Calculate()\r\n\t{\r\n\t\tsparks.Spawn(ZLELAPSEDTICKS, ZL_Display::PointerX, ZL_Display::PointerY);\r\n\t}\r\n\r\n\t//clear the screen and draw the particles\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tsparks.Draw();\r\n\t}\r\n} SceneParticles;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Particles\", 854, 480);\r\n\t\tZL_SceneManager::Init(SCENE_PARTICLES);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.5998861789703369, "alphanum_fraction": 0.613545835018158, "avg_line_length": 56.17880630493164, "blob_id": "3bf3073e696819079c70b845b9d6a7ed1b8655d5", "content_id": "6a1c2e3230659d25691ebba0000498722babfbce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8785, "license_type": "no_license", "max_line_length": 257, "num_lines": 151, "path": "/build-all.py", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "import os, glob, sys, subprocess, zipfile, shutil, time\r\n\r\n#read MSBUILD_PATH, OUT_DIR and ANDROID_* variables for signing APK files from external file 'build-all.cfg' (not checked into version control)\r\nMSBUILD_PATH = 'C:/Program Files (x86)/MSBuild/12.0/Bin/MSBuild.exe'\r\nOUT_DIR = 'Builds'\r\nWEB_GZ = False\r\nexec (file('build-all.cfg').read() if os.path.exists('build-all.cfg') else '')\r\nWEB_GZ = ('.gz' if WEB_GZ else '')\r\n\r\n#check if directories for unused assets already exist, abort if so\r\nassert not os.path.exists('Data-Unused'), 'Temporary asset directory \"' + 'Data-Unused' + '\" still exists, please check (crashed when executed last time?)'\r\n\r\n#build list of assets with path names in Data and in Data-Unused\r\nassets = []\r\nfor root, dirs, filenames in os.walk('Data'):\r\n\tfor filename in filenames:\r\n\t\tassets += [[root.replace('\\\\','/') + '/' + filename,root.replace('Data','Data-Unused',1).replace('\\\\','/') + '/' + filename]]\r\n\r\n# platform specific setup\r\nzl_dir = os.path.realpath(__file__+'/../../ZillaLib').replace('\\\\', '/')\r\nif sys.platform == 'win32': os.environ['PATH'] += os.pathsep+zl_dir.replace('/', os.sep)+os.sep+'Tools'\r\nlinux_cpu_type = 'x86_64' if sys.maxsize > 2**32 else 'x86_32'\r\n\r\n#options\r\nis_rebuild = 'rebuild' in sys.argv\r\nselect_targets = [k for k in sys.argv if k in ['wasm','emscripten','nacl','android','win32','win64','linux','osx']]\r\nif select_targets == []: select_targets = ['wasm','android','win32','win64','linux','osx']\r\n\r\n#create directories for unused assets while building samples that don't need them, and at first move all assets over\r\nfor asset in assets:\r\n\tif not os.path.exists(os.path.dirname(asset[1])): os.makedirs(os.path.dirname(asset[1]))\r\n\tos.rename(asset[0], asset[1])\r\n\r\n#create output dir\r\nif not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR)\r\n\r\n#loop through all samples\r\nBuildLastRun = 0\r\nfor num in range(1, 99):\r\n\ttry:\r\n\t\tsnum = str(num).zfill(2);\r\n\t\tinl = (glob.glob(snum + \"-*\") or [''])[0]\r\n\t\tif not inl: continue\r\n\t\tinlcode = file(inl).read()\r\n\t\toneasset = ''\r\n\t\tprint '---------------------------------------------------------------------------------------------------------------------------------------------------------------------'\r\n\t\tprint '[ASSETS] Building Sample',num,'(\"' + inl + '\"):'\r\n\t\tfor asset in assets:\r\n\t\t\tif (asset[0] in inlcode):\r\n\t\t\t\tos.rename(asset[1], asset[0])\r\n\t\t\t\tprint ' Used Asset:',asset[0]\r\n\t\t\t\toneasset = asset[0]\r\n\t\tif oneasset: os.utime(oneasset, None) #touch asset file so assets get rebuilt\r\n\r\n\t\twhile BuildLastRun >= int(time.time()):pass #must be at least the next second since last build ended, otherwise make can get confused\r\n\t\tdef buildheader(typ):\r\n\t\t\tprint '---------------------------------------------------------------------------------------------------------------------------------------------------------------------'\r\n\t\t\tprint '[' + typ + '] Building Sample',num,'(\"' + inl + '\"):'\r\n\t\tdef buildfooter():\r\n\t\t\tprint '---------------------------------------------------------------------------------------------------------------------------------------------------------------------'\r\n\t\t\tprint ''\r\n\t\t\tsys.stdout.flush()\r\n\t\tdef building(pargs):\r\n\t\t\tprint ' **** Executing:',pargs,'...'\r\n\t\t\tp = subprocess.Popen(pargs, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\r\n\t\t\twhile True:\r\n\t\t\t\tl = p.stdout.readline()\r\n\t\t\t\tif not l: break\r\n\t\t\t\tif not l.strip(): continue\r\n\t\t\t\tsys.stderr.write(' ' + l.rstrip()[0:180] + \"\\n\")\r\n\t\t\t\tsys.stderr.flush()\r\n\t\t\tpret = p.wait()\r\n\t\t\tassert pret == 0, ' BUILD RETURNED ERROR, ABORTING'\r\n\t\t\tglobal BuildLastRun\r\n\t\t\tBuildLastRun = int(time.time())\r\n\t\tdef buildcopy(src, trg):\r\n\t\t\tprint ' **** Copying',src,'to',OUT_DIR+'/'+trg,'...'\r\n\t\t\tshutil.copy2(src, OUT_DIR+'/'+trg)\r\n\t\tdef buildzip(trgzip, src, trg):\r\n\t\t\tprint ' **** Zipping',src,'into',OUT_DIR+'/'+trgzip,'as',trg,'...'\r\n\t\t\tz = zipfile.ZipFile(OUT_DIR+'/'+trgzip,'w',zipfile.ZIP_DEFLATED);\r\n\t\t\tz.write(src, trg);[z.write(r+os.sep+f, r.replace(src, trg, 1)+os.sep+f) for r,d,fs in os.walk(src) for f in fs]\r\n\t\t\tz.close()\r\n\t\tdef buildcheck(name, trg):\r\n\t\t\tif select_targets and name not in select_targets: return False\r\n\t\t\treturn is_rebuild or not os.path.exists(OUT_DIR+'/'+trg) or os.path.getmtime(OUT_DIR+'/'+trg) < os.path.getmtime(inl)\r\n\r\n\t\tif sys.platform == 'win32':\r\n\t\t\tif buildcheck('wasm', 'ZillaLibSample-' + snum + '.js'+WEB_GZ):\r\n\t\t\t\tbuildheader('WEBASSEMBLY')\r\n\t\t\t\tbuilding(['make', '-j', '4', 'wasm-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num), 'W=ZillaLibSampleMain.cpp' + (' ' + oneasset if oneasset else '')])\r\n\t\t\t\tbuildcopy('Release-wasm/ZillaLibSamples.js'+WEB_GZ, 'ZillaLibSample-' + snum + '.js'+WEB_GZ)\r\n\t\t\t\tbuildfooter()\r\n\r\n\t\t\tif buildcheck('emscripten', 'ZillaLibSample-' + snum + '.js'+WEB_GZ):\r\n\t\t\t\tbuildheader('EMSCRIPTEN')\r\n\t\t\t\tbuilding(['make', '-j', '4', 'emscripten-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num), 'W=ZillaLibSampleMain.cpp' + (' ' + oneasset if oneasset else '')])\r\n\t\t\t\tbuildcopy('Release-emscripten/ZillaLibSamples' + ('_WithData' if oneasset else '') + '.js'+WEB_GZ, 'ZillaLibSample-' + snum + '.js'+WEB_GZ)\r\n\t\t\t\tbuildfooter()\r\n\r\n\t\t\tif buildcheck('nacl', 'ZillaLibSample-' + snum + '.pexe'+WEB_GZ):\r\n\t\t\t\tbuildheader('NACL')\r\n\t\t\t\tbuilding(['make', '-j', '4', 'nacl-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num), 'W=ZillaLibSampleMain.cpp' + (' '+oneasset if oneasset else '')])\r\n\t\t\t\tbuildcopy('Release-nacl/ZillaLibSamples' + ('_WithData' if oneasset else '') + '.pexe'+WEB_GZ, 'ZillaLibSample-' + snum + '.pexe'+WEB_GZ)\r\n\t\t\t\tbuildfooter()\r\n\r\n\t\t\tif buildcheck('android', 'ZillaLibSample-' + snum + '.apk'):\r\n\t\t\t\tbuildheader('ANDROID')\r\n\t\t\t\tbuilding(['make', '-j', '4', 'android-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num), 'W=ZillaLibSampleMain.cpp'])\r\n\t\t\t\tbuilding(['make', 'android-sign', 'SIGN_OUTAPK='+OUT_DIR+'/ZillaLibSample-' + snum + '.apk', 'SIGN_KEYSTORE='+ANDROID_SIGN_KEYSTORE, 'SIGN_STOREPASS='+ANDROID_SIGN_STOREPASS, 'SIGN_KEYALIAS='+ANDROID_SIGN_KEYALIAS, 'SIGN_KEYPASS='+ANDROID_SIGN_KEYPASS])\r\n\t\t\t\tbuildfooter()\r\n\r\n\t\t\tif buildcheck('win32', 'ZillaLibSample-' + snum + '_Win32.zip'):\r\n\t\t\t\tbuildheader('WIN32')\r\n\t\t\t\tif os.path.exists('Release-vs2013\\ZillaLibSampleMain.obj'): os.remove('Release-vs2013\\ZillaLibSampleMain.obj')\r\n\t\t\t\tbuilding('\"'+MSBUILD_PATH+'\" /p:Configuration=Release;Platform=Win32;CmdLinePreprocessorDefinitions=\"ZILLALIBSAMPLES_NUMBER=' + str(num) + (';ZILLALIBSAMPLES_HASDATA\"' if oneasset else '\";SkipDataAssets=1') + ' ZillaLibSamples-vs.vcxproj')\r\n\t\t\t\tbuildzip('ZillaLibSample-' + snum + '_Win32.zip', 'Release-vs2013/ZillaLibSamples' + ('_WithData' if oneasset else '') + '.exe', 'ZillaLibSamples-' + snum + '.exe')\r\n\t\t\t\tbuildfooter()\r\n\r\n\t\t\tif buildcheck('win64', 'ZillaLibSample-' + snum + '_Win64.zip'):\r\n\t\t\t\tbuildheader('WIN64')\r\n\t\t\t\tif os.path.exists('Release-vs2013x64\\ZillaLibSampleMain.obj'): os.remove('Release-vs2013x64\\ZillaLibSampleMain.obj')\r\n\t\t\t\tbuilding('\"'+MSBUILD_PATH+'\" /p:Configuration=Release;Platform=x64;CmdLinePreprocessorDefinitions=\"ZILLALIBSAMPLES_NUMBER=' + str(num) + (';ZILLALIBSAMPLES_HASDATA\"' if oneasset else '\";SkipDataAssets=1') + ' ZillaLibSamples-vs.vcxproj')\r\n\t\t\t\tbuildzip('ZillaLibSample-' + snum + '_Win64.zip', 'Release-vs2013x64/ZillaLibSamples' + ('_WithData' if oneasset else '') + '.exe', 'ZillaLibSamples-' + snum + '.exe')\r\n\t\t\t\tbuildfooter()\r\n\r\n\t\tif sys.platform == 'linux2':\r\n\t\t\tif buildcheck('linux', 'ZillaLibSample-' + snum + '_linux_' + linux_cpu_type + '.zip'):\r\n\t\t\t\tbuildheader('LINUX')\r\n\t\t\t\tbuilding(['make', '-j', '4', 'linux-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num) + (' ZILLALIBSAMPLES_HASDATA' if oneasset else ''), 'W=ZillaLibSampleMain.cpp' + (' ' + oneasset if oneasset else '')])\r\n\t\t\t\tbuildzip('ZillaLibSample-' + snum + '_linux_' + linux_cpu_type + '.zip', 'Release-linux/ZillaLibSamples_' + linux_cpu_type + ('_WithData' if oneasset else ''), 'ZillaLibSample-' + snum)\r\n\t\t\t\tbuildfooter()\r\n\r\n\t\tif sys.platform == 'darwin':\r\n\t\t\tif buildcheck('osx', 'ZillaLibSample-' + snum + '_osx.zip'):\r\n\t\t\t\tbuildheader('OSX')\r\n\t\t\t\tbuilding(['make', '-j', '4', 'osx-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num) + (' ZILLALIBSAMPLES_HASDATA' if oneasset else '')])\r\n\t\t\t\tbuildzip('ZillaLibSample-' + snum + '_osx.zip', 'ZillaLibSamples-OSX.xcodeproj/Release/ZillaLibSamples.app', 'ZillaLibSample-' + snum + '.app')\r\n\t\t\t\tbuildfooter()\r\n\r\n\texcept: import traceback; traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]); break;\r\n\tfinally:\r\n\t\t#move all assets back to unused for building the next sample\r\n\t\tfor asset in assets:\r\n\t\t\tif os.path.exists(asset[0]): os.rename(asset[0], asset[1])\r\n\r\n#removing temporary directories\r\nfor asset in assets:\r\n\tos.rename(asset[1], asset[0])\r\n\ttry: os.rmdir(os.path.dirname(asset[1]))\r\n\texcept: pass\r\n" }, { "alpha_fraction": 0.6600000262260437, "alphanum_fraction": 0.6926316022872925, "avg_line_length": 39.30434799194336, "blob_id": "9732aec85c6a47c35055463047dafeed16da1442", "content_id": "b16d7c071b58fc760c85528a565ade44d300df3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 950, "license_type": "no_license", "max_line_length": 125, "num_lines": 23, "path": "/18-render-clipping.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "struct sMain : public ZL_Application\r\n{\r\n\tZL_Surface srfLogo;\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Render Clipping\", 854, 480);\r\n\t\tsrfLogo = ZL_Surface(\"Data/ZILLALIB.png\");\r\n\t}\r\n\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tfloat t = ZLSINCESECONDS(0);\r\n\t\tZL_Rectf ClipRect(ZLHALFW, ZLHALFH + scos(t)*50.0f, 160.0f);\r\n\t\tZL_Display::ClearFill(ZL_Color::Black); //clear whole screen\r\n\t\tZL_Display::FillRect(ClipRect+5.0f, ZL_Color::Green); //draw a green rectangle highliting the clipped area\r\n\t\tZL_Display::SetClip(ClipRect); //clip the rendering to the rect\r\n\t\tZL_Display::ClearFill(ZL_Color::Blue); //clear the screen (meaning only clear the now clipped area)\r\n\t\tsrfLogo.Draw(300 + ssin(t)*100, 100); //draw a surface moving left and right\r\n\t\tZL_Display::FillCircle(ZLCENTER + ZL_Vector::FromAngle(t)*200.0f, 50, ZL_Color::Yellow); //draw a circle moving in a circle\r\n\t\tZL_Display::ResetClip(); //reset the clipping rectangle\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6469020843505859, "alphanum_fraction": 0.6528980731964111, "avg_line_length": 25.290908813476562, "blob_id": "97cef1cca011a230bb31c13254f2df313226eaf9", "content_id": "f038cfd4b5751d3af5baf20f8928a7d40e79e1ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1501, "license_type": "no_license", "max_line_length": 153, "num_lines": 55, "path": "/06-input-and-other-events.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\nZL_Font fntMain;\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tZL_String msg;\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\tvoid OnKeyDown(ZL_KeyboardEvent& e)\r\n\t{\r\n\t\tif (e.key == ZLK_SPACE) msg = \"Pressed the Space key\";\r\n\t\telse msg = ZL_String(\"Pressed another key: \") << ZL_Display::KeyScancodeName(e.key);\r\n\t}\r\n\r\n\tvoid OnPointerMove(ZL_PointerMoveEvent& e)\r\n\t{\r\n\t\tmsg = ZL_String::format(\"Pointer Num: %d - X: %d - Y: %d - RelX: %.1f - RelY: %.1f - State: %d\", e.which, (int)e.x, (int)e.y, e.xrel, e.yrel, e.state);\r\n\t}\r\n\r\n\tvoid OnResize(ZL_WindowResizeEvent& e)\r\n\t{\r\n\t\tmsg = ZL_String::format(\"Resized to: %d x %d\", (int)ZLWIDTH, (int)ZLHEIGHT);\r\n\t}\r\n\r\n\t//Set up the event listeners\r\n\tvoid InitAfterTransition()\r\n\t{\r\n\t\tZL_Display::sigKeyDown.connect(this, &sSceneGame ::OnKeyDown);\r\n\t\tZL_Display::sigPointerMove.connect(this, &sSceneGame ::OnPointerMove);\r\n\t\tZL_Display::sigResized.connect(this, &sSceneGame ::OnResize);\r\n\t}\r\n\r\n\t//Stop listening to the events and clear the message buffer\r\n\tvoid DeInitLeave(ZL_SceneType SceneTypeTo)\r\n\t{\r\n\t\tZL_Display::AllSigDisconnect(this);\r\n\t\tmsg = ZL_String();\r\n\t}\r\n\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Blue);\r\n\t\tfntMain.Draw(ZLHALFW, ZLHALFH, msg, ZL_Origin::Center);\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Input and Other Events\", 854, 480);\r\n\t\tfntMain = ZL_Font(\"Data/fntMain.png\");\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.6584463715553284, "alphanum_fraction": 0.6732429265975952, "avg_line_length": 29.80392074584961, "blob_id": "459810066ff8426019800d09f5ef1d04806c46f8", "content_id": "bbe4e8b625bee899ad6cadca39ec95b4894de25b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1622, "license_type": "no_license", "max_line_length": 111, "num_lines": 51, "path": "/04-scene-manager-with-crossfade.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\n#define SCENE_MENU 2\r\nZL_Font fnt;\r\n\r\nstruct sSceneMenu : public ZL_Scene\r\n{\r\n\tsSceneMenu() : ZL_Scene(SCENE_MENU) { }\r\n\tint InitTransitionEnter(ZL_SceneType SceneTypeFrom, void* data) { return -500; }\r\n\tint DeInitTransitionLeave(ZL_SceneType SceneTypeTo) { return -500; }\r\n\tvirtual void DrawCrossfade(scalar f, bool IsLeaveTransition, ZL_Scene* pOtherScene)\r\n\t{\r\n\t\tf = ZL_Easing::InOutCubic(f);\r\n\t\tZL_Display::ClearFill(ZLBLACK);\r\n\t\tZL_Display::Translate(ZLWIDTH * (1-f), -ZLHEIGHT * (1-f));\r\n\t\tpOtherScene->Draw();\r\n\t\tZL_Display::Translate(-ZLWIDTH * (1-f), ZLHEIGHT * (1-f));\r\n\t\tZL_Display::Translate(0, ZLHEIGHT * f);\r\n\t\tDraw();\r\n\t\tZL_Display::Translate(0, -ZLHEIGHT * f);\r\n\t}\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::FillRect(0, 0, ZLWIDTH, ZLHEIGHT, ZL_Color::Blue);\r\n\t\tfnt.Draw(ZLHALFW, ZLHALFH, \"THIS IS THE MENU SCENE\", ZL_Origin::Center);\r\n\t}\r\n} sSceneMenu;\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::FillRect(0, 0, ZLWIDTH, ZLHEIGHT, ZL_Color::Green);\r\n\t\tfnt.Draw(ZLHALFW, ZLHALFH, \"GAME SCENE\", ZL_Color::Black, ZL_Origin::Center);\r\n\t}\r\n} sSceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Scene Manager With Crossfade\", 854, 480);\r\n\t\tfnt = ZL_Font(\"Data/fntMain.png\");\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t\tZL_Display::sigPointerDown.connect(this, &sMain::OnPointerDown);\r\n\t}\r\n\tvoid OnPointerDown(ZL_PointerPressEvent& e)\r\n\t{\r\n\t\tZL_SceneManager::GoToScene(ZL_SceneManager::GetCurrent()->SceneType == SCENE_GAME ? SCENE_MENU : SCENE_GAME);\r\n\t}\r\n} Main;\r\n" }, { "alpha_fraction": 0.619507908821106, "alphanum_fraction": 0.6414762735366821, "avg_line_length": 23.288888931274414, "blob_id": "4568682e9d51dae8085fcbffd62a91eda118a064", "content_id": "3458e18449ecbf33b279940a77ee3868d5a79016", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1138, "license_type": "no_license", "max_line_length": 129, "num_lines": 45, "path": "/09-surface-batch-rendering.inl", "repo_name": "thcoura/ZillaLibSamples", "src_encoding": "UTF-8", "text": "#define SCENE_GAME 1\r\n\r\nstruct sSceneGame : public ZL_Scene\r\n{\r\n\tZL_Surface srfBlock;\r\n\r\n\tsSceneGame() : ZL_Scene(SCENE_GAME) { }\r\n\r\n\tvoid InitGlobal()\r\n\t{\r\n\t\tsrfBlock = ZL_Surface(\"Data/PATTERN.png\");\r\n\t}\r\n\r\n\t//Clear screen and draw the surface many times with batch rendering enabled\r\n\tvoid Draw()\r\n\t{\r\n\t\tZL_Display::ClearFill(ZL_Color::Black);\r\n\t\tsrfBlock.BatchRenderBegin(true);\r\n\t\tfor (scalar x = 10, maxX = ZLFROMW(34), maxY = ZLFROMH(34); x < maxX; x += s(24))\r\n\t\t\tfor (scalar y = 10; y < maxY; y += s(24))\r\n\t\t\t\tsrfBlock.Draw(x, y, ZL_Color::LUM(RAND_FACTOR));\r\n\t\tsrfBlock.BatchRenderEnd();\r\n\t}\r\n} SceneGame;\r\n\r\nstruct sMain : public ZL_Application\r\n{\r\n\t//we set fps limit to 0 to have unlocked frame rate\r\n\tsMain() : ZL_Application(0) {}\r\n\r\n\tZL_Font fntMain;\r\n\r\n\tvoid Load(int argc, char *argv[])\r\n\t{\r\n\t\tZL_Display::Init(\"Surface Batch Rendering\", 854, 480);\r\n\t\tfntMain = ZL_Font(\"Data/fntMain.png\");\r\n\t\tZL_SceneManager::Init(SCENE_GAME);\r\n\t}\r\n\r\n\t//display fps\r\n\tvoid AfterFrame()\r\n\t{\r\n\t\tfntMain.Draw(ZLFROMW(30), ZLFROMH(30), (const char*)ZL_String::format(\"%d FPS\", FPS), ZL_Color::White, ZL_Origin::CenterRight);\r\n\t}\r\n} Main;\r\n" } ]
38
dplusplus/anarchy_golf
https://github.com/dplusplus/anarchy_golf
d661bd2f4620d402f9506feacc511ccea19d5dd8
b5727ba96a09e67dd82c7410d52ed32472143bf1
73abe5b662f7e9320f7a1186e643aa721d16adf3
refs/heads/master
2021-01-12T18:26:43.843226
2013-06-15T10:33:35
2013-06-15T10:33:49
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 25, "blob_id": "601d82963b4d46774cc257330da2f4dbddb87a9a", "content_id": "12c3189f7e16681e978aae9ba80ed4ac88e8bdaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/python/2.echo.py", "repo_name": "dplusplus/anarchy_golf", "src_encoding": "UTF-8", "text": "while 1:print raw_input()\n" }, { "alpha_fraction": 0.6052631735801697, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 11.666666984558105, "blob_id": "e81e0a930e11cb6d66c2240f72a43ab208d60070", "content_id": "80332e8c11244c024fe29facb06e58da2fe3cdb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "no_license", "max_line_length": 14, "num_lines": 3, "path": "/python/8.delete_blank_lines.py", "repo_name": "dplusplus/anarchy_golf", "src_encoding": "UTF-8", "text": "while 1:\n s=raw_input()\n if s:print s\n" }, { "alpha_fraction": 0.27142858505249023, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 69, "blob_id": "1e8a44a806136645087f8623ed2c8b9ca8ca4e8e", "content_id": "4143273a6622c1105ffd6906173db053e16bbc8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 69, "num_lines": 1, "path": "/python/748.Bit_Grid.py", "repo_name": "dplusplus/anarchy_golf", "src_encoding": "UTF-8", "text": "for i in[501,24,25,77,388,22,0,324,297,376,296]:print format(i,'09b')\n" }, { "alpha_fraction": 0.5565611124038696, "alphanum_fraction": 0.6063348650932312, "avg_line_length": 72.66666412353516, "blob_id": "15cf6e7c2673478bcf174789331cc726aaf0a04a", "content_id": "14d9c909fc793c2bf3be8a4c17027458a31b19c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 135, "num_lines": 3, "path": "/python/3.99_shinichiroes_of_hamaji.py", "repo_name": "dplusplus/anarchy_golf", "src_encoding": "UTF-8", "text": "i=99;s=', %s.\\n'\nf=lambda i:'%d shinichiro%s of hamaji on the wall'%(i,'es'[:i*2-2])\nwhile i:print f(i)+s%f(i)[:-12]+{1:'Go to the store and buy some more'+s%f(99)}.get(i,'Take one down and pass it around'+s%f(i-1));i-=1\n" }, { "alpha_fraction": 0.737500011920929, "alphanum_fraction": 0.737500011920929, "avg_line_length": 39, "blob_id": "860a45f6f81d814371dcd4b159ee5aac5609d25d", "content_id": "cbf7800161ef14e4562b55c5697126e27dfa0c69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 39, "num_lines": 2, "path": "/python/7.permutater.py", "repo_name": "dplusplus/anarchy_golf", "src_encoding": "UTF-8", "text": "from itertools import permutations as p\nfor i in p(raw_input()):print''.join(i)\n" } ]
5
tanmayuw/ContainerProfiler
https://github.com/tanmayuw/ContainerProfiler
c42b9bfdda90301c6f0bdbf36ed7ccb1e02532e7
fefa73896d6e78dbf2c1073a1c5cf575c6248d8e
e5532d73673a8e9d9b579950d52dfa1bea1ddd33
refs/heads/main
2023-07-26T16:45:49.383956
2021-09-10T19:54:49
2021-09-10T19:54:49
392,123,827
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5077166557312012, "alphanum_fraction": 0.5278986692428589, "avg_line_length": 30.987340927124023, "blob_id": "c9a9820b1cc55f32f04fc5670eef38a8cb8321c4", "content_id": "93e22ed2d261c2523df0f5a53842bbef324e57a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2527, "license_type": "no_license", "max_line_length": 91, "num_lines": 79, "path": "/Profiler_Python/src/profiler", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#======================================================================\n#- IMPLEMENTATION\n#- version profiler (https://www.washington.edu/) 0.3\n#- author Varik Hoang <[email protected]> and Tanmay Shah <[email protected]>\n#- copyright Copyright (c) https://www.washington.edu/\n#- license GNU General Public License\n#======================================================================\n# HISTORY\n# 2021/05/19 : varikmp - script creation\n# 2021/08/12 : varikmp - implemented time steps for sampling\n#======================================================================\n# OPTION\n# PROFILER_OUTPUT_DIR # specify the output directory\n# PROFILER_TIME_STEPS # specify the time step each second\n#======================================================================\n\nfunction help()\n{\n echo \"Usage: $0 COMMAND\"\n echo \" PROFILER_OUTPUT_DIR: output directory for the profiling\"\n echo \" PROFILER_TIME_STEPS: time steps in seconds (must be non-negative integer)\"\n echo \"Example:\"\n echo \" PROFILER_OUTPUT_DIR=/tmp\"\n echo \" PROFILER_TIME_STEPS=1\"\n echo \" $0 \\\"sleep 10\\\"\"\n}\n\nif [ \"$#\" -eq \"0\" ]\nthen\n help\n exit\nfi\n\nPROFILER_OUTPUT_DIR=$(printenv PROFILER_OUTPUT_DIR)\nPROFILER_TIME_STEPS=$(printenv PROFILER_TIME_STEPS)\nif [ -z \"$PROFILER_OUTPUT_DIR\" ]\nthen\n PROFILER_OUTPUT_DIR=./.cprofiles\nfi\nif [ -z \"$PROFILER_TIME_STEPS\" ] || \\\n ! [[ \"$PROFILER_TIME_STEPS\" =~ ^[0-9]+$ ]] || [ $PROFILER_TIME_STEPS -lt 1 ]\nthen\n PROFILER_TIME_STEPS=0\nfi\n\n\n\n\n#echo $PROFILER_TIME_STEPS\nif [ $PROFILER_TIME_STEPS -eq 0 ]\nthen\n python3 ./rudataall-psutil.py -\"$VERBOSITY\" $PROFILER_OUTPUT_DIR\n eval $@\n python3 ./rudataall-psutil.py -\"$VERBOSITY\" $PROFILER_OUTPUT_DIR\nelse\n eval $@ & PID=$!\n while kill -0 $PID > /dev/null 2>&1\n do\n #____ This stub does not account for profiling overhead\n #python3 ./rudataall-psutil.py -\"$VERBOSITY\" $PROFILER_OUTPUT_DIR\n #sleep $PROFILER_TIME_STEPS\n #____\n\n #____ This stub accounts for profiling overhead\n \tt1=$(date '+%s%3N')\n python3 ./rudataall-psutil.py -\"$VERBOSITY\" $PROFILER_OUTPUT_DIR\n t2=$(date '+%s%3N')\n let diff=$t2-$t1\n let profile_time=`echo diff / 1000 | bc -l`\n let sleep_time=$PROFILER_TIME_STEPS-$profile_time\n #sleep_time=`echo $sleep_time / 1000 | bc -l`\n if [ $sleep_time -gt 0 ]\n then\n sleep $sleep_time\n fi\n #____\n done\nfi\n" }, { "alpha_fraction": 0.7287670969963074, "alphanum_fraction": 0.7299657464027405, "avg_line_length": 53.074073791503906, "blob_id": "eea122254adb3038111041156d31c76c05fc6861", "content_id": "2261ab3e1dde51fdf8247d2dffcd34f1ae1c5ad7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5840, "license_type": "no_license", "max_line_length": 131, "num_lines": 108, "path": "/Graphing/auto_generated_delta_script.py", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport shutil\nimport sys\nimport json\nimport copy\nimport configparser\nfrom collections import namedtuple\n\nparser = argparse.ArgumentParser(description='process path and file /or string of metrics.')\nparser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')\nparser.add_argument('delta_interval_time', action='store', help='stores time interval of when to take delta sample')\nargs= parser.parse_args()\nfile_path = args.file_path\nif os.path.exists(file_path + '/delta_json'):\n\tshutil.rmtree(file_path + '/delta_json')\nif not os.path.exists(file_path + '/delta_json'):\n\tos.makedirs(file_path + '/delta_json')\n\njson_array = []\ndelta_name_array = []\ndirs= sorted([i for i in os.listdir( file_path ) if i.endswith(\".json\")])\nfor file_name in dirs:\n\twith open(file_path + '/' + file_name) as json_file: \n\t\tprint ('JSON FILES TANMAY:')\n\t\tprint(json_file)\n\t\ttry:\n\t\t\tnew_json_object = json.load(json_file)\n\t\t\tjson_array.append(new_json_object)\n\t\t\tnew_name= ((file_path+'/delta_json/'+file_name).split('.json')[0] + '_delta.json')\n\t\t\tdelta_name_array.append(new_name)\n\n\t\texcept Exception as e:\n\t\t\tprint (\"{} invalid file\".format(json_file))\n\t\t\tpass\ndef file_subtraction(the_json_one, the_json_two):\n\tjson_three = copy.deepcopy(the_json_two)\n\tif ('cCpuTime' in the_json_one.keys()):\n\t\tjson_three['cCpuTime']=the_json_two['cCpuTime']-the_json_one['cCpuTime']\n\tif ('cCpuTimeKernelMode' in the_json_one.keys()):\n\t\tjson_three['cCpuTimeKernelMode']=the_json_two['cCpuTimeKernelMode']-the_json_one['cCpuTimeKernelMode']\n\tif ('cCpuTimeUserMode' in the_json_one.keys()):\n\t\tjson_three['cCpuTimeUserMode']=the_json_two['cCpuTimeUserMode']-the_json_one['cCpuTimeUserMode']\n\tif ('cDiskReadBytes' in the_json_one.keys()):\n\t\tjson_three['cDiskReadBytes']=the_json_two['cDiskReadBytes']-the_json_one['cDiskReadBytes']\n\tif ('cDiskSectorIO' in the_json_one.keys()):\n\t\tjson_three['cDiskSectorIO']=the_json_two['cDiskSectorIO']-the_json_one['cDiskSectorIO']\n\tif ('cDiskWriteBytes' in the_json_one.keys()):\n\t\tjson_three['cDiskWriteBytes']=the_json_two['cDiskWriteBytes']-the_json_one['cDiskWriteBytes']\n\tif ('cNetworkBytesRecvd' in the_json_one.keys()):\n\t\tjson_three['cNetworkBytesRecvd']=the_json_two['cNetworkBytesRecvd']-the_json_one['cNetworkBytesRecvd']\n\tif ('cNetworkBytesSent' in the_json_one.keys()):\n\t\tjson_three['cNetworkBytesSent']=the_json_two['cNetworkBytesSent']-the_json_one['cNetworkBytesSent']\n\tif ('vCpuContextSwitches' in the_json_one.keys()):\n\t\tjson_three['vCpuContextSwitches']=the_json_two['vCpuContextSwitches']-the_json_one['vCpuContextSwitches']\n\tif ('vCpuIdleTime' in the_json_one.keys()):\n\t\tjson_three['vCpuIdleTime']=the_json_two['vCpuIdleTime']-the_json_one['vCpuIdleTime']\n\tif ('vCpuNice' in the_json_one.keys()):\n\t\tjson_three['vCpuNice']=the_json_two['vCpuNice']-the_json_one['vCpuNice']\n\tif ('vCpuSteal' in the_json_one.keys()):\n\t\tjson_three['vCpuSteal']=the_json_two['vCpuSteal']-the_json_one['vCpuSteal']\n\tif ('vCpuTime' in the_json_one.keys()):\n\t\tjson_three['vCpuTime']=the_json_two['vCpuTime']-the_json_one['vCpuTime']\n\tif ('vCpuTimeIOWait' in the_json_one.keys()):\n\t\tjson_three['vCpuTimeIOWait']=the_json_two['vCpuTimeIOWait']-the_json_one['vCpuTimeIOWait']\n\tif ('vCpuTimeKernelMode' in the_json_one.keys()):\n\t\tjson_three['vCpuTimeKernelMode']=the_json_two['vCpuTimeKernelMode']-the_json_one['vCpuTimeKernelMode']\n\tif ('vCpuTimeSoftIntSrvc' in the_json_one.keys()):\n\t\tjson_three['vCpuTimeSoftIntSrvc']=the_json_two['vCpuTimeSoftIntSrvc']-the_json_one['vCpuTimeSoftIntSrvc']\n\tif ('vCpuTimeUserMode' in the_json_one.keys()):\n\t\tjson_three['vCpuTimeUserMode']=the_json_two['vCpuTimeUserMode']-the_json_one['vCpuTimeUserMode']\n\tif ('vDiskMergedReads' in the_json_one.keys()):\n\t\tjson_three['vDiskMergedReads']=the_json_two['vDiskMergedReads']-the_json_one['vDiskMergedReads']\n\tif ('vDiskMergedWrites' in the_json_one.keys()):\n\t\tjson_three['vDiskMergedWrites']=the_json_two['vDiskMergedWrites']-the_json_one['vDiskMergedWrites']\n\tif ('vDiskReadTime' in the_json_one.keys()):\n\t\tjson_three['vDiskReadTime']=the_json_two['vDiskReadTime']-the_json_one['vDiskReadTime']\n\tif ('vDiskSectorWrites' in the_json_one.keys()):\n\t\tjson_three['vDiskSectorWrites']=the_json_two['vDiskSectorWrites']-the_json_one['vDiskSectorWrites']\n\tif ('vDiskSuccessfulReads' in the_json_one.keys()):\n\t\tjson_three['vDiskSuccessfulReads']=the_json_two['vDiskSuccessfulReads']-the_json_one['vDiskSuccessfulReads']\n\tif ('vDiskSuccessfulWrites' in the_json_one.keys()):\n\t\tjson_three['vDiskSuccessfulWrites']=the_json_two['vDiskSuccessfulWrites']-the_json_one['vDiskSuccessfulWrites']\n\tif ('vDiskWriteTime' in the_json_one.keys()):\n\t\tjson_three['vDiskWriteTime']=the_json_two['vDiskWriteTime']-the_json_one['vDiskWriteTime']\n\tif ('vNetworkBytesRecvd' in the_json_one.keys()):\n\t\tjson_three['vNetworkBytesRecvd']=the_json_two['vNetworkBytesRecvd']-the_json_one['vNetworkBytesRecvd']\n\tif ('vNetworkBytesSent' in the_json_one.keys()):\n\t\tjson_three['vNetworkBytesSent']=the_json_two['vNetworkBytesSent']-the_json_one['vNetworkBytesSent']\n\tif ('cProcessorStats' in the_json_one.keys()):\n\t\tfor (each_key) in the_json_two['cProcessorStats']:\n\t\t\tif ('cCpu' in each_key and 'TIME' in each_key):\n\t\t\t\tjson_three['cProcessorStats'][each_key] = the_json_two['cProcessorStats'][each_key] - the_json_one['cProcessorStats'][each_key]\n\treturn json_three\n\ndelta_json_array=[]\ncount = 0\nfirst = json_array[0]\nfor i in range(1, len(json_array)):\n\tcount += (json_array[i][\"currentTime\"] - json_array[i-1][\"currentTime\"])\n\tif count >= int(args.delta_interval_time):\n\t\tdelta_json_array.append(file_subtraction(first, json_array[i]))\n\t\tcount = 0\n\t\tfirst = json_array[i]\n\nfor i in range(len(delta_json_array)):\n\twith open(delta_name_array[i], 'w') as fp:\n\t\tjson.dump(delta_json_array[i], fp, sort_keys=True, indent=2)\n" }, { "alpha_fraction": 0.555630087852478, "alphanum_fraction": 0.5680127143859863, "avg_line_length": 24.73469352722168, "blob_id": "c44c446319bca23839d45479a1ef0b01b835e0ab", "content_id": "56ad505c6827dc2baf18e3460052759002689713", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 16394, "license_type": "no_license", "max_line_length": 107, "num_lines": 637, "path": "/Profiler_Bash/src/rudataall.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#! /bin/bash\n#pmIp=\"192.168.10.102\"\n# Capture the resource utilization profile of the Virtural Machine, the \n# docker container, as well as the processes statistics inside the container. \n\n# The first time this is run current cpu, disk, and network storage is snapshot\n# The second time this is run the differences are calculated in order to determine \n# the CPU time, Sectors read/written, and Network bytes rcv'd/transmitted \n\n# flags -v, -c, and -p can be used to ommit vm, container, and/or process-level metric respectively\n\n# Notes for VM level statistics:\n# CPU time is in hundreths of a second (centisecond:cs)\n# Sectors read is number of sectors read, where a sector is typically 512 bytes (col 2) assumes /dev/sda1\n# Sectors written (col 3) assumes /dev/sda1\n# network Bytes recv'd assumes eth0 (col ?)\n# network Bytes written assumes eth0 (col ?)\n# col 6 cpu time for processes executing in user mode\n# col 7 cpu time for processes executing in kernel mode\n# col 8 cpu idle time\n# col 9 cpu time waiting for I/O to complete\n# col 10 cpu time servicing interrupts\n# col 11 cpu time servicing soft interrupts\n# col 12 number of context switches\n# col 13 number of disk reads completed succesfully\n# col 14 number of disk reads merged together (adjacent and merged for efficiency) \n# col 15 time in ms spent reading\n# col 16 number of disk writes completed succesfully\n# col 17 number of disk writes merged together (adjacent and merged for efficiency)\n# col 18 time in ms spent writing\n\n# Notes for container level statistics:\n# TBD...\n\n# Notes for process level statistics:\n# TBD...\n \nVM=false\nCONTAINER=false\nPROCESS=false\n\n#get the flags and omit levels as requested\nif [ $# -eq 0 ]\nthen\n VM=true;CONTAINER=true;PROCESS=true\nelse\n while [ -n \"$1\" ]\n do\n case \"$1\" in\n -v) VM=true;;\n -c) CONTAINER=true;;\n -p) PROCESS=true;;\n esac\n shift\n done\nfi \n\noutput=$''\noutput+=$'{\\n'\nepochtime=$(date +%s)\nwrite_time_start=$(date '+%s%3N')\n\n# Find the number of processes inside the container\nIFS=$'\\n'\nPPS=(`cat /sys/fs/cgroup/pids/tasks`)\nunset IFS\nlength=${#PPS[@]}\nPIDS=$((length-2)) \n\n## VM level metrics\n\nif [ $VM = true ]\nthen\n #echo \"VM is Running!!\"\n\n T_VM_1=$(date +%s%3N)\n\n # Get CPU stats\n CPU=(`cat /proc/stat | grep '^cpu '`)\n unset CPU[0]\n CPUUSR=${CPU[1]}\n T_CPUUSR=$(date +%s%3N)\n CPUNICE=${CPU[2]}\n T_CPUNICE=$(date +%s%3N)\n CPUKRN=${CPU[3]}\n T_CPUKRN=$(date +%s%3N)\n CPUIDLE=${CPU[4]} \n T_CPUIDLE=$(date +%s%3N)\n CPUIOWAIT=${CPU[5]}\n T_CPUIOWAIT=$(date +%s%3N)\n CPUIRQ=${CPU[6]}\n T_CPUIRQ=$(date +%s%3N)\n CPUSOFTIRQ=${CPU[7]}\n T_CPUSOFTIRQ=$(date +%s%3N)\n CPUSTEAL=${CPU[8]}\n T_CPUSTEAL=$(date +%s%3N)\n CPUTOT=`expr $CPUUSR + $CPUKRN`\n T_CPUTOT=$(date +%s%3N)\n CONTEXT=(`cat /proc/stat | grep '^ctxt '`)\n unset CONTEXT[0]\n CSWITCH=${CONTEXT[1]}\n T_CSWITCH=$(date +%s%3N) \n\n # Get disk stats\n COMPLETEDREADS=0\n MERGEDREADS=0\n SR=0\n READTIME=0\n COMPLETEDWRITES=0\n MERGEDWRITES=0\n SW=0\n WRITETIME=0\n\n IFS=$'\\n'\n CPU_TYPE=(`cat /proc/cpuinfo | grep 'model name' | cut -d\":\" -f 2 | sed 's/^ *//'`)\n CPU_MHZ=(`cat /proc/cpuinfo | grep 'cpu MHz' | cut -d\":\" -f 2 | sed 's/^ *//'`)\n CPUTYPE=${CPU_TYPE[0]}\n T_CPUTYPE=$(date +%s%3N)\n CPUMHZ=${CPU_MHZ[0]}\n T_CPUMHZ=$(date +%s%3N)\n\nDISK=\"$(lsblk -nd --output NAME,TYPE | grep disk)\"\nDISK=${DISK//disk/}\nDISK=($DISK)\n#DISK is now an array containing all names of our unique disk devices\n\nunset IFS\nlength=${#DISK[@]}\n\n\nfor (( i=0 ; i < length; i++ ))\n do\n currdisk=($(cat /proc/diskstats | grep ${DISK[i]}) )\n COMPLETEDREADS=`expr ${currdisk[3]} + $COMPLETEDREADS`\n MERGEDREADS=`expr ${currdisk[4]} + $MERGEDREADS`\n SR=`expr ${currdisk[5]} + $SR`\n READTIME=`expr ${currdisk[6]} + $READTIME`\n COMPLETEDWRITES=`expr ${currdisk[7]} + $COMPLETEDWRITES`\n MERGEDWRITES=`expr ${currdisk[8]} + $MERGEDWRITES`\n SW=`expr ${currdisk[9]} + $SW`\n WRITETIME=`expr ${currdisk[10]} + $WRITETIME`\n done\n\n # Get network stats\n BR=0\n BT=0\n IFS=$'\\n'\n NET=($(cat /proc/net/dev | grep 'eth0') )\n unset IFS\n length=${#NET[@]}\n #Parse multiple network adapters if they exist\n if [ $length > 1 ]\n then\n for (( i=0 ; i < length; i++ ))\n do\n currnet=(${NET[$i]})\n BR=`expr ${currnet[1]} + $BR`\n BT=`expr ${currnet[9]} + $BT`\n done\n else\n NET=(`cat /proc/net/dev | grep 'eth0'`)\n space=`expr substr $NET 6 1`\n # Need to determine which column to use based on spacing of 1st col\n if [ -z $space ]\n then\n BR=${NET[1]}\n BT=${NET[9]}\n else\n BR=`expr substr $NET 6 500`\n BT=${NET[8]}\n fi\n fi\n LOADAVG=(`cat /proc/loadavg`)\n LAVG=${LOADAVG[0]}\n\n # Get Memory Stats\n MEMTOT=$(cat /proc/meminfo | grep 'MemTotal' | cut -d\":\" -f 2 | sed 's/^ *//' | cut -d\" \" -f 1 ) # in KB\n\n MEMFREE=$(cat /proc/meminfo | grep 'MemFree' | cut -d\":\" -f 2 | sed 's/^ *//' | cut -d\" \" -f 1 ) # in KB\n\n BUFFERS=$(cat /proc/meminfo | grep 'Buffers' | cut -d\":\" -f 2 | sed 's/^ *//' | cut -d\" \" -f 1 ) # in KB\n\n CACHED=$(cat /proc/meminfo | grep -w 'Cached' | cut -d\":\" -f 2 | sed 's/^ *//' | cut -d\" \" -f 1 ) # in KB\n\n\n vmid=\"unavailable\"\n\n T_VM_2=$(date +%s%3N)\n let T_VM=$T_VM_2-$T_VM_1\n\n\t\n #experimental pagefault\n filedata() {\n volumes=$(cat $1 | grep -m 1 -i $2)\n tr \" \" \"\\n\" <<< $volumes | tail -n1 \n \n }\n vPGFault=$(filedata \"/proc/vmstat\" \"pgfault\")\n vMajorPGFault=$(filedata \"/proc/vmstat\" \"pgmajfault\")\n #\n\n output+=$' \\\"currentTime\\\": '\"$epochtime\"\n output+=$',\\n'\n output+=$' \\\"vMetricType\\\": \\\"VM level\\\",\\n'\n output+=$' \\\"vTime\\\": '\"$T_VM\"\n output+=$',\\n'\n\n ## print VM level data \n output+=\" \\\"vCpuTime\\\": $CPUTOT\"\n output+=$',\\n'\n output+=\" \\\"tvCpuTime\\\": $T_CPUTOT\"\n output+=$',\\n'\n output+=\" \\\"vDiskSectorReads\\\": $SR\"\n output+=$',\\n'\n output+=\" \\\"vDiskSectorWrites\\\": $SW\"\n output+=$',\\n'\n output+=\" \\\"vNetworkBytesRecvd\\\": $BR\"\n output+=$',\\n'\n output+=\" \\\"vNetworkBytesSent\\\": $BT\"\n output+=$',\\n'\n output+=\" \\\"vPgFault\\\": $vPGFault\"\n output+=$',\\n'\n output+=\" \\\"vMajorPageFault\\\": $vMajorPGFault\"\n output+=$',\\n'\n output+=\" \\\"vCpuTimeUserMode\\\": $CPUUSR\"\n output+=$',\\n'\n output+=\" \\\"tvCpuTimeUserMode\\\": $T_CPUUSR\"\n output+=$',\\n'\n output+=\" \\\"vCpuTimeKernelMode\\\": $CPUKRN\"\n output+=$',\\n'\n output+=\" \\\"tvCpuTimeKernelMode\\\": $T_CPUKRN\"\n output+=$',\\n'\n output+=\" \\\"vCpuIdleTime\\\": $CPUIDLE\"\n output+=$',\\n'\n output+=\" \\\"tvCpuIdleTime\\\": $T_CPUIDLE\"\n output+=$',\\n'\n output+=\" \\\"vCpuTimeIOWait\\\": $CPUIOWAIT\"\n output+=$',\\n'\n output+=\" \\\"tvCpuTimeIOWait\\\": $T_CPUIOWAIT\"\n output+=$',\\n'\n output+=\" \\\"vCpuTimeIntSrvc\\\": $CPUIRQ\"\n output+=$',\\n'\n output+=\" \\\"tvCpuTimeIntSrvc\\\": $T_CPUIRQ\"\n output+=$',\\n'\n output+=\" \\\"vCpuTimeSoftIntSrvc\\\": $CPUSOFTIRQ\"\n output+=$',\\n'\n output+=\" \\\"tvCpuTimeSoftIntSrvc\\\": $T_CPUSOFTIRQ\"\n output+=$',\\n'\n output+=\" \\\"vCpuContextSwitches\\\": $CSWITCH\"\n output+=$',\\n'\n output+=\" \\\"tvCpuContextSwitches\\\": $T_CSWITCH\"\n output+=$',\\n'\n output+=\" \\\"vCpuNice\\\": $CPUNICE\"\n output+=$',\\n'\n output+=\" \\\"tvCpuNice\\\": $T_CPUNICE\"\n output+=$',\\n'\n output+=\" \\\"vCpuSteal\\\": $CPUSTEAL\"\n output+=$',\\n'\n output+=\" \\\"tvCpuSteal\\\": $T_CPUSTEAL\"\n output+=$',\\n'\n output+=\" \\\"vDiskSuccessfulReads\\\": $COMPLETEDREADS\"\n output+=$',\\n'\n output+=\" \\\"vDiskMergedReads\\\": $MERGEDREADS\"\n output+=$',\\n'\n output+=\" \\\"vDiskReadTime\\\": $READTIME\"\n output+=$',\\n'\n output+=\" \\\"vDiskSuccessfulWrites\\\": $COMPLETEDWRITES\"\n output+=$',\\n'\n output+=\" \\\"vDiskMergedWrites\\\": $MERGEDWRITES\"\n output+=$',\\n'\n output+=\" \\\"vDiskWriteTime\\\": $WRITETIME\"\n output+=$',\\n'\n output+=\" \\\"vMemoryTotal\\\": $MEMTOT\" \n output+=$',\\n'\n output+=\" \\\"vMemoryFree\\\": $MEMFREE\"\n output+=$',\\n'\n output+=\" \\\"vMemoryBuffers\\\": $BUFFERS\"\n output+=$',\\n'\n output+=\" \\\"vMemoryCached\\\": $CACHED\"\n output+=$',\\n'\n output+=\" \\\"vLoadAvg\\\": $LAVG\"\n output+=$',\\n'\n output+=\" \\\"vId\\\": \\\"$vmid\\\"\"\n output+=$',\\n'\n output+=\" \\\"vCpuType\\\": \\\"$CPUTYPE\\\"\"\n output+=$',\\n'\n output+=\" \\\"tvCpuType\\\": $T_CPUTYPE\"\n output+=$',\\n'\n output+=\" \\\"vCpuMhz\\\": \\\"$CPUMHZ\\\"\"\n output+=$',\\n'\n \n\n\n if [ $CONTAINER = true ] || [ $PROCESS = true ];\n then\n\toutput+=$' \\\"tvCpuMhz\\\": '\"$T_CPUMHZ\"\n\toutput+=$',\\n'\n else\n\toutput+=$' \\\"tvCpuMhz\\\": '\"$T_CPUMHZ\"\n\toutput+=$'\\n'\n fi\nfi\n\n\n## Container level metrics\nif [ $CONTAINER = true ]\nthen\n #echo \"CONTAINER is Running!!\"\n T_CNT_1=$(date +%s%3N)\n\n output+=$' \\\"cMetricType\\\": '\"\\\"Container level\\\"\"\n output+=$',\\n'\n\n # Get CPU stats\n\n CPUUSRC=$(cat /sys/fs/cgroup/cpuacct/cpuacct.stat | grep 'user' | cut -d\" \" -f 2) # in cs\n T_CPUUSRC=$(date +%s%3N)\n\n CPUKRNC=$(cat /sys/fs/cgroup/cpuacct/cpuacct.stat | grep 'system' | cut -d\" \" -f 2) # in cs\n T_CPUKRNC=$(date +%s%3N)\n\n CPUTOTC=$(cat /sys/fs/cgroup/cpuacct/cpuacct.usage) # in ns\n T_CPUTOTC=$(date +%s%3N)\n\n IFS=$'\\n'\n\n PROS=(`cat /proc/cpuinfo | grep 'processor' | cut -d\":\" -f 2`)\n NUMPROS=${#PROS[@]}\n T_NUMPROS=$(date +%s%3N)\n\n\n # Get disk stats\n\n # Get disk major:minor numbers, store them in disk_arr\n # Grep disk first using lsblk -a, find type \"disk\" and then find the device number\n IFS=$'\\n'\n lines=($(lsblk -a | grep 'disk'))\n unset IFS\n disk_arr=()\n for line in \"${lines[@]}\"\n do \n temp=($line)\n disk_arr+=(${temp[1]})\n done\n\n\n arr=($(cat /sys/fs/cgroup/blkio/blkio.sectors | grep 'Total' | cut -d\" \" -f 2))\n\n # if arr is empty, then assign 0; else, sum up all elements in arr\n if [ -z \"$arr\" ]; then\n SRWC=0\n else\n SRWC=$( ( IFS=+; echo \"${arr[*]}\" ) | bc )\n fi\n\n\n IFS=$'\\n'\n arr=($(cat /sys/fs/cgroup/blkio/blkio.throttle.io_service_bytes | grep 'Read')) # in Bytes\n unset IFS\n\n if [ -z \"$arr\" ]; then\n BRC=0\n else\n BRC=0\n for line in \"${arr[@]}\"\n do \n temp=($line)\n for elem in \"${disk_arr[@]}\"\n do \n if [ \"$elem\" == \"${temp[0]}\" ]\n then\n BRC=$(echo \"${temp[2]} + $BRC\" | bc)\n fi\n done\n done\n fi\n\n\n\n IFS=$'\\n'\n arr=($(cat /sys/fs/cgroup/blkio/blkio.throttle.io_service_bytes | grep 'Write')) # in Bytes\n unset IFS\n\n if [ -z \"$arr\" ]; then\n BWC=0\n else\n BWC=0\n for line in \"${arr[@]}\"\n do \n temp=($line)\n for elem in \"${disk_arr[@]}\"\n do \n if [ \"$elem\" == \"${temp[0]}\" ]\n then\n BWC=$(echo \"${temp[2]} + $BWC\" | bc)\n fi\n done\n done\n fi\n\n\n # Get network stats\n\n NET=(`cat /proc/net/dev | grep 'eth0'`)\n NRC=${NET[1]} # bytes received\n [[ -z \"$NRC\" ]] && NRC=0\n\n NTC=${NET[9]} # bytes transmitted\n [[ -z \"$NTC\" ]] && NTC=0\n\n\n #Get container ID\n CIDS=$(cat /etc/hostname)\n\n # Get memory stats\n MEMUSEDC=$(cat /sys/fs/cgroup/memory/memory.usage_in_bytes)\n MEMMAXC=$(cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes)\n\n unset IFS\n CPUPERC=(`cat /sys/fs/cgroup/cpuacct/cpuacct.usage_percpu`) # in ns, 0, 1, 2, 3 elements\n T_CPUPERC=$(date +%s%3N)\n\n T_CNT_2=$(date +%s%3N)\n let T_CNT=$T_CNT_2-T_CNT_1\n\n cPGFault=$(filedata \"/sys/fs/cgroup/memory/memory.stat\" \"pgfault\")\n cMajorPGFault=$(filedata \"/sys/fs/cgroup/memory/memory.stat\" \"pgmajfault\")\n\n\n # print container level data\n output+=\" \\\"cTime\\\": $T_CNT\"\n output+=$',\\n'\n output+=\" \\\"cCpuTime\\\": $CPUTOTC\"\n output+=$',\\n'\n output+=\" \\\"tcCpuTime\\\": $T_CPUTOTC\"\n output+=$',\\n'\n output+=\" \\\"cNumProcessors\\\": $NUMPROS\"\n output+=$',\\n'\n output+=\" \\\"cPGFault\\\": $cPGFault\"\n output+=$',\\n'\n output+=\" \\\"cMajorPGFault\\\": $cMajorPGFault\"\n output+=$',\\n'\n output+=\" \\\"tcNumProcessors\\\": $T_NUMPROS\"\n output+=$',\\n'\n output+=\" \\\"cProcessorStats\\\": {\"\n output+=$'\\n'\n\n\n for (( i=0; i<NUMPROS; i++ ))\n do \n output+=$\" \\\"cCpu${i}TIME\\\": ${CPUPERC[$i]}\"\n output+=$',\\n'\n done\n\n output+=\" \\\"tcCpu#TIME\\\": $T_CPUPERC\"\n output+=$',\\n'\n output+=\" \\\"cNumProcessors\\\": $NUMPROS\"\n output+=$'\\n },\\n'\n output+=\" \\\"cCpuTimeUserMode\\\": $CPUUSRC\"\n output+=$',\\n'\n output+=\" \\\"tcCpuTimeUserMode\\\": $T_CPUUSRC\"\n output+=$',\\n'\n output+=\" \\\"cCpuTimeKernelMode\\\": $CPUKRNC\"\n output+=$',\\n'\n output+=\" \\\"tcCpuTimeKernelMode\\\": $T_CPUKRNC\"\n output+=$',\\n'\n output+=\" \\\"cDiskSectorIO\\\": $SRWC\"\n output+=$',\\n'\n output+=\" \\\"cDiskReadBytes\\\": $BRC\"\n output+=$',\\n'\n output+=\" \\\"cDiskWriteBytes\\\": $BWC\"\n output+=$',\\n'\n output+=\" \\\"cNetworkBytesRecvd\\\": $NRC\"\n output+=$',\\n'\n output+=\" \\\"cNetworkBytesSent\\\": $NTC\"\n output+=$',\\n'\n output+=\" \\\"cMemoryUsed\\\": $MEMUSEDC\"\n output+=$',\\n'\n\n\n output+=\" \\\"cMemoryMaxUsed\\\": $MEMMAXC\"\n output+=$',\\n'\n output+=\" \\\"cId\\\": \\\"$CIDS\\\"\"\n output+=$',\\n'\n output+=\" \\\"cNumProcesses\\\": $PIDS\"\n output+=$',\\n'\n output+=\" \\\"pMetricType\\\": \\\"Process level\\\"\"\n\n\n\n if [ $PROCESS = true ];\n then\n output+=$',\\n'\n else\n output+=$'\\n'\n fi\nfi\n\n## Process level metrics\n\nif [ $PROCESS = true ]\nthen\n #echo \"PROCESS is Running!!\"\n\n T_PRC_1=$(date +%s%3N)\n # For each process, parse the data\n\n # command cat $outfile in the last line of the script\n # and ./rudataall.sh are counted as 2 extra processes, so -2 here for PIDS\n\n output+=\" \\\"pProcesses\\\": [\"\n output+=$'\\n'\n\n\n declare -A \"profilerPid=( $(pgrep \"rudataall.sh\" -v | sed 's/[^ ]*/[&]=&/g') )\"\n for i in \"${!profilerPid[@]}\"\n do\n\tparent=$(ps -o ppid= ${profilerPid[$i]})\n\tparent_nowhite_space=\"$(echo -e \"${parent}\" | tr -d '[:space:]')\"\n\t\n\tif [[ ! \" ${profilerPid[@]} \" =~ \" ${parent_nowhite_space} \" ]]; then\t\t\n\t\t#this if statement checks if parent of pid is not in the list of all profiler proesses.\n\t\t#check if pid still exists\n\n\t\tSTAT=(`cat /proc/${profilerPid[$i]}/stat 2>/dev/null`)\n\t\tif (( ${#STAT[@]} )); then\n\t\t\t PID=${STAT[0]}\n\t\t\t PSHORT=$(echo $(echo ${STAT[1]} | cut -d'(' -f 2 ))\n\t\t\t PSHORT=${PSHORT%?}\n\t\t\t NUMTHRDS=${STAT[19]}\n\n\t\t\t # Get process CPU stats\n\t\t\t UTIME=${STAT[13]}\n\t\t\t STIME=${STAT[14]}\n\t\t\t CUTIME=${STAT[15]}\n\t\t\t CSTIME=${STAT[16]}\n\t\t\t TOTTIME=$((${UTIME} + ${STIME}))\n\n\t\t\t # context switch !! need double check result format\n\t\t\t VCSWITCH=$(cat /proc/${profilerPid[$i]}/status | grep \"^voluntary_ctxt_switches\" | \\\n\t\t\t cut -d\":\" -f 2 | sed 's/^[ \\t]*//') \n\t\t\t NVCSSWITCH=$(cat /proc/${profilerPid[$i]}/status | grep \"^nonvoluntary_ctxt_switches\" | \\\n\t\t\t cut -d\":\" -f 2 | sed 's/^[ \\t]*//') \n\n\t\t\t # Get process disk stats\n\t\t\t DELAYIO=${STAT[41]}\n\t\t\t pPGFault=$(cat /proc/${profilerPid[$i]}/stat | cut -d' ' -f 10)\n\t\t\t pMajorPGFault=$(cat /proc/${profilerPid[$i]}/stat | cut -d' ' -f 12)\n\n\t\t\t # Get process memory stats\n\t\t\t VSIZE=${STAT[22]} # in Bytes\n\t\t\t RSS=${STAT[23]} # in pages\n\n\t\t\t PNAME=$(cat /proc/${profilerPid[$i]}/cmdline | tr \"\\0\" \" \")\n\t \t\t PNAME=${PNAME%?}\n\n\t\t\t # print process level data\n\t \t\t output+=$' {\\n'\n\t \t\t output+=\" \\\"pId\\\": $PID\"\n\t \t\t output+=$',\\n'\n\t \n\n\n\t\t\t \n\t\t\t if jq -e . >/dev/null 2>&1 <<<\"\\\"$PNAME\\\"\"; then\n\t\t\t\t:\n\t\t\t else\n\t\t\t\tPNAME=\"Invalid Json\"\n\t\t\t fi\n\n\n\t \t\t output+=\" \\\"pCmdLine\\\":\\\"$PNAME\\\"\"\n\t \t\t output+=$',\\n'\n\t \t\t output+=\" \\\"pName\\\":\\\"$PSHORT\\\"\"\n\t \t\t output+=$',\\n'\n\t \t\t output+=\" \\\"pNumThreads\\\": $NUMTHRDS\"\n\t \t\t output+=$',\\n'\n\t \t\t output+=\" \\\"pCpuTimeUserMode\\\": $UTIME\"\n\t \t\t output+=$',\\n'\n\t \t\t output+=\" \\\"pCpuTimeKernelMode\\\": $STIME\"\n\t \t\t output+=$',\\n'\n\t \t\t output+=\" \\\"pChildrenUserMode\\\": $CUTIME\"\n\t \t\t output+=$',\\n'\n\t \t\t output+=\" \\\"pPGFault\\\": $pPGFault\"\n\t \t\t output+=$',\\n'\n\t \t\t output+=\" \\\"pMajorPGFault\\\": $pMajorPGFault\"\n\t \t\t output+=$',\\n'\n\t \t\t output+=\" \\\"pChildrenKernelMode\\\": $CSTIME\"\n\t \t\t output+=$',\\n'\n\n\n\n\t\t\t if [ -z \"$VCSWITCH\" ];\n\t\t\t then\n\t\t\t\tVCSWITCH=\"NA\"\n\t\t\t fi\n\t\t\t output+=\" \\\"pVoluntaryContextSwitches\\\": $VCSWITCH\"\n\t\t\t output+=$',\\n'\n\n\t\t\t if [ -z \"$NVCSSWITCH\" ];\n\t\t\t then\n\t\t\t\tNVCSSWITCH=\"NA\"\n\t\t\t fi\n\t\t output+=\" \\\"pNonvoluntaryContextSwitches\\\": $NVCSSWITCH\"\n\t\t output+=$',\\n'\n\n\n\n\t\t\t output+=\" \\\"pBlockIODelays\\\": $DELAYIO\"\n\t\t\t output+=$',\\n'\n\t\t\t output+=\" \\\"pVirtualMemoryBytes\\\": $VSIZE\"\n\t\t\t output+=$',\\n'\n\t\t\t output+=\" \\\"pResidentSetSize\\\": $RSS\"\n\t\t\t output+=$'\\n }, \\n'\n\t\tfi\n\tfi\n\n\n done\n\n T_PRC_2=$(date +%s%3N)\n let T_PRC=$T_PRC_2-$T_PRC_1\n output+=\" {\\\"cNumProcesses\\\": $PIDS\"\n output+=$',\\n'\n output+=\" \\\"pTime\\\": $T_PRC\"\n output+=$',\\n'\n write_time_end=$(date '+%s%3N')\n let profile_time=$write_time_end-$write_time_start\n output+=\" \\\"profileTime\\\": $profile_time\"\n output+=$'}'\n \n \n output+=$'\\n ]\\n'\nfi\n\noutput+=$'}'\necho \"$output\" & > experimental.json\n\n" }, { "alpha_fraction": 0.6948739290237427, "alphanum_fraction": 0.7068690657615662, "avg_line_length": 32.114906311035156, "blob_id": "669bed372f812e3d2595740354b1afcae5b9ed79", "content_id": "08727904bfbc34e7541affa3b920976c9ef95249", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10671, "license_type": "no_license", "max_line_length": 124, "num_lines": 322, "path": "/Profiler_Python/src/rudataall-psutil.py", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "import psutil\nimport json\nimport argparse\nfrom datetime import datetime\nimport re \nimport subprocess\nimport os.path\nfrom os import path\n\n#add the virtual level.\nCORRECTION_MULTIPLIER=100\nCORRECTION_MULTIPLIER_MEMORY=(1/1000)\n\nparser = argparse.ArgumentParser(description='process path and file /or string of metrics.')\nparser.add_argument('output_dir', action='store', help='stores directory to where the files will be output to')\nparser.add_argument(\"-v\", \"--vm_profiling\", action=\"store_true\", default=False, help='list of metrics to graph over')\nparser.add_argument(\"-c\", \"--container_profiling\", action=\"store_true\", default=False, help='list of metrics to graph over')\nparser.add_argument(\"-p\", \"--processor_profiling\", action=\"store_true\", default=False, help='list of metrics to graph over')\nargs= parser.parse_args()\noutput_dir = args.output_dir\n\nif all(v is False for v in [args.vm_profiling, args.container_profiling, args.processor_profiling]):\n\targs.vm_profiling = True\n\targs.container_profiling=True\n\targs.processor_profiling=True\n\nfilename = datetime.now().strftime(output_dir+\"/%Y_%m_%d_%H_%M_%S.json\")\noutput_dict={}\n\n\ndef getContainerInfo():\n\t\n\t\n\tcpuTime_file = open(\"/sys/fs/cgroup/cpuacct/cpuacct.usage\", \"r\")\n\tcpuTime=int(cpuTime_file.readline())\n\n\n\tcontainer_mem_file = open(\"/sys/fs/cgroup/memory/memory.stat\", \"r\")\n\tcontainer_mem_stats=container_mem_file.read()#line().split()\n\tcpgfault = int(re.findall(r'pgfault.*', container_mem_stats)[0].split()[1])\n\tcpgmajfault = int(re.findall(r'pgmajfault.*', container_mem_stats)[0].split()[1])\n\t\n\tcpuinfo_file= open(\"/proc/stat\", \"r\")\n\tcpuinfo_file_stats=cpuinfo_file.read()\n\tcCpuTimeUserMode = int(re.findall(r'cpu.*', cpuinfo_file_stats)[0].split()[1])\n\tcCpuTimeKernelMode = int(re.findall(r'cpu.*', cpuinfo_file_stats)[0].split()[3])\n\t\n\n\tcProcessorStatsFile= open(\"/sys/fs/cgroup/cpuacct/cpuacct.usage_percpu\", \"r\")\n\tcProcessorStatsFileArr= cProcessorStatsFile.readline().split()\n\tcProcessorDict={}\n\tcount =0\n\tfor el in cProcessorStatsFileArr:\n\t\ttemp_str=\"cCpu${}TIME\".format(count)\n\t\tcount+=1\n\t\tcProcessorDict[temp_str]=int(el)\n\t\n\tcDiskSectorIO=0\n\tif path.exists('/sys/fs/cgroup/blkio/blkio.sectors'):\n\t\tcDiskSectorIOFile=open(\"/sys/fs/cgroup/blkio/blkio.sectors\", \"r\")\n\t\tcDiskSectorIOFileArr = re.findall(r'cpu.*', cDiskSectorIOFile)[0].split()\n\t\tcDiskSectorIO=sum(cDiskSectorIOFileArr)\n\tcDiskReadBytes=0\n\tcDiskWriteBytes=0\n\n\t\n\ttry:\n\t\tcmd1= ['lsblk', '-a']\n\t\tcmd2=['grep', 'disk']\n\t\tp1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE)\n\t\tp2 = subprocess.Popen(cmd2, stdin=p1.stdout, stdout=subprocess.PIPE)\n\t\to, e = p2.communicate()\n\t\tmajor_minor_arr=[]\n\t\t\n\t\tfor line in o.decode('UTF-8').split(sep='\\n')[:-1]:\n\t\t\tmajor_minor_arr.append(line.split()[1])\n\n\t\t # temp=($line)\n\t\t # disk_arr+=(${temp[1]})\n\t\t #done\n\t\t#major_minor=str(o.decode('UTF-8')).split()[1]\n\t\t\n\n\t\tcDiskReadBytesFile=open(\"/sys/fs/cgroup/blkio/blkio.throttle.io_service_bytes\", \"r\")\n\t\tcProcessorStatsFile_info=cDiskReadBytesFile.read()\n\t\tcDiskReadBytesArr=re.findall(r'.*Read.*', cProcessorStatsFile_info)\n\n\t\tfor el in cDiskReadBytesArr:\n\t\t\ttemp_arr = el.split()\n\t\t\tfor major_minor in major_minor_arr:\n\t\t\t\tif (temp_arr[0] == major_minor):\n\t\t\t\t\tcDiskReadBytes += int(temp_arr[2])\n\n\texcept:\n\t\tpass\n\n\n\ttry:\n\t\tcmd1= ['lsblk', '-a']\n\t\tcmd2=['grep', 'disk']\n\t\tp1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE)\n\t\tp2 = subprocess.Popen(cmd2, stdin=p1.stdout, stdout=subprocess.PIPE)\n\t\to, e = p2.communicate()\n\t\tmajor_minor_arr=[]\n\t\t\n\t\tfor line in o.decode('UTF-8').split(sep='\\n')[:-1]:\n\t\t\tmajor_minor_arr.append(line.split()[1])\n\n\t\tcDiskWriteBytesFile=open(\"/sys/fs/cgroup/blkio/blkio.throttle.io_service_bytes\", \"r\")\n\t\tcProcessorStatsFile_info=cDiskWriteBytesFile.read()\n\t\tcDiskWriteBytesArr=re.findall(r'.*Write.*', cProcessorStatsFile_info)\n\t\tfor el in cDiskWriteBytesArr:\n\t\t\ttemp_arr = el.split()\n\t\t\tfor major_minor in major_minor_arr:\n\t\t\t\tif (temp_arr[0] == major_minor):\n\t\t\t\t\tcDiskWriteBytes += int(temp_arr[2])\n\n\texcept:\n\t\tpass\n\n\n\n\n\t\n\tcNetworkBytesFile=open(\"/proc/net/dev\", \"r\")\n\tcNetworkBytesFileStats=cNetworkBytesFile.read()\n\tcNetworkBytesRecvd=0\n\tcNetworkBytesSent=0\n\ttry:\n\t\tcNetworkBytesArr=re.findall(r'eth0.*',cNetworkBytesFileStats)[0].split()\n\t\tcNetworkBytesRecvd=int(cNetworkBytesArr[1])\n\t\tcNetworkBytesSent=int(cNetworkBytesArr[9])\n\n\texcept:\t\n\t\tpass\n\t\t\n\n\n\tMEMUSEDC_file=open(\"/sys/fs/cgroup/memory/memory.usage_in_bytes\", \"r\")\n\tMEMMAXC_file=open(\"/sys/fs/cgroup/memory/memory.max_usage_in_bytes\", \"r\")\n\tcMemoryUsed=int(MEMUSEDC_file.readline().rstrip('\\n'))\n\tcMemoryMaxUsed=int(MEMMAXC_file.readline().rstrip('\\n'))\n\n\tcId_file=open(\"/etc/hostname\", \"r\")\n\tcId=cId_file.readline().rstrip('\\n')\n\t#CPU=(`cat /proc/stat | grep '^cpu '`)\n\n\tcNumProcesses = sum(1 for line in open(\"/sys/fs/cgroup/pids/tasks\", \"r\")) -2\n\n\n\n\tcontainer_dict={\t\t\n\t\t\"cCpuTime\": cpuTime,\n\t\t\"cNumProcessors\": psutil.cpu_count(),\n\t\t\"cPGFault\": cpgfault,\n\t\t\"cMajorPGFault\": cpgmajfault,\n\t\t\"cProcessorStats\": cProcessorDict,\n\t\t\"cCpuTimeUserMode\": cCpuTimeUserMode,\n\t\t\"cCpuTimeKernelMode\": cCpuTimeKernelMode,\n\t\t\"cDiskSectorIO\": cDiskSectorIO,\n\t\t\"cDiskReadBytes\": cDiskReadBytes,\n\t\t\"cDiskWriteBytes\": cDiskWriteBytes\t,\n\t\t\"cNetworkBytesRecvd\":cNetworkBytesRecvd,\n\t\t\"cNetworkBytesSent\": cNetworkBytesSent,\n\t\t\"cMemoryUsed\": cMemoryUsed,\n\t\t\"cMemoryMaxUsed\": cMemoryMaxUsed,\t\n\t\t\"cId\": cId,\n\t\t\"cNumProcesses\": cNumProcesses,\n\t\t\"pMetricType\": \"Process level\"\n\t}\n\treturn container_dict\n\ndef getVmInfo():\n\tcpu_info=psutil.cpu_times()\n\tnet_info=psutil.net_io_counters(nowrap=True)\n\tcpu_info2=psutil.cpu_stats()\n\tdisk_info=psutil.disk_io_counters()\n\tmemory=psutil.virtual_memory()\n\tloadavg=psutil.getloadavg()\n\tcpu_freq=psutil.cpu_freq()\n\n\n\tvm_file = open(\"/proc/vmstat\", \"r\")\n\tvm_file_stats=vm_file.read()#line().split()\n\tpgfault = int(re.findall(r'pgfault.*', vm_file_stats)[0].split()[1])\n\tpgmajfault = int(re.findall(r'pgmajfault.*', vm_file_stats)[0].split()[1])\n\n\t\n\tcpuinfo_file= open(\"/proc/cpuinfo\", \"r\")\n\tcpuinfo_file_stats=cpuinfo_file.read()\n\tvCpuType = re.findall(r'model name.*', cpuinfo_file_stats)[0].split(sep=\": \")[1]\n\n\tkernel_info=str(subprocess.Popen(\"uname -a\", shell=True, stdout =subprocess.PIPE).communicate()[0][:-1], 'utf-8')\n\n\tcmd1=['lsblk', '-nd', '--output', 'NAME,TYPE']\n\tcmd2=['grep','disk']\n\tp1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE)\n\tp2 = subprocess.Popen(cmd2, stdin=p1.stdout, stdout=subprocess.PIPE)\n\to, e = p2.communicate()\n\n\tmounted_filesys=str(o.decode('UTF-8').split()[0])\n\tvm_disk_file=open(\"/proc/diskstats\", \"r\")\n\tvm_disk_file_stats=vm_disk_file.read()\n\tvDiskSucessfulReads=int(re.findall(rf\"{mounted_filesys}.*\", vm_disk_file_stats)[0].split(sep=\" \")[1])\n\tvDiskSucessfulWrites=int(re.findall(rf\"{mounted_filesys}.*\", vm_disk_file_stats)[0].split(sep=\" \")[5])\n\n\n\tvm_dict={\n\t\t\"vMetricType\" : \"VM Level\",\n\t\t\"vKernelInfo\" : kernel_info,\n\t\t\"vCpuTime\" : (cpu_info[0] + cpu_info[2]) *CORRECTION_MULTIPLIER ,\n\t\t\"vDiskSectorReads\" : disk_info[2]/512, \n\t\t\"vDiskSectorWrites\" : disk_info[3]/512,\n\t\t\"vNetworkBytesRecvd\" : net_info[1],\n\t\t\"vNetworkBytesSent\" : net_info[0], \n\t\t\"vPgFault\" : int(pgfault),\n\t\t\"vMajorPageFault\" : int(pgmajfault),\n\t\t\"vCpuTimeUserMode\" : cpu_info[0] * CORRECTION_MULTIPLIER, \n\t\t\"vCpuTimeKernelMode\" : cpu_info[2] * CORRECTION_MULTIPLIER,\n\t\t\"vCpuIdleTime\" : cpu_info[3]* CORRECTION_MULTIPLIER,\n\t\t\"vCpuTimeIOWait\" : cpu_info[4]* CORRECTION_MULTIPLIER,\n\t\t\"vCpuTimeIntSrvc\" : cpu_info[5]* CORRECTION_MULTIPLIER,\n\t\t\"vCpuTimeSoftIntSrvc\" : cpu_info[6] * CORRECTION_MULTIPLIER,\n\t\t\"vCpuContextSwitches\" : cpu_info2[0]* CORRECTION_MULTIPLIER,\n\t\t\"vCpuNice\" : cpu_info[1]* CORRECTION_MULTIPLIER,\n\t\t\"vCpuSteal\" : cpu_info[7]* CORRECTION_MULTIPLIER,\n\t\t\"vBootTime\" : psutil.boot_time(),\n\n\t\t\"vDiskSuccessfulReads\" : vDiskSucessfulReads,\n\t\t\"vDiskMergedReads\" : disk_info[6],\n\t\t\"vDiskReadTime\" : disk_info[4],\n\t\t\"vDiskSuccessfulWrites\" : vDiskSucessfulWrites,\n\t\t\"vDiskMergedWrites\" : disk_info[7],\n\t\t\"vDiskWriteTime\" : disk_info[5],\n\t\t\"vMemoryTotal\" : round(memory[0] * CORRECTION_MULTIPLIER_MEMORY),\t\n\t\t\"vMemoryFree\" : round(memory[4]* CORRECTION_MULTIPLIER_MEMORY),\n\t\t\"vMemoryBuffers\" : round(memory[7]* CORRECTION_MULTIPLIER_MEMORY),\n\t\t\"vMemoryCached\" : round(memory[8]* CORRECTION_MULTIPLIER_MEMORY),\n\t\t\"vLoadAvg\" : loadavg[0],\n\t\t\"vId\" : \"unavailable\",\n\t\t\"vCpuType\" : vCpuType,\n\t\t\"vCpuMhz\" : cpu_freq[0]\n\t}\n\treturn vm_dict\n\ndef getProcInfo():\n\t#need to get pPGFault/pMajorPGFault in a different verbosity level: maybe called MP for manual process\n\t#pResidentSetSize needs to be get in MP\n\t\n\tdictlist=[]\n\tfor proc in psutil.process_iter():\n\t\t#procFile=\"/proc/{}/stat\".format(proc.pid) \n\t\t#log = open(procFile, \"r\")\n\t\t#pidProcStat=log.readline().split()\n\n\t\tcurr_dict={\n\t\t\t\"pId\" : proc.pid,\n\t\t\t\"pCmdline\" : \" \".join(proc.cmdline()),\n\t\t\t\"pName\" : proc.name(),\n\t\t\t\"pNumThreads\" : proc.num_threads(),\n\t\t\t\"pCpuTimeUserMode\" : proc.cpu_times()[0]* CORRECTION_MULTIPLIER,\n\t\t\t\"pCpuTimeKernelMode\" : proc.cpu_times()[1]* CORRECTION_MULTIPLIER,\n\t\t\t\"pChildrenUserMode\" : proc.cpu_times()[2]* CORRECTION_MULTIPLIER,\n\t\t\t\"pChildrenKernelMode\" : proc.cpu_times()[3]* CORRECTION_MULTIPLIER,\n\t\t\t#\"pPGFault\" : int(pidProcStat[9]),\n\t\t\t#\"pMajorPGFault\" : int(pidProcStat[11]),\n\t\t\t\"pVoluntaryContextSwitches\" : proc.num_ctx_switches()[0],\t\t\n\t\t\t\"pInvoluntaryContextSwitches\" : proc.num_ctx_switches()[1],\t\t\n\t\t\t\"pBlockIODelays\" : proc.cpu_times()[4]* CORRECTION_MULTIPLIER,\n\t\t\t\"pVirtualMemoryBytes\" : proc.memory_info()[1]\n\t\t\t#\"pResidentSetSize\" : proc.memory_info()[0] \t \n\n\t\t}\n\t\t\n\n\t\tdictlist.append(curr_dict)\n\treturn dictlist\n\n\nseconds_since_epoch = round(datetime.now().timestamp())\noutput_dict[\"currentTime\"] = seconds_since_epoch\t\t#bad value.\n\nif args.vm_profiling == True:\n\ttime_start_VM=datetime.now()\n\tvm_info=getVmInfo()\n\ttime_end_VM=datetime.now()\n\tVM_write_time=time_end_VM-time_start_VM\n\n\toutput_dict.update(vm_info)\nif args.container_profiling == True:\n\ttime_start_container=datetime.now()\n\tcontainer_info=getContainerInfo()\n\ttime_end_container=datetime.now()\n\tcontainer_write_time=time_end_container-time_start_container\n\n\toutput_dict.update(container_info)\nif args.processor_profiling == True:\n\ttime_start_proc=datetime.now()\n\tprocces_info=getProcInfo()\n\ttime_end_proc=datetime.now()\n\tprocess_write_time=time_end_proc-time_start_proc\n\n\toutput_dict[\"pProcesses\"] = procces_info\n\n\nif args.vm_profiling == True:\n\toutput_dict[\"VM_Write_Time\"] = VM_write_time.total_seconds()\nif args.container_profiling == True:\n\toutput_dict[\"Container_Write_Time\"] = container_write_time.total_seconds()\nif args.processor_profiling == True:\n\toutput_dict[\"Process_Write_Time\"] = process_write_time.total_seconds()\n\n\n\n\n\n\n\n\n\nwith open(filename, 'w') as outfile: \n json.dump(output_dict, outfile, indent=4)\n \n\n\n\n\n\n\n" }, { "alpha_fraction": 0.8591549396514893, "alphanum_fraction": 0.8591549396514893, "avg_line_length": 71, "blob_id": "960fb0a0ef79b8b113f68865965faeb1b7d932b0", "content_id": "005e05e8031e2ae8af9f889266271340c6845bfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "no_license", "max_line_length": 71, "num_lines": 1, "path": "/Profiler_Bash/ubuntu/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "Sampling container for Ubuntu release of Bash based Container profiler." }, { "alpha_fraction": 0.6526104211807251, "alphanum_fraction": 0.6526104211807251, "avg_line_length": 21.68181800842285, "blob_id": "68bdb4a6726ebd8585b7888f430eb18c3d77804c", "content_id": "f6d3c7851b8a67e24df47ba507190bb21646746f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 498, "license_type": "no_license", "max_line_length": 194, "num_lines": 22, "path": "/Profiler_Python/ubuntu/runubuntupythonsampler.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#author: Tanmay Shah <[email protected]>\n\nsource $PWD/run.cfg\n\nmkdir $OUTDIR\n\nresult=$(docker images -q \"$RUN_IMAGE_TAG\" )\n\necho \"Result....\"\necho \"$result\"\n\nif [ -n \"$result\" ] ; then\n echo \"image exists\"\nelse\n echo \"image missing, building\"\n docker build -t \"$RUN_IMAGE_TAG\" . \nfi\n\necho \"running...\"\n\ndocker run --rm -it -v ${PWD}/$OUTDIR:\"/$PROFILER_OUTPUT_DIR\" -e PROFILER_OUTPUT_DIR=\"/$PROFILER_OUTPUT_DIR\" -e PROFILER_TIME_STEPS=\"$DELTA\" -e VERBOSITY=\"$VERBOSITY\" \"$RUN_IMAGE_TAG\" \"$COMMAND\"" }, { "alpha_fraction": 0.6583463549613953, "alphanum_fraction": 0.6583463549613953, "avg_line_length": 26.913043975830078, "blob_id": "dc774f49f9b9ca875c83f789d5bd17ae985c99c5", "content_id": "42dfd133dffdd20e98780fec2f207311d02fbc92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 641, "license_type": "no_license", "max_line_length": 168, "num_lines": 23, "path": "/Profiler_Bash/alpine/runalpinebashsampler.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#author: Tanmay Shah <[email protected]>\n\nsource run.cfg\nmkdir $OUTDIR\n\nresult=$(docker images -q \"$RUN_IMAGE_TAG\" )\n\necho \"Result....\"\necho \"$result\"\n\nif [ -n \"$result\" ] ; then\n echo \"image exists\"\nelse\n echo \"image missing, building\"\n docker build -t \"$RUN_IMAGE_TAG\" . \nfi\n\n#echo docker run --rm -it -v $PWD/$outdir:/.cprofiles alpinepythonsampler $command\necho \"running...\"\n#docker run --rm -it -v ${PWD}/$outdir:/.cprofiles -e DELTA=\"$delta\" \"$imagetag\" \"$command\"\n\ndocker run --rm -it -v $PWD/$OUTDIR:\"/$PROFILER_OUTPUT_DIR\" -e OUTPUTDIR=\"$PROFILER_OUTPUT_DIR\" -e VERBOSITY=\"$VERBOSITY\" -e DELTA=\"$DELTA\" \"$RUN_IMAGE_TAG\" \"$COMMAND\"" }, { "alpha_fraction": 0.7462498545646667, "alphanum_fraction": 0.7531828880310059, "avg_line_length": 39.67692184448242, "blob_id": "ce849efa178c5e1d85bea678aa888a485d8873df", "content_id": "5b80d6d0916be8be517a4985ab84dd831fd1f444", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7933, "license_type": "no_license", "max_line_length": 456, "num_lines": 195, "path": "/Graphing/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "# Time-Series Graphing \n\n# Table of Contents\n * [FAQ](#faq)\n * [General](#general)\n * [Why should I use these Graphing Scripts?](#why-should-i-use-these-graphing-scripts)\n * [Usage](#usage)\n * [How do you graph JSON from the Container Profiler?](#how-do-you-graph-json-from-the-container-profiler)\n * [How do I control which metrics are delta'd and which are raw?](#how-do-I-control-which-metrics-are-delta'd-and-which-are-raw)\n * [GENERAL INFORMATION](#general-information)\n * [Setup and Dependencies](#setup-and-dependencies)\n * [Linux](#linux)\n * [Graphing](#graphing)\n * [Metrics](#metrics)\n * [Flags](#flags)\n * [Example Runs](#example-runs)\n * [Using a pre - built Graphing image](#using-a-pre---built-graphing-image)\n\n# FAQ\n## General\n\n### Why should I use these Graphing Scripts?\n\n#### Data deltas are done for you\n\nThe JSON from the ContainerProfiler is the raw data collected from many linux metrics aimed to collect information about your Computer's resource utilization. So while there are many alternatives\nto creating graphical visualizations with the collected JSON, certain metrics from the JSON need to have a delta operation done on them. This is because not all linux resource contention metrics are the same. Some are constant values as the maximum memory in your computer, some are dynamic and will raise or fall such as amount of memory being used currently, and some will only ever rise in value such as the number of write operations performed or time.\n\nThese Graphing Scripts by default will delta only metrics that are non-dynamic, and non-static, such that what you see will be a time-series visualization. If you just used any graphing tool with the JSON from the container profiler without any modifications, many of the created graphs may just be straight upward lines.\n\n## Usage\n\n### How do you graph JSON from the Container Profiler?\n\nGraphing JSON from the Container Profiler can be done by calling the graph_all.py script in the Graphing Directory of the Container Profiler repository.\nThis can be done with any JSON from the Container Profiler, by included by default is a JSON folder created with the Container Profiler on a simple pgbench test.\n\nTo create time-series graphs using this sample JSON folder, from the command line you can call:\n```bash\npython3 graph_all.py -f ./json\n```\n### How do I control which metrics are delta'd and which are raw?\nIncluded in the repository is a config.ini file named delta_configuration.ini\nIncluded is a list of every metric in the Container Profiler currently and in the format of one of the three:\n\nmetric=numeric-delta\nmetric=non-delta\nmetric=non-numeric\n\nnumeric-delta means that this metric should be delta'd\nnon-delta means that this metric should be left raw\nnon-numeric means that the recorded metric is not a numeric value, and usually is a string\n\nIf you want a metric to be a delta value instead or left raw, you can find it in this configuration file and change it to be equal to a value of numeric-delta or non-delta.\n\n## GENERAL INFORMATION\n\nThe Container Profiler has included Graphing Scripts which can create time-series graph visualizations of linux resource contention metrics.\nThese Graphs are saved locally and can be created in a browser dynamically.\n\n## Setup and Dependencies\n\n**NOTE:** In case any dependencies installed using pip are not properly working, reinstall them using conda. \n\n### Linux\n<a name=\"DockerInstall\"></a>\n1\\. Update your package manager index. \n\nOn Debian based distros such as Ubuntu the package manager is apt-get\n```bash\nsudo apt-get -y update\n```\n\nInstall Python (both 2 and 3) and Python-pip\n```bash\nsudo apt install python3\nsudo apt install python\nsudo apt-get install python3-pip\n``` \nInstall MiniConda (https://docs.conda.io/en/latest/miniconda.html)\n```bash\nsudo apt-get install -y wget\nsudo wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh\nsudo bash Miniconda3-latest-Linux-x86_64.sh\nsudo rm -f Miniconda3-latest-Linux-x86_64.sh \n````\nRefresh/Restart your shell and check if conda was installed properly\n```bash\nconda --version\n```\n\nInstall Pandas\n```bash\nsudo conda install pandas\n```\nInstall Plotly and Matplotlib\n```bash\nsudo conda install plotly\nsudo conda install matplotlib\n```\nInstall Tkinter (for dynamic graph creation)\n```bash\nsudo apt-get install python-tk\n```\nInstall Orca dependencies and Orca(needed for image exports)\n```bash\nsudo pip install psutil\nsudo conda install requests\nsudo apt install npm\nnpm install -g [email protected] --unsafe-perm=true --allow-root\n```\nInstall kaleido\n```bash\nsudo pip install -U kaleido\n```\n\nAdditional dependencies that may be needed incase you are on a 64bit machine with 32-bit software\n```bash\nsudo apt install libcanberra-gtk-module libcanberra-gtk3-module\nsudo apt install libgconf-2-4\n\n```\n\n## Graphing\n\nAfter you have installed the dependencies on your machine, Graphs of any Container Profiler JSON folder(s) can be made.\n\nThe syntax to create the graphs is:\n```bash\npython3 graph_all.py -f <relative path to the folder with collected samples>\n```\n\nAn example basic Time-Series graphing of all metrics of a run can be performed with the command:\n```bash\npython3 graph_all.py -f ./json.\n```\n### Flags\n\n| **Flag** | **Type** | **Description** |\n| --------- | ------------------- |--------------- |\n| -f | Space Delimited String |This flag is mandatory. Following this flag in the command line will be a space delimited list of paths to JSON folders |\n| -s | int | This flag is non-mandatory and defaults to 0. Following this flag is a time interval that determines when to apply a delta operation on the JSON files |\n| -m | Space Delimited String | This flag is non-mandatory. Following this flag is a space delimited list of Container Profiler metrics that you want to graph |\n| -d | boolean | This flag is non-mandatory and defaults to False. | If this flag is included then if your browser is supported, all graphs will be created in your browser as well as being exported locally |\n\n### Example Runs\n\nCreating Graphs from two folders\n```bash\npython3 graph_all.py -f dir_path dir_path2\n```\nCreating Graphs with a delta interval of 5 seconds\n```bash\npython3 graph_all.py -f dir_path -s 5\n```\nCreating Graphs with a delta interval of 10 seconds\n```bash\npython3 graph_all.py -f dir_path -s 10\n```\nCreating Graphs only with the metrics of currentTime, cId, and vCpuTime\n```bash\npython3 graph_all.py -f dir_path -m currentTime cId vCpuTime\n```\nCreating graphs from multiple folders with only metrics from cId and vCpuTime with a sampling interval of 60\n```bash\npython3 graph_all.py -f dir_path dir_path1 dir_path2 -m cId vCpuTime -s 60\n```\n\n# Using a pre - built Graphing image\n\nI have created a docker container for graphing. It is available as `tanmayuw/cp_graphing:minimal` and `tanmayuw/cp_graphing:latest`.\n\n## video demonstration\nHere is a video demonstration: https://youtu.be/_LEMn2Xh1tM\n\nTo use that image, you need to pull it first. I recommend using `tanmayuw/cp_graphing:minimal` as it is a smaller image: \n```bash\ndocker pull tanmayuw/cp_graphing:minimal\n```\nThen, change your current working directory to the directory containing the JSON samples. If the \ndirectory containing the JSON samples is newjson_test, then: \n```bash\ncd <path to new_json_test>/newjson_test\ndocker run -it -v $PWD:/input -v $PWD/output:/output tanmayuw/cp_graphing:minimal bash\n```\nThis will open up a container built from the pulled image in an interactive mode.\nNote that you must use this container in an interactive mode. \nNext, enter the following commands:\n```bash\npython3 graph_all.py -f input\nmv vm_container_images/* output\nexit\n```\nIn the end, you will have the generated plots in the `output` directory within your directory \ncontaining the JSON samples. In this case, the graphs will be in the `output` directory of the newjson_test directory. \n" }, { "alpha_fraction": 0.5375638008117676, "alphanum_fraction": 0.5412107706069946, "avg_line_length": 41.340206146240234, "blob_id": "1c176aae89b1cef6d56466b4bd2737166f0f6fe8", "content_id": "38be3a7a21a7e289848781d2b42b13be2f2d41dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4113, "license_type": "no_license", "max_line_length": 907, "num_lines": 97, "path": "/Graphing/process_filter.py", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport sys\nimport json\nimport copy\n#import ConfigParser\nimport pandas as pd\nimport time\nimport csv\nimport glob\nimport shutil\nimport re\n#import path\nfrom collections import namedtuple\n\n\ndef read_metrics_file(metrics):\n\n\tif (len(metrics) == 1): #and path.exists(metrics[0])):\n\t\tmetrics_file= metrics[0]\n\t\twith open(metrics_file, 'r') as f:\n\t\t\tmetrics= f.readline().split()\n\t\t\tprint(metrics)\n\t\tf.close()\n\t\treturn metrics\n\t\t\n\telse:\n\t\tprint(\"Error: Too many arguments or path does not exist\")\n\ndef read_cmdline_metrics(metrics):\n\treturn metrics\n\n\nparser = argparse.ArgumentParser(description='process path and file /or string of metrics.')\nparser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')\nparser.add_argument('metrics', type=str, nargs='*', help='list of metrics or file for metrics')\nparser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')\n\n\nargs= parser.parse_args()\nfile_path = args.file_path\nmetrics = args.read_metrics(args.metrics)\n\n\n\nfor i in range(0, len(metrics)):\n\tif os.path.exists('{}/{}'.format(file_path, metrics[i])):\n\t\tshutil.rmtree('{}/{}'.format(file_path, metrics[i]))\n\tif not os.path.exists('{}/{}'.format(file_path, metrics[i])):\n\t\tos.makedirs('{}/{}'.format(file_path, metrics[i]))\n\n\ndirs= [i for i in os.listdir( file_path ) if i.endswith(\".csv\")]\ndirs.sort()\n\nused_count = []\nfor file_name in dirs:\n\twith open(file_path + '/' + file_name) as csv_file: \n\t\tdata_frame = pd.read_csv(csv_file)\n\t\tdata_frame.head()\n\n\n\t\tfor i in range(0, len(metrics)):\n\t\t\tcontains_metric = data_frame['pCmdLine'].astype(str).str.contains(metrics[i], na=False, flags=re.IGNORECASE)\n\t\t\tfiltered = data_frame[contains_metric]\n\t\t\tfiltered.head()\n\t\t\tif (len(filtered.index) > 1) :\n\t\t\t\tfiltered = filtered.loc[:, ~filtered.columns.str.contains('^Unnamed')]\n\t\t\t\tfiltered.to_csv('{}/{}/{}'.format(file_path, metrics[i], file_name))\n\n\n\nfor i in range(0, len(metrics)):\n\t#path = \"{}/{}\".format(file_path, metrics[i])\n\tpath = file_path\n\tall_files = glob.glob(path+ \"/*.csv\")\n\tli = []\n\tprint(path)\n\tfor filtered_file in all_files:\n\t\tdf = pd.read_csv(filtered_file, index_col=None, header=0)\n\t\tli.append(df)\n\t\tprint(filtered_file)\n\n\tframe = pd.concat(li, axis=0, ignore_index=True)\n\tframe = frame.sort_values(by='currentTime', ascending=True)\n\tframe = frame.loc[:, ~frame.columns.str.contains('^Unnamed: 0')]\n\tframe.drop(frame.columns[0], axis=1)\n\t#frame= frame.groupby(['currentTime']).agg({ \n\t#\t'filename':'first', 'pBlockIODelays':'sum','pChildrenKernelMode':'sum', 'pChildrenUserMode':'sum','pCmdLine':'first', 'pCpuTimeUserMode':'sum', 'pId':'sum', 'pName':'first', \t\t\t'pNonvoluntaryContextSwitches':'sum', 'pNumThreads':'sum', 'pResidentSetSize':'sum','pVirtualMemoryBytes': 'sum', 'pVoluntaryContextSwitches':'sum'})\n\n\t#frame = frame.groupby(['currentTime']).sum()\n\n\t#frame = frame.diff(axis=1, periods=1)\n\tframe.drop(frame.index[0])\n\tframe['pCpuTime'] = frame['pCpuTimeUserMode'] + frame['pCpuTimeKernelMode']\n\t#print frame\n\tframe.to_csv('{}/{}/{}'.format(file_path, metrics[i], \"agg_sum.csv\"))\n\n\t\n\n\n\n" }, { "alpha_fraction": 0.6515581011772156, "alphanum_fraction": 0.6515581011772156, "avg_line_length": 22.53333282470703, "blob_id": "1c6d1ff664911aaa1a98ab076ae84be4444dc3c6", "content_id": "e47237ff37978c9bac4ee51b0141d5010126f339", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 353, "license_type": "no_license", "max_line_length": 86, "num_lines": 15, "path": "/Profiler_Bash/ubuntu/buildubuntubashsampler.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#author: Tanmay Shah <[email protected]>\n\nsource $PWD/build.cfg\n\nif [ \"$CHECK_ONLINE_RESPONSE\" = \"${answer#[Yy]}\" ] ;\nthen\n\tdocker pull \"$ONLINE_IMAGE\" ; # Needs to be set to latest release! #TODO on dockerhub\nelse\n\tcp ../src/entrypoint.sh .\n\tcp ../src/rudataall.sh .\n\tdocker build -t \"$BUILD_IMAGE_TAG\" .\n\trm -rf ./entrypoint.sh\n\trm -rf ./rudataall.sh\nfi\n" }, { "alpha_fraction": 0.7513513565063477, "alphanum_fraction": 0.7729730010032654, "avg_line_length": 29.83333396911621, "blob_id": "a1b1d808b8b4a80b0c673d99bf028a5f259a3e8c", "content_id": "6e3e2bbe9a1b24df59b527a8204a8167c4a3c7c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 185, "license_type": "no_license", "max_line_length": 35, "num_lines": 6, "path": "/Profiler_Bash/alpine/Dockerfile", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "From alpine:3.11.3\nRUN apk add jq bash bc util-linux\nCOPY rudataall.sh /usr/local/bin\nCOPY entrypoint.sh /usr/local/bin\nCOPY run_commands.sh /usr/local/bin\nENTRYPOINT [\"entrypoint.sh\"]\n" }, { "alpha_fraction": 0.8513513803482056, "alphanum_fraction": 0.8513513803482056, "avg_line_length": 73, "blob_id": "26206ce25c0a4d5e31bb394e003181a7595ecda7", "content_id": "a626d069efbcdc222d4cc9a8563792eb3387f35c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 74, "license_type": "no_license", "max_line_length": 73, "num_lines": 1, "path": "/Profiler_Python/alpine/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "sampling container for Alpine Linux Python version of Container Profiler. " }, { "alpha_fraction": 0.8387096524238586, "alphanum_fraction": 0.8387096524238586, "avg_line_length": 93, "blob_id": "d0f15aff88681b58a347316f662317463710c39a", "content_id": "31858437f94e99db4b83eff0b44d132a9e55f6dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 93, "license_type": "no_license", "max_line_length": 93, "num_lines": 1, "path": "/Profiler_Bash/src/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "common source files for ubuntu and alpine releases of the bash version of container profiler." }, { "alpha_fraction": 0.6770076751708984, "alphanum_fraction": 0.6783820986747742, "avg_line_length": 48.931373596191406, "blob_id": "8b979791fd77ebb898e2b2041fabeaafdbed70c2", "content_id": "60220c7b52fd90a5085ccbf889f46722d303992d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5093, "license_type": "no_license", "max_line_length": 167, "num_lines": 102, "path": "/Graphing/delta_json_generation.py", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#Creates a script based on graph_generation_config.ini to create a delta script to delta certain metrics, and avoids others.\n#authors: David Perez and Tanmay Shah\n\nimport argparse\nimport os\nimport json\nimport configparser\n\n\nfrom collections import namedtuple\n\ngenerated_script= open(\"auto_generated_delta_script.py\",\"w\")\ngenerated_script.write(\"import argparse\\nimport os\\nimport shutil\\nimport sys\\nimport json\\nimport copy\\nimport configparser\\nfrom collections import namedtuple\\n\\n\")\n\ngenerated_script.write(\"parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')\\n\")\ngenerated_script.write(\"parser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')\\n\")\ngenerated_script.write(\"parser.add_argument('delta_interval_time', action='store', help='stores time interval of when to take delta sample')\\n\")\ngenerated_script.write(\"args= parser.parse_args()\\n\")\ngenerated_script.write(\"file_path = args.file_path\\n\")\n\ngenerated_script.write(\"if os.path.exists(file_path + \\'/delta_json\\'):\\n\")\ngenerated_script.write(\"\\tshutil.rmtree(file_path + \\'/delta_json\\')\\n\")\n\n\ngenerated_script.write(\"if not os.path.exists(file_path + '/delta_json'):\\n\")\ngenerated_script.write(\"\\tos.makedirs(file_path + '/delta_json')\\n\\n\")\n\ngenerated_script.write(\"json_array = []\\n\")\ngenerated_script.write(\"delta_name_array = []\\n\")\ngenerated_script.write(\"dirs= sorted([i for i in os.listdir( file_path ) if i.endswith(\\\".json\\\")])\\n\")\n#generated_script.write(\"dirs.sort()\\n\")\n\ngenerated_script.write(\"for file_name in dirs:\\n\")\ngenerated_script.write(\"\\twith open(file_path + '/' + file_name) as json_file: \\n\")\n#generated_script.write(\"\\t\\tprint ('JSON FILES TANMAY:')\\n\")\ngenerated_script.write(\"\\t\\tprint(json_file)\\n\")\ngenerated_script.write(\"\\t\\ttry:\\n\")\n\ngenerated_script.write(\"\\t\\t\\tnew_json_object = json.load(json_file)\\n\")#, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\\n\")\ngenerated_script.write(\"\\t\\t\\tjson_array.append(new_json_object)\\n\")\ngenerated_script.write(\"\\t\\t\\tnew_name= ((file_path+'/delta_json/'+file_name).split('.json')[0] + '_delta.json')\\n\")\n\ngenerated_script.write(\"\\t\\t\\tdelta_name_array.append(new_name)\\n\\n\")\ngenerated_script.write(\"\\t\\texcept Exception as e:\\n\")\n\ngenerated_script.write(\"\\t\\t\\tprint (\\\"{} invalid file\\\".format(json_file))\\n\")\ngenerated_script.write(\"\\t\\t\\tpass\\n\")\nconfig = configparser.ConfigParser()\nconfig.optionxform = str \nconfig.read('graph_generation_config.ini')\n\n\n#script generation\ngenerated_script.write(\"def file_subtraction(the_json_one, the_json_two):\\n\")\ngenerated_script.write(\"\\tjson_three = copy.deepcopy(the_json_two)\\n\")\n\n#all common attributes across all verbos\nfor (each_key, each_val) in config.items('all'):\n if ( each_val == 'numeric_delta'): #and each_key.isdigit()):\n json_one = \"the_json_one['\" +each_key+\"']\"\n json_two = \"the_json_two['\" +each_key+\"']\"\n json_three = \"json_three['\" +each_key+\"']\"\n generated_script.write(\"\\t\" + json_three +\"=\" + json_two +'-' + json_one+\"\\n\")\n\n\n#check and process attributes only for CPU or VM\nverbos = ['cpu_level','vm_level']\nfor vKey in verbos:\n\tfor (each_key, each_val) in config.items(vKey):\n \tif ( each_val == 'numeric_delta'): #and each_key.isdigit()):\n generated_script.write(\"\\tif ('\" + each_key + \"' in the_json_one.keys()):\\n\")\n json_one = \"the_json_one['\" +each_key+\"']\"\n json_two = \"the_json_two['\" +each_key+\"']\"\n json_three = \"json_three['\" +each_key+\"']\"\n generated_script.write(\"\\t\\t\" + json_three +\"=\" + json_two +'-' + json_one+\"\\n\")\n\nif (config.get('cprocessorstats','cCpu#TIME')):\n generated_script.write(\"\\tif ('cProcessorStats' in the_json_one.keys()):\\n\")\n generated_script.write(\"\\t\\tfor (each_key) in the_json_two['cProcessorStats']:\\n\")\n generated_script.write(\"\\t\\t\\tif ('cCpu' in each_key and 'TIME' in each_key):\\n\")\n generated_script.write(\"\\t\\t\\t\\tjson_three['cProcessorStats'][each_key] = the_json_two['cProcessorStats'][each_key] - the_json_one['cProcessorStats'][each_key]\\n\")\ngenerated_script.write(\"\\treturn json_three\\n\\n\")\n\ngenerated_script.write(\"delta_json_array=[]\\n\")\ngenerated_script.write(\"count = 0\\n\")\ngenerated_script.write(\"first = json_array[0]\\n\")\n\ngenerated_script.write(\"for i in range(1, len(json_array)):\\n\")\n\ngenerated_script.write(\"\\tcount += (json_array[i][\\\"currentTime\\\"] - json_array[i-1][\\\"currentTime\\\"])\\n\")\ngenerated_script.write(\"\\tif count >= int(args.delta_interval_time):\\n\")\ngenerated_script.write(\"\\t\\tdelta_json_array.append(file_subtraction(first, json_array[i]))\\n\")\ngenerated_script.write(\"\\t\\tcount = 0\\n\")\ngenerated_script.write(\"\\t\\tfirst = json_array[i]\\n\")\n\n\ngenerated_script.write(\"\\n\")\ngenerated_script.write(\"for i in range(len(delta_json_array)):\\n\")\n\n \ngenerated_script.write(\"\\twith open(delta_name_array[i], 'w') as fp:\\n\")\ngenerated_script.write(\"\\t\\tjson.dump(delta_json_array[i], fp, sort_keys=True, indent=2)\\n\")\n" }, { "alpha_fraction": 0.8152173757553101, "alphanum_fraction": 0.8152173757553101, "avg_line_length": 91, "blob_id": "dd21c20228002fb03c33280cdecce01c5795b91c", "content_id": "553f3b2c62a2823d9bd30eebaeba309640b8e448", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 91, "num_lines": 1, "path": "/Profiler_Python/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "Python version of the Container Profiler. It has two environments: Alpine and Ubuntu Linux. " }, { "alpha_fraction": 0.577876091003418, "alphanum_fraction": 0.5929203629493713, "avg_line_length": 24.68181800842285, "blob_id": "5c13dfc0769896da7617c36be43fddae265c4910", "content_id": "8b5a5767530262833163efa83500e1266f70742f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1130, "license_type": "no_license", "max_line_length": 149, "num_lines": 44, "path": "/Profiler_Bash/src/entrypoint.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#author: Tanmay Shah <[email protected]> and Biodepot team\n\nif [ -z $OUTPUTDIR ]; then\n\tOUTPUTDIR=\"/.cprofiles\"\nfi\nif [ -z $DELTA ]; then\n\tDELTA=1000\nfi\n\nif [ ! -d \"$OUTPUTDIR\" ]; then\n\t${@}\n\texit\nfi\n\necho \"Command is: ${@}\"\n\nif [ \"$DELTA\" -eq 0 ]\nthen\n\trudataall.sh $VERBOSITY > \"${OUTPUTDIR}/$(date '+%Y_%m_%d__%H_%M_%S').json\"\n\t${@}\n\trudataall.sh $VERBOSITY > \"${OUTPUTDIR}/$(date '+%Y_%m_%d__%H_%M_%S').json\"\nelse\n\t#run command goes in background\n\t${@} &\n\t#capture the pid of the run command\n\trpid=$!\n\t#kill the runcmd if there is an error\n\ttrap \"kill -9 $rpid 2> /dev/null\" EXIT\n\twhile [ -n \"$rpid\" -a -e /proc/$rpid ]\n\tdo\n\t\n \tt1=$(date '+%s%3N')\n \trudataall.sh $VERBOSITY > \"${OUTPUTDIR}/$(date '+%Y_%m_%d__%H_%M_%S').json\" &\n \tt2=$(date '+%s%3N')\n \tlet profile_time=$t2-$t1\n \tlet sleep_time=$DELTA-$profile_time\n \tsleep_time=`echo $sleep_time / 1000 | bc -l`\n\n \t#if $sleep_time is negative, will result in non-crashing error. Is it faster to let this error happen or to do a check to not sleep if negative?\n \tsleep $sleep_time\n\tdone\nfi\n#rudataall.sh > \"${OUTPUTDIR}/$(date '+%Y_%m_%d__%H_%M_%S').json\"\n" }, { "alpha_fraction": 0.6655112504959106, "alphanum_fraction": 0.6655112504959106, "avg_line_length": 24.130434036254883, "blob_id": "b513c30e185af2316af431924e48583897180a71", "content_id": "609d1c810738a02c79d4f35f7bbf5dc9d56e16b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 577, "license_type": "no_license", "max_line_length": 192, "num_lines": 23, "path": "/Profiler_Python/alpine/runalpinepythonsampler.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#author: Tanmay Shah <[email protected]>\n\nsource run.cfg\n\nmkdir $OUTDIR\n\n\nresult=$(docker images -q \"$RUN_IMAGE_TAG\" )\n\necho \"Result....\"\necho \"$result\"\n\nif [ -n \"$result\" ] ; then\n echo \"image exists\"\nelse\n echo \"image missing, building...\"\n docker build -t \"$RUN_IMAGE_TAG\" . \nfi\n\n#echo docker run --rm -it -v $PWD/$outdir:/.cprofiles alpinepythonsampler $command\necho \"running...\"\ndocker run --rm -it -v ${PWD}/$OUTDIR:\"/$PROFILER_OUTPUT_DIR\" -e PROFILER_TIME_STEPS=$DELTA -e PROFILER_OUTPUT_DIR=\"/$PROFILER_OUTPUT_DIR\" -e VERBOSITY=\"$VERBOSITY\" \"$RUN_IMAGE_TAG\" \"$COMMAND\"" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6497326493263245, "avg_line_length": 22.25, "blob_id": "f62ab397cce8eb8bd8fc66cb8b804ffe96baee15", "content_id": "e7b7109e9d060d0a051e2844b8c652c9a25aad37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 374, "license_type": "no_license", "max_line_length": 68, "num_lines": 16, "path": "/utilities/clean_dangling_images.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#author: Tanmay Shah <[email protected]>\n\necho \"Found the following dangling images: \"\ndocker images --filter \"dangling=true\" -q --no-trunc\n\nread -p \"Do you with to remove these images to free space? \" -n 1 -r\necho\nif [[ $REPLY =~ ^[Yy]$ ]]\nthen\n\techo \"removing...\"\n\tdocker rmi $(docker images --filter \"dangling=true\" -q --no-trunc)\n\techo \"done\"\nelse\n\techo \"exiting... \"\nfi\n\n\n" }, { "alpha_fraction": 0.7530174851417542, "alphanum_fraction": 0.7554114460945129, "avg_line_length": 60.88271713256836, "blob_id": "c981065564934f52336da43b619ed76ce755a451", "content_id": "67faf95d1fa5178f63841b54480f4b4f4186d4cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20082, "license_type": "no_license", "max_line_length": 505, "num_lines": 324, "path": "/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "# Container Profiler\nUpdated version of Container Profiler\n\nUniversity of Washington Tacoma\n\n# Table of Contents\n * [MANUAL](#manual)\n * [Container Profiler](#container-profiler-1)\n * [General Information](#general-information)\n * [Overview](#overview)\n * [Tutorial - Profiling a Container](#tutorial-profiling-a-container)\n * [Video Demonstration](#video-demonstration)\n * [Install the Container Profiler](#install-the-container-profiler)\n * [Using the Container Profiler](#using-the-container-profiler)\n * [Metrics Description](#metrics-description)\n * [VM Level Metrics](#vm-level-metrics)\n * [Container Level Metrics](#container-level-metrics)\n * [Process Level Metrics](#process-level-metrics)\n\n * [FAQ](#faq)\n * [General](#general)\n * [Why should I use the Container Profiler?](#why-should-i-use-the-Container-Profiler)\n * [Usage](#usage)\n * [How do I use Container Profiler on my own container?](#how-do-i-use-the-Container-Profiler-on-my-own-container)\n * [Miscellaneous](#miscellaneous)\n * [How should I reference the Container Profiler if I use it?](#how-should-i-reference-the-Container-Profiler-if-i-use-it)\n \n\n\n# MANUAL\n___\n___\n\n## Container Profiler\n___\nThe Container Profiler can be used as a tool to profile an application or workflow by taking interval snapshots of a collection of linux resource utilization metrics throughout the course of the job. These snapshots are then stored as JSON data which can then be plotted and used to see how the metrics changed once the job is finished.\n\n**Authors:** Wes Lloyd & Huazeng Deng & Ling-hong Hung & David Perez & Varik Hoang & Tanmay Shah\n\n**Version:** 0.4\n\n**GitHub:** https://github.com/tanmayuw/ContainerProfiler\n\n**License:** Copyright.\n\n___\n## General Information\n___\nIn order to use the Container Profiler, a containerized application/workflow/script to be run and profiled is needed.\nA custom docker image of the Container Profiler can be created using the build scripts which can be run to benchmark the\ncustom application/workflow/script.\n\nContainerProfiler includes scripts **rudataall.sh** and **rudataall-psutil.py** to profile the resource utilization on VM level, container level and process level. \nvisual guidance on usage of the profiler script can be found in the YouTube video linked below.\n\n\n## Overview\n___\n\nThe Container profiler repository is organized in the following structure: \n\n![](./common/structure.PNG)\n\nThere are two versions of the profiler - Bash and Python, each with an Alpine and a Ubuntu environment. Each of the Profiler versions have three folders. The src folder contains the files common between the ubuntu and alpine directories (and environments).\n\nAs seen in the diagram above, the root directory has four folders of importance: **Graphing, Profiler_Bash, Profiler_Python** and **utilities.** \nThe Profiler_Bash and Profiler_Python house the alpine and ubuntu environments of the Bash and Python versions of Container Profiler respectively. \nUnder each of the Profiler_Bash and Profiler_Python, there are three folders:\n* **src** : which houses the common files between the ubuntu and alpine environment releases\n* **alpine** : which houses the scripts pertaining to the alpine release of the bash or python version of the profiler, depending on the parent directory.\n* **ubuntu** : which houses the scripts pertaining to the ubuntu release of the bash or python version of the profiler, depending on the parent directory.\n\nThe ‘src’ folder has two files - the **entrypoint.sh** (**profiler** for python version but different contents), \nand the **rudataall.sh** (**rudataall-psutil.py** in python version). The entrypoint.sh file is the first file to execute when the docker container is invoked. It is responsible for executing the specified command and collecting samples at specified time delta. The rudataall.sh file (rudataall-psutil.py in python version) is the worker file which collects a sample when invoked. It is invoked by entrypoint.sh file at required intervals to collect the samples.\n\nThe *< environment >* folders (environment can be ubuntu or alpine) contain the following files:\n* **Dockerfile**: This file is required to build the needed docker image from source. Users can modify this file and add dependencies they require in the image using the RUN commands.\n* **build.cfg**: This file allows settings three parameters:\n * CHECK_ONLINE_RESPONSE (“y”/”n”) : which allows users to set whether they want to pull image from docker hub\n * ONLINE_IMAGE : which is the tag of the image the user intends to pull from docker hub (not required when building from source)\n * BUILD_IMAGE_TAG : which is the tag of the image the user wants to put on the image built from source.\n* **run_commands.sh**: This file is where the user can put the commands to execute. It gets copied in the image and is executed depending on the COMMAND parameter set in the run.cfg file. Note that to execute a new set of commands in this file, you have to build the image again.\n* **build< environment >< version >sampler.sh**: This file is the executable which does the build process using the Dockerfile and the specified parameters build.cfg. It tags the image built from source with the specified tag.\n* **run.cfg**: This file allows the user to specify the parameters needed to control the container functioning on run. Users have the ability to provide the parameters of delta between the sample collection, the command to execute, the output directory on the host machine to volume map the collected data, the tag of the image to use to create the container, the level of verbosity and the directory in the container to store the collected data to be mapped with the output directory on the host machine.\n * COMMAND: These are the commands to be benchmarked. You can provide one line commands as arguments to this parameter. If you have a fixed set of commands to execute, it is the best idea to write them in the run_commands.sh file, build the image, and pass ` bash run_commands.sh` as the value to this parameter.\n * DELTA: This parameter takes as input the delta time interval for collecting samples. This value should be provided in milliseconds when using the bash version and in seconds when using the python version. If DELTA is set to 0, it will only collect two samples - one before and one after the task is completed.\n * OUTDIR: This is the output directory you want the collected samples to be stored in on the host machine, and is default set to ‘newjson’.\n * RUN_IMAGE_TAG: This is the tag of the image you wish to use to build the container and execute the commands to benchmark\n * VERBOSITY: This is the level of detail in verbosity of VM, Container and Process levels of data you need in the collected data. It can be specified by individual flags (as “-v -p -c” or “-v -c” ) in the bash version and they can be collectively passed in a single flag (as “-vpc” or “-vc” ) in the python versions of the profiler.\n * The PROFILER_OUTPUT_DIRECTORY: This is the directory in the container which is volume mapped with the output directory on the host machine and stores the collected samples.\n\n * **run< environment >< version >sampler.sh** : This file is the executable which executes the run process - including the creation of container, execution of commands to benchmark in the container, collection of data with given verbosity and saving the output in the specified directory.\n\nThe **Graphing** directory is used to graph the output generated by the profilers in Profiler_Bash and Profiler_Python. \nIt takes in as input, a folder of json samples collected, takes the deltas, turns them to csv files and then plots them.\nThe output is stored in the vm_container_images folder. A test output file `newjson_test` is provided in the root directory, \nwhich can be used to test graphing scripts using `python3 graph_all.py -f ../newjson_test` command. \n___\n### Utilities Reference\n___\n### deltav2.sh\n___\n**deltav2.sh** calculates the delta from 2 json files produced by **rudataall.sh**\n\nIt writes the deltas to stdout in json format and the missing fields to stderr\n\n#### Usage:\n```bash\ndeltav2.sh file1.json file2.json 2>missing.txt 1>delta.json\n```\nTest file and scripts are found in testFiles\n\n#### Description\nBasically it loops through file1 to find key : numeric_value pairs and store them in an associative array. It then loops through file2 to print out the json elements and calculate deltas. Missing values in file1 are printed here and a second key numericValue associative array is mad. A third loop then searches through the first associative array to fine missing values in file2. \n\nAs long as there is no more than one key : value pair per line in the json files and the key is unique (i.e. doesn't depend on the structure of the higher order json objects), the script should work fine. It is tolerant of order permutations, new or different fields, and missing lines but depends on file2 being valid json.\n___\n### clean_dangling_images.sh\n___\n**clean_dangling_images.sh** script is useful for cleaning the now dangling images which are not associated with any containers to free memory.\n\n#### Usage\n```bash\nsudo bash clean_dangling_images.sh\n```\n#### Description\nThis script finds all docker images not associated with any containers and tagged with a `<none>` tag.\nIt shows the image IDs to the user and allows the user to remove them. \n\n\n\n\n# Tutorial: Profiling a Container\n\n## Video Demonstration\n**Video:** https://youtu.be/HzuDeuPpE1g\n\n## Install the Container Profiler\n```bash\ngit clone https://github.com/tanmayuw/ContainerProfiler.git\n```\n\n## Using the Container Profiler\n\nIn this tutorial and in general usage, all docker related commands need to be executed with root privileges. \nIt is recommended that you use Container Profiler with root privileges. You can enter superuser mode by `sudo su` command. \n\nNavigate to /< Profiler version >/< environment >/ directory according to your preferred version and environment. \n\nFor this tutorial, I am using the Alpine Python version: \n\n![](./common/demo_dir.PNG)\n\n1) By specifying the parameters in `build.cfg` file, you can choose to either build your custom image from source\n or to pull an already built vanilla image from dockerhub which you can modify, commit and run using the run< environment >< version >sampler.sh scripts.\n In this tutorial, I am building this image from source.\n\n![](./common/demo_buildcfg.PNG)\n\n2) Next, open the `Dockerfile` to specify the packages to be installed for your custom container. \n I am currently testing the Alpine Python version with `stress-ng` package, and so I added the \n RUN commands in dockerfile to install it on my container. In a similar manner, you can add your \n own required packages and change the environment/OS version. DO NOT CHANGE ANYTHING ELSE.\n\n![](./common/demo_dockerfile.PNG)\n\n3) If you have a specific set of commands to execute in the custom container, you can put them in the run_commands.sh file.\n \n![](./common/demo_run_commands.png)\n\nIn this tutorial, I will only be testing a single command which I can input later in run.cfg file.\n\n4) Build the image with `sudo bash build<environment><version>sampler.sh` command.\n\n![](./common/demo_buildscript.PNG)\n\n5) Next, you can modify the `run.cfg` file to provide runtime parameters: \n\n![](./common/demo_runcfg.PNG)\n\n6) Now, you can execute `sudo bash run<environment><version>sampler.sh` to run the built container. \n Make sure that the RUN_IMAGE_TAG is set to the tag of the image you built and want to run. Don't worry if you see an \n error like `cat: /sys/fs/cgroup/blkio/blkio.sectors` as that metric is no longer maintained in newer versions of Linux kernel. \n\n![](./common/demo_runscript.PNG)\n\nIt will store the output in the specified directory.\n\n![](./common/demo_output.PNG)\n\n7) Finally, you can graph the results. First, make sure you have installed all the dependencies mentioned in the Graphing folder.\n Note that you must be a root privileges for the graphing scripts to work. \n I specified the output directory to be newjson and to graph the results, \n I need to go into the Graphing directory and provide the following command containing the appropriate path: \n\n![](./common/demo_graphingscripts.PNG)\n\nThe generated graphs are stored in the `vm_container_images` folder of the Graphing directory.\n\n![](./common/demo_graphs.PNG)\n\n\n\nMetrics Description \n=======\n\nThe text below describes the metrics captured by the script **rudataall.sh** and **rudataall-psutil.py** for profiling resource utilization on the \nvirtual machine (VM) level, container level and process level. A complete metrics description spreadsheet can be found at \nhttps://github.com/wlloyduw/ContainerProfiler/blob/master/metrics_description_for_rudataall.xlsx \n\nVM Level Metrics\n----------------\n\n\n| **Attribute** | **Description** |\n| ------------- | --------------- |\n| vCpuTime | Total CPU time (cpu_user+cpu_kernel) in centiseconds (cs) (hundreths of a second) |\n| vCpuTimeUserMode | CPU time for processes executing in user mode in centiseconds (cs) | \n| vCpuTimeKernelMode | CPU time for processes executing in kernel mode in centiseconds (cs) | \n| vCpuIdleTime | CPU idle time in centiseconds (cs) | \n| vCpuTimeIOWait | CPU time waiting for I/O to complete in centiseconds (cs) | \n| vCpuTimeIntSrvc | CPU time servicing interrupts in centiseconds (cs) | \n| vCpuTimeSoftIntSrvc | CPU time servicing soft interrupts in centiseconds (cs) | \n| vCpuContextSwitches | The total number of context switches across all CPUs | \n| vCpuNice | Time spent with niced processes executing in user mode in centiseconds (cs) | \n| vCpuSteal | Time stolen by other operating systems running in a virtual environment in centiseconds (cs) | \n| vCpuType | The model name of the processor | \n| vCpuMhz | The precise speed in MHz for thee processor to the thousandths decimal place | \n| vDiskSectorReads | The number of disk sectors read, where a sector is typically 512 bytes, assumes /dev/sda1| \n| vDiskSectorWrites | The number of disk sectors written, where a sector is typically 512 bytes, assumes /dev/sda1 | \n| vDiskSuccessfulReads | Number of disk reads completed succesfully |\n| vDiskMergedReads | Number of disk reads merged together (adjacent and merged for efficiency) |\n| vDiskReadTime | Time spent reading from the disk in millisecond (ms) |\n| vDiskSuccessfulReads | Number of disk reads completed succesfully |\n| vDiskSuccessfulWrites | Number of disk writes completed succesfully |\n| vDiskMergedWrites | Number of disk writes merged together (adjacent and merged for efficiency) |\n| vDiskWriteTime | Time spent writing in milliseconds (ms) |\n| vMemoryTotal | Total amount of usable RAM in kilobytes (KB) |\n| vMemoryFree | The amount of physical RAM left unused by the system in kilobytes (KB) |\n| vMemoryBuffers | The amount of temporary storage for raw disk blocks in kilobytes (KB) |\n| vMemoryCached | The amount of physical RAM used as cache memory in kilobytes (KB) |\n| vNetworkBytesRecvd | Network Bytes received assumes eth0 in bytes |\n| vNetworkBytesSent | Network Bytes written assumes eth0 in bytes |\n| vLoadAvg | The system load average as an average number of running plus waiting threads over the last minute |\n| vPgFault | type of exception raised by computer hardware when a running program accesses a memory page that is not currently mapped by the memory management unit (MMU) into the virtual address space of a process|\n| vMajorPageFault | Major page faults are expected when a prdocess starts or needs to read in additional data and in these cases do not indicate a problem condition |\n| vId | VM ID (default is \"unavailable\") |\n| currentTime | Number of seconds (s) that have elapsed since January 1, 1970 (midnight UTC/GMT) |\n\n\n \n \n \nContainer Level Metrics\n----------------\n\n| **Attribute** | **Description** |\n| ------------- | --------------- |\n| cCpuTime | Total CPU time consumed by all tasks in this cgroup (including tasks lower in the hierarchy) in nanoseconds (ns) |\n| cProcessorStats | Self-defined parameter |\n| cCpu${i}TIME | CPU time consumed on each CPU by all tasks in this cgroup (including tasks lower in the hierarchy) in nanoseconds (ns) |\n| cNumProcessors | Number of CPU processors |\n| cCpuTimeUserMode | CPU time consumed by tasks in user mode in this cgroup in centiseconds (cs) |\n| cCpuTimeKernelMode | PU time consumed by tasks in kernel mode in this cgroup in centiseconds (cs) |\n| cDiskSectorIO | Number of sectors transferred to or from specific devices by a cgroup (no longer collected for newer Linux kernels) |\n| cDiskReadBytes | Number of bytes transferred from specific devices by a cgroup in bytes |\n| cDiskWriteBytes | Number of bytes transferred to specific devices by a cgroup in bytes |\n| cMemoryUsed | Total current memory usage by processes in the cgroup in bytes |\n| cMemoryMaxUsed | Maximum memory used by processes in the cgroup in bytes |\n| cNetworkBytesRecvd | The number of bytes each interface has received |\n| cNetworkBytesSent | The number of bytes each interface has sent |\n| cId | Container ID |\n\n\n##\n\nProcess Level Metrics\n----------------\n\n| **Attribute** | **Description** |\n| ------------- | --------------- |\n| pId | Process ID | \n| pNumThreads | Number of threads in this process | \n| pCpuTimeUserMode | Total CPU time this process was scheduled in user mode, measured in clock ticks (divide by sysconf(\\_SC_CLK_TCK)) | \n| pCpuTimeKernelMode | Total CPU time this process was scheduled in kernel mode, measured in clock ticks (divide by sysconf(\\_SC_CLK_TCK)) |\n| pChildrenUserMode | Total time children processes of the parent were scheduled in user mode, measured in clock ticks |\n| pChildrenKernelMode | Total time children processes of the parent were scheduled in kernel mode, measured in clock ticks |\n| pVoluntaryContextSwitches | Number of voluntary context switches | \n| pNonvoluntaryContextSwitches | Number of involuntary context switches | \n| pBlockIODelays | Aggregated block I/O delays, measured in clock ticks | \n| pVirtualMemoryBytes | Virtual memory size in bytes | \n| pResidentSetSize | Resident Set Size: number of pages the process has in real memory. This is just the pages which count toward text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out | \n| pNumProcesses | Number of processes inside a container | \n\n\n\n\n# FAQ\n## General\n\n### Why should I use the Container Profiler?\nContainer Profiler is an easy to use profiling tool for profiling applications or workflows in a container.\n\n## Usage\n\n### How do I use the Container Profiler on my own container?\n\n1. Clone the Container Profiler repository.\n\n2. Pick one of the four version-environment pairs and enter its corresponding directory.\n\n3. Edit the Dockerfile to install the required dependencies for creating your container and then alter build.cfg to provide your custom tag for the local built images. If you need to pull a pre-built vanilla version of the profiler image from docker hub, you can choose that in the build.cfg. You may alter run_commands.sh to pre-load the set of commands in your image if you will run the same set of commands on the image.\n\n4. Execute the `bash build<environment><version>sampler.sh` command to build your custom image with tag provided tag in build.cfg.\n\n5. Edit the run.cfg file to provide the runtime parameters. If you intend to use run_commands.sh file, provide `bash run_commands.sh` to the COMMAND variable.\n\n6. Execute the `bash run<environment><version>sampler.sh` command to execute your container which will output the collected samples in the provided output directory.\n\n7. You can then plot the samples by going into the *Graphing* directory and following the instructions to use them.\n\n## Miscellaneous\n### How should I reference the Container Profiler if I use it?\nContact the BioDepot team for more information.\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 71, "blob_id": "7dbed34592773b405404cff660be3043732a0a40", "content_id": "9c377eb2ecb8aac4f4d01c6a68a72dcd2ddc3be2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 72, "license_type": "no_license", "max_line_length": 71, "num_lines": 1, "path": "/Profiler_Python/src/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "source files common for both Alpine and Ubuntu Python version releases. " }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 23.25, "blob_id": "666b1787ee72534a5829b0d17f04c63ec73cb4d4", "content_id": "3bc775ee3d5bfad259d8e0723767426ea7dd5c8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 96, "license_type": "no_license", "max_line_length": 39, "num_lines": 4, "path": "/Profiler_Python/alpine/run_commands.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\necho \"This is the start for testing...\"\nsleep 10\necho \"This is the end for testing.\"" }, { "alpha_fraction": 0.6387154459953308, "alphanum_fraction": 0.6564451456069946, "avg_line_length": 28.597360610961914, "blob_id": "7042ef2d835f7dc4269a340a0e30770e33645ce3", "content_id": "14a1b2262d2bf37ced7379fe57797925cd269dba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8968, "license_type": "no_license", "max_line_length": 184, "num_lines": 303, "path": "/Graphing/plotly_stack_graphs.py", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "from plotly.subplots import make_subplots\nimport random\nimport json\nimport os, sys\nimport pandas as pd\nimport subprocess\nimport numpy as np\n\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport argparse\nfrom os import path\nimport math\n\ngraphing_methods=['scatter', 'bar']\nFONT_SIZE=26;\nMARGIN_SIZE=20\nTICKFONT_SIZE=20\n\ndef export_graphs_as_images(fig, file_name, title):\n\tfile_name=file_name.split('.',1)[0]\n\tif not os.path.exists(file_name +\"_images\"):\n\t\tos.mkdir(file_name +\"_images\")\n\tfig.write_image(file_name +\"_images/\"+title +\".png\")\n\tprint(\"saved image: \" +title +\".png to \" + os.path.abspath(file_name +\"_images\"))\n\t\n\ndef read_metrics_file(metrics, data_frame):\n\n\tif (len(metrics) == 1 and path.exists(metrics[0])):\n\t\tmetrics_file= metrics[0]\n\t\twith open(metrics_file, 'r') as f:\n\t\t\tmetrics= f.readline().split()\n\t\tf.close()\n\t\treturn metrics\n\t\t\n\telse:\n\t\tprint(\"Error: Too many arguments or path does not exist\")\n\ndef read_cmdline_metrics(metrics, data_frame):\n\tif (len(metrics) == 0):\n\t\treturn list(data_frame.columns[1:])\n\telse:\n\t\treturn metrics\n\ndef update_fig(figure, y_title, the_title):\n\n\tfigure.update_layout(\n annotations=[go.layout.Annotation(\n x=.5,\n y=-0.29,\n showarrow=False,\n text=\"Time (h)\",\n xref=\"paper\",\n yref=\"paper\"\n ),\n \n go.layout.Annotation(\n x=-0.14,\n y=0.5,\n\t\t\tfont=dict(\n\t\t family=\"Courier New, monospace\",\n\t\t size=FONT_SIZE,\n\t\t color=\"#000000\"\n ),\n showarrow=False,\n text=y_title,\n textangle=0,\n xref=\"paper\",\n yref=\"paper\"\n )\n ],\n #autosize=True,\n margin=dict(\n b=120,\n ),\n\tfont=dict(\n family=\"Courier New, monospace\",\n size=MARGIN_SIZE,\n color=\"#000000\"\n ),\t\n\tshowlegend=True\n\t)\n\n\n\n\n\n\n\tfigure.update_xaxes(\n #ticktext=[\"end of split\", \"end of align\", \"end of merge\"],\n #tickvals=[\"2000\", \"20000\", \"27500\"],\n\t#ticktext=[\"split\", \"align\", \"merge\"],\n\t#tickvals=[\"10\", \"2100\", \"20000\"],\n\tdomain=[0.03, 1],\n\ttickangle=45, \n\tshowline=True, linewidth=3, linecolor='black', mirror=True, \n\ttickfont=dict(\n family='Courier New, monospace',\n size=TICKFONT_SIZE,\n color='black'\n )\n\n\t)\n\n\tfigure.update_yaxes(showline=True, linewidth=3, linecolor='black', mirror=True)\n\tfigure.update_layout(legend_orientation=\"h\")\n\tfigure.update_layout(legend=dict(x=0, y=-.28))\n\tfigure.update_layout(title = { 'text':the_title, 'x':.1, 'y':.91})\n\ndef normalize(data_frame):\n\tdata_frame[\"vDiskSectorWrites\"] = data_frame[\"vDiskSectorWrites\"]*512;\n\tdata_frame[\"cCpuTime\"]= data_frame[\"cCpuTime\"]/1000000000;\n\t#return data_frame;\n\n\ndef make_four(data_frame):\n\ttitles1=[\"Cpu Utilization\", \"Memory Utilization\", \"Network Utilization\", \"Disk Utilization\"]\n\n\n\tytitles=[\"% of CPU Utilization\", \"% Memory Usage Utilization\", \"# of Bytes recieved/sent\", \"# of Bytes Read/Written\"]\n\tapplypercent=[True, True, False, False]\n\tmetrics1=[\"cCpuTime\"]\n\n\tmetrics2=[\"cMemoryUsed\", \"cMemoryMaxUsed\"]\n\tmetrics3=[\"cNetworkBytesRecvd\", \"cNetworkBytesSent\"]\n\tmetrics4=[\"cDiskReadBytes\", \"cDiskWriteBytes\"]\n\n\n\t\n\tmetricslist1 = [metrics1, metrics2, metrics3, metrics4]\n\n\n\ttitles2=[\"CPU usage\", \"Memory Usage\", \"Network transfer\", \"Disk Uwrites\"]\n\tytitles2=[\"Percentage\", \"Percentage\", \"GB received\",\"GB written\"]\n\tapplypercent2=[True, True, False, False]\n\n\tmetrics1=[\"vCpuTime\", \"cCpuTime\"]\n\tmetrics2=[\"vMemoryFree\", \"cMemoryUsed\"]\n\tmetrics3=[\"vNetworkBytesRecvd\", \"cNetworkBytesRecvd\"]\n\tmetrics4=[\"cDiskSectorWrites\", \"vDiskSectorWrites\"]\n\n\n\tmetricslist2 = [metrics1, metrics2, metrics3, metrics4]\n\n\tfull_metrics = [metricslist1, metricslist2]\n\tall_percents = [applypercent, applypercent2]\n\n\tfig = make_subplots(rows=2, cols=2)#, subplot_titles=titles)\n\n\ttitles_all = [titles1, titles2]\n\tytitles_all = [ytitles, ytitles2]\n\t\n\tfolder_names=[\"Container_images\", \"VM_images\"]\n\tnum = 0\n\tfor metrics in full_metrics:\n\t\t\n\t\tcurrent_row = 1\n\t\tcurrent_col = 1\n\t\taxiscounter=1\n\t\tcount = 0\n\n\t\tfor sublist in metrics:\n\n\t\t\texport_fig = go.Figure()\n\n\t\t\tfor el in sublist:\n\n\t\t\t\t#the_max= data_frame[sublist].max().max()\n\t\t\t\tthe_max= data_frame[el].max()\n\t\t\t\tif all_percents[num][count] == True:\n\t\t\t\t\tif el == \"vMemoryFree\":\n\t\t\t\t\t\tfig.add_trace(go.Scatter(x=data_frame['currentTime'], y=1-(data_frame[el]/the_max), name=el, hoverinfo='x+y+name'), row=current_row, col=current_col)\n\t\t\t\t\t\texport_fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=1-(data_frame[el]/the_max), name=el, hoverinfo='x+y+name'))\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tfig.add_trace(go.Scatter(x=data_frame['currentTime'], y=data_frame[el]/the_max, name=el, hoverinfo='x+y+name'), row=current_row, col=current_col)\n\t\t\t\t\t\texport_fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=data_frame[el]/the_max, name=el, hoverinfo='x+y+name'))\n\t\t\t\telse:\n\t\t\t\t\tfig.add_trace(go.Scatter(x=data_frame['currentTime'], y=data_frame[el], name=el, hoverinfo='x+y+name'), row=current_row, col=current_col)\n\t\t\t\t\texport_fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=data_frame[el], name=el, hoverinfo='x+y+name'))\n\t\t\t\t#fig.add_trace(go.Scatter(x=data_frame['currentTime'], y=data_frame[el]),\n\t\t\t\t\t\t\n\n\t\t\t\t\n\t\t\tcurrent_col = current_col +1\n\t\t\tif (current_col == 3):\n\t\t\t\tcurrent_col =1\n\t\t\t\tcurrent_row +=1\n\t\t\tcurrentXAxis='xaxis{}'.format(axiscounter)\n\t\t\tcurrentYAxis='yaxis{}'.format(axiscounter)\n\t\t\tfig['layout'][currentXAxis].update(title=\"Time (h)\")\n\t\t\tfig['layout'][currentYAxis].update(title=ytitles[count])\n\t\t\taxiscounter+=1\n\t\t\tupdate_fig(export_fig, ytitles_all[num][count], titles_all[num][count])\n\t\t\tcount +=1\n\n\t\t\texport_graphs_as_images(export_fig, folder_names[num].format(num), str(count))\n\t\tnum +=1\n\n\t\ndef makegraphs(metrics, dfs, percentage_flag, graph_title, x_title, y_title):\n\tstart =0\n\tfig = go.Figure()\n\tfig.update_layout(\n\t title=go.layout.Title(\n\t\ttext=graph_title,\n\t\txref=\"paper\",\n\t\tx=0,\n\t\tfont=dict(\n\t\t\tfamily=\"Courier New, monospace\",\n\t\t\tsize=FONT_SIZE,\n\t\t\tcolor=\"#7f7f7f\"\n\t\t)\n\t ),\n\t xaxis=go.layout.XAxis(\n\t\ttitle=go.layout.xaxis.Title(\n\t\t text=x_title,\n\t\t font=dict(\n\t\t\tfamily=\"Courier New, monospace\",\n\t\t\tsize=FONT_SIZE,\n\t\t\tcolor=\"#7f7f7f\"\n\t\t )\n\t\t)\n\t ),\n\t yaxis=go.layout.YAxis(\n\t\ttitle=go.layout.yaxis.Title(\n\t\t text=y_title,\n\t\t font=dict(\n\t\t\tfamily=\"Courier New, monospace\",\n\t\t\tsize=FONT_SIZE,\n\t\t\tcolor=\"#7f7f7f\"\n\t\t )\n\t\t)\n\t )\n\t)\n\t\n\tdf = dfs[0]\n\tthe_max= df[metrics].max().max()\n\tfor df in dfs:\n\t\tfor x in metrics:\n\t\t\tif x in list(df.columns.values):\t\n\t\t\t\tif percentage_flag == True:\n\t\t\t\t\tfig.add_trace(go.Scatter(x=df['currentTime'], y=df[x]/the_max, name=x, hoverinfo='x+y+name'))\n\t\t\t\telse:\n\t\t\t\t\tfig.add_trace(go.Scatter(x=df['currentTime'], y=df[x], name=x, hoverinfo='x+y+name', marker=dict(\n color='Blue',\n size=120,\n line=dict(\n color='Blue',\n width=12\n )\n )))\n\t\t\t\t\n\texport_graphs_as_images(fig, graph_title, \"temp3\")\n\tfig.show()\n\n\n\n\n\n\nparser = argparse.ArgumentParser(description=\"generates plotly graphs\")\nparser.add_argument('-c', \"--csv_file\", action=\"store\", help='determines sampling size')\nparser.add_argument(\"-c2\", \"--csv_second\", action=\"store\", help='determines sampling size')\n\nparser.add_argument(\"-s\", \"--sampling_interval\", type=int, nargs='?', action=\"store\", help='determines sampling size')\n\nparser.add_argument(\"-t\", \"--title\", action=\"store\", help='determines sampling size')\nparser.add_argument(\"-xt\", \"--x_title\", action=\"store\", help='determines sampling size')\nparser.add_argument(\"-yt\", \"--y_title\", action=\"store\", help='determines sampling size')\n\nparser.add_argument(\"-p\", \"--percentage\", action=\"store_true\", help='determines sampling size')\n\nparser.add_argument('metrics', type=str, nargs='*', help='list of metrics to graph over')\nparser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')\nargs= parser.parse_args()\n\n#dataframe read into from cmdline\ndata_frame = pd.read_csv(args.csv_file)\ndata_frame.head()\ndata_frame['currentTime'] = (data_frame['currentTime'] - data_frame['currentTime'][0])/3600\n\ndata_frame.name=args.csv_file\n\ndfs = []\ndfs.append(data_frame)\nif args.csv_second != None:\n\tdata_frame = pd.read_csv(args.csv_second)\n\tdata_frame.head()\n\tdata_frame['currentTime'] = data_frame['currentTime'] - data_frame['currentTime'][0]\n\tdata_frame.name=args.csv_second\n\tdfs.append(data_frame)\n#choosing which method to make the graphs\n\n#preparing the x axis of time for all graphs\n#obtains the graphs from cmdline, can have no input for every metric in the csv, n metrics space delimited, or a file if --infile tag included at the end\nmetrics = args.read_metrics(args.metrics, data_frame)\n\nprint(metrics)\n#makegraphs(metrics, dfs, args.percentage, args.title, args.x_title, args.y_title)\nnormalize(data_frame);\nmake_four(data_frame)\n" }, { "alpha_fraction": 0.6311562061309814, "alphanum_fraction": 0.6339040398597717, "avg_line_length": 38.09090805053711, "blob_id": "408d51a277cd5874b43894c5429426516f58e91f", "content_id": "c371e3ec6fb7390d32b2098ab85a57f0a328cec7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4731, "license_type": "no_license", "max_line_length": 192, "num_lines": 121, "path": "/Graphing/csv_generation_2.py", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#Authors: David Perez and Tanmay Shah\n\nimport json\nimport os\nimport pandas as pd\nimport argparse\n\n\n#usage: python csv_generation_2.py path_of_folder_with_json sampling_delta metrics(file or space delimited list, if file include --infile, leave blank for all metrics found in the json files.)\n\ndef read_metrics_file(metrics):\n\tif (len(metrics) == 1 and path.exists(metrics[0])):\n\t\tmetrics_file= metrics[0]\n\t\twith open(metrics_file, 'r') as f:\n\t\t\tmetrics= f.readline().split()\n\t\t\t#\tprint(metrics)\n\t\tf.close()\n\t\treturn metrics\n\telse:\n\t\tprint(\"Error: Too many arguments or path does not exist\")\n\ndef read_cmdline_metrics(metrics):\n\treturn metrics\n\n# vm_container dictionary to store the virtual machine and container data. Key is the filename and value is the virtual machine and container data.\nvm_container = {}\n#Parse for folder path, and metrics to add.\nparser = argparse.ArgumentParser(description='process path and file /or string of metrics.')\nparser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')\nparser.add_argument('sampling_delta', type=int, nargs='?', default=1, help='determines sampling size')\nparser.add_argument('metrics', type=str, nargs='*', help='list of metrics or file for metrics')\nparser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')\n\nargs= parser.parse_args()\nfile_path = args.file_path\nmetrics = args.read_metrics(args.metrics)\n#currentTime is necessary to be included in metrics as it is used to create time series. We add it here incase its not already included\nmetrics.append('currentTime')\nmetrics = set(metrics)\ndirs = os.listdir( file_path )\n\n# processes dictionary to store process level data\nprocesses = dict()\ndirs= sorted([i for i in os.listdir( file_path ) if i.endswith(\".json\")])\n\nfor file in dirs:\n with open(file_path+'/'+file) as f:\n # Deserialize into python object\n y = json.load(f)\n # A dictionary which contains the value of vm_container dictionary\n r = {}\n\t\n\n # Check for any list or dictionary in y\n # determines what is chosen out of the metrics.\n\t#print metrics\n for k in y:\n if not (k == \"pProcesses\" or k == \"cProcessorStats\"):\n if k in metrics or len(metrics) == 1:\n r[k] = y[k]\n \n \n if (\"cProcessorStats\" in y and \"cNumProcessors\" in y):\n for k in y[\"cProcessorStats\"]:\n if (k in metrics or len(metrics) == 0):\n r[k] = y[\"cProcessorStats\"][k]\n \n if (\"pProcesses\" in y): \n totalProcesses = len(y[\"pProcesses\"]) - 1\n\t #print y[\"pProcesses\"][len(y[\"pProcesses\"]) - 1]\n\t\n\t\n for k in y[\"pProcesses\"][totalProcesses]:\n if k == \"pTime\":\n r[\"pTime\"] = y[\"pProcesses\"][totalProcesses][\"pTime\"]\n\t\n\t\n # Loop through the process level data\n for i in range(totalProcesses):\n # A dictinary containing process level data\n s = {\"filename\": file}\n\n for k in y[\"pProcesses\"][i]:\n s[k] = y[\"pProcesses\"][i][k]\n\n s[\"currentTime\"] = r[\"currentTime\"]\n\n # If the process id is already in the processes, append to the list of processes\n pids = []\n if y[\"pProcesses\"][i][\"pId\"] in processes:\n pids = processes[y[\"pProcesses\"][i][\"pId\"]]\n pids.append( s )\n processes[y[\"pProcesses\"][i][\"pId\"]] = pids\n \n #write all metrics to csv file\n vm_container[file] = r\n\n\n#creates empty folder for process info\nif not os.path.exists('./process_info/{}'.format(os.path.basename(os.path.normpath(file_path)))):\n\tos.makedirs('./process_info/{}'.format(os.path.basename(os.path.normpath(file_path))))\n\nfor key, value in processes.items():\n df1 = pd.DataFrame(value)\n df1 = df1.sort_values(by='currentTime', ascending=True)\n df1.to_csv(\"./process_info/{}/Pid, {}.csv\".format(os.path.basename(os.path.normpath(file_path)),str(key)))\n\n# Create a separate CSV files for each of the processes\n# Dump dictionary to a JSON file\nwith open(\"vm_container.json\",\"w\") as f:\n f.write(json.dumps(vm_container))\n\n# Convert JSON to dataframe and convert it to CSV\ndf = pd.read_json(\"vm_container.json\").T\ndf=df.iloc[::args.sampling_delta]\ndf.to_csv(\"vm_container.csv\", sep=',')\n\n# Convert JSON to dataframe and convert it to CSV\ndf = pd.read_json(\"vm_container.json\").T\ndf=df.iloc[::args.sampling_delta]\ndf.to_csv(\"vm_container.tsv\", sep='\\t')\n\n" }, { "alpha_fraction": 0.8818336129188538, "alphanum_fraction": 0.8818336129188538, "avg_line_length": 23.957626342773438, "blob_id": "b3a4f377f53b4ac3fd32ee4d246bdcf0474d3d81", "content_id": "a2bf0e46666d73099bf583e9ab2b4012700a040b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 2945, "license_type": "no_license", "max_line_length": 42, "num_lines": 118, "path": "/Graphing/graph_generation_config.ini", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "[all]\ncurrentTime=non-delta\n\n[all_old]\ncCpuTime=numeric_delta\ncCpuTimeKernelMode=numeric_delta\ncCpuTimeUserMode=numeric_delta\ncDiskReadBytes=numeric_delta\ncDiskSectorIO=numeric_delta\ncDiskWriteBytes=numeric_delta\ncId=non-numeric\ncMemoryMaxUsed=non-delta\ncMemoryUsed=non-delta\ncMetricType=non-numeric\ncNetworkBytesRecvd=numeric_delta\ncNetworkBytesSent=numeric_delta\ncNumProcesses=non-delta\ncNumProcessors=non-delta\ncurrentTime=non-delta\npMetricType=non-numeric\nvCpuContextSwitches=numeric_delta\nvCpuIdleTime=numeric_delta\nvCpuMhz=non-numeric\nvCpuNice=numeric_delta\nvCpuSteal=numeric_delta\nvCpuTime=numeric_delta\nvCpuTimeIOWait=numeric_delta\nvCpuTimeIntSrvc=non-delta\nvCpuTimeKernelMode=numeric_delta\nvCpuTimeSoftIntSrvc=numeric_delta\nvCpuTimeUserMode=numeric_delta\nvCpuType=non-numeric\nvDiskMergedReads=numeric_delta\nvDiskMergedWrites=numeric_delta\nvDiskReadTime=numeric_delta\n#vDiskSectorReads=numeric_delta\nvDiskSectorWrites=numeric_delta\nvDiskSuccessfulReads=numeric_delta\nvDiskSuccessfulWrites=numeric_delta\nvDiskWriteTime=numeric_delta\nvId=non-numeric\nvLoadAvg=non-delta\nvMemoryBuffers=non-delta\nvMemoryCached=non-delta\nvMemoryFree=non-delta\nvMemoryTotal=non-delta\nvMetricType=non-numeric\nvNetworkBytesRecvd=numeric_delta\nvNetworkBytesSent=numeric_delta\n\n[cpu_level]\ncCpuTime=numeric_delta\ncCpuTimeKernelMode=numeric_delta\ncCpuTimeUserMode=numeric_delta\ncDiskReadBytes=numeric_delta\ncDiskSectorIO=numeric_delta\ncDiskWriteBytes=numeric_delta\ncId=non-numeric\ncMemoryMaxUsed=non-delta\ncMemoryUsed=non-delta\ncMetricType=non-numeric\ncNetworkBytesRecvd=numeric_delta\ncNetworkBytesSent=numeric_delta\ncNumProcesses=non-delta\ncNumProcessors=non-delta\ncurrentTime=non-delta\n\n[vm_level]\nvCpuContextSwitches=numeric_delta\nvCpuIdleTime=numeric_delta\nvCpuMhz=non-numeric\nvCpuNice=numeric_delta\nvCpuSteal=numeric_delta\nvCpuTime=numeric_delta\nvCpuTimeIOWait=numeric_delta\nvCpuTimeIntSrvc=non-delta\nvCpuTimeKernelMode=numeric_delta\nvCpuTimeSoftIntSrvc=numeric_delta\nvCpuTimeUserMode=numeric_delta\nvCpuType=non-numeric\nvDiskMergedReads=numeric_delta\nvDiskMergedWrites=numeric_delta\nvDiskReadTime=numeric_delta\n#vDiskSectorReads=numeric_delta\nvDiskSectorWrites=numeric_delta\nvDiskSuccessfulReads=numeric_delta\nvDiskSuccessfulWrites=numeric_delta\nvDiskWriteTime=numeric_delta\nvId=non-numeric\nvLoadAvg=non-delta\nvMemoryBuffers=non-delta\nvMemoryCached=non-delta\nvMemoryFree=non-delta\nvMemoryTotal=non-delta\nvMetricType=non-numeric\nvNetworkBytesRecvd=numeric_delta\nvNetworkBytesSent=numeric_delta\n\n[cprocessorstats]\ncCpu#TIME=True\n\n[process_level]\npMetricType=non-numeric\npBlockIODelays=non-delta\npChildrenKernelMode=numeric_delta\npChildrenUserMode=numeric_delta\npCpuTimeKernelMode=numeric_delta\npCpuTimeUserMode=numeric_delta\npId=non-delta\npNonvoluntaryContextSwitches=numeric_delta\npNumThreads=non-delta\npResidentSetSize=non-delta\npVirtualMemoryBytes=non-delta\npVoluntaryContextSwitches=numeric_delta\n\npName=non-numeric\npCmdLine=non-numeric\nfileName=non-numeric\n" }, { "alpha_fraction": 0.7045454382896423, "alphanum_fraction": 0.7329545617103577, "avg_line_length": 15.030303001403809, "blob_id": "5dc0118b83220f46865ac14f556d2adeb6f005a3", "content_id": "7facb36056d7086786b196344441f4461a421e3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 528, "license_type": "no_license", "max_line_length": 92, "num_lines": 33, "path": "/Profiler_Bash/ubuntu/run_commands.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# This script runs a test for 2 cpu bounded processes and a memory bounded process\n\necho \"This is the start for testing...\"\necho \"The fisrt process: \"\necho \"\"\necho \necho\nsysbench --test=cpu --cpu-max-prime=20000 --max-requests=4000 run\n\necho\necho\necho\necho \"The second process: \"\necho\necho\necho\n\n/data/stress_ng.sh\n\necho\necho\necho\necho \"The last process: \"\necho\necho\necho\nsysbench --test=memory --memory-block-size=1M --memory-total-size=100G --num-threads=1 run\n\necho\necho\necho\necho \"This is the end for testing.\"" }, { "alpha_fraction": 0.6901408433914185, "alphanum_fraction": 0.715179979801178, "avg_line_length": 48.153846740722656, "blob_id": "1654107b5f2c5ece6c1815b6b080a2c851f305dd", "content_id": "b66c86b3d417a1eb711e99d2766f9ecb10f62dec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 639, "license_type": "no_license", "max_line_length": 167, "num_lines": 13, "path": "/Profiler_Python/ubuntu/Dockerfile", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "# Varik Hoang <[email protected]>\nFROM ubuntu:18.04\nENV DEBIAN_FRONTEND noninteractive\nENV HOME /data\nCOPY ./profiler .\nCOPY ./rudataall-psutil.py .\nCOPY ./run_commands.sh .\nRUN apt-get update \\\n && apt-get install --no-install-recommends -y build-essential gcc cmake libbz2-dev zlib1g-dev python3 python3-dev python3-setuptools python3-pip docker docker.io \\\n && pip3 install psutil \\\n && apt-get remove -y python3-pip python3-setuptools python3-dev zlib1g-dev libbz2-dev cmake gcc build-essential \\\n && apt-get autoclean -y && apt-get autoremove -y --purge && rm -rf /var/lib/apt/lists/* && rm -rf /var/cache/apk*\nENTRYPOINT [\"./profiler\"]\n" }, { "alpha_fraction": 0.693989098072052, "alphanum_fraction": 0.7049180269241333, "avg_line_length": 32.272727966308594, "blob_id": "bfebcf21e884392ca8e3dd45b6cd43bebc2b660f", "content_id": "0905a00592411aae443d93fa9e79d397cbe52917", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 366, "license_type": "no_license", "max_line_length": 44, "num_lines": 11, "path": "/Profiler_Bash/ubuntu/Dockerfile", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "From ubuntu:18.04\nRUN apt-get -y update \\\n && apt-get install -y jq bc util-linux \\\n && rm -rf /var/lib/apt/lists/*\nRUN apt-get -y update && apt-get -y upgrade\nRUN apt-get install -y stress-ng\nRUN apt-get install -y sysbench\nCOPY ./rudataall.sh /usr/local/bin\nCOPY entrypoint.sh /usr/local/bin\nCOPY run_commands.sh /usr/local/bin\nENTRYPOINT [\"entrypoint.sh\"]\n" }, { "alpha_fraction": 0.8684210777282715, "alphanum_fraction": 0.8684210777282715, "avg_line_length": 76, "blob_id": "ec08fc738c8965447c4558d96ab0b3787cfb2965", "content_id": "57bac5a60105dc276dd167f993c8437ae8abd42e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 76, "license_type": "no_license", "max_line_length": 76, "num_lines": 1, "path": "/Profiler_Python/ubuntu/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "sampling container for the ubuntu release of Python based Container Profiler" }, { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.6571428775787354, "avg_line_length": 20.875, "blob_id": "1596f91bb377bf138d6adb0e1a6a60df313d63ff", "content_id": "8c987ce3f2b7576066fe5261382b6abd7a8f1bd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 350, "license_type": "no_license", "max_line_length": 55, "num_lines": 16, "path": "/Profiler_Python/ubuntu/buildubuntupythonsampler.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#author: Tanmay Shah <[email protected]>\n\nsource $PWD/build.cfg\n\nif [ \"$CHECK_ONLINE_RESPONSE\" = \"${answer#[Yy]}\" ] ;\nthen\n\t#Needs to be set to latest release! #TODO on dockerhub\n\tdocker pull \"$ONLINE_IMAGE\" ; \nelse\n\tcp ../src/profiler .\n\tcp ../src/rudataall-psutil.py .\n\tdocker build -t \"$BUILD_IMAGE_TAG\" .\n\trm ./profiler\n\trm ./rudataall-psutil.py\nfi\n" }, { "alpha_fraction": 0.7017543911933899, "alphanum_fraction": 0.7251461744308472, "avg_line_length": 38.46154022216797, "blob_id": "e674c45a4a1b1b66b93b8137a473cf5419ae43df", "content_id": "865b8ea7d3baf856daecd4692b70448b027e4451", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 513, "license_type": "no_license", "max_line_length": 95, "num_lines": 13, "path": "/Profiler_Python/alpine/Dockerfile", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "From alpine:3.11.3\nCOPY ./profiler .\nCOPY ./rudataall-psutil.py .\nCOPY ./run_commands.sh .\nENV PYTHONUNBUFFERED=1\nRUN apk update && apk add python3-dev && apk add linux-headers && apk add util-linux\nRUN apk add stress-ng && apk add bc\nRUN apk add --no-cache bash\nRUN apk add build-base && apk add --update --no-cache python3 && ln -sf python3 /usr/bin/python\nRUN python3 -m ensurepip\nRUN pip3 install --no-cache --upgrade pip setuptools && pip3 install psutil \nRUN apk del python3-dev \nENTRYPOINT [\"./profiler\"]\n" }, { "alpha_fraction": 0.8367347121238708, "alphanum_fraction": 0.8367347121238708, "avg_line_length": 96, "blob_id": "2600ce5f1489e93d3688bf89c3c725c6aac65544", "content_id": "89f975bd6dc35505cc5ba95d0b05ff9802a1b910", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "no_license", "max_line_length": 96, "num_lines": 1, "path": "/Profiler_Bash/alpine/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "Container for sampling in Alpine Linux environment for Bash based version of Container Profiler. " }, { "alpha_fraction": 0.7101045250892639, "alphanum_fraction": 0.712195098400116, "avg_line_length": 22.866666793823242, "blob_id": "336fe871f9994e99b7d2bc922eaf42832c64baef", "content_id": "263aedc233752f5d5bbbe6488d67312867e5b1e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1435, "license_type": "no_license", "max_line_length": 92, "num_lines": 60, "path": "/Graphing/process_info_report.py", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport sys\nimport json\nimport copy\nimport ConfigParser\nimport pandas as pd\nimport time\n\nimport os\nimport glob\nimport pandas as pd\n\n\nfrom collections import namedtuple\n\nparser = argparse.ArgumentParser(description='process path and file /or string of metrics.')\nparser.add_argument('file_path', action='store', help='')\nargs= parser.parse_args()\nfile_path = args.file_path\n\n\ndirs= [i for i in os.listdir( file_path ) if i.endswith(\".csv\")]\ndirs.sort()\ndfObj = pd.DataFrame()\n\n\nused_count = []\npcmd_list =[]\nfor file_name in dirs:\n\twith open(file_path + '/' + file_name) as csv_file: \n\t\tdata_frame = pd.read_csv(csv_file)\n\t\tdata_frame.head()\n\t\tvalue_counts= data_frame['pCmdLine'].value_counts()\n\t\t#df = value_counts.rename_axis('unique_values').reset_index(name='counts')\n\t\tdf = pd.DataFrame(value_counts)\n\t\tpcmd_list.append(df)\n\n\t\tseries=data_frame.median()\n\t\tseries = series.rename(file_name)\n\n\t\tdfObj = dfObj.append(series)\n\t\tused_count.append(len(data_frame.index))\n\ntotal = pcmd_list[0]\nfor i in pcmd_list[1:]:\n\ttotal = total.add(i, fill_value=0)\n\n\ntotal = total.sort_values(by=\"pCmdLine\", ascending=False)\ntotal.to_csv(\"processes_used.csv\", sep=',')\n\n\ndfObj.insert(len(dfObj.columns) ,\"Times Used\", used_count)\ndfObj= dfObj.sort_values(by=\"Times Used\", ascending=False)\n\ndfObj.index=dfObj[\"pId\"]\ndfObj = dfObj.loc[:, ~dfObj.columns.str.contains('^Unnamed')]\n\ndfObj.to_csv(\"process_info.csv\", sep=',')\n\n\n\n" }, { "alpha_fraction": 0.8117647171020508, "alphanum_fraction": 0.8117647171020508, "avg_line_length": 84, "blob_id": "3c91ef52c0449d46d5133d89bb611953c2c9b40b", "content_id": "ddf745330147e39c02d60b1365e8dad3a3677dca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 85, "license_type": "no_license", "max_line_length": 84, "num_lines": 1, "path": "/Profiler_Bash/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "Bash version of container profiler. It has two enviroments: Alpine and Ubuntu Linux. " }, { "alpha_fraction": 0.7344425916671753, "alphanum_fraction": 0.7364392876625061, "avg_line_length": 31.65217399597168, "blob_id": "9d49e3c3c53e5dcaca2f20be9becc07a9a36daf8", "content_id": "2fa100220f9cc7026d0a97e0b92b8ebea925806b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3005, "license_type": "no_license", "max_line_length": 184, "num_lines": 92, "path": "/Graphing/graph_all.py", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#author: David Perez\nfrom plotly.subplots import make_subplots\nimport random\nimport json\nimport os, sys\nimport pandas as pd\nimport subprocess\nimport numpy as np\n\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport argparse\nfrom os import path\nimport math\nimport shutil\nfrom os.path import abspath\nfrom subprocess import call\n\n\nfrom distutils.dir_util import copy_tree\n\n\ndef read_metrics_file(metrics):\n\n\tif (len(metrics) == 1 and path.exists(metrics[0])):\n\t\tmetrics_file= metrics[0]\n\t\twith open(metrics_file, 'r') as f:\n\t\t\tmetrics= f.readline().split()\n\t\tf.close()\n\t\treturn ' '.join(metrics)\n\n\t\t\n\telse:\n\t\tprint(\"Error: Too many arguments or path does not exist\")\n\ndef read_cmdline_metrics(metrics):\n\treturn ' '.join(metrics)\n\n\n\n#give, x folders, give metrics, give smoothening delta,\n\nparser = argparse.ArgumentParser(description=\"generates plotly graphs by giving folders, metrics, and delta smoothening value\")\nparser.add_argument('-f', \"--folders\", action=\"store\", nargs='*', help='determines sampling size')\nparser.add_argument(\"-s\", \"--sampling_interval\", type=str, nargs='?', default=1, action=\"store\", help='determines sampling size')\nparser.add_argument(\"-m\", \"--metrics\", action=\"store\", nargs='*', default=[], help='list of metrics to graph over')\nparser.add_argument(\"-d\", \"--dynamic_creation\", action=\"store_true\", default=False, help='list of metrics to graph over')\nparser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')\n\n\n\nargs= parser.parse_args()\nmetrics = args.read_metrics(args.metrics)\n\n#print(args.folders);\n#print(args.sampling_interval);\n\nprint(\"making delta_json_gen script\")\nos.system(\"python delta_json_generation.py\")\nprint(\"finished delta_json_gen script\")\n\ncurrent_directory = os.getcwd()\nfinal_directory = os.path.join(current_directory, r'graph_all_json')\n\nif os.path.exists(final_directory):\n\tshutil.rmtree(final_directory)\nif not os.path.exists(final_directory):\n os.makedirs(final_directory)\n\nprint(\"running delta_json_gen on each path given\")\nfor path in args.folders:\n\tpath = os.path.expanduser(path)\n\tos.system(\"python auto_generated_delta_script.py {} {}\".format(path, args.sampling_interval))\n\tcopy_tree(path+\"/delta_json\", final_directory)\n\t\nprint(\"Finished running delta_json_gen on each path given\")\n\nprint(\"Creating a csv file based on dela information created\")\nos.system(\"python csv_generation_2.py {} {} {}\".format(final_directory, \"1\", metrics))\nprint(\"Finished Creating a csv file based on dela information created\")\n\nprint(\"Starting Graphing process\")\nif (args.dynamic_creation) :\n\t#print (\"Tanmay METRICS HERE:\")\n\t#print (metrics)\n\tos.system(\"python plotly_graph_generation.py {} {} -d\".format(\"vm_container.csv\", metrics)) \nelse :\n\tprint (\"Tanmay METRICS HERE:\")\n\tprint (metrics)\n\tos.system(\"python plotly_graph_generation.py {} {}\".format(\"vm_container.csv\", metrics)) \n\nprint(\"Finished Graphing process\")\n\n" }, { "alpha_fraction": 0.7237569093704224, "alphanum_fraction": 0.7348066568374634, "avg_line_length": 21.75, "blob_id": "12455cf838b3b484a0121b78ccf56e66aba6d142", "content_id": "1c7810cce59bd6cfb7adc10ed685aecf77d514a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 181, "license_type": "no_license", "max_line_length": 82, "num_lines": 8, "path": "/Profiler_Bash/alpine/run_commands.sh", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# This script runs a test for 2 cpu bounded processes and a memory bounded process\n\necho \"This is the start for testing...\"\n\nsleep 5\n\necho \"This is the end for testing.\"" }, { "alpha_fraction": 0.84375, "alphanum_fraction": 0.84375, "avg_line_length": 64, "blob_id": "6af35522657005ef62dbfdecc040bc48c990bcf5", "content_id": "69d94fdfd56335a8890f429689e245bd4383260b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 64, "license_type": "no_license", "max_line_length": 64, "num_lines": 1, "path": "/common/README.md", "repo_name": "tanmayuw/ContainerProfiler", "src_encoding": "UTF-8", "text": "folder containing general files including images for README.md ." } ]
36
avinash-arjavalingam/262_project
https://github.com/avinash-arjavalingam/262_project
eded9bbf4907478fe446988ba3713fcd8aeef950
7105c6b673aadb8721342bc6a008acd67010e722
2e20d0f8047eda4ac538b1df9a803d49488446f1
refs/heads/main
2023-02-02T05:21:49.324265
2020-12-19T06:52:18
2020-12-19T06:52:18
322,778,827
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6619911789894104, "alphanum_fraction": 0.672717273235321, "avg_line_length": 41.29069900512695, "blob_id": "50cea9b9f96f4d0b5253ec30c2ad1a6f9ab1786d", "content_id": "9a7b15fee95073fc292eb95119be6425f2314807", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3636, "license_type": "no_license", "max_line_length": 112, "num_lines": 86, "path": "/workloads/toy/simple_system.py", "repo_name": "avinash-arjavalingam/262_project", "src_encoding": "UTF-8", "text": "from simulator.event_queue import EventQueue\nfrom simulator.resource import *\nfrom simulator.dag import Dag\nfrom simulator.system import System\nfrom workloads.toy.linear_dag import linear_dag_clockwork_data, linear_instance_list, linear_instance_placements\n\nclass SimpleSystem(System):\n\tpools: Dict[str, ResourcePool]\n\n\tdef __init__(self,_events: EventQueue, _pools: Dict[str, ResourcePool]):\n\t\tsuper().__init__(_events)\n\t\tself.pools = _pools\n\t\tself.dag_maps = {}\n\n\tdef schedule(self, curr_time, events, *args, **kwargs):\n\t\t# First check for any completed functions\n\t\tfor name, pool in self.pools.items():\n\t\t\tfor resource in pool.get_all_resources():\n\t\t\t\tcompleted = resource.remove_at_time(curr_time)\n\t\t\t\tfor (fid, tag) in completed:\n\t\t\t\t\tassert tag in self.outstanding_requests, \"Tag needs to map to an outstanding request\"\n\t\t\t\t\tself.outstanding_requests[tag] = (True, self.outstanding_requests[tag][1])\n\t\t# Now process any new events\n\t\tfor (dag, input) in events:\n\t\t\t# for linear_instance in linear_instance_list:\n\t\t\t# \tprint(linear_instance.id_res_map)\n\t\t\t# \tprint(linear_instance.running_time)\n\t\t\t# \tprint(linear_instance.running_cost)\n\t\t\t# for price_instance in linear_instance_placements.price_list:\n\t\t\t# \tprint(price_instance.running_cost)\n\t\t\t# for time_instance in linear_instance_placements.time_list:\n\t\t\t# \tprint(time_instance.running_time)\n\t\t\t# sample_placement = (linear_instance_placements.get_sample_list(10000, 10000))[0]\n\t\t\t# self.dag_maps = sample_placement.id_res_map\n\t\t\tprint(linear_dag_clockwork_data)\n\t\t\tif linear_dag_clockwork_data[1][0] < 20 and linear_dag_clockwork_data[1][1] < 85:\n\t\t\t\tself.dag_maps[dag.name] = 'STD_GPU'\n\t\t\telif linear_dag_clockwork_data[0][0] < 20 and linear_dag_clockwork_data[0][1] < 85:\n\t\t\t\tself.dag_maps[dag.name] = 'STD_CPU'\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t\t# print(dag_maps)\n\t\t\t# for sample_instance in linear_instance_placements.get_sample_list(10000, 10000):\n\t\t\t# \tprint(sample_instance.running_time)\n\t\t\t# \tprint(sample_instance.running_cost)\n\t\t\t# print(\"Done\")\n\t\t\t# print(\"Hello\")\n\t\t\tdag.execute() # Need to do this to seal the DAG\n\t\t\tself.outstanding_requests[self.__generate_tag(dag, curr_time)] = (True, dag)\n\t\t# Now schedule functions\n\t\tfor tag, (flag, dag) in self.outstanding_requests.copy().items():\n\t\t\tif flag:\n\t\t\t\tif dag.has_next_function():\n\t\t\t\t\t# Find which resource is faster\n\t\t\t\t\tnxt = dag.peek_next_function()\n\t\t\t\t\t# std_cpu = nxt.resources['STD_CPU']\n\t\t\t\t\t# std_gpu = nxt.resources['STD_GPU']\n\t\t\t\t\t# cpu_time = std_cpu['pre'].get_runtime() + std_cpu['exec'].get_runtime() + std_cpu['post'].get_runtime()\n\t\t\t\t\t# gpu_time = std_gpu['pre'].get_runtime() + std_gpu['exec'].get_runtime() + std_gpu['post'].get_runtime()\n\t\t\t\t\t# if cpu_time < gpu_time:\n\t\t\t\t\t# \tpool = self.pools['STD_CPU_POOL']\n\t\t\t\t\t# else:\n\t\t\t\t\t# \tpool = self.pools['STD_GPU_POOL']\n\t\t\t\t\t# print(self.dag_maps)\n\t\t\t\t\t# print(nxt.unique_id)\n\t\t\t\t\tif self.dag_maps[dag.name] == 'STD_GPU':\n\t\t\t\t\t\tpool = self.pools['STD_GPU_POOL']\n\t\t\t\t\t\t# print(\"GPU\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tpool = self.pools['STD_CPU_POOL']\n\t\t\t\t\t\t# print(\"CPU\")\n\t\t\t\t\t# If there is a resource available, schedule it\n\t\t\t\t\tresult : Optional[Tuple[str, Resource]] = pool.find_first_available_resource(nxt, tag)\n\t\t\t\t\tif result:\n\t\t\t\t\t\t(name, rsrc) = result\n\t\t\t\t\t\trsrc.add_function(dag.next_function(), tag, curr_time)\n\t\t\t\t\t\tself.outstanding_requests[tag] = (False, self.outstanding_requests[tag][1])\n\t\t\t\telse:\n\t\t\t\t\t# Remove if there is no next function\n\t\t\t\t\tself.outstanding_requests.pop(tag)\n\n\tdef __generate_tag(self, dag: Dag, time: int):\n\t\treturn f\"{dag.name}:{time}:{id(dag)}\"\n\n\tdef __decode_tag(self, tag: str) -> Dag:\n\t\treturn self.outstanding_requests[tag]" }, { "alpha_fraction": 0.6436837911605835, "alphanum_fraction": 0.6544723510742188, "avg_line_length": 30.667701721191406, "blob_id": "4ff0084c284d19510266ef35323f871717eefde9", "content_id": "d36a6906db39a36ac9f58e1e677cb4819a62de86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10196, "license_type": "no_license", "max_line_length": 145, "num_lines": 322, "path": "/workloads/toy/linear_dag.py", "repo_name": "avinash-arjavalingam/262_project", "src_encoding": "UTF-8", "text": "from simulator.dag import Dag, Function\nfrom simulator.resource import ResourceType\nfrom simulator.runtime import ConstantTime\nfrom .constants import *\nfrom random import randint, sample\nfrom bisect import bisect\n\n# linear_first = Function(\n# \tunique_id='linear_first',\n# \tresources= {\n# \t\t'STD_CPU' : {\n# \t\t\t'type' : ResourceType.CPU,\n# \t\t\t'space': 100.0, # Ignoring space this function requires on the CPU\n# \t\t\t'pre' : ConstantTime(1),\n# \t\t\t'exec' : ConstantTime(3),\n# \t\t\t'post' : ConstantTime(0)\n# \t\t},\n# \t\t'STD_GPU' : {\n# \t\t\t'type' : ResourceType.GPU,\n# \t\t\t'space': 100.0,\n# \t\t\t'pre' : ConstantTime(1),\n# \t\t\t'exec' : ConstantTime(2),\n# \t\t\t'post' : ConstantTime(0)\n# \t\t}\n# \t}\n# )\n#\n# linear_second = Function( # This function takes a long time to run on a CPU\n# \tunique_id='linear_second',\n# \tresources= {\n# \t\t'STD_CPU' : {\n# \t\t\t'type' : ResourceType.CPU,\n# \t\t\t'space': 100.0, # Ignoring space this function requires on the CPU\n# \t\t\t'pre' : ConstantTime(1),\n# \t\t\t'exec' : ConstantTime(5),\n# \t\t\t'post' : ConstantTime(0)\n# \t\t},\n# \t\t'STD_GPU' : {\n# \t\t\t'type' : ResourceType.GPU,\n# \t\t\t'space': 100.0,\n# \t\t\t'pre' : ConstantTime(1),\n# \t\t\t'exec' : ConstantTime(1),\n# \t\t\t'post' : ConstantTime(0)\n# \t\t}\n# \t}\n# )\n#\n# linear_third = Function( # This function takes a long time to run on a GPU\n# \tunique_id='linear_third',\n# \tresources= {\n# \t\t'STD_CPU' : {\n# \t\t\t'type' : ResourceType.CPU,\n# \t\t\t'space': 100.0, # Ignoring space this function requires on the CPU\n# \t\t\t'pre' : ConstantTime(1),\n# \t\t\t'exec' : ConstantTime(1),\n# \t\t\t'post' : ConstantTime(0)\n# \t\t},\n# \t\t'STD_GPU' : {\n# \t\t\t'type' : ResourceType.GPU,\n# \t\t\t'space': 100.0,\n# \t\t\t'pre' : ConstantTime(1),\n# \t\t\t'exec' : ConstantTime(5),\n# \t\t\t'post' : ConstantTime(0)\n# \t\t}\n# \t}\n# )\n\nlinear_first = Function(\n\tunique_id='linear_first',\n\tresources= {\n\t\t'STD_CPU' : {\n\t\t\t'type' : ResourceType.CPU,\n\t\t\t'space': 100.0, # Ignoring space this function requires on the CPU\n\t\t\t'pre' : ConstantTime(1),\n\t\t\t'exec' : ConstantTime(3),\n\t\t\t'post' : ConstantTime(0)\n\t\t},\n\t\t'STD_GPU' : {\n\t\t\t'type' : ResourceType.GPU,\n\t\t\t'space': 100.0,\n\t\t\t'pre' : ConstantTime(1),\n\t\t\t'exec' : ConstantTime(1),\n\t\t\t'post' : ConstantTime(0)\n\t\t}\n\t}\n)\n\nlinear_second = Function( # This function takes a long time to run on a CPU\n\tunique_id='linear_second',\n\tresources= {\n\t\t'STD_CPU' : {\n\t\t\t'type' : ResourceType.CPU,\n\t\t\t'space': 100.0, # Ignoring space this function requires on the CPU\n\t\t\t'pre' : ConstantTime(1),\n\t\t\t'exec' : ConstantTime(5),\n\t\t\t'post' : ConstantTime(0)\n\t\t},\n\t\t'STD_GPU' : {\n\t\t\t'type' : ResourceType.GPU,\n\t\t\t'space': 100.0,\n\t\t\t'pre' : ConstantTime(1),\n\t\t\t'exec' : ConstantTime(2),\n\t\t\t'post' : ConstantTime(0)\n\t\t}\n\t}\n)\n\nlinear_third = Function( # This function takes a long time to run on a GPU\n\tunique_id='linear_third',\n\tresources= {\n\t\t'STD_CPU' : {\n\t\t\t'type' : ResourceType.CPU,\n\t\t\t'space': 100.0, # Ignoring space this function requires on the CPU\n\t\t\t'pre' : ConstantTime(1),\n\t\t\t'exec' : ConstantTime(8),\n\t\t\t'post' : ConstantTime(0)\n\t\t},\n\t\t'STD_GPU' : {\n\t\t\t'type' : ResourceType.GPU,\n\t\t\t'space': 100.0,\n\t\t\t'pre' : ConstantTime(1),\n\t\t\t'exec' : ConstantTime(3),\n\t\t\t'post' : ConstantTime(0)\n\t\t}\n\t}\n)\n\n# Add costs to functions\nall_funs = [linear_first, linear_second, linear_third]\nfor f in all_funs:\n\tfor rsrc_name, specs in f.resources.items():\n\t\tif rsrc_name == 'STD_CPU':\n\t\t\tspecs['cost'] = COST_PER_CPU_TIME * specs['exec'].get_runtime()\n\t\telse:\n\t\t\tspecs['cost'] = COST_PER_GPU_TIME * specs['exec'].get_runtime()\n\n\nlinear_dag = Dag('linear', funs=[linear_first, linear_second, linear_third])\nlinear_dag.add_edge(linear_first, linear_second)\nlinear_dag.add_edge(linear_second, linear_third)\nlinear_dag.sanity_check()\n\n\ndef gen_clockwork(dag_functions):\n\tdag_cpu_time = 0\n\tdag_cpu_cost = 0\n\tdag_gpu_time = 0\n\tdag_gpu_cost = 0\n\tfor func in list(dag_functions):\n\t\tdag_cpu = func.resources['STD_CPU']\n\t\tdag_gpu = func.resources['STD_GPU']\n\t\tdag_cpu_time += dag_cpu['pre'].get_runtime() + dag_cpu['exec'].get_runtime() + dag_cpu['post'].get_runtime()\n\t\tdag_gpu_time += dag_gpu['pre'].get_runtime() + dag_gpu['exec'].get_runtime() + dag_gpu['post'].get_runtime()\n\t\tdag_cpu_cost += dag_cpu['cost']\n\t\tdag_gpu_cost += dag_gpu['cost']\n\treturn [[dag_cpu_time, dag_cpu_cost], [dag_gpu_time, dag_gpu_cost]]\n\nlinear_dag_clockwork_data = gen_clockwork(linear_dag.functions.values())\n\n\nclass DAGInstance:\n\n\tdef __init__(self, dag):\n\t\tself.dag = dag\n\t\tself.running_time = 0\n\t\tself.running_cost = 0\n\t\t# self.functions_per_resource = {}\n\t\tself.id_res_map = {}\n\t\t# self.id_max_map = {}\n\n\t\t# for res in [\"GPU\", \"CPU\"]:\n\t\t# \tself.functions_per_resource[res] = []\n\n\t# def add_func_res(self, function, resource):\n\t# \tfunc_tuple = (function.id, function.get_max_memory(resource))\n\t# \tself.functions_per_resource[resource].append(func_tuple)\n\n\tdef copy_dag_instance(self):\n\t\tnew_dag_instance = DAGInstance(self.dag)\n\t\tfor id_one, res in list(self.id_res_map.items()):\n\t\t\tnew_dag_instance.id_res_map[id_one] = res\n\t\t# for id_two, max_prev in self.id_max_map:\n\t\t# \tnew_dag_instance.id_max_map[id_two] = max_prev\n\t\t# for i in range(len(self.functions_per_resource)):\n\t\t# \tfor func_tuple in self.functions_per_resource[i]:\n\t\t# \t\tnew_tuple = (func_tuple[0], func_tuple[1])\n\t\t# \t\tnew_dag_instance.functions_per_resource[i].append(new_tuple)\n\t\tnew_dag_instance.running_cost = self.running_cost\n\t\tnew_dag_instance.running_time = self.running_time\n\t\treturn new_dag_instance\n\n\tdef update_dag_instance(self, this_function, res):\n\t\tself.id_res_map[this_function.unique_id] = res\n\t\t# func_time = func.get_resource_runtime(res) + self.id_max_map[func.id]\n\t\t# for root_next_func in func.next_funcs:\n\t\t# \tnext_max_time = 0\n\t\t# \tif root_next_func.id in self.id_max_map:\n\t\t# \t\tnext_max_time = self.id_max_map[root_next_func.id]\n\t\t# \tself.id_max_map[root_next_func.id] = max(func_time, next_max_time)\n\t\t# self.running_time = max(self.running_time, func_time)\n\t\tfunc_res = this_function.resources[res]\n\t\tself.running_time = self.running_time + func_res['pre'].get_runtime() + func_res['exec'].get_runtime() + func_res['post'].get_runtime()\n\t\tself.running_cost = self.running_cost + func_res['cost']\n\t\t# self.add_func_res(func, res)\n\t\t# self.id_max_map.pop(func.id, None)\n\nresource_list = ['STD_CPU', 'STD_GPU']\n\ndef gen_dag_instances(dag):\n\tdep_queue = dag\n\tinstance_list = []\n\n\troot = dep_queue.pop(0)\n\tfor root_res in list(resource_list):\n\t\troot_dag_instance = DAGInstance(dag)\n\t\troot_dag_instance.id_res_map[root.unique_id] = root_res\n\t\t# print(root_dag_instance.id_res_map[root.unique_id])\n\t\t# for root_next_func in root.next_funcs:\n\t\t# \troot_dag_instance.id_max_map[root_next_func.id] = root.get_resource_runtime(root_res)\n\t\troot_func_res = root.resources[root_res]\n\t\troot_dag_instance.running_time = root_func_res['pre'].get_runtime() + root_func_res['exec'].get_runtime() + root_func_res['post'].get_runtime()\n\t\troot_dag_instance.running_cost = root_func_res['cost']\n\t\t# root_dag_instance.add_func_res(root, root_res)\n\t\tinstance_list.append(root_dag_instance)\n\n\twhile len(dep_queue) > 0:\n\t\tfunction = dep_queue.pop(0)\n\t\tnew_instance_list = []\n\t\tfor dag_instance in instance_list:\n\t\t\tfor res in list(resource_list):\n\t\t\t\tnew_dag_instance = dag_instance.copy_dag_instance()\n\t\t\t\tnew_dag_instance.update_dag_instance(function, res)\n\t\t\t\tnew_instance_list.append(new_dag_instance)\n\t\tinstance_list = new_instance_list\n\n\t# for finished_dag_instance in instance_list:\n\t# \tfor func_res in list(finished_dag_instance.functions_per_resource.values()):\n\t# \t\tsorted(func_res, key=lambda x: x[1])\n\n\treturn instance_list\n\n\ndef select_pareto_instances(instance_list):\n\tpareto_list = []\n\n\tfor instance in instance_list:\n\t\tpareto_add = True\n\t\tfor comp_instance in instance_list:\n\t\t\tif not (instance is comp_instance):\n\t\t\t\tif (comp_instance.running_time <= instance.running_time) and (comp_instance.running_cost <= instance.running_cost):\n\t\t\t\t\tpareto_add = False\n\t\t\t\t\tbreak\n\t\tif pareto_add:\n\t\t\tpareto_list.append(instance)\n\n\treturn pareto_list\n\nlinear_instance_list = select_pareto_instances(gen_dag_instances([linear_first, linear_second, linear_third]))\n\nclass DAGSelector:\n\n\tdef __init__(self, instance_list, sample_size):\n\t\tself.price_list = sorted(instance_list, key=lambda x: x.running_cost)\n\t\tself.time_list = sorted(instance_list, key=lambda x: x.running_time)\n\t\tself.sample_size = int(max(min(sample_size, len(self.price_list)), 1))\n\n\tdef binary_find_index(self, value, this_list, type):\n\t\tkeys = []\n\t\tif type == \"price\":\n\t\t\tkeys = [this_inst.running_cost for this_inst in this_list]\n\t\telse:\n\t\t\tkeys = [this_inst.running_time for this_inst in this_list]\n\t\tpos = (bisect(keys, value, 0, len(this_list)))\n\t\treturn pos\n\n\tdef get_sample_list(self, price_slo, time_slo):\n\t\tsample_list = []\n\t\tprice_index = self.binary_find_index(price_slo, self.price_list, \"price\")\n\t\ttime_index = self.binary_find_index(time_slo, self.time_list, \"cost\")\n\t\tif (price_index <= 0) or (time_index <= 0):\n\t\t\treturn []\n\n\t\tend_index = len(self.price_list) - time_index\n\t\tvalid_size = price_index - end_index\n\t\tif valid_size <= 0:\n\t\t\treturn []\n\n\t\tvalid_list = self.price_list[end_index:price_index]\n\t\tmin_size = min(self.sample_size, len(valid_list))\n\t\tsample_list = sample(valid_list, min_size)\n\t\treturn sample_list\n\n\tdef get_placements(self, cluster, sample_instance):\n\t\tfunction_place_map = {}\n\t\tfor res, res_list in list(sample_instance.functions_per_resource.items()):\n\t\t\tres_nodes = cluster.nodes_by_res[res]\n\t\t\tupdated_nodes = []\n\t\t\tfor func_id, func_mem in res_list:\n\t\t\t\tplaced = False\n\t\t\t\tdone = False\n\t\t\t\twhile (not placed) and (not done):\n\t\t\t\t\tif len(res_nodes) == 0:\n\t\t\t\t\t\tdone = True\n\t\t\t\t\telif func_mem <= res_nodes[0].memory:\n\t\t\t\t\t\tfunction_place_map[func_id] = res_nodes[0].id\n\t\t\t\t\t\tres_nodes[0].memory = res_nodes[0].memory - func_mem\n\t\t\t\t\t\tplaced = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tpopped_node = res_nodes.pop(0)\n\t\t\t\t\t\tupdated_nodes.append(popped_node)\n\t\t\t\tif done:\n\t\t\t\t\tbreak\n\t\t\tif len(res_nodes) == 0:\n\t\t\t\tcluster.nodes_by_res[res] = sorted(updated_nodes, key=lambda x: x.memory)\n\t\t\t\treturn {}\n\t\t\telse:\n\t\t\t\tres_nodes.extend(updated_nodes)\n\t\t\t\tcluster.nodes_by_res[res] = sorted(res_nodes, key=lambda x: x.memory)\n\n\t\treturn function_place_map\n\nlinear_instance_placements = DAGSelector(linear_instance_list, 1)" } ]
2
davew-msft/MLOps-E2E
https://github.com/davew-msft/MLOps-E2E
a3188135238530d3f117617ea5ec8e1e1c87ae49
076115f527287845c1c8c6b5f06214277b0a8e3e
74b483e71c6041e9ea46e07471db236ae342bff1
refs/heads/master
2022-02-12T04:07:11.602203
2022-01-21T16:31:28
2022-01-21T16:31:28
217,067,799
11
11
null
null
null
null
null
[ { "alpha_fraction": 0.7803511619567871, "alphanum_fraction": 0.784460186958313, "avg_line_length": 81.71875, "blob_id": "d8e6198ee7dcb6691af61da27c32d4c31a505734", "content_id": "182a2f3591fb4628029fe6ee185e9a4784db12aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2677, "license_type": "no_license", "max_line_length": 512, "num_lines": 32, "path": "/Lab12/README-13.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 13: Pipelines and Inference Deployment\r\n\r\nYou just transformed the experimental notebook into a Python script that can be managed and run independently of the notebook environment. You used the script to train the model, then you used code in the notebook to register the model that the script produced.\r\n\r\n Your team would like to have the training, registration, and future steps such as evaluation be easily reproduced and increase the speed and scale of those steps being executed. To achieve this objective, you'll encapsulate both model training and model registration as steps in an Azure ML pipeline which utilizes provisioned on-demand scalable Azure compute targets. The Azure compute target optimizes the time spent running training with a full set of data and can scale down to no cost when the job is done.\r\n\r\nIn order to improve their insurance application approval software, the team would also like a real-time prediction of the likelihood that a driver will file a claim. To accomplish this objective, you'll deploy the registered model as a real-time inferencing service using the provided model scoring script.\r\n\r\n\r\n### Discussion topics before we start this section \r\n\r\n* [What are AMLS pipelines?](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines)\r\n* [Create and run machine learning pipelines with Azure Machine Learning SDK](https://docs.microsoft.com/azure/machine-learning/how-to-create-your-first-pipeline)\r\n* [Troubleshooting machine learning pipelines](https://docs.microsoft.com/azure/machine-learning/how-to-debug-pipelines)\r\n\r\n\r\n### Steps\r\n1. Consider building an Azure ML compute target on which to run the pipeline and its steps. If you do this now it will save time during the lab. Any small cluster will do. \r\n * To avoid automatic scale down of Azure ML managed compute the training compute option **Idle seconds before scale down** has been set to 1800. This can save time between pipeline runs if you are frequently debugging AML pipelines. **Or set the minimum nodes to 1.** \r\n1. Navigate to [Lab12/pipelines.ipynb](./pipelines.ipynb). \r\n * upload this notebook to your JupyterLab and run through the cells\r\n\r\n\r\n\r\n## Helpful Hints\r\n\r\n\r\n## Resources\r\n\r\n* [Documentation - What are compute targets in Azure Machine Learning?](https://docs.microsoft.com/azure/machine-learning/concept-compute-target)\r\n* [Create Azure Machine Learning datasets](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-register-datasets)\r\n* [Tutorial: Convert ML experiments to production Python code](https://docs.microsoft.com/en-gb/azure/machine-learning/tutorial-convert-ml-experiment-to-production)" }, { "alpha_fraction": 0.8009708523750305, "alphanum_fraction": 0.8203883767127991, "avg_line_length": 39.599998474121094, "blob_id": "27916a016e42e39da22527e38ca29190a359cfb8", "content_id": "e8fe37d00a554a0079ec5acd3aff0cb0307d0b94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 206, "license_type": "no_license", "max_line_length": 69, "num_lines": 5, "path": "/samples/dl-ts-forecasting/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "Notebooks should be run in the numbered order you see in this folder.\r\n\r\ndataset can be found in davewdemoblobs container: gefcom2014-energy\r\n\r\nhttps://github.com/Azure/DeepLearningForTimeSeriesForecasting" }, { "alpha_fraction": 0.7444507479667664, "alphanum_fraction": 0.7541263699531555, "avg_line_length": 55.2717399597168, "blob_id": "1c121f4fdfb4cadf212663e797ae8bf01527c186", "content_id": "bd94bc0fbbec20924e6258b0b1b22f6dc5a94cf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5271, "license_type": "no_license", "max_line_length": 352, "num_lines": 92, "path": "/Lab24/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 24: Monitoring Model Performance\r\n\r\nIn this lab we will monitor the performance of the deployed model. \r\n\r\n\r\n### Discussion topics before we start this section \r\n\r\n1. \r\n2. \r\n\r\n## Steps\r\n\r\n### Activate App Insights and data collection on the deployed model\r\n\r\n1. Browse to your Azure Notebooks project and navigate to the [Model Telemetry](notebooks/Model%20Telemetry.ipynb) notebook. \r\n\r\n2. Follow the instructions within the notebook to complete the task. When finished, your deployed model has now both [Application Insights](https://docs.microsoft.com/en-us/azure/azure-monitor/app/app-insights-overview) integration and data collection activated.\r\n\r\n3. Note that if there are errors (for example, `Too many requests for service compliance-classifier-service (overloaded)`) when you make calls against the deployed web service after your enable app insights (last cell in the `Model Telemetry` notebook). Please wait for 5 minutes and rerun the cell to make a few calls against the deployed web service.\r\n\r\n\r\n### Task 2: Check Application Insights telemetry\r\n\r\n1. Navigate to the Azure Portal and locate the resource group you created for this lab (the one where the Azure Machine Learning service workspace was created in).\r\n\r\n2. Locate the Application Insights instance in the resource group and click on it.\r\n\r\n ![Application Insights instance in resource group.](media/telemetry-01.png 'Resource Group Overview')\r\n\r\n3. Go to **Overview**.\r\n\r\n4. From the top row of the right section select **Logs (Analytics)**. This will open the Application Insights query editor with an empty new query.\r\n\r\n ![From Application Insights Dashboard, select Logs to open the Query Editor.](media/telemetry-02.png 'Application Insights - Dashboard')\r\n\r\n5. In the left pane, make sure the **Schema** tab is selected. \r\n\r\n6. Hover over **requests** and click the icon on the right side - \"Show sample records from this table\". \r\n\r\n ![In Application Insights create requests query.](media/telemetry-03.png 'Create Requests Query')\r\n\r\n7. Look at the results displayed. Application Insights is tracing all requests made to your model. Sometimes, a couple of minutes are needed for the telemetry information to propagate. If there are no results displayed, wait a minute, call again your model, and click **Run** to re-execute the Application Insights query. \r\n\r\n ![In Application Insights observe requests query results.](media/telemetry-04.png 'Requests Query Results')\r\n\r\n*Note that if you do not see telemetry information after selecting **Run** to re-execute the Application insights query. Please rerun the last cell in the `Model Telemetry` notebook few more times to generate more data. Then select **Run** on this page to re-execute the Application insights query.*\r\n\r\n### Task 3: Check the data collected\r\n\r\n1. Navigate to the Azure Portal and locate the resource group you created for this lab (the one where the Azure Machine Learning service workspace was created in).\r\n2. Locate the Storage Account instance in the resource group and click on it.\r\n\r\n ![From the Resource Group Overview locate the Telemetry Storage account](media/telemetry-05.png 'Resource Group Overview')\r\n\r\n3. Go to **Storage Explorer (preview)**.\r\n\r\n4. Expand the **BLOB CONTAINERS** section and identify the **modeldata** container. Select **More->Refresh** if you do not see **modeldata** container.\r\n\r\n ![Locate the telemetry blob container in the storage account.](media/telemetry-06.png 'Storage Explorer') \r\n\r\n5. Identify the CSV files containing the collected data. The path to the output blobs is based on the following structure:\r\n\r\n `modeldata -> subscriptionid -> resourcegroup -> workspace -> webservice -> model -> version -> identifier -> year -> month -> day -> data.csv`\r\n\r\n ![Locate telemetry data in the blob container.](media/telemetry-07.png 'Storage Explorer - data.csv')\r\n\r\n\r\n## After the hands-on lab \r\n\r\nDuration: 5 minutes\r\n\r\nTo avoid unexpected charges, it is recommended that you clean up all of your lab resources when you complete the lab.\r\n\r\n### Task 1: Clean up lab resources\r\n\r\n1. Navigate to the Azure Portal and locate the `MCW-AI-Lab` Resource Group you created for this lab.\r\n\r\n2. Select **Delete resource group** from the command bar.\r\n\r\n ![Screenshot of the Delete resource group button.](media/image71.png 'Delete resource group button')\r\n\r\n3. In the confirmation dialog that appears, enter the name of the resource group and select **Delete**.\r\n\r\n4. Wait for the confirmation that the Resource Group has been successfully deleted. If you don't wait, and the delete fails for some reason, you may be left with resources running that were not expected. You can monitor using the Notifications dialog, which is accessible from the Alarm icon.\r\n\r\n ![The Notifications dialog box has a message stating that the resource group is being deleted.](media/image72.png 'Delete Resource Group Notification Dialog')\r\n\r\n5. When the Notification indicates success, the cleanup is complete.\r\n\r\n ![The Notifications dialog box has a message stating that the resource group has been deleted.](media/image73.png 'Delete Resource Group Notification Dialog')\r\n\r\nYou should follow all steps provided _after_ attending the Hands-on lab.\r\n\r\n" }, { "alpha_fraction": 0.484175443649292, "alphanum_fraction": 0.6107717752456665, "avg_line_length": 38.931819915771484, "blob_id": "c6ff166a0b405848f2341d754ad62d264f068ca2", "content_id": "9daa975ecaacb675a1094285d367972cc763b7ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1801, "license_type": "no_license", "max_line_length": 403, "num_lines": 44, "path": "/Lab12/score.py", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "import json\r\nimport numpy\r\nfrom azureml.core.model import Model\r\nimport joblib\r\n\r\n\r\ndef init():\r\n global LGBM_MODEL\r\n # Load the model from file into a global object\r\n model_path = Model.get_model_path(\r\n model_name=\"driver_model\")\r\n LGBM_MODEL = joblib.load(model_path)\r\n\r\n\r\ndef run(raw_data, request_headers):\r\n data = json.loads(raw_data)[\"data\"]\r\n data = numpy.array(data)\r\n result = LGBM_MODEL.predict(data)\r\n\r\n # Demonstrate how we can log custom data into the Application Insights\r\n # traces collection.\r\n # The 'X-Ms-Request-id' value is generated internally and can be used to\r\n # correlate a log entry with the Application Insights requests collection.\r\n # The HTTP 'traceparent' header may be set by the caller to implement\r\n # distributed tracing (per the W3C Trace Context proposed specification)\r\n # and can be used to correlate the request to external systems.\r\n print(('{{\"RequestId\":\"{0}\", '\r\n '\"TraceParent\":\"{1}\", '\r\n '\"NumberOfPredictions\":{2}}}'\r\n ).format(\r\n request_headers.get(\"X-Ms-Request-Id\", \"\"),\r\n request_headers.get(\"Traceparent\", \"\"),\r\n len(result)\r\n ))\r\n\r\n return {\"result\": result.tolist()}\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Test scoring\r\n init()\r\n TEST_ROW = '{\"data\":[[0,1,8,1,0,0,1,0,0,0,0,0,0,0,12,1,0,0,0.5,0.3,0.610327781,7,1,-1,0,-1,1,1,1,2,1,65,1,0.316227766,0.669556409,0.352136337,3.464101615,0.1,0.8,0.6,1,1,6,3,6,2,9,1,1,1,12,0,1,1,0,0,1],[4,2,5,1,0,0,0,0,1,0,0,0,0,0,5,1,0,0,0.9,0.5,0.771362431,4,1,-1,0,0,11,1,1,0,1,103,1,0.316227766,0.60632002,0.358329457,2.828427125,0.4,0.5,0.4,3,3,8,4,10,2,7,2,0,3,10,0,0,1,1,0,1]]}' # NOQA: E501\r\n PREDICTION = run(TEST_ROW, {})\r\n print(\"Test result: \", PREDICTION)\r\n" }, { "alpha_fraction": 0.7219192981719971, "alphanum_fraction": 0.7508178949356079, "avg_line_length": 37.69565200805664, "blob_id": "03d3882e205a4482e95d0bd42caab5ba3acbbf62", "content_id": "e36eaabb49b43ec3e849459e0bfdc0453422f862", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1834, "license_type": "no_license", "max_line_length": 170, "num_lines": 46, "path": "/instructor-readme.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "\r\n\r\nNotes:\r\n\r\n* check if the starter repo has been updated, and if so, pull that into my repo. (There's probably an easier way to do this). [Instructions are here](./Lab20/README.md)\r\n\r\n## Other Resources \r\n\r\n[tweets using DBX, EH, TimeSeries Insights](https://github.com/Azure/data-ai-iot/tree/master/databricks) \r\n\r\n### DBX Notebooks\r\n\r\n[Genomics Pipelines](https://pages.databricks.com/rs/094-YMS-629/images/Simplifying%20Genomics%20Pipelines%20at%20Scale%20with%20Databricks.html) \r\n\r\n[Streaming stock analysis with delta lake](https://pages.databricks.com/rs/094-YMS-629/images/streaming-stock-data-analysis-setup.html) setup \r\n\r\n[part 2](https://pages.databricks.com/rs/094-YMS-629/images/streaming-stock-data-analysis-main.html) \r\n \r\n\r\n\r\ndata generator simulator\r\n https://github.com/DataSnowman/iotedgeProject/blob/master/iotedge/datagenerators/SimulatedDevice.py\r\n\r\n[dbx training](https://www.linkedin.com/learning/azure-databricks-essential-training/what-you-should-know?u=3322)\r\n\r\n[advert attribution notebook](https://s3.us-east-2.amazonaws.com/databricks-dennylee/notebooks/attribution-delta-blog.html) \r\n\r\n[streaming mobile game events notebook](https://docs.databricks.com/_static/notebooks/mobile-event-stream-etl.html) \r\n\r\n[anagha dbx structured streaming taxi data with EH and dbx delta](https://github.com/anagha-microsoft/databricks-workshops/tree/master/msready2019)\r\n\r\n[Advanced dbx hack](https://github.com/annedroid/Ready2019_AA_AI319). supervised, unsupervised, etc\r\n\r\n### AMLS\r\n\r\n[dog breeds classifier](https://github.com/maxluk/dogbreeds-webinar/blob/master/dog-breed-classifier.ipynb)\r\n\r\n### DevOps\r\n\r\n[Pipelines](https://github.com/microsoft/WhatTheHack/tree/master/010-AzureDevOps) \r\n\r\n[MCW MLOps](https://github.com/microsoft/MCW-ML-Ops) \r\n\r\n* AzDO\r\n* ACI\r\n* AKS\r\n* AMLS\r\n* jupyter\r\n\r\n\r\n" }, { "alpha_fraction": 0.708578884601593, "alphanum_fraction": 0.724950909614563, "avg_line_length": 39.216217041015625, "blob_id": "53a61d1ed4aecff6eacb904b5c09403001ce1e35", "content_id": "cc82233577032e4257e47baa24776b16c27faf92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1527, "license_type": "no_license", "max_line_length": 198, "num_lines": 37, "path": "/Lab2/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 2: Azure Machine Learning Services\r\n\r\nWe are going to begin doing some rudimentary data science. For this workshop we'll use combinations of Azure services to create an end-to-end solution. Here's the architecture we are shooting for:\r\n\r\n![AMLserviceWorkspace](../images/dsWithAzureDatabricksAML.png). \r\n\r\nIn this lab we are going to deploy and learn a little bit about what Azure Machine Learning Services (AMLS) is, what problems it solves, etc. This is probably the shortest and simplest lab. \r\n\r\n[What is MLOps and How Does AMLS Help - Presentation](AMLS.pptx) \r\n\r\n### The AMLS workspace taxonomy\r\n\r\n[The taxonomy and AMLS explained](https://docs.microsoft.com/en-us/azure/machine-learning/concept-workspace)\r\n\r\n![](../images/amls.png)\r\n\r\n\r\n### Discussion topics before we start this section \r\n\r\n1. What is AMLS? Why use it? \r\n2. Alternatives?\r\n3. Why do we introduce this now and not AFTER we throw our models over the wall to the data engineer/DevOps/SRE Engineer? \r\n\r\n### Deploy AMLS\r\n\r\n1. Create an Azure Machine Learning service workspace in the Resource Group created earlier using the Azure Portal\r\n1. Follow all prompts and deploy the service. \r\n1. You will need to make note of the following AMLS values from the Overview page after deployment: \r\n\r\nName | Value | Example Value|\r\n------|------------------|--------|\r\nSubscription ID| | 52061d21-01dd-4f9e-aca9-60fff4d67ee2|\r\nResource Group| | MLOpsWorkshop|\r\nLocation||East US|\r\nWorkshop Name ||mlops|\r\n\r\n**We will need these values later**\r\n\r\n" }, { "alpha_fraction": 0.7090128660202026, "alphanum_fraction": 0.7218884229660034, "avg_line_length": 41.22222137451172, "blob_id": "4aa73739e64c29f2c9f2c4134aec156a8947c752", "content_id": "45cfba8d245d7dae4a3ff48295f82844c9bbba70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1167, "license_type": "no_license", "max_line_length": 124, "num_lines": 27, "path": "/Lab11/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 11: Let's Build a Basic Model\r\n\r\nIn this lab a data scientist builds a basic model without any AMLS integration. \r\n\r\n\r\n### Discussion topics before we start this section \r\n\r\n1. Make sure AMLS is spun up. \r\n1. To run your notebooks in the workspace, create a **Compute Instance** and wait for it to start.\r\n * Why do our work via remote compute? \r\n1. Make sure your kernel is set to **Python 3.6 - AzureML** \r\n## Step 1: Train a Model\r\n\r\nFirst we are going to do the absolutely minimum to get a training environment and trained model working with our use case. \r\n\r\n1. Navigate to [Lab11/safe-driver-prediction.ipynb](./Lab11/safe-driver-prediction.ipynb). \r\n * this is a basic notebook that trains and validates a model. \r\n * upload this file to your JupyterLab environment or open it from JupyterLab if you cloned the repo. \r\n1. Walk through each cell in the notebook.\r\n\r\n>>Let us know if you have any questions or concerns. \r\n\r\n\r\n## Resources\r\n\r\n* [pickle — Python object serialization](https://docs.python.org/3/library/pickle.html)\r\n* [Jupyter Notebook for Beginners: A Tutorial](https://www.dataquest.io/blog/jupyter-notebook-tutorial/)" }, { "alpha_fraction": 0.7739307284355164, "alphanum_fraction": 0.7739307284355164, "avg_line_length": 161.6666717529297, "blob_id": "2c497477084581fd42f9f3b6b66cd2a0f99c32d7", "content_id": "d6c7c23e1c97e9e0adaccfaef824a4f7b089266a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 491, "license_type": "no_license", "max_line_length": 368, "num_lines": 3, "path": "/Lab303/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "We build a computer vision solution entirely from scratch using tensorflow. First we learn all of the concepts of how neural nets work. Then we show just how difficult it is to train a CNN from scratch. But don't dispair...we then look at how to use a pre-built tf model from the internet and use a trick called transfer learning to make our model almost perfect. \r\n\r\nI have this in a [separate repo](https://https://git.davewentzel.com/demos/datasciencehack) because it is so popular. " }, { "alpha_fraction": 0.6572554111480713, "alphanum_fraction": 0.6728496551513672, "avg_line_length": 37.558441162109375, "blob_id": "11242a23e99860f8aa05991d3b7caf2eefce13cc", "content_id": "9be1a3c06191b1cd352d1fdce6724faa097e04d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6092, "license_type": "no_license", "max_line_length": 257, "num_lines": 154, "path": "/Lab22/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 22: Setup and Run a Release Pipeline\r\n\r\nOnce the build pipeline is created and a model is tested and its \"metric\" (accuracy in this case) is logged, we want to know whether we should deploy it. \r\n\r\n\r\n### Discussion topics before we start this section \r\n\r\n1. review the workflow\r\n\r\n## Steps\r\n\r\n1. Return to Azure DevOps and navigate to **Pipelines, Releases** and select **New pipeline**.\r\n\r\n ![To create new Release Pipeline navigate to Pipelines, Releases and select New pipeline.](media/19.png 'New Release Pipeline')\r\n\r\n2. Select **Empty job**. \r\n\r\n ![Select empty job to start building the release pipeline.](../images/20.png 'Select a template: Empty job')\r\n\r\n3. Provide Stage name: `Deploy & Test` and close the dialog.\r\n\r\n1. Select **Add an artifact**.\r\n\r\n ![Add a new artifact to the release pipeline.](../images/22.png 'Add an artifact')\r\n\r\n2. Select Source type: `Build`, follow the prompts: \r\n\r\n ![Provide information to add the build artifact.](../images/23.png 'Add a build artifact')\r\n \r\n2. Open the **Variables** tab.\r\n\r\n ![Open variables tab.](../images/25.png 'Release Pipeline Variables')\r\n\r\n3. Add four Pipeline variables as name - value pairs and then select **Save**:\r\n\r\n a. Name: `aks_name` Value: `<use the aks name from your AMLS workspace`\r\n \r\n b. Name: `description` Value: `\"Compliance Classifier Web Service\"` *Note the double quotes around description value*.\r\n \r\n c. Name: `service_name` Value: `compliance-classifier-service`\r\n\r\n d. Name: `aks_region` Value: `eastus`\r\n\r\n >**Note**:\r\n >- Keep the scope for the variables to `Deploy & Test` stage.\r\n >- The name of the Azure region should be the same one that was used to create Azure Machine Learning workspace earlier on.\r\n \r\n ![Add four pipeline variables as name value pairs and save.](../images/26.png 'Add Pipeline Variables')\r\n \r\n### Setup Agent Pool for Deploy & Test stage\r\n \r\n1. Open the **Tasks** tab.\r\n\r\n ![Open view stage tasks link.](../images/27.png 'Pipeline Tasks')\r\n \r\n2. Select **Agent job** and change **Agent pool** to `Azure Pipelines` and change **Agent Specification** to `ubuntu-16.04`.\r\n\r\n ![Change Agent pool to be Hosted Ubuntu 1604.](../images/28_2.png 'Agent Job Setup')\r\n \r\n### Add Use Python Version task\r\n\r\n1. Select **Add a task to Agent job**, search for `Use Python Version`, and select **Add**.\r\n\r\n ![Add Use Python Version task to Agent job.](../images/29.png 'Add Use Python Version Task')\r\n\r\n2. Provide **Display name:** `Use Python 3.6` and **Version spec:** `3.6`.\r\n\r\n ![Provide Display name and Version spec for the Use Python version task.](../images/30.png 'Use Python Version Task Dialog')\r\n \r\n### Add Install Requirements task\r\n\r\n1. Select **Add a task to Agent job**, search for `Bash`, and select **Add**.\r\n \r\n ![Add Bash task to Agent job.](../images/31.png 'Add Bash Task')\r\n\r\n2. Provide **Display name:** `Install Requirements` and select **object browser ...** to provide **Script Path**.\r\n\r\n ![Provide Display name for the Bash task.](../images/32.png 'Bash Task Dialog')\r\n\r\n3. Navigate to **Linked artifacts/_mlops-quickstart/devops-for-ai/environment_setup** and select **install_requirements.sh**.\r\n\r\n ![Provide Script Path to the Install Requirements bash file.](../images/33.png 'Select Path Dialog')\r\n\r\n4. Expand **Advanced** and select **object browser ...** to provide **Working Directory**.\r\n\r\n ![Expand advanced section to provide Working Directory.](../images/34.png 'Bash Task - Advanced Section')\r\n \r\n5. Navigate to **Linked artifacts/_mlops-quickstart/devops-for-ai** and select **environment_setup**.\r\n\r\n ![Provide path to the Working Directory.](../images/35.png 'Select Path Dialog')\r\n \r\n### Add Deploy & Test Webservice task\r\n \r\n1. Select **Add a task to Agent job**.\r\n\r\n ![Select Add a task to Agent job.](../images/36_1.png 'Add a Task to Agent job')\r\n \r\n2. Search for `Azure CLI`, and select **Add**\r\n\r\n ![Add Azure CLI task to Agent job.](../images/36_2.png 'Azure CLI Task')\r\n\r\n3. Provide the following information for the Azure CLI task:\r\n\r\n a. Display name: `Deploy & Test Webservice`\r\n \r\n b. Azure subscription: *This is the service connection*.\r\n \r\n c. Script Location: `Inline script`\r\n \r\n d. Inline Script: `python aml_service/deploy.py --service_name $(service_name) --aks_name $(aks_name) --aks_region $(aks_region) --description $(description)`\r\n \r\n ![Setup the Azure CLI task using the information above.](../images/38.png 'Azure CLI Task Dialog')\r\n\r\n4. Expand **Advanced** and provide **Working Directory:** `$(System.DefaultWorkingDirectory)/_mlops-quickstart/devops-for-ai`.\r\n\r\n ![Provide Working Directory for the Azure CLI task.](../images/39.png 'Azure CLI Task - Working Directory')\r\n \r\n\r\n>Please review the code in `aml_service/deploy.py`. This step will read the `eval_info.json` and if the evaluation step recommended to deploy the new trained model, it will deploy the new model to production in an **Azure Kubernetes Service (AKS)** cluster.\r\n\r\n### Define Deployment Trigger\r\n\r\n1. Navigate to **Pipeline** tab, and select **Pre-deployment conditions** for the `Deploy & Test` stage.\r\n\r\n2. Select **After release**.\r\n\r\n ![Setup Pre-deployment conditions for the Deploy & Test stage.](../images/40.png 'Pre-deployment Conditions Dialog')\r\n\r\n3. Close the dialog.\r\n\r\n### Enable Continuous Deployment Trigger\r\n\r\n1. Select **Continuous deployment trigger** for `_mlops-quickstart` artifact.\r\n\r\n2. Enable: **Creates a release every time a new build is available**.\r\n\r\n ![Enable Continuous Deployment Trigger for the Release pipeline.](../images/41.png 'Continuous Deployment Trigger Dialog')\r\n \r\n3. Close the dialog\r\n\r\n### Task 10: Save the Release Pipeline\r\n\r\n1. Provide name: `Classifier Release Pipeline`.\r\n\r\n2. Select: **Save**.\r\n\r\n ![Provide name for the release pipeline and select save.](../images/42.png 'Save')\r\n\r\n3. Select: **Ok**.\r\n\r\n ![Select Ok.](../images/43.png 'Save - Ok')\r\n\r\n\r\nWe will test everything in the next lab. " }, { "alpha_fraction": 0.7815873026847839, "alphanum_fraction": 0.784761905670166, "avg_line_length": 76.80000305175781, "blob_id": "b922f537f1f00b7f7689ef9d065ef1b168ce72e9", "content_id": "085ba73daac914356025749394edcc831f2f812c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3150, "license_type": "no_license", "max_line_length": 577, "num_lines": 40, "path": "/Lab12/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 12: Refactor the Model to use AMLS for training\r\n\r\nNow you can successfully reproduce the safe driver prediction model using the notebook. The team would like to continue to ensure quality and improve the model code as well as centrally share models and results with others during development. All of these goals are challenging with the training code embedded in a notebook and no centralized services are being utilized to facilitate sharing. \r\n\r\nWhat can be done to help the team with these goals? \r\n* Extracting the notebook code into Python scripts is an important step to ensure code quality via *lint* and unit tests and also clean execution on remote compute targets. \r\n* Logging parameter values and model validation metrics centrally with the Azure Machine Learning service makes it easy to compare the performance of different versions of the model and the parameters they were trained with.\r\n\r\nIn this experimentation phase your data scientists will want to execute many experiments with hyper-tuned parameters or configurations, so execution against 'remote' compute can save the team time in finding the best model while easily sharing results.\r\n\r\n\r\n### Discussion topics before we start this section \r\n\r\n1. When is the right time to refactor Jupyter notebooks into python files? Are there any alternatives to doing this? \r\n1. What is the benefit of running your experiments using Azure Machine Learning?\r\n1. What are the benefits of using a model repository and tracking the metrics and other parameters?\r\n\r\n### Steps\r\n1. Navigate to [Lab12/safe-driver-prediction-v2.ipynb](./safe-driver-prediction-v2.ipynb). \r\n * this is basically the previous notebook but we are going to take some additional steps to refactor the code to work better with AMLS\r\n * upload this file to your JupyterLab environment. \r\n\r\nWe are going to build something like this:\r\n\r\n![](./process.png)\r\n\r\n* we want to refactor the Jupyter code into a `train.py` file\r\n* it will generate a model .pkl file based on the training data\r\n* we will register that model into the AMLS model registry\r\n\r\n## Helpful Hints\r\n\r\n* To open a terminal (command prompt) in Jupyter, click the \"New\" button on the notebook dashboard and select \"Terminal\".\r\n* To connect to your workspace from the Jupyter environment, the best practice when using the Azure ML SDK is to use the `Workspace.from_config()` method to read the connection information from a workspace configuration file. On compute instances in your workspace, this file is created automatically. When using your own development environment, you must create this file in the same folder as your code. See [Configure a development environment for Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/how-to-configure-environment#workspace) for details.\r\n\r\n\r\n## Resources\r\n\r\n* [Documentation - How to monitor Azure ML experiment runs and metrics](https://docs.microsoft.com/azure/machine-learning/how-to-track-experiments)\r\n* [Documentation - Train scikit-learn models at scale with Azure Machine Learning](https://docs.microsoft.com/azure/machine-learning/how-to-train-scikit-learn)" }, { "alpha_fraction": 0.7070484757423401, "alphanum_fraction": 0.715859055519104, "avg_line_length": 39.272727966308594, "blob_id": "ca6bea09fbfc2d66a4fee335f7a173f5410643fa", "content_id": "b113acef2fe9901328d6999f3d373c0893dea624", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1362, "license_type": "no_license", "max_line_length": 225, "num_lines": 33, "path": "/Lab20/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 20: Setup Azure DevOps\r\n\r\n\r\n## Steps\r\n\r\n1. Put your code repo into AzDO. If you don't have a code repo you can clone MY repo locally and push it to AzDO.\r\n1. Navigate to your Azure Repo in AzDO. \r\n1. Select and open the `azure-pipelines.yml` file.\r\n\r\n2. Select **Edit** and update the `variables:` accordingly. \r\n\r\n**You will need to work with your data scientist to determine the name of the experiment.** It _might_ be `deep-learning`. \r\n\r\n5. Commit your change. \r\n\r\n\r\n### Create new Service Connection\r\n\r\n1. From the left navigation select **Project settings** and then select **Service connections**.\r\n2. Select **New service connection** and then select **Azure Resource Manager**.\r\n\r\n\r\n3. Provide the following information in the `Add an Azure Resource Manager service connection` dialog box and then select **Ok**:\r\n \r\n a. Connection name: `<Your Subscription Name>`\r\n \r\n b. Subscription: Select the Azure subscription to use.\r\n \r\n c. Resource Group: This value should match the value you provided in the `azure-pipelines.yml` file.\r\n \r\n ![Provide connection name, and Azure Resource Group and then select Ok. The resource group should match the value you provided in the YAML file.](../images/09.png 'Add an Azure Resource Manager service connection dialog')\r\n\r\nClick OK and then go back in to Update and `Verify Connection`. " }, { "alpha_fraction": 0.7250000238418579, "alphanum_fraction": 0.762499988079071, "avg_line_length": 25.66666603088379, "blob_id": "97c01c75f2e90083cd4a95e58d32e25dbd5030b4", "content_id": "eb1198168bfdfe7e3347ac5bb7d49a7c3c568c07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 80, "license_type": "no_license", "max_line_length": 33, "num_lines": 3, "path": "/environment_setup/install_requirements.sh", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "python --version\npip install azure-cli==2.4.0\npip install --upgrade azureml-sdk\n" }, { "alpha_fraction": 0.6366459727287292, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 12.545454978942871, "blob_id": "b2297a46c8eb7b7e1b731289db25bd7899c566d2", "content_id": "ed07f02f4650adb6e4dffed30b3aff54d06fd8bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 322, "license_type": "no_license", "max_line_length": 71, "num_lines": 22, "path": "/Lab12/README-14.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 14: Deploying a Real-time Inferencing Service\r\n\r\n\r\n\r\n\r\n\r\n### Discussion topics before we start this section \r\n\r\n\r\n\r\n\r\n### Steps\r\n\r\n1. Navigate to [Lab12/inferencing.ipynb](./inferencing.ipynb). \r\n * upload this notebook to your JupyterLab and run through the cells\r\n\r\n\r\n\r\n## Helpful Hints\r\n\r\n\r\n## Resources\r\n\r\n" }, { "alpha_fraction": 0.7216696739196777, "alphanum_fraction": 0.7241154313087463, "avg_line_length": 48.86178970336914, "blob_id": "53bfcc30156d7b1b084fadc00d4c25266b08e8ec", "content_id": "9ddc10f696568ec82c4582e4f194c921c3b63228", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6133, "license_type": "no_license", "max_line_length": 429, "num_lines": 123, "path": "/Lab300/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## A Day in the Life of a Data Scientist - The Data Science Process\n\nYou are somewhat new to data science and your boss hands you a new dataset and says, \"make something out of this.\" Where do you even start? In this lab we do exactly that. In this lab we are given a dataset of support tickets and we are told, \"unlock some insights to help the support team become more efficient and provide better service to customers.\" We work through how to get started on an analytics problem like this. \n\n * What do we do?\n * use standard data science techniques to explore the data\n * determine deductively what are some interesting problems we can solve\n * use automl to see if we can quickly predict those problems\n * deploy the best model, assuming it meets our goals\n * present your interesting analytics to executive leadership\n * How we do it?:\n * AMLS\n * Jupyter/python/pandas/visualizations\n * automl\n * deploy an automl \"no-code\" container\n * consume the model's REST API \n * Power BI for the final data presentation\n\n\n## Getting Your Environment Ready\n\n* Power BI desktop\n* Azure Subscription\n* AMLS service\n * Choose to Upgrade the workspace to the **Enterprise edition (Preview)** [see more information on current pricing here](https://azure.microsoft.com/en-us/pricing/details/machine-learning/) - you will need enterprise edition to complete the experiment section.\n* In AMLS launch your Compute instance and JupyterLab. To create one:\n * Select 'Compute' from left pane\n * Select 'New' under 'Compute Instances'\n * Provide a 'Compute VM Name' (all lowercase)\n * Keep the default size VM provided\n![](./img/1.png)\n* select the `JupyterLab` Link\n * Enter the user folder by double clicking\n * Select the upload button and upload the files listed below in the folders listed below -- or -- clone this repo into the JupyterLab VM using the `Terminal`:\n * [data/data_train.csv](data/data_train.csv)\n * [code/explore.ipynb](code/explore.ipynb)\n * [code/deploy.ipynb](code/deploy.ipynb)\n * [code/config.json](code/config.json)\n* Create a **Compute Cluster** or use your existing one. We will use this for automl training later\n * Select 'Compute' from left pane\n * Select 'New' under the 'Training Clusters' tab\n * Provide a 'Compute Name' (all lowercase)\n * Choose a VM size\n * For standard compute select something like 'Standard_DS2_v2'\n * For GPU compute select 'Standard_NC6'\n * Select 'Low Priority' for Virtual machine priority\n * Minimum number of nodes set to 0 (then it will scale down completely and reduce costs)\n * Set maximum nodes from 3-6\n * Click 'Create'\n![](./img/2.png)\n\n* Upload the **Dataset** (we could also do this from code, but we do it from the UI to take a first look at the data)\n * Select 'Datasets' from left pane\n * Select 'Create dataset' and then 'from local files'\n * Select the 'Browse' button and find the `data_train_experiment.csv` file\n * Select 'Next'\n * Review the data and select 'Next' and 'Next' again. **Watch the headers**\n * Finally review the dataset settings and select 'Create'\n * If you choose to Profile you should see some interesting summary statistics \n![](./img/3.png)\n\n## Data Exploration/Sandboxing\n\nWe want to examine the data\n\nIn JupyterLab:\n\n* Open [**Explore.ipynb**](code/explore.ipynb)\n* Run through all code cells using <kbd>Ctrl</kbd> + <kbd>Enter</kbd>\n\nWe think we can probably predict the number of days a ticket will take until final disposition. But we aren't sure. Let's see if we can predict that next using automl\n\n## Creating an ML algorithm\n\n* Launch your AMLS workspace\n* Select **Automated ML** on the left pane\n\n![](./img/5.png)\n\n* Click **'New automated ML run'**\n* Select the training data dataset (data_train_experiment-XXXXXX)\n* Provide an experiment name\n* Select **Duration** for Target column\n* Select the 'Training Cluster' of compute you setup above\n* Select **Regression** for Prediction Task\n* Select **'View additional configuration settings'** and set Primary metric to **'normalized_root_mean_squared_error'**\n* Set Concurrency, Max concurrent iterations to 3\n* Set Exit criterion, Training job time (hours) to .5\n* Select **'Save'**\n* Select **Finish**\n\nMonitor the automl run from the portal. Again, all of this we could've done from code. This will take about a half hour. **Don't close the run after you are done, we will jump right into deploying the model next.** Get familiar with what the interface is trying to tell you. \n\n## Deploying the Best Model\n\nAfter your run completes you'll probably see something like this:\n![](./img/6.png)\n\nThe models are ordered by best RMSE (assuming you chose that metric). The voting ensemble will always be the best b/c it is a conglomeration of everything. But deploying it will likely be slow. Perhaps it is best to just deploy the next best model???\n\n* Click the model you want and chose `Deploy`\n* Create a deployment name, example: `support-ticket-duration`\n* Add a deployment description if you wish\n* Compute Type set to ACI (Azure Container Instance) unless you have an AKS inferencing cluster already running. \n* authentication disabled\n* Choose **Deploy** button\n\n>This will take a while to run - ~10 mins. You can monitor this under `Endpoints`\n\n\n* Once complete select **Endpoints** on the left pane\n* copy the REST API endpoint, we are going to test it next\n* Open [**Deploy.ipynb**](code/deploy.ipynb) in your JupyterLab\n* Run each cell of the notebook\n\n## Present the data with Power BI\n\n* Open Power BI Desktop with `Present.pbix`\n* Interact with the data and visuals\n\n**Can you determine how to publish this report to Power BI Online? Then, can you make predictions based on your new web service against the data? Can you then create a visual to display that data for an executive dashboard?**\n\n[Consume an AMLS webservice from Power BI](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-consume-web-service?tabs=python#consume-the-service-from-power-bi)\n" }, { "alpha_fraction": 0.7212543487548828, "alphanum_fraction": 0.7259001135826111, "avg_line_length": 33.95833206176758, "blob_id": "a6253216703f3dfbfcd484ef9bce1bdfe8a5d388", "content_id": "d26fbf6a0011c78befd8a651c5ca6ba9fd09c083", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 861, "license_type": "no_license", "max_line_length": 112, "num_lines": 24, "path": "/Lab401/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Simplified MLOps Solution\r\n\r\n * We deploy a simple model quickly and don't worry about the implementation details of model development\r\n * we do focus on \r\n * how `score.py` works\r\n * how to build a conda env that will work with azdo\r\n * we build up the azdo pipelines and focus there on how to do the automation\r\n * the focus of this lab is simply to understand the patterns for the azdo pipelines\r\n\r\n## Get a model ready\r\n\r\nLet's quickly get an employee attrition model working. \r\n\r\n* In your AMLS compute (or local dev env if desired) open `Lab401/attrition-model.ipynb` and follow instructions\r\n\r\n\r\n## Let's do MLOps\r\n\r\n### Import this repo\r\n\r\nGo to your Azure DevOps project, into the Repos area.\r\nClick on the Git repo dropdown at the top of the page and then on \"Import Repository\".\r\n\r\nUnder clone URL: `github.com/davew-msft/MLOps-E2E.git`" }, { "alpha_fraction": 0.7489194869995117, "alphanum_fraction": 0.7667027115821838, "avg_line_length": 77.4805679321289, "blob_id": "fd57737d50d2b9eb9392f47ce88349058228a680", "content_id": "4b13329c49aab20f4205db9fdd843560a701052d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 22212, "license_type": "no_license", "max_line_length": 776, "num_lines": 283, "path": "/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "# MLOps End to End Workshop\n\nhttps://github.com/davew-msft/MLOps-E2E\n\n## Workshop Objectives\n\nIn this workshop, you will learn how to:\n\n* setup and configure Azure Databricks for analytics\n* setup and configure Azure ML Services to integrate your data science work into your DevOps pipelines. \n* *Think Like a Data Scientist* by looking at a few different use cases\n## What Technologies are Covered\n\n* Databricks (dbx)\n* Azure Machine Learning Service (AMLS)\n* Azure Container Instances (ACI)\n* Azure Kubernetes Service (AKS)\n* Azure DevOps (AzDO) or GitHub (and gh actions)\n* JupyterLab Notebooks using vscode and AMLS Compute targets\n\n## Target audience\n\n- Data Scientists\n- App Developers\n- AI Engineers\n- DevOps Engineers \n- Anyone wanting to learn to think like a Data Scientist and take their analytics to the next level\n\nThe solution will look something like this: \n\n![End-to-end Custom AI Solution](images/e2e.png)\n\n## Day 1 Workshop Agenda: Databricks\n\nOur company sells bikes. We have data about who does and does not buy bikes. We'd like to use that data to build a predictive model to determine who will buy bikes in the future. \n\n1. [Lab 1: Setup the resources needed for this workshop](Lab1/README.md)\n1. [Lab 2: Learn about Azure Machine Learning Services](Lab2/README.md)\n1. [Lab 2a: Getting Databricks Ready for DevOps/MLOps](Lab2a/README.md)\n1. [Lab 3: Build and Deploy an ML Model](Lab3/README.md) \n\n## Day 2 Workshop Agenda: DevOps/MLOps in Depth\n\nOur company is interested in building a _safe driver_ prediction model that we want to integrate into our website. We'll use Jupyter notebooks, AMLS, and AzDO/github actions to manage the MLOps pipelines. This will build on Day 1 by adding in `Pipelines` concepts. We'll determine how to retrieve the best model, package it with a WebApp, and deploy an inferencing web service. Then we'll figure out how to monitor the model's performance after it is deployed (on AKS). Our model will be deployed as ONNX format, which means it can be run on the Edge or anywhere else. \n\nWe are going to build something that looks similar to this: \n\n![stuffs](images/architecture-overview.png 'Solution Architecture')\n\nThe overall approach used in this lab is to orchestrate continuous integration and continuous delivery Azure Pipelines from Azure DevOps (or github actions). These pipelines are triggered by changes to artifacts that describe a machine learning pipeline, that is created with the Azure Machine Learning SDK. In the lab, you make a change to the model training script that executes the Azure Pipelines Build Pipeline, which trains the model and creates the container image. Then this triggers an Azure Pipelines Release pipeline that deploys the model as a web service to AKS, by using the Docker image that was created in the Build pipeline. Once in production, the scoring web service is monitored using a combination of Application Insights and Azure Storage.\n\n**Day 2 has only a few dependencies on Day 1 workshop. Make sure you run Lab 1 above and you should be fine**\n\nThese labs can be divided between data scientists and DevOps engineers. \n\n\nThese tasks are geared toward data scientists: \n\n1. [Lab10: Setup](./Lab10/README.md)\n * make sure you run [Lab 1](./Lab1/README.md) above, which has the prerequisites. \n1. [Lab11: Create a Classification Model using JupyterLab](./Lab11/README.md)\n1. [Lab12: Refactor the Model Training Code to use AMLS](./Lab12/README.md)\n * we want to use an `experiment` to train and register a `model` and log various `metrics`\n1. [Lab13: Build Pipelines and Deploy the Model for Inferencing](./Lab12/README-13.md)\n1. [Lab14: Deploy a Real-time Inferencing Service](./Lab12/inferencing.ipynb)\n * open this notebook in your AMLS compute environment and follow the steps\n1. [Lab15: Deploy a Real-time Inferencing Service to AKS (kubernetes)](./Lab12/inferencingAKS.ipynb)\n * open this notebook in your AMLS compute environment and follow the steps to deploy to AKS\n * this is very similar to ACI example in Lab14\n1. Lab16: Monitoring the webservice with AppInsights\n * wip\n\n1. [Lab19: A different example with deep learning on text](./jupyter-notebooks/DeepLearningwithText.ipynb)\n * There are a lot of steps to remember with those previous labs. In this example we'll look at a different example and won't get into the weeds and hopefully you will see the patterns. \n * Start by uploading [./jupyter-notebooks/DeepLearningwithText.ipynb](./jupyter-notebooks/DeepLearningwithText.ipynb) to your AMLS environment and begin there.\n\n## DevOps \n\nHopefully your team now understands and has implemented the fundamental concepts in a local development \"execute-from-my-notebook\" experience and needs to apply all of these concepts to a production-ready workflow. A notebook is convenient for experimentation, but is not suited for automating a full workflow. You could use AMLS pipelines, like we did above, which is geared to data scientists. Or our workflows could be implemented in a true DevOps tool like Azure DevOps. Using Azure Pipelines to operationalize Azure ML pipelines enables powerful tools such as version management, model/data validation, model evaluation/selection, and staged deployments to QA/production. Your team will take the learnings and relevant python scripts from the previous labs to do this.\n\n* The word 'pipeline' has started to take on multiple meanings - make sure you don't get pipeline types mixed up. See [here](https://docs.microsoft.com/azure/machine-learning/concept-ml-pipelines#which-azure-pipeline-technology-should-i-use) for a description of the pipeline types. For clarity, these challenges are referring to 'Azure Pipelines' as 'DevOps pipelines'.\n\nThese tasks are geared toward **Azure DevOps** engineers and can be done in parallel with the tasks above, if desired. If you are using **GitHub Actions** please see Labs 30-34. \n\n\n[Overview of the MLOps/DevOps Approach for Data Science](./Lab20/overview.md)\n\n* recommended reading \n* a templatized approach to do MLOps using a starter repo. This should work for gh actions or azdo pipelines but focuses on the latter. \n\n### Azure DevOps \n\n1. [Lab20: Setup AzDO](./Lab20/README.md). \n * This also includes some _Recommended Reading_. \n\nThis task should be done by both the data scientist and DevOps engineer **when using Azure DevOps**: \n\n1. [Lab21: Setup and Run a Build Pipeline](Lab21/README.md)\n1. [Lab22: Setup and Run a Release Pipeline](Lab22/README.md)\n1. [Lab23: Test Our Pipelines](Lab23/README.md)\n1. [Lab24: Monitoring Model Performance](Lab24/README.md)\n\n### GitHub Actions\n\nThese tasks are geared toward **GitHub Actions** engineers and can be done in parallel with the tasks above, if desired. If you are using **Azure DevOps** please see Labs 20-24. \n\n1. [Lab30: Setup GitHub](./Lab20/README-gh.md): this is an alternate lab if you'd rather use github for git repos and CI/CD pipelines\n\nThis task should be done by both the data scientist and DevOps engineer **when using GitHub Actions**: \n\n**TODO: these labs are wip**\n\n1. [Lab31: Setup and Run a Build Workflow](./Lab21/README-gh.md)\n1. [Lab32: Setup and Run a Release Pipeline](Lab22/README-gh.md)\n1. [Lab33: Test Our Pipelines](Lab23/README-gh.md)\n1. [Lab34: Monitoring Model Performance](Lab24/README-gh.md)\n\n## AutoML Labs\n\nThese labs aren't specific to automl but they build upon each other. In these labs we'll look at employee attrition using a dataset provided by IBM. \n\n1. [Lab40: Using Datasets and Datastores in AMLS](./Lab40/README.md): we'll first get the data into a dataset and explore the data\n1. [Lab40a: Using \"Filesets\" Datasets in AMLS](./Lab40/FileSets.ipynb)\n * this is the pattern I use to connect to files in a data lake or blob store\n * good for training CNNs with directories of image files\n1. [Lab41: Automated Machine Learning (automl)](./Lab41/README.md): we'll use the IBM dataset to look at the causes of employee attrition, using the AMLS GUI. \n1. [Lab42: automl from a python notebook](./samples/automl-forecast-model.ipynb) : We will look at running automl from within a python notebook using automl API calls. We'll forecast energy demand using NYCs open dataset. **Please see the [sample notebooks area](./samples/README.md) for other approaches using ARIMA and deep learning.**\n1. [Lab43: automl full end-to-end MLOps pipelines](./Lab43/README.md). We use an open source accounts receivable dataset to predict late payments. We use that dataset and automl for predictions and we deploy the model to AKS or ACI. \n * we use the AMLS UI for the initial setup\n * we will do `continuous retraining` whenever the dataset changes using AMLS Pipelines. For this we will do everything programmatically from Jupyter notebooks. \n\n## Other Labs\n\n1. [Lab80: Batch inferencing](./samples/batch-inferencing.ipynb) : generally most ML models are deployed for real-time inferencing and therefore are deployed on something like AKS as a container. But this pattern doesn't work well for batch inferencing. In this notebook we look at one possible pattern for batch inferencing by leveraging AMLS Pipelines feature. \n1. [Lab85: Batch Scoring Videos Using Deep Learning Models With Azure Machine Learning](./Lab85/README.md) \n * demonstrates batch inferencing using NNs by doing _neural style transfer_ to an uploaded video. \n * Upload a video file to storage.\n * The video file will trigger Logic App to send a request to the AML pipeline published endpoint.\n * The pipeline will then process the video, apply style transfer with MPI, and postprocess the video.\nThe output will be saved back to blob storage once the pipeline is completed.\n * _we can also do this using AKS_\n1. [Lab90: Time Series Analysis](./Lab90/README.md) : we specifically look at time series analytics in these labs with a focus on how AMLS can help. \n\n\n## Kubeflow Labs\n\nMLOps currently has very few industry-wide best practices to improve time-to-market. Obviously, we like MLOps using AMLS, but Kubeflow is an excellent alternative that we can integrate into AMLS. We'll build a solution using Kubeflow in these labs. \n\n>>The [kubeflow project](https://github.com/kubeflow/kubeflow) is dedicated to making deployments of machine learning (ML) workflows on Kubernetes simple, portable and scalable. Our goal is not to recreate other services, but to provide a straightforward way to deploy best-of-breed open-source systems for ML to diverse infrastructures. Anywhere you are running Kubernetes, you should be able to run Kubeflow.\n\nKubeflow is really the following:\n* [JupyterHub](https://jupyterhub.readthedocs.io/en/latest/), which allows you to request an instance of a dedicated Jupyter Notebook. We recommend using AMLS compute instances, but this is an excellent alternative. The problem is it's harder to control costs by spinning down Jupyter container instances with kubernetes HPA (horizontal pod autoscaler). \n* Training controllers: this component makes training jobs deployment easier. We will only deploy training controllers for tf jobs in this workshop, but there are controllers for PyTorch and others. The AMLS analog is training compute instances, again the benefit being these are able to be better autoscaled when not in use. \n* a model serving component: this isn't much different from AMLS inference clusters. \n\n### MLFlow vs Kubeflow\nKubeflow is meant to build E2E workflow pipelines, MLFlow is used to track metrics and deploy models. AMLS experiments and pipelines are really a superset of MLFlow and you can use the MLFlow APIs to talk to AMLS, essentially making AMLS a PaaS offering for an MLFlow server. Kubeflow is its own thing that you deploy into an AKS/k8s cluster. \n\n1. [Lab100: Kubeflow Prerequisites, Background, and Motivation](./Lab100/README.md)\n1. [Lab101: Containerizing a TensorFlow Model](./Lab101/README.md)\n1. [Lab102: Kubeflow Installation](./Lab102/README.md)\n1. [Lab103: Running JupyterHub with Kubeflow on AKS](./Lab103/README.md)\n1. [Lab104: Using tfjob to run training jobs](./Lab104/README.md)\n\n## Using MLflow with AMLS Labs\n\n[MLflow](https://mlflow.org/) is an OSS platform for tracking machine learning experiments and managing models. You can use MLflow logging APIs with Azure Machine Learning service: the metrics and artifacts are logged to your Azure ML Workspace. MLflow is deeply embedded with Databricks and is their model management tool-of-choice. MLflow is a great tool for local ML experimentation tracking. However, using it alone is like using git without GitHub. \n\n1. [Lab120: Basic MLflow usage in a Jupyter notebook](./Lab120/README.md)\n * we do \"local\" training. The only requirement is AMLS SDK must be installed. To do that quickly we use an AMLS compute target. \n1. [Lab121: MLflow with Databricks and AMLS backend](./Lab121/README.md) \n * wip\n\n## AMLS Deeper Dive Labs\n\nThese labs will get you a little more intimate with the AMLS service. You may want to start here on your journey with AMLS. Most of these are ipynb files and can be run either from your local workstation (vscode, pycharm, whatever) or using the JupyterLab notebooks on the AMLS compute engines (or anywhere else supporting .ipynb files)\n\n**Remember, we do all of these labs in code, but much of it can be examined using the AMLS workspace.**\n\n|Lab|Decription|\n|----|------|\n|[Lab200: The Azure ML SDK](./Lab200/200-intro.ipynb)|<li>basic calls to the service <li> getting your env config'd|\n|[Lab201: Running Experiments](./Lab200/201-experiments.ipynb) |<li>running experiments using the diabetes sample data set <li> creating .py files from a Jupyter notebook and executing those with logged experiments|\n|[Lab202: Working with conda and python envs using Jupyter notebooks](./Lab200/202-envs.ipynb)|<li>all about python environments <li> When you run a Python script as an experiment in Azure Machine Learning, a Conda environment is created to define the execution context for the script <li>we also create some AMLS datasets|\n|[Lab203: Working with Pipelines](./Lab200/203-pipelines.ipynb)|<li>Pipelines consist of one or more _steps_, which can be Python scripts, or specialized steps like an Auto ML training estimator or a data transfer step that copies data from one location to another. <li>Each step can run in its own compute context. <li>Pipelines are a good start on the automation journey, it also separates work so various team members can focus on particular areas in your solution. |\n|[Lab204: Model Interpretability and Explainability with AMLS](./Lab200/204-interpret.ipynb)|we use features of AMLS and LIME|\n|[Lab205: Monitoring a Model](./Lab200/205-monitor.ipynb)|<li>we quickly train and deploy a model for inferencing<li>we monitor it using the service and AppInsights<li>also sets up conda envs using yaml<li>inference deployment uses ACI and not AKS|\n|[Lab206: Monitor Data Drift](./Lab200/206-drift.ipynb)|Over time, models can become less effective at predicting accurately due to changing trends in feature data. This phenomenon is known as data drift, and it's important to monitor your machine learning solution to detect it so you can retrain your models if necessary.|\n\n\n ## End-to-End Labs \n\n These are alternate labs with different approaches to solving problems. \n\n 1. [Lab300: A Day in the Life of a Data Scientist...or...The Data Science Process in Action](./Lab300/README.md) \nYou are somewhat new to data science and your boss hands you a new dataset and says, \"make something out of this.\" Where do you even start? In this lab we do exactly that. In this lab we are given a dataset of support tickets and we are told, \"unlock some insights to help the support team become more efficient and provide better service to customers.\" We work through how to get started on an analytics problem like this. \n * What do we do?\n * use standard data science techniques to explore the data\n * determine deductively what are some interesting problems we can solve\n * use automl to see if we can quickly predict those problems\n * deploy the best model, assuming it meets our goals\n * present your interesting analytics to executive leadership\n * How we do it?:\n * AMLS\n * Jupyter/python/pandas/visualizations\n * automl\n * deploy an automl \"no-code\" container\n * consume the model's REST API \n * Power BI for the final data presentation\n1. [Lab301: Text Analytics from Cognitive Services to a custom solution (wip)](./Lab301/README.md)\nCompanies would like to do text analytics but they want to move quickly and iterate. The goal is to have a solution quickly (Cognitive Services) and only if the project seems to have a positive NPV then build a custom model (if needed). CogSvc handles the \"80/20\" rule. It will handle 80% of the use cases you'll need and solve problems in 20% of the time. It's also not required to have a data scientist do this initial work. We will explore the complete path of integrating text analysis into our business processes, starting from pre-build models available as cognitive services, up to training a third-party neural custom model for Aspect-Based Sentiment Analysis available as part of Intel NLP Architect using Azure Machine Learning Service.\nWe can talk about cases when one needs a custom model, and demonstrate quick ways to create such a model from scratch using AutoML, and show how to fine-tune model hyperparameters using HyperDrive\n1. [Lab302: Integrating Pre-Built AI into your application(wip)](./Lab302/README.md)\n * We are going to use a pre-built e-commerce website and deploy 2 AI services to add a \"human touch\" <img src=\"./images/tasks.png\" align=right width=200>\n * First, the website will allow a customer to upload a picture of what they are looking for and it will give a recommendation. (With some add'l tweaks we could use our phone's camera to take the picture). The recommendation won't be very accurate using the pre-built Computer Vision model\n * Next, we'll customize it and train it on additional photos using Azure Custom Vision Service. Then we'll take the generated model (in ONNX format) and deploy that into our website which will make the item identification much more accurate\n * Finally, we'll look at using the Personalizer Service (which is reinforcement learning under-the-covers) to quickly make a recommender service for our e-commerce site. \n * What you'll learn:\n * How to integrate AI into a website\n * How to re-train a pre-built AI model to make it better\n * How to use pre-built AI to build a recommender engine...QUICKLY\n1. [Lab303: Deep Learning Hack: Build a CNN from scratch, then make it perfect with transfer learning](./Lab303/README.md) \nWe build a computer vision solution entirely from scratch using tensorflow. First we learn all of the concepts of how neural nets work. Then we show just how difficult it is to train a CNN from scratch. But don't dispair...we then look at how to use a pre-built tf model from the internet and use a trick called transfer learning to make our model almost perfect. \n1. [Lab304 wip: Computer Vision at the Edge, E2E with MLOps/DevOps deployments](./Lab304/README.md)\n * this is WIP\n * works with both LVA (Live Video Analytics) and OpenCV module.\n * create an IoT Edge deployment with simulated cameras\n * upload simulated video and train/re-train the models and deploy them to the iot edge devices.\n \n## Alternative MLOps Labs\n\n1. [Lab400: (wip) training and deployment using AMLS Pipelines](./Lab400/README.md)\n * we still keep the code in git but we use AMLS Pipelines (via its REST APIs) to call and handle the training and validation\n1. [Lab401: (wip) a simplified Azure DevOps/MLOps solution](./Lab401/README.md)\n * We deploy a simple model quickly and don't worry about the implementation details of model development\n * we do focus on how `score.py` works\n * we build up the azdo pipelines and focus there on how to do the automation\n * the focus of this lab is simply to understand the patterns for the azdo pipelines\n1. [Lab410: AMLS Compute auto-shutdown](https://github.com/ruoccofabrizio/azureml-compute-instances-shutdown)\n\n\n## Cognitive Services Labs\n\nFor this set of labs we are going to use the CogSvcs SDKs. Instead of using a remote AMLS Compute instance we will use a Docker DevContainer and vscode to do our development. If you don't have Docker Desktop and vscode you can most definitely still use a remote AMLS Compute instance. \n\n1. Open a new instance of vscode to the folder: `MLOps-E2E\\Lab500`\n1. vscode will prompt you to open the folder in the container. Do that. \n\n![](./images/prompt.png)\n\nNow open each ipynb file within the container. \n\n>> We do this because the docker container already has all of the Azure and AMLS dependencies baked in, saving us a lot of time and workstation configuration misery. \n\n|Lab|Description|\n|----|------|\n|[Lab500: Computer Vision](./Lab500/CompViz.ipynb)|Using CompViz with the SDK from Python|\n|[Lab501: Face API](./Lab500/face.ipynb)|Using the Face API|\n|[Lab502: OCR capabilities](./Lab500/OCR.ipynb)|optical character recognition|\n|[Lab503: text analytics](./Lab500/text.ipynb)|text analytics|\n|[Lab503a:sentiment analysis on Yelp! data](./Lab500/yelp.ipynb)|text analytics|\n|[Lab504: Form Recognizer](./Lab500/FormRec.ipynb)|Form Recognizer|\n\n## Other Interesting Labs\n\n1. [Lab900: RFM Analytics](./Lab900/RFM.ipynb)\n1. [Lab901: Association Rules](./Lab900/AssociationRules.ipynb)\n * if you have several events we can search for links between those events. This is useful to determine things like:\n * cross-selling behaviors of customers\n * find the links between many products\n * analyze the paths customers take through our stores/website\n * what products should be highlighted or removed\n\n## WrapUp\n\n1. **Remove the RG you created to prevent incurring Azure costs**\n\n\n## Reference Material\n\n* [Microsoft's MLOpsPython repo](https://github.com/microsoft/MLOpsPython)\n* [MLOps on Azure](https://github.com/microsoft/MLOps)\n* [My Sample Notebooks](./samples/README.md)\n\n\n" }, { "alpha_fraction": 0.7806026339530945, "alphanum_fraction": 0.7846829891204834, "avg_line_length": 70.45454406738281, "blob_id": "b9eb726a69330584914cfae0225a15c8d3154dda", "content_id": "11be1abb236872fa9c262533212579efb85494e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3192, "license_type": "no_license", "max_line_length": 486, "num_lines": 44, "path": "/Lab41/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "# Automated Machine Learning\r\n\r\nautoml builds ML models for you by automating model and hyperparameter selection. Bring a labelled dataset that you want to build a model for, automated ML will give you a high quality machine learning model that you can use for predictions.\r\n\r\nIf you are new to Data Science, automated ML will help you get jumpstarted by simplifying machine learning model building. It abstracts you from needing to perform model selection, hyperparameter selection, and in one step creates a high quality trained model for you to use.\r\n\r\nIf you are an experienced data scientist, automated ML will help increase your productivity by intelligently performing the model and hyperparameter selection for your training and generates high quality models much quicker than manually specifying several combinations of the parameters and running training jobs. Automated ML provides visibility and access to all the training jobs and the performance characteristics of the models to help you further tune the pipeline if you desire.\r\n\r\nautoml with AMLS can be setup and run using\r\n\r\n* the azure portal/ml workspace (GUI experience): we use this method for this lab\r\n* Jupyter notebooks in a AMLS compute target\r\n * [Here are some examples for various use cases](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning)\r\n* any other python environment where we can call the AMLS APIs\r\n* databricks\r\n\r\n## Using Automated Machine Learning\r\n\r\n1. Go to your AMLS workspace and select Automated Machine Learning under the authoring section.\r\n\r\n1. Enter your experiment name, then select a compute from the list of your existing computes or [create a new compute](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments#create-an-experiment). \r\n\r\n1. Select a the IBM-Employee-Attrition dataset that you had created in Lab40.\r\n\r\n1. Preview data and keep all columns selected for training.\r\n\r\n1. Select the training job type: **Classification**\r\n1. Select target column: **Attrition**\r\n\r\n1. Open “**Advanced settings**”, set the 'Primary metric' to 'AUC_weighted' and training job time to 15 minutes (for the workshop).\r\n\r\n1. Hit \"**Start**\" and wait for the training job to start. You’ll be able to see the models which are created during the run, click on any of the models to open the detailed view of that model, where you can analyze the [graphs and metrics](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-understand-automated-ml).\r\n\r\n1. Once the run is completed, click **deploy the best model** to create a deployed endpoint from the model.\r\n\r\n\r\n## Reference Topics\r\n\r\n* To learn more about automated ML, see documentation [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-automated-ml).\r\n* [automl sample notebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning)\r\n\r\n## Optional Tasks\r\n\r\n* Once your model has been deployed, follow these [instructions](https://docs.microsoft.com/en-us/power-bi/service-machine-learning-integration) to consume the model from Power BI." }, { "alpha_fraction": 0.6825938820838928, "alphanum_fraction": 0.7027303576469421, "avg_line_length": 37.52702713012695, "blob_id": "4921a57dd3d41b061a061debc6d709f7ae9535e6", "content_id": "6ba9b5629635935de42a90460af285f50ce21160", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2930, "license_type": "no_license", "max_line_length": 204, "num_lines": 74, "path": "/Lab23/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 23: Test Our Pipelines\r\n\r\nNow that we have the build and release pipelines configured, let's test them. \r\n\r\n\r\n### Discussion topics before we start this section \r\n\r\n1. epochs and learning rate\r\n2. \r\n\r\n## Steps\r\n\r\n### Make Edits to Source Code\r\n\r\n1. **Edit** `scripts/train.py`. This is very close to the version we created in AzNotebooks. \r\n\r\n3. Change the **learning rate (lr)** for the optimizer from **0.1** to **0.001**.\r\n\r\n4. Change the number of training **epochs** from **1** to **5**.\r\n\r\n5. Select **Commit**.\r\n\r\n ![Make edits to train.py by changing the learning rate. Select Commit after editing.](../images/44_1.png 'Edit Train.py')\r\n \r\n6. Provide comment: `Improving model performance: changed learning rate.` and select **Commit**.\r\n\r\n ![Provide commit comment for train.py.](../images/45_1.png 'Commit - Comment')\r\n \r\n### Task 2: Monitor Build Pipeline\r\n\r\n1. Navigate to **Pipelines, Builds**. Observe that the CI build is triggered because of the source code change. \r\n\r\n2. Select the pipeline run and monitor the pipeline steps. The pipeline will run for 20 minutes. Proceed to the next task when the build pipeline successfully completes.\r\n \r\n\r\n### Task 3: Monitor Release Pipeline\r\n\r\n1. Navigate to **Pipelines, Releases**. Observe that the Release pipeline is automatically triggered upon successful completion of the build pipeline. Select as shown in the figure to view pipeline logs. \r\n \r\n ![Navigate to Pipelines, Releases and Select as shown in the figure to view pipeline logs.](../images/48.png 'Pipelines - Releases')\r\n \r\n2. The release pipeline will run for about 15 minutes. Proceed to the next task when the release pipeline successfully completes.\r\n\r\n### Task 4: Review Release Pipeline Outputs\r\n\r\n1. From the pipeline logs view, select **Deploy & Test Webservice** task to view details.\r\n\r\n ![Select Deploy & Test Webservice task to view details.](../images/50.png 'Pipeline Logs')\r\n \r\n2. Observe the **Scoring URI** and **API Key** for the deployed webservice. Please note down both the `Scoring URI` and `API Key`.\r\n\r\nExamples:\r\n\r\n|Name|Value|Example|\r\n|----|-----|--------|\r\n|Scoring URI||http://40.121.22.12:80/api/v1/service/compliance-classifier-service/score |\r\n|API Key||0eukNRewJss1wBU23ddSAnWGfXN7qk7M|\r\n\r\n![View Deploy & Test Webservice task logs and note down the Scoring URI of the deployed webservice.](../images/51.png 'Deploy & Test Webservice Task Logs')\r\n\r\n3. In AMLS go to your deployed webservice. You can see the values there as well. \r\n\r\n ![View deployed webservice in Azure Portal.](../images/52.png 'Azure Portal - Workspace, Deployments')\r\n\r\n\r\n\r\n\r\n## Testing the deployed solution\r\n\r\nIn this exercise, you verify that the first release of the application works.\r\n\r\n### Task 1: Test the Deployment\r\n\r\n1. Browse to your Azure Notebooks project and navigate to `Test Deployment.ipynb`. This is the notebook you will step through executing in this lab.\r\n\r\n \r\n" }, { "alpha_fraction": 0.7097600102424622, "alphanum_fraction": 0.7129600048065186, "avg_line_length": 30.87368392944336, "blob_id": "61ff9fbc828133aac2798183c9edb5e7927ee365", "content_id": "ea7cf621b424abccb359d07dae207ac457e10bbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3125, "license_type": "no_license", "max_line_length": 235, "num_lines": 95, "path": "/Lab1/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 1: Workshop Setup\r\n\r\n### Discussion topics before we start this section \r\n\r\n* We use `bash` and `az cli` commands throughout this lab. What is your comfort level with bash? \r\n* Why do we need to do \"resource prefixing\"? \r\n* What is your preferred IDE? I like to use vscode\r\n* Cloud Shell overview? \r\n* What is your git experience? \r\n* To speed up Azure deployments we use ARM Templates. What is your experience? \r\n\r\n\r\nWe are going to deploy the following to Azure : \r\n* 1 Storage account\r\n* 1 Data Factory\r\n* 1 Databricks workspace\r\n* 1 SQL Server\r\n* 1 SQL database\r\n\r\n\r\n### Prerequisites\r\n\r\n* We will use `bash` and `az cli` commands throughout the workshop. You can use Powershell as an alternative, but the commands will be different. \r\n\r\n* You **will need** an Azure account with at least one Resource Group with owner permissions. \r\n\r\n> Note: If you don't have an account you can create your free Azure account [here](https://azure.microsoft.com/en-us/free/). **But we aware that free trial accounts do not always allow every Azure service to be spun up and utilized.**\r\n\r\n\r\n### On Your Local Laptop...\r\n\r\nyou'll likely need the following, which can be installed now or as we progress through the workshop:\r\n\r\n* [Azure Storage Explorer](https://azure.microsoft.com/en-au/features/storage-explorer/)\r\n* vscode\r\n* Clone this GitHub repository using Git and the following commands: \r\n\r\n `git clone https://github.com/davew-msft/MLOps-E2E`\r\n* I prefer to use WSL in Windows but Azure Cloud Shell is fine. If you do NOT want to use Cloud Shell then you'll need the following:\r\n * [Azure CLI Installer (MSI) for Windows](https://aka.ms/InstallAzureCliWindows)\r\n * or the WSL/Linux version\r\n\r\n\r\n### Deploy Resources to Azure\r\n\r\nYou need one resource group. \r\n\r\n* Suggested name (used throughout this workshop): `MLOpsWorkshop`. \r\n* Suggested region: `East US 2`.\r\n\r\n\r\n\r\nRun the following to create the RG using Cloud Shell:\r\n\r\n``` bash\r\n#vars/changeme\r\nResGroup='MLOpsHackathon'\r\nSubscription='davew'\r\nlocation='eastus2'\r\n\r\n# ensure you are connected to the desired subscription\r\naz account list --output table\r\naz account set --subscription \"$Subscription\"\r\naz account list --output table\r\n\r\naz group create --name $ResGroup --location $location\r\n```\r\n\r\nNow we need to provision our resources using the ARM templates. \r\n\r\nI assume Cloud Shell. Here's the steps: \r\n\r\n```bash\r\n# clone the repo first in your cloud shell\r\nmkdir -p git && cd $_\r\ngit clone https://github.com/davew-msft/MLOps-E2E mlops\r\ncd mlops\r\n\r\naz deployment group create \\\r\n -g $ResGroup \\\r\n --template-file setup/azureclideploy.json \\\r\n --parameters setup/parameters.json\r\n\r\n# check that everything deployed successfully\r\naz resource list -g $ResGroup -o table\r\n```\r\n\r\nThis may take some time. \r\n\r\n|Questions|\r\n|----------|\r\n|Do you know how to check the status of your deployment? |\r\n|Why is your deployment named `azureclideploy` in the Azure Portal?|\r\n|How can you ensure everything deployed successfully, even if there was failure the first time? |\r\n|How can we delete all resources at the end of the day? |\r\n\r\n" }, { "alpha_fraction": 0.7821543216705322, "alphanum_fraction": 0.7829582095146179, "avg_line_length": 80.93333435058594, "blob_id": "e44b8ccff2cd18a748d208ab6e254eafaf0e4a10", "content_id": "7704dbbb93154b3376cd7e459cc23fc5664f958c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1244, "license_type": "no_license", "max_line_length": 242, "num_lines": 15, "path": "/Lab20/overview.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Recommended Reading \r\n\r\n* [Key concepts for new Azure Pipelines users](https://docs.microsoft.com/azure/devops/pipelines/get-started/key-pipelines-concepts?view=azure-devops)\r\n* [*MLOpsPython* - templates to create Azure DevOps CI/CD pipelines for Azure ML](https://github.com/microsoft/MLOpsPython)\r\n* [Azure Pipelines YAML schema reference](https://docs.microsoft.com/azure/devops/pipelines/yaml-schema?view=azure-devops&tabs=schema)\r\n* [Machine Learning extension for Azure DevOps](https://marketplace.visualstudio.com/items?itemName=ms-air-aiagility.vss-services-azureml)\r\n\r\n\r\nAs a team, we want to complete the following tasks:\r\n\r\n1. Using [MLOpsPython](https://github.com/microsoft/MLOpsPython) as a template, create an Azure DevOps pipeline (or gh action) that creates and runs an Azure ML pipeline to train, validate, and register a model based on your training scripts.\r\n\r\n You may wish to work through the MLOpsPython [Getting Started guide](https://github.com/microsoft/MLOpsPython/blob/master/docs/getting_started.md) before tackling this task.\r\n\r\n Follow the [bring your own code guide](https://github.com/microsoft/MLOpsPython/blob/master/docs/custom_model.md) to integrate your scripts with the MLOpsPython repository.\r\n" }, { "alpha_fraction": 0.7322011590003967, "alphanum_fraction": 0.7576746940612793, "avg_line_length": 47.3870964050293, "blob_id": "e74012bb257f85f5ffbdfb3ebedf15a45d3b0653", "content_id": "1a5f0bc70eabbc94bcc01ec3333ce5f0ddb0ae23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1531, "license_type": "no_license", "max_line_length": 432, "num_lines": 31, "path": "/Lab103/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## JupyterHub on AKS/Kubeflow\r\n\r\n[JupyterHub](https://jupyterhub.readthedocs.io/en/latest/) is a multi-user Hub that spawns, manages, and proxies multiple instances of the single-user Jupyter notebook server (JupyterLab). JupyterHub can be used to serve notebooks to a class of students, a corporate data science group, or a scientific research group. Let's look at how we can create JupyterHub to spawn multiple instances of Jupyter Notebook on AKS using Kubeflow.\r\n\r\nFor familiarity, let's use the tf docker container locally, which has Jupyter notebooks available on it\r\n\r\n```bash\r\ndocker run -d -p 8888:8888 jupyter/tensorflow-notebook\r\n# the output will be your container id\r\n# we need to use that here to get the URL for jupyter, the first few characters of the id are enough\r\ndocker exec 139891b235f jupyter notebook list\r\n# that should give you a URL like this:\r\n#http://0.0.0.0:8888/?token=73e2ed91d737edcc4f9ef1f5f2b77fee82705251da96146f\r\n# you may have to change 0.0.0.0 to localhost\r\n\r\n```\r\n\r\nFeel free to experiment. We want a similar, but scalable to a team, solution on AKS. Let's look at that using Kubeflow. kubeflow-core already has JupyterHub available. \r\n\r\n```bash\r\nNAMESPACE=kubeflow\r\nkubectl get svc -n=${NAMESPACE}\r\n# you should see entries for jupyter\r\n\r\n#let's connect to kubeflow dashboard using a proxy\r\nkubectl port-forward svc/istio-ingressgateway -n istio-system 8080:80\r\n```\r\n\r\nNow browse to `http://localhost:8080`, this is kubeflow dashboard. Take a minute to familiarize yourself with it. \r\n\r\nYou should now be able to connect. " }, { "alpha_fraction": 0.7728426456451416, "alphanum_fraction": 0.7728426456451416, "avg_line_length": 69.81818389892578, "blob_id": "2691afba5ba3fb2f98cc17de7be23efcb8cc2157", "content_id": "d3537efceaac8bcfc650abbcc7bd34477ddc48f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 788, "license_type": "no_license", "max_line_length": 160, "num_lines": 11, "path": "/samples/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Sample Notebooks\r\n\r\n*Additional sample notebooks can be found here:* [Notebooks Everywhere](https://git.davewentzel.com/demos/notebooks-everywhere/-/blob/master/README.md)\r\n\r\n|Notebook|Purpose|\r\n|----|----|\r\n|[automl-forecast-model.ipynb](./automl-forecast-model.ipynb)|uses AMLS automl to do time series forecasting with NYC energy dataset|\r\n|[batch-inferencing.ipynb](batch-inferencing.ipynb)|demonstrates using AMLS pipelines for batch inferencing. We use the iris dataset as csvs in a AMLS dataset|\r\n|[Deep Learning for Forecasting Time Series](./dl-ts-forecasting/README.md)|using sample data we use CNNs and RNNs for energy demand forecasting|\r\n|[NYC Energy Dataset Time Series forecasting using ARIMA](./forecasting/README.md)|other approaches to time series forecasting|\r\n|||" }, { "alpha_fraction": 0.721136748790741, "alphanum_fraction": 0.7439312934875488, "avg_line_length": 36.8505744934082, "blob_id": "0d778f50d7527255a497c74b5c70be7918e56663", "content_id": "7f9275be1840137ad7002560f40a5359f8cfd9d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3378, "license_type": "no_license", "max_line_length": 225, "num_lines": 87, "path": "/Lab102/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Kubeflow \r\n\r\n[kubeflow documentation](https://github.com/kubeflow/kubeflow)\r\n\r\nKubeflow uses ksonnet templates to package and deploy components. ksonnet simplifies defining an application configuration, updating the configuration over time, and specializing it for different clusters and environments. \r\n\r\n### Ksonnet (not needed anymore?)\r\n\r\n_shouldn't be needed anymore_ \r\n\r\n\r\n### Istio\r\n\r\nNow install Istio into your AKS cluster. Note: Istio releases after 1.6 don't work with kubeflow, _as of this writing_. \r\n\r\n```bash\r\n\r\nISTIO_VERSION=1.6.10\r\n\r\ncurl -sL \"https://github.com/istio/istio/releases/download/$ISTIO_VERSION/istioctl-$ISTIO_VERSION-linux-amd64.tar.gz\" | tar xz\r\nsudo mv ./istioctl /usr/local/bin/istioctl\r\nsudo chmod +x /usr/local/bin/istioctl\r\nistioctl operator init\r\nkubectl get all -n istio-operator\r\nistioctl profile dump default\r\nkubectl create ns istio-system\r\nkubectl apply -f /path_to/Lab102/istio.aks.yaml \r\n#wait until everything is ready\r\n#kubectl describe pod can help with any errors as can\r\n#kubectl logs -n istio-operator -l name=istio-operator -f\r\nkubectl get all -n istio-system\r\n```\r\n\r\n### Install Kubeflow on AKS and create our kubeflow app\r\n\r\nI assume WSL/ubuntu.\r\n\r\nKubeflow is really just a bunch of commands that look a lot like `kubectl` wrapped in a command called `kfctl`. \r\n\r\nFind the release you want to download from here: https://github.com/kubeflow/kfctl/releases/tag/v1.1.0 then copy the link address to the env var below.\r\n\r\nv1.1.0 seems to be the only thing that works with k8s 1.18. As of this writing\r\n\r\n```bash\r\n# vars\r\n\r\n# this is what we will call our kubeflow app\r\nexport KF_NAME=tfmnist\r\n# see above\r\nSRC_LOC=https://github.com/kubeflow/kfctl/releases/download/v1.1.0/kfctl_v1.1.0-0-g9a3621e_linux.tar.gz\r\n# Set the path to the base directory where you want to store one or more Kubeflow deployments. \r\n# Then set the Kubeflow application directory for this deployment.\r\nexport BASE_DIR=\"/mnt/c/dave/kubeflow\"\r\nexport KF_DIR=${BASE_DIR}/${KF_NAME}\r\n# Set the configuration file to use, such as the file specified below:\r\n#export CONFIG_URI=\"https://raw.githubusercontent.com/kubeflow/manifests/v1.1-branch/kfdef/kfctl_azure.v1.1.0.yaml\"\r\n#export CONFIG_URI=\"https://raw.githubusercontent.com/kubeflow/manifests/v1.1-branch/kfdef/kfctl_azure_aad.v1.1.0.yaml\"\r\n# this version works on k8s 1.18?\r\nexport CONFIG_URI=\"https://raw.githubusercontent.com/kubeflow/manifests/v1.1-branch/kfdef/kfctl_azure.v1.1.0.yaml\"\r\n\r\n# download kfctl src\r\nwget $SRC_LOC\r\ntar -xvf kfctl_v1.1.0-0-g9a3621e_linux.tar.gz\r\n\r\nexport PATH=$PATH:<path to where kfctl was unpacked>\r\n\r\ncurl -L -o kfctl_azure_aad.v1.1.0.yaml ${CONFIG_URI}\r\n\r\n# Generate and deploy Kubeflow:\r\nmkdir -p ${KF_DIR}\r\ncd ${KF_DIR}\r\n#kfctl apply -V -f kfctl_azure_aad.v1.1.0.yaml\r\nkfctl apply -V -f ${CONFIG_URI}\r\n# this might go into a loop of WARN messages around cert-manager\r\n# look in kf-istio-resources.yaml and change sni_hosts to sniHosts\r\n\r\n\r\n#check for errors and wait until everything is running\r\nkubectl get all -n kubeflow\r\n\r\n#let's connect to kubeflow dashboard using a proxy\r\nkubectl port-forward svc/istio-ingressgateway -n istio-system 8080:80\r\n```\r\n\r\nNow browse to `http://localhost:8080`, this is kubeflow dashboard. Take a minute to familiarize yourself with it. \r\n\r\n**Don't close your terminal window yet, we will use it for subsequent labs**" }, { "alpha_fraction": 0.7258116602897644, "alphanum_fraction": 0.7512053847312927, "avg_line_length": 48.04838562011719, "blob_id": "20bcce28a87ca4ac67e7aa1b157f146d54244a18", "content_id": "5128d08ed84311297c5e5d2b3f765bd249e25258", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3111, "license_type": "no_license", "max_line_length": 502, "num_lines": 62, "path": "/Lab2a/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 2a: Getting Databricks Ready for MLOps/DevOps\r\n\r\nVersion-controlling notebooks is challenging. In this lab we are going to wire-up our Databricks workspace to connect to our AzDO environment. We could also use [github or BitBucket](https://medium.com/@cprosenjit/azure-databricks-version-management-35fc78e11d7#targetText=Azure%20Databricks%20Configuration&targetText=Integrate%20Azure%20Databricks%20with%20Azure,extra%20authentication%20to%20be%20supplied.&targetText=2.2%20Select%20a%20Notebook%2C%20click,and%20input%20path%20as%20appropriate.). \r\n\r\n### Discussion topics before we start this section \r\n\r\n* What is databricks? Basic navigation of the workspace\r\n* git integration\r\n* notebook walkthrough\r\n* navigating a notebook\r\n\r\n## Spin up a Cluster\r\n\r\nIt can be as small as possible. You don't need to wait for this to complete. \r\n\r\n\r\n## \"Clone\" this repo into AzDO \r\n\r\nWe want to utilize the ipynb files that already exist in this repo, in DBX. And we want to version control them appropriately. \r\n\r\n1. Create a repo in AzDO\r\n2. Create a personal access token and copy it somewhere secure\r\n1. on your laptop (or cloud shell), add this new \"remote\" to your repo and push the latest code. \r\n\r\nNote, I am calling my \"remote\" `azdo` because origin is already used when you cloned my repo. \r\n\r\nThe remote URL needs to follow a structure similar to this: \r\n\r\n```bash\r\ngit remote add azdo https://something:<pat here>@dev.azure.com/davewentzel/MLOps-E2E/_git/MLOps-E2E\r\ngit push -u azdo --all\r\n\r\n```\r\n\r\n1. Open DBX, go to User Settings, choose `AzDO` as the Git provider. \r\n\r\n\r\n## Version Controlling Notebooks - PRACTICE\r\n\r\nYou will need to follow these steps generally:\r\n\r\n* Import an existing notebook into dbx\r\n* Under Revision History link it to **a new folder** in AzDO. \r\n * Don't use DBXNotebooks. Consider using just `Notebooks` folder. You can't use DBX and git to save a notebook back to DBXNotebooks folder without getting a git error. This is by design. \r\n\r\nTake some time and load all of the ipynb notebook files from DBXNotebooks into databricks, and then sync them to your AzDO repo. Ensure you can make changes and commit them back to AzDO as you work. \r\n\r\n**You do not need to run these notebooks yet. Just get them loaded into your workspace and wired up to AzDO**\r\n\r\n[01.Installation_and_Configuration.ipynb](../DBXNotebooks/01.Installation_and_Configuration.ipynb) \r\n[02.Bike_Buyer_Ingest.ipynb](../DBXNotebooks/02.Bike_Buyer_Ingest.ipynb) \r\n[03a.Bike_Buyer_Build_model.ipynb](../DBXNotebooks/03a.Bike_Buyer_Build_model.ipynb) \r\n[03b.Bike_Buyer_Build_model_runHistory.ipynb](../DBXNotebooks/03b.Bike_Buyer_Build_model_runHistory.ipynb) \r\n[04.Bike_Buyer_Deploy_to_ACI.ipynb](../DBXNotebooks/04.Bike_Buyer_Deploy_to_ACI.ipynb) \r\n[05.Bike_Buyer_Deploy_to_AKS_existingImage.ipynb](../DBXNotebooks/05.Bike_Buyer_Deploy_to_AKS_existingImage.ipynb) \r\n\r\n\r\n## Ensure the AML SDK is installed in your workspace/cluster\r\n\r\nLoad, run, and save to AzDO: 01.Installation_and_Configuration.ipynb\r\n\r\nThis will configure the AML SDK within your dbx workspace. \r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.7365623116493225, "alphanum_fraction": 0.7448316812515259, "avg_line_length": 45, "blob_id": "730ca18c0cd1bdb40723d028c57cc0f3a211cf9c", "content_id": "5cd24234b86520965fed65748a21c9545c31f14d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3386, "license_type": "no_license", "max_line_length": 260, "num_lines": 72, "path": "/Lab101/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Containerizing a TF Model\r\n\r\nIn this lab we want to containerize a TF model that we will improve upon in subsequent labs. This is an MNIST classifier. Source code is `./src/main.py`. This file can be run, if desired on your laptop now. You may need to install pip packages. \r\n\r\nIf we run this with Docker directly we actually shouldn't need to install pip packages since those are declared in the dockerfile. See `./src/Dockerfile`.\r\n\r\nHere we are using the base tf image from their [docker hub](https://hub.docker.com/r/tensorflow/tensorflow/tags/). Different tags are needed depending on if we have GPUs available or not. And note we also have a `Dockerfile.gpu` if you have gpus available. \r\n\r\nThe Dockerfile is also copying our main.py into the container and then setting the entry point script for the container. \r\n\r\nLet's build the image. I use WSL2 and Docker Desktop for Windows, you may need to adjust the instructions for your use case. \r\n\r\n```bash\r\n#change this to your docker hub username, we will push to docker hub later. If you'd rather use ACR, feel free to adjust the instructions accordingly\r\nDOCKER_USERNAME=dwentzel\r\ncd src\r\ndocker build -t ${DOCKER_USERNAME}/tf-mnist .\r\n\r\n# check your image is available locally\r\ndocker images|grep tf\r\n\r\n# now run it, we decrease the training steps to 100 so it only takes a few mins on our machine\r\ndocker run -it ${DOCKER_USERNAME}/tf-mnist --max_steps 100\r\n\r\n# with any luck your accuracy should be above 90%\r\n\r\n# But this will create a CPU optimized docker container. We want a GPU optimized container. We can do this even if your laptop doesn't have a GPU to test with.\r\n# Notice we are using the `Dockerfile.gpu` this time\r\n\r\ndocker build -t ${DOCKER_USERNAME}/tf-mnist:gpu -f Dockerfile.gpu .\r\n# we won't be able to test this image without installing nvidia-docker, which you can research on your own. Note that gpu-based docker containers will only work on Linux or WSL, hence why we aren't testing locally. We'll instead test in Azure later. \r\n\r\n# publish both images\r\ndocker login\r\ndocker push ${DOCKER_USERNAME}/tf-mnist\r\ndocker push ${DOCKER_USERNAME}/tf-mnist:gpu\r\n```\r\n\r\n## Run our MNIST model on AKS\r\n\r\nWe need to use the YAML template provided. Note that:\r\n* the deployment is a Job because we want it to complete and not restart\r\n* it should run the image that YOU created, not mine (although you can use that if you want to)\r\n* We call the job `mnist-training-v1`. We'll modify it in the next few labs\r\n* we want 500 steps\r\n* it should use the GPU\r\n\r\n```bash\r\n# deploy it\r\nkubectl apply -f mnist-training.yaml\r\n#kubectl delete pod mnist-training-v1-6fl7k\r\nkubectl get job\r\nkubectl get pods\r\n# this may take awhile\r\nkubectl logs mnist-training-v1-qdj9t\r\n```\r\n\r\nAt this point the training doesn't do anything valuable. We aren't saving the model or metrics anywhere, we'll do that next. \r\n\r\nBut first, let's get helm working locally, if needed.\r\n\r\n```bash\r\n# make sure everything is working locally\r\nhelm repo add stable https://charts.helm.sh/stable\r\nhelm install stable/wordpress --generate-name\r\n# we won't use this helm chart, we just want to make sure helm is working\r\nhelm delete wordpress-1604957819\r\n\r\n# now you can create your own chart using\r\nhelm create mnist-gpu\r\n# note that this creates a folder which includes all of the files necessary to create your own package. \r\n```\r\n\r\n" }, { "alpha_fraction": 0.7261871695518494, "alphanum_fraction": 0.7400721907615662, "avg_line_length": 45.98666763305664, "blob_id": "3ee88ff337ce5121f102aed52b2f9ca58ea17457", "content_id": "cb57f783b1b5aa56e0ed69bb186662cda6d24521", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3601, "license_type": "no_license", "max_line_length": 651, "num_lines": 75, "path": "/Lab85/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "# Batch Scoring Deep Learning Models With Azure Machine Learning\r\n\r\n## Overview\r\n\r\nWe want to apply `style transfer` onto a video (a video is really just a collection of images). I doubt any business would want to do this in the real world but this architecture can be generalized for any **batch scoring with deep learning scenario**. \r\n\r\nAn alternative solution using Azure Kubernetes Service can be found [here](https://github.com/Azure/Batch-Scoring-Deep-Learning-Models-With-AKS).\r\n\r\n## Architecture \r\n![Reference Architecture Diagram](https://happypathspublic.blob.core.windows.net/assets/batch_scoring_for_dl/batchscoringdl-aml-architecture-diagram.jpg)\r\n\r\nThe above architecture works as follows:\r\n1. Upload a video file to storage.\r\n2. The video file will trigger Logic App to send a request to the AML pipeline published endpoint.\r\n3. The pipeline will then process the video, apply style transfer with MPI, and postprocess the video.\r\n4. The output will be saved back to blob storage once the pipeline is completed.\r\n\r\n### What is Neural Style Transfer \r\n\r\n| Style image: | Input/content video: | Output video: | \r\n|--------|--------|---------|\r\n| <img src=\"https://happypathspublic.blob.core.windows.net/assets/batch_scoring_for_dl/style_image.jpg\" width=\"300\"> | [<img src=\"https://happypathspublic.blob.core.windows.net/assets/batch_scoring_for_dl/input_video_image_0.jpg\" width=\"300\" height=\"300\">](https://happypathspublic.blob.core.windows.net/assets/batch_scoring_for_dl/input_video.mp4 \"Input Video\") *click to view video* | [<img src=\"https://happypathspublic.blob.core.windows.net/assets/batch_scoring_for_dl/output_video_image_0.jpg\" width=\"300\" height=\"300\">](https://happypathspublic.blob.core.windows.net/assets/batch_scoring_for_dl/output_video.mp4 \"Output Video\") *click to view* |\r\n\r\n\r\n## Setup\r\n\r\nYou can run this locally on your laptop but then you'll likely need to install a bunch of dependencies like conda, az cli, etc. \r\n\r\nA better solution is to use JupyterLab from your AML compute instance. \r\n\r\n1. Open JupyterLab, then open a terminal\r\n\r\n![](../images/vscode85_2.png)\r\n\r\n\r\n```bash\r\n\r\n## clone this repo on the remote compute, if needed\r\n## git clone https://github.com/davew-msft/MLOps-E2E\r\ncd Lab85\r\n\r\n## login to Azure\r\naz login\r\n\r\n## this will create a conda environment called __batchscoringdl_aml__\r\necho \". /anaconda/etc/profile.d/conda.sh\" >> ~/.bashrc\r\necho \"conda activate\" >> ~/.bashrc\r\nconda activate\r\nconda env create -f environment.yml\r\nconda activate batchscoringdl_aml\r\n\r\n\r\n```\r\n\r\n\r\n## Steps\r\nRun through the following notebooks, opening each one in JupyterLab:\r\n1. [Test the scripts](notebooks/01_local_testing.ipynb)\r\n\r\nThe notebook should look something like this: \r\n\r\n![](../images/vscode85_3.png)\r\n\r\n\r\n2. [Setup AML](notebooks/02_setup_aml.ipynb).\r\n3. [Develop & publish AML pipeline](notebooks/03_develop_pipeline.ipynb)\r\n4. [Deploy Logic Apps](notebooks/04_deploy_logic_apps.ipynb)\r\n5. [Clean up](notebooks/05_clean_up.ipynb)\r\n\r\n## Clean up\r\nTo clean up your working directory, you can run the `clean_up.sh` script that comes with this repo. This will remove all temporary directories that were generated as well as any configuration (such as Dockerfiles) that were created during the tutorials. This script will _not_ remove the `.env` file. \r\n\r\nTo clean up your Azure resources, you can simply delete the resource group that all your resources were deployed into. This can be done in the `az group delete --name <name-of-your-resource-group>`. \r\n\r\nAll the step above are covered in the final [notebook](notebooks/05_clean_up.ipynb).\r\n\r\n" }, { "alpha_fraction": 0.7538366913795471, "alphanum_fraction": 0.7556783556938171, "avg_line_length": 50.54838562011719, "blob_id": "94bb04bae371a4a1bf10633eca35ef48c2cc92ba", "content_id": "4be404a3aa0a243ea97efe011b454d73fc737f8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1629, "license_type": "no_license", "max_line_length": 173, "num_lines": 31, "path": "/Lab90/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## What is Time Series Analysis\r\n\r\n* a statistical technique that deals with temporal data, to extract meaningful information from the data\r\n* time-series data: a sequence of observations taken sequentially in time, generally taken at specific time intervals (60 seconds, 5 mins, etc). \r\n* time series forecasting: a model to predict future alues based on previously observed values\r\n* cross-sectional data: data of other variables that are collected near the same period of time as the time series data that can enrich the data's meaning\r\n* pooled data: a combination of time series data and cross-sectional data. \r\n\r\nExample using covid data:\r\n\r\n![](../images/ts.png)\r\n\r\n### Lagging Features \r\n\r\nThis is a common feature engineering approach to TS data:\r\n\r\n![](../images/ts-lag.png)\r\n\r\none column will create many features as \"lags\". \r\n\r\n### Rolling Predictions\r\n\r\n* model performance degrade squickly for various reasons, but usually mostly because the engineered features aren't available the further out the forecast horizon you are. \r\n* don't set the forecast horizon longer than necessary and use the next two techniques to help you. \r\n\r\n![](../images/ts-degrade.png)\r\n\r\n* one solution is to augment the test data using TTA (Test Time Augmentation) where we add data so the predictions are better. \r\n * fast and works well when a model is released to production\r\n * all we are really doing is pre-computing the lags and data so model degradation will get worse\r\n* another solution: Extend Train and Refit. We slide the prediction window step by step over the whole test period and keep retraining our model. " }, { "alpha_fraction": 0.8057073354721069, "alphanum_fraction": 0.809350311756134, "avg_line_length": 76.52381134033203, "blob_id": "5abb6607ef7c3f51821638aa92498fb63e9db763", "content_id": "a1117df05c35f0fd5a2f2af864eb5e412e5f1790", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1647, "license_type": "no_license", "max_line_length": 519, "num_lines": 21, "path": "/samples/forecasting/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "run the notebooks in order\r\n\r\n01-process.ipynb\r\n\r\nThis notebook illustrates how to fill the time series with missing timeslots, remove outliers, and aggregate the data to handle the respective seasonality for different forecasting granularity -\r\n\r\nhourly patterns repeated daily\r\ndaily patterns repeated weekly\r\nmonthly patterns repeated yearly\r\n\r\n02-arima.ipynb\r\n\r\nexplore how to test stationarity, and if data is not stationary, how to remove trend and seasonality to forecast on the residual and then add trend and seasonality back in the forecast.\r\n\r\nDetermining the parameters for ARIMA requires a lot of trial and error, even with the help of ACF (auto correlation function) and PACF (partial auto correlation function) graphs. Auto ARIMA tries different parameters automatically and often produces much better results with far less effort. It's also not necessary to make the data stationary for Auto ARIMA.\r\n\r\n03-ml-ipynb\r\n\r\nWith machine learning, we transform the data out of the timeseries domain into, for example, regression problems. It's not necessary to convert data to stationary for machine learning. This Jupyter notebook explores Random Forest for forecasting by manually adding features such as lags and day of week. The sample dataset does include weather data which is often very helpful in this type of forecasting. We didn't use weather data in this case because we want to mimic datasets that don't have weather data available.\r\n\r\nAzure AutoML forecasting is capable of fitting different ML models and choosing the best model with stack or voting ensemble. It's also not necessary to manually calcuate the lags for AutoML." }, { "alpha_fraction": 0.7312691807746887, "alphanum_fraction": 0.7390688061714172, "avg_line_length": 56.79166793823242, "blob_id": "56a01320c9c71f4d9091c11a105d8ff54143baa3", "content_id": "251dd07a1a5e6cf62ade05a9526786eb297b3306", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4231, "license_type": "no_license", "max_line_length": 490, "num_lines": 72, "path": "/Lab21/README-gh.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 31: Setup and Run a GitHub Action Workflow\r\n\r\nIn this lab we build the build pipeline for our ML model. \r\n\r\n\r\n### Discussion topics before we start this section \r\n\r\n1. Review the [final yaml](../gh-actions.yml) for a build pipeline that we are going to customize\r\n1. Let's understand the basic steps now, and what we need to change in the pattern for our text classifier use case\r\n1. Note that in this pattern we are going to always redeploy the model as long as there were no failures. \r\n\r\n## Steps\r\n\r\n### Make yaml changes\r\n\r\n1. [The github actions yaml](../gh-actions.yml) is just a starter. We need to make changes. \r\n1. Specifically, search for `TODO` and we will need to make the necessary changes\r\n * **You will need to read the associated documentation next to each TODO to figure out exactly how to configure the pipelines. This is not an easy process.**\r\n\r\n\r\n\r\n## Reference Documentation\r\n\r\n* [github actions marketplace for AMLS](https://github.com/marketplace/actions/azure-machine-learning-compute-action)\r\n* [MLOps github actions](https://mlops-github.com/actions.html)\r\n* [Azure ML gh templates](https://github.com/machine-learning-apps/ml-template-azure)\r\n\r\n----------\r\n\r\nThe build pipeline has four key steps:\r\n \r\n* Attach folder to workspace and experiment. This command creates the `.azureml` subdirectory that contains a `config.json` file that is used to communicate with your Azure Machine Learning workspace. All subsequent steps rely on the `config.json` file to instantiate the workspace object.\r\n \r\n* Create the AML Compute target to run your master pipeline for model training and model evaluation.\r\n \r\n* Run the master pipeline. The master pipeline has two steps: \r\n\r\n * (1) Train the machine learning model, and \r\n * (2) Evaluate the trained machine learning model. \r\n \r\n The evaluation step evaluates if the new model performance is better than the currently deployed model. If the new model performance is improved, the evaluate step will create a new Image for deployment. The results of the evaluation step will be saved in a file called `eval_info.json` that will be made available for the release pipeline. You can review the code for the master pipeline and its steps in `aml_service/pipelines_master.py`, `scripts/train.py`, and `scripts/evaluate.py`.\r\n \r\n* Publish the build artifacts. The `snapshot of the repository`, `config.json`, and `eval_info.json` files are published as build artifacts and thus can be made available for the release pipeline.\r\n\r\n1. Select **Save and Run** to start running the build pipeline. \r\n\r\n2. Monitor the build run. The build pipeline, for the first run, will take around 15-20 minutes to run.\r\n\r\n\r\n### Review Build Artifacts\r\n\r\n1. The build will publish an artifact named `devops-for-ai`. Select **Artifacts, devops-for-ai** to review the artifact contents.\r\n\r\n ![Select Artifacts, devops-for-ai to review the artifact contents.](../images/16.png 'Build Artifacts')\r\n\r\n2. Select **outputs, eval_info.json** and then select **Download**. The `eval_info.json` is the output from the *model evaluation* step and the information from the evaluation step will be later used in the release pipeline to deploy the model. Select **Close** to close the dialog.\r\n\r\n ![Download output from the model evaluation step.](../images/17.png 'Download JSON file')\r\n\r\n3. Open the `eval_info.json` in a json viewer or a text editor and observe the information. The json output contains information such as if the model passed the evaluation step (`deploy_model`: *true or false*), and the name and id of the created image (`image_name` and `image_id`) to deploy.\r\n\r\n ![Review information in the eval_info json file.](../images/18.png 'Eval Info JSON File')\r\n\r\n### Review Build Outputs\r\n\r\n1. Observe the registered model: `compliance-classifier` in AMLS. This is likely version 3 (1 was from Jupyter and 2 was the manual model upload). \r\n\r\n ![Review registered model in Azure Portal.](../images/53.png 'Registered Models in Azure Portal')\r\n\r\n2. Observe the deployment image created during the build pipeline: `compliance-classifier-image`.\r\n\r\n ![Review deployment image in Azure Portal.](../images/54.png 'Images in Azure Portal')" }, { "alpha_fraction": 0.7169679999351501, "alphanum_fraction": 0.7260083556175232, "avg_line_length": 42.5, "blob_id": "871cd8aacacc90e4477ee809aac743dcb02ecfff", "content_id": "1689d381f3d6c9176d751c9ffa41d4778ec20aa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1438, "license_type": "no_license", "max_line_length": 348, "num_lines": 32, "path": "/Lab3/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 3: Build and Deploy an ML Model\r\n\r\nWe are going to look at some sample data for a fictitious company that sells bicycles. We have a dataset of consumers with decisions as to whether they bought or did not buy a bike. We're going to use that data to build a predictive model and show the steps to train, test, and deploy the model using some of the services we've deployed so far. \r\n\r\n### Discussion topics before we start this section \r\n\r\n1. What is ACI? AKS? ACR? \r\n1. Why use one vs the other? \r\n\r\n\r\n## Importing and running Databricks notebooks\r\n\r\nRun the following notebooks in order. You will need to import them, link them to git, and make some changes before actually executing them:\r\n\r\n* 02.Bike_Buyer_Ingest.ipynb\r\n* 03a.Bike_Buyer_Build_model.ipynb\r\n* 03b.Bike_Buyer_Build_model_runHistory.ipynb\r\n* 04.Bike_Buyer_Deploy_to_ACI.ipynb\r\n* 05.Bike_Buyer_Deploy_to_AKS_existingImage.ipynb\r\n\r\n\r\n\r\n**Let us know as soon as you don't understand something**\r\n\r\nQuestions|\r\n--------|\r\nWhy would you want to declare your csv schema vs inferring it? |\r\nHow do we use kbd shortcuts to add a cell above, delete a cell, and add a cell below? |\r\nWhat is the definition of `regularization rate`? |\r\nDid we really need to use Databricks for this use case? What other technologies could've been used?|\r\nWhat is APIM and why should we use it? |\r\nWhat is the default VM size in AKS when you don't specify the size?|\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6914814114570618, "alphanum_fraction": 0.715463399887085, "avg_line_length": 39.26803970336914, "blob_id": "2c25b56539275e68c00420fb895ed28b52225567", "content_id": "06d3b273e4555da94adba7f926edd47379937629", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4003, "license_type": "no_license", "max_line_length": 254, "num_lines": 97, "path": "/Lab20/README-gh.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 20: Setup GitHub for MLOps\r\n\r\n*I'm not a github actions expert so I hope this works repeatably. I've provided reference material at the bottom of this page*\r\n\r\n### Discussion topics before we start this section \r\n\r\nThis lab assembles all of the pieces needed in github to be ready to deploy an ML model using MLOps principles. We are going to deploy our text classifier model using github actions. \r\n\r\n\r\n\r\n## Steps\r\n\r\n1. Navigate to your git repo on gh. If needed, push your changes from your repo to gh as a new remote. \r\n1. **You will need to work with your data scientist to determine the name of the experiment.** It _might_ be `deep-learning`. \r\n\r\n\r\n### Setup Github Actions\r\n\r\n1. Click `Actions` in your repo on github\r\n1. Note that it wants to build a file called `./.github/workflows/main.yml`\r\n1. For now, just commit that file. \r\n\r\n### Setup Service Principals and secrets\r\n\r\n1. A service principal needs to be generated for authentication and getting access to your Azure subscription. I suggest adding a service principal with contributor rights to the RG where you have deployed your existing Azure Machine Learning workspace. \r\n1. Login to Azure Portal and navigate to the details pane of your AMLS workspace. \r\n1. Start CloudShell\r\n1. Run the following after changing the vars:\r\n\r\n```bash\r\n# CHANGE ME\r\nSP_NAME='davew-ghaction-svcprn'\r\nSUB_ID='52061d21-01dd-4f9e-aca9-60fff4d67ee2'\r\nRG='MLOpsWorkshop'\r\nSCOPES=\"/subscriptions/$SUB_ID/resourceGroups/$RG\"\r\n\r\naz ad sp create-for-rbac --name $SP_NAME \\\r\n --role contributor \\\r\n --scopes $SCOPES \\\r\n --sdk-auth\r\n\r\n```\r\n\r\nThe output should look something like this:\r\n\r\n```JSON\r\n{\r\n \"clientId\": \"767c9788-931d-4552-a76d-1b858c1a3fad\",\r\n \"clientSecret\": \"blahblahblah\",\r\n \"subscriptionId\": \"52061d21-01dd-4f9e-aca9-60fff4d67ee2\",\r\n \"tenantId\": \"72f988bf-86f1-41af-91ab-2d7cd011db47\",\r\n \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com\",\r\n \"resourceManagerEndpointUrl\": \"https://management.azure.com/\",\r\n \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\",\r\n \"sqlManagementEndpointUrl\": \"https://management.core.windows.net:8443/\",\r\n \"galleryEndpointUrl\": \"https://gallery.azure.com/\",\r\n \"managementEndpointUrl\": \"https://management.core.windows.net/\"\r\n}\r\n```\r\n\r\nAdd this JSON as a secret called `AZURE_CREDENTIALS` in your GH repo:\r\n\r\n1. click on the Settings tab in your repository\r\n1. then click on Secrets \r\n1. add the new secret with the name `AZURE_CREDENTIALS` \r\n\r\n### Create a workspace config file\r\n\r\n1. Create the file in your repo under [./.cloud/.azure/workspace.json](../.cloud/.azure/workspace.json). I have created one for you pointed to MY workspace, please change that file and commit and push to gh. The file follows this format:\r\n\r\n```JSON\r\n{\r\n \"name\": \"<your-workspace-name>\",\r\n \"resource_group\": \"<your-resource-group-name>\",\r\n \"create_workspace\": true\r\n}\r\n```\r\n\r\nAt this point, the process of pushing this change to gh should've started our \"shell\" workflow that we created in the beginning of this lab. Click the `Actions` option in gh and check that the workflow launched (it probably didn't do anything). \r\n\r\n\r\n\r\n## What's Next?\r\n\r\nIn the next lab, we are going to build the github action to \"build\" the MLOps Pipeline. This will include:\r\n\r\n* whenever a change is committed we will trigger a build\r\n* the build will check out the code then\r\n* then do a bunch of \"environment setup\"\r\n* then connect to our AMLS workspace\r\n* we will use the latest code to train the model using our compute instance in AMLS (or build one if needed)\r\n* look at the metrics (accuracy in this case) for the new model and if it is better than what we have we will deploy the new model\r\n\r\n## Reference Documentation\r\n\r\n* [github actions marketplace for AMLS](https://github.com/marketplace/actions/azure-machine-learning-compute-action)\r\n* [MLOps github actions](https://mlops-github.com/actions.html)\r\n" }, { "alpha_fraction": 0.6101677417755127, "alphanum_fraction": 0.6224707961082458, "avg_line_length": 34.3776969909668, "blob_id": "731df32501ccbfdc2561fee906cd7313f58630dc", "content_id": "71a4684f796975f39efd12cd74caa24800fb0976", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9835, "license_type": "no_license", "max_line_length": 132, "num_lines": 278, "path": "/scripts/train.py", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "import argparse\nimport json\nimport urllib\n\nimport os\nimport numpy as np\nimport pandas as pd\n\nimport keras\nfrom keras import models \nfrom keras import layers\nfrom keras import optimizers\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Embedding, Flatten, Dense\n\nimport azureml.core\nfrom azureml.core import Run\nfrom azureml.core.dataset import Dataset\nfrom azureml.core.datastore import Datastore\nfrom azureml.core.model import Model\n\nprint(\"Executing train.py\")\nprint(\"As a data scientist, this is where I write my training code.\")\nprint(\"Azure Machine Learning SDK version: {}\".format(azureml.core.VERSION))\n\n#-------------------------------------------------------------------\n#\n# Processing input arguments\n#\n#-------------------------------------------------------------------\n\nparser = argparse.ArgumentParser(\"train\")\n\nparser.add_argument(\"--model_name\", type=str, help=\"model name\", dest=\"model_name\", required=True)\nparser.add_argument(\"--build_number\", type=str, help=\"build number\", dest=\"build_number\", required=True)\n\nargs = parser.parse_args()\n\nprint(\"Argument 1: %s\" % args.model_name)\nprint(\"Argument 2: %s\" % args.build_number)\n\n#-------------------------------------------------------------------\n#\n# Define internal variables\n#\n#-------------------------------------------------------------------\n\ndatasets_folder = './datasets'\n\n# this is the URL to the CSV file containing the GloVe vectors\nglove_url = ('https://quickstartsws9073123377.blob.core.windows.net/'\n 'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'\n 'quickstarts/connected-car-data/glove.6B.100d.txt')\n\nglove_ds_name = 'glove_6B_100d'\nglove_ds_description ='GloVe embeddings 6B 100d'\n\n# this is the URL to the CSV file containing the connected car component descriptions\ncardata_url = ('https://quickstartsws9073123377.blob.core.windows.net/'\n 'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'\n 'quickstarts/connected-car-data/connected-car_components.csv')\n\ncardata_ds_name = 'connected_car_components'\ncardata_ds_description = 'Connected car components data'\n\nembedding_dim = 100 \ntraining_samples = 90000 \nvalidation_samples = 5000 \nmax_words = 10000\n\nrun = Run.get_context()\nws = run.experiment.workspace\nds = Datastore.get_default(ws)\n\n#-------------------------------------------------------------------\n#\n# Process GloVe embeddings dataset\n#\n#-------------------------------------------------------------------\n\n# The GloVe embeddings dataset is static so we will only register it once with the workspace\n\nprint(\"Downloading GloVe embeddings...\")\n\ntry:\n glove_ds = Dataset.get_by_name(workspace=ws, name=glove_ds_name)\n print('GloVe embeddings dataset already registered.')\nexcept:\n print('Registering GloVe embeddings dataset...')\n glove_ds = Dataset.File.from_files(glove_url)\n glove_ds.register(workspace=ws, name=glove_ds_name, description=glove_ds_description)\n print('GloVe embeddings dataset successfully registered.')\n \nfile_paths = glove_ds.download(target_path=datasets_folder, overwrite=True)\nglove_file_path = file_paths[0]\nprint(\"Download complete.\")\n\n#-------------------------------------------------------------------\n#\n# Process connected car components dataset\n#\n#-------------------------------------------------------------------\n\nprint('Processing connected car components dataset...')\n\n# Download the current version of the dataset and save a snapshot in the datastore\n# using the build number as the subfolder name\n\nlocal_cardata_path = '{}/connected-car-components.csv'.format(datasets_folder)\nds_cardata_path = 'datasets/{}'.format(args.build_number)\n\nurllib.request.urlretrieve(cardata_url, local_cardata_path)\nds.upload_files([local_cardata_path], target_path=ds_cardata_path, overwrite=True)\n\ncardata_ds = Dataset.Tabular.from_delimited_files(path=[(ds, 'datasets/{}/connected-car-components.csv'.format(args.build_number))])\n\n# For each run, register a new version of the dataset and tag it with the build number.\n# This provides full traceability using a specific Azure DevOps build number.\n\ncardata_ds.register(workspace=ws, name=cardata_ds_name, description=cardata_ds_description,\n tags={\"build_number\": args.build_number}, create_new_version=True)\nprint('Connected car components dataset successfully registered.')\n\ncar_components_df = cardata_ds.to_pandas_dataframe()\ncomponents = car_components_df[\"text\"].tolist()\nlabels = car_components_df[\"label\"].tolist()\n\nprint(\"Processing car components data completed.\")\n\n#-------------------------------------------------------------------\n#\n# Use the Tokenizer from Keras to \"learn\" a vocabulary from the entire car components text\n#\n#-------------------------------------------------------------------\n\nprint(\"Tokenizing data...\") \n\ntokenizer = Tokenizer(num_words=max_words)\ntokenizer.fit_on_texts(components)\nsequences = tokenizer.texts_to_sequences(components)\n\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' % len(word_index))\n\ndata = pad_sequences(sequences, maxlen=embedding_dim)\n\nlabels = np.asarray(labels)\nprint('Shape of data tensor:', data.shape)\nprint('Shape of label tensor:', labels.shape)\nprint(\"Tokenizing data complete.\")\n\n#-------------------------------------------------------------------\n#\n# Create training, validation, and testing data\n#\n#-------------------------------------------------------------------\n\nindices = np.arange(data.shape[0]) \nnp.random.shuffle(indices)\ndata = data[indices]\nlabels = labels[indices]\n\nx_train = data[:training_samples]\ny_train = labels[:training_samples]\n\nx_val = data[training_samples: training_samples + validation_samples]\ny_val = labels[training_samples: training_samples + validation_samples]\n\nx_test = data[training_samples + validation_samples:]\ny_test = labels[training_samples + validation_samples:]\n\n#-------------------------------------------------------------------\n#\n# Apply the vectors provided by GloVe to create a word embedding matrix\n#\n#-------------------------------------------------------------------\n\nprint(\"Applying GloVe vectors...\")\n\nembeddings_index = {}\nf = open(glove_file_path)\nfor line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\nf.close()\n\nprint('Found %s word vectors.' % len(embeddings_index))\n\nembedding_matrix = np.zeros((max_words, embedding_dim))\nfor word, i in word_index.items():\n if i < max_words:\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector \nprint(\"Applying GloVe vectors completed.\")\n\n#-------------------------------------------------------------------\n#\n# Build and train the model\n#\n#-------------------------------------------------------------------\n\n# Use Keras to define the structure of the deep neural network \nprint(\"Creating model structure...\")\n\nmodel = Sequential()\nmodel.add(Embedding(max_words, embedding_dim, input_length=embedding_dim))\nmodel.add(Flatten())\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.summary()\n\n# fix the weights for the first layer to those provided by the embedding matrix\nmodel.layers[0].set_weights([embedding_matrix])\nmodel.layers[0].trainable = False\nprint(\"Creating model structure completed.\")\n\nopt = optimizers.RMSprop(lr=0.1)\n\nprint(\"Training model...\")\nmodel.compile(optimizer=opt,\n loss='binary_crossentropy',\n metrics=['acc'])\nhistory = model.fit(x_train, y_train,\n epochs=3, \n batch_size=32,\n validation_data=(x_val, y_val))\nprint(\"Training model completed.\")\n\nprint(\"Saving model files...\")\n# create a ./outputs/model folder in the compute target\n# files saved in the \"./outputs\" folder are automatically uploaded into run history\nos.makedirs('./outputs/model', exist_ok=True)\n# save model\nmodel.save('./outputs/model/model.h5')\nprint(\"model saved in ./outputs/model folder\")\nprint(\"Saving model files completed.\")\n\n#-------------------------------------------------------------------\n#\n# Evaluate the model\n#\n#-------------------------------------------------------------------\n\nprint('Model evaluation will print the following metrics: ', model.metrics_names)\nevaluation_metrics = model.evaluate(x_test, y_test)\nprint(evaluation_metrics)\n\nrun = Run.get_context()\nrun.log(model.metrics_names[0], evaluation_metrics[0], 'Model test data loss')\nrun.log(model.metrics_names[1], evaluation_metrics[1], 'Model test data accuracy')\n\n#-------------------------------------------------------------------\n#\n# Register the model the model\n#\n#-------------------------------------------------------------------\n\nos.chdir(\"./outputs/model\")\n\n# The registered model references the data set used to provide its training data\n\nmodel_description = 'Deep learning model to classify the descriptions of car components as compliant or non-compliant.'\nmodel = Model.register(\n model_path='model.h5', # this points to a local file\n model_name=args.model_name, # this is the name the model is registered as\n tags={\"type\": \"classification\", \"run_id\": run.id, \"build_number\": args.build_number},\n description=model_description,\n workspace=run.experiment.workspace,\n datasets=[('training data', cardata_ds), ('embedding data', glove_ds)])\n\nprint(\"Model registered: {} \\nModel Description: {} \\nModel Version: {}\".format(model.name, \n model.description, model.version))\n" }, { "alpha_fraction": 0.6845849752426147, "alphanum_fraction": 0.6990777254104614, "avg_line_length": 50.01369857788086, "blob_id": "fbbd90e08ce05a46bb423fab4d9d2d3fb183b760", "content_id": "1cec6e36ad8b84e2b45cf13bf86605642593660c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3795, "license_type": "no_license", "max_line_length": 483, "num_lines": 73, "path": "/Lab43/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Accounts Receivable Prediction Using automl\r\n\r\n[Background](https://www.kaggle.com/ahammedjaleel/account-receivable-prediction)\r\n\r\nWe are going to use a copy of this dataset and simplify it to just do binary classification -- will the payment be late or not. \r\n\r\nThe modified dataset is available [here](./WA_Fn-UseC_-Accounts-Receivable.csv). All we've done is add a `LatePayment` binary column to the end of the dataset. \r\n\r\nWe will use Postman to test the API. You can download Postman now. \r\n\r\nWe will use the UI for most of this lab to get you familiar with automl capabilities. All of this can be done in code however. [Here is a slightly different example that you can modify](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/local-run-classification-credit-card-fraud/auto-ml-classification-credit-card-fraud-local.ipynb) that demonstrates how to do all of these steps from a Jupyter notebook programmatically. \r\n\r\n## automl steps\r\n\r\n1. Upload the file as a `dataset` using your AMLS workspace. Make sure you choose all of the correct settings when you upload the file. The most important is the header rows.\r\n1. Create a new `Automated ML` run in your AMLS workspace. \r\n * Choose your new dataset\r\n * Choose Create New Experiment\r\n * Target Column is `LatePayment`\r\n * Choose a Compute Cluster or create it\r\n * This is a Classification task type\r\n * **Make sure you change the `View add'l configuration settings`**\r\n * primary metric: accuracy\r\n * in the interest of time you can block quite a few of the algorithms. \r\n * Make sure you leave at least `Logistic Regression`\r\n * Exit Criterion = training job time of .25 hours (this means training will run for about 15 mins)\r\n * Start the automl run\r\n\r\nMonitor the run using the AMLS UI. When it's done, check the following: \r\n* Data guardrails. Any interesting issues?\r\n* What was the best model? Why are ensemble models always the best?\r\n* Create a model explainability run for one of the non-ensemble models\r\n\r\n## Deploy the model\r\n\r\nWe can now deploy one of the models to either AKS (if you have one running) or ACI (if you don't have AKS cluster running this will be much faster). You should not need to change any other options. This is a \"no-code deployment\" since it is so simple. \r\n\r\nYou can monitor the deployment under `Endpoints` in AMLS \r\n\r\nWe can now test the deployment using Postman. \r\n* Open Postman and build a new collection. \r\n* Choose Import and import the swagger link from the deployment endpoint page.\r\n\r\n![](./img/sw1.png)\r\n\r\nYou can test with this data in the Body:\r\n\r\n```json\r\n{\r\n \"data\": [\r\n {\r\n \"countryCode\": \"391\",\r\n \"customerID\": \"0379-NEVHP\",\r\n \"PaperlessDate\": \"4/6/2013\",\r\n \"invoiceNumber\": \"611365\",\r\n \"InvoiceDate\": \"1/2/2013\",\r\n \"DueDate\": \"2/1/2013\",\r\n \"InvoiceAmount\": \"55.94\",\r\n \"Disputed\": \"No\",\r\n \"SettledDate\": \"1/15/2013\",\r\n \"PaperlessBill\": \"Paper\",\r\n \"DaysToSettle\": \"13\",\r\n \"DaysLate\": \"5\"\r\n }\r\n ]\r\n}\r\n```\r\n\r\nNow that you have a working model, let's look at `continuous retraining` (a type of MLOps) using AMLS Pipelines. We will build these programmatically using a Jupyter notebook as a guide. Open JupyterLab using your Jupyter compute instance in AMLS (if you don't have one, build a small one now). \r\n\r\nNow open [amls-automl-continuous-retraining.ipynb](amls-automl-continuous-retraining.ipynb).\r\n\r\nExecute each of the cells, changing any parameters as needed. **This notebook is not 100% working code. It shows the basic scaffolding. You may need to make some slight modifications. As always, let me know if I can help.**" }, { "alpha_fraction": 0.7738515734672546, "alphanum_fraction": 0.7738515734672546, "avg_line_length": 185.3333282470703, "blob_id": "6b69379001d4a0e2d22cc4744e95d3bc498a442e", "content_id": "71316e43c2e03d181441da4281135af83c2f0caa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 566, "license_type": "no_license", "max_line_length": 429, "num_lines": 3, "path": "/Lab120/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "[In this notebook](./mlflow-train.ipynb) we do some \"local\" training and we log the metrics to AMLS but we use the MLflow APIs. AMLS is really a superset of MLflow but in many regards the data science community has landed on MLflow for model management. In this notebook we look at how we can leverage AMLS as a PaaS-style offering for MLflow. You can even use AMLS as the MLflow endpoint for training you do in AWS and GCP. \r\n\r\nOpen this notebook in your AMLS Compute target or vscode using Remote-SSH development, or anywhere you have AMLS SDK installed. \r\n\r\n" }, { "alpha_fraction": 0.6424242258071899, "alphanum_fraction": 0.6606060862541199, "avg_line_length": 22.78333282470703, "blob_id": "5d2551794ce4f24a178828465e5c4b83e2cacf70", "content_id": "fc8605852ae4352bd6ba682d097cdcd586cf6139", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1485, "license_type": "no_license", "max_line_length": 118, "num_lines": 60, "path": "/Lab104/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## TFJob\r\n\r\nWe are going to use our AKS cluster to submit tensorflow jobs. \r\n\r\nIt's worth researching TFJob and learning about it on your own. \r\n\r\nBut here is what a simple tf training job looks like:\r\n\r\n```yaml\r\napiVersion: kubeflow.org/v1beta1\r\nkind: TFJob\r\nmetadata:\r\n name: example-tfjob\r\nspec:\r\n tfReplicaSpecs:\r\n MASTER:\r\n replicas: 1\r\n template:\r\n spec:\r\n containers:\r\n - image: <DOCKER_USERNAME>/tf-mnist:gpu\r\n name: tensorflow\r\n resources:\r\n limits:\r\n nvidia.com/gpu: 1\r\n restartPolicy: OnFailure\r\n```\r\n\r\nThere's a ton more we could say, but won't. \r\n\r\nLet's use our docker container for mnist that we built in Lab101. \r\n\r\nWe want to ensure we are using GPUs.\r\n\r\nChange tf-mnist.yaml as needed. \r\n\r\n```bash\r\nkubectl create -f Lab104/tf-mnist.yaml \r\nkubectl get tfjob\r\nkubectl get pod\r\nkubectl logs lab104-gpu-master-0\r\n```\r\n\r\n## Persistent Storage\r\n\r\nOnce the container stops we lose the trained model so we still have some work to do. We'll use PVC to do this\r\n\r\n```bash\r\n# first we need a storage class\r\nkubectl create -f Lab104/az-file-sc.yaml \r\n# then the PVC\r\nkubectl create -f Lab104/az-file-pvc.yaml \r\n\r\nkubectl get pvc my-azurefile\r\n\r\n# now we modify the yaml to include the storage options. I've done this in a different yaml file that you can compare\r\nkubectl apply -f Lab104/tf-mnist-pvc.yaml \r\nkubectl get pod\r\nkubectl logs lab104-gpu-master-0\r\n```" }, { "alpha_fraction": 0.7313961982727051, "alphanum_fraction": 0.7377746105194092, "avg_line_length": 61.681819915771484, "blob_id": "73e795d9121beab6595df3c66b7bc043b1b2eecb", "content_id": "1d16179c784a38dd216bc1af475a1a839fa722fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1411, "license_type": "no_license", "max_line_length": 223, "num_lines": 22, "path": "/Lab10/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 10: Setup\r\n\r\n* Do Lab 1 if you have not done that yet. \r\n### Discussion topics before we start this section \r\n\r\n* There are different tools we can use for python/jupyter development including:\r\n * **Best Approach**: spin up a compute instance in AMLS and use that for development\r\n * compute instances have a built-in JupyterLab server. Each data scientist needs their own compute instance, but the underlying storage is shared. This means everyone in the workspace can collaborate and share code. \r\n * you can use vscode as the IDE and use Remote development in vscode using ssh\r\n * pretty much anything else that supports python and can call an API (basically, python) should work including everything from pyCharm to vim and emacs. \r\n\r\n## Setup\r\n\r\n1. Make sure you have an Azure DevOps (AzDO) subscription **or** a github account.\r\n1. Clone this repo into AzDO/gh\r\n2. Make sure you have an AMLS workspace. \r\n1. decide which Jupyter/python development tool you want to use (suggestion: AMLS compute)\r\n1. clone this git repo into your Juyter/python environment: `https://github.com/davew-msft/MLOps-E2E`\r\n\r\n>In the real world you would want to use your own gh repo. Why? You may notice a `Terminal` option in Azure Notebooks. That option allows you to `git pull` and `git push` to gh. \r\n\r\nPlay around with the Jupyter environment by examining some of the .ipynb files. \r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7647058963775635, "avg_line_length": 84, "blob_id": "ad38282aa5207763c5649a92c00867aea289d96d", "content_id": "26c1b368716ae7bdce10cbd22100a8e71ebbe549", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 85, "license_type": "no_license", "max_line_length": 84, "num_lines": 1, "path": "/Lab200/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "this folder contains various AMLS tutorials, see [README](../README.md) for details. " }, { "alpha_fraction": 0.7056538462638855, "alphanum_fraction": 0.7121277451515198, "avg_line_length": 26.629629135131836, "blob_id": "21906cd492c1b0131ae88a72b65000274af8dc77", "content_id": "669a531a158eb50075fc70dec8d5fa1aa915a07a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2317, "license_type": "no_license", "max_line_length": 119, "num_lines": 81, "path": "/Lab100/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Kubeflow \r\n\r\n### Why kubeflow?\r\n\r\nKubernetes solves a lot of thorny data science problems:\r\n* training is parallelizable\r\n* distributed training can be distributed (we'll do that with TensorFlow)\r\n\r\nTraining at scale: \r\n<a href=\"https://www.youtube.com/watch?v=v4N3Krzb8Eg\">![OpenAI](./thumbnail.png)</a>\r\n\r\n### Prerquisites on your local dev machine\r\n\r\n* docker\r\n* az cli\r\n* kubectl\r\n* git\r\n* helm\r\n* ksonnet\r\n\r\n### Install AKS\r\n\r\nWe are going to create a GPU-enabled AKS cluster. **Make sure you shut this down when you are done to control costs.**\r\n\r\nIf you already have a GPU enabled AKS cluster then you can skip this step. \r\n\r\n```bash\r\n# change your env vars as needed\r\nSUBSCRIPTION='davew demo'\r\nRES_GROUP=MLOpsWorkshop\r\nLOCATION=\"eastus\"\r\nNODE_SIZE_GPU=Standard_NC6\r\nNODE_SIZE_CPU=Standard_D2_v2\r\nAKS_NAME=\"davew-aks-gpu2\" \r\nNODE_COUNT=2 #number of AKS VMs\r\n\r\naz login\r\naz account set --subscription \"${SUBSCRIPTION}\"\r\naz group create --name $RES_GROUP --location $LOCATION\r\n\r\n# determine AKS available versions, then set the most recent\r\naz aks get-versions --location $LOCATION -o table\r\nAKS_VER=1.18.8 \r\n\r\n\r\naz aks create \\\r\n --node-vm-size $NODE_SIZE_GPU \\\r\n --resource-group $RES_GROUP \\\r\n --name $AKS_NAME \\\r\n --node-count $NODE_COUNT \\\r\n --kubernetes-version $AKS_VER \\\r\n --location $LOCATION \\\r\n --generate-ssh-keys\r\n\r\n# this will probably take a while\r\n# now, get the credentials to connect to the AKS cluster. All this is really is the entry for the kubeconfig file\r\n\r\naz aks get-credentials --name $AKS_NAME --resource-group $RES_GROUP\r\n\r\n# make sure we are pointing to the correct context\r\nkubectl config get-contexts\r\nkubectl config set-context $AKS_NAME\r\n\r\n#For AKS we need to install the NVIDIA Device Plugin (actually, this step should not be needed anymore)\r\n\r\n#let's validate\r\nkubectl get nodes\r\n#this should indicate GPU\r\nkubectl describe node <any node name> | grep nvidia.com/gpu\r\n# if not then we need to fix it by installing the daemonset\r\nkubectl create namespace gpu-resources\r\nkubectl apply -f path/to/Lab100/nvidia-device-plugin-ds.yaml\r\n#now doublecheck\r\nkubectl describe node <any node name> | grep nvidia.com/gpu\r\n\r\n\r\n\r\n# don't close your bash shell, we'll use it for the next lab\r\n```\r\n\r\n**don't close your bash shell, we'll use it for the next lab**" }, { "alpha_fraction": 0.7360970973968506, "alphanum_fraction": 0.7444388270378113, "avg_line_length": 58.89230728149414, "blob_id": "bc449f23d924673041cb55d997afbfe7e89ceb5b", "content_id": "9f829294a642f2797650060be69e02bf4e2cf052", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3956, "license_type": "no_license", "max_line_length": 490, "num_lines": 65, "path": "/Lab21/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab 21: Setup and Run a Build Pipeline\r\n\r\nIn this lab we build the build pipeline for our ML model. This lab is lengthy and sometimes frustrating due to connectivity issues. These are \"one-time\" issues. \r\n\r\n[More information on MLOps using Azure DevOps](https://github.com/microsoft/MLOpsPython/blob/master/docs/getting_started.md#set-up-the-release-deployment-andor-batch-scoring-pipelines) including samples. \r\n\r\n\r\n### Discussion topics before we start this section \r\n\r\n1. Review the yaml for a build pipeline\r\n2. Service Connections errors\r\n\r\n## Steps\r\n\r\n1. From left navigation select **Pipelines** . Our pipeline already shows up because we have our template yml file. It probably also has a failure run because you just updated the variables. That's ok. \r\n\r\n4. Review the YAML file. \r\n\r\n1. Change EVERY `azureSubscription` variable to your subscription.\r\n\r\n>Question: Is there a better way to do this? \r\n\r\nThe build pipeline has four key steps:\r\n \r\n* Attach folder to workspace and experiment. This command creates the `.azureml` subdirectory that contains a `config.json` file that is used to communicate with your Azure Machine Learning workspace. All subsequent steps rely on the `config.json` file to instantiate the workspace object.\r\n \r\n* Create the AML Compute target to run your master pipeline for model training and model evaluation.\r\n \r\n* Run the master pipeline. The master pipeline has two steps: \r\n\r\n * (1) Train the machine learning model, and \r\n * (2) Evaluate the trained machine learning model. \r\n \r\n The evaluation step evaluates if the new model performance is better than the currently deployed model. If the new model performance is improved, the evaluate step will create a new Image for deployment. The results of the evaluation step will be saved in a file called `eval_info.json` that will be made available for the release pipeline. You can review the code for the master pipeline and its steps in `aml_service/pipelines_master.py`, `scripts/train.py`, and `scripts/evaluate.py`.\r\n \r\n* Publish the build artifacts. The `snapshot of the repository`, `config.json`, and `eval_info.json` files are published as build artifacts and thus can be made available for the release pipeline.\r\n\r\n1. Select **Save and Run** to start running the build pipeline. \r\n\r\n2. Monitor the build run. The build pipeline, for the first run, will take around 15-20 minutes to run.\r\n\r\n\r\n### Review Build Artifacts\r\n\r\n1. The build will publish an artifact named `devops-for-ai`. Select **Artifacts, devops-for-ai** to review the artifact contents.\r\n\r\n ![Select Artifacts, devops-for-ai to review the artifact contents.](../images/16.png 'Build Artifacts')\r\n\r\n2. Select **outputs, eval_info.json** and then select **Download**. The `eval_info.json` is the output from the *model evaluation* step and the information from the evaluation step will be later used in the release pipeline to deploy the model. Select **Close** to close the dialog.\r\n\r\n ![Download output from the model evaluation step.](../images/17.png 'Download JSON file')\r\n\r\n3. Open the `eval_info.json` in a json viewer or a text editor and observe the information. The json output contains information such as if the model passed the evaluation step (`deploy_model`: *true or false*), and the name and id of the created image (`image_name` and `image_id`) to deploy.\r\n\r\n ![Review information in the eval_info json file.](../images/18.png 'Eval Info JSON File')\r\n\r\n### Review Build Outputs\r\n\r\n1. Observe the registered model: `compliance-classifier` in AMLS. This is likely version 3 (1 was from Jupyter and 2 was the manual model upload). \r\n\r\n ![Review registered model in Azure Portal.](../images/53.png 'Registered Models in Azure Portal')\r\n\r\n2. Observe the deployment image created during the build pipeline: `compliance-classifier-image`.\r\n\r\n ![Review deployment image in Azure Portal.](../images/54.png 'Images in Azure Portal')" }, { "alpha_fraction": 0.7543859481811523, "alphanum_fraction": 0.8070175647735596, "avg_line_length": 57, "blob_id": "a13d24454842d899f27d06d51ef8e39b869854a5", "content_id": "2b8e24696067c646d8ebd20deabf0035a3a9a13f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 57, "license_type": "no_license", "max_line_length": 57, "num_lines": 1, "path": "/Lab302/README.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "## Lab302: Integrating Pre-Built AI into your application" }, { "alpha_fraction": 0.7755661606788635, "alphanum_fraction": 0.7899898886680603, "avg_line_length": 41.274391174316406, "blob_id": "d1dc2becba068cfefeaf0fe88963e70c23573310", "content_id": "13b75774d5a22a055f83f61c1be5c29c9ff440e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6945, "license_type": "no_license", "max_line_length": 191, "num_lines": 164, "path": "/todo.md", "repo_name": "davew-msft/MLOps-E2E", "src_encoding": "UTF-8", "text": "demo:\n Lab11/safe-driver.ipynb\n Lab12/safe-driver-prediction-v2.ipynb\n Lab12/pipelines.ipynb\n Lab12/inferencing.ipynb\n\nHERE: \nredo the old deep learning text example as a new set of labs. \nshould all be in jupyter-notebooks folder\n\n\nmeasuring model performance and making decsions is Challenge05 in DevOps4DS\n\n\nLab 16 maps to Challenge06\n\ngood project template: https://dev.azure.com/DAISolutions/MLOperationalization/_git/MLOpsBasic\n\n\n\nalternate mlops workshop\n I have most of this\n https://github.com/microsoft/MCW-ML-Ops/blob/master/Hands-on%20lab/HOL%20step-by-step%20-%20MLOps.md\n\n\n\nmodel deployment to iotedge: \n https://github.com/nthacker/AML-service-labs/blob/master/lab-6/visual-studio/README.md\n https://github.com/Azure/azureml-examples/blob/main/tutorials/deploy-edge/ase-gpu.ipynb\n https://docs.microsoft.com/en-us/azure/machine-learning/concept-model-management-and-deployment\n\nairflow\n\nmask detection on the edge\n https://github.com/retkowsky/MaskDetection\n\nazfunc\n https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-functions\n\nfull e2e employee attrition model toward end of prezis/MLops Quick Pitch.pptx and airefarch2.pptx\n\npapermill\n https://github.com/nteract/papermill\n\ninterface to adls\n https://github.com/dask/adlfs\n\ngh actions: \n https://mlops.githubapp.com//\n https://techcommunity.microsoft.com/t5/azure-ai/using-github-actions-amp-azure-machine-learning-for-mlops/ba-p/1419027\n https://github.com/Azure/azureml-examples\n https://github.com/Azure/azureml-template\n https://github.com/machine-learning-apps/ml-template-azure\n\ntransfer learning: https://github.com/maxluk/dogbreeds-webinar\n\naml notebooks: https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml\n\n\nimg data labelling?\n https://github.com/balakreshnan/mlops/tree/master/MLAssitDataLabelling\n\n\nmlops python: https://github.com/microsoft/MLOpsPython\nmlops: https://github.com/microsoft/MLOps\nmlops scripts templates: \n https://github.com/anagha-microsoft/ncr-mlops-hol-code\n https://github.com/retkowsky/ncr-mlops-hol\n\ncogsvc workshop\nhttps://github.com/Microsoft/MCW-Cognitive-Services-and-Deep-Learning\n\ncustom vision\n You can try the Microsoft COCO DataSet - https://cocodataset.org/#home\nThere are implementations around the same for Azure Custom Vision. One such example below- https://github.com/vladkol/CustomVision.COCO\n\nedge models \n simplified \n end-to-end AI object detection on a Raspberry Pi 3 edge\ndevice, starting (almost) “from scratch” to consider each builing blocks of such a solution. This guide is designed\nfor developers as well as data scientists who wish to easily put their AI models in practice on edge devices without\nfocusing too much on the deployment.\n see \"Bringing Computer Vision models to the Intelligent Edge with Azure IoT Edge - A guide for developers and data scientists.pdf\"\n\ndataset labeling: https://github.com/MercyPrasanna/dataset-labeling/blob/master/dataset-labeling.ipynb\n\n\naml acceleration template, do it yourself, this might be a lab instead of my \"starter\" stuff\nhttps://github.com/microsoft/aml-acceleration-template\n\n\ndatasets with automl and even designer...about midway down\nhttps://github.com/shkumar64/AzureMachineLearningWorkshop\n\nenable model data collection:\nhttps://docs.microsoft.com/en-us/azure/machine-learning/how-to-enable-data-collection\n\ncall center analytics, audio\nhttps://github.com/rlagh2/callcenteranalytics with powerbi\n\n\nvision: https://github.com/microsoft/computervision-recipes/\nvideo anomaly detection: https://github.com/microsoft/mlops_videoanomalydetection\n\nhttps://github.com/lockedata/realtimeAIpipeline\n\naudio language in containers: https://techcommunity.microsoft.com/t5/azure-ai/automatically-detect-audio-language-with-the-speech-language/ba-p/1694363\nhttps://techcommunity.microsoft.com/t5/azure-ai/accelerate-self-paced-learning-at-the-edge-with-speech/ba-p/1636986\n\nautonomous driver e2e deep learning tutorial \n https://github.com/Microsoft/AutonomousDrivingCookbook\n https://github.com/microsoft/AutonomousDrivingCookbook/tree/master/AirSimE2EDeepLearning\n seems cool\n \"ML-Blueprints-for-Finance.pdf\"\n\n\ndask on amls\n https://github.com/danielsc/azureml-and-dask\nSparkML on Databricks is more mature & faster on large dataset and can deal with very large dataset (>100 GB).\nBut I don’t like Spark’s set of algorithms with limited hyper parameters for tunning which may lead to poor prediction performance.\nFor example for same Randomforest algorithm on unbalanced dataset, you may have much better performance on Sklearn than on Spark ML.\nIf your customer doesn’t have Spark skill and is very familiar with sklearn, XGBoost…then DaskML may be easier to learn for them.\nhttps://github.com/Azure/azureml-examples/blob/main/tutorials/using-dask\n\na/b testing using controlled rolloout\n https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-azure-kubernetes-service?tabs=python#deploy-models-to-aks-using-controlled-rollout-preview\n\nevent grid with AMLS\n https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-event-grid#sample-scenarios\n# Other Resources\n\n* [AMLS automl](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning)\n* [Lab 43 based on this](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb)\n* Lab 121: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-mlflow-azure-databricks\n\n## skipping for now\n\nanother mlops demo/lab/workshop\nhttps://github.com/microsoft/ignite-learning-paths-training-aiml/blob/main/aiml50/README.md\n this is more of a demo\n model is already deployed\n model makes faulty prediction\n we build up the MLOps solution with azdo\n we change a parameter and retrain via the azdo pipeline\n everything gets deployed\n we prove the pipeline works. \n\ntext analytics from cogs to aml to hyperdrive\n https://github.com/microsoft/ignite-learning-paths-training-aiml/blob/main/aiml40/README.md\n I have this started on the Lab301 branch\n\nLab400 is https://github.com/csiebler/azureml-workshop-2020\n https://github.com/csiebler/azureml-workshop-2020/blob/master/3-mlops/MLOps_with_ML_pipelines.md\n\nLab401 is https://github.com/csiebler/azureml-workshop-2020/blob/master/3-mlops/MLOps_basic_example.md\nright now this doesn't deploy correctly\n\ncontainers/onnx: this is Lab 302 \nhttps://github.com/microsoft/ignite-learning-paths-training-aiml/blob/main/aiml20/README.md\n\nvision on edge\n Lab 304\n https://github.com/Azure-Samples/azure-intelligent-edge-patterns/tree/master/factory-ai-vision\n https://docs.microsoft.com/en-us/azure/media-services/live-video-analytics-edge/custom-vision-tutorial?pivots=programming-language-csharp\n" } ]
41
StrikerEureka/DLL
https://github.com/StrikerEureka/DLL
18a2c1c63cffa959c273ad7758f90bb73351987c
48053255588a0eaa55663131cc94e80c6ff46ce6
8dd4cada63292d41da2769c5b3725344309422fa
refs/heads/master
2023-09-04T08:15:05.684169
2021-11-04T15:41:36
2021-11-04T15:41:36
424,649,609
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.484640508890152, "alphanum_fraction": 0.4879084825515747, "avg_line_length": 31.90322494506836, "blob_id": "bf438890d9581ae7e79643f835bad7b442e336ea", "content_id": "3d8ada21693ddae05e7cbde637b01106e18c9d3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6120, "license_type": "no_license", "max_line_length": 79, "num_lines": 186, "path": "/Double Linked List.py", "repo_name": "StrikerEureka/DLL", "src_encoding": "UTF-8", "text": "class Node :\n def __init__(self, data) :\n self.data = data\n self.next = None\n self.prev = None\n\nclass doublelinkedlist(object) :\n def __init__(self) :\n self.head = None\n self.tail = None\n\n def tambahbelakang(self, data) :\n if self.head is None :\n new_node = Node(data)\n new_node.prev = None\n self.head = new_node\n else :\n new_node = Node(data)\n current_node = self.head\n while current_node.next is not None :\n current_node = current_node.next\n current_node.next = new_node\n new_node.prev = current_node\n new_node.next = None\n self.tail = new_node\n\n print(\"Data ditambahkan.\")\n print(\"\")\n \n def tambahdepan(self, data) :\n if self.head is None :\n new_node = Node(data)\n new_node.prev = None\n self.head = new_node\n else :\n new_node = Node(data)\n self.head.prev = new_node\n new_node.next = self.head\n self.head = new_node\n new_node.prev = None\n \n print(\"Data ditambahkan.\")\n print(\"\")\n\n def tambahsetelah(self, key, data) :\n current_node = self.head\n while current_node is not None :\n if current_node.next is None and current_node.data == key :\n self.tambahbelakang(data)\n return\n elif current_node.data == key :\n new_node = Node(data)\n nxt = current_node.next\n current_node.next = new_node\n new_node.next = nxt\n new_node.prev = current_node\n nxt.prev = new_node\n current_node = current_node.next\n\n print(\"Data ditambahkan.\")\n print(\"\")\n\n def tambahsebelum(self, key, data) :\n current_node = self.head\n while current_node is not None :\n if current_node.prev is None and current_node.data == key :\n self.tambahdepan(data)\n return\n elif current_node.data == key :\n new_node = Node(data)\n prev = current_node.prev\n prev.next = new_node\n current_node.prev = new_node\n new_node.next = current_node\n new_node.prev = prev\n current_node = current_node.next\n \n print(\"Data ditambahkan.\")\n print(\"\")\n\n def hapusdepan(self) :\n if self.head is None :\n print (\"Data masih kosong.\")\n else :\n if self.head.next is not None :\n self.head.next.prev = None\n self.head = self.head.next\n \n print(\"Data dihapus.\")\n print(\"\")\n \n def hapusbelakang(self) :\n if self.tail is None :\n print (\"Data masih kosong.\")\n else :\n if self.tail.prev is not None :\n self.tail.prev.next = None\n self.tail = self.tail.prev\n return\n\n print(\"Data dihapus.\")\n print(\"\")\n\n def hapustarget (self, data) :\n if self.head is None :\n print (\"Data masih kosong.\")\n return\n current_node = self.head\n while current_node.data is not data and current_node.next is not None :\n current_node = current_node.next\n if current_node.data is not data :\n print (\"Data tidak ditemukan.\")\n return\n if current_node.prev is not None :\n current_node.prev.next = current_node.next\n else :\n self.head = current_node.next\n\n if current_node.next is not None :\n current_node.next.prev = current_node.prev\n else :\n self.tail = current_node.prev\n \n print(\"Data dihapus.\")\n print(\"\")\n\n def tampil(self) :\n print(\"Data : \")\n print(\"\")\n\n current_node = self.head\n while current_node is not None :\n print (current_node.data, end=\" -> \")\n current_node = current_node.next \n\n def tampilreverse(self) :\n current_node = self.tail\n while current_node is not None :\n print (current_node.data, end=\", \")\n current_node = current_node.prev\n\n def menuUmum(self):\n pilih = \"y\"\n while ((pilih == \"y\") or (pilih == \"Y\")):\n # os.system('clear')\n print('Pilih menu yang anda inginkan')\n print('==============================')\n print('1. Tambah data di belakang')\n print('2. Tambah data di depan')\n print('3. Tambah data setelah data')\n print('4. Tambah data sebelum data')\n print('5. Hapus data di depan')\n print('6. Hapus data di belakang')\n print('7. Hapus data pilihan')\n print('8. Tampilkan data')\n pilihan = str(input(\"Masukkan Menu yang anda pilih : \"))\n if(pilihan == \"1\"):\n node = str(input(\"Masukkan data : \"))\n self.tambahbelakang(node)\n elif(pilihan == \"2\"):\n node = str(input(\"Masukkan data : \"))\n self.tambahdepan(node)\n elif(pilihan == \"3\"):\n node = str(input(\"Masukkan data : \"))\n node2 = str(input(\"Masukkan setelah : \"))\n self.tambahsetelah(node2, node)\n elif(pilihan == \"4\"):\n node = str(input(\"Masukkan data : \"))\n node2 = str(input(\"Masukkan sebelum : \"))\n self.tambahsebelum(node2, node)\n elif(pilihan == \"5\"):\n self.hapusdepan()\n elif(pilihan == \"6\"):\n self.hapusbelakang()\n elif(pilihan == \"7\"):\n node = str(input(\"Masukkan data yang ingin dihapus : \"))\n self.hapustarget(node)\n elif(pilihan == \"8\"):\n self.tampil()\n x = input(\"\")\n else :\n pilih =\"n\"\n\nif __name__ == \"__main__\" :\n d = doublelinkedlist()\n d.menuUmum()\n" } ]
1
jDiazPrieto/real_estate_website
https://github.com/jDiazPrieto/real_estate_website
279e1fd99f105efb0afdfa974bae2cfde5bb8b54
da60618fb5d1dbcadee5ad8b798cdf304580999b
9da21b7a114afd12ba4764ad98ebfac29a53fa5f
refs/heads/master
2021-10-15T23:46:12.327979
2019-02-07T00:34:51
2019-02-07T00:34:51
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7659175992012024, "alphanum_fraction": 0.7659175992012024, "avg_line_length": 27.157894134521484, "blob_id": "d83fff72fbf3a6532a095a3d4c8ae42ac9dba6b4", "content_id": "59e55d0c2f0326cba47fca7a3ac46cf6d9ec200f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 222, "num_lines": 19, "path": "/python_sandbox_starter/modules.py", "repo_name": "jDiazPrieto/real_estate_website", "src_encoding": "UTF-8", "text": "# A module is basically a file containing a set of functions to include in your application. There are core python modules, modules you can install using the pip package manager (including Django) as well as custom modules\nimport datetime\nimport time\nimport camelcase\n\nimport validator\n\ntoday = datetime.date.today()\nprint(today)\nprint(time.time())\n\ncamel = camelcase.CamelCase()\nprint(camel.hump(\"camelCASE\"))\n\nemail = \"testtest.com\"\nif validator.validate_email(email):\n print(\"email is good\")\nelse:\n print(\"emal is fucked up\")" } ]
1
Mou97/safeSpace
https://github.com/Mou97/safeSpace
b6a7b5587158c38c5f29da1435d690e7e1a97e94
5b5e5c917f265bfaef09f43f8030974618323acb
4db211b30ab8d9702f3f6e15c3e3728a50f4cc6d
refs/heads/master
2022-12-11T15:51:01.563198
2021-05-11T03:09:42
2021-05-11T03:09:42
217,432,298
4
2
null
2019-10-25T02:13:41
2022-11-16T21:55:44
2022-12-11T10:50:19
Jupyter Notebook
[ { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7878788113594055, "avg_line_length": 12.399999618530273, "blob_id": "dc320c86037f23fb61d1bcba04575b153215d303", "content_id": "c0dc72d68526de3be8cb6eaf32997fe8088952ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 66, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/.env.example", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "DB_HOST=localhost\nDB_NAME=gopps\nDB_USER=root\nDB_PASS=\nDB_PORT=3307" }, { "alpha_fraction": 0.4587407112121582, "alphanum_fraction": 0.459913969039917, "avg_line_length": 32.657894134521484, "blob_id": "15ded9871b08737cbb704fce37db4c626bbb8c28", "content_id": "6993a8430090c76c8643767d21500c79847775c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2557, "license_type": "no_license", "max_line_length": 89, "num_lines": 76, "path": "/controllers/reportHelper.js", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "const { MongoClient } = require('mongodb');\nconst ProvenDB = require('@southbanksoftware/provendb-node-driver').Database;\n\n\n// Replace this with the URI from the ProvenDB UI.\nconst { provenDB_URI } = require('../config/provenDb');\nconst { db_name } = require('../config/provenDb')\nlet dbObject;\nlet collection;\nlet pdb;\n\n\n// First we establish a connection to ProvenDB.\nMongoClient.connect(provenDB_URI, {\n useNewUrlParser: true,\n // useUnifiedTopology: true\n})\n .then(client => {\n // Replace this with the database name from the ProvenDB UI.\n dbObject = client.db('devfest2k19');\n pdb = new ProvenDB(dbObject); // Mongo Database with helper functions.\n collection = pdb.collection('provenReport'); // With ProvenDB Driver.\n console.log('db connected')\n\n })\n .catch(err => {\n console.error('Error connecting to ProvenDB:');\n console.error(err);\n //process.exit();\n });\n\n\nmodule.exports = {\n getAllReports: report =>\n new Promise((resolve, reject) => {\n if (collection) {\n collection.find(report).toArray((queryError, result) => {\n if (queryError) {\n reject(new Error('Error fetching reports.'));\n } else {\n resolve(result);\n }\n });\n }\n }),\n proveNewReport: report =>\n new Promise((resolve, reject) => {\n const newDocument = {\n report,\n uploadDate: Date.now()\n };\n if (collection) {\n collection.insertOne(newDocument, insertError => {\n if (insertError) {\n reject(new Error('Error inserting document'));\n } else {\n /**\n * With the ProvenDB Driver.\n */\n pdb\n .submitProof()\n .then(result => {\n console.log(result);\n resolve('New Proof Created');\n })\n .catch(error => {\n console.error(error);\n reject(new Error('ERROR: Could not prove new version.'));\n });\n }\n });\n } else {\n reject(new Error('Could not acquire collection'));\n }\n })\n}" }, { "alpha_fraction": 0.4806201457977295, "alphanum_fraction": 0.4806201457977295, "avg_line_length": 17.571428298950195, "blob_id": "1541e200898f93119f0c7310a3eee1b498051e4b", "content_id": "6dc5b90b5852ab2c5efdaef5049563a81c7a31c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 129, "license_type": "no_license", "max_line_length": 38, "num_lines": 7, "path": "/controllers/home.js", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "module.exports = async (req, res) => {\n try {\n res.render('home')\n } catch (error) {\n res.send(error)\n }\n}" }, { "alpha_fraction": 0.6254295706748962, "alphanum_fraction": 0.6254295706748962, "avg_line_length": 21.461538314819336, "blob_id": "cab5f8b18710d4c69a01ef76793fd65ffae62d79", "content_id": "b9ab0110dcb11c478e43beee4493062cbd8e6388", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 291, "license_type": "no_license", "max_line_length": 67, "num_lines": 13, "path": "/controllers/reports.js", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "const provenDb = require('@southbanksoftware/provendb-node-driver')\nconst Report = require('./reportHelper')\n\nmodule.exports = async (req, res) => {\n try {\n let reports = await Report.getAllReports()\n res.send(reports)\n } catch (error) {\n res.send(error)\n }\n\n\n}" }, { "alpha_fraction": 0.5425330996513367, "alphanum_fraction": 0.5504095554351807, "avg_line_length": 30.117647171020508, "blob_id": "e0c63a91e4d3b1c7a1276eb06d5633293a511186", "content_id": "03ea4db46973d74a5cf513c49a57dad4c8d1d1ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3174, "license_type": "no_license", "max_line_length": 95, "num_lines": 102, "path": "/public/js/main.js", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "\n$(document).ready(function () {\n\n console.log('script loaded')\n // get speech from input \n let speech = 'muslims are annoying '\n\n let url = \"http://127.0.0.1:5000/api?data=\" + speech\n\n //wrap inside on click event\n fetch(url)\n .then(response => response.json())\n .then(data => {\n console.log(data)\n console.log(data == \"Hate speech detected.\")\n if (data == \"Hate speech detected.\") {\n // JSON.stringify({ report: data + ' : ' + speech })\n\n $.post(\"/saveReport\", { 'report': data + ' : ' + speech }, function (result) {\n console.log(result)\n\n $('.report_tb tbody').append(`\n \n <tr>\n <td colspan=\"2\" class=\"text\">${data + ' : ' + speech}</td>\n <td>11:50 AM</td>\n <td><i class=\"fa fa-ellipsis-v\"></i></td>\n </tr>\n\n `)\n\n });\n\n } else {\n console.log('no hatefull comment')\n }\n })\n .catch(error => console.error(error))\n\n})\n\n\nlet tab = $(\".s-sidebar__nav-link.active\");\n$(\".s-sidebar__nav-link:not('.active')\").on({\n mouseenter: function () {\n tab.removeClass(\"active\");\n },\n mouseleave: function () {\n tab.delay(\"slow\").addClass(\"active\");\n }\n});\n\n//------------keep it always on the bottom ---------------//\nvar headerProfileAvatar = document.getElementById(\"avatarWrapper\");\nvar headerProfileDropdownArrow = document.getElementById(\"dropdownWrapperArrow\");\nvar headerProfileDropdown = document.getElementById(\"dropdownWrapper\");\n\ndocument.addEventListener(\"click\", function (event) {\n var headerProfileDropdownClickedWithin = headerProfileDropdown.contains(event.target);\n\n if (!headerProfileDropdownClickedWithin) {\n if (headerProfileDropdown.classList.contains(\"active\")) {\n headerProfileDropdown.classList.remove(\"active\");\n headerProfileDropdownArrow.classList.remove(\"active\");\n }\n }\n});\n\nheaderProfileAvatar.addEventListener(\"click\", function (event) {\n headerProfileDropdown.classList.toggle(\"active\");\n headerProfileDropdownArrow.classList.toggle(\"active\");\n event.stopPropagation();\n});\n//fetching the API\n\nfunction getHate() {\n //getting text\n var text = $(\"#text\").val();\n var alertH = `<div class=\"alert alert-danger\">There is hate speech in the text</div>`;\n var alertnH = `<div class=\"alert alert-success\">There is no hate speech in the text</div>`;\n var result;\n let speech = text\n\n let url = \"http://127.0.0.1:5000/api?data=\" + speech\n\n //wrap inside on click event\n fetch(url)\n .then(response => response.json())\n .then(data => {\n console.log(data)\n console.log(data == \"Hate speech detected.\")\n if (data == \"Hate speech detected.\") {\n $('#alerts').html(alertH)\n\n } else {\n console.log('no hatefull comment')\n $('#alerts').html(alertnH)\n\n\n }\n })\n .catch(error => console.error(error))\n}" }, { "alpha_fraction": 0.8160919547080994, "alphanum_fraction": 0.8160919547080994, "avg_line_length": 42, "blob_id": "bbfa60ff801f0d7a4e4c11095c920789fa09c4a6", "content_id": "4494b00c9a448efe78cdd814e97300c591f96dff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 72, "num_lines": 2, "path": "/README.md", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "# SafeSpace \nautonomous system for hate speech moderation for an inclusive work-space \n" }, { "alpha_fraction": 0.6209134459495544, "alphanum_fraction": 0.6286057829856873, "avg_line_length": 28.707143783569336, "blob_id": "d412bf335adba83a103e6d15b6a84df6a4ec072c", "content_id": "ad548e2d1a4613eb4744a64c88e7863e07767efe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4160, "license_type": "no_license", "max_line_length": 100, "num_lines": 140, "path": "/source/forDeployment/script.py", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "import time\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch.optim as optim\nimport torch.nn as nn\nfrom collections import OrderedDict\nfrom PIL import Image\nimport seaborn as sns\nimport numpy as np \nimport pandas as pd \nimport json\n\n\n# %%\nimport torch.nn as nn\n\nclass SentimentRNN(nn.Module):\n\n def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):\n super(SentimentRNN, self).__init__()\n\n self.output_size = output_size\n self.n_layers = n_layers\n self.hidden_dim = hidden_dim\n \n # embedding and LSTM layers\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, \n dropout=drop_prob, batch_first=True)\n \n # dropout layer\n self.dropout = nn.Dropout(0.3)\n \n # linear and sigmoid layers\n self.fc = nn.Linear(hidden_dim, output_size)\n self.sig = nn.Sigmoid()\n \n\n def forward(self, x, hidden):\n \"\"\"\n Perform a forward pass of our model on some input and hidden state.\n \"\"\"\n batch_size = x.size(0)\n # embeddings and lstm_out\n x = x.long()\n embeds = self.embedding(x)\n lstm_out, hidden = self.lstm(embeds, hidden)\n \n # stack up lstm outputs\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n \n # dropout and fully-connected layer\n out = self.dropout(lstm_out)\n out = self.fc(out)\n # sigmoid function\n sig_out = self.sig(out)\n \n # reshape to be batch_size first\n sig_out = sig_out.view(batch_size, -1)\n sig_out = sig_out[:, -1] # get last batch of labels\n \n # return last sigmoid output and hidden state\n return sig_out, hidden\n \n \n def init_hidden(self, batch_size):\n ''' Initializes hidden state '''\n # Create two new tensors with sizes n_layers x batch_size x hidden_dim,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),\n weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())\n \n return hidden\n \n\n\n# %%\ncheckpoint = torch.load('model_devfest_2019.json', map_location=lambda storage, loc: storage)\nvocab_to_int = json.load( open( \"vocab_to_int.json\" ) )\n\n\n# %%\nnet = SentimentRNN(7366, 1, 800, 300, 2)\nnet.load_state_dict(checkpoint)\nnet.eval()\n\n\n# %%\nfrom string import punctuation\n\ndef pad_features(reviews_ints, seq_length):\n features = np.zeros((len(reviews_ints), seq_length), dtype=int)\n\n for i, row in enumerate(reviews_ints):\n features[i, -len(row):] = np.array(row)[:seq_length]\n \n return features\n\ndef tokenize_review(test_review):\n test_review = test_review.lower() # lowercase\n # get rid of punctuation\n test_text = ''.join([c for c in test_review if c not in punctuation])\n\n # splitting by spaces\n test_words = test_text.split()\n\n # tokens\n test_ints = []\n test_ints.append([vocab_to_int[word] for word in test_words])\n return test_ints\n\ndef predict(net, test_review, sequence_length=200):\n net.eval()\n test_ints = tokenize_review(test_review)\n seq_length=sequence_length\n features = pad_features(test_ints, seq_length)\n feature_tensor = torch.from_numpy(features)\n batch_size = feature_tensor.size(0)\n h = net.init_hidden(batch_size)\n output, h = net(feature_tensor, h)\n pred = torch.round(output.squeeze()) \n if(pred.item()==1):\n return {\"no hate detected!\",output.squeeze().item()}\n else:\n return {\"Hate speech detected.\",output.squeeze().item()}\n\ndef getOutput(model,speech,seq_length):\n test_ints = tokenize_review(speech)\n features = pad_features(test_ints, seq_length)\n feature_tensor = torch.from_numpy(features)\n return predict(model,speech,seq_length)\n\n\n# %%\nspeech = \"please kill your self\"\ncls, probToNoHate =getOutput(net,speech,200)\nprint(cls)\nprint(probToNoHate)\n\n" }, { "alpha_fraction": 0.7126168012619019, "alphanum_fraction": 0.7126168012619019, "avg_line_length": 27.600000381469727, "blob_id": "41a42ca1389f18a5a4b2a979a7db6de102d0bcfd", "content_id": "a15195b4b195ebffbecd0d12a58be9a2b68d3cf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 428, "license_type": "no_license", "max_line_length": 68, "num_lines": 15, "path": "/routes/dashboard.js", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "const express = require(\"express\");\nconst router = express.Router();\nconst provenDb = require('@southbanksoftware/provendb-node-driver');\nconst Report = require('../controllers/reportHelper');\n//require controllers\n\n//require middleware\n\n//handling requests\nrouter.get(\"/\", async(req, res) => {\n //let reports = await Report.getAllReports()\n //console.log(reports);\n res.render('dashboard')\n});\nmodule.exports = router;" }, { "alpha_fraction": 0.5882353186607361, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 16.25, "blob_id": "7df5e4096d4ff88c0c19e72f1e7132093ff33fc5", "content_id": "e021d1a5f0e7fdd2f02ad8fa3806ccb9c985be48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 68, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/config/keys.js", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "module.exports = {\n mongoURI: '',\n secret: 'we love secrets'\n}" }, { "alpha_fraction": 0.6441351771354675, "alphanum_fraction": 0.6441351771354675, "avg_line_length": 24.200000762939453, "blob_id": "3395018f05c94788de04b7f2a32b3a58d63061ba", "content_id": "a1dd4b6b2bd4c66da7d461978563157ae851131b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 503, "license_type": "no_license", "max_line_length": 54, "num_lines": 20, "path": "/routes/home.js", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "const express = require(\"express\");\nconst router = express.Router();\n//require controllers\nconst homeController = require(\"../controllers/home\");\n//require middleware\n\n//handling requests\nrouter.get(\"/\", homeController);\nrouter.get('/upload', (req, res) => {\n res.render('upload');\n});\nrouter.get('/test', (req, res) => {\n res.render('test');\n});\nrouter.post('/upload', (req, res) => {\n console.log(\"files\");\n console.log(req.files);\n res.redirect(\"/upload\");\n});\nmodule.exports = router;" }, { "alpha_fraction": 0.7346938848495483, "alphanum_fraction": 0.7346938848495483, "avg_line_length": 26.625, "blob_id": "6eca8cc0f606c9a249a1f141897d501ce555d35a", "content_id": "a33f261311e9d41edca3659237b063ef866c7437", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 441, "license_type": "no_license", "max_line_length": 66, "num_lines": 16, "path": "/routes/reports.js", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "const express = require(\"express\");\nconst router = express.Router();\n//require controllers\nconst reportsController = require(\"../controllers/reports\");\nconst saveReportController = require(\"../controllers/saveReport\");\n//require middleware\n\n//handling requests\nrouter.get('/', (req, res) => {\n res.render('reports')\n});\nrouter.get('/reports', reportsController)\nrouter.post('/saveReport', saveReportController);\n\n\nmodule.exports = router;" }, { "alpha_fraction": 0.6849315166473389, "alphanum_fraction": 0.7260273694992065, "avg_line_length": 35.5, "blob_id": "a244e88f6d3d234362cfbbc1aaff14e76e6c7a95", "content_id": "76f7b076ab022588638e129cdab6eb75e7348cfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 146, "license_type": "no_license", "max_line_length": 90, "num_lines": 4, "path": "/config/provenDb.js", "repo_name": "Mou97/safeSpace", "src_encoding": "UTF-8", "text": "module.exports = {\n db_name: process.env.DB_NAME,\n provenDB_URI: `mongodb://salade:[email protected]/devfest2k19?ssl=true`\n}\n" } ]
12
faraoman/MachineLearning
https://github.com/faraoman/MachineLearning
3534e09e2578149292386c27bd6ab61b82a78c04
c1d4ca3989c7584d1523f90752ad640fc0a3de89
c925020c4c8ecd621938f2d2bdb596597f20ae93
refs/heads/master
2021-09-16T04:21:16.697566
2018-06-16T09:44:34
2018-06-16T09:44:34
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7436375617980957, "alphanum_fraction": 0.7582944631576538, "avg_line_length": 42.13218307495117, "blob_id": "8b8973542c1a277f9aa4d9afd58fec5d22f2e070", "content_id": "268bc7f08e2aa55b50b107ef912f9066b4671096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 7505, "license_type": "no_license", "max_line_length": 308, "num_lines": 174, "path": "/Classification/DecisionTrees/DecisionTrees.Rmd", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "Hi MLEnthusiasts! Today, we will dive deeper into classification and will learn about Decision trees, how to analyse which variable is important among many given variables and how to make prediction for new data observations based on our analysis and model. \nAgain, we will continue working on the titanic dataset. This will serve our two purposes. One is to learn how to implement classification using decision treess in R and other is by doing this, we will be able to come out with the comparison among the different classification algorithms, which one is better?\nSo, like always, the first step is to set our working directory and import the dataset. \n```{r}\nsetwd(\"C:/Users/jyoti/Desktop/MachineLearning/Classification\")\ntitanicData <- read.csv(\"titanic.csv\")\n```\nLet's then find out the summary of this data.\n```{r}\nsummary(titanicData)\n```\nAs you can see, there are 261 missing values in the age column. Let's fix that first. Let's find out the distribution of age variable so that we can understand which value can be used to do missing value imputation.\n```{r}\nhist(titanicData$age)\n```\nThe distribution is more or less normal in nature. Let's then go ahead with replacing all the missing values by the mean of the age variable. This can be done by using the following R code.\n```{r}\ntitanicData$age[is.na(titanicData$age)] = 29.07\nsummary(titanicData)\n```\nNext step is to view how the dataset looks like.\n```{r}\nhead(titanicData)\n```\nLet's do some data manipulation to make the dataset useful for model making.\n```{r}\ntitanicData$female = ifelse(titanicData$sex==\"female\", 1, 0)\ntitanicData$embarked_c = ifelse(titanicData$embarked==\"C\", 1, 0)\ntitanicData$embarked_s = ifelse(titanicData$embarked==\"S\", 1, 0)\ntitanicData$pclass = as.factor(titanicData$pclass)\ntitanicData$survived = as.factor(titanicData$survived)\nhead(titanicData)\n```\nHaving done that, we also realize that the variables name, sex, embarked are no longer useful to us. So we remove them from our dataframe.\n```{r}\ntitanicData <- titanicData[-c(3, 4, 9)]\n```\nLet's not see if the numerical variables like age and fare have expectile quantiles or that also needs manipulation.\n```{r}\nbx = boxplot(titanicData$age)\n```\nAs you can see there are outliers which need to be handled.\n```{r}\nbx$stats\nquantile(titanicData$age, seq(0, 1, 0.02))\n```\n```{r}\ntitanicData$age = ifelse(titanicData$age >= 52, 52, titanicData$age)\ntitanicData$age = ifelse(titanicData$age <= 4, 4, titanicData$age)\nboxplot(titanicData$age)\n```\nPerfect! Let's do the same for fare variable.\n```{r}\nbx = boxplot(titanicData$fare)\n```\n```{r}\nbx$stats\nquantile(titanicData$fare, seq(0, 1, 0.02))\n```\nTo avoid data loss, let's limit the significance level to 96%.\n```{r}\ntitanicData$fare = ifelse(titanicData$fare >= 136, 136, titanicData$fare)\nboxplot(titanicData$fare)\n```\nLet's now start the bivariate analysis of our dataset.\nFirst let's do the boxplot analysis of survived with age and survived with fare.\n```{r}\nboxplot(titanicData$age~titanicData$survived, main=\"Boxplot for age variable\")\n```\nIt looks like people who died were mainly of middle age as the whiskers for 0 start at 10 and end at 48 approximately.\n```{r}\nboxplot(titanicData$fare~titanicData$survived, main=\"Boxplot for fare variable\")\n```\nIt looks like people who died had also soe relation with fare! Those who died had paid lower(though there are outliers too). Those who survived had paid comparatively higher fare.\nFor categorical variables, we will do bivariate analysis using mosaic plot.\n```{r}\nmosaicplot(titanicData$pclass~titanicData$survived, main=\"Boxplot for pclass variable\", color=\"skyblue\")\n```\nThis indeed reveals a useful trend.\n1. People of 1st class had a better survival rate among all the classes.\n2. People of 3sr class had the worst survival rate. \n```{r}\nmosaicplot(titanicData$female~titanicData$survived, main=\"Boxplot for gender vs survival analysis\", color=\"skyblue\")\n```\nMale passengers had worse survival rate than the females. It seems like females were saved first when the mishap happened.\n```{r}\nmosaicplot(titanicData$embarked_c~titanicData$survived, main=\"Boxplot for embarkment as C variable\", color=\"skyblue\")\n```\nIt looks like the survival rate for the embarkment other than port \"C\" is worse than port \"C\".\n```{r}\nmosaicplot(titanicData$embarked_s~titanicData$survived, main=\"Boxplot for embarkment as S variable\", color=\"skyblue\")\n```\nIt looks like the survival rate for port \"S\" was very very good, far better than the other two ports.\nLet's now do the correlation analysis of the above data. As the cor() function takes only numerical data, let's first convert all the categorical columns into numerical and store it into new dataframe.\n```{r}\ntitanicDataNumerical = data.frame(titanicData)\ntitanicDataNumerical$pclass = as.numeric(titanicData$pclass)\ntitanicDataNumerical$survived = as.numeric(titanicData$survived)\ntitanicDataNumerical$sibsp = as.numeric(titanicData$sibsp)\ntitanicDataNumerical$parch = as.numeric(titanicData$parch)\ntitanicDataNumerical$female = as.numeric(titanicData$female)\ntitanicDataNumerical$embarked_c = as.numeric(titanicData$embarked_c)\ntitanicDataNumerical$embarked_s = as.numeric(titanicData$embarked_s)\ntitanicDataNumerical$age = titanicData$age\ntitanicDataNumerical$fare = titanicData$fare\n```\nNow, let's find the correlation among all of them.\n```{r}\nlibrary(corrplot)\ncor(titanicDataNumerical)\n```\n```{r}\ncorrplot(cor(titanicDataNumerical), method=\"circle\")\n```\nSo, we can say that survival is mainly related to age, pclass, fare, female, embarked_c and embarked_S.\nLet's do the splitting of dataset between training and test sets.\n```{r}\nset.seed(1234)\nsplit = sample(1:nrow(titanicData), 0.7*nrow(titanicData))\ntrainSplit = titanicData[split, ]\ntestSplit = titanicData[-split,]\nprint(table(trainSplit$survived))\n```\n```{r}\nprint(table(testSplit$survived))\n```\nNow let's check for event rate.\n```{r}\nprop.table(table(trainSplit$survived))\n```\n```{r}\nprop.table(table(testSplit$survived))\n```\nSo, the probabilities are approx same in both train and test datasets.\nWe can now start building our decision tree using rpart algorithm.\n```{r}\nlibrary(rpart)\nlibrary(rpart.plot)\nfit = rpart(survived~., data=trainSplit, method=\"class\", control=rpart.control(minsplit=10, cp=0.008))\nrpart.plot(fit)\n```\nThus, total 13 nodes get created in this case. Each node shows:\n1. the predicted class(0 for not survived, 1 for survived)\n2. predicted probability of survival\n3. The percentage of observations in each of the node.\n```{r}\nsummary(fit)\n```\nCP is the complexity parameter. It prevents overfitting and controls the size of the tree. To get added into a node, a variable has to be having CP less than 0.008 or else tree building will not continue.\n```{r}\nprint(fit)\n```\n```{r}\nprp(fit)\n```\nThere are 6 leaf nodes representing class 0 and 7 leaf nodes representing class 1. Now, let's plot CP values.\n```{r}\nplotcp(fit)\n```\n```{r}\nprintcp(fit)\n```\nNow, let's do the predictions. \n```{r}\npredictTrain = predict(fit, trainSplit, type=\"class\")\ntable(predictTrain, trainSplit$survived)\n```\nThus, Accuracy for training dataset = 730/879 = 83.05%.\n```{r}\npredictTest = predict(fit, testSplit, type = \"class\")\ntable(predictTest, testSplit$survived)\n```\nThus, Accuracy for test dataset = 308/378 = 81.48%.\nAs compared to the logistic regression, which gives the accuracy of 80% on the training dataset and 78.83% on test dataset, decision tree gives the accuracy of 83.05% on the training dataset and 81.5% on the test dataset.\n" }, { "alpha_fraction": 0.8161435127258301, "alphanum_fraction": 0.8251121044158936, "avg_line_length": 73, "blob_id": "15d229b82c670f5e5ca7bc6c385997bed85c74f7", "content_id": "55f9e1b2bc01b79dd0c51a409992577a5d7a2512", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 223, "license_type": "no_license", "max_line_length": 120, "num_lines": 3, "path": "/readme.md", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "1. A repository containing implementation of all the machine learning algorithms.\n2. A repository containing the code of data science and machine learning models which are made for predictive analytics \nof large datasets.\n\n" }, { "alpha_fraction": 0.47434553503990173, "alphanum_fraction": 0.5382198691368103, "avg_line_length": 19.340425491333008, "blob_id": "9f6f767783c229372483c168eb7c71766e372fb6", "content_id": "652a367948726c4fb7a3b933e5b767c024004141", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 955, "license_type": "no_license", "max_line_length": 87, "num_lines": 47, "path": "/LogisticRegression/LogisticRegressionWithGradientDescent.py", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 27 15:06:16 2018\n\n@author: jyoti\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN = 100\nD = 2\n\nX = np.random.randn(N, D)\nX[:50, :] = X[:50, :] - 2*np.ones((50, D)) #centered at -2\nX[50:, :] = X[50:, :] + 2*np.ones((50, D)) #centered at +2\n\nT = np.array([0]*50 + [1]*50) #setting forst 50 elements of array to 0 and next 50 to 1\n\nones = np.array([[1]*N]).T\nXb = np.concatenate((ones, X), axis = 1)\nw = np.random.randn(D + 1)\n\n\ndef sigmoid(a):\n return 1/(1 + np.exp(-a))\n\nY = sigmoid(Xb.dot(w))\n\ndef crossEntropyErrorFunction(T, Y):\n E = 0\n for i in range(N):\n if T[i] == 1:\n E -= np.log(Y[i])\n else:\n E -= np.log(1 - Y[i])\n return E\n\nlearning_rate = 0.1\nfor i in range(100):\n if i % 10 == 0:\n print(crossEntropyErrorFunction(T, Y))\n \n w += learning_rate*Xb.T.dot(T - Y)\n Y = sigmoid(Xb.dot(w))\n \nprint(\"Final weight, w: \", w)" }, { "alpha_fraction": 0.45885634422302246, "alphanum_fraction": 0.482566237449646, "avg_line_length": 21.645160675048828, "blob_id": "862545c116d08d4b42059973caa6801c487be787", "content_id": "cc2b147da242fdfca355fbfb33b3d80f00376c9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 717, "license_type": "no_license", "max_line_length": 76, "num_lines": 31, "path": "/Projects/FacialExpressionRecognition/show_images.py", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 9 13:01:51 2018\n\n@author: jyoti\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom util import getData\n\nlabels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']\n\ndef main():\n X, Y = getData(balance_ones = False)\n \n while(True):\n for i in range(7):\n x, y = X[Y == i], Y[Y == i]\n N = len(y)\n j = np.random.choice(N)\n plt.imshow(x[j].reshape(48, 48), cmap = 'gray')\n plt.title(labels[y[j]])\n plt.show()\n prompt = input(\"Quit the program? Y/N\\n\")\n if prompt == 'Y':\n break\n \nif __name__ == '__main__':\n main()\n \n\n " }, { "alpha_fraction": 0.728300154209137, "alphanum_fraction": 0.7463833689689636, "avg_line_length": 41.739131927490234, "blob_id": "66ee53492cde8c5649bfde10ca032ee5242ea933", "content_id": "c525e1ba3a6d46afd0997b4408e981222b1bcd83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "RMarkdown", "length_bytes": 8848, "license_type": "no_license", "max_line_length": 495, "num_lines": 207, "path": "/LogisticRegression/TitanicDataAnalysis.Rmd", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "---\ntitle: \"R Notebook\"\noutput:\n html_document:\n df_print: paged\n---\n\nHi MLEnthusiasts! Today, we will learn how to implement logistic regression using R that too on a well-known dataset, The Titanic Dataset! So, our analysis becomes by getting some information about the dataset, like what all variables are in our dataset and what do we have to predict. \n\nThe dataset can be found on this link of kaggle. Following are the variables of this dataset:\nsurvival: Tells whether a particular passenger survived or not. 0 for not survived, 1 for survived.\npClass: Ticket class, 1 for 1st class, 2 for 2nd class and 3 for 3rd class.\nsex: Tells us the gender of the passenger\nAge: in years\nsibsp: # of siblings or spouses aboarding the titanic\nparch: # of parents/children of the passenger\nfare: passenger fare\nembarked: The port of embarkment; C for Cherbourg, Q for Queenstown and S for Southampton\n\nHaving seen what the data is all about, let's also understand the problem statement. The problem is to make a logistic regression model using all the variables mentioned above with dependent variable being Survived and other variables being independent. Thus, it will be a predictive model which predicts whether a passenger having given parameters will survive or not. By looking closely at the problem, we can say that it's a binary classification problem(0/1) or logistic regression problem. \n\nLet us first set our working directory and import our dataset. \n\n```{r}\ndata <- read.csv(\"C:/Users/jyoti/Downloads/LogisticRegression/train.csv\")\n```\n\nHere, data is a dataframe having all the variables and data of those variables. The dataframe has 891 observations of 12 variables. \nThe next step is to view the data inside the dataframe.\n```{r}\nView(data)\n```\nNow starts the first main step, \"Data Preparation\". To see if there is any missing data or to know about the mean or standard deviation, we use the summary() function.\n```{r}\nsummary(data)\n```\nAs can be seen, there are 177 missing values in the Age variable. We need to do missing value imputation in this case. But, before doing that, we need to check how the age distribution looks like so that we can know which imputation method to choose and apply. \n```{r}\nhist(data$Age)\n```\nSince the distribution looks somewhat normal, we can use mean value imputation in this case. That is, we can replace the missing values with the mean of the age. This doesn't deviate the mean and the distribution of the age remains the same. \n```{r}\ndata$Age[is.na(data$Age)] = 29.07\nsummary(data)\n```\nAs can be seen above, age doesn't have any missing value now.\nLet's see how the data looks like now.\n```{r}\nhead(data)\n```\nNow, let us understand the concept of dummy variables. Suppose a variable \"A\" has n classes. This variable A can be replaced by n-1 variables. If A has i, j, k, ..., classes, then A_i = 1 in the rows at which i appears in A's column and 0 for the rest of the rows. Same applies for j, k.. etc. The last value gets taken care of by the intercept.\nSo, let's introduce dummy variables inside our data for sex and embarked columns since they are holding the categorical data.\n```{r}\ndata$female = ifelse(data$Sex==\"female\", 1, 0)\ndata$embarked_c = ifelse(data$Embarked==\"C\", 1, 0)\ndata$embarked_s = ifelse(data$Embarked==\"S\", 1, 0)\nhead(data)\n```\nNow, if you have a look at dataframe, it contains 15 variables and not 12.\nThe next step is to remove those variables which we no longer need in the model making, Name, Sex since it is already taken into account by female variable, Ticket, Cabin, embarked, i.e. column number 4, 5, 9, 11 and 12.\n```{r}\nPassengerData = data[-c(4, 5, 9, 11, 12)]\nhead(PassengerData)\n```\nLet's do univariate analysis of the numerical variables, age and fare now. \n```{r}\nbx = boxplot(PassengerData$Age)\n```\nThus, there are outliers in the age variable and we need to do outlier handling in this case.\n```{r}\nbx$stats\nquantile(PassengerData$Age, seq(0, 1, 0.02))\n```\nWe can replace the outliers above 96% of the quantile range and below 4% of the quantile range so that more accuracy is obtained and the data loss is also not very significant.\n```{r}\nPassengerData$Age = ifelse(PassengerData$Age>=56, 56, PassengerData$Age)\nPassengerData$Age = ifelse(PassengerData$Age<=3, 3, PassengerData$Age)\nboxplot(PassengerData$Age)\n```\nThe boxplot comes out to be neat in this case after outlier handling. Let us now do analysis for fare variable.\n```{r}\nbx = boxplot(PassengerData$Fare)\n```\n```{r}\nbx$stats\n```\nThus, there is a very large amount of outlier data on the upper end.\n```{r}\nquantile(PassengerData$Fare, seq(0, 1, 0.02))\n```\nAs can be seen above, the major difference between the values arises above 96% of the quantile. \n```{r}\nPassengerData$Fare = ifelse(PassengerData$Fare>=133.99, 133.99, PassengerData$Fare)\nboxplot(PassengerData$Fare)\n```\nLet us now start our bivariate analysis. \n```{r}\nlibrary(car)\nscatterplot(PassengerData$Age, PassengerData$Survived)\n```\nIt is to be noted that children and old passengers were saved first during the titanic mishap.\n```{r}\nscatterplot(PassengerData$Fare, PassengerData$Survived)\n```\nLet's now make a model and check for multi-collinearity using variance inflation factor technique.\n```{r}\nlibrary(car)\nmodel <- lm(Survived~Pclass+Age+SibSp+Parch+Fare+female+embarked_c+embarked_s, , data=PassengerData)\nt = vif(model)\nsort(t, decreasing=TRUE)\n```\nAs you can see, all the values of VIF for all the variables are less than 5, we need not reject any varible and we can straight away start our analysis.\n```{r}\nmodel1<- glm(as.factor(Survived)~Pclass+Age+SibSp+Parch+Fare+female+embarked_c+embarked_s, family=\"binomial\", data=PassengerData)\nsummary(model1)\n```\nAs you can see, for some variables like Parch, Fare, embarked_c and embarked_s, the P value is greater than 0.05. Thus, here we cannot reject null hypothesis that there is no relation between survived and them. Thus, we need to accept the null hypothesis and discard these three variables from our analysis.\nWell, step function does it all for us.\n```{r}\nstepmodel = step(model1, direction=\"both\")\nformula(stepmodel)\nsummary(stepmodel)\n```\nThus, now the main formula becomes as.factor(Survived) ~ Pclass + Age + SibSp + female + embarked_s.\nNow, we can use stepmodel to predict the score for PassengerData dataset.\n```{r}\nPassengerData$score <- predict(stepmodel, newdata = PassengerData, type=\"response\")\nhead(PassengerData$score)\ntail(PassengerData$score)\n```\nThese are the probabilities values of whether the corresponding passenger survived or not.\nLet's now start the model evaluation.\n```{r}\nlibrary(lattice)\nlibrary(ggplot2)\nlibrary(caret)\nlibrary(e1071)\nPassengerData$prediction <- ifelse(PassengerData$score>=0.5, 1, 0)\ntable(factor(PassengerData$prediction), factor(PassengerData$Survived))\n```\nThus, accuracy = (TP+TN)/(TP+TN+FP+FN)=(472+244)/(472+98+77+244)=716/891=0.8036=80.36%. \nNow, let's check the ROC and AUC curves of the model.\n```{r}\nlibrary(InformationValue)\nplotROC(actuals=PassengerData$Survived, predictedScores=as.numeric(fitted(stepmodel)))\n```\n```{r}\nks_plot(actuals=PassengerData$Survived, predictedScores=as.numeric(fitted(stepmodel)))\n```\nThus, the model has AUCRC value equal to 0.8562 which implies that the model quality is very good.\nNow predict the scores on the test data.\n```{r}\ntest <- read.csv(\"C:/Users/jyoti/Downloads/LogisticRegression/test.csv\")\nsummary(test)\n```\nSince there are missing values in the test dataset also, we will follow the same series of steps as we had done for train data.\n```{r}\nhist(test$Age)\n```\nLet's replace missing values with mean as the distribution is more or less normal in nature.\n```{r}\ntest$Age[is.na(test$Age)]=30.27\nsummary(test)\n```\nThere is one missing value in fare too. \n```{r}\nhist(test$Fare)\n```\nSince the variable is skewed, let's replace the missing value with mean.\n```{r}\ntest$Fare[is.na(test$Fare)] = 14.454\nsummary(test)\n```\nLet's now do feature engineering.\n```{r}\ntest$female = ifelse(test$Sex==\"female\", 1, 0)\ntest$embarked_c = ifelse(test$Embarked==\"C\", 1, 0)\ntest$embarked_s = ifelse(test$Embarked==\"S\", 1, 0)\nhead(test)\n```\nLet's remove name, sex etc variables as we did in training set.\n```{r}\nnewtest = data.frame(test)\nnewtest = newtest[-c(1, 3, 4, 8, 10, 11)]\nhead(newtest)\n```\nLet's now do the predictions. \n```{r}\nnewtest$score<-predict(stepmodel, newtest, type = \"response\")\nhead(newtest$score)\n```\n\n```{r}\nnewtest$predicted<-ifelse(newtest$score>=0.5, 1, 0)\nhead(newtest$predicted)\n```\nLet's now make the submission dataframe.\n```{r}\nmysubmission<- data.frame(matrix(nrow=nrow(test)))\nmysubmission$PassengerId <- test$PassengerId\nmysubmission$Survived <- newtest$predicted\nmysubmission <- mysubmission[-c(1)]\n```\nLet's now convert the mysubmission dataframe into a .csv file.\n```{r}\nwrite.csv(mysubmission, 'mysubmission.csv', row.names = FALSE)\n```\n\n" }, { "alpha_fraction": 0.5112847089767456, "alphanum_fraction": 0.5677083134651184, "avg_line_length": 20.314815521240234, "blob_id": "ee4f535d36d09fb39b7b3b5a85248f1a7772e79d", "content_id": "74e03b24795bb75c1c99b8fcfa89580b5e0fa050", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1152, "license_type": "no_license", "max_line_length": 87, "num_lines": 54, "path": "/LogisticRegression/L2regularisation.py", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 28 10:59:55 2018\n\n@author: j.dixit\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN = 100\nD = 2\n\nX = np.random.randn(N, D)\nX[:50, :] = X[:50, :] - 2*np.ones((50, D)) #centered at -2\nX[50:, :] = X[50:, :] + 2*np.ones((50, D)) #centered at +2\n\nT = np.array([0]*50 + [1]*50) #setting forst 50 elements of array to 0 and next 50 to 1\n\nones = np.array([[1]*N]).T\nXb = np.concatenate((ones, X), axis = 1)\n\nw = np.random.randn(D + 1)\nZ = Xb.dot(w)\n\ndef sigmoid(a):\n return 1/(1 + np.exp(-a))\n#def forward(X, w, b):\n# return sigmoid(X.dot(w) + b)\nY = sigmoid(Z)\n\ndef crossEntropyErrorFunction(T, Y):\n E = 0\n for i in range(N):\n if T[i] == 1:\n E -= np.log(Y[i])\n else:\n E -= np.log(1 - Y[i])\n return E\n\ncrossEntropyError = crossEntropyErrorFunction(T, Y)\nprint(\"With random/normally distributed weights: \",crossEntropyError)\n\nlearning_rate = 0.1\nL2 = 0.1\n\nfor i in range(100):\n if i % 10 == 0:\n print(crossEntropyErrorFunction(T, Y))\n \n w += learning_rate*(np.dot((T-Y).T, Xb) - L2*w)\n Y = sigmoid(Xb.dot(w))\n\nprint(\"Final w: \", w)\n\n" }, { "alpha_fraction": 0.4447488486766815, "alphanum_fraction": 0.49589040875434875, "avg_line_length": 16.109375, "blob_id": "b0a2a8d707f54547dc23b87970872542f273f1d6", "content_id": "0b7d67e95ddc3e1ef7fc6f46d6f3f4d7c7639f69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1095, "license_type": "no_license", "max_line_length": 67, "num_lines": 64, "path": "/LogisticRegression/XOR.py", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 28 16:22:16 2018\n\n@author: j.dixit\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN = 4\nD = 2\n\nX = np.array([\n [0, 0],\n [0, 1],\n [1, 0],\n [1, 1]\n ])\n\nT = np.array([0, 1, 1, 0])\n\nones = np.array([[1]*N]).T\n#plt.scatter(X[:, 0], X[:, 1], c=T)\n#plt.show()\n\nxy = np.matrix(X[:, 0]*X[:, 1]).T\nXb = np.array(np.concatenate((ones, xy, X), axis = 1))\n\nw = np.random.rand(D + 2)\n\nz = Xb.dot(w)\n\ndef sigmoid(z):\n return 1/(1 + np.exp(-z))\n\nY = sigmoid(z)\n\ndef cross_entropy(T, Y):\n E = 0\n for i in range(N):\n if T[i] == 1:\n E -= np.log(Y[i])\n else:\n E -= np.log(1-np.log(Y[i]))\n return E\n\nlearning_rate = 0.0001\nerror = []\n\nfor i in range(5000):\n e = cross_entropy(T, Y)\n error.append(e)\n if i % 100 == 0:\n print(e)\n \n w += learning_rate*(np.dot((T-Y).T, Xb) - 0.01*w)\n \n Y = sigmoid(Xb.dot(w))\n \nplt.plot(error)\nplt.title(\"Cross-entropy\")\nprint(\"Final w: \", w)\nprint(\"Final classification rate\", 1-np.abs(T-np.round(Y)).sum()/N)\n" }, { "alpha_fraction": 0.5717868208885193, "alphanum_fraction": 0.6087774038314819, "avg_line_length": 25.600000381469727, "blob_id": "b2dd7cf103e325484ff6d6c2ec9ddf44a4690535", "content_id": "08d5abcb68284f135c6a8379a983617cbf246ff6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1595, "license_type": "no_license", "max_line_length": 109, "num_lines": 60, "path": "/LinearRegression/L1reg.py", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 29 22:07:08 2018\n\n@author: jyoti\n\"\"\"\n\nimport numpy as np #importing the numpy package with alias np\nimport matplotlib.pyplot as plt #importing the matplotlib.pyplot as plt\n\nN = 50 \nD = 50\n\nX = (np.random.random((N, D))-0.5)*10 \nw_dash = np.array([1, 0.5, -0.5] + [0]*(D-3))\nY = X.dot(w_dash) + np.random.randn(N)*0.5\n\nY[-1]+=30 #setting last element of Y as Y + 30\nY[-2]+=30 #setting second last element of Y as Y + 30\n\nplt.scatter(X, Y)\nplt.title('Relationship between Y and X[:, 1]')\nplt.xlabel('X[:, 1]')\nplt.ylabel('Y')\nplt.show()\n\nX = np.vstack([np.ones(N), X]).T #appending bias data points colummn to X\n\nw_ml = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, Y)) #finding weights for maximum likelihood estimation\nY_ml = np.dot(X, w_ml) \n\nplt.scatter(X[:,1], Y)\nplt.plot(X[:,1],Y_ml, color='red')\nplt.title('Graph of maximum likelihood method(Red line: predictions)')\nplt.xlabel('X[:, 1]')\nplt.ylabel('Y')\nplt.show()\n\ncosts = []\nw = np.random.randn(D)/np.sqrt(D)\nL1_coeff = 5 \nlearning_rate = 0.001\nfor t in range(500):\n Yhat = X.dot(w)\n delta = Yhat - Y\n w = w - learning_rate*(X.T.dot(delta) + L1_coeff*np.sign(w))\n meanSquareError = delta.dot(delta)/N\n costs.append(meanSquareError)\n \nw_map = w\nY_map = X.dot(w_map)\n \nplt.scatter(X[:,1], Y)\nplt.plot(X[:,1],Y_ml, color='red',label=\"maximum likelihood\")\nplt.plot(X[:,1],Y_map, color='green', label=\"map\")\nplt.title('Graph of MAP v/s ML method')\nplt.legend()\nplt.xlabel('X[:, 1]')\nplt.ylabel('Y')\nplt.show()" }, { "alpha_fraction": 0.4584980309009552, "alphanum_fraction": 0.5043478012084961, "avg_line_length": 19.721311569213867, "blob_id": "089bcb4fdd499336e72efb1894223bc2235861b7", "content_id": "733906eff73213ce7c61b88ed09b0894dbd06c30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1265, "license_type": "no_license", "max_line_length": 53, "num_lines": 61, "path": "/LogisticRegression/predict_logistic.py", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 26 19:13:44 2018\n\n@author: jyoti\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\n\n\ndef get_data():\n df = pd.read_csv(\"ecommerce_data.csv\")\n data = df.as_matrix()\n X = data[:, :-1]\n Y = data[:, -1]\n X = np.array(X)\n Y = np.array(Y)\n X[:, 1] = (X[:, 1]-X[:, 1].mean())/X[:, 1].std()\n X[:, 2] = (X[:, 2]-X[:, 2].mean())/X[:, 2].std()\n N, D = X.shape\n \n X2 = np.zeros((N, D+3))\n X2[:, 0: D-2] = X[:, 0: D-2]\n \n for n in range(N):\n t = int(X[n, D-1])\n X2[n, t+(D-1)] = 1\n \n Z = np.zeros((N, 4))\n Z[np.arange(N), X[:, D-1].astype(np.int32)] = 1\n #X2[:, -4:] = Z\n assert(np.abs(X2[:, -4:]- Z).sum() < 10e-10)\n return X2, Y\n\ndef get_binary_data():\n X, Y = get_data()\n X2 = X[Y <= 1]\n Y2 = Y[Y <= 1]\n return X2, Y2\n\nX, Y = get_binary_data()\nD = X.shape[1]\nW = np.random.randn(D)\nb = 0\n\ndef sigmoid(a):\n return 1/(1 + np.exp(-a))\n\ndef forward(x, w, b):\n return sigmoid(x.dot(w) + b)\n\nP_Y_Given_X = forward(X, W, b)\npredictions = np.round(P_Y_Given_X)\n\ndef classification_rate(Y, P):\n return np.mean(Y == P)\n\nprint(\"Score: \", classification_rate(Y, predictions))\n\n" }, { "alpha_fraction": 0.5530129671096802, "alphanum_fraction": 0.604881763458252, "avg_line_length": 21.620689392089844, "blob_id": "a0e6e9c848e1b04eb77574c622375ffaf846f276", "content_id": "0b2af57b2100799672b420f5e5201812d05a4cf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1311, "license_type": "no_license", "max_line_length": 87, "num_lines": 58, "path": "/LogisticRegression/CrossEntropyErrorFunction.py", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 27 13:33:29 2018\n\n@author: jyoti\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN = 100\nD = 2\n\nX = np.random.randn(N, D)\nX[:50, :] = X[:50, :] - 2*np.ones((50, D)) #centered at -2\nX[50:, :] = X[50:, :] + 2*np.ones((50, D)) #centered at +2\n\nT = np.array([0]*50 + [1]*50) #setting forst 50 elements of array to 0 and next 50 to 1\n\nones = np.array([[1]*N]).T\nXb = np.concatenate((ones, X), axis = 1)\n\nw = np.random.randn(D + 1)\nZ = Xb.dot(w)\n\ndef sigmoid(a):\n return 1/(1 + np.exp(-a))\n\n#def forward(X, w, b):\n# return sigmoid(X.dot(w) + b)\nY = sigmoid(Z)\n\ndef crossEntropyErrorFunction(T, Y):\n E = 0\n for i in range(N):\n if T[i] == 1:\n E -= np.log(Y[i])\n else:\n E -= np.log(1 - Y[i])\n return E\n\ncrossEntropyError = crossEntropyErrorFunction(T, Y)\nprint(\"With random/normally distributed weights: \",crossEntropyError)\n\nw = np.array([0, 4, 4])\nZ = Xb.dot(w)\n\nY = sigmoid(Z)\n\ncrossEntropyError = crossEntropyErrorFunction(T, Y)\nprint(\"With calculated weights/closed form solution: \",crossEntropyError)\n\nplt.scatter(X[:, 0], X[:, 1], c = T, s = 100, alpha = 0.5)\nplt.title(\"Two Gaussian clouds and the discriminating line\")\nx_axis = np.linspace(-6, 6, 100)\ny_axis = -x_axis\nplt.plot(x_axis, y_axis)\nplt.show()" }, { "alpha_fraction": 0.5142982602119446, "alphanum_fraction": 0.5600528120994568, "avg_line_length": 21.514850616455078, "blob_id": "a0e1eec032e9726d4cb6c0cf33ad7704c71fdcbc", "content_id": "0f461eaeefbbc9411c68dc5f3dcafa7ca9893580", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2273, "license_type": "no_license", "max_line_length": 95, "num_lines": 101, "path": "/LogisticRegression/EcommerceProject.py", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 27 15:21:54 2018\n\n@author: jyoti\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 26 19:13:44 2018\n\n@author: jyoti\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\n\n\ndef get_data():\n df = pd.read_csv(\"ecommerce_data.csv\")\n data = df.as_matrix()\n X = data[:, :-1]\n Y = data[:, -1]\n X = np.array(X)\n Y = np.array(Y)\n X[:, 1] = (X[:, 1]-X[:, 1].mean())/X[:, 1].std()\n X[:, 2] = (X[:, 2]-X[:, 2].mean())/X[:, 2].std()\n N, D = X.shape\n \n X2 = np.zeros((N, D+3))\n X2[:, 0: D-2] = X[:, 0: D-2]\n \n for n in range(N):\n t = int(X[n, D-1])\n X2[n, t+(D-1)] = 1\n \n Z = np.zeros((N, 4))\n Z[np.arange(N), X[:, D-1].astype(np.int32)] = 1\n #X2[:, -4:] = Z\n assert(np.abs(X2[:, -4:]- Z).sum() < 10e-10)\n return X2, Y\n\ndef get_binary_data():\n X, Y = get_data()\n X2 = X[Y <= 1]\n Y2 = Y[Y <= 1]\n return X2, Y2\n\nX, Y = get_binary_data()\nX, Y = shuffle(X, Y)\nX_train = X[:-100]\nY_train = Y[:-100]\nX_test = X[-100:]\nY_test = Y[-100:]\n\nD = X.shape[1]\nN = X.shape[0]\nw = np.random.randn(D)\nb = 0\n\ndef sigmoid(a):\n return 1/(1 + np.exp(-a))\n\n\ndef forward(x, w, b):\n return sigmoid(x.dot(w) + b)\n\ndef classification_rate(Y, P):\n return np.mean(Y == P)\n\ndef crossEntropyErrorFunction(T, Y):\n return -np.mean(T*np.log(Y) + (1 - T)*np.log(1 - Y))\n\ntrain_costs = []\ntest_costs = []\nlearning_rate = 0.001\n\nfor i in range(10000):\n pY_train = forward(X_train, w, b)\n pY_test = forward(X_test, w, b)\n \n ctrain = crossEntropyErrorFunction(Y_train, pY_train)\n ctest = crossEntropyErrorFunction(Y_test, pY_test)\n train_costs.append(ctrain)\n test_costs.append(ctest)\n \n w -= learning_rate*X_train.T.dot(pY_train - Y_train)\n b -= learning_rate*(pY_train - Y_train).sum()\n if i % 1000 == 0:\n print(i, ctrain, ctest)\n \nprint(\"Final training classification rate: \", classification_rate(Y_train, np.round(pY_train)))\nprint(\"Final test classification rate: \", classification_rate(Y_test, np.round(pY_test)))\n\nlegend1, = plt.plot(train_costs, label=\"train cost\")\nlegend2, = plt.plot(test_costs, label=\"test cost\")\n\nplt.legend([legend1, legend2])\nplt.show()" }, { "alpha_fraction": 0.488178014755249, "alphanum_fraction": 0.5055632591247559, "avg_line_length": 21.492063522338867, "blob_id": "a32f95339e14fd8bb05d8f7de8fc9565cef575cd", "content_id": "5dd44bc729a5900b247dada237d6942c7b07c09c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1438, "license_type": "no_license", "max_line_length": 85, "num_lines": 63, "path": "/LinearRegression/TemplateCode.py", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 10 17:55:24 2018\n\n@author: jyoti\n\"\"\"\nfrom __future__ import division, print_function\nfrom builtins import range\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass LinearRegression(object):\n def __init__(self):\n pass\n \n def fit(self, X, Y, eta=10, epochs=2000):\n N, D = X.shape\n self.w = np.random.randn(D)\n #self.b = 0\n \n \n for i in range(epochs):\n Yhat = self.predict(X)\n delta = Yhat - Y #the error between predicted output and actual output\n self.w = self.w - eta*(X.T.dot(delta)) #performing gradient descent for w\n \n print(\"Final weights are \", self.w)\n #print(\"Final bias point is \", self.b)\n print(\"Final cost is \", self.costs)\n \n \n \n def predict(self, X):\n Y_cap = X.dot(self.w)\n return Y_cap\n \n def costs(self, X, Y):\n Yhat = self.predict(X)\n cost = (Yhat-Y).dot(Yhat-Y)\n return cost\n \ndef main():\n X = []\n Y = []\n \n for line in open(\"data_2d.csv\"):\n x1, x2, y = line.split(\",\")\n X.append([float(x1), float(x2)])\n Y.append(float(y))\n X = np.array(X)\n Y = np.array(Y)\n \n \n model = LinearRegression()\n model.fit(X, Y)\n #prediction = model.predict()\n \n \n \n \nif __name__ == '__main__':\n main()\n \n \n " }, { "alpha_fraction": 0.6818735003471375, "alphanum_fraction": 0.7116788029670715, "avg_line_length": 37.25581359863281, "blob_id": "10100a9254eed895bc8ca08268fa24927be54a93", "content_id": "222432c723dd81f7be1f0efea17e86f50746c668", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1644, "license_type": "no_license", "max_line_length": 149, "num_lines": 43, "path": "/LinearRegression/L1regularisation.py", "repo_name": "faraoman/MachineLearning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 29 21:54:38 2018\n\n@author: jyoti\n\"\"\"\nfrom __future__ import print_function, division\nfrom builtins import range\n\nimport numpy as np # importing numpy with alias np\nimport matplotlib.pyplot as plt # importing matplotlib.pyplot with alias plt\n\nNo_of_observations = 50 \nNo_of_Dimensions = 50\n\nX_input = (np.random.random((No_of_observations, No_of_Dimensions))-0.5)*10 #Generating 50x50 matrix forX with random values centered round 0.5 \nw_dash = np.array([1, 0.5, -0.5] + [0]*(No_of_Dimensions-3)) # Making first 3 features significant by setting w for them as non-zero and others zero\nY_output = X_input.dot(w_dash) + np.random.randn(No_of_observations)*0.5 #Setting Y = X.w + some random noise\n\ncosts = [] #Setting empty list for costs\nw = np.random.randn(No_of_Dimensions)/np.sqrt(No_of_Dimensions) #Setting w to random values\nL1_coeff = 5 \nlearning_rate = 0.001\n\nfor t in range(500):\n Yhat = X_input.dot(w)\n delta = Yhat - Y_output #the error between predicted output and actual output\n w = w - learning_rate*(X_input.T.dot(delta) + L1_coeff*np.sign(w)) #performing gradient descent for w\n meanSquareError = delta.dot(delta)/No_of_observations #Finding mean square error\n costs.append(meanSquareError) #Appending mse for each iteration in costs list\n \nplt.plot(costs)\nplt.title(\"Plot of costs of L1 Regularization\")\nplt.ylabel(\"Costs\")\nplt.show()\n\nprint(\"final w:\", w) #The final w output. As you can see, first 3 w's are significant , the rest are very small\n\n# plot our w vs true w\nplt.plot(w_dash, label='true w')\nplt.plot(w, label='w_map')\nplt.legend()\nplt.show()" } ]
13
jananijaan12000/CIP_Batch21
https://github.com/jananijaan12000/CIP_Batch21
a09dedc013e407ae84c31053a708a77014496174
77c0d8cdfc990797da6e05da76097c38e08d1087
e64e534efc5c7d2ef5fe3c7781a46104119138e4
refs/heads/main
2023-05-27T12:36:09.170084
2021-06-13T14:41:41
2021-06-13T14:41:41
376,560,166
0
3
null
null
null
null
null
[ { "alpha_fraction": 0.5869565010070801, "alphanum_fraction": 0.5986621975898743, "avg_line_length": 20.80769157409668, "blob_id": "f84455666052eea46eb46693b58427e1c4e4c345", "content_id": "70bd3435e9fdadad6ec8a75c30353051ccfecec3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 598, "license_type": "no_license", "max_line_length": 48, "num_lines": 26, "path": "/Chat_App/chat/jjj.py", "repo_name": "jananijaan12000/CIP_Batch21", "src_encoding": "UTF-8", "text": "def output_lable(n):\r\n if n == 1:\r\n return \"Offensive \"\r\n elif n ==0:\r\n return \"Not Offensive \"\r\n\r\ndef manual_testing(news):\r\n testing_news = {\"text\":[news]}\r\n new_def_test = pd.DataFrame(testing_news)\r\n new_def_test[\"text\"] = new_def_test[\"text\"]\r\n new_x_test = new_def_test[\"text\"]\r\n new_xv_test = tfidf_vect.transform(new_x_test)\r\n pred_sgdc = model.predict(new_xv_test)\r\n return pred_sgdc\r\n\r\nwords=news.split()\r\nwords2 =[]\r\nfor x in words:\r\n res=manual_testing(x)\r\n if res == 1:\r\n words2.append('****')\r\n else:\r\n words2.append(x)\r\n\r\ns=' '.join(words2)\r\nreturn s " } ]
1
ddward/ansible
https://github.com/ddward/ansible
45ca3bd99fd15fcd4001f8473a9b73a3e255dd6a
739c2fc3de03b8cfaaf728ffa4a2de6f3ea7595e
10148835f910e162651a436f1b00099550da9763
refs/heads/master
2021-06-27T23:09:58.966196
2021-01-23T04:10:09
2021-01-23T04:10:09
212,227,912
0
1
null
2019-10-02T00:51:25
2021-01-23T04:10:13
2021-01-27T03:46:18
CSS
[ { "alpha_fraction": 0.6520495414733887, "alphanum_fraction": 0.6549094319343567, "avg_line_length": 32.85483932495117, "blob_id": "2005d2413065e5accadd640a30dd1a8b6d79c4a3", "content_id": "42d398b7a47a2d016daf118129e191b730224ff4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2098, "license_type": "no_license", "max_line_length": 87, "num_lines": 62, "path": "/user.py", "repo_name": "ddward/ansible", "src_encoding": "UTF-8", "text": "from db import insert, exists, select_one, update\nfrom werkzeug.security import check_password_hash, generate_password_hash\nimport logging\nimport traceback\n\ndef create_user(username,password):\n try:\n formattedUsername = format_username(username)\n hashedPassword = generate_password_hash(password)\n insert( 'user', ('username', 'password'), (formattedUsername, hashedPassword))\n except Exception as e:\n logging.error(traceback.format_exc())\n\ndef user_exists(username):\n try:\n formattedUsername = format_username(username)\n return exists('user','username',formattedUsername)\n except Exception as e:\n logging.error(traceback.format_exc())\n print(\"User existence check failed\")\n\ndef get_user(username):\n try:\n formattedUsername = format_username(username)\n return select_one('user',('username','password'), 'username',formattedUsername)\n except Exception as e:\n logging.error(traceback.format_exc())\n print(\"Failed to get user\")\n\n\ndef update_user(username,password,new_password):\n try:\n formattedUsername = format_username(username)\n user = get_user(formattedUsername)\n user_password = user[1]\n if(user is not None):\n if(check_password_hash(user_password,password)):\n newHashedPassword = generate_password_hash(new_password)\n update('user',{'password':newHashedPassword},'username',formattedUsername)\n except:\n logging.error(traceback.format_exc())\n\n\ndef gen_default_user():\n\n while(True):\n password = getpass(prompt='Create a password, at least 8 characters: ')\n password2 = getpass(prompt='Confirm password: ')\n if password == password2:\n if len(password) < 8:\n print('Password must be at least 8 characters.')\n else:\n break\n else:\n print('Passwords do not match')\n try:\n create_user('default',password)\n except:\n logging.error(traceback.format_exc())\n\ndef format_username(username):\n return username.lower()" }, { "alpha_fraction": 0.5448916554450989, "alphanum_fraction": 0.5603715181350708, "avg_line_length": 28.454545974731445, "blob_id": "12b84141b0b523fedb7c7046aaf8081e2f61db16", "content_id": "9fcb132114033b94ad40ca45daa15162e021f17b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 55, "num_lines": 11, "path": "/sanitize_path.py", "repo_name": "ddward/ansible", "src_encoding": "UTF-8", "text": "import re\n\ndef sanitize(path):\n # escape nasty double-dots\n path = re.sub(r'\\.\\.', '', path)\n # then remove any duplicate slashes\n path = re.sub(r'(/)\\1+', r'\\1', path)\n # then remove any leading slashes and dots\n while(path and (path[0] == '/' or path[0] == '.')):\n path = path[1:]\n return path" }, { "alpha_fraction": 0.5497498512268066, "alphanum_fraction": 0.5603112578392029, "avg_line_length": 30.017240524291992, "blob_id": "1efbfd1466f8da661a0599cba29ad0ca1fa40f65", "content_id": "12a4435cb9f95a71b71e5dc5008499a1e0ac9193", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1799, "license_type": "no_license", "max_line_length": 92, "num_lines": 58, "path": "/penetrationTesting.py", "repo_name": "ddward/ansible", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport getpass\nimport requests\nimport os\n\ndef pTest(attack_string, attack_url, password):\n payload = {'password': password}\n with requests.Session() as s:\n p = s.post(attack_url + 'login', data=payload)\n r = requests.Request('GET', attack_url)\n prepared = s.prepare_request(r)\n prepared.url += attack_string\n response = s.send(prepared)\n print('Sending request with url:', prepared.url)\n #print('Request successful:', response.ok)\n\n if response.ok:\n soup = BeautifulSoup(response.text, 'html.parser')\n safeResponse = s.get(attack_url)\n soup2 = BeautifulSoup(safeResponse.text, 'html.parser')\n\n if (response.text == safeResponse.text):\n print(\"Attack Failed - Attack Led to Top Directory\")\n else:\n print(\"Attack may have succeded\")\n print(\"Attack response tags:\")\n for link in soup.find_all('a'):\n print(link.get('href'))\n print('')\n print('Safe Output')\n print('')\n for link in soup2.find_all('a'):\n print(link.get('href'))\n else:\n print('Attack Failed - No Such Directory')\n\n\n\ndef pWrap(attack_string):\n pTest(attack_string=attack_string, attack_url=ATTACK_URL, password=PASSWORD)\n\nPASSWORD = os.getenv('PWRD')\nATTACK_URL ='http://127.0.0.1:5050/'\nATTACK_STRINGS = [\n'../../../..',\n'test/../.././.../',\n'..',\n'level1/../..',\n'level1/../../',\n'pwd'\n]\n\nif __name__ == '__main__':\n if not PASSWORD:\n PASSWORD = print('First set environment variable PWRD. (export PWRD=YOUR_PASSWORD)')\n else:\n for attack in ATTACK_STRINGS:\n pWrap(attack)\n" }, { "alpha_fraction": 0.5385996699333191, "alphanum_fraction": 0.5403949618339539, "avg_line_length": 33.875, "blob_id": "9d681b1d71a489e5e385567de0447e0efcc4ceb7", "content_id": "faa5fdfd052d6f518455e6a4f2abf7701f5d76e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 557, "license_type": "no_license", "max_line_length": 71, "num_lines": 16, "path": "/build_dir.py", "repo_name": "ddward/ansible", "src_encoding": "UTF-8", "text": "# build_dir.py\nimport os\n\n\ndef build_dir(curPath):\n directoryDict = {}\n with os.scandir(curPath) as directory:\n for entry in directory:\n #dont include shortcuts and hidden files\n if not entry.name.startswith('.'):\n #stat dict reference:\n #https://docs.python.org/2/library/stat.html\n fileStats = entry.stat()\n directoryDict[entry.name] = {\"is_dir\" : entry.is_dir(),\n \"size\" : fileStats.st_size}\n return directoryDict" }, { "alpha_fraction": 0.584488570690155, "alphanum_fraction": 0.5901086330413818, "avg_line_length": 30.023256301879883, "blob_id": "757ee850e0fb8387480f8f5629149d787584b35f", "content_id": "41cb8cc56fa2e20ce7d3205ebf5f553fb0eb80e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2669, "license_type": "no_license", "max_line_length": 111, "num_lines": 86, "path": "/db.py", "repo_name": "ddward/ansible", "src_encoding": "UTF-8", "text": "from getpass import getpass\nimport os\nimport sqlite3\nfrom werkzeug.security import generate_password_hash\nfrom flask import g\nimport traceback\nimport logging\n\npath = os.getcwd()\nDATABASE = os.path.join(path, 'ansible.db')\n\ndef init_db():\n with app.app_context():\n db = sqlite3.connect(DATABASE)\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\ndef get_db(app):\n with app.app_context():\n if 'db' not in g:\n g.db = sqlite3.connect(\n DATABASE,\n detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n\n return g.db\n\ndef insert(table,columnTuple,valueTuple):\n try:\n dbConnection = sqlite3.connect(DATABASE)\n columnTupleString = ', '.join(columnTuple)\n dbConnection.execute(\n 'INSERT INTO ' + table + ' (' + columnTupleString + ') VALUES (?, ?)',\n (valueTuple)\n )\n dbConnection.commit()\n except Exception as e:\n logging.error(traceback.format_exc())\n\ndef select_one(table, return_columns, query_column, value):\n try:\n dbConnection = sqlite3.connect(DATABASE)\n result = (dbConnection.execute(\n 'SELECT ' + ', '.join(return_columns) + ' FROM ' + table + ' WHERE ' + query_column + '= (?) Limit 1', \n (value,)\n ).fetchone())\n return result\n except Exception as e:\n logging.error(traceback.format_exc())\n print(\"User existence check failed\")\n\ndef exists(table,column,value):\n try:\n dbConnection = sqlite3.connect(DATABASE)\n result = dbConnection.execute(\n 'SELECT CASE WHEN EXISTS( SELECT 1 FROM ' + table + ' WHERE ' + column + '= (?)) THEN 1 ELSE 0 END', \n (value,)\n ).fetchone()\n if result[0] == 1:\n return True\n else:\n return False\n except Exception as e:\n logging.error(traceback.format_exc())\n\n\ndef update(table, update_dict, query_column, query_value):\n try:\n dbConnection = sqlite3.connect(DATABASE)\n result = (dbConnection.execute(\n 'UPDATE ' + table + ' SET ' + build_set_statement(update_dict) + ' WHERE ' + query_column + '= (?)', \n (query_value,)\n ).fetchone())\n dbConnection.commit()\n return result\n except Exception as e:\n logging.error(traceback.format_exc())\n\ndef build_set_statement(updated_field_dict):\n setItems = []\n for field in updated_field_dict:\n setItems.append(field + ' = \\'' + updated_field_dict[field] + '\\'')\n setFields = ', '.join(setItems)\n return setFields\n\n" }, { "alpha_fraction": 0.7562723755836487, "alphanum_fraction": 0.7795698642730713, "avg_line_length": 24.363636016845703, "blob_id": "ab265e8a687375562ea2eb6530d80879921257fc", "content_id": "d612bf46d6c3e0f28c6984e46ffafa9ef74fed3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 558, "license_type": "no_license", "max_line_length": 66, "num_lines": 22, "path": "/Dockerfile", "repo_name": "ddward/ansible", "src_encoding": "UTF-8", "text": "# Use an official Python runtime as a parent image\nFROM python:3\n\n# Set the working directory to /app\nWORKDIR /app\n\n# Copy the current directory contents into the container at /app\nCOPY . /app\n\n# Install any needed packages specified in requirements.txt\nRUN pip install --trusted-host pypi.python.org -r requirements.txt\n\n# Make port 5000 available to the world outside this container\nEXPOSE 5000\n\n# Define environment variable\nENV NAME World\nENV FLASK_ENV development\nENV FLASK_APP app\n\n# Run app.py when the container launches\nCMD flask run --host=0.0.0.0\n" }, { "alpha_fraction": 0.649761974811554, "alphanum_fraction": 0.6516249179840088, "avg_line_length": 30.148387908935547, "blob_id": "6ea872682aa5b5e9fdf066a31d667f75ded542a9", "content_id": "717dbebb24513a95bdde7bc67ab2c5bf5d746900", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4831, "license_type": "no_license", "max_line_length": 150, "num_lines": 155, "path": "/app.py", "repo_name": "ddward/ansible", "src_encoding": "UTF-8", "text": "from cryptography.fernet import Fernet\nimport datetime\nfrom flask import (flash, Flask, g, Markup, redirect, render_template, request,\n send_from_directory, session, url_for)\nimport functools\nimport logging\nimport os\nfrom secrets import token_urlsafe\nimport sqlite3\nimport sys\nfrom werkzeug.utils import secure_filename\nfrom werkzeug.security import check_password_hash, generate_password_hash\nfrom build_dir import build_dir\nimport sanitize_path\nfrom db import get_db\nfrom user import create_user, user_exists, gen_default_user, get_user, update_user\nimport html\n\n\nlogging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.urandom(256) # TODO: change to environemnt variable\napp.config[\"CRYPTO_KEY\"] = Fernet.generate_key() # TODO put this somewhere where it wont update often possibly environmnet analize impact of changing.\n\npath = os.getcwd()\ndatabase = os.path.join(path, 'ansible.db')\n\ndb = get_db(app)\n\ndef login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if 'authenticated' not in session:\n return redirect(url_for('login'))\n return view(**kwargs)\n return wrapped_view\n\[email protected]('/', defaults={'loc': \"\"}, methods=('GET',))\[email protected]('/<path:loc>', methods=('GET',))\n@login_required\ndef ansible(loc):\n logging.debug('made it here')\n sanitize_path.sanitize(loc)\n\n # TODO: if loc is empty return the home directory for the node\n # possible security concern - could ask for a higher level node\n # TODO: for future addition of link sending - store encrypted version\n # of top level directory in session can possibly use a werkzeug module\n # TODO: check if input is an encrypted link (use a /share/ or something to indicate)\n # TODO: process encrypted link\n # TODO: process a normal link\n # TODO: get the the home directory\n\n # TODO: authenticate the requested directory\n\n logging.debug(loc)\n\n currentDir = os.path.join('cloud-drive', loc) #update to be maliable for sharing\n\n currentPath = os.path.join(path, currentDir)\n\n logging.debug(os.path.splitext(currentPath)[1])\n logging.debug(currentDir)\n logging.debug(path)\n logging.debug(currentPath)\n logging.debug(loc)\n\n fileExtension = os.path.splitext(currentPath)[1]\n if fileExtension:\n splitUrl = currentPath.rsplit('/', 1)\n localDir = splitUrl[0]\n filename = splitUrl[1]\n absPath = os.path.join(path, 'cloud-drive', localDir)\n return send_from_directory(directory=absPath, filename=filename)\n\n directoryDict = build_dir(currentPath)\n\n return render_template('index-alt.html', directory=directoryDict, curDir=loc)\n\[email protected](\"/login\", methods=('GET', 'POST'))\ndef login():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n error = None\n\n user = get_user(username)\n if user is not None:\n user_password = user[1]\n if not check_password_hash(user_password, password):\n error = 'Incorrect password, please try again.'\n else:\n error = 'User not found'\n\n if error is None:\n session.clear()\n session['authenticated'] = 'true'\n session['user_id'] = token_urlsafe()\n return redirect(url_for('ansible'))\n\n flash(error)\n\n return render_template('login.html')\n\[email protected](\"/signup\", methods=('GET','POST'))\ndef signup():\n if request.method == 'POST':\n username = request.form['name']\n password = request.form['password']\n error = None\n\n if not user_exists(username):\n create_user(username,password)\n else:\n error = 'Username already exists.'\n\n if error is None:\n return redirect(url_for('login'))\n\n flash(error)\n\n\n return render_template('signup.html')\n\[email protected](\"/updatepassword\", methods=('GET','POST'))\ndef update_password():\n if request.method == 'POST':\n\n username = request.form['username']\n prev_password = request.form['password']\n new_password = request.form['new_password']\n verified_new_password = request.form['verify_new_password']\n\n error = None\n if(new_password == verified_new_password):\n if user_exists(username):\n update_user(username,prev_password,new_password)\n else:\n error = 'User doesnt exist.'\n else:\n error = 'Passwords do not match'\n\n if error is None:\n return redirect(url_for('login'))\n\n flash(error)\n\n\n return render_template('update-password.html')\n\[email protected](\"/logout\", methods=('GET',))\ndef logout():\n del session['authenticated']\n return redirect(url_for('login'))\n\n\n\n" }, { "alpha_fraction": 0.7662061452865601, "alphanum_fraction": 0.7789585590362549, "avg_line_length": 38.16666793823242, "blob_id": "67fdd31766f668280000306488892cd428b078ac", "content_id": "6e9aae0326b668113a9410cc96cfea4a1c2138a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 941, "license_type": "no_license", "max_line_length": 127, "num_lines": 24, "path": "/documentation/todo.txt", "repo_name": "ddward/ansible", "src_encoding": "UTF-8", "text": "1. Add a log-out function\n * button on file directory page\n * redirects to /logout\n * /logout function in flask - remove authenticatated from session\nDONE\n\n2. change functionality to be based on get-requests and url paths similar to apache\n\t* redirect to login required before pulling any route variables from url\n3. change the secret key to environment variable - init on docker load (either random num or require user input\n\n4. update image to remove weird greyed out background\n\n5. add ability to preview files\n\t1. images\n\n6. handle dynamic dns with intermittent pinging of host server - default to hourly\n\n7. add a header and Jane Storage logo\n\n8. add a way to download ansible from jane-storage site include a github link\n\n9. look into Upnp as a default for initiation - how to start upnp on docker init\n\n10. add remove and modify file name function with sockets - broadcast that a file has been moved or renamed to all active users\n\n" }, { "alpha_fraction": 0.875, "alphanum_fraction": 0.875, "avg_line_length": 7, "blob_id": "cd2201c3a86666469514404ef10cef3d8f3d1fc3", "content_id": "ac651a4897bd60e03ece2b7328ba5e0737c67807", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 40, "license_type": "no_license", "max_line_length": 12, "num_lines": 5, "path": "/requirements.txt", "repo_name": "ddward/ansible", "src_encoding": "UTF-8", "text": "Flask\nuwsgi\nnginx\nwerkzeug\ncryptography\n" } ]
9
Lucasgb7/Simulacao_Discreta
https://github.com/Lucasgb7/Simulacao_Discreta
aa52b051732b640ecc2681f7fb2823db492cd328
81a2b0e7a91b2310b62521aba0bf0ef26588019c
9a7d8830080a68f97a2a0a1e34acc5202fa0575e
refs/heads/main
2023-05-29T21:14:47.706125
2021-06-18T17:43:44
2021-06-18T17:43:44
356,298,179
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4305039644241333, "alphanum_fraction": 0.4480106234550476, "avg_line_length": 40.900001525878906, "blob_id": "c8dc8f52bda4c9acee3c41f395f892a7ebd225da", "content_id": "d59a3fdddea7ea4636e129b6ecea7d04d6a23a04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3771, "license_type": "no_license", "max_line_length": 100, "num_lines": 90, "path": "/AV1/elevador.py", "repo_name": "Lucasgb7/Simulacao_Discreta", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom random import randrange, uniform\n\nclass Material():\n Type = 0\n Time = 0\n Weight = 0 \n TimeStamp = 0\n\n def __init__(self, Type):\n self.Type = Type\n\n def materialValues(self):\n if self.Type == 0: # Material A\n self.Weight = 200 # 200kg\n self.Time = int(uniform(3,8)) # 5 +- 2 (uniforme)\n elif self.Type == 1: # Material B\n self.Weight = 100 # 100kg\n self.Time = 6 # 6 (constante)\n else: # Material C\n self.Weight = 50 # 50kg\n if randrange(100) <= 33: \n self.Time = 2 # P(2) = 0.33\n else:\n self.Time = 3 # P(3) = 0.67\n\n\nif __name__ == \"__main__\":\n simulationTime = 60 # Tempo de simulacao (min)\n totalWeight = 0 # Peso do elevador\n i = 0 # Contador de minutos\n averageTimeA = [] # Calcular tempo medio Mat A\n averageTimeB = [] # Calcular tempo medio Mat B\n movedMaterialC = 0 # Contagem de Material C \n materialsLift = [] # Materiais dentro do elevador\n materialsQueue = [] # Materiais na fila do elevador\n\n while i < simulationTime:\n print(\"\\nTempo: \", int(i),\"min\")\n mat = Material(randrange(3)) # Criando material (0~2)=(A~C)\n mat.materialValues() # Definindo tempo e pesos\n mat.TimeStamp = i # Definindo tempo que o material chegou\n materialsQueue.append(mat) # Adicionando material na fila\n\n print(\"MAT[\",mat.Type,\"]\")\n for m in materialsQueue: # Verifica a fila de materiais\n if m.Weight + totalWeight <= 400: # Checa se pode entrar no elevador\n if m.Type == 1:\n averageTimeB.append(i - m.TimeStamp) # Monitora o material B\n materialsQueue.remove(m)\n materialsLift.append(m)\n totalWeight += m.Weight\n i = i + m.Time\n if m.Type == 0: # Monitorar Material A\n m.TimeStamp = i\n elif m.Type == 2: # Monitorar Material C\n movedMaterialC =+ 1\n\n print(\"-----------------------------------\")\n waiting = []\n \n queue = []\n for m in materialsQueue:\n queue.append(m.Type)\n\n print(\"Fila:\", queue)\n lift = []\n for m in materialsLift:\n lift.append(m.Type)\n print(\"Elevador:\", lift)\n\n print(\"Peso elevador:\", totalWeight,\"kg\")\n print(\"Tempo:\", i,\"min\")\n print(\"-----------------------------------\")\n\n if totalWeight == 400: # Chega no peso maximo\n i = i + 4 # Tempo de subir, descarregar e descer\n totalWeight = 0\n\n for m in materialsLift: \n if m.Type == 0: \n averageTimeA.append((i - 1) - m.TimeStamp) # Monitora tempo total do Material A\n\n materialsLift.clear() # Remove todos os itens do elevador\n\n i += 1\n \n print(\"\\nTempo medio de transito Material A: \", sum(averageTimeA)/len(averageTimeA), \"min\")\n print(\"Tempo medio de espera do Material B: \", sum(averageTimeB)/len(averageTimeB), \"min\")\n print(\"Números de caixas de Material C: \", movedMaterialC)" }, { "alpha_fraction": 0.5846042037010193, "alphanum_fraction": 0.6165577173233032, "avg_line_length": 28.319149017333984, "blob_id": "b19b7568d89f5af8a8da8c7dcd1b4834f7e86bbe", "content_id": "e69c94f9d610f5829577b7249a5870ced839b8b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1379, "license_type": "no_license", "max_line_length": 63, "num_lines": 47, "path": "/AV1/padaria.py", "repo_name": "Lucasgb7/Simulacao_Discreta", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom random import randrange\n\n# gera o numero de clientes com base na probabilidade\ndef numberCustomers(value):\n if value > 65:\n return 8\n elif value > 35 and value < 65:\n return 10\n elif value > 10 and value < 35:\n return 12\n else:\n return 14\n\n# gera o numero de duzias por cliente com base na probabilidade\ndef numberBagelsPerCustomer(value):\n if value > 60:\n return 1\n elif value > 30 and value < 60:\n return 2\n elif value > 10 and value < 30:\n return 3\n else:\n return 4\n\nif __name__ == \"__main__\":\n days = 15 # nº iteracoes\n bagelCost = 3.8 # custo de fabrica da duzia de baguete\n bagelPrice = 5.4 # preco da duzia de baguete\n bagelsAverage = 0\n for day in range(days):\n print(\"\\nDia \", day)\n # Clientes\n value = randrange(100)\n customers = numberCustomers(value)\n print(\"Nº Clientes: \", customers)\n # Baguetes por cliente\n value = randrange(100)\n bagelsPerCustomer = numberBagelsPerCustomer(value)\n print(\"Baguetes/Cliente: \", bagelsPerCustomer)\n # Baguetes para assar\n bagelsToCook = customers * bagelsPerCustomer\n print(\"Baguetes para assar: \", bagelsToCook)\n\n bagelsAverage += bagelsToCook\n\n print(\"\\n\\nMedia de Baguetes: \", bagelsAverage/days)" }, { "alpha_fraction": 0.47289034724235535, "alphanum_fraction": 0.4981655180454254, "avg_line_length": 47.58415985107422, "blob_id": "3d8a4a22c6313092d7b5112d3dfe477a35cf2faa", "content_id": "89efac5d9002ad953cea7e1dfeec035df4a0e98a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4912, "license_type": "no_license", "max_line_length": 134, "num_lines": 101, "path": "/AV1/maquina.py", "repo_name": "Lucasgb7/Simulacao_Discreta", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom random import randrange\n\n\ndef draw(value, probability):\n return int(np.random.choice(value, 1, replace=False, p=probability))\n\nif __name__ == \"__main__\":\n # Criando os vetores de valores e suas probabilidades\n bearingLifeExpect = np.arange(1000, 2000, 100)\n probabilityLifeExpect = np.array([0.1, 0.13, 0.25, 0.13, 0.09, 0.12, 0.02, 0.06, 0.05, 0.05])\n waitingTimeArray = np.arange(5, 20, 5)\n probabilityWaitingTime = [0.6, 0.3, 0.1]\n\n simluationTime = 10000 # 10.000h\n bearing = [0,0,0] # Rolamentos\n changingTime = [20, 30, 40] # Tempo de troca = 1: 20, 2: 30, 3: 40\n\n # Sorteia tempo de vida para os rolamentos\n for i in range(len(bearing)):\n bearing[i] = draw(bearingLifeExpect, probabilityLifeExpect)\n\n t = 0 # Contador para o tempo de simulacao\n brokenBearings = 0 # Numero de rolamentos quebrados\n totalCost = 0 # Custo total da simulacao\n\n commingEvent = []\n exitEvent = []\n\n\n print(\"--------------------------------\\nDefina o numero de rolamentos a serem trocados: \")\n print(\"[1]: Troca UM rolamento quando algum rolamento quebra.\")\n print(\"[2]: Troca TRÊS rolamentos quando algum rolamento quebra.\")\n option = int(input(\"> \"))\n print(\"--------------------------------\")\n\n if option == 1:\n print(\"Simulação 1: Troca de UM rolamento por vez\\n\")\n print(\"--------------------------------\")\n while t <= simluationTime:\n\n for i in range(len(bearing)):\n if bearing[i] == t: # Caso rolamento atinga a vida util\n newTime = draw(bearingLifeExpect, probabilityLifeExpect) # Define um novo tempo de vida para o rolamento\n print(\"---------------\")\n print(\"Rolamento[\", i, \"]\")\n print(\"Quebrou em: \", t, \"h\\tExpectativa de vida: \", bearing[i], \"h\")\n print(\"Nova expectativa de vida: \", newTime, \"h\")\n bearing[i] += newTime # Soma lifetime anterior com novo para posteriormente\n brokenBearings += 1 # Incrementa o numero de rolamentos quebrados\n\n if brokenBearings > 0: # Caso haja um rolamento quebrado\n waitingTime = draw(waitingTimeArray, probabilityWaitingTime) # Atribui nova vida util\n spentTime = changingTime[brokenBearings-1] # Pega o tempo gasto para consertar os bearing\n cost = 5 * (waitingTime + spentTime) + spentTime + brokenBearings * 20 # Calcula o valor do concerto\n totalCost += cost\n\n\n print(\"Tempo concerto: \", spentTime,\"\\tTempo espera: \", waitingTime)\n print(\"Custo concerto: \", cost, \"R$\\tCusto total: \", totalCost, \"R$\")\n\n brokenBearings = 0\n\n t += 100\n\n elif option == 2:\n print(\"Simulação 2: Troca de TRÊS rolamento por vez\\n\")\n print(\"--------------------------------\")\n while t <= simluationTime:\n\n for i in range(len(bearing)):\n if bearing[i] == t:\n newTime1 = draw(bearingLifeExpect, probabilityLifeExpect)\n newTime2 = draw(bearingLifeExpect, probabilityLifeExpect)\n newTime3 = draw(bearingLifeExpect, probabilityLifeExpect)\n print(\"---------------\")\n print(\"Rolamento[1]:\")\n print(\"Quebrou em: \", t, \"h\\tExpectativa de vida: \", bearing[0], \"h\")\n print(\"Nova expectativa de vida: \", newTime1, \"h\")\n print(\"---------------\")\n print(\"Rolamento[2]:\")\n print(\"Quebrou em: \", t, \"h\\tExpectativa de vida: \", bearing[1], \"h\")\n print(\"Nova expectativa de vida: \", newTime2, \"h\")\n print(\"---------------\")\n print(\"Rolamento[3]:\")\n print(\"Quebrou em: \", t, \"h\\tExpectativa de vida: \", bearing[2], \"h\")\n print(\"Nova expectativa de vida: \", newTime3, \"h\")\n print(\"---------------\")\n bearing[0] += newTime1\n bearing[1] += newTime2\n bearing[2] += newTime3\n\n waitingTime = draw(waitingTimeArray, probabilityWaitingTime)\n spentTime = changingTime[2]\n cost = 5 * (waitingTime +spentTime) + spentTime + 3 * 20\n totalCost += cost\n\n print(\"Tempo concerto: \", spentTime,\"\\tTempo espera: \", waitingTime)\n print(\"Custo concerto: \", cost, \"R$\\tCusto total: \", totalCost, \"R$\")\n \n t += 100" }, { "alpha_fraction": 0.5657649040222168, "alphanum_fraction": 0.5886194109916687, "avg_line_length": 26.5, "blob_id": "0c75894582a2ae51b6975214d6468744a68f64c1", "content_id": "84776103154e91efa1d73fe79bb510cc4f83791a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2151, "license_type": "no_license", "max_line_length": 94, "num_lines": 78, "path": "/RNGs/role30_RNG.py", "repo_name": "Lucasgb7/Simulacao_Discreta", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport time\nimport qi2\n\n# left XOR entre o cara do centro e da direita\ndef rule(array):\n return array[0] ^ (array[1] or array[2])\n \n\n# primeira linha do mosaico\ndef init(largura):\n array = [0] * largura # inicio do mosaico, no começa inicializa com 1\n # se for impar, coloca 1 no meio\n if largura % 2:\n array[largura // 2] = 1\n else: # caso for par coloca so na metade (nao exata)\n array.insert(largura//2, 1)\n\n return array\n\ndef rule30(linhaAntiga):\n largura = len(linhaAntiga)\n linhaAntiga = [0] + linhaAntiga + [0] # ajustar com zeros na direita e esquerda da linha\n novaLinha = []\n \n for i in range(largura):\n novaLinha.append( rule(linhaAntiga[i:i+3]) ) # coloca uma celula (1 ou 0)\n\n return novaLinha\n\n# usa largura e quantos bits vai utilizar pra fazer essa largura\ndef applyRule(largura, bits):\n matriz = [init(largura)]\n\n colunaCentro = []\n colunaCentro.append(matriz[0][largura // 2])\n\n while not matriz[-1][0]:\n matriz.append(rule30(matriz[-1])) # executa a regra na ultima linha\n colunaCentro.append(matriz[-1][largura // 2]) # atualiza o centro da matriz\n\n return [matriz, colunaCentro[-bits:]]\n\ndef listToString(s): \n # initialize an empty string\n str1 = \"\" \n # traverse in the string \n for ele in s: \n str1 += str(ele) \n # return string \n return str1\n\nif __name__ == \"__main__\":\n seed = int(str(time.time_ns())[14:17])\n bits = 8\n\n #start = time.time()\n n = int(input(\"Número de iterações (n): \"))\n k = int(input(\"Número de categorias (k): \")) \n results = []\n for i in range(n):\n time.sleep(1)\n result = applyRule((seed+bits)*2, bits)\n rng = listToString(result[1])\n rng = int(listToString(rng), 2)\n print(rng)\n results.append(rng)\n\n #end = time.time()\n '''\n x2 = qi2.qi2Test(k, n, results)\n\n print(\"================= RESULTADOS =================\")\n #print(\"Tempo de simulacao: \", end - start)\n print(\"X²: \", x2)\n print(\"Graus de Liberdade (GL):\", k - 1)\n print(\"Significância: 0.05\")\n '''" }, { "alpha_fraction": 0.6730769276618958, "alphanum_fraction": 0.7213352918624878, "avg_line_length": 38.371429443359375, "blob_id": "f41027561ee69d53bb4549b98a9a81006460e314", "content_id": "9e19d0316d51ad0bee2ae6dced802540daea073c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2805, "license_type": "no_license", "max_line_length": 138, "num_lines": 70, "path": "/Fast Counter-Based RNG/README.md", "repo_name": "Lucasgb7/Simulacao_Discreta", "src_encoding": "UTF-8", "text": "## Introdução\n\n- Proposto por Bernard Widynski em 2020. ([Squares: A Fast Counter-Based RNG](https://arxiv.org/abs/2004.06278))\n- Derivado do método: “Middle Square Weyl Sequence RNG”. ([Middle Square Weyl Sequence RNG](https://arxiv.org/abs/1704.00358))\n- Utiliza transformação middle-square de von Neummann.\n- Usa apenas 4 estados de “squaring”, diferentemente de Philox que utiliza 10.\n\n\n## Algoritmo\n\n```\ndef squares(ctr, key):\n y = x = ctr * key; z = y + key\n two5 = np.uint64(32) # 2^5\n x = x * x + y; x = (x >> two5) | (x << two5)\n x = x * x + z; x = (x >> two5) | (x << two5)\n x = x * x + y; x = (x >> two5) | (x << two5)\n\n return (x*x + z) >> two5 \n```\n- Substitui a sequência Weyl (w += s) por um contador multiplicado por uma chave.\n- Se mutiplicarmos o contador e a chave e somarmos o quadrado, teremos o mesmo efeito da sêquência Weyl.\n- A saída será uniforme e 2^64 valores aleatórios podem ser gerados por chave.\n- São realizados quatro rodadas de “enquadramento” para ser suficiente ao passar nos testes estatísticos.\n\n## Vantagens e Desvantagens\n### Vantagens\n\n- Significativamente mais rápido que o modelo Philox.\n- Produz dados concretos e uniformes.\n- Pode ser calculado com até 2 bilhões de chaves, sendo cada chave gerando até 2^64 valores aleatórios.\n\n### Desvantagens\n\n- Estudo recente (2020) e ainda não utilizado amplamente.\n\n## Testes\n\n- 300 testes BigCrush usando chaves aleatórias.\n- Testes de correlação entre fluxos, testes de contador, testes de bits invertidos e testes de uniformidade. (Não especificado qual teste)\n- 300 testes PractRand usando chaves aleatórias.\n- Para obter 1 bilhão de números aleatórios, Philox demorou 2,21s enquanto esse durou 1,34s. (Intel Core i7-9700 3.0 GHz)\n\n## Resultados obtidos\n\n#### Grafico que gera cores através do range dos valores gerados\n![Grafico1](https://raw.githubusercontent.com/Lucasgb7/Simulacao_Discreta/main/Fast%20Counter-Based%20RNG/resultados/grafico1.png)\n\nN = 1048576; Tempo de Simulação = 703,27 segundos; (Intel Core i5-4690)\n\n#### Grafico que gera cores através do range dos valores gerados\n![Grafico2](https://raw.githubusercontent.com/Lucasgb7/Simulacao_Discreta/main/Fast%20Counter-Based%20RNG/resultados/grafico2.png)\n\nN = 16; Tempo de Simulação = 0,029 segundos; (Intel Core i5-4690)\n\n#### Grafico Scatter\n![Grafico3](https://raw.githubusercontent.com/Lucasgb7/Simulacao_Discreta/main/Fast%20Counter-Based%20RNG/resultados/grafico3.png)\n\nN = 125; Tempo de Simulação = 0,091 segundos; (Intel Core i5-4690)\n\n## Autores\n\n### Desenvolvedores do código\n\n- [Lucas Jose da Cunha](https://github.com/Lucasgb7)\n- [Luiz Alberto Zimmermann Zabel Martins Pinto](https://github.com/Luiz-Zimmermann)\n \n### Autor do método\n\n[Bernard Widynski](https://arxiv.org/search/cs?searchtype=author&query=Widynski%2C+B)\n" }, { "alpha_fraction": 0.5118259191513062, "alphanum_fraction": 0.5506149530410767, "avg_line_length": 22.511110305786133, "blob_id": "796c2068277874c1a580f02b1937b823f8575ae3", "content_id": "71dda2e9df47d3d8c7c156871b6bbb928466703f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1059, "license_type": "no_license", "max_line_length": 55, "num_lines": 45, "path": "/RNGs/fibonacci_RNG.py", "repo_name": "Lucasgb7/Simulacao_Discreta", "src_encoding": "UTF-8", "text": "import qi2\n\ndef fbn(option, array, mod, k, j):\n if option == 0:\n result = (array[j-1] + array[k-1]) % mod\n elif option == 1:\n result = (array[j-1] - array[k-1]) % mod\n elif option == 2:\n result = (array[j-1] * array[k-1]) % mod\n else:\n result = (array[j-1] ^ array[k-1]) % mod\n\n return result\n\nseed = '123456789'\n#j = int(input(\"J:\"))\nj = 1\n#k = int(input(\"K:\"))\nk = 8\n#mod = int(input(\"MOD:\"))\nmod = 1000\nn = int(input(\"Numero de iteracoes:\"))\ncategories = int(input(\"Numero de categorias: \"))\nresults = []\n\narray = []\nfor i in range(len(seed)):\n array.append(int(seed))\n\nprint(\"0: '+' \\n1: '-' \\n2: '*' \\n3: '^'\")\noption = int(input(\"Defina a operação: \"))\nfor i in range(n):\n result = fbn(option, array, mod, k, j)\n print(\"Resultado: \", result)\n array.remove(array[0])\n array.append(result)\n results.append(result)\n\nx2 = qi2.qi2Test(categories, n, results)\n\n\nprint(\"================= RESULTADOS =================\")\nprint(\"X^2: \", x2)\nprint(\"GL =\", categories - 1)\nprint(\"Probabilidade = 0.05\")" }, { "alpha_fraction": 0.48446646332740784, "alphanum_fraction": 0.5150833129882812, "avg_line_length": 25.77108383178711, "blob_id": "48f4507082ff9fc3ef47e04491d97b9cd02e35de", "content_id": "bdfc4f115b0b041594612542ebea6a62f4b0a52d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2221, "license_type": "no_license", "max_line_length": 99, "num_lines": 83, "path": "/Fast Counter-Based RNG/counterBasedRNG.py", "repo_name": "Lucasgb7/Simulacao_Discreta", "src_encoding": "UTF-8", "text": "import time\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import NoNorm\n\nimport qi2\n\ndef squares(ctr, key):\n y = x = ctr * key\n z = y + key\n two5 = np.uint64(32)\n x = x * x + y; x = (x >> two5) | (x << two5)\n x = x * x + z; x = (x >> two5) | (x << two5)\n x = x * x + y; x = (x >> two5) | (x << two5)\n\n return (x*x + z) >> two5 \n\n\ndef draw(i):\n nx = int(math.sqrt(i))\n #print(\"tamanho da imagem\", nx)\n imagem = np.zeros((nx,nx), dtype=np.uint8)\n #print(\"tam: \", i)\n p = 0\n ny = nx\n for i in range(nx):\n for j in range(ny):\n \n imagem[i,j] = pixelvet[p]\n #print(i, j, pixelvet[p])\n p += 1\n \n \n return imagem\n\nif __name__ == \"__main__\":\n np.seterr(all='ignore') # ignora erros de overflow, divisao/zero, underflow, etc...\n key = np.uint64(0xf6235eca95b2c1e7)\n #sum = np.uint64(0)\n #pixelvet = []\n #vetVal = []\n\n n = np.uint64(input(\"Numero de iteracoes (n): \"))\n k = int(input(\"Numero de categorias (k): \"))\n gl = k - 1; print(\"Grau de Liberdade (GL): \", gl)\n #p = float(input(\"Probabilidade de sucesso: \"))\n \n results = [] \n \n #start = time.time()\n for i in range(n):\n result = squares(np.uint64(i), key)\n result = result / (2**32) # normaliza resultado de 32 bits\n #print(\"[\", i, \"]:\", result)\n results.append(result)\n #pixelvet.append(result)\n #vetVal.append(result)\n \n x2, intervals = qi2.qi2Test(k, n, results)\n\n #end = time.time()\n print(\"================= RESULTADOS =================\")\n #print(\"Media: \", hex(sum//n))\n #print(\"Tempo de simulacao: \", end - start)\n \n #pIndex = qi2.getProbabilityIndex(p)\n #x2Max = qi2.table[gl-1][pIndex]\n #print(\"x2Max: \", x2Max)\n print(\"x2:\" , x2)\n \n qi2.histGraph(results, intervals)\n '''\n plt.figure(\"Graficos\",figsize=(15,12))\n plt.subplot(211)\n imagem = draw(n)\n plt.imshow(imagem, aspect=\"auto\", cmap='gray', vmin=0, vmax=255,norm=NoNorm())\n plt.axis(\"off\")\n plt.subplot(212)\n plt.plot(vetVal, 'ro')\n plt.grid(1)\n plt.show()\n '''" }, { "alpha_fraction": 0.46468400955200195, "alphanum_fraction": 0.5195167064666748, "avg_line_length": 25.924999237060547, "blob_id": "e8b1a16dd552103c89126fda1045b3e8d452a289", "content_id": "1b12a193eb389579a04b961865191d1a9ac48a6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1076, "license_type": "no_license", "max_line_length": 59, "num_lines": 40, "path": "/RNGs/xorShift_RNG.py", "repo_name": "Lucasgb7/Simulacao_Discreta", "src_encoding": "UTF-8", "text": "import time\nimport numpy as np\nimport qi2\n\ndef xorShift(y):\n y ^= np.uint32(y << 13)\n y ^= np.uint32(y >> 17)\n y ^= np.uint32(y << 15)\n\n return y\n\nif __name__ == \"__main__\":\n np.seterr(all='ignore')\n seed = 2463534242\n y = np.uint32(seed)\n #a, b, c = 13, 17, 15\n #iteracoes = 1000\n\n n = np.uint64(input(\"Numero de iteracoes (n): \"))\n k = int(input(\"Numero de categorias (k): \"))\n gl = k - 1; print(\"Grau de Liberdade (GL): \", gl)\n p = float(input(\"Probabilidade de sucesso: \"))\n results = []\n #start = time.time()\n for i in range(n):\n y = (xorShift(y))\n aux = y / 4294967295 # normaliza resultado\n #print(\"Valor: \", aux)\n #print(\"y(\", i, \") = \", aux)\n results.append(aux)\n \n #end = time.time()\n x2, intervals = qi2.qi2Test(k, n, results)\n\n print(\"================= RESULTADOS =================\")\n #print(\"Tempo de simulacao: \", end - start)\n pIndex = qi2.getProbabilityIndex(p)\n x2Max = qi2.table[gl-1][pIndex]\n print(\"x2Max: \", x2Max)\n print(\"x2:\" , x2)" }, { "alpha_fraction": 0.4021207094192505, "alphanum_fraction": 0.4771614968776703, "avg_line_length": 23.058822631835938, "blob_id": "ef7bc5a8d2ed2ba75180f88c4a5e558c0cb6fded", "content_id": "5e67ca6b40340be5b96dd7a2ac3c1b1885d9f17d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1226, "license_type": "no_license", "max_line_length": 59, "num_lines": 51, "path": "/RNGs/wichmann_RNG.py", "repo_name": "Lucasgb7/Simulacao_Discreta", "src_encoding": "UTF-8", "text": "import numpy as np\nimport time\nimport qi2\n\ndef wichmann(x, y, z):\n x = 171 * (x % 177) - 2 * (x / 177)\n y = 172 * (y % 177) - 35 * (y / 176)\n z = 170 * (z % 178) - 63 * (z / 178)\n\n if x < 0:\n x = x + 30269\n elif y < 0:\n y = y + 30307\n elif z < 0:\n z + z + 30323\n\n result = x/30269 + y/30307 + z/30323\n result = result - int(result)\n\n return result\n\n\nif __name__ == \"__main__\":\n np.seterr(all='ignore')\n x = 1234\n y = x + 1\n z = y + 1\n #iteracoes = 1000\n\n n = np.uint64(input(\"Numero de iteracoes (n): \"))\n k = int(input(\"Numero de categorias (k): \"))\n gl = k - 1; print(\"Grau de Liberdade (GL): \", gl)\n p = float(input(\"Probabilidade de sucesso: \")) \n results = []\n #start = time.time()\n for i in range(n):\n w = wichmann(x, y, z)\n y += 1\n z += 2\n print(\"w(\", i, \") = \", y)\n results.append(w)\n\n #end = time.time()\n x2, intervals = qi2.qi2Test(k, n, results)\n \n print(\"================= RESULTADOS =================\")\n #print(\"Tempo de simulacao: \", end - start)\n pIndex = qi2.getProbabilityIndex(p)\n x2Max = qi2.table[gl-1][pIndex]\n print(\"x2Max: \", x2Max)\n print(\"x2:\" , x2)" }, { "alpha_fraction": 0.44809460639953613, "alphanum_fraction": 0.4783180058002472, "avg_line_length": 21.264705657958984, "blob_id": "0216d26f16e99b27cafabd2b6f7e3f90eba6640f", "content_id": "cceebcfe13e5efb32e9c5903736cad8bd5ad7b98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 763, "license_type": "no_license", "max_line_length": 52, "num_lines": 34, "path": "/RNGs/jvn_RNG.py", "repo_name": "Lucasgb7/Simulacao_Discreta", "src_encoding": "UTF-8", "text": "import time \n\n# John von Neumann's Generator\ndef JVN(x):\n x = x ** 2\n x = x / 100\n x = x % 10000\n return int(x)\n\n# Linear Congruential Generator\ndef LCG(x):\n return (a * x + c) % m\n\nif __name__ == \"__main__\":\n # seed = 322\n simulationTime = 20\n # x = int(input(\"Valor inicial [X0]: \"))\n x = 3\n # m = int(input(\"Módulo [M], M>0: \"))\n m = 10\n # a = int(input(\"Multiplicador [A], M>A>0: \"))\n a = 2\n # c = int(input(\"Incrementador [C], M>=C>=0: \"))\n c = 0\n start = time.time()\n print(start)\n for i in range(simulationTime):\n # seed = JVN(seed)\n # print(\"Semente: \", seed)\n x = LCG(x)\n print('X[', i, ']: ', x)\n end = time.time()\n \n print(\"Tempo para o cálculo:\", end - start) " } ]
10
MatheusLealAquino/meuCanal
https://github.com/MatheusLealAquino/meuCanal
fab2cb7fcfa10a7328272af333d92d15cd478cf9
ef1c468945f095338054183110864c6e6d78bc9c
93d5b4ffed6a616bdccb9f9483dbd999688af8f9
refs/heads/master
2020-03-29T12:21:18.060175
2020-01-19T19:59:11
2020-01-19T19:59:11
149,895,166
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7019519805908203, "alphanum_fraction": 0.7087087035179138, "avg_line_length": 34.05263137817383, "blob_id": "13b1439bfb0a33bbbc3ff937eba6794375ff8a9f", "content_id": "8a904d0cb8c331a163955e1b5ea044fa2531d296", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1332, "license_type": "permissive", "max_line_length": 106, "num_lines": 38, "path": "/conteudo/views.py", "repo_name": "MatheusLealAquino/meuCanal", "src_encoding": "UTF-8", "text": "from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.shortcuts import render, redirect, get_object_or_404\n\nfrom conteudo.models import Video, Categoria\n\ndef exibir_catalogo(request):\n categorias = Categoria.objects.all()\n return render(request, 'conteudo/catalogo_videos.html', {'categorias': categorias})\n\ndef cadastro_video(request):\n return render(request, 'conteudo/cadastro_video.html')\n\ndef editar_video(request):\n return render(request, 'conteudo/editar_video.html')\n\ndef lista_categoria(request, id=None):\n categorias = Categoria.objects.all()\n if id != None:\n videos_lista = Video.objects.all().filter(categoria_id=id)\n else:\n videos_lista = Video.objects.all()\n\n paginator = Paginator(videos_lista, 3)\n page = request.GET.get('page',1)\n\n try:\n videos = paginator.page(page)\n except PageNotAnInteger:\n videos = paginator.page(1)\n except EmptyPage:\n videos = paginator.page(paginator.num_pages)\n\n return render(request, 'conteudo/lista_categoria.html', {'categorias': categorias, 'videos' : videos})\n\ndef exibir_video(request, id):\n video = get_object_or_404(Video, id= id)\n categorias = Categoria.objects.all()\n return render(request, 'conteudo/player_video.html', {'video':video, 'categorias':categorias})\n" }, { "alpha_fraction": 0.6901140809059143, "alphanum_fraction": 0.6901140809059143, "avg_line_length": 36.64285659790039, "blob_id": "04b22c91a56ddf3465577418010506e4904ddee3", "content_id": "ce7a9c7925728ccd6d7aa01c44bc44edd3a0711a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 526, "license_type": "permissive", "max_line_length": 79, "num_lines": 14, "path": "/conteudo/urls.py", "repo_name": "MatheusLealAquino/meuCanal", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom conteudo import views\n\napp_name = 'conteudo'\n\nurlpatterns = [\n path('', views.exibir_catalogo, name='catalogo'),\n path('cadastro_video/', views.cadastro_video, name='cadastro_video'),\n path('editar_video/<int:id>/', views.editar_video, name='editar_video'),\n path('<int:id>/', views.exibir_video, name='exibir_video'),\n\n path('categoria/', views.lista_categoria, name='listar_todas_categorias'),\n path('categoria/<int:id>/', views.lista_categoria, name='lista_categoria'),\n]" }, { "alpha_fraction": 0.6262083649635315, "alphanum_fraction": 0.6294307112693787, "avg_line_length": 31.068965911865234, "blob_id": "bd8590027cf1f6277beff6eaf05e86edbca077a3", "content_id": "808cdeec652e0f2c19ba4e506ad44727f1e3744a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 934, "license_type": "permissive", "max_line_length": 100, "num_lines": 29, "path": "/conteudo/forms.py", "repo_name": "MatheusLealAquino/meuCanal", "src_encoding": "UTF-8", "text": "from django import forms\n\nfrom conteudo.models import Video, Categoria\n\n\nclass VideoForm(forms.ModelForm):\n error_messages = {\n 'campo invalido' : \"Campo inválido\"\n }\n\n class Meta:\n model = Video\n fields = ('video_id','categoria', 'nome', 'url', 'capa', 'visualizacao', 'nota', 'sinopse')\n\n video_id = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n categoria = forms.ModelChoiceField(\n error_messages={'required': 'Campo obrigatório', },\n queryset=Categoria.objects.all().order_by(id),\n empty_label='--- Selecionar a Categoria ---',\n widget=forms.Select(attrs={'class': 'form-control form-control-sm'}),\n required=True\n )\n\n nome = forms.CharField(\n error_messages = {'required', 'Campo obrigatório',},\n widget=forms.TextInput(attrs={'class': 'form-control form-control-sm', 'maxlength': '120'}),\n required=True\n )\n\n" }, { "alpha_fraction": 0.6325187683105469, "alphanum_fraction": 0.6503759622573853, "avg_line_length": 31.272727966308594, "blob_id": "a2a314fe8fe427341681184eca105d993ca36fe2", "content_id": "5f60b33ea85f06099c0075a6ef6582acb93b7f07", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1064, "license_type": "permissive", "max_line_length": 83, "num_lines": 33, "path": "/conteudo/models.py", "repo_name": "MatheusLealAquino/meuCanal", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass Categoria(models.Model):\n nome = models.CharField(max_length=255, db_index=True)\n slug = models.SlugField(max_length=200)\n\n class Meta:\n ordering = ('nome',)\n verbose_name = 'categoria'\n verbose_name_plural = 'categorias'\n\n def __str__(self):\n return self.nome\n\n def videosCategoria(self):\n return Video.objects.all().filter(categoria_id=self.id).order_by('-id')[:4]\n\nclass Video(models.Model):\n categoria = models.ForeignKey(Categoria, on_delete=models.DO_NOTHING)\n nome = models.CharField(max_length=255)\n url = models.FileField(upload_to='conteudo/videos/')\n capa = models.FileField(upload_to='conteudo/images/')\n visualizacao = models.DecimalField(max_digits=10, decimal_places=1, default=0)\n nota = models.FloatField(max_length=20)\n sinopse = models.CharField(max_length=500)\n\n class Meta:\n ordering = ('nome',)\n verbose_name = 'video'\n verbose_name_plural = 'videos'\n\n def __str__(self):\n return self.nome" }, { "alpha_fraction": 0.7735849022865295, "alphanum_fraction": 0.7735849022865295, "avg_line_length": 25.75, "blob_id": "432a95e5932a84d57953d92d52b86be50bbfe629", "content_id": "0d72cc21559c35335fe10e0bf779304aa824c261", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "permissive", "max_line_length": 40, "num_lines": 4, "path": "/projeto/views.py", "repo_name": "MatheusLealAquino/meuCanal", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\ndef pagina_inicial(request):\n return render(request, 'index.html')" }, { "alpha_fraction": 0.6096654534339905, "alphanum_fraction": 0.6096654534339905, "avg_line_length": 23.272727966308594, "blob_id": "1b2fb6c338b4df5e65f4c52257a82f59eab016e0", "content_id": "1ec8c5eb881caa7b53e6a40166a7759a7133ff91", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 269, "license_type": "permissive", "max_line_length": 63, "num_lines": 11, "path": "/projeto/static/projeto/js/script.js", "repo_name": "MatheusLealAquino/meuCanal", "src_encoding": "UTF-8", "text": "$(function() {\n $('[data-toggle=\"tooltip\"]').tooltip();\n});\n$(function() {\n $('[data-toggle=\"popover\"]').popover();\n});\n\nvar vid = document.querySelector(\"#myVideo\");\nvid.onended = function() {\n\tdocument.querySelector(\"#collapseOne\").classList.add(\"show\");\t\n};\n\n\n" }, { "alpha_fraction": 0.6538461446762085, "alphanum_fraction": 0.6840659379959106, "avg_line_length": 20.47058868408203, "blob_id": "f2165042efaefca1971a45c88fc4692be52929d0", "content_id": "ae42ce2342d988252872496947e6027568e872f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 364, "license_type": "permissive", "max_line_length": 37, "num_lines": 17, "path": "/README.md", "repo_name": "MatheusLealAquino/meuCanal", "src_encoding": "UTF-8", "text": "# meuCanal\nA project Django 3.x based on Netflix\n\n\nFramework to style: Bootstrap 4.x\n\n\n## Images\n* ![First image](screen1.jpg)\n* ![Second image](screen2.jpg)\n* ![Third image](screen3.jpg)\n* ![Fourth image](screen4.jpg)\n* ![Five image](screen5.jpg)\n* ![Six image](screen6.jpg)\n* ![Seven image](screen7.jpg)\n* ![Eight image](screen8.jpg)\n* ![Nine image](screen9.jpg)" }, { "alpha_fraction": 0.698630154132843, "alphanum_fraction": 0.698630154132843, "avg_line_length": 17.375, "blob_id": "0706fddb685d11e919aadb01ff64c680ab0f35d7", "content_id": "a9dc90e9fb156f0da3c0eacaaa49a90e339eb793", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "permissive", "max_line_length": 54, "num_lines": 8, "path": "/login/urls.py", "repo_name": "MatheusLealAquino/meuCanal", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom login import views\n\napp_name = 'login'\n\nurlpatterns = [\n path('', views.pagina_login, name='pagina_login'),\n]" }, { "alpha_fraction": 0.7711864113807678, "alphanum_fraction": 0.7711864113807678, "avg_line_length": 28.5, "blob_id": "e6e5363e7dca2e7fcbd92792c9181a97c3c92a35", "content_id": "41ed92726d3bd11d7afaf292bfd57fbc836dbe18", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "permissive", "max_line_length": 53, "num_lines": 4, "path": "/login/views.py", "repo_name": "MatheusLealAquino/meuCanal", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\ndef pagina_login(request):\n return render(request, 'login/pagina_login.html')\n" } ]
9
KausikN/BigData_Files
https://github.com/KausikN/BigData_Files
646f957d8ccc78249e7bf1c0a8a57fa3211754b6
9b5d7689de9cdf4521bb8a0554819b05b5ea09c8
3eb789d3fd2b0d32a7099eaa807b9418a926e935
refs/heads/master
2023-04-23T08:31:23.878590
2021-11-24T07:30:46
2021-11-24T07:30:46
235,078,965
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.33882150053977966, "alphanum_fraction": 0.6048526763916016, "avg_line_length": 54, "blob_id": "c26217544b0ec5a88671bc6088fc163b86dd7cdc", "content_id": "4a083898a60ca6eb716929a67e10a0fd6e977b3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1154, "license_type": "no_license", "max_line_length": 298, "num_lines": 21, "path": "/ProblemSet_1/9.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n(9) Consider the following sample of weights for 45 individuals: \n79, 71, 89, 57, 76, 64, 82, 82, 67, 80, 81, 65, 73, 79, 79, 60, 58, 83, 74, 68, 78, 80, 78, 81, 76, 65, 70, 76, 58, 82, 59, 73, 72, 79, 87, 63, 74, 90, 69, 70, 83, 76, 61, 66, 71, 60, 57, 81, 57, 65, 81, 78, 77, 81, 81, 63, 71, 66, 56, 62, 75, 64, 74, 74, 70, 71, 56, 69, 63, 72, 81, 54, 72, 91, 92\nFor the above data generates histograms and depict them using packages in your platform. \nExplore the different types of histograms available and test drive the types supported in your platform.\n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef GenerateHistogram(Marks):\n n_bins = 10\n X = np.arange(len(Marks))\n n, bins, patches = plt.hist(Marks, n_bins, facecolor='blue', alpha=0.5)\n plt.show()\n\nMarks = [79, 71, 89, 57, 76, 64, 82, 82, 67, 80, 81, 65, 73, 79, 79, 60, 58, 83, 74, 68, 78, 80, 78, 81, 76, 65, 70, 76, 58, 82, 59, 73, \n 72, 79, 87, 63, 74, 90, 69, 70, 83, 76, 61, 66, 71, 60, 57, 81, 57, 65, 81, 78, 77, 81, 81, 63, 71, 66, 56, 62, 75, 64, 74, 74, \n 70, 71, 56, 69, 63, 72, 81, 54, 72, 91, 92]\nGenerateHistogram(Marks)\n\n# INCOMPLETE" }, { "alpha_fraction": 0.5679012537002563, "alphanum_fraction": 0.6123456954956055, "avg_line_length": 39.599998474121094, "blob_id": "d354d81c0f7e07b84dfcbfc7515c0283382b64e7", "content_id": "53f1c346b84b75fd6ab9021609acec2626e0e87e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 88, "num_lines": 10, "path": "/FindHugeFiles.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "import os\n\ndef FindHugeFiles(path, max_file_size):\n for root, dirs, files in os.walk(path, topdown=False):\n for name in files:\n file_stats = os.stat(os.path.join(root, name))\n if file_stats.st_size / (1024 * 1024) > max_file_size:\n print(os.path.join(root, name), \":\", file_stats.st_size / (1024 * 1024))\n\nFindHugeFiles(\"E:\\UnityProjects\\\\GameDevPile\\\\\", 75)" }, { "alpha_fraction": 0.5386740565299988, "alphanum_fraction": 0.5635359287261963, "avg_line_length": 25.851852416992188, "blob_id": "4baa46467d3a5a0d8bac43b89fb53088b1952f63", "content_id": "6fc5a70815b9ed6176332a465258e3a3cee52287", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 735, "license_type": "no_license", "max_line_length": 70, "num_lines": 27, "path": "/README.md", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "# BTech Big Data Files\n\n📖 Academic Files and Codes\n\n## Institute\n\n🧑‍🎓 B.Tech (H), IIITDM Kancheepuram\n\n## Course\n\n| Name | Roll No | SEM | Course Name |\n| :--------: | :---------: | :-: | :-------------------------------: |\n| N Kausik | COE17B010 | 6 | Analytics and Systems of Big Data |\n\n## Codes\n\n - [Assignment 1](Assignment_1/)\n - [Exploratory Assignment](ExploratoryAssignment_1)\n - [Hadoop Assignment](Hadoop_Assignment/)\n - [Reading Assignment](ReadingAssignment_1/)\n - Problem Sets\n - [Problem Set 1](ProblemSet_1/)\n - [Problem Set 2](ProblemSet_2/)\n - [Problem Set 3](ProblemSet_3/)\n - [Problem Set 4](ProblemSet_4/)\n - [Project](Project/)\n - [End Sem](EndSem_Exam/)" }, { "alpha_fraction": 0.6133718490600586, "alphanum_fraction": 0.6231164932250977, "avg_line_length": 30.263751983642578, "blob_id": "efbb954db57a05e9cc4f19cb9bfbacd091d9b78d", "content_id": "9fb8b8bac7a15728d4dfdc3898208472519c1a1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22166, "license_type": "no_license", "max_line_length": 130, "num_lines": 709, "path": "/Assignment_1/Submission/Algorithms.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nImplementation of Algorithms\nFIM - Apriori, FPGrowth\nCFI - Charm, AClose\nMFI - Mafia, Pincer Search\n'''\n# Imports\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n#from functools import lru_cache\nfrom mlxtend.frequent_patterns import association_rules\nfrom mlxtend.frequent_patterns import apriori, fpgrowth\nfrom mlxtend.preprocessing import TransactionEncoder\n\nimport Preprocessing\n\n# Main Functions\n# Read Data\ndef ReadCSVFile(filepath):\n return pd.read_csv(filepath)\n \n# Write Data\ndef WriteCSVFile(Data, filepath):\n return Data.to_csv(filepath, index=False)\n\n# Visualise Data\n\n# Describe Data\ndef DescribeDataset(Dataset):\n for key in Dataset.keys():\n print(\"Column:\", key)\n print(\"N Unique Values:\", len(Dataset[key].unique()))\n print(\"\\n\\n\")\n print(\"Dataset:\")\n print(Dataset.head)\n\n# Util Functions\ndef OneHotEncoder(Dataset):\n # TE = TransactionEncoder()\n # TE_Arr = TE.fit(Dataset).transform(Dataset)\n # return pd.DataFrame(TE_Arr, columns=TE.columns_)\n Dataset_Encoded = []\n ListDicts = Dataset.T.to_dict().values() # Convert Dataframe to list of dicts\n\n UniqueValues, UniqueValKeys = GetUniqueValues(Dataset)\n\n for data in tqdm(ListDicts):\n EncodedData = []\n for val, key in zip(UniqueValues, UniqueValKeys):\n if val == data[key]:\n EncodedData.append(True)\n else:\n EncodedData.append(False)\n Dataset_Encoded.append(EncodedData)\n \n Dataset_Encoded = pd.DataFrame(Dataset_Encoded, columns=UniqueValues)\n\n return Dataset_Encoded\n\ndef GetUniqueValues(Dataset):\n UniqueValues = []\n UniqueValKeys = []\n for key in Dataset.keys():\n uniquevals = Dataset[key].unique()\n for uv in uniquevals:\n if uv not in UniqueValues:\n UniqueValues.append(uv)\n UniqueValKeys.append(key)\n return UniqueValues, UniqueValKeys\n\ndef SelfJoin(Set, sizelimit=None):\n JoinedSet = []\n JoinedSetDicts = []\n\n for i in range(len(Set)):\n for j in range(i+1, len(Set)):\n val = {}\n for x, y in zip(Set[i], Set[j]):\n val[x] = True\n val[y] = True\n if sizelimit == None or sizelimit >= len(val.keys()):\n if val not in JoinedSetDicts:\n JoinedSetDicts.append(val)\n JoinedSet.append(list(val.keys()))\n \n return JoinedSet\n\ndef GenerateMFCS(MFCS, Items_Si):\t\t#For Pincer Search Algorithm\n MFCS = MFCS.copy()\n\n for inf_items in Items_Si:\n for MFCS_item in MFCS.copy():\n # print(MFCS_item)\n #if infrequent is subset of MFCS\n if all(s_item in MFCS_item for s_item in inf_items):\n MFCS.remove(MFCS_item)\n\n for item in inf_items:\n updateMFCS_item = MFCS_item.copy()\n updateMFCS_item.remove(item)\n\n if not any(all(s_item in Rem_MFCS for s_item in updateMFCS_item) for Rem_MFCS in MFCS):\n MFCS.append(updateMFCS_item)\n return MFCS\n\n# @lru_cache(maxsize=32)\ndef compVertBitmap(itemset, bitMap):\n if len(itemset) == 1:\n item = str(itemset[0])\n print(item)\n print(bitMap.keys())\n return bitMap[item]\n\n else:\n last_item = str(itemset[-1])\n return compVertBitmap(itemset[:-1], bitMap) & bitMap[last_item]\n\ndef countSupp(itemset, bitMap):\n\t\n # VerticalBitmap(itemset, bitMap)\n itemset_map = compVertBitmap(itemset, bitMap)\n itemset_supp_count = np.count_nonzero(itemset_map)\n # print(itemset_supp_count)\n return itemset_supp_count\n\n# Algorithms\n\n# FIM\n# Apriori\ndef Apriori(Dataset_Encoded, min_support=0.05):\n # Build apriori model\n FrequentItems = apriori(Dataset_Encoded, min_support=min_support, use_colnames=True)\n return FrequentItems\n \n# FPGrowth\ndef FPGrowth(Dataset_Encoded, min_support=0.05):\n FrequentItems = fpgrowth(Dataset_Encoded, min_support=min_support, use_colnames=True)\n return FrequentItems\n\n# RuleSet Mining\ndef RuleMining(FrequentItems, min_threshold=1):\n # Collect the inferred rules\n RuleSet = association_rules(FrequentItems, metric =\"lift\", min_threshold=min_threshold)\n RuleSet = RuleSet.sort_values(['confidence', 'lift'], ascending=[False, False])\n return RuleSet\n\n# CFI\n# Charm\ndef Charm(Dataset_Encoded, min_support=0.05, min_itemset_length=1):\n CFI = []\n\n min_support = min_support * len(Dataset_Encoded.index)\n print(\"Minimum Support:\", min_support)\n\n # Procedure\n # Minimum Support - 3\n # Count A(4), B(6), C(4), D(4), E(5) as supports\n Items = Dataset_Encoded.keys()\n ItemCounts = []\n for item in Items:\n ItemCounts.append(sum(Dataset_Encoded[item]))\n \n # Write in Ascending order of Support\n # A, C, D, E, B\n Items_Sorted = Items.copy()\n ItemCounts_Sorted = ItemCounts.copy()\n sorted_list1 = [y for _,y in sorted(zip(ItemCounts_Sorted,Items_Sorted),key=lambda x: x[0])]\n sorted_list2 = sorted(ItemCounts_Sorted)\n Items_Sorted = sorted_list1\n ItemCounts_Sorted = sorted_list2\n\n # Then write transactions where it occurs\n # A - 1, 3, 4, 5\n # C - 2, 4, 5, 6\n # D - 1, 3, 5, 6\n # E - 1, 2, 3, 4, 5\n # B - 1, 2, 3, 4, 5, 6\n AppearanceDict = {}\n for item in Items_Sorted:\n AppearanceDict[item] = [i for i, val in enumerate(Dataset_Encoded[item]) if val]\n\n # Make Pairs\n # For A and C - common trans - 4, 5 - count 2 < MinSupp = 3\n # As < MinSupp DONT COUNT THAT PAIR\n # A and D - 1, 3, 5 - count = 3 = 3 - TAKE A and D as pair\n # Now a new thing called AD with trans - 1, 3, 5\n # Now compare AD with E - AD - 135 subset of 12345 - E\n # As subset Cancel AD and make it as ADE\n # Now again check ADE with B - ADE - 135 subset of 123456 - B\n # As subset cancel ADE make it ADEB\n # As no further items after B - ADEB is first closed freq itemset\n # \n # Next Check A with E - 1345 (A) is subset if 12345 (E)\n # So, replace A with AE\n # Check AE with B - 1345 is subset of 123456 (E)\n # Replace AE with AEB - No further - AEB is CFI\n #\n # Next Check C with D\n # No subset but common items are 5, 6 - count 2 < MinSup - DONT TAKE\n # C and E - common - 245 - count 3 = MinSupp\n # Now new thing CE has ele 245\n # CE with B - 245 is subset of 123456 (B)\n # So replace CE with CEB - as no further - CEB is CFI\n #\n # Next Check C with B - subset - CB is CFI\n # \n # And So On\n for i in tqdm(range(len(Items_Sorted))):\n cfi_item = [Items_Sorted[i]]\n cfi_available = False\n for j in range(i+1, len(Items_Sorted)):\n CommonElements = [value for value in AppearanceDict[Items_Sorted[i]] if value in AppearanceDict[Items_Sorted[j]]]\n CommonCount = len(CommonElements)\n if CommonCount >= min_support:\n cfi_item = [Items_Sorted[i], Items_Sorted[j]]\n # Not Sure if needed Subset or Common Elements\n\n # Common Elements\n # for k in range(j+1, len(Items_Sorted)):\n # CommonElements_temp = [value for value in CommonElements if value in AppearanceDict[Items_Sorted[k]]]\n # CommonCount_temp = len(CommonElements_temp)\n # if CommonCount_temp >= min_support:\n # CommonElements = CommonElements_temp\n # CommonCount = CommonCount_temp\n # cfi_item.append(Items_Sorted[k])\n\n # Subset\n for k in range(j+1, len(Items_Sorted)):\n if set(CommonElements).issubset(set(AppearanceDict[Items_Sorted[k]])):\n CommonElements = AppearanceDict[Items_Sorted[k]]\n CommonCount = len(AppearanceDict[Items_Sorted[k]])\n cfi_item.append(Items_Sorted[k])\n \n if min_itemset_length <= len(cfi_item):\n CFI.append(cfi_item)\n cfi_available = True\n if not cfi_available and min_itemset_length <= 1:\n if len(AppearanceDict[Items_Sorted[i]]) >= min_support:\n cfi_item = [Items_Sorted[i]]\n CFI.append(cfi_item)\n\n return CFI\n\n# AprioriClose - AClose\ndef AClose(Dataset_Encoded, min_support=0.05, min_itemset_length=1):\n CFI = []\n\n min_support = min_support * len(Dataset_Encoded.index)\n print(\"Minimum Support:\", min_support)\n\n ListDicts = Dataset_Encoded.T.to_dict().values() # Convert Dataframe to list of dicts\n\n # Count Items\n Items = Dataset_Encoded.keys()\n ItemCounts = []\n for item in Items:\n ItemCounts.append(sum(Dataset_Encoded[item]))\n \n # Prune Level 1\n Items_L1 = []\n ItemCounts_L1 = []\n for item, count in zip(Items, ItemCounts):\n if count >= min_support:\n Items_L1.append([item])\n ItemCounts_L1.append(count)\n\n # Keep Pruning Till Empty\n Items_Li = Items_L1\n ItemCounts_Li = ItemCounts_L1\n i = 1\n while(len(Items_Li) > 0):\n i += 1\n\n # Add previous Values to CFI if superset and remove subsets\n newCFI = CFI.copy()\n for item in Items_Li:\n for cfi in CFI:\n if set(cfi).issubset(set(item)): # Check if subset\n if cfi in newCFI:\n newCFI.remove(cfi)\n if min_itemset_length <= len(item):\n newCFI.append(item)\n # CFI.remove(cfi)\n # if min_itemset_length <= len(item):\n # CFI.append(item)\n CFI = newCFI\n\n # Self-Join\n Items_Ci = SelfJoin(Items_Li, sizelimit=i)\n\n # Count Supports of Items_Ci\n ItemCounts = [0] * len(Items_Ci)\n for data in ListDicts:\n for i in range(len(Items_Ci)):\n ItemPresent = True\n for val in Items_Ci[i]:\n if not data[val]:\n ItemPresent = False\n break\n if ItemPresent:\n ItemCounts[i] += 1\n \n \n # Prune\n Items_Li = []\n for item, count in zip(Items_Ci, ItemCounts):\n if count >= min_support:\n Items_Li.append(item)\n \n return CFI\n\n# MFI\n# Pincer Search\ndef PincerSearch(Dataset_Encoded, min_support=0.05, min_itemset_length=1):\n MFI = []\n\n min_support = min_support * len(Dataset_Encoded.index)\n print(\"Minimum Support:\", min_support)\n\n ListDicts = Dataset_Encoded.T.to_dict().values() # Convert Dataframe to list of dicts\n\n Items = Dataset_Encoded.keys()\n ItemCounts = []\n\n MFCS = [[items] for items in Items]\n\n Items_Li = []\n Items_Si = []\n i = 1\n\n Items_Ci = [[item] for item in Items]\n\n while(len(Items_Ci) > 0):\n \n # Count Supports of Items_Ci and MFCS\n ItemCounts = [0] * len(Items_Ci)\n for data in ListDicts:\n for i in range(len(Items_Ci)):\n ItemPresent = True\n for val in Items_Ci[i]:\n if data[val] == False:\n ItemPresent = False\n break\n if ItemPresent:\n ItemCounts[i] += 1\n \n MFCSCount = [0] * len(MFCS)\n for data in ListDicts:\n for i in range(len(MFCS)):\n ItemPresent = True\n for val in MFCS[i]:\n if data[val] == False:\n ItemPresent = False\n break\n if ItemPresent:\n MFCSCount[i] += 1\n\n\n #Update MFI: MFI U freqitems in MFCS\n for itemset, support in zip(MFCS, MFCSCount):\n if ((support >= min_support) and (itemset not in MFI)):\n MFI.append(itemset)\n \n # Infrequent sets\n Items_Li = []\n Items_Si = []\n for item, count in zip(Items_Ci, ItemCounts):\n if count >= min_support:\n Items_Li.append(item)\n else:\n Items_Si.append(item)\n #update MFCS\n MFCS = GenerateMFCS(MFCS, Items_Si)\n \n # Prune LK that are subsets of MFI\n Ck = Items_Ci.copy()\n for item in Items_Ci.copy():\n if any(all(s_item in MFIitem for s_item in item) for MFIitem in MFI):\n Ck.remove(item)\n Items_Li = Ck\n\n i += 1\n # Self-Join\n Items_Ci = SelfJoin(Items_Li, sizelimit=i)\n\n #Prune Ck+1 that are not in MFCS\n Ck = Items_Ci.copy()\n for item in Items_Ci.copy():\n if not any(all(s_item in MFCSitem for s_item in item) for MFCSitem in MFCS):\n Ck.remove(item)\n Items_Ci = Ck\n\n return MFI\n\n# Mafia\nclass MafiaTree:\n def __init__(self, head, tail, supportCount=None):\t# supportCount=None\n self.head = head\n self.tail = tail.copy()\n self.supportCount = supportCount\n\ndef MafiaRun(currentMFNode, MFI, bitMap, Items, transCount, min_support):\n #Head Union Tail Pruning (HUT)------>\n HUT = currentMFNode.head + tuple(currentMFNode.tail)\n # HUT = currentMFNode.head.append(currentMFNode.tail)\n\n #If HUT is in MFI -> Stop Searching nd return\n if any(all(item in mfi for item in HUT) for mfi in MFI):\n return MFI\n\n #Count Support of all children\n # print('--------------------------------------',currentMFNode.head)\n nodeChild_supportCount = [(item, countSupp(currentMFNode.head + (item,), bitMap) ) for item in currentMFNode.tail]\n # nodeChild_supportCount = [(item, countSupp(currentMFNode.head.append(item), bitMap) ) for item in currentMFNode.tail]\n #Extract frequent Children of node and support count\n nodeFreqChildCount = [(item, support_count) for item, support_count in nodeChild_supportCount if support_count >= min_support]\n\n node_childEqualParent = []\t# items in tail with support count equal to that of parent\n node_tail_suppCount = []\t# items in node tail sorted by Decreasing Support\n\n for item, support_count in nodeFreqChildCount:\n if support_count == currentMFNode.supportCount:\n node_childEqualParent.append(item)\n else:\n node_tail_suppCount.append((item, support_count))\n\n #Sort items in the trimmed tail by increasing support:\n node_tail_suppCount.sort(key=lambda x:x[1])\n node_tail_items = [item for item, support in node_tail_suppCount]\n\n currentMFNode.head += tuple(node_childEqualParent)\n # currentMFNode.head.append(node_childEqualParent)\n currentMFNode.tail = node_tail_items\n\n is_leaf = not bool(currentMFNode.tail)\n\n for i, item in enumerate(currentMFNode.tail):\n new_node_head = currentMFNode.head + (item,)\n # new_node_head.append(item)\n new_node_tail = currentMFNode.tail[i+1:]\n new_node_supportCount = node_tail_suppCount[i][1]\n\n new_node = MafiaTree(new_node_head, new_node_tail, new_node_supportCount)\n\n MFI = MafiaRun(new_node, MFI, bitMap, Items, transCount, min_support)\n\n if is_leaf and currentMFNode.head and not any(all(item in mfi for item in currentMFNode.head) for mfi in MFI):\n MFI.append(set(currentMFNode.head))\n\n return MFI\n\ndef Mafia(Dataset_Encoded, min_support=0.05, min_itemset_length=1):\n MFI = []\n\n min_support = min_support * len(Dataset_Encoded.index)\n print(\"Minimum Support:\", min_support)\n\n Items = Dataset_Encoded.keys()\n Itemlist = []\n for item in Items:\n Itemlist.append(item)\n transCount, itemCount = Dataset_Encoded.shape\n # print(transCount)\n items_vertical_bitmaps = {item:np.array(Dataset_Encoded[item]) for item in Items}\n\n root = tuple()\n MFRoot = MafiaTree(root, Itemlist)\t#Creates a root Node \n\n MFI = MafiaRun(MFRoot, MFI, items_vertical_bitmaps, Itemlist, transCount, min_support)\n\n return MFI\n\n# LFI\n# Apriori Based LFI\ndef AprioriLFI(Dataset_Encoded, min_support=0.05):\n LFI = []\n\n # Run apriori and get all frequent itemsets\n FreqItemsets = Apriori(Dataset_Encoded, min_support=min_support)\n #print(FreqItemsets['itemsets'])\n\n # Convert to list of itemsets 2d array\n FI = []\n for itemset in FreqItemsets['itemsets']:\n FI.append(list(itemset))\n\n # Search and find the longest itemset\n max_len = 0\n for fi in FI:\n if len(fi) == max_len:\n LFI.append(fi)\n elif len(fi) > max_len:\n LFI = []\n LFI.append(fi)\n max_len = len(fi)\n\n return LFI\n\n# FPGrowth Based LFI\ndef FPGrowthLFI(Dataset_Encoded, min_support=0.05):\n LFI = []\n\n # Run FPGrowth and get all frequent itemsets\n FreqItemsets = FPGrowth(Dataset_Encoded, min_support=min_support)\n #print(FreqItemsets['itemsets'])\n\n # Convert to list of itemsets 2d array\n FI = []\n for itemset in FreqItemsets['itemsets']:\n FI.append(list(itemset))\n\n # Search and find the longest itemset\n max_len = 0\n for fi in FI:\n if len(fi) == max_len:\n LFI.append(fi)\n elif len(fi) > max_len:\n LFI = []\n LFI.append(fi)\n max_len = len(fi)\n\n return LFI\n\n\n# Driver Code\ndataset_path = 'Assignment 1/Dataset_Cleaned.csv'\n#LabelIndexMap_path = 'Assignment 1/LabelIndexMaps.p'\n\nDataset_Preprocessed = ReadCSVFile(dataset_path)\n#LabelIndexMap = pickle.load(open(LabelIndexMap_path, 'rb'))\n\n# Print Dataset\nDatasetRowCount = len(Dataset_Preprocessed['Symbol'])\nprint(\"Dataset Row Count:\", DatasetRowCount)\nprint(\"Dataset: 5 rows: \")\nprint(Dataset_Preprocessed.head(n=5))\n\nprint(\"\\n\")\n\n# Encode Dataset\nDatasetPortionPercentage = 0.005\nDataset_PortionSize = int(DatasetPortionPercentage * DatasetRowCount)\nif Dataset_PortionSize > DatasetRowCount:\n Dataset_PortionSize = DatasetRowCount\nprint(\"Operating on\", Dataset_PortionSize, \" data rows.\")\nprint(\"Encoding...\")\nDataset_TE = OneHotEncoder(Dataset_Preprocessed.head(Dataset_PortionSize))\n\nprint(\"\\n\\n\")\n\n# FIM\n# Apriori\nprint(\"Apriori\")\n\nMinimumSupport = 0.1\nMinimumThreshold = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\n\nFI = Apriori(Dataset_TE, min_support=MinimumSupport)\nRuleSet = RuleMining(FI, min_threshold=MinimumThreshold)\nprint(\"Frequent Itemsets:\\n\", FI)\nprint(\"\\n\\n\")\nprint(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# FPGrowth\nprint(\"FPGrowth\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\n\nFI = FPGrowth(Dataset_TE, min_support=MinimumSupport)\nRuleSet = RuleMining(FI, min_threshold=MinimumThreshold)\nprint(\"Frequent Itemsets:\\n\", FI)\nprint(\"\\n\\n\")\nprint(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n\n# CFI\n# Charm\nprint(\"Charm\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\nMinimumItemsetLength = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\nprint(\"Minimum Itemset Length -\", MinimumItemsetLength)\n\nCFI = Charm(Dataset_TE, min_support=MinimumSupport, min_itemset_length=MinimumItemsetLength)\n# RuleSet = RuleMining(CFI, min_threshold=MinimumThreshold)\nprint(\"Closed Frequent Itemsets:\")\ncfi_index = 1\nfor cfi in CFI:\n print(str(cfi_index) + \":\", \" - \".join(cfi))\n cfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# AClose\nprint(\"AClose\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\nMinimumItemsetLength = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\nprint(\"Minimum Itemset Length -\", MinimumItemsetLength)\n\nCFI = AClose(Dataset_TE, min_support=MinimumSupport, min_itemset_length=MinimumItemsetLength)\n# RuleSet = RuleMining(CFI, min_threshold=MinimumThreshold)\nprint(\"Closed Frequent Itemsets:\")\ncfi_index = 1\nfor cfi in CFI:\n print(str(cfi_index) + \":\", \" - \".join(cfi))\n cfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# MFI\n# Pincer Search\nprint(\"Pincer Search\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\nMinimumItemsetLength = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\nprint(\"Minimum Itemset Length -\", MinimumItemsetLength)\n\nMFI = PincerSearch(Dataset_TE, min_support=MinimumSupport, min_itemset_length=MinimumItemsetLength)\n# RuleSet = RuleMining(MFI, min_threshold=MinimumThreshold)\nprint(\"Maximal Frequent Itemsets:\")\nmfi_index = 1\nfor mfi in MFI:\n print(str(mfi_index) + \":\", \" - \".join(mfi))\n mfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# Mafia\nprint(\"Mafia\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\nMinimumItemsetLength = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\nprint(\"Minimum Itemset Length -\", MinimumItemsetLength)\n\nMFI = Mafia(Dataset_TE, min_support=MinimumSupport, min_itemset_length=MinimumItemsetLength)\n# RuleSet = RuleMining(MFI, min_threshold=MinimumThreshold)\nprint(\"Maximal Frequent Itemsets:\")\nmfi_index = 1\nfor mfi in MFI:\n print(str(mfi_index) + \":\", \" - \".join(mfi))\n mfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# LFI\n# Apriori Based LFI\nprint(\"Apriori Based LFI\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\n\nLFI = AprioriLFI(Dataset_TE, min_support=MinimumSupport)\n# RuleSet = RuleMining(LFI, min_threshold=MinimumThreshold)\nprint(\"Longest Frequent Itemsets:\")\nlfi_index = 1\nfor lfi in LFI:\n print(str(lfi_index) + \":\", \" - \".join(lfi))\n lfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# FPGrowth Based LFI\nprint(\"FPGrowth Based LFI\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\n\nLFI = FPGrowthLFI(Dataset_TE, min_support=MinimumSupport)\n# RuleSet = RuleMining(LFI, min_threshold=MinimumThreshold)\nprint(\"Longest Frequent Itemsets:\")\nlfi_index = 1\nfor lfi in LFI:\n print(str(lfi_index) + \":\", \" - \".join(lfi))\n lfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n" }, { "alpha_fraction": 0.5679012537002563, "alphanum_fraction": 0.6039506196975708, "avg_line_length": 39.52000045776367, "blob_id": "514e287509d7bdd70a62f13559bdb2433565735e", "content_id": "dbe2b652c19ad840a49270f4fede15bca9ca1837", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2025, "license_type": "no_license", "max_line_length": 151, "num_lines": 50, "path": "/ProblemSet_2/7.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n7. An Advertisement agency develops new ads for various clients (like Jewellery shops, Textile shops). \nThe Agency wants to assess their performance, for which they want to know the number of ads they developed in each quarter for different shop category.\nHelp them to visualize data using radar/spider charts.\nShop Category Quarter 1 Quarter 2 Quarter 3 Quarter 4 \nTextile 10 6 8 13 \nJewellery 5 5 2 4 \nCleaning Essentials 15 20 16 15 \nCosmetics 14 10 21 11 \n'''\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\n\ndef RadarPlot(name, statsList, attribute_labels, plot_markers, plot_str_markers):\n\n labels = np.array(attribute_labels)\n fig= plt.figure()\n for stats in statsList:\n angles = np.linspace(0, 2*np.pi, len(labels), endpoint=False)\n stats = np.concatenate((stats,[stats[0]]))\n angles = np.concatenate((angles,[angles[0]]))\n\n ax = fig.add_subplot(111, polar=True)\n ax.plot(angles, stats, 'o-', linewidth=2)\n ax.fill(angles, stats, alpha=0.25)\n ax.set_thetagrids(angles * 180/np.pi, labels)\n plt.yticks(markers)\n ax.set_title(name)\n ax.grid(True)\n plt.show()\n\ndef RadarPlot_PlotLY(statsList, labels):\n for stats in statsList:\n df = pd.DataFrame(dict(r=stats, theta=labels))\n fig = px.line_polar(df, r='r', theta='theta', line_close=True)\n fig.show()\n\n# Driver Code\nlabels=['Textile', 'Jewellery', 'Cleaning Essentials', 'Cosmetics']\nmarkers = range(22)\nstr_markers = list(map(str, markers))\nAdsCount = [[10, 5, 15, 14], \n [6, 5, 20, 10], \n [8, 2, 16, 21], \n [13, 4, 15, 11]]\n\n# RadarPlot_PlotLY(AdsCount, labels)\nRadarPlot(\"Ads\", AdsCount, attribute_labels = labels, plot_markers = markers, plot_str_markers = str_markers) # example" }, { "alpha_fraction": 0.6214544773101807, "alphanum_fraction": 0.6247956156730652, "avg_line_length": 30.900226593017578, "blob_id": "1f998123fc5ba5f5dcf610dd7b01f2f5a7a4681d", "content_id": "3a0c6425ea589b1bc507da82a139616b296f09b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14067, "license_type": "no_license", "max_line_length": 181, "num_lines": 441, "path": "/Assignment_1/Codes/Preprocessing.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nPreprocessing Functions\n - Open CSV as Pandas Dataframe\n - Visualise Data\n - Print\n - Mean, Median, Mode\n - Charts - Pie, Bar, Histogram\n - Transform Data\n - Encoding of Text as Indices\n - Clean Data\n - Visualise NULL values\n - Fill null spaces or indicate null or remove if necc\n'''\n# Imports\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport plotly.express as px\n\nfrom tqdm import tqdm\n\n# Main Functions\n\n# Read Data\ndef ReadCSVFile(filepath):\n return pd.read_csv(filepath)\n\n# Write Data\ndef WriteCSVFile(Data, filepath):\n return Data.to_csv(filepath, index=False)\n \n# Visualise Data\n\n# Print Data\ndef PrintData(Data):\n print(Data)\n\n# Charts\n# 1\ndef Histogram(Data, nbins=25, xlabel=\"X Axis\", ylabel=\"Y Axis\", title=\"Histogram\"):\n n, bins, patches = plt.hist(Data, nbins, facecolor='blue', alpha=0.5)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()\n\ndef PieChart(Data, labels, colors=['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'grey']):\n plt.pie(Data, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)\n plt.axis('equal')\n plt.show()\n\ndef BarGraph(Data, labels, xlabel=\"X Axis\", ylabel=\"Y Axis\", title=\"Bar Graph\"):\n plt.bar(Data, Data, align='center', alpha=0.5)\n plt.xticks(Data, labels)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()\n\n# 2\ndef BoxPlot(Data, xlabel=\"X Axis\", ylabel=\"Y Axis\", title=\"Box Plot\"):\n plt.boxplot(Data)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.show()\n\ndef ViolinPlot(Data, title=\"Violin Plot\"):\n plt.violinplot(Data)\n plt.title(title)\n plt.show()\n\n# 3\ndef StemPlot(Stems, Leaves, xlabel=\"X Axis\", ylabel=\"Y Axis\", title=\"Box Plot\"):\n plt.title('Stem and Leaf Plot')\n plt.xlabel('Stems')\n plt.ylabel('Leaves')\n markerline, stemlines, baseline = plt.stem(Stems, Leaves)\n plt.show()\n\ndef DensityPlot(Data, labels, xlabel=\"X Axis\", ylabel=\"Y Axis\", title=\"Density Plot\"):\n for x, label in zip(Data, labels):\n sns.distplot(x, hist = False, kde = True,\n kde_kws = {'linewidth': 3},\n label = label)\n \n # Plot formatting\n plt.legend(prop={'size': 16}, title = title)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.show()\n\ndef RugPlot(Data, labels, xlabel=\"X Axis\", ylabel=\"Y Axis\", title=\"Rug Plot\"):\n for x, label in zip(Data, labels):\n sns.rugplot(x, label=label)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.show()\n\ndef SwarmPlot(Data, xlabel=\"X Axis\", ylabel=\"Y Axis\", title=\"Swarm Plot\"):\n sns.swarmplot(Data)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.show()\n\ndef JitteredBoxPlot(Data, xlabel=\"X Axis\", ylabel=\"Y Axis\", title=\"Jittered Box Plot\"):\n sns.boxplot(data=Data)\n sns.swarmplot(data=Data, color='grey')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.show()\n\ndef RadarPlot_PlotLY(statsList, labels):\n for stats in statsList:\n df = pd.DataFrame(dict(r=stats, theta=labels))\n fig = px.line_polar(df, r='r', theta='theta', line_close=True)\n fig.show()\n\ndef FunnelPlot(Data, labels):\n data = dict(number=Data, stage=labels)\n fig = px.funnel(data, x='number', y='stage')\n fig.show()\n\n\n\n# Transform Data\ndef EncodeDataset(Dataset):\n Dataset_Transformed = Dataset.copy()\n LabelIndexMaps = {}\n\n for k in Dataset.keys():\n Dataset_Transformed[k], LabelIndexMaps[k] = EncodeData(Dataset[k])\n\n return Dataset_Transformed, LabelIndexMaps\n\ndef EncodeData(Data):\n Data_Transformed = []\n LabelIndexMap = []\n\n # Get Label to Index Map and Transform Data\n curIndex = 0\n for x in Data:\n if not x in LabelIndexMap:\n # Add to Map\n curIndex = len(LabelIndexMap)\n LabelIndexMap.append(x)\n # Transform\n Data_Transformed.append(curIndex)\n else:\n Data_Transformed.append(LabelIndexMap.index(x))\n\n # if not x in LabelIndexMap.keys():\n # # Add to Map\n # LabelIndexMap[x] = curIndex\n # # Transform\n # Data_Transformed.append(curIndex)\n # curIndex += 1\n # else:\n # Data_Transformed.append(LabelIndexMap[x])\n \n return Data_Transformed, LabelIndexMap\n\n\n# Clean Data\n# Visualise data to clean\ndef Mode(X):\n modex = -1\n modex_freq = -1\n\n freq = FreqDist(X)\n\n for key in freq.keys():\n if freq[key] > modex_freq:\n modex = key\n modex_freq = freq[key]\n\n return modex\n\ndef FreqDist_BinWise(X, binsize):\n values = []\n Freq = {}\n minVal = int(min(X))\n maxVal = int(round(max(X)))\n print(\"Range:\", minVal, \"-\", maxVal)\n for i in range(minVal, maxVal+1, binsize):\n values.append(i)\n Freq[str(i)] = 0\n for x in X:\n key = int(int((round(x) - minVal)/binsize)*binsize + minVal)\n Freq[str(key)] += 1\n return Freq\n\ndef FreqDist(X):\n Freq = {}\n for x in X:\n if x in Freq.keys():\n Freq[x] += 1\n else:\n Freq[x] = 1\n return Freq\n\ndef PrintNonZeroFreq(FreqDist, binsize):\n print(\"Freq Dist Non Zero Values: \")\n nbins = 0\n for k in FreqDist.keys():\n if FreqDist[k] > 0:\n nbins += 1\n #print(k, \":\", FreqDist[k], \"\\n\")\n print(\"Found\", nbins, \"non empty bins\")\n\ndef MissingCount(Data):\n missing = 0\n indices = []\n i = 0\n for d in Data:\n if CheckMissing(d):\n missing += 1\n indices.append(i)\n i += 1\n return missing, indices\n\n# Cleaning Functions\ndef MissingClean(Dataset, NotReplacable_Keys = [\"Symbol\", \"Scientific Name with Author\"], NoChange_Keys = [\"Synonym Symbol\"], Replacable_Keys = [\"National Common Name\", \"Family\"]):\n Dataset_MissingCleaned = {}\n for key in Dataset.keys():\n Dataset_MissingCleaned[key] = []\n ListDicts = Dataset.T.to_dict().values() # Convert Dataframe to list of dicts\n\n # Only Option is to remove data if missing or leave it as it is - Depends on the field of the missing data\n # Symbol - CANT BE NULL - NOT REPLACABLE\n # Synonym Symbol - CAN BE NULL - REPLACABLE\n # Scientific Name - CANT BE NULL - NOT REPLACABLE\n # National Common Name - CAN BE NULL - REPLACABLE\n # Family - CAN BE NULL - REPLACABLE\n\n # So, if missing data in field [Symbol , Scientific Name], remove that row - As replacement makes no sense - as unique\n # If missing data in REPLACABLE field, first replace by a marker value - NULL\n # For field Synonym Symbol - No need to replace as it is optional field\n # For National Common Name and Family - replace SMARTLY\n # If there exists a subset of Scientific Name somewhere in another data row, set common name as that common name - AS COMMON NAME OF SUBSPECIES IS SAME AS COMMON NAME OF SPECIES\n\n n_removed = 0\n n_replaced = 0\n\n mode = ''\n for data in tqdm(ListDicts):\n data_cleaned = {}\n nullData = False # If any non replacable field is null\n replacedData = False # If replacable key data has already been replaced\n for key in data.keys():\n if key in NotReplacable_Keys:\n mode = 'remove'\n if CheckMissing(data[key]):\n nullData = True\n break\n else:\n data_cleaned[key] = data[key]\n elif key in NoChange_Keys:\n mode = 'nochange'\n if CheckMissing(data[key]):\n data_cleaned[key] = \"NULLVALUE\"\n else:\n data_cleaned[key] = data[key]\n elif key in Replacable_Keys and not replacedData:\n mode = 'replace'\n # Check for subset in Scientific Name with Author if data is missing\n if CheckMissing(data[key]):\n data[key] = \"NULLVALUE\"\n for data_subs in ListDicts:\n if data['Scientific Name with Author'] in data_subs['Scientific Name with Author']:\n for rep_key in Replacable_Keys:\n if CheckMissing(data[rep_key]):\n data_cleaned[rep_key] = data_subs[rep_key]\n else:\n data_cleaned[rep_key] = data[rep_key]\n replacedData = True\n break\n else:\n data_cleaned[key] = data[key]\n\n if not nullData:\n for key in data.keys():\n Dataset_MissingCleaned[key].append(data_cleaned[key])\n if replacedData:\n n_replaced += 1\n else:\n n_removed += 1\n\n Dataset_MissingCleaned = pd.DataFrame.from_dict(Dataset_MissingCleaned)\n\n return Dataset_MissingCleaned, n_removed, n_replaced\n \ndef RedundantClean(Dataset):\n Dataset_RedundantCleaned = {}\n for key in Dataset.keys():\n Dataset_RedundantCleaned[key] = []\n ListDicts = Dataset.T.to_dict().values() # Convert Dataframe to list of dicts\n\n # Dataset also contains some repeated data rows\n # Basic Cleaning - Search for duplicate data rows and remove all duplicates\n # IN THIS DATASET DONT DO - Advanced Cleaning - Remove row even if it is a subset of the data of any other row - REDUNDANT\n\n n_duplicates = 0\n\n # Remove Repeated Rows\n uniqList = []\n for data in tqdm(ListDicts):\n if not data in uniqList:\n uniqList.append(data)\n for key in Dataset_RedundantCleaned.keys():\n Dataset_RedundantCleaned[key].append(data[key])\n else:\n n_duplicates += 1\n\n Dataset_RedundantCleaned = pd.DataFrame.from_dict(Dataset_RedundantCleaned)\n\n return Dataset_RedundantCleaned, n_duplicates\n\n\n# Util Functions\ndef SplitDict(Dict):\n keys = Dict.keys()\n data = []\n for k in keys:\n data.append(Dict[k])\n return data, keys\n\ndef CheckMissing(Val):\n if str(Val).strip().replace('nan', '') in ['', ' ', 'NULL', 'null'] or 'NaN' in str(Val):\n return True\n return False\n\n'''\n# Driver Code\ndataset_path = 'Assignment 1/Dataset.csv'\nDataset = ReadCSVFile(dataset_path)\n\n# Print Dataset\nprint(\"Dataset Row Count:\", len(Dataset['Symbol']))\nprint(\"Dataset: 5 rows: \")\nprint(Dataset.head(n=5))\n\nprint(\"\\n\\n\")\n\n# Vis Freq of Dataset\nprint(\"Dataset Freq Visualistion...\")\nprint(\"Number of unique entries:\")\nfor key in Dataset.keys():\n Freq = FreqDist(Dataset[key])\n data, labels = SplitDict(Freq)\n print(key, \"-\", len(labels))\n #print(key, \":\\n\", FreqDist(Dataset[key]), \"\\n\\n\")\n #BarGraph(data, labels)\n\nprint(\"\\n\\n\")\n\n# Vis Missing Dataset\nprint(\"Missing Count:\")\nfor key in Dataset.keys():\n missing, indices = MissingCount(Dataset[key])\n print(key, \"-\", missing)\n\nprint(\"\\n\\n\")\n\n# Clean Missing Data\nprint(\"Cleaning Dataset...\")\n\n# CLEANING PROCEDURE ----------------------\n\n# MISSING DATA CLEAN\n# This Dataset is completely text based -- so no math values\n# Therefore, cleaning missing values using mean or median methods makes no sense\n# Mode Replacement can be used but again it does not make exact sense as any field can have any value - not necc the most appeared value\n\n# Only Option is to remove data if missing or leave it as it is - Depends on the field of the missing data\n# Symbol - CANT BE NULL - NOT REPLACABLE\n# Synonym Symbol - CAN BE NULL - REPLACABLE\n# Scientific Name - CANT BE NULL - NOT REPLACABLE\n# National Common Name - CAN BE NULL - REPLACABLE\n# Family - CAN BE NULL - REPLACABLE\n\n# So, if missing data in field [Symbol , Scientific Name], remove that row - As replacement makes no sense - as unique\n# If missing data in REPLACABLE field, first replace by a marker value - NULL\n# For field Synonym Symbol - No need to replace as it is optional field\n# For National Common Name and Family - replace SMARTLY\n# If there exists a subset of Scientific Name somewhere in another data row, set common name as that common name - AS COMMON NAME OF SUBSPECIES IS SAME AS COMMON NAME OF SPECIES\n\nprint(\"Cleaning Missing Data...\")\nDataset_MissingCleaned, n_removed, n_replaced = MissingClean(Dataset)\nprint(\"\\n\")\nprint(\"Removed\", n_removed, \"data rows\")\nprint(\"Replaced\", n_replaced, \"data rows\")\nprint(\"\\n\")\nprint(\"Cleaned Dataset Missing Count:\")\nfor key in Dataset.keys():\n missing, indices = MissingCount(Dataset[key])\n print(key, \"-\", missing)\n\nprint(\"\\n\\n\")\n\n# REDUNDANT DATA CLEAN\n# Dataset also contains some repeated data rows\n# Basic Cleaning - Search for duplicate data rows and remove all duplicates\n# Advanced Cleaning - Remove row even if it is a subset of the data of any other row - REDUNDANT - IN THIS DATASET DONT DO\n\nDataset_RedundantCleaned, n_duplicates = RedundantClean(Dataset_MissingCleaned)\nprint(\"\\n\")\nprint(\"Removed\", n_duplicates, \"duplicate data rows\")\nprint(\"\\n\")\nprint(\"Redundant Cleaned Dataset Row Count:\", len(Dataset_RedundantCleaned['Symbol']))\n\nprint(\"\\n\\n\")\n\n# Final Cleaned Dataset\nDataset_Cleaned = Dataset_RedundantCleaned\n\n# Save Cleaned Dataset\nWriteCSVFile(Dataset_Cleaned, 'Assignment 1/Dataset_Cleaned.csv')\n\n# Encode Dataset\nprint(\"Encoding Dataset...\")\nData_Transformed, LabelIndexMaps = EncodeDataset(Dataset_Cleaned)\nprint(\"Encoded Dataset: 5 Rows:\")\nprint(Data_Transformed.head(n=5))\n#print(LabelIndexMaps)\n\n# Save Encoded Dataset\nWriteCSVFile(Data_Transformed, 'Assignment 1/Dataset_Cleaned_Encoded.csv')\npickle.dump(LabelIndexMaps, open('Assignment 1/LabelIndexMaps.p', 'wb'))\n\n# Visualise Preprocessed Data - Family Distribution\nHistogram(Data_Transformed['Family'], len(LabelIndexMaps['Family']), 'Family Name', 'Frequency', 'Family Frequency')\n\nprint(\"\\n\\n\")\n'''" }, { "alpha_fraction": 0.5199353694915771, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 23.4342098236084, "blob_id": "baeeddee743e5f2333ce7d86a681c0ba94400a29", "content_id": "ccbbc300ce2b5c5565975ae10481af692838c08a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1856, "license_type": "no_license", "max_line_length": 91, "num_lines": 76, "path": "/ReadingAssignment_1/Codes/K-Means_Clustering.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nTest driving K-Means Clustering Algorithm\n'''\n# Imports\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Main Functions\n# def AgglomerativeClustering(items):\n# cluster_points = []\n# cluster_names = []\n\n# # Initially all points are clusters\n# index = 0\n# for item in items:\n# cluster_points.append(item)\n# cluster_names.append([index])\n# index += 1\n \n# # Find proximity matrix\n# Prox_Matrix = ProximityMatrix(cluster_points)\n\n# # Merge Nearest Clusters\n# cluster_names_new = cluster_names.copy()\n# cluster_points_new = cluster_points.copy()\n# merged_indices = []\n# merged_values = []\n# for i in range(len(cluster_points)):\n# if i in merged_indices:\n# continue\n# closest_index = -1\n# closest_value = -1\n# for j in range(i+1, len(cluster_points)):\n# if j in merged_indices:\n# continue\n# if closest_value == -1 or Prox_Matrix[str(i) + \"_\" + str(j)] < closest_value:\n# closest_index = j\n# closest_value = Prox_Matrix[str(i) + \"_\" + str(j)]\n# if closest_index > -1:\n\n# merged_indices.append(closest_index)\n\n\n\n# Driver Code\n# items = np.array([\n# [5,3],\n# [10,15],\n# [15,12],\n# [24,10],\n# [30,30],\n# [85,70],\n# [71,80],\n# [60,78],\n# [70,55],\n# [80,91]\n# ])\n\nminval = 0\nmaxval = 100\nn_points = 100\nN_Clusters = 3\n\n# Generate Random Points\nitems = np.random.randint(minval, maxval+1, (n_points, 2))\n\nplt.scatter(items[:,0], items[:,1])\nplt.show()\n\n# Clustering\nCluster = KMeans(n_clusters=N_Clusters)\nCluster.fit_predict(items)\n\nplt.scatter(items[:,0], items[:,1], c=Cluster.labels_, cmap='rainbow')\nplt.show()" }, { "alpha_fraction": 0.5466216206550598, "alphanum_fraction": 0.6385135054588318, "avg_line_length": 34.21428680419922, "blob_id": "8a2258921550a81a0b969701b62a0b264583dbfc", "content_id": "795017637292b993952d5cf6a65a6d1580127ef3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "no_license", "max_line_length": 117, "num_lines": 42, "path": "/ProblemSet_3/Subset 1/1.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n1. Suppose that the data for analysis includes the attribute age. \nThe age values for the data tuples are (in increasing order) \n13, 15, 16, 16, 19, 20, 20, 21, 22, 22, 25, 25, 25, 25, 30, 33, 33, 35, 35, 35, 35, 36, 40, 45, 46, 52, 70\n(a) Use min-max normalization to transform the value 25 for age onto the range [0:0;1:0]. \n(b) Use z-score normalization to transform the value 25 for age, where the standard deviation of age is 12.94 years. \n(c) Use normalization by decimal scaling to transform the value 25 for age such that transformed value is < 1 \n'''\nimport numpy as np\n\ndef MinMaxNorm(X):\n minVal = min(X)\n maxVal = max(X)\n X_Norm = []\n for x in X:\n X_Norm.append(round((x - minVal) / (maxVal - minVal), 2))\n return X_Norm\n\ndef ZScoreNorm(X, mean, SD):\n X_Norm = []\n for x in X:\n X_Norm.append(round(((x - mean) / SD), 2))\n return X_Norm\n\ndef DecimalScaleNorm(X):\n maxVal = max(X)\n divpow = len(str(maxVal))\n X_Norm = []\n for x in X:\n X_Norm.append(round((x / (10 ** divpow)), 2))\n return X_Norm\n\n# Driver Code\nData = [13, 15, 16, 16, 19, 20, 20, 21, 22, 22, 25, 25, 25, 25, 30, 33, 33, 35, 35, 35, 35, 36, 40, 45, 46, 52, 70]\nMinMaxNormData = MinMaxNorm(Data)\nZScoreNormData = ZScoreNorm(Data, 25.0, 12.94)\nDecimalScaleNormData = DecimalScaleNorm(Data)\n\nprint(\"Data:\", Data, \"\\n\")\nprint(\"MinMax:\", MinMaxNormData, \"\\n\")\nprint(\"ZScore:\", ZScoreNormData, \"\\n\")\nprint(\"DecimalScale:\", DecimalScaleNormData)\n\n" }, { "alpha_fraction": 0.6404275894165039, "alphanum_fraction": 0.6763848662376404, "avg_line_length": 30.212121963500977, "blob_id": "115ca83a6d62bd6c750db4e16e92a050f396fd69", "content_id": "2a5f84a9fd8a8f424405244d8cc0230e81f066be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1047, "license_type": "no_license", "max_line_length": 95, "num_lines": 33, "path": "/ProblemSet_1/5.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n(5) In Analytics and Systems of Bigdata course, for a class of 100 students, \naround 31 students secured ‘S’ grade, \n29 secured ‘B’ grade, \n25 ‘C’ grades, and \nrest of them secured ‘D’ grades. \nIf the range of each grade is 15 marks. \n(S for 85 to 100 marks, A for 70 to 85 …). \nDevelop an application that represents the above data : using Pie and Bar graphs.\n'''\nimport matplotlib.pyplot as plt\nplt.rcdefaults()\n\ndef GeneratePieChart(data, labels):\n colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'grey']\n plt.pie(data, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)\n plt.axis('equal')\n plt.show()\n\ndef GenerateBarGraph(data, labels):\n plt.bar(data, data, align='center', alpha=0.5)\n plt.xticks(data, labels)\n plt.xlabel('Grades')\n plt.ylabel('No of Students')\n plt.title('Class Performance')\n plt.show()\n\n# Driver Code\ndata = [31, 0, 29, 25, 15]\nlabels = ['S', 'A', 'B', 'C', 'D']\n\nGenerateBarGraph(data, labels)\nGeneratePieChart(data, labels)" }, { "alpha_fraction": 0.5330848097801208, "alphanum_fraction": 0.6505125761032104, "avg_line_length": 37.35714340209961, "blob_id": "a3c8ecd7dec45752341988d6a7df33f0ada120ba", "content_id": "b45b97124fa35f579e12b59b52b8b2d97ef3b5f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1073, "license_type": "no_license", "max_line_length": 127, "num_lines": 28, "path": "/ProblemSet_2/2.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n2. A Coach tracked the number of points that each of his 30 players on the team had in one game. \nThe points scored by each player is given below. \nVisualize the data using ordered stem-leaf plot and also detect the outliers and shape of the distribution. \n22, 21, 24, 19, 27, 28, 24, 25, 29, 28, 26, 31, 28, 27, 22, 39, 20, 10, 26, 24, 27, 28, 26, 28, 18, 32, 29, 25, 31, 27\n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef Generate_Stems_Leaves(data, leaflen):\n leaves = []\n stems = []\n for d in data:\n leaves.append(int(str(d)[(-1*leaflen):]))\n stems.append(int(str(d)[:(-1*leaflen)]))\n return stems, leaves\n\ndef GenerateStemPlot(stems, leaves):\n plt.title('Stem and Leaf Plot')\n plt.xlabel('Stems')\n plt.ylabel('Leaves')\n markerline, stemlines, baseline = plt.stem(stems, leaves)\n plt.show()\n\n# Driver Code\ndata = [22, 21, 24, 19, 27, 28, 24, 25, 29, 28, 26, 31, 28, 27, 22, 39, 20, 10, 26, 24, 27, 28, 26, 28, 18, 32, 29, 25, 31, 27]\nstems, leaves = Generate_Stems_Leaves(data, 1)\nGenerateStemPlot(stems, leaves)" }, { "alpha_fraction": 0.7056504487991333, "alphanum_fraction": 0.7260183691978455, "avg_line_length": 27.735849380493164, "blob_id": "4fad4f1cd2e1e515950d2ee314b194b522595471", "content_id": "61e41f31f9475a9884dec4904ae1f0764b629ae7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1522, "license_type": "no_license", "max_line_length": 118, "num_lines": 53, "path": "/ExploratoryAssignment_1/My Solutions/FinalReport/Codes/BIRCH.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nBIRCH\nBalanced Iterative Reducing and Clustering using Hierarchies Algorithm Implementation\n'''\n\n# Imports\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.cluster import Birch\n\nfrom matplotlib import pyplot as plt\n\n# Main Functions\ndef GenerateData(n_samples=450, centers=6, cluster_std=0.7, random_state=0):\n X, clusters = make_blobs(n_samples=n_samples, centers=centers, cluster_std=cluster_std, random_state=random_state)\n return X, clusters\n\ndef PlotData(X, labels=[]):\n if len(labels) == 0:\n plt.scatter(X[:,0], X[:,1], alpha=0.7, edgecolors='b')\n else:\n plt.scatter(X[:,0], X[:,1], c=labels, cmap='rainbow', alpha=0.7, edgecolors='b')\n plt.show()\n\ndef BIRCH_Library_Train(X, branching_factor=50, n_clusters=None, threshold=1.5):\n # Train Model on Input Data\n brc = Birch(branching_factor=branching_factor, n_clusters=n_clusters, threshold=threshold)\n brc.fit(X)\n return brc\n\ndef BIRCH_Library_Classify(model, X):\n labels = model.predict(X)\n return labels\n\n# Driver Code\nn_samples=450\ncenters=6\ncluster_std=0.7\nrandom_state=0\n\nbranching_factor = 50\nn_clusters = None\nthreshold = 1.5\n\n# Generate Data\nX, clusters = GenerateData(n_samples=n_samples, centers=centers, cluster_std=cluster_std, random_state=random_state)\nPlotData(X)\n\n# Train and Test Model\nbrc = BIRCH_Library_Train(X, branching_factor=branching_factor, n_clusters=n_clusters, threshold=threshold)\nlabels = BIRCH_Library_Classify(brc, X)\n\n# Plot Results\nPlotData(X, labels=labels)" }, { "alpha_fraction": 0.5749292969703674, "alphanum_fraction": 0.599434494972229, "avg_line_length": 26.921052932739258, "blob_id": "ebe68dc151e3ef3b7eaf0e73f515427e2beb46d9", "content_id": "5bf4fe36b869b8d5ebddb18f7903efd2d5055020", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1061, "license_type": "no_license", "max_line_length": 118, "num_lines": 38, "path": "/ProblemSet_1/1.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nGiven the following setup {Class,Tally score, Frequency}, develop an application that generates the table shown \n(you can populate the relevant data ; minimum data size :50 records). \nThe table is only an illustration for a data of color scores, \nyou are free to test the application over any data set with the application generating the tally and frequency scores.\n'''\nimport random\n\ndef GenerateTallyStr(no):\n five = '||||\\\\ '\n tally = five * int(no / 5) + '|' * (no % 5)\n return tally\n\ndef FreqDist(X):\n # Accepts only 1, 2, 3, other\n freq = {}\n freq['1'] = 0\n freq['2'] = 0\n freq['3'] = 0\n freq['other'] = 0\n for x in X:\n if x in [1, 2, 3]:\n freq[str(x)] += 1\n else:\n freq['other'] += 1\n return freq\n\n# Driver Code\nData = []\nit = 20\nfor i in range(it):\n Data.append(random.randint(1, 5))\nclasses = ['1', '2', '3', 'other']\nfreq = FreqDist(Data)\n\nprint(\"Score\\t\\t\", \"Tally\\t\\t\", \"Frequency\")\nfor c in classes:\n print(c + \"\\t\\t\", GenerateTallyStr(freq[c]) + \"\\t\\t\", str(freq[c]))\n" }, { "alpha_fraction": 0.6519393920898438, "alphanum_fraction": 0.6646775007247925, "avg_line_length": 39.91549301147461, "blob_id": "91190bb86169dfcb414bf36f1d18f67b98f41777", "content_id": "5e8294831814bf29b398df17778f38606afbf3cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8714, "license_type": "no_license", "max_line_length": 247, "num_lines": 213, "path": "/ReadingAssignment_1/Codes/GeneticOptimisation.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nOptimization using Genetic Operator\n'''\n# Imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n# Main Functions\n# Fitness Functions\ndef PolyLinearFitness(equation_inputs, pop):\n # Calculating the fitness value of each solution in the current population.\n # The fitness function calculates the sum of products between each input and its corresponding weight.\n fitness = np.sum(pop*equation_inputs, axis=1)\n return fitness\n\ndef PolynomialFitness(equation_inputs, pop, boundary=(None, None)):\n # A + Bx2 + Cx3 ...\n fitness = np.array([[0]]*pop.shape[0])\n for eqi in range(len(equation_inputs)):\n a = np.power(pop, eqi)\n b = np.multiply(equation_inputs[eqi], a)\n fitness = fitness + np.multiply(equation_inputs[eqi], np.power(pop, eqi))\n return np.reshape(fitness, fitness.shape[0])\n\ndef PlotFitness(fitness_func, equation_inputs, start, stop, step):\n x = np.arange(start, stop, step)\n x = np.reshape(x, (x.shape[0], 1))\n fitness = fitness_func(equation_inputs, x)\n plt.plot(x, fitness)\n plt.show()\n\n# Parent Selection Functions\ndef select_mating_pool_bestfitness(pop, fitness, num_parents):\n # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.\n parents = np.empty((num_parents, pop.shape[1]))\n for parent_num in range(num_parents):\n max_fitness_idx = np.where(fitness == np.max(fitness))\n max_fitness_idx = max_fitness_idx[0][0]\n parents[parent_num, :] = pop[max_fitness_idx, :]\n fitness[max_fitness_idx] = -99999999999\n return parents\n\n# Crossover Functions\ndef crossover_OnePoint(parents, offspring_size):\n offspring = np.empty(offspring_size)\n # The point at which crossover takes place between two parents. Usually, it is at the center.\n crossover_point = np.uint8(offspring_size[1]/2)\n\n for k in range(offspring_size[0]):\n # Index of the first parent to mate.\n parent1_idx = k%parents.shape[0]\n # Index of the second parent to mate.\n parent2_idx = (k+1)%parents.shape[0]\n # The new offspring will have its first half of its genes taken from the first parent.\n offspring[k, 0:crossover_point] = parents[parent1_idx, 0:crossover_point]\n # The new offspring will have its second half of its genes taken from the second parent.\n offspring[k, crossover_point:] = parents[parent2_idx, crossover_point:]\n return offspring\n\n# Mutation Functions\ndef mutation_UniformNoise(offspring_crossover, mutated_gene_index=None, boundary=(None, None)):\n if mutated_gene_index == None:\n mutated_gene_index = np.random.randint(0, offspring_crossover.shape[1])\n # Mutation changes a single gene in each offspring randomly.\n for idx in range(offspring_crossover.shape[0]):\n # The random mutation to be added to the gene.\n random_mutation = np.random.uniform(-1.0, 1.0, 1)\n newoff = offspring_crossover[idx, mutated_gene_index] + random_mutation\n if not boundary[0] == None and newoff < boundary[0]:\n continue\n if not boundary[1] == None and newoff > boundary[1]:\n continue\n offspring_crossover[idx, mutated_gene_index] = newoff\n return offspring_crossover\n\n# Optimiser Function\ndef GeneticOpimizer(equation_inputs, num_weights, sol_per_pop, num_generations, num_parents_mating, fitness_func, select_mating_pool, crossover, mutation, boundary=(None, None), verbose=False, ncols=1, Summary=True):\n\n pop_size = (sol_per_pop, num_weights) # The population will have sol_per_pop chromosomes where each chromosome has num_weights genes.\n lowerbound = boundary[0]\n upperbound = boundary[1]\n if boundary[0] == None:\n lowerbound = -4.0\n if boundary[1] == None:\n upperbound = 4.0\n new_population = np.random.uniform(low=lowerbound, high=upperbound, size=pop_size) # Creating the initial population.\n\n print(\"No of Generations:\", num_generations)\n print(\"No of selected parents per generation:\", num_parents_mating)\n print(\"\\n\")\n\n max_fitness_history = []\n max_fitness_ingen_history = []\n best_chromosome_history = []\n best_chromosome_ingen_history = []\n\n max_fitness = None\n best_chromosome = None\n for generation in tqdm(range(num_generations)):\n # Measuring the fitness of each chromosome in the population.\n fitness = fitness_func(equation_inputs, new_population)\n\n # Record History\n max_fitness_ingen_history.append(np.max(fitness))\n best_chromosome_ingen_history.append(list(new_population[np.argmax(fitness)]))\n\n # Print\n if not max_fitness == None and verbose:\n print(\"Best result after generation\", str(generation - 1) + \":\", np.max(fitness))\n print(\"Improvement in result:\", str(np.max(fitness) - max_fitness))\n\n # Update Best Values\n if max_fitness == None or max_fitness < np.max(fitness):\n max_fitness = np.max(fitness)\n best_chromosome = new_population[np.argmax(fitness)]\n # Record History\n max_fitness_history.append(max_fitness)\n best_chromosome_history.append(list(best_chromosome))\n\n # Selecting the best parents in the population for mating.\n parents = select_mating_pool(new_population, fitness, num_parents_mating)\n\n # Generating next generation using crossover.\n offspring_crossover = crossover(parents, offspring_size=(pop_size[0] - parents.shape[0], num_weights))\n\n # Adding some variations to the offsrping using mutation.\n offspring_mutation = mutation(offspring_crossover, mutated_gene_index=None, boundary=boundary)\n\n # Prints\n if verbose:\n print(\"Generation:\", str(generation + 1), \"\\n\\n\")\n\n print(\"Fitness Values:\\n\")\n print(fitness)\n print(\"\\n\")\n\n print(\"Selected Parents:\\n\")\n for p in parents:\n print(p)\n print(\"\\n\")\n\n print(\"Crossover Result:\\n\")\n for off in offspring_crossover:\n print(off)\n print(\"\\n\")\n\n print(\"Mutation Result:\\n\")\n for off in offspring_mutation:\n print(off)\n print(\"\\n\\n\")\n\n # Creating the new population based on the parents and offspring.\n new_population[0 : parents.shape[0], :] = parents\n new_population[parents.shape[0] : , :] = offspring_mutation\n\n if Summary:\n print(\"Summary:\\n\")\n\n # Best Performer Chromosome\n print(\"Best Fitness:\", max_fitness)\n print(\"Best Chromosome:\", best_chromosome)\n print(\"\\n\\n\")\n\n # Plots\n # Best Fitness Per Generation Plot\n plt.plot(range(1, num_generations+1), max_fitness_ingen_history)\n plt.show()\n\n # Best Chromosome Per Generation Plot\n best_chromosome_ingen_history = np.array(best_chromosome_ingen_history)\n n_genes = len(best_chromosome)\n nrows = int(n_genes / ncols) + 1\n\n gen_range = range(1, num_generations+1)\n for gene_index in range(n_genes):\n ax = plt.subplot(nrows, ncols, gene_index+1)\n ax.title.set_text(\"Gene \" + str(gene_index+1) + \": Input: \" + str(equation_inputs[gene_index]) + \" , Best: \" + str(best_chromosome[gene_index]))\n plt.plot(gen_range, best_chromosome_ingen_history[:, gene_index])\n plt.show()\n\n return max_fitness, best_chromosome\n\n# Driver Code\n# Parameters\nverbose = False\nSummary = True\n\nsol_per_pop = 200 # Defining the population size.\nnum_generations = 5000\nnum_parents_mating = 100\n\nselect_mating_pool = select_mating_pool_bestfitness\ncrossover = crossover_OnePoint\nmutation = mutation_UniformNoise\n\nncols = 1\n\n# Q2 -> x^3 - 2(x^2) + x within (0, 31)\nprint(\"Q2\")\nfitness_func = PolynomialFitness\nboundary = (0, 31)\nequation_inputs = [0, -2, 1]\nnum_weights = 1 # Number of the weights we are looking to optimize.\nmax_fitness, best_chromosome = GeneticOpimizer(equation_inputs, num_weights, sol_per_pop, num_generations, num_parents_mating, fitness_func, select_mating_pool, crossover, mutation, boundary=boundary, verbose=verbose, ncols=ncols, Summary=Summary)\n\n# Q7 -> GA Operator Working\nprint(\"Q7\")\nfitness_func = PolyLinearFitness\nboundary = (None, None)\nequation_inputs = [4, -2, 3.5, 5, -11, -4.7, 2.5, 0.1]\nnum_weights = len(equation_inputs) # Number of the weights we are looking to optimize.\nmax_fitness, best_chromosome = GeneticOpimizer(equation_inputs, num_weights, sol_per_pop, num_generations, num_parents_mating, fitness_func, select_mating_pool, crossover, mutation, boundary=boundary, verbose=verbose, ncols=ncols, Summary=Summary)" }, { "alpha_fraction": 0.6960784196853638, "alphanum_fraction": 0.7184873819351196, "avg_line_length": 25.44444465637207, "blob_id": "cba347d64f06e969b29ee3083b6ad41930c417fe", "content_id": "87fa45a39ce67d6ea550ebfa7b837a27f20a8eef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "no_license", "max_line_length": 101, "num_lines": 27, "path": "/ProblemSet_2/6.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n6. Generate random numbers from the following distribution and visualize the data using violin plot. \n(i) Standard-Normal distribution. \n(ii) Log-Normal distribution. \n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef GenerateStandardNormalDist(n, mean=0.0, SD=1.0):\n return np.random.normal(mean, SD, (n))\n\ndef GenerateLogNormalDist(n, mean=0.0, SD=1.0):\n return np.random.lognormal(mean, SD, (n))\n\ndef ViolinPlot(X, title=''):\n plt.violinplot(X)\n plt.title(title)\n plt.show()\n\n# Driver Code\nn = 100\nmean = 0.0\nSD = 1.0\nSNDist = GenerateStandardNormalDist(n, mean, SD)\nLNDist = GenerateLogNormalDist(n, mean, SD)\nViolinPlot(SNDist, 'Standard Normal')\nViolinPlot(LNDist, 'Log Normal')\n" }, { "alpha_fraction": 0.7918660044670105, "alphanum_fraction": 0.8086124658584595, "avg_line_length": 68.83333587646484, "blob_id": "f7052a156539329cfb923494d9c4372707175aaa", "content_id": "c865e754bbcc6a03a530d6a16e17c00483ef66ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 418, "license_type": "no_license", "max_line_length": 132, "num_lines": 6, "path": "/Hadoop_Assignment/Reference/Readme.txt", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "The following zip contains\n1. Installation steps for Hadoop (hadoop_installation.ppt)\n2. word_count.zip, matrixMul.zip as sample codes for reference for Hadoop\n3. Execution steps for Hadoop (execution of Hadoop program.doc)\n4. Slides for Bigdata and Raid (Lecture1.pptx, Lecture2.pptx)\n5. Spark folder which has material related to spark, installation of hadoop and spark in cloudera and other refernce material links." }, { "alpha_fraction": 0.449739545583725, "alphanum_fraction": 0.4622631072998047, "avg_line_length": 35.8326530456543, "blob_id": "05621eb089de85df4645a4b1475e9a1b7af7f277", "content_id": "6fd2ba002577503f3fabde90e4716e78e7c14772", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9023, "license_type": "no_license", "max_line_length": 113, "num_lines": 245, "path": "/Assignment_1/Codes/Apriori.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nApriori Python Implementation\n'''\n\n# Imports\nimport sys\nimport csv\nimport collections\n\n# Functions\ndef generate_rules(overall_dict, csvfile):\n rules_dict = {}\n rules_array = []\n count = 0\n # combine two dictionaries\n # overall_dict = {**overall_dict, **vfi_dict}\n #\n #\n # for vfi1 in overall_dict.keys():\n # for vfi2 in overall_dict.keys():\n # if len(vfi1) + len(vfi2) == len(list(vfi_dict.keys())[0]):\n # if not vfi1 == vfi2:\n #\n # combine = vfi1 + vfi2\n # # print(overall_dict)\n # if combine in overall_dict.keys():\n # # compute support percentage\n # support_percentage = overall_dict[combine][0]\n # # compute confidence\n # conf = overall_dict[combine][1] / overall_dict[vfi1][1]\n # overall_dict = collections.OrderedDict(sorted(overall_dict.items()))\n for index1, item1 in enumerate(overall_dict.keys()):\n for index2, item2 in enumerate(overall_dict.keys()):\n # print(item1, item2)\n contain = False\n for i in item1:\n # print(i)\n if i in item2:\n contain = True\n break\n if contain: # at least one char in item1 is in item2\n continue\n else:\n # rules_array[count].append(item1)\n # rules_array[count].append(item2)\n test = set()\n for c in item1:\n test.add(c)\n for c in item2:\n test.add(c)\n # print('test', test)\n test = sorted(test)\n phrase = ()\n for t in test:\n phrase += (t,)\n # print(phrase)\n if phrase in overall_dict.keys():\n # supp same with that of phrase\n support_percentage = overall_dict[phrase][0]\n # conf = sup(phrase) / sup(first one)\n conf = overall_dict[phrase][1] / overall_dict[item1][1]\n # print('phrase',phrase)\n if support_percentage >= min_support_percentage and conf >= min_confidence:\n rules_array.append([])\n rules_array[count].append(support_percentage)\n rules_array[count].append(conf)\n rules_array[count].append(item1)\n rules_array[count].append(item2)\n count += 1\n csvfile.write('rule,')\n csvfile.write(str(\"%0.6f\" % support_percentage))\n csvfile.write(',')\n csvfile.write(str(\"%0.6f\" % conf))\n csvfile.write(',')\n for i in item1:\n csvfile.write(str(i))\n csvfile.write(',')\n csvfile.write('\\'=>\\'')\n csvfile.write(',')\n for index, i in enumerate(item2):\n csvfile.write(i)\n if not index == len(item2)-1:\n csvfile.write(',')\n csvfile.write('\\n')\n\n return rules_dict\n\ndef generate_cfi_vfi(cfi_dict, vfi_dict, overall_dict):\n vfi_dict = collections.OrderedDict(sorted(vfi_dict.items()))\n cfi_dict = collections.OrderedDict(sorted(cfi_dict.items()))\n vfi_dict = {}\n rules_dict = {}\n if cfi_dict == {}:\n with open(input_filename) as csvfile:\n reader = csv.reader(csvfile)\n for itemset in reader:\n # print(itemset)\n # itemset = itemset.strip()\n for index,i in enumerate(itemset):\n itemset[index] = i.strip()\n content.append(itemset[1:len(itemset)])\n # print(content)\n for key, item in enumerate(itemset):\n if key > 0: #support count\n # print(type(item))\n item = (item.strip(),)\n print(item)\n if not item in cfi_dict:\n cfi_dict[item] = []\n cfi_dict[item].append(0)\n cfi_dict[item].append(1)\n # print(cfi_dict)\n else:\n cfi_dict[item][1] += 1\n # print(len(content))\n for key in cfi_dict.keys():\n print(key)\n support_percentage = cfi_dict[key][1] / len(content)\n cfi_dict[key][0] = support_percentage\n if support_percentage >= min_support_percentage:\n # if key == ('a134',):\n # print('here', key)\n vfi_dict[key] = [support_percentage, cfi_dict[key][1]] # support percentage and support count\n\n else:\n support_count = 0\n for key in cfi_dict.keys():\n print(type(key))\n print(key, cfi_dict[key][0])\n if cfi_dict[key][0] >= min_support_percentage:\n vfi_dict[key] = [cfi_dict[key][0], cfi_dict[key][1]]\n # for every vfi_dict of key with length n (A,B,C), do the rule with overall length n (A->B)\n # rules_dict = generate_rules(vfi_dict, overall_dict)\n # print(vfi_dict)\n return [cfi_dict, vfi_dict, rules_dict]\n\ndef generate_largercfi(vfi_dict, cfi_dict):\n vfi_dict = collections.OrderedDict(sorted(vfi_dict.items()))\n cfi_dict = collections.OrderedDict(sorted(cfi_dict.items()))\n keys = list(vfi_dict.keys())\n # print('len(keys)',len(keys))\n print(keys)\n # print(len(keys))\n if len(keys) < 2:\n return {}\n for i in range(len(keys)):\n for j in range(len(keys)):\n key1 = keys[i]\n key2 = keys[j]\n test = set()\n for item1 in key1:\n test.add(item1)\n for item2 in key2:\n test.add(item2)\n\n test = sorted(test)\n # print(test)\n # prin\n # print(len(keys[0]))\n # print(test)\n # print(test)\n # print(test)\n # print('test[0]',test[0])\n # print(len(test))\n # print(len(test))\n # print(len(keys[0]))\n key = (keys[0],)\n # print(len(key))\n if len(test) == len(keys[0])+1:\n # print('here')\n new_key = () # tuple\n support_count = 0\n for item in test:\n new_key += (item,)\n # print('new_key', new_key)\n\n cfi_dict[new_key] = []\n\n for trans in content:\n # print('trans',trans)\n flag = True\n for index, k in enumerate(new_key):\n if not k in trans:\n flag = False\n if index == len(new_key)-1:\n if flag:\n support_count += 1\n\n cfi_dict[new_key].append(support_count / len(content))\n cfi_dict[new_key].append(support_count)\n # print(new_key)\n # new_key = keys[i] + keys[j]\n # cfi_dict[new_key] = min(vfi_dict[keys[i]], vfi_dict[keys[j]])\n # print(cfi_dict)\n return cfi_dict\n\ndef outputSetToFile(mydict, csvfile):\n orderedDict = collections.OrderedDict(sorted(mydict.items()))\n for key, value in orderedDict.items():\n csvfile.write('set,')\n csvfile.write(str( \"%0.6f\" % value[0]))\n csvfile.write(',')\n for idx in range(len(key)):\n csvfile.write(key[idx])\n # print('112312312312312312312',key[idx])\n if idx == len(key)-1:\n csvfile.write('\\n')\n else:\n csvfile.write(',')\n \n# Driver Code\ninput_filename = sys.argv[1]\noutput_filename = sys.argv[2]\nmin_support_percentage = float(sys.argv[3])\nmin_confidence = float(sys.argv[4])\ntotal_num_trans = 0\ncontent = []\n\nindex = 1\ncfi_dict = {}\nvfi_dict = {}\noverall_dict = {}\nwith open(output_filename, 'w') as csvfile:\n while True:\n # print(index)\n [cfi_dict, vfi_dict, rules_dict] = generate_cfi_vfi(cfi_dict, vfi_dict, overall_dict)\n # print(content)\n outputSetToFile(vfi_dict, csvfile)\n overall_dict = {**overall_dict, **vfi_dict}\n # if index < 9:\n # print(vfi_dict)\n\n cfi_dict = {}\n # print(vfi_dict)\n cfi_dict = generate_largercfi(vfi_dict, cfi_dict)\n\n # if index == 3:\n if cfi_dict == {}:\n # print(vfi_dict)\n break\n index += 1\n\n # print(overall_dict)\n # print(overall_dict)\n generate_rules(overall_dict, csvfile)" }, { "alpha_fraction": 0.5464503169059753, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 25.80434799194336, "blob_id": "cc30e1d85237c5e660adca4d8494bc2ae6cdbab7", "content_id": "732e716476136623bab83166207a834845d1a6a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2465, "license_type": "no_license", "max_line_length": 135, "num_lines": 92, "path": "/ProblemSet_2/3.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n3. For a sample space of 15 people, a statistician wanted to know the consumption of water and other beverages. \nHe collected their average consumption of water and beverages for 30 days (in litres). \nHelp him to visualize the data using density plot, rug plot and identify the mean, median, mode and skewness of the data from the plot.\nWATER 3.2, 3.5, 3.6, 2.5, 2.8, 5.9, 2.9, 3.9, 4.9, 6.9, 7.9, 8.0, 3.3, 6.6, 4.4 \nBEVERAGES 2.2, 2.5, 2.6, 1.5, 3.8, 1.9, 0.9, 3.9, 4.9, 6.9, 0.1, 8.0, 0.3, 2.6, 1.4 \n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\ndef Sum(X):\n return np.sum(np.array(X))\n\ndef Max(X):\n return np.max(np.array(X))\n\ndef FreqDist(X):\n freq = {}\n for x in X:\n freq[x] = 0\n for x in X:\n freq[x] += 1\n return freq\n\ndef Mean(X):\n return Sum(X) / len(X)\n\ndef Median(X):\n return np.median(np.array(X))\n\ndef Mode(X):\n modex = -1\n modex_freq = -1\n\n freq = FreqDist(X)\n\n for key in freq.keys():\n if freq[key] > modex_freq:\n modex = key\n modex_freq = freq[key]\n\n return modex\n\ndef StandardDeviation(X):\n return np.std(np.array(X))\n\ndef Skewness(X):\n return (Mean(X) - Mode(X)) / StandardDeviation(X)\n\ndef DensityPlot(X, labels):\n for x, label in zip(X, labels):\n sns.distplot(x, hist = False, kde = True,\n kde_kws = {'linewidth': 3},\n label = label)\n \n # Plot formatting\n plt.legend(prop={'size': 16}, title = 'Water vs Beverage')\n plt.title('Density Plot')\n plt.xlabel('Days')\n plt.ylabel('Consumption')\n plt.show()\n\ndef RugPlot(X, labels):\n for x, label in zip(X, labels):\n sns.rugplot(x, label=label)\n plt.title('Rug Plot')\n plt.xlabel('Days')\n plt.ylabel('Consumption')\n plt.show()\n\n# Driver Code\nWATER = [3.2, 3.5, 3.6, 2.5, 2.8, 5.9, 2.9, 3.9, 4.9, 6.9, 7.9, 8.0, 3.3, 6.6, 4.4]\nBEVERAGES = [2.2, 2.5, 2.6, 1.5, 3.8, 1.9, 0.9, 3.9, 4.9, 6.9, 0.1, 8.0, 0.3, 2.6, 1.4]\n\nprint(\"WATER analysis:\")\nprint(\"Mean:\", Mean(WATER))\nprint(\"Median:\", Median(WATER))\nprint(\"Mode:\", Mode(WATER))\nprint(\"Skewness:\", Skewness(WATER))\n\nprint(\"BEVERAGES analysis:\")\nprint(\"Mean:\", Mean(BEVERAGES))\nprint(\"Median:\", Median(BEVERAGES))\nprint(\"Mode:\", Mode(BEVERAGES))\nprint(\"Skewness:\", Skewness(BEVERAGES))\n\nDensityPlot([WATER, BEVERAGES], ['Water', 'Beverages'])\n\nRugPlot([WATER, BEVERAGES], ['Water', 'Beverages'])\n\n# RUG PLOT AND DENSITY PLOT LEFT" }, { "alpha_fraction": 0.5572112798690796, "alphanum_fraction": 0.5848855972290039, "avg_line_length": 28.841270446777344, "blob_id": "6bd646def779cf88d61c8626f746a492152c8757", "content_id": "2f5f56adc93152d726a6088ed0a606466ac60093", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1879, "license_type": "no_license", "max_line_length": 130, "num_lines": 63, "path": "/ProblemSet_1/2.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n(2) In a class of 18 students, assume marks distribution in an exam are as follows. \nLet the roll numbers start with CSE20D01 and all the \nodd roll numbers secure marks as follows: 25+((i+7)%10) and \neven roll numbers : 25+((i+8)%10). \nDevelop an application that sets up the data and calculate the mean and median for the marks obtained using the platform support.\n'''\n\ndef GenerateMarks(n_students, rollno_prefix):\n marks = []\n rollnos = []\n for i in range(n_students):\n zero_prefix = '0' * (len(str(n_students)) - len(str(i)))\n rollno = rollno_prefix + zero_prefix + str(i)\n mark = 0\n if i % 2 == 0:\n mark = 25 + ((i + 8) % 10)\n else:\n mark = 25 + ((i + 7) % 10)\n marks.append(mark)\n rollnos.append(rollno)\n return marks, rollnos\n\ndef Mean(marks):\n sum = 0\n for mark in marks:\n sum += mark\n return sum / len(marks)\n\ndef Median(marks):\n BubbleSort(marks)\n\n if len(marks) % 2 == 1:\n return marks[int((len(marks) - 1)/2)]\n else:\n return (marks[int(len(marks)/2)] + marks[int(len(marks)/2 - 1)]) / 2\n\ndef BubbleSort(arr):\n n = len(arr)\n\n # Traverse through all array elements\n for i in range(n):\n \n # Last i elements are already in place\n for j in range(0, n-i-1):\n \n # traverse the array from 0 to n-i-1\n # Swap if the element found is greater\n # than the next element\n if arr[j] > arr[j+1] :\n arr[j], arr[j+1] = arr[j+1], arr[j]\n\n# Driver Code\nn_students = int(input(\"Enter no of students: \"))\nrollno_prefix = 'CSE20D'\nmarks, rollnos = GenerateMarks(n_students, rollno_prefix)\nprint(\"Marks: \")\ni = 0\nfor mark, rollno in zip(marks, rollnos):\n print(str(i+1) + \":\", rollno, \"-\", mark)\n i += 1\nprint(\"Mean Marks:\", Mean(marks))\nprint(\"Median Marks:\", Median(marks))" }, { "alpha_fraction": 0.5253063440322876, "alphanum_fraction": 0.5515450239181519, "avg_line_length": 35.45145797729492, "blob_id": "33a53345fb2c58c1687be88dba217e8ed5cf4bc7", "content_id": "6a58369d944832467a20522c09f7c2bc87ecffef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7508, "license_type": "no_license", "max_line_length": 157, "num_lines": 206, "path": "/Assignment_1/Codes/PincerSearch.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "#### MFI\n\nfrom itertools import combinations\n\ndef pruneCandidatesUsingMFS(candidate_itemsets, MFS):\n candidate_itemsets = candidate_itemsets.copy()\n for itemset in candidate_itemsets.copy():\n if any(all(_item in _MFS_itemset for _item in itemset) for _MFS_itemset in MFS):\n candidate_itemsets.remove(itemset)\n return candidate_itemsets\n\n\ndef generateCandidateItemsets(level_k, level_frequent_itemsets):\n n_frequent_itemsets = len(level_frequent_itemsets)\n candidate_frequent_itemsets = []\n for i in range(n_frequent_itemsets):\n j = i+1\n while (j<n_frequent_itemsets) and (level_frequent_itemsets[i][:level_k-1] == level_frequent_itemsets[j][:level_k-1]):\n candidate_itemset = level_frequent_itemsets[i][:level_k-1] + [level_frequent_itemsets[i][level_k-1]] + [level_frequent_itemsets[j][level_k-1]]\n candidate_itemset_pass = False\n if level_k == 1:\n candidate_itemset_pass = True\n elif (level_k == 2) and (candidate_itemset[-2:] in level_frequent_itemsets):\n candidate_itemset_pass = True\n elif all((list(_)+candidate_itemset[-2:]) in level_frequent_itemsets for _ in combinations(candidate_itemset[:-2], level_k-2)):\n candidate_itemset_pass = True\n if candidate_itemset_pass:\n candidate_frequent_itemsets.append(candidate_itemset)\n j += 1\n return candidate_frequent_itemsets\n\n\ndef pruneCandidatesUsingMFCS(candidate_itemsets, MFCS):\n\tcandidate_itemsets = candidate_itemsets.copy()\n\n\tfor itemset in candidate_itemsets.copy():\n\t\tif not any(all(_item in _MFCS_itemset for _item in itemset) for _MFCS_itemset in MFCS):\n\t\t\tcandidate_itemsets.remove(itemset)\n\n\treturn candidate_itemsets\n\n\ndef generateMFCS(MFCS, infrequent_itemsets):\n MFCS = MFCS.copy()\n for infrequent_itemset in infrequent_itemsets:\n for MFCS_itemset in MFCS.copy():\n\t\t\t# If infrequent itemset is a subset of MFCS itemset\n if all(_item in MFCS_itemset for _item in infrequent_itemset):\n MFCS.remove(MFCS_itemset)\n for item in infrequent_itemset:\n updated_MFCS_itemset = MFCS_itemset.copy()\n updated_MFCS_itemset.remove(item)\n if not any(all(_item in _MFCS_itemset for _item in updated_MFCS_itemset) for _MFCS_itemset in MFCS):\n MFCS.append(updated_MFCS_itemset)\n return MFCS\n\n\ndef pincerSearch(transactions, min_support):\n items = set()\n for transaction in transactions:\n items.update(transaction)\n items = sorted(list(items))\n level_k = 1 \n level_frequent_itemsets = [] \n candidate_frequent_itemsets = [[item] for item in items] \n level_infrequent_itemsets = [] \n MFCS = [items.copy()] \n MFS = [] \n print(\"MFCS = {}\".format(MFCS))\n print(\"MFS = {}\\n\".format(MFS))\n while candidate_frequent_itemsets:\n print(\"LEVEL {}: \".format(level_k))\n print(\"C{} = {}\".format(level_k, candidate_frequent_itemsets))\n candidate_freq_itemsets_cnts = [0]*len(candidate_frequent_itemsets)\n MFCS_itemsets_cnts = [0]*len(MFCS)\n for transaction in transactions:\n for i, itemset in enumerate(candidate_frequent_itemsets):\n if all(_item in transaction for _item in itemset):\n candidate_freq_itemsets_cnts[i] += 1\n for i, itemset in enumerate(MFCS):\n if all(_item in transaction for _item in itemset):\n MFCS_itemsets_cnts[i] += 1\n for itemset, support in zip(candidate_frequent_itemsets, candidate_freq_itemsets_cnts):\n print(\"{} -> {}\".format(itemset, support), end=', ')\n print()\n print(\"MFCS Change\")\n for itemset, support in zip(MFCS, MFCS_itemsets_cnts):\n print(\"{} -> {}\".format(itemset, support), end=', ')\n print()\n print(\"\")\n MFS.extend([itemset for itemset, support in zip(MFCS, MFCS_itemsets_cnts) if ((support >= min_support) and (itemset not in MFS))])\n print(\"MFS = {}\".format(MFS))\n level_frequent_itemsets = [itemset for itemset, support in zip(candidate_frequent_itemsets, candidate_freq_itemsets_cnts) if support >= min_support]\n level_infrequent_itemsets = [itemset for itemset, support in zip(candidate_frequent_itemsets, candidate_freq_itemsets_cnts) if support < min_support]\n print(\"L{} = {}\".format(level_k, level_frequent_itemsets))\n print(\"S{} = {}\".format(level_k, level_infrequent_itemsets))\n MFCS = generateMFCS(MFCS, level_infrequent_itemsets)\n print(\"MFCS = {}\".format(MFCS))\n level_frequent_itemsets = pruneCandidatesUsingMFS(level_frequent_itemsets, MFS)\n print(\"After Pruning: L{} = {}\\n\".format(level_k, level_frequent_itemsets))\n candidate_frequent_itemsets = generateCandidateItemsets(level_k, level_frequent_itemsets)\n candidate_frequent_itemsets = pruneCandidatesUsingMFCS(candidate_frequent_itemsets, MFCS)\n level_k += 1\n return MFS\n\n\n\ntransactions = [\n {'a', 'b', 'd'},\n {'a', 'e'},\n {'a', 'c', 'e'},\n {'a', 'b', 'c'},\n {'b', 'c', 'd', 'e'},\n {'b', 'd', 'e'},\n {'b', 'c', 'e'},\n {'a', 'c', 'd'},\n {'a', 'b', 'd', 'e'},\n {'a', 'b', 'c', 'd', 'e'},\n {'b', 'd'},\n {'c', 'd'},\n {'a', 'b'},\n {'a', 'd'},\n {'d', 'e'}\n]\n\n# transactions = [\n# {1, 2, 4},\n# {1, 5},\n# {1, 3, 5},\n# {1, 2, 3},\n# {2, 3, 4, 5},\n# {2, 4, 5},\n# {2, 3, 5},\n# {1, 3, 4},\n# {1, 2, 4, 5},\n# {1, 2, 3, 4, 5},\n# {2, 4},\n# {3, 4},\n# {1, 2},\n# {1, 4},\n# {4, 5}\n# ]\n\nmin_support_count = 2\nMFS = pincerSearch(transactions, min_support_count)\nprint(\"MFS = {}\".format(MFS))\n\n\n\n# transactions = [ Dataset_Enc = [\n# {'a', 'b', 'd'}, [1, 1, 0, 1, 0],\n# {'a', 'e'}, [1, 0, 0, 0, 1],\n# {'a', 'c', 'e'}, [1, 0, 1, 0, 1],\n# {'a', 'b', 'c'}, [1, 1, 1, 0, 0],\n# {'b', 'c', 'd', 'e'}, [0, 1, 1, 1, 1],\n# {'b', 'd', 'e'}, [0, 1, 0, 1, 1],\n# {'b', 'c', 'e'}, [0, 1, 1, 0, 1],\n# {'a', 'c', 'd'}, [1, 0, 1, 1, 0],\n# {'a', 'b', 'd', 'e'}, [1, 1, 0, 1, 1],\n# {'a', 'b', 'c', 'd', 'e'}, [1, 1, 1, 1, 1],\n# {'b', 'd'}, [0, 1, 0, 1, 0],\n# {'c', 'd'}, [0, 0, 1, 1, 0],\n# {'a', 'b'}, [1, 1, 0, 0, 0],\n# {'a', 'd'}, [1, 0, 0, 1, 0],\n# {'d', 'e'} [0, 0, 0, 1, 1]\n# ] \n# minSupp = 2\n\n# C1 = a b c d e\n# count = 9 9 7 10 8\n# S1 = null\n# L1 = a b c d e\n# MFCS = a b c d e\n\n# C2 = ab ac ad ae bc bd be cd ce de\n# count = 5 4 5 4 4 6 5 4 4 5\n# S2 = null\n# L2 = ab ac ad ae bc bd be cd ce de\n# MFCS = a b c d e\n\n# C3 = abc abd abe acd ace ade bcd bce bde cde\n# count = 2 3 2 2 2 2 2 3 3 2\n# S3 = null\n# L3 = abc abd abe acd ace ade bcd bce bde cde\n# MFCS = a b c d e\n\n# C4 = abcd abce abde acde bcde\n# count = 1 1 2 1 2\n# S4 = abcd abce acde\n# MFCS = \n# abcd -- bcde acde abde abce abcd\n# abce -- bcde acde abde abcd [bce ace abe abc]\n# acde -- bcde abde abcd [bce ace abe abc] [cde ade ace acd]\n\n# MFCS = bcde abde abcd [bce ace abe abc] [cde ade ace acd]\n# MFI Count = 2 2 1 [3 2 2 2] [2 2 2 2]\n# L4 = abde bcde\n\n# C5 = abcde\n# count = 1\n# MFI = bcde abde bce ace abe abc cde ade acd\n# removed subsets\n# MFI = bcde abde ace abc acd\n# S5 = abcde\n# MFCS = same as before abcde is not subset of any\n# L5 = null" }, { "alpha_fraction": 0.6536231637001038, "alphanum_fraction": 0.6855072379112244, "avg_line_length": 29.04347801208496, "blob_id": "6650eb587dad40c2b28e0b2faf6cee89d73bd059", "content_id": "8bb9129ec43160d1e292491a1b7197c14e09734f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 690, "license_type": "no_license", "max_line_length": 95, "num_lines": 23, "path": "/ProblemSet_1/6.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n(6) On a given day (average basis), a student is observed to spend \n33% of time in studying, \n30% in sleeping, \n18% in playing, \n5% for hobby activities, \nand rest for spending with friends and family. \nPlot a pie chart showing his daily activities. \n'''\nimport matplotlib.pyplot as plt\nplt.rcdefaults()\n\ndef GeneratePieChart(data, labels):\n colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'grey']\n plt.pie(data, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)\n plt.axis('equal')\n plt.show()\n\n# Driver Code\ndata = [33, 30, 18, 5, 14]\nlabels = ['Study', 'Sleep', 'Play', 'Hobby', 'Friend & Family']\n\nGeneratePieChart(data, labels)" }, { "alpha_fraction": 0.5840634107589722, "alphanum_fraction": 0.610346257686615, "avg_line_length": 25.351648330688477, "blob_id": "3e069f3a9eae8b55b53e830aa009b01c24fbb7d4", "content_id": "922e5e0b8547d7ae1e4dd2f1276bae6aeed73ce3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2397, "license_type": "no_license", "max_line_length": 119, "num_lines": 91, "path": "/ExploratoryAssignment_1/My Solutions/FinalReport/Codes/LogisticRegression.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nLogistic regression Algorithm Implementation\n'''\n\n# Imports\nfrom sklearn import datasets\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n# Main Functions\ndef LoadIrisData():\n iris = datasets.load_iris()\n x = np.array(iris.data[0:99, :2])\n y = np.array(iris.target[0:99])\n return x, y\n\ndef PlotData(X, labels=[], title='Plot'):\n if len(labels) == 0:\n plt.scatter(X[:, 0], X[:, 1], alpha=0.7, edgecolors='b')\n else:\n plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='rainbow', alpha=0.7, edgecolors='b')\n plt.title(title)\n plt.show()\n\ndef Sigmoid_Predict(Data, w, b):\n Z = np.dot(w.T, Data.T) + b\n return 1 / (1 + 1/np.exp(Z))\n\ndef GradientDescent(Data, Target, lr=0.01, epochs=1000, w=None, b=None, recordInterval=1):\n #Step 1: Initial Model Parameter\n N = len(Data)\n if w == None:\n w = np.zeros((2,1))\n if b == None:\n b = 0\n costs = []\n for i in tqdm(range(epochs)):\n #Step 2: Apply sigmoid Function and get y prediction\n y_pred = Sigmoid_Predict(Data, w, b)\n #Step 3: Calculate Cost Function\n cost = -(1/N) * np.sum(Target * np.log(y_pred) + (1-Target) * np.log(1-y_pred))\n #Step 4: Calculate Gradient\n dw = 1/N * np.dot(Data.T, (y_pred-Target).T)\n db = 1/N * np.sum(y_pred-Target)\n #Step 5: Update w & b\n w = w - lr * dw\n b = b - lr * db\n #Records cost\n if i % recordInterval == 0:\n costs.append(cost)\n return costs, w, b\n\ndef PlotLoss(losses):\n losses = np.array(losses)\n plt.plot(np.arange(1, losses.shape[0]+1), losses)\n plt.title('Loss vs Epoch')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n# Driver Code\n# Load Data\nData, Target = LoadIrisData()\n# Plot Data\nPlotData(Data, labels=Target, title='Original')\n\n# Do Gradient Descent and fit the data\n# Params\nlr=0.01\nepochs=1000000\nw=None\nb=None\nrecordInterval=1000\n\nlosses, w_final, b_final = GradientDescent(Data, Target, lr=lr, epochs=epochs, w=w, b=b, recordInterval=recordInterval)\n# Plot Loss\nPlotLoss(losses)\n\n# Predict\nY_Pred = Sigmoid_Predict(Data, w_final, b_final)\nY_Pred = np.reshape(Y_Pred, Y_Pred.shape[1])\nprint(Y_Pred)\nY_Pred_final = []\nfor yp in Y_Pred:\n if yp >= 0.5:\n Y_Pred_final.append(1)\n else:\n Y_Pred_final.append(0)\n\nPlotData(Data, labels=Y_Pred_final, title='Predicted')" }, { "alpha_fraction": 0.5241448879241943, "alphanum_fraction": 0.6317907571792603, "avg_line_length": 27.428571701049805, "blob_id": "5e133588366d3c2fbbb358ba8c4eaa377f113c3c", "content_id": "090033efcded60a2631a263f065ae5720c9eddca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 994, "license_type": "no_license", "max_line_length": 114, "num_lines": 35, "path": "/ProblemSet_2/5.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n5. The data below represents the number of chairs in each class of a government high school. \nCreate a box plot and swarm plot (add jitter) and find the number of data points that are outliers. \n35, 54, 60, 65, 66, 67, 69, 70, 72, 73, 75, 76, 54, 25, 15, 60, 65, 66, 67, 69, 70, 72, 130, 73, 75, 76 \n'''\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef BoxPlot(X):\n plt.boxplot(X)\n plt.title('Box Plot')\n plt.xlabel('')\n plt.ylabel('No of Chairs')\n plt.show()\n\ndef SwarmPlot(X):\n sns.swarmplot(X)\n plt.title('Swarm Plot')\n plt.xlabel('No of Chairs')\n plt.ylabel('')\n plt.show()\n\ndef JitteredBoxPlot(X):\n sns.boxplot(data=X)\n sns.swarmplot(data=X, color='grey')\n plt.title('Box Plot')\n plt.xlabel('')\n plt.ylabel('No of Chairs')\n plt.show()\n\n# Driver Code\nChairs = [35, 54, 60, 65, 66, 67, 69, 70, 72, 73, 75, 76, 54, 25, 15, 60, 65, 66, 67, 69, 70, 72, 130, 73, 75, 76]\n# BoxPlot(Chairs)\n# SwarmPlot(Chairs)\nJitteredBoxPlot(Chairs)" }, { "alpha_fraction": 0.760869562625885, "alphanum_fraction": 0.760869562625885, "avg_line_length": 14.666666984558105, "blob_id": "c36d08a39adcc03dfcd53121533c0ee110983814", "content_id": "e31ebd12f53c5a81e889bd9d188b7a8c2a53ba38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "no_license", "max_line_length": 38, "num_lines": 3, "path": "/ReadingAssignment_1/Codes/Backpropogation.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nTest driving Backpropogation Algorithm\n'''" }, { "alpha_fraction": 0.5388547778129578, "alphanum_fraction": 0.6707566380500793, "avg_line_length": 43.5, "blob_id": "ce4f2a44403abb7a33e5f183faf368d39f539cd6", "content_id": "c2a3ffca5a9621b5e7e8c5a4df03dd129f8128d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 980, "license_type": "no_license", "max_line_length": 124, "num_lines": 22, "path": "/ProblemSet_2/1.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n1. On New Year’s Eve, Tina walked into a random shop and surprised to see a huge crowd there. \nShe is interested to find what kind of products they sell the most, for which she needs the age distribution of customers. \nHelp her to find out the same using histogram. \nThe age details of the customers are given below \n7, 9, 27, 28, 55, 45, 34, 65, 54, 67, 34, 23, 24, 66, 53, 45, 44, 88, 22, 33, 55, 35, 33, 37, 47, 41,31, 30, 29, 12\nIdentify the type of histogram (eg. Bimodal, Multimodal, Skewed..etc). Use different bin sizes. \n'''\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef GenerateHistogram(Ages):\n n_bins = 25\n X = np.arange(len(Ages))\n n, bins, patches = plt.hist(Ages, n_bins, facecolor='blue', alpha=0.5)\n plt.show()\n\nAges = [7, 9, 27, 28, 55, 45, 34, 65, 54, 67, 34, 23, 24, 66, 53, 45, 44, 88, 22, 33, 55, 35, 33, 37, 47, 41,31, 30, 29, 12]\nGenerateHistogram(Ages)\n\n# FROM HISTOGRAM WE GET THAT\n# DATA IS BIMODAL - 30 to 40 and 50 to 60" }, { "alpha_fraction": 0.7162661552429199, "alphanum_fraction": 0.7171903848648071, "avg_line_length": 21.5625, "blob_id": "479f0b8a5943b25a6a3c72a5c0baa30d36671afb", "content_id": "0d92c4e83b19a6f6e5ab6883425813929c77e604", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1082, "license_type": "no_license", "max_line_length": 76, "num_lines": 48, "path": "/Hadoop_Assignment/Codes/WordCount.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nWord Count using PySpark\n'''\n\n# Imports\n# import findspark\n# findspark.init()\nimport pyspark\nimport sys\nfrom pyspark import SparkContext, SparkConf\n\n# Main Functions\ndef InitSpark():\n # Creating a spark context\n return SparkContext(\"local\", \"PySpark Word Count Program\")\n\ndef ReadWordsFromFile(sc, path):\n # Read Words in Input File\n return sc.textFile(path).flatMap(lambda line: line.split(\" \"))\n\ndef WordCount_Spark(words):\n # Count Occurence of each word\n return words.map(lambda word: (word, 1)).reduceByKey(lambda a, b: a + b)\n\ndef SaveData(wordCounts, savePath):\n if not savePath == None:\n # Save data as text file\n wordCounts.saveAsTextFile(savePath)\n\ndef PrintData(wordCounts):\n # Print Counts\n print(\"Word Counts:\\n\", wordCounts.collect())\n\n# Driver Code\n# Params\ninputPath = 'wordCountInput.txt'\nsavePath = None\n\n# Init Spark\nsc = InitSpark()\n\n# Read Words and count\nwords_data = ReadWordsFromFile(sc, inputPath)\nword_counts = WordCount_Spark(words_data)\n\n# Save and Print\nSaveData(word_counts, savePath)\nPrintData(word_counts)" }, { "alpha_fraction": 0.7038983702659607, "alphanum_fraction": 0.717477023601532, "avg_line_length": 25.55813980102539, "blob_id": "055a05967454791bcab6f0b140ee7592b2067849", "content_id": "898ec508b6c700d60cf91088e76cb89e1a15ddd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2283, "license_type": "no_license", "max_line_length": 129, "num_lines": 86, "path": "/Hadoop_Assignment/Codes/FIM.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nFrequent Itemset Mining using PySpark\n'''\n\n# Imports\n# import findspark\n# findspark.init()\nfrom pyspark.sql.functions import split\nfrom pyspark.ml.fpm import FPGrowth\nfrom pyspark import SparkContext, since\nfrom pyspark.sql import SQLContext as sc\nfrom pyspark.context import SparkContext\nfrom pyspark.sql.session import SparkSession\n\n# Main Functions\ndef InitSpark():\n # Creating a spark context and starting a session\n sc = SparkContext.getOrCreate()\n spark = SparkSession(sc)\n return sc, spark\n\ndef ReadTextFile(path):\n # Read Input Data\n return sc.textFile(path)\n\ndef Data_to_Transactions(text_lines):\n # Convert Data to Transactions\n trans = []\n i = 0\n for line in text_lines.collect():\n trans.append((i, list(set(map(lambda x: int(x), str(line).split())))))\n i = i + 1\n return trans\n\ndef CreateSparkDataframe(spark, trans, itemsColName='items'):\n # Create Spark Dataframe\n df = spark.createDataFrame(trans, [\"id\", itemsColName])\n return df\n\ndef FpGrowth_CreateModel(itemsCol='items', minSupport=0.05, minConfidence=0.75):\n # Aliasing the inbuild FPGrowth function as fpgrwoth and setting the minimumsupport to be 0.05 (that is around 50000) and min\n # confidence as 0.75\n return FPGrowth(itemsCol=\"items\", minSupport=0.05, minConfidence=0.75)\n\ndef FpGrowth_FitModel(model, df):\n # Fitting our model\n return model.fit(df)\n\ndef PrintData(model, df):\n # FI\n print(\"Frequent Itemsets:\")\n print(model.freqItemsets.show())\n\n # AR\n print(\"Association Rules:\")\n print(model.associationRules.show())\n\n # Predictions\n print(\"Predictions for input data:\")\n print(model.transform(df).show())\n\n# Driver Code\n# Params\npath = 'kosarak.dat'\nitemsCol = 'items'\nminSupport = 0.05\nminConfidence = 0.75\n\n# Init Spark\nsc, spark = InitSpark()\n\n# Read Input Data\ntext_data = ReadTextFile(path)\n\n# Transform data to transactions and make dataframe\ntrans = Data_to_Transactions(text_data)\nprint(\"No of Transactions:\", len(trans))\ndf = CreateSparkDataframe(spark, trans, itemsColName=itemsCol)\nprint(\"Dataframe:\\n\", df.show())\n\n# Create and Fit Model to data\nmodel = FpGrowth_CreateModel(itemsCol, minSupport, minConfidence)\nmodel = FpGrowth_FitModel(model, df)\n\n# Display Results\nPrintData(model, df)" }, { "alpha_fraction": 0.5783348083496094, "alphanum_fraction": 0.5989257097244263, "avg_line_length": 23.30434799194336, "blob_id": "776006b9cee11c3d9cb3426588053ab0ed500e27", "content_id": "11b94e569cd505d7ff4812a20be59d7cf03e276c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1117, "license_type": "no_license", "max_line_length": 103, "num_lines": 46, "path": "/ProblemSet_1/3.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n(3) For a sample space of 20 elements,\nthe values are fitted to the line Y=2X+3, X>5. \nDevelop an application that sets up the data and computes the standard deviation of this sample space. \n(use random number generator supported in your development platform to generate values of X)\n'''\nimport random\n\ndef GenerateData(n_data, MaxVal):\n X = []\n Y = []\n for i in range(n_data):\n x = random.randint(6, MaxVal)\n y = (2 * x) + 3\n X.append(x)\n Y.append(y)\n return X, Y\n\ndef Mean(X):\n sum = 0\n for x in X:\n sum += x\n return sum / len(X)\n\ndef StandardDeviation(X):\n SD = 0.0\n\n mean = Mean(X)\n sumsqaurediff = 0.0\n for x in X:\n sumsqaurediff += (x - mean) ** 2\n SD = (sumsqaurediff / len(X)) ** (1/2)\n\n return SD\n\n# Driver Code\nn_data = int(input(\"Enter no of data points: \"))\nMaxVal = 100\nX, Y = GenerateData(n_data, MaxVal)\nprint(\"Points: \")\ni = 0\nfor x, y in zip(X, Y):\n print(str(i+1) + \": (\", x, \",\", y, \")\")\n i += 1\nprint(\"Standard Deviation of X:\", StandardDeviation(X))\nprint(\"Standard Deviation of Y:\", StandardDeviation(Y))" }, { "alpha_fraction": 0.7143786549568176, "alphanum_fraction": 0.7182823419570923, "avg_line_length": 25.05084800720215, "blob_id": "a4b6fdcd5cecf769ca5b527cf6ab772f2b3ebb8d", "content_id": "7caa5c2c200de8dfcdaeba535dc67cd7dab78fc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1537, "license_type": "no_license", "max_line_length": 92, "num_lines": 59, "path": "/ReadingAssignment_1/Codes/BayesClassifier.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nBayes Classification Test Drive\n'''\n# Imports\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n\ndef ImportDataset_Iris():\n iris = load_iris()\n return iris\n\ndef SplitDataset(Dataset):\n # Separate Target Field from Dataset\n X = Dataset.data\n Y = Dataset.target\n\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4, random_state=1)\n\n return X, Y, X_train, X_test, Y_train, Y_test\n\ndef Train(X_train, Y_train):\n BayesClassifier = GaussianNB()\n BayesClassifier.fit(X_train, Y_train)\n return BayesClassifier\n\ndef Predict(GaussianClassifier, X_test):\n Y_pred = GaussianClassifier.predict(X_test)\n print(\"Predicted values:\")\n print(Y_pred)\n return Y_pred\n\ndef PrintAccuracy(Y_test, Y_pred):\n print(\"Confusion Matrix:\\n\", confusion_matrix(Y_test, Y_pred))\n print(\"Accuracy:\\n\", accuracy_score(Y_test, Y_pred)*100)\n print(\"Report:\\n\", classification_report(Y_test, Y_pred))\n\n\n# Driver Code\n\nprint(\"\\n\\n\")\n\n# Building Phase\nDataset = ImportDataset_Iris()\nX, Y, X_train, X_test, Y_train, Y_test = SplitDataset(Dataset)\nBayesClassifier = Train(X_train, Y_train)\n\nprint(\"\\n\\n\")\n\n# Operational Phase\n# Prediction using Gini\nprint(\"Results Using Bayes Classifier:\")\nY_pred = Predict(BayesClassifier, X_test)\nPrintAccuracy(Y_test, Y_pred)\n\nprint(\"\\n\\n\")\n" }, { "alpha_fraction": 0.6747685074806213, "alphanum_fraction": 0.6828703880310059, "avg_line_length": 26.294736862182617, "blob_id": "d1acb4fda49d0f5945c3cbb813d300ccabd1d3a7", "content_id": "8fcde8fa1aea61c52eda208807d433c7cdb61418", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2592, "license_type": "no_license", "max_line_length": 98, "num_lines": 95, "path": "/Assignment_1/Codes/DecisionTree.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nDecision Tree Classifier Test Drive\n'''\n# Imports\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n\n# Functions\ndef ImportDataset(dataset_path):\n # Import Dataset\n balance_data = pd.read_csv(dataset_path, sep= ',')#, header = None)\n\n # Print Dataset Size\n print (\"Dataset Length: \", len(balance_data))\n print (\"Dataset Shape: \", balance_data.shape)\n \n # Print Dataset Obseravtions\n print (\"Dataset: \",balance_data.head())\n return balance_data\n\ndef SplitDataset(balance_data):\n # Separate Target Field from Dataset\n X = balance_data.values[:, 1:]\n Y = balance_data.values[:, 0]\n\n # Split Dataset into train and test\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3, random_state = 100)\n \n return X, Y, X_train, X_test, Y_train, Y_test\n\ndef Train_Gini(X_train, Y_train):\n # Create Decision Tree\n DT_Gini = DecisionTreeClassifier(\n criterion=\"gini\", random_state=100,\n max_depth=3, min_samples_leaf=5)\n\n # Train\n DT_Gini.fit(X_train, Y_train)\n return DT_Gini\n\ndef Train_Entropy(X_train, Y_train):\n # Create Decision Tree\n DT_Entropy = DecisionTreeClassifier(\n criterion = \"entropy\", random_state = 100,\n max_depth = 3, min_samples_leaf = 5)\n\n # Train\n DT_Entropy.fit(X_train, Y_train)\n return DT_Entropy\n\ndef Predict(X_test, clf_object):\n # Predicton on test with Gini Index\n Y_pred = clf_object.predict(X_test)\n print(\"Predicted values:\")\n print(Y_pred)\n return Y_pred\n\ndef PrintAccuracy(Y_test, Y_pred):\n print(\"Confusion Matrix:\\n\", confusion_matrix(Y_test, Y_pred))\n print(\"Accuracy:\\n\", accuracy_score(Y_test, Y_pred)*100)\n print(\"Report:\\n\", classification_report(Y_test, Y_pred))\n\n'''\n# Driver code\nDataset_Path = 'Assignment 1/balance-scale.csv'\n\nprint(\"\\n\\n\")\n\n# Building Phase\nDataset = ImportDataset(Dataset_Path)\nX, Y, X_train, X_test, Y_train, Y_test = SplitDataset(Dataset)\nDT_Gini = Train_Gini(X_train, Y_train)\nDT_Entropy = Train_Entropy(X_train, Y_train)\n\nprint(\"\\n\\n\")\n \n# Operational Phase\n# Prediction using Gini\nprint(\"Results Using Gini Index:\")\nY_pred_Gini = Predict(X_test, DT_Gini)\nPrintAccuracy(Y_test, Y_pred_Gini)\n\nprint(\"\\n\\n\")\n\n# Prediction using Entropy\nprint(\"Results Using Entropy:\")\nY_pred_Entropy = Predict(X_test, DT_Entropy)\nPrintAccuracy(Y_test, Y_pred_Entropy)\n\nprint(\"\\n\\n\")\n'''" }, { "alpha_fraction": 0.6057245135307312, "alphanum_fraction": 0.6221824884414673, "avg_line_length": 29.064516067504883, "blob_id": "934467cd7c0c90dd28e0582c593c2060aa9b3f43", "content_id": "e14e81450fc76875302b25b93b6061079847b1e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2799, "license_type": "no_license", "max_line_length": 118, "num_lines": 93, "path": "/ProblemSet_1/7.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n(7) Develop an application (absolute grader) that accepts marks scored by \n20 students in ASBD course \n(as a split up of three : \nMid Sem (30), \nEnd Sem(50) and \nAssignments(20). \nCompute the total and use it to grade the students following absolute grading :\n>=90 – S ; >=80 – A and so on till D. \nCompute the Class average for total marks in the course and 50% of class average would be fixed as the cut off for E. \nGenerate a frequency table for the grades as well (Table displaying the grades and counts of them). \n'''\nimport random\n\ndef GenerateData(n_data, rand=True):\n MidSemMarks = []\n EndSemMarks = []\n AssignmentMarks = []\n for i in range(n_data):\n if rand:\n MidSemMarks.append(random.randint(0, 30))\n EndSemMarks.append(random.randint(0, 50))\n AssignmentMarks.append(random.randint(0, 20))\n else:\n MidSemMarks.append(int(input(\"Enter Midsem Marks for \" + str(i+1) + \": \")))\n EndSemMarks.append(int(input(\"Enter Endsem Marks for \" + str(i+1) + \": \")))\n AssignmentMarks.append(int(input(\"Enter Assignment Marks for \" + str(i+1) + \": \")))\n return MidSemMarks, EndSemMarks, AssignmentMarks\n \ndef CalculateTotalMarks(MidSemMarks, EndSemMarks, AssignmentMarks):\n TotalMarks = []\n for midsem, endsem, assign in zip(MidSemMarks, EndSemMarks, AssignmentMarks):\n TotalMarks.append(midsem + endsem + assign)\n return TotalMarks\n\ndef GetGrade(mark, avgmarks):\n grade = 'U'\n if mark >= 90:\n grade = 'S'\n elif mark >= 80:\n grade = 'A'\n elif mark >= 70:\n grade = 'B'\n elif mark >= 60:\n grade = 'C'\n elif mark >= 50:\n grade = 'D'\n elif mark >= int(avgmarks / 2):\n grade = 'E'\n else:\n grade = 'U'\n return grade\n\ndef CalculateGrades(TotalMarks):\n Grades = []\n avgmarks = Mean(TotalMarks)\n for totmark in TotalMarks:\n Grades.append(GetGrade(totmark, avgmarks))\n return Grades\n\ndef Mean(X):\n sum = 0\n for x in X:\n sum += x\n return sum / len(X)\n\ndef FreqDist(X, keys):\n freq = {}\n for key in keys:\n freq[key] = 0\n for x in X:\n freq[x] = 0\n for x in X:\n freq[x] += 1\n return freq\n\n# Driver Code\nn_data = 20\nPossibleGrades = ['S', 'A', 'B', 'C', 'D', 'E', 'U']\nMidSemMarks, EndSemMarks, AssignmentMarks = GenerateData(n_data, True)\nTotalMarks = CalculateTotalMarks(MidSemMarks, EndSemMarks, AssignmentMarks)\nGrades = CalculateGrades(TotalMarks)\nprint(\"Mean Marks:\", Mean(TotalMarks))\nprint(\"Grades:\")\ni = 0\nfor mark, grade in zip(TotalMarks, Grades):\n i += 1\n print(str(i) + \":\", mark, \"-\", grade)\n \nprint(\"Freq Dist of Grades: \")\nfreq = FreqDist(Grades, PossibleGrades)\nfor key in PossibleGrades:\n print(key + \":\", freq[key])" }, { "alpha_fraction": 0.633058488368988, "alphanum_fraction": 0.6602323651313782, "avg_line_length": 35.292518615722656, "blob_id": "456c4c2c98c4c78c0d1c45880d22325c4a7ed363", "content_id": "6e463b0f356d7e71c5e380660ed9885c9cf6541a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5370, "license_type": "no_license", "max_line_length": 145, "num_lines": 147, "path": "/ProblemSet_3/Subset 1/2.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n2. Use the given dataset and perform the operations listed below. \nDataset Description It is a well-known fact that Millenials LOVE Avocado Toast. \nIt's also a well known fact that all Millenials live in their parents basements. \nClearly, they aren't buying home because they are buying too much Avocado Toast! But maybe there's hope... \nif a Millenial could find a city with cheap avocados, they could live out the Millenial American Dream. \nHelp them to filter out the clutter using some pre-processing techniques. \nSome relevant columns in the dataset: \n• Date - The date of the observation \n• AveragePrice - the average price of a single avocado \n• type - conventional or organic \n• year - the year \n• Region - the city or region of the observation \n• Total Volume - Total number of avocados sold \n• 4046 - Total number of avocados with PLU* 4046 sold \n• 4225 - Total number of avocados with PLU* 4225 sold \n• 4770 - Total number of avocados with PLU* 4770 sold (Product Lookup codes (PLU’s)) * \n\na. \nSort the attribute “Total Volume” in the dataset given and distribute the data into equal sized/frequency bins of size 50 & 250.\nSmooth the sorted data by \n(i)bin-means \n(ii) bin-medians \n(iii) bin-boundaries (smooth using bin boundaries after trimming the data by 2%). \n\nb. \nThe dataset represents weekly retail scan data for National retail volume (units) and price. \nRetail scan data comes directly from retailers’ cash registers based on actual retail sales of Hass avocados. \nHowever, the company is interested in the monthly (total per month) and annual sales (total per year), rather than the total per week. \nSo, reduce the data accordingly. \n\nc.\nSummarize the number of missing values for each attribute \n\nd.\nPopulate data for the missing values of the attribute= “Average Price” by averaging their values that fall under the same region. \n\ne. \nDiscretize the attribute=“Date” using concept hierarchy into {Old, New, Recent} {2015,2016 : Old, 2017: New, 2018: Recent} and plot in q-q plots \n'''\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm \nimport pylab as py\n\ndef ReadCSVFile(filepath):\n return pd.read_csv(filepath)\n\ndef FreqDist(X, binsize):\n values = []\n Freq = {}\n minVal = int(min(X))\n maxVal = int(round(max(X)))\n print(\"Range:\", minVal, \"-\", maxVal)\n for i in range(minVal, maxVal+1, binsize):\n values.append(i)\n Freq[str(i)] = 0\n for x in X:\n key = int(int((round(x) - minVal)/binsize)*binsize + minVal)\n Freq[str(key)] += 1\n return Freq\n\ndef PrintNonZeroFreq(FreqDist, binsize):\n print(\"Freq Dist \" + str(binsize) + \" Non Zero Values: \")\n nbins = 0\n for k in FreqDist.keys():\n if FreqDist[k] > 0:\n nbins += 1\n #print(k, \":\", FreqDist[k], \"\\n\")\n print(\"Found\", nbins, \"non empty bins\")\n\ndef MissingCount(Data, label):\n missing = 0\n indices = []\n i = 0\n for d in Data[label]:\n if str(d).strip().replace('nan', '') in ['', ' '] or 'NaN' in str(d):\n missing += 1\n indices.append(i)\n i += 1\n return missing, indices\n\n# Driver Code\nfilepath = 'ProblemSet3/Subset 1/avocado_csv.csv'\nlabels = ['Unnamed: 0', 'Date', 'AveragePrice', 'Total Volume', '4046', '4225',\n '4770', 'Total Bags', 'Small Bags', 'Large Bags', 'XLarge Bags', 'type',\n 'year', 'region']\nData = ReadCSVFile(filepath)\n\n# A\nFreqDist50 = FreqDist(Data['Total Volume'], 50)\nFreqDist250 = FreqDist(Data['Total Volume'], 250)\nPrintNonZeroFreq(FreqDist50, 50)\nPrintNonZeroFreq(FreqDist250, 250)\n\n# B\n\n# C\nprint(\"Missing Counts:\")\nfor label in labels:\n miscount = MissingCount(Data, label)\n print(label, miscount)\n\n# D\nMissingLabel = 'AveragePrice'\nmissingcount, indices = MissingCount(Data, MissingLabel)\nData_NoMissing = Data.copy()\nfor i in indices:\n Region = Data_NoMissing['region'][i]\n AvgP = 0.0\n RegionCount = 0\n for a, r in zip(Data_NoMissing[MissingLabel], Data_NoMissing['region']):\n if r == Region and a not in ['', ' ', 'nan'] and '.' in str(a):\n AvgP += float(a)\n RegionCount += 1\n AvgP /= RegionCount\n Data_NoMissing[MissingLabel][i] = str(AvgP)\n print(\"Added Value\", AvgP, \"to missing value in region\", Region)\nprint(\"Now missing count:\", MissingCount(Data_NoMissing, MissingLabel))\n\n# E\n# DateLabels = [2015,2016 : 'Old', 2017: 'New', 2018: 'Recent']\nDateLabels = ['Old', 'New', 'Recent']\nDateIndices = {}\nDateValues = {}\nfor l in DateLabels:\n DateIndices[l] = []\n DateValues[l] = []\nfor di in range(len(Data['Date'])):\n date = Data['Date'][di].split('-')\n if date[2] in ['2015', '2016']:\n DateIndices['Old'].append(di)\n #DateValues['Old'].append(Data['Date'][di])\n DateValues['Old'].append(int(date[0]) + int(date[1])*30)\n elif date[2] in ['2017']:\n DateIndices['New'].append(di)\n #DateValues['New'].append(Data['Date'][di])\n DateValues['New'].append(int(date[0]) + int(date[1])*30)\n elif date[2]in ['2018']:\n DateIndices['Recent'].append(di)\n #DateValues['Recent'].append(Data['Date'][di])\n DateValues['Recent'].append(int(date[0]) + int(date[1])*30)\nprint(\"Dicretized Date Sizes:\")\nfor l in DateLabels:\n print(l, len(DateIndices[l]))\n sm.qqplot(np.array(DateValues[l]), line ='45') \n py.show() \n" }, { "alpha_fraction": 0.520500659942627, "alphanum_fraction": 0.576176106929779, "avg_line_length": 21.504854202270508, "blob_id": "1122fb2a4ac9b7383c251c1ac6e5486cf0dd094a", "content_id": "255451ba16d6fa1d6d8e95887f0a13bcf9d85c03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2317, "license_type": "no_license", "max_line_length": 174, "num_lines": 103, "path": "/ProblemSet_1/4.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n(4) For a given data of heights of a class, the heights of 15 students are recorded as \n167.65, 167, 172, 175, 165, 167, 168, 167, 167.3, 170, 167.5, 170, 167, 169, and 172. \nDevelop an application that computes; \nexplore if there are any packages supported in your platform that depicts these measures / their calculation of central tendency in a visual form for ease of understanding. \na. Mean height of the student \nb. Median and Mode of the sample space \nc. Standard deviation \nd. Measure of skewness. [(Mean-Mode)/standard deviation] \n'''\n\ndef Sum(X):\n sum = 0\n for x in X:\n sum += x\n return sum\n\ndef Max(X):\n max = 0\n for x in X:\n if max < x:\n max = x\n return max\n\ndef FreqDist(X):\n freq = {}\n for x in X:\n freq[x] = 0\n for x in X:\n freq[x] += 1\n return freq\n\ndef Mean(X):\n return Sum(X) / len(X)\n\ndef Median(X):\n BubbleSort(X)\n\n if len(X) % 2 == 1:\n return X[int((len(X) - 1)/2)]\n else:\n return (X[int(len(X)/2)] + X[int(len(X)/2 - 1)]) / 2\n\ndef Mode(X):\n modex = -1\n modex_freq = -1\n\n freq = FreqDist(X)\n\n for key in freq.keys():\n if freq[key] > modex_freq:\n modex = key\n modex_freq = freq[key]\n\n return modex\n\ndef StandardDeviation(X):\n SD = 0.0\n\n mean = Mean(X)\n sumsqaurediff = 0.0\n for x in X:\n sumsqaurediff += (x - mean) ** 2\n SD = (sumsqaurediff / len(X)) ** (1/2)\n\n return SD\n\ndef Skewness(X):\n return (Mean(X) - Mode(X)) / StandardDeviation(X)\n\ndef BubbleSort(arr):\n n = len(arr)\n\n # Traverse through all array elements\n for i in range(n):\n \n # Last i elements are already in place\n for j in range(0, n-i-1):\n \n # traverse the array from 0 to n-i-1\n # Swap if the element found is greater\n # than the next element\n if arr[j] > arr[j+1] :\n arr[j], arr[j+1] = arr[j+1], arr[j]\n\n# Driver Code\ndata = [167.65, 167, 172, 175, 165, 167, 168, 167, 167.3, 170, 167.5, 170, 167, 169, 172]\nprint(\"Data:\", data)\n\n# a\nprint(\"Mean Height:\", Mean(data))\n\n# b\nprint(\"Median Height:\", Median(data))\nprint(\"Mode Height:\", Mode(data))\n\n# c\nprint(\"Standard Deviation:\", StandardDeviation(data))\n\n# d\nprint(\"Skewness:\", Skewness(data))\n\n# Visual Representation?" }, { "alpha_fraction": 0.5886469483375549, "alphanum_fraction": 0.6044582724571228, "avg_line_length": 28.45801544189453, "blob_id": "a17e620a1493320ce21de272be290220d786d165", "content_id": "9c7d67883845350339631f0607a1d50e4d84e570", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3858, "license_type": "no_license", "max_line_length": 116, "num_lines": 131, "path": "/ReadingAssignment_1/Codes/HeirarchicalClustering.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nTest driving Heirarchical Clustering Algorithm\n'''\n# Imports\nfrom sklearn import cluster\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Main Functions\n# Main Functions\ndef ClosestPoints(Points):\n min_distance = None\n min_points = (-1, -1)\n for i in range(len(Points)):\n for j in range(i+1, len(Points)):\n dist = Dist(Points[i], Points[j])\n if min_distance == None or min_distance > dist:\n min_distance = dist\n min_points = (i, j)\n return min_distance, min_points\n\ndef MeanPoint(A, B):\n C = []\n for a, b in zip(A, B):\n C.append((a+b)/2)\n return C\n\ndef Dist(A, B):\n return (((A[0] - B[0])**2) + ((A[1] - B[1])**2)) ** (0.5)\n\ndef AgglomerativeClustering(items, n_clusters):\n Clusters = []\n ClusterPoints = []\n\n # Initially every item is a cluster\n for pi in range(len(items)):\n Clusters.append([pi])\n ClusterPoints.append(items[pi])\n\n new_Clusters = Clusters\n new_ClusterPoints = ClusterPoints\n iteration = 1\n while(len(new_Clusters) > n_clusters):\n Clusters = new_Clusters\n ClusterPoints = new_ClusterPoints\n\n # Find Closest Points\n min_dist, closest_points = ClosestPoints(ClusterPoints)\n\n # Merge the closest points\n # Remove to be merged\n new_ClusterPoints = RemoveIndices(new_ClusterPoints, closest_points)\n new_Clusters = RemoveIndices(new_Clusters, closest_points)\n\n # Add merged\n this_cluster = list(np.append(np.array(Clusters[closest_points[0]]), np.array(Clusters[closest_points[1]])))\n mean_point = MeanPoint(ClusterPoints[closest_points[0]], ClusterPoints[closest_points[1]])\n new_Clusters.append(this_cluster)\n new_ClusterPoints.append(mean_point)\n print(iteration)\n print(\"len:\", len(new_Clusters))\n print(Clusters[closest_points[0]])\n print(Clusters[closest_points[1]])\n print(this_cluster)\n print(mean_point)\n\n iteration += 1\n Clusters = new_Clusters\n ClusterPoints = new_ClusterPoints\n\n ClusterMap = {}\n for cluster_index in range(len(Clusters)):\n for p in Clusters[cluster_index]:\n ClusterMap[str(p)] = cluster_index\n\n return ClusterMap, ClusterPoints\n\ndef Plot(Points, corres_cluster):\n Points = np.array(Points)\n if corres_cluster == None:\n plt.scatter(Points[:, 0], Points[:, 1])\n else:\n plt.scatter(Points[:, 0], Points[:, 1], c=corres_cluster, cmap='rainbow')\n plt.show()\n\ndef RemoveIndices(Items, Indices):\n new_Items = []\n for i in range(len(Items)):\n if not i in Indices:\n new_Items.append(Items[i])\n return new_Items\n\n# ---------------------------------------- Self Made Function Driver Code ----------------------------------------\nPoints = [(3, 4), (7, 5), (2, 6), (3, 1),\n (8, 2), (7, 3), (4, 4), (6, 6),\n (7, 4), (6, 7)]\n\nn_clusters = 3\n\nPlot(Points, None)\n\nmin_distance, min_points = ClosestPoints(Points)\n\nprint(\"Min Distance:\", min_distance)\nprint(\"Closest Points:\", Points[min_points[0]], Points[min_points[1]])\n\nClusterMap, ClusterPoints = AgglomerativeClustering(Points, n_clusters)\n \ncorres_cluster = []\nfor pi in range(len(Points)):\n corres_cluster.append(ClusterMap[str(pi)])\nPlot(Points, corres_cluster)\n\n# ---------------------------------------- Library Driver Code ----------------------------------------\nminval = 0\nmaxval = 100\nn_points = 100\nN_Clusters = 3\n\n# Generate Random Points\nitems = np.random.randint(minval, maxval+1, (n_points, 2))\n\nplt.scatter(items[:,0], items[:,1])\nplt.show()\n\n# Clustering\nCluster = cluster.AgglomerativeClustering(n_clusters=N_Clusters, affinity='euclidean', linkage='ward')\nCluster.fit_predict(items)\n\nplt.scatter(items[:,0], items[:,1], c=Cluster.labels_, cmap='rainbow')\nplt.show()" }, { "alpha_fraction": 0.7212950587272644, "alphanum_fraction": 0.7242990732192993, "avg_line_length": 28.382352828979492, "blob_id": "ebfd1f8bf691e0f1de9015ff4810e108609ef2a0", "content_id": "123e7f73c7de954af4aa70dc6f0585bfc20d3d9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2996, "license_type": "no_license", "max_line_length": 116, "num_lines": 102, "path": "/Hadoop_Assignment/Codes/KMeansClustering.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nK-Means Clustering using PySpark\n'''\n\n# Imports\n# import findspark\n# findspark.init()\nfrom pyspark.sql.functions import split\nfrom pyspark.ml.clustering import KMeans\nfrom pyspark import SparkContext, since\nfrom pyspark.sql import SQLContext as sc\nfrom pyspark.context import SparkContext\nfrom pyspark.sql.session import SparkSession\nfrom pyspark.ml.feature import VectorAssembler\n\n# Main Functions\ndef InitSpark():\n # Creating spark context and starting a session\n sc = SparkContext.getOrCreate()\n spark = SparkSession(sc)\n return spark, sc\n\ndef ReadTextFile(path):\n # Read Input Data\n return sc.textFile(path)\n\ndef GenerateFeatureLists(text_lines):\n # Create Features from Text Data - Skip 1st attribute\n features = []\n for line in text_lines.collect():\n l = str(line)\n feature_list = line.split()\n feature_list = feature_list[1 : :]\n features.append(list(map(lambda x: int(x), feature_list)))\n return features\n\ndef CreateSparkDataframe(spark, features):\n # Generate Column names\n colNames = []\n for i in range(len(features[0])):\n colNames.append(\"A_\" + str(i))\n\n # Creating Dataframe of features\n return spark.createDataFrame(features, colNames), colNames\n\ndef VectorAssembleData(df, colNames):\n # Vector Assemble Data to make it compatible with KMeans\n vecAssembler = VectorAssembler(inputCols=colNames, outputCol=\"features\")\n return vecAssembler.transform(df)\n\ndef KMeans_InitModel(k=5, seed=1):\n # Aliasing the inbuild KMeans function as kmeans with number of clusters 5 and seed as 1 (random initial points)\n return KMeans(k=k, seed=seed)\n\ndef KMeans_FitModel(model, df_va):\n # Fit Model to data\n return model.fit(df_va.select('features'))\n\ndef TransformData(model, df_va, colNames):\n # Drop all Initial Columns and transform to feature vectors with cluster\n transformed = model.transform(df_va)\n # for name in colname:\n # transformed.drop(name).collect()\n return transformed.drop(*colNames)\n\ndef PrintData(model):\n # Printing the centers of the clusters\n print(\"The Centres are : \")\n for centre in model.clusterCenters():\n print(centre)\n\n# Driver Code\n# Params\npath = 'pumsb.dat'\nk = 5\nseed = 1\n\n# Init Spark\nspark, sc = InitSpark()\n\n# Read Input File\ntext_data = ReadTextFile(path)\n\n# Generate Feature Lists and Create Spark Dataframe and Vector Assemble Transform data\nfeatures = GenerateFeatureLists(text_data)\nprint(\"The total number of data points are : \", len(features))\n\ndf, colNames = CreateSparkDataframe(spark, features)\nprint(\"Column Names:\\n\", colNames)\nprint(\"Initial Dataframe:\\n\", df.show())\n\ndf_va = VectorAssembleData(df, colNames)\nprint(\"VectorAssembleTransformed Dataframe:\\n\", df_va.show())\n\n# Use KMeans\nmodel = KMeans_InitModel(k=k, seed=seed)\nmodel = KMeans_FitModel(model, df_va)\ndf_transformed = TransformData(model, df_va, colNames)\nprint(\"Final Transformed Dataframe:\\n\", df_transformed.show())\n\n# Print Data\nPrintData(model)" }, { "alpha_fraction": 0.5412490367889404, "alphanum_fraction": 0.599845826625824, "avg_line_length": 27.844444274902344, "blob_id": "7105226109d2db0349b69a8b34d9277e92985dbc", "content_id": "f41cf4dea31623468e34043300d755760b0a76a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1297, "license_type": "no_license", "max_line_length": 112, "num_lines": 45, "path": "/ProblemSet_2/4.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n4. A car company wants to predict how much fuel different cars will use based on their masses. \nThey took a sample of cars, drove each car 100km, and measured how much fuel was used in each case (in litres). \nVisualize the data using scatterplot and also find co-relation between the 2 variables \n(eg. Positive//Negative, Linear/ Non-linear co-relation) The data is summarized in the table below. \n(Use a reasonable scale on both axes and put the explanatory variable on the x-axis.) \nFuel used \n(L) 3.6 6.7 9.8 11.2 14.7 \nMass (metric tons) 0.45 0.91 1.36 1.81 2.27 \n'''\nimport matplotlib.pyplot as plt\n\ndef Correlation(X, Y):\n n = len(X)\n sig_xy = 0\n sig_x = 0\n sig_y = 0\n sig_x2 = 0\n sig_y2 = 0\n for x, y in zip(X, Y):\n sig_xy += x*y\n sig_x += x\n sig_y += y\n sig_x2 += x**2\n sig_y2 += y**2\n\n corr = ((n*sig_xy) - (sig_x*sig_y)) / (((n*sig_x2 - (sig_x**2)) * (n*sig_y2 - (sig_y**2)))**(1/2))\n return corr\n\ndef Scatterplot(X, Y):\n plt.scatter(X, Y)\n plt.title('Scatter Plot')\n plt.xlabel('Mass')\n plt.ylabel('Litres')\n plt.show()\n\n# Driver Code\nMass = [0.45, 0.91, 1.36, 1.81, 2.27]\nL = [3.6, 6.7, 9.8, 11.2, 14.7]\n\nprint(Correlation(Mass, L))\n\nScatterplot(Mass, L)\n\n# Positive Correlation" }, { "alpha_fraction": 0.5464715957641602, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 22.25, "blob_id": "d87745c74c02eb9e25fc8ad2d5b2ad9860a4e6dc", "content_id": "a4adbc56c82c1f5f2664a276c1f4aa8b0e51b5bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2324, "license_type": "no_license", "max_line_length": 91, "num_lines": 100, "path": "/ExploratoryAssignment_1/Codes/LinearRegression.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nLinear regression Algorithm Implementation\n'''\n\n# Imports\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# Main Functions\ndef GenerateData(m, c, n_samples=450, random_scale=5, random_seed=0):\n np.random.seed(random_seed)\n x = np.arange(1, n_samples+1)\n y = c + m * x + np.random.normal(0, random_scale, n_samples)\n return x, y\n\ndef PlotData(X, Y, model=None):\n plt.plot(X, Y, \"o\")\n if not model == None:\n # 2 extreme points of line are (0, c) and (n, mn+c)\n x1 = 0\n y1 = model['c']\n x2 = model['n']\n y2 = model['m']*x2 + model['c']\n plt.plot([x1, x2], [y1, y2], color='k', linestyle='-', linewidth=2)\n plt.show()\n\ndef LinearRegression2D_Fit(X, Y, model=None):\n # Init\n sumX = 0\n sumX2 = 0\n sumY = 0\n sumXY = 0\n n = X.shape[0]\n\n # Check if retraining model\n if not model == None:\n sumX = model['sumX']\n sumX2 = model['sumX2']\n sumY = model['sumY']\n sumXY = model['sumXY']\n n += model['n']\n else:\n model = {}\n\n # Calculate sums\n for x, y in zip(X, Y):\n sumX += x\n sumY += y\n sumX2 += x * x\n sumXY += x * y\n \n # Calculate constants\n m = (n*sumXY - sumX*sumY) / (n*sumX2 - sumX*sumX)\n c = (sumY - m*sumX) / n\n\n # Update Model\n model['m'] = m\n model['c'] = c\n model['sumX'] = sumX\n model['sumX2'] = sumX2\n model['sumY'] = sumY\n model['sumXY'] = sumXY\n model['n'] = n\n\n return model\n\ndef LinearRegression2D_Predict(model, X):\n return model['m']*X + model['c']\n\n# Driver Code\n# Params\nm = 5\nc = 10\nn_samples = 100\nrandom_scale = 50\nseed = 100\n\n# Fit\n# Generate Data\nX, Y = GenerateData(m, c, n_samples=n_samples, random_scale=random_scale, random_seed=seed)\n# Plot Data\nPlotData(X, Y)\n# Fit the data\nmodel = LinearRegression2D_Fit(X, Y)\n# Plot Fitted Data\nPlotData(X, Y, model=model)\n\n# Predict\nX_pred = np.array([12.3, 10.1, 25.2])\nY_actual = np.multiply(X_pred, m) + c\nY_pred = LinearRegression2D_Predict(model, X_pred)\n\n# Evaluate\nprint(\"Parameters:\")\nprint(\"Slope: Actual:\", m, \" - Predicted:\", model['m'])\nprint(\"Intercept: Actual:\", c, \" - Predicted:\", model['c'])\n\nprint(\"Predictions:\")\nfor i in range(X_pred.shape[0]):\n print(\"X:\", X_pred[i], \"Actual Y:\", Y_actual[i], \" - Predicted Y:\", Y_pred[i])" }, { "alpha_fraction": 0.47020408511161804, "alphanum_fraction": 0.5061224699020386, "avg_line_length": 28.90243911743164, "blob_id": "bb9189f10975bd5c646e84ef03de331953908d82", "content_id": "4899ea90b8de0fd38ee0f8d3ea1f09633f35549a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1225, "license_type": "no_license", "max_line_length": 90, "num_lines": 41, "path": "/EndSem_Exam/Codes/clustering.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "def dist(p1, p2):\n return ((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)**(0.5)\n\ndef distmatrix(points):\n distances = []\n for i in range(len(points)):\n distrow = []\n for j in range(len(points)):\n distrow.append(round(dist(points[i], points[j]), 3))\n distances.append(distrow)\n return distances\n\ndef printmatrix(matrix):\n for i in range(len(matrix)):\n prtstr = \"\"\n for j in range(len(matrix[i])):\n prtstr += str(matrix[i][j]) + \"\\t\"\n print(prtstr)\n\ndef findminval(matrix):\n minval = -1\n minind = [(0, 1)]\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if not i == j and not i > j:\n if minval > matrix[i][j] or minval == -1:\n minval = matrix[i][j]\n minind = [(i+1, j+1)]\n elif minval == matrix[i][j]:\n minind.append((i+1, j+1))\n return minind, minval\n\ndef reduceMatrx(matrix, indexes, clusters):\n new_matrix = []\n for cluster in clusters:\n \n\npoints = [(2, 10), (2, 5), (8, 4), (5, 8), (7, 5), (6, 4), (1, 2), (4, 9), (8, 6), (6, 7)]\nmatrix = distmatrix(points)\nprintmatrix(matrix)\nprint(findminval(matrix))" }, { "alpha_fraction": 0.5192150473594666, "alphanum_fraction": 0.5707277059555054, "avg_line_length": 26.200000762939453, "blob_id": "55b7157dfd704b7334bf3ab16e14d4bcf043a921", "content_id": "1cc480a23b733898d7b3d917f324ff8702da01a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1223, "license_type": "no_license", "max_line_length": 113, "num_lines": 45, "path": "/ProblemSet_2/9.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nLet's say you are the new owner of a small ice-cream shop in a little village near the beach. \nYou noticed that there was more business in the warmer months than the cooler months. \nBefore you alter your purchasing pattern to match this trend, you want to be sure that the relationship is real. \nHelp him to find the correlation between the data given.\nTemperature Number of Customers \n98 15 \n87 12 \n90 10 \n85 10 \n95 16 \n75 7\n'''\nimport matplotlib.pyplot as plt\n\ndef Correlation(X, Y):\n n = len(X)\n sig_xy = 0\n sig_x = 0\n sig_y = 0\n sig_x2 = 0\n sig_y2 = 0\n for x, y in zip(X, Y):\n sig_xy += x*y\n sig_x += x\n sig_y += y\n sig_x2 += x**2\n sig_y2 += y**2\n\n corr = ((n*sig_xy) - (sig_x*sig_y)) / (((n*sig_x2 - (sig_x**2)) * (n*sig_y2 - (sig_y**2)))**(1/2))\n return corr\n\ndef Scatterplot(X, Y):\n plt.scatter(X, Y)\n plt.title('Scatter Plot')\n plt.xlabel('Temperature')\n plt.ylabel('No of Customers')\n plt.show()\n\n# Driver Code\nTemp = [98, 87, 90, 85, 95, 75]\nCustNo = [15, 12, 10, 10, 16, 7]\n\nprint(\"Correlation:\", Correlation(Temp, CustNo))\nScatterplot(Temp, CustNo)" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 21, "blob_id": "b0117dc96424195c0a508f57b1f2db70311a3754", "content_id": "c57b30e4abf15be32c276a6dcabacdd8416756e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21, "license_type": "no_license", "max_line_length": 21, "num_lines": 1, "path": "/Assignment_1/Codes/test.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "print(\"DONT COPY!!!\")" }, { "alpha_fraction": 0.5686438083648682, "alphanum_fraction": 0.5761626362800598, "avg_line_length": 27.283464431762695, "blob_id": "b364e75f57cd8566af5929fbb12ec9865edfd416", "content_id": "09bbe5ed1f5d3d3938f4fb987264b38e4081625c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3591, "license_type": "no_license", "max_line_length": 123, "num_lines": 127, "path": "/ExploratoryAssignment_1/Codes/HITS.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nHITS\nHyperlink Induced Topic Search Algorithm Implementation\n'''\n\n# Imports\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n# Main Functions\ndef HITS_Library(Edges, max_iters=10, normalized=True, displayGraph=True):\n Graph = nx.DiGraph()\n Graph.add_edges_from(Edges)\n\n if displayGraph:\n plt.figure(figsize =(10, 10))\n nx.draw_networkx(Graph, with_labels = True)\n plt.show()\n\n hubs, authorities = nx.hits(Graph, max_iter=max_iters, normalized=normalized)\n # The in-built hits function returns two dictionaries keyed by nodes\n # containing hub scores and authority scores respectively.\n\n return hubs, authorities\n\ndef HITS(Edges, max_iters=10, normalized=True, displayGraph=True):\n AdjMatrix, Nodes = GetAdjacencyMatrix(Edges)\n\n # Init\n hubs = {}\n authorities = {}\n for n in Nodes:\n hubs[n] = 1\n authorities[n] = 1\n\n if displayGraph:\n Graph = nx.DiGraph()\n Graph.add_edges_from(Edges)\n plt.figure(figsize =(10, 10))\n nx.draw_networkx(Graph, with_labels = True)\n plt.show()\n\n # Iters\n for it in range(max_iters):\n # Evaluate Hub Scores and Authority Scores\n for i in range(len(Nodes)):\n new_score_hub = 0\n new_score_authority = 0\n for j in range(len(AdjMatrix)):\n if AdjMatrix[i][j]:\n new_score_hub += authorities[Nodes[j]]\n if AdjMatrix[j][i]:\n new_score_authority += hubs[Nodes[j]]\n hubs[Nodes[i]] = new_score_hub\n authorities[Nodes[i]] = new_score_authority\n # Normalise\n if normalized:\n hubs = Normalise(hubs)\n authorities = Normalise(authorities)\n \n return hubs, authorities\n\n\ndef GetAdjacencyMatrix(Edges):\n Nodes = []\n for edge in Edges:\n if not edge[0] in Nodes:\n Nodes.append(edge[0])\n if not edge[1] in Nodes:\n Nodes.append(edge[1])\n AdjMatrix = []\n for i in range(len(Nodes)):\n row = []\n for j in range(len(Nodes)):\n if i == j:\n row.append(False)\n elif (Nodes[i], Nodes[j]) in Edges:\n row.append(True)\n else:\n row.append(False)\n AdjMatrix.append(row)\n return AdjMatrix, Nodes\n\ndef Normalise(arr):\n arr_norm = {}\n sumsquares = 0\n for ak in arr.keys():\n sumsquares += arr[ak] ** 2\n sumsquares = sumsquares ** (0.5)\n for ak in arr.keys():\n arr_norm[ak] = arr[ak] / sumsquares\n return arr_norm\n\n\n# Driver Code\nEdges = [ ('A', 'D'), ('B', 'C'), ('B', 'E'), ('C', 'A'),\n ('D', 'C'), ('E', 'D'), ('E', 'B'), ('E', 'F'),\n ('E', 'C'), ('F', 'C'), ('F', 'H'), ('G', 'A'),\n ('G', 'C'), ('H', 'A')\n ]\nmax_iterations = 100\nnormalized = True\n\ndisplayGraph = False\n\nhubs_lib, authorities_lib = HITS_Library(Edges, max_iters=max_iterations, normalized=normalized, displayGraph=displayGraph)\nhubs, authorities = HITS(Edges, max_iters=max_iterations, normalized=normalized, displayGraph=displayGraph)\n\nprint(\"Self Implementation:\")\nprint(\"Hub Scores: \")\nfor key in hubs.keys():\n print(key + \":\", hubs[key])\nprint(\"\\n\")\nprint(\"Authority Scores: \")\nfor key in authorities.keys():\n print(key + \":\", authorities[key])\n\nprint(\"\\n\\n\")\n\nprint(\"Library Implementation:\")\nprint(\"Hub Scores: \")\nfor key in hubs_lib.keys():\n print(key + \":\", hubs_lib[key])\nprint(\"\\n\")\nprint(\"Authority Scores: \")\nfor key in authorities_lib.keys():\n print(key + \":\", authorities_lib[key])" }, { "alpha_fraction": 0.6452528238296509, "alphanum_fraction": 0.6495528221130371, "avg_line_length": 30.686649322509766, "blob_id": "d793395cea88c1378f46d076228c39e2e0a72e87", "content_id": "2a0ec4a24ba0af8d75ad7fa42a1151a73b1ded41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11628, "license_type": "no_license", "max_line_length": 177, "num_lines": 367, "path": "/Assignment_1/Submission/main.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nMain Code to run implementations\n'''\n\n# Imports\nimport Preprocessing as pre\nimport Algorithms as algo\nimport DecisionTree as dt\nimport BayesClassifier as bc\n\n# Driver Code\n# - Preprocessing ------------------------------------------------------------------------------------------------\nprint(\"- Preprocessing ------------------------------------------------------------------------------------------------\")\n# Open and Clean Dataset\ndataset_path = 'Dataset.csv'\nDataset = pre.ReadCSVFile(dataset_path)\n\n# Print Dataset\nprint(\"Dataset Row Count:\", len(Dataset['Symbol']))\nprint(\"Dataset: 5 rows: \")\nprint(Dataset.head(n=5))\n\nprint(\"\\n\\n\")\n\n# Vis Freq of Dataset\nprint(\"Dataset Freq Visualistion...\")\nprint(\"Number of unique entries:\")\nfor key in Dataset.keys():\n Freq = pre.FreqDist(Dataset[key])\n data, labels = pre.SplitDict(Freq)\n print(key, \"-\", len(labels))\n #print(key, \":\\n\", FreqDist(Dataset[key]), \"\\n\\n\")\n #BarGraph(data, labels)\n\nprint(\"\\n\\n\")\n\n# Vis Missing Dataset\nprint(\"Missing Count:\")\nfor key in Dataset.keys():\n missing, indices = pre.MissingCount(Dataset[key])\n print(key, \"-\", missing)\n\nprint(\"\\n\\n\")\n\n# Clean Missing Data\nprint(\"Cleaning Dataset...\")\n\n# CLEANING PROCEDURE ----------------------\n\n# MISSING DATA CLEAN\n# This Dataset is completely text based -- so no math values\n# Therefore, cleaning missing values using mean or median methods makes no sense\n# Mode Replacement can be used but again it does not make exact sense as any field can have any value - not necc the most appeared value\n\n# Only Option is to remove data if missing or leave it as it is - Depends on the field of the missing data\n# Symbol - CANT BE NULL - NOT REPLACABLE\n# Synonym Symbol - CAN BE NULL - REPLACABLE\n# Scientific Name - CANT BE NULL - NOT REPLACABLE\n# National Common Name - CAN BE NULL - REPLACABLE\n# Family - CAN BE NULL - REPLACABLE\n\n# So, if missing data in field [Symbol , Scientific Name], remove that row - As replacement makes no sense - as unique\n# If missing data in REPLACABLE field, first replace by a marker value - NULL\n# For field Synonym Symbol - No need to replace as it is optional field\n# For National Common Name and Family - replace SMARTLY\n# If there exists a subset of Scientific Name somewhere in another data row, set common name as that common name - AS COMMON NAME OF SUBSPECIES IS SAME AS COMMON NAME OF SPECIES\n\nprint(\"Cleaning Missing Data...\")\nDataset_MissingCleaned, n_removed, n_replaced = pre.MissingClean(Dataset)\nprint(\"\\n\")\nprint(\"Removed\", n_removed, \"data rows\")\nprint(\"Replaced\", n_replaced, \"data rows\")\nprint(\"\\n\")\nprint(\"Cleaned Dataset Missing Count:\")\nfor key in Dataset.keys():\n missing, indices = pre.MissingCount(Dataset[key])\n print(key, \"-\", missing)\n\nprint(\"\\n\\n\")\n\n# REDUNDANT DATA CLEAN\n# Dataset also contains some repeated data rows\n# Basic Cleaning - Search for duplicate data rows and remove all duplicates\n# Advanced Cleaning - Remove row even if it is a subset of the data of any other row - REDUNDANT - IN THIS DATASET DONT DO\n\nDataset_RedundantCleaned, n_duplicates = pre.RedundantClean(Dataset_MissingCleaned)\nprint(\"\\n\")\nprint(\"Removed\", n_duplicates, \"duplicate data rows\")\nprint(\"\\n\")\nprint(\"Redundant Cleaned Dataset Row Count:\", len(Dataset_RedundantCleaned['Symbol']))\n\nprint(\"\\n\\n\")\n\n# Final Cleaned Dataset\nDataset_Cleaned = Dataset_RedundantCleaned\n\n# Save Cleaned Dataset\npre.WriteCSVFile(Dataset_Cleaned, 'Dataset_Cleaned.csv')\n\n# Encode Dataset\nprint(\"Encoding Dataset...\")\nData_Transformed, LabelIndexMaps = pre.EncodeDataset(Dataset_Cleaned)\nprint(\"Encoded Dataset: 5 Rows:\")\nprint(Data_Transformed.head(n=5))\nprint(LabelIndexMaps)\n\n# Save Encoded Dataset\npre.WriteCSVFile(Data_Transformed, 'Dataset_Cleaned_Encoded.csv')\npre.pickle.dump(LabelIndexMaps, open('LabelIndexMaps.p', 'wb'))\n\n# Visualise Preprocessed Data - Family Distribution\npre.Histogram(Data_Transformed['Family'], len(LabelIndexMaps['Family']), 'Family Name', 'Frequency', 'Family Frequency')\n\nprint(\"\\n\\n\")\n# - Preprocessing ------------------------------------------------------------------------------------------------\n\n# - Part A - FIM Algos ------------------------------------------------------------------------------------------------\nprint(\"- Part A - FIM Algos ------------------------------------------------------------------------------------------------\")\ndataset_path = 'Dataset_Cleaned.csv'\n#LabelIndexMap_path = 'LabelIndexMaps.p'\n\nDataset_Preprocessed = algo.ReadCSVFile(dataset_path)\n#LabelIndexMap = pickle.load(open(LabelIndexMap_path, 'rb'))\n\n# Print Dataset\nDatasetRowCount = len(Dataset_Preprocessed['Symbol'])\nprint(\"Dataset Row Count:\", DatasetRowCount)\nprint(\"Dataset: 5 rows: \")\nprint(Dataset_Preprocessed.head(n=5))\n\nprint(\"\\n\")\n\n# Encode Dataset\n### - Change to limit size of dataset encoded\nDatasetPortionPercentage = 0.005\n### - Change to limit size of dataset encoded\nDataset_PortionSize = int(DatasetPortionPercentage * DatasetRowCount)\nif Dataset_PortionSize > DatasetRowCount:\n Dataset_PortionSize = DatasetRowCount\nprint(\"Operating on\", Dataset_PortionSize, \" data rows.\")\nprint(\"Encoding...\")\nDataset_TE = algo.OneHotEncoder(Dataset_Preprocessed.head(Dataset_PortionSize))\n\nprint(\"\\n\\n\")\n\n# FIM\n# Apriori\nprint(\"Apriori\")\n\nMinimumSupport = 0.1\nMinimumThreshold = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\n\nFI = algo.Apriori(Dataset_TE, min_support=MinimumSupport)\nRuleSet = algo.RuleMining(FI, min_threshold=MinimumThreshold)\nprint(\"Frequent Itemsets:\\n\", FI)\nprint(\"\\n\\n\")\nprint(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# FPGrowth\nprint(\"FPGrowth\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\n\nFI = algo.FPGrowth(Dataset_TE, min_support=MinimumSupport)\nRuleSet = algo.RuleMining(FI, min_threshold=MinimumThreshold)\nprint(\"Frequent Itemsets:\\n\", FI)\nprint(\"\\n\\n\")\nprint(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n\n\n# CFI\n# Charm\nprint(\"Charm\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\nMinimumItemsetLength = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\nprint(\"Minimum Itemset Length -\", MinimumItemsetLength)\n\nCFI = algo.Charm(Dataset_TE, min_support=MinimumSupport, min_itemset_length=MinimumItemsetLength)\n# RuleSet = RuleMining(CFI, min_threshold=MinimumThreshold)\nprint(\"Closed Frequent Itemsets:\")\ncfi_index = 1\nfor cfi in CFI:\n print(str(cfi_index) + \":\", \" - \".join(cfi))\n cfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# AClose\nprint(\"AClose\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\nMinimumItemsetLength = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\nprint(\"Minimum Itemset Length -\", MinimumItemsetLength)\n\nCFI = algo.AClose(Dataset_TE, min_support=MinimumSupport, min_itemset_length=MinimumItemsetLength)\n# RuleSet = RuleMining(CFI, min_threshold=MinimumThreshold)\nprint(\"Closed Frequent Itemsets:\")\ncfi_index = 1\nfor cfi in CFI:\n print(str(cfi_index) + \":\", \" - \".join(cfi))\n cfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# MFI\n# Pincer Search\nprint(\"Pincer Search\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\nMinimumItemsetLength = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\nprint(\"Minimum Itemset Length -\", MinimumItemsetLength)\n\nMFI = algo.PincerSearch(Dataset_TE, min_support=MinimumSupport, min_itemset_length=MinimumItemsetLength)\n# RuleSet = RuleMining(MFI, min_threshold=MinimumThreshold)\nprint(\"Maximal Frequent Itemsets:\")\nmfi_index = 1\nfor mfi in MFI:\n print(str(mfi_index) + \":\", \" - \".join(mfi))\n mfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# Mafia\n\nprint(\"Mafia\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\nMinimumItemsetLength = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\nprint(\"Minimum Itemset Length -\", MinimumItemsetLength)\n\nMFI = algo.Mafia(Dataset_TE, min_support=MinimumSupport, min_itemset_length=MinimumItemsetLength)\n# RuleSet = RuleMining(MFI, min_threshold=MinimumThreshold)\nprint(\"Maximal Frequent Itemsets:\")\nmfi_index = 1\nfor mfi in MFI:\n print(str(mfi_index) + \":\", \" - \".join(mfi))\n mfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# LFI\n# Apriori Based LFI\nprint(\"Apriori Based LFI\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\n\nLFI = algo.AprioriLFI(Dataset_TE, min_support=MinimumSupport)\n# RuleSet = RuleMining(LFI, min_threshold=MinimumThreshold)\nprint(\"Longest Frequent Itemsets:\")\nlfi_index = 1\nfor lfi in LFI:\n print(str(lfi_index) + \":\", \" - \".join(lfi))\n lfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# FPGrowth Based LFI\nprint(\"FPGrowth Based LFI\")\nMinimumSupport = 0.1\nMinimumThreshold = 1\n\nprint(\"Minimum Support -\", MinimumSupport)\nprint(\"Minimum Threshold -\", MinimumThreshold)\n\nLFI = algo.FPGrowthLFI(Dataset_TE, min_support=MinimumSupport)\n# RuleSet = RuleMining(LFI, min_threshold=MinimumThreshold)\nprint(\"Longest Frequent Itemsets:\")\nlfi_index = 1\nfor lfi in LFI:\n print(str(lfi_index) + \":\", \" - \".join(lfi))\n lfi_index += 1\nprint(\"\\n\\n\")\n# print(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# - Part A - FIM Algos ------------------------------------------------------------------------------------------------\n\n# - Part B - Rules Mining ------------------------------------------------------------------------------------------------\nprint(\"- Part B - Rules Mining ------------------------------------------------------------------------------------------------\")\nRuleSet = algo.RuleMining(FI, min_threshold=MinimumThreshold)\nprint(\"RuleSet:\\n\", RuleSet.head)\nprint(\"\\n\\n\")\n\n# - Part B - Rules Mining ------------------------------------------------------------------------------------------------\n\n# - Part C - Predictive Analytics ------------------------------------------------------------------------------------------------\nprint(\"- Part C - Predictive Analytics ------------------------------------------------------------------------------------------------\")\n# Decision Tree\nprint(\"Decision Tree on balance-scale dataset:\")\n\nDataset_Path = 'balance-scale.csv'\n\nprint(\"\\n\\n\")\n\n# Building Phase\nDataset = dt.ImportDataset(Dataset_Path)\nX, Y, X_train, X_test, Y_train, Y_test = dt.SplitDataset(Dataset)\nDT_Gini = dt.Train_Gini(X_train, Y_train)\nDT_Entropy = dt.Train_Entropy(X_train, Y_train)\n\nprint(\"\\n\\n\")\n \n# Operational Phase\n# Prediction using Gini\nprint(\"Results Using Gini Index:\")\nY_pred_Gini = dt.Predict(X_test, DT_Gini)\ndt.PrintAccuracy(Y_test, Y_pred_Gini)\n\nprint(\"\\n\\n\")\n\n# Prediction using Entropy\nprint(\"Results Using Entropy:\")\nY_pred_Entropy = dt.Predict(X_test, DT_Entropy)\ndt.PrintAccuracy(Y_test, Y_pred_Entropy)\n\nprint(\"\\n\\n\")\n\n# Bayes Classifier\nprint(\"\\n\\n\")\nprint(\"Bayes Classifier on Iris dataset:\")\n\n# Building Phase\nDataset = bc.ImportDataset_Iris()\nX, Y, X_train, X_test, Y_train, Y_test = bc.SplitDataset(Dataset)\nBayesClassifier = bc.Train(X_train, Y_train)\n\nprint(\"\\n\\n\")\n\n# Operational Phase\n# Prediction using Gini\nprint(\"Results Using Bayes Classifier:\")\nY_pred = bc.Predict(BayesClassifier, X_test)\nbc.PrintAccuracy(Y_test, Y_pred)\n\nprint(\"\\n\\n\")\n\n# - Part C - Predictive Analytics ------------------------------------------------------------------------------------------------" }, { "alpha_fraction": 0.6191067099571228, "alphanum_fraction": 0.6526054739952087, "avg_line_length": 35.681819915771484, "blob_id": "fc4fb3309c7a22afe8033bc1ab86852a69ff4dd6", "content_id": "e956cc5756aed3a7ce2e2a41fa5f0ccdf88a9f5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 806, "license_type": "no_license", "max_line_length": 123, "num_lines": 22, "path": "/ProblemSet_2/8.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\n8. An organization wants to calculate the % of time they spent on each process for their product development. \nVisualize the data using funnel chart with the data given below.\nProduct Development steps Time spent (in hours) \nRequirement Elicitation 50 \nRequirement Analysis 110 \nSoftware Development 250 \nDebugging & Testing 180\nOthers 70 \n'''\nimport plotly.express as px\n\ndef FunnelPlot(X, labels):\n data = dict(number=X, stage=labels)\n fig = px.funnel(data, x='number', y='stage')\n fig.show()\n\n# Driver Code\nProdDevSteps = ['Requirement Elicitation', 'Requirement Analysis', 'Software Development', 'Debugging & Testing', 'Others']\nTime = [50, 110, 250, 180, 70]\n\nFunnelPlot(Time, ProdDevSteps)" }, { "alpha_fraction": 0.6945558786392212, "alphanum_fraction": 0.720343828201294, "avg_line_length": 27.62295150756836, "blob_id": "07c0a597b140514284f7910ec73dbbdae792209d", "content_id": "3a77ea7ae0e1608f514a69f3082a2ab227fdda11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1745, "license_type": "no_license", "max_line_length": 130, "num_lines": 61, "path": "/Hadoop_Assignment/Codes/MatrixMul.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nMatrix Multiplication using PySpark\n'''\n\n# Imports\n# import findspark\n# findspark.init()\nimport numpy as np\nfrom pyspark.mllib.linalg.distributed import RowMatrix\nfrom pyspark.mllib.linalg.distributed import *\nfrom pyspark import SparkContext\nfrom pyspark.sql.session import SparkSession\n\n# Main Functions\ndef InitSpark():\n # Creating a sparkcontext and sparksession\n sc = SparkContext(\"local\", \"PySpark Matrix Multiplier Program\")\n spark = SparkSession(sc)\n return spark, sc\n\ndef GenerateRandomMatrix(size, valRange=(0, 100)):\n # Create Random Matrix\n M = np.random.randint(valRange[0], valRange[1], size).astype(np.float64)\n # M = np.arange(1024 ** 2, dtype=np.float64).reshape(1024, 1024)\n return M\n\ndef toBlockMatrix(rdd, rowsPerBlock=1024, colsPerBlock=1024):\n # Convert array into blockmatrix\n return IndexedRowMatrix(rdd.zipWithIndex().map(lambda xi: IndexedRow(xi[1], xi[0]))).toBlockMatrix(rowsPerBlock, colsPerBlock)\n\ndef MatrixMultiply_Spark(sc, A, B):\n # Matrix Multiply using Spark\n bm_A = toBlockMatrix(sc.parallelize(A))\n bm_B = toBlockMatrix(sc.parallelize(B))\n return (bm_A.multiply(bm_B)).toLocalMatrix()\n\ndef MatrixMultiply_Numpy(A, B):\n # Matrix Multiply using Numpy\n return A.dot(B)\n\n# Driver Code\n# Params\nsize = (1024, 1024)\nvalRange = (1, 101)\n\n# Init Spark\nspark, sc = InitSpark()\n\n# Generate Random Matrices\nA = GenerateRandomMatrix(size, valRange)\nB = GenerateRandomMatrix(size, valRange)\nprint(\"A:\\n\", A, \"\\n\")\nprint(\"B:\\n\", B, \"\\n\")\n\n# Multiply using Spark\nproduct_Spark = MatrixMultiply_Spark(sc, A, B)\nprint(\"Spark Product:\\n\", product_Spark, \"\\n\")\n\n# Multiply using Numpy\nproduct_Numpy = MatrixMultiply_Numpy(A, B)\nprint(\"Numpy Product:\\n\", product_Numpy, \"\\n\")" }, { "alpha_fraction": 0.5747048854827881, "alphanum_fraction": 0.5888701677322388, "avg_line_length": 30.892473220825195, "blob_id": "a24fcf7a4052ea030f89616f3790e8fd48d7077f", "content_id": "4d30396f945386081c822972ade9ad014834e3d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2965, "license_type": "no_license", "max_line_length": 118, "num_lines": 93, "path": "/ExploratoryAssignment_1/My Solutions/FinalReport/Codes/DBSCAN.py", "repo_name": "KausikN/BigData_Files", "src_encoding": "UTF-8", "text": "'''\nDBSCAN\nDensity-based Spatial Clustering of Applications with Noise Algorithm Implementation\n'''\n\n# Imports\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\n\n# Main Functions\ndef GenerateData(n_samples=450, centers=6, cluster_std=0.7, random_state=0):\n X, clusters = make_blobs(n_samples=n_samples, centers=centers, cluster_std=cluster_std, random_state=random_state)\n return X, clusters\n\ndef PlotData(X, labels=[]):\n if len(labels) == 0:\n plt.scatter(X[:,0], X[:,1], alpha=0.7, edgecolors='b')\n else:\n plt.scatter(X[:,0], X[:,1], c=labels, cmap='rainbow', alpha=0.7, edgecolors='b')\n plt.show()\n\n# def DBSCAN(dataset, eps, MinPts):\n# # cluster index\n# C = 1\n# for each unvisited point p in dataset:\n# mark p as visited\n# # find neighbors\n# Neighbors N = find the neighboring points of p\n\n# if |N|>=MinPts:\n# N = N U N'\n# if p' is not a member of any cluster:\n# add p' to cluster C\n\ndef DBSCAN_Library(X, eps=0.3, min_samples=10, plot=True):\n db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n\n # Number of clusters in labels, ignoring noise if present.\n n_clusters = len(set(labels)) - (1 if -1 in labels else 0)\n\n if plot:\n # Plot\n # Black removed and is used for noise instead.\n unique_labels = set(labels)\n colors = ['y', 'b', 'g', 'r']\n print(\"Using Colors:\", colors)\n for k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = 'k'\n\n class_member_mask = (labels == k)\n\n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,\n markeredgecolor='k', \n markersize=6)\n\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,\n markeredgecolor='k',\n markersize=6)\n\n plt.title('number of clusters: ' + str(n_clusters))\n plt.show()\n\n return labels\n\n# Driver Code\nn_samples=450\ncenters=3\ncluster_std=0.7\nrandom_state=0\n\neps=0.3\nmin_samples=10\nplot=True\n\n# Generate Data\nX, clusters = GenerateData(n_samples=n_samples, centers=centers, cluster_std=cluster_std, random_state=random_state)\nPlotData(X)\n\n# Apply Algorithm and Plot\nlabels = DBSCAN_Library(X, eps=eps, min_samples=min_samples, plot=plot)\n# PlotData(X, labels)" } ]
44
maryumraza/Walmart-Sales-Predictor
https://github.com/maryumraza/Walmart-Sales-Predictor
8deb15551feaaf0b3d39d2b9414938243a359169
13fc455dbefa5cacbad37fab0dee647eab54d8d2
194c154a9254d6609ce2a1af7978bd1a55f3b772
refs/heads/master
2021-05-20T10:43:36.081231
2020-04-01T18:39:51
2020-04-01T18:39:51
252,255,996
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.8399999737739563, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 25, "blob_id": "ec24080fc21a743c039e3f6af009ef66649090e8", "content_id": "4caa2d63caa3a454428fe97b9de54dfb63bc2c55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/README.md", "repo_name": "maryumraza/Walmart-Sales-Predictor", "src_encoding": "UTF-8", "text": "# Walmart-Sales-Predictor" }, { "alpha_fraction": 0.656899094581604, "alphanum_fraction": 0.6717358827590942, "avg_line_length": 24.009708404541016, "blob_id": "42837b0dd732f4609f5f02c78a9591edcf4e8e51", "content_id": "6f2622e0fd4bb2d6511b1e9b6e68f46201229268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2696, "license_type": "no_license", "max_line_length": 183, "num_lines": 103, "path": "/walmart_sales.py", "repo_name": "maryumraza/Walmart-Sales-Predictor", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 31 19:57:28 2020\r\n\r\n@author: uni tech\r\n\"\"\"\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn import preprocessing\r\nfrom sklearn.metrics import r2_score\r\nfrom sklearn.impute import SimpleImputer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.svm import SVR\r\n\r\n\r\n\r\n# Initializing datasets\r\ntrain=pd.read_csv('train.csv')\r\ntest=pd.read_csv('test_walmart.csv')\r\nfeatures=pd.read_csv('features.csv')\r\nstores=pd.read_csv('stores.csv')\r\n\r\n# Mergign train and features datasets\r\ndf= pd.merge(features, train, on=['Store', 'Date', 'IsHoliday'], how='inner')\r\n\r\n# One Hot Encoding categorical data\r\none_hot=pd.get_dummies(stores['Type'])\r\nstores=stores.drop('Type', axis=1)\r\nstores = stores.join(one_hot)\r\n\r\n\r\n\r\n\r\ndf = pd.merge(df, stores, on=['Store'], how='inner')\r\n\r\n# Separating date, month, and year from Date\r\ndf['Date']=pd.to_datetime(df['Date'])\r\ndf['year']=df['Date'].dt.year\r\ndf['month']=df['Date'].dt.month\r\ndel df['Date']\r\n\r\n\r\nholiday= pd.get_dummies(df['IsHoliday'])\r\ndf= df.drop('IsHoliday', axis=1)\r\ndf= df.join(holiday)\r\n\r\n\r\n# Fixing null values in markdown with the help of imputer class\r\nse= SimpleImputer()\r\nmarkdown= pd.DataFrame(se.fit_transform(df[['MarkDown1','MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5']]),columns=['MarkDown1','MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5'])\r\ndf= df.drop(['MarkDown1','MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5'], axis=1)\r\n\r\ndf = pd.concat([df,markdown], axis=1)\r\n\r\n\r\nX = np.array(df.drop(columns='Weekly_Sales'))\r\ny= np.array(df['Weekly_Sales']).reshape(-1,1)\r\n\r\n\r\n# Normalizing inputs and outputs\r\nscalar= preprocessing.MinMaxScaler()\r\nX= scalar.fit_transform(X)\r\ny= scalar.fit_transform(y)\r\n\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\r\n\r\n\r\n# Defining functions for regression\r\ndef linear_reg():\r\n clf= LinearRegression()\r\n return clf\r\n \r\n \r\ndef svm_reg():\r\n clf= SVR(kernel='rbf', degree=3, gamma='scale')\r\n return clf\r\n\r\ndef decision_tree():\r\n clf=DecisionTreeRegressor(criterion='mse',splitter='best')\r\n return clf\r\n\r\ndef random_forest():\r\n clf= RandomForestRegressor(n_estimators=5, criterion='mse')\r\n return clf\r\n\r\nlr_ = linear_reg()\r\nsvm_ = svm_reg()\r\ndt_ = decision_tree()\r\nrf_ = random_forest()\r\n\r\nmodels = [lr_ , dt_, svm_ , rf_]\r\nfor model in models:\r\n y_train = y_train.ravel()\r\n model.fit(X_train, y_train)\r\n y_pred = model.predict(X_test)\r\n score = r2_score(y_test, y_pred)\r\n print(score)\r\n \r\n\r\n\r\n \r\n\r\n\r\n" } ]
2
lakerrenhu/reinforcement-learning-project
https://github.com/lakerrenhu/reinforcement-learning-project
e5d106cf884881098ea1f12c98d60a0c5d341a7d
97bdc47c58ec6ac04f35a41fc873bdc2ae3525c5
f60455f3f84f333dd35c415c23188ee5af5fd8a0
refs/heads/main
2023-03-17T14:19:44.762411
2021-03-14T21:58:28
2021-03-14T21:58:28
347,763,657
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.8074866533279419, "alphanum_fraction": 0.8074866533279419, "avg_line_length": 61.33333206176758, "blob_id": "ebb620a060071c2d0f09620f852776abe31f0246", "content_id": "15d8d8ba89544a269eb888825bee9f40039aa451", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 187, "license_type": "no_license", "max_line_length": 111, "num_lines": 3, "path": "/README.md", "repo_name": "lakerrenhu/reinforcement-learning-project", "src_encoding": "UTF-8", "text": "# reinforcement-learning-project\nBased on the UC berkeley AI class and homeworks--Pacman projects, http://ai.berkeley.edu/project_overview.html\nhttp://ai.berkeley.edu/reinforcement.html\n" }, { "alpha_fraction": 0.5422740578651428, "alphanum_fraction": 0.5498276948928833, "avg_line_length": 36.17241287231445, "blob_id": "8575501fdf915530cb2a2ac2d6342fd55866866f", "content_id": "6de017760059a822f579cf59d8bff7ce8ff2a047", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7546, "license_type": "no_license", "max_line_length": 95, "num_lines": 203, "path": "/valueIterationAgents.py", "repo_name": "lakerrenhu/reinforcement-learning-project", "src_encoding": "UTF-8", "text": "# valueIterationAgents.py\n# -----------------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# ([email protected]) and Dan Klein ([email protected]).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel ([email protected]).\n\n\nimport mdp, util\n\nfrom learningAgents import ValueEstimationAgent\n\n#import numpy as np\n#import matplotlib.pyplot as plt\n#import csv\n\nclass ValueIterationAgent(ValueEstimationAgent):\n \"\"\"\n * Please read learningAgents.py before reading this.*\n\n A ValueIterationAgent takes a Markov decision process\n (see mdp.py) on initialization and runs value iteration\n for a given number of iterations using the supplied\n discount factor.\n \"\"\"\n def __init__(self, mdp, discount = 0.9, iterations = 100):\n \"\"\"\n Your value iteration agent should take an mdp on\n construction, run the indicated number of iterations\n and then act according to the resulting policy.\n\n Some useful mdp methods you will use:\n mdp.getStates()\n mdp.getPossibleActions(state)\n mdp.getTransitionStatesAndProbs(state, action)\n mdp.getReward(state, action, nextState)\n mdp.isTerminal(state)\n \"\"\"\n self.mdp = mdp\n self.discount = discount\n self.iterations = iterations\n self.values = util.Counter() # A Counter is a dict with default 0\n\n # Write value iteration code here\n \"*** YOUR CODE HERE ***\"\n ## three loops: for iteration,for states,for actions\n j=0\n tt=0\n n=self.iterations\n sset=self.mdp.getStates()\n Vvalue=[]\n #print(sset)\n #print(len(sset))\n def largest(array,N):\n larg = array[0] # Initial value \n i=1 # compare every element with \n while i<N: \n if array[i] > larg: \n larg = array[i] # current max\n i=i+1\n return larg\n while j<n: #iteration loop\n self.values1=util.Counter()\n ts=0\n while ts<len(sset):#states loop\n st=sset[ts]\n #for st in sset:\n dt=self.mdp.isTerminal(st)\n Qvalue=[]\n if dt==False:\n #if st!='TERMINAL_STATE':\n sets=self.mdp.getPossibleActions(st)\n t=0\n while t<len(sets):#action loop\n tt=self.computeQValueFromValues(st, sets[t])\n Qvalue.insert(len(Qvalue),tt)\n t=t+1\n #for t in sets:\n #Qvalue.append(self.computeQValueFromValues(st, act))\n # tt=self.computeQValueFromValues(st, t)\n # Qvalue.insert(len(Qvalue),tt)\n else:\n Qvalue.insert(len(Qvalue),0)\n larg=largest(Qvalue,len(Qvalue))\n self.values1[st]=larg\n #observe the evolution of V-value\n if st==(0, 2):\n #print(st)\n #print(larg)\n Vvalue.insert(len(Vvalue),larg) #visualize the evolution of V-value\n ts=ts+1\n self.values=self.values1\n j=j+1\n #check the stored V-value at state of (0,2)\n #print(Vvalue)\n \n # name of csv file \n #filename = \"Vvalues.csv\"\n # writing to csv file \n # with open(filename, 'w') as csvfile: \n # creating a csv writer object \n # csvwriter = csv.writer(csvfile) \n # writing the data rows \n # csvwriter.writerows(Vvalue) \n #compare the runtimes of two method\n #plt.plot(range(1,len(Vvalue)+1), Vvalue, 'r--')\n #plt.xlabel('the number of iteration')\n #plt.ylabel('V-value')\n #plt.title('The evolution of V-value at (0,2)')\n #plt.text(5, 1.5, 'red: iterative method')\n #plt.text(5, 1.3, 'green:direct method')\n #plt.show()\n \n def getValue(self, state):\n \"\"\"\n Return the value of the state (computed in __init__).\n \"\"\"\n return self.values[state]\n\n\n def computeQValueFromValues(self, state, action):\n \"\"\"\n Compute the Q-value of action in state from the\n value function stored in self.values.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n #get the list of nextstate and prob from mdp.getTransitionStatesAndProbs(state, action)\n #next_state_prob=self.mdp.getTransitionStatesAndProbs(state, action)\n #store each transition result\n Qvalue=[]\n for next_state,prob in self.mdp.getTransitionStatesAndProbs(state, action):\n Qvalue.insert(len(Qvalue),prob*(self.mdp.getReward(state, action, next_state)+\n self.discount*self.values[next_state]))\n return sum(Qvalue)\n \n util.raiseNotDefined()\n\n def computeActionFromValues(self, state):\n \"\"\"\n The policy is the best action in the given state\n according to the values currently stored in self.values.\n\n You may break ties any way you see fit. Note that if\n there are no legal actions, which is the case at the\n terminal state, you should return None.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n dt=self.mdp.isTerminal(state)\n tt=0\n def largest(array,N): #find the largest one\n larg = array[0] # Initial value\n i=1 \n while i<N: \n if array[i] > larg: \n larg = array[i] # current max\n i=i+1\n #print (\"Largest in given array is\",maxm)\n return larg\n opt_policy= None\n if dt==False:# if it's not terminal state\n #acts=self.mdp.getPossibleActions(state)\n Q_value=[]\n #get all Qvalue\n sets=self.mdp.getPossibleActions(state)\n #print(len(sets))\n #print(sets[0])\n t1=0\n while t1<len(sets):\n tt=self.computeQValueFromValues(state, sets[t1])\n Q_value.insert(len(Q_value),tt)\n t1=t1+1\n #get opt_policy=argmax(Qvalue)\n t2=0\n while t2<len(sets):\n tt=self.computeQValueFromValues(state, sets[t2])\n if tt==largest(Q_value,len(Q_value)):\n opt_policy=sets[t2]\n t2=t2+1\n #for t in self.mdp.getPossibleActions(state):\n # tt=self.computeQValueFromValues(state, t)\n # if tt==largest(Q_value,len(Q_value)):\n # opt_policy=t\n return opt_policy\n else:\n return opt_policy\n \n util.raiseNotDefined()\n\n def getPolicy(self, state):\n return self.computeActionFromValues(state)\n\n def getAction(self, state):\n \"Returns the policy at the state (no exploration).\"\n return self.computeActionFromValues(state)\n\n def getQValue(self, state, action):\n return self.computeQValueFromValues(state, action)\n" }, { "alpha_fraction": 0.6782786846160889, "alphanum_fraction": 0.7008196711540222, "avg_line_length": 36.9555549621582, "blob_id": "c80895008160aacc594f3de9f927e3e837da264c", "content_id": "d1945109e46f214b4cbbb7638b222f884bf54b16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3416, "license_type": "no_license", "max_line_length": 83, "num_lines": 90, "path": "/analysis.py", "repo_name": "lakerrenhu/reinforcement-learning-project", "src_encoding": "UTF-8", "text": "# analysis.py\n# -----------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# ([email protected]) and Dan Klein ([email protected]).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel ([email protected]).\n\n\n######################\n# ANALYSIS QUESTIONS #\n######################\n\n# Set the given parameters to obtain the specified policies through\n# value iteration.\n\ndef question2():\n #the noise affects the prob of jumping into the pits and V values.\n #the current settings of discount=0.9 noise=0.2 cannot lead the agent\n #to crossing the bridge.If the noise decreases to be 0 or close to 0,\n #then the search of agent is treated as a deterministic problem.\n #the V values from left to right will be:[5.9, 6.56, 7.29, 8.1, 9].\n #theoretically, the agent will cross the bridge from left to right. \n answerDiscount = 0.9\n answerNoise = 0\n return answerDiscount, answerNoise\n\ndef question3a():\n #if the living reward is a big penalty, the agent tends to end the game quickly\n #small noise mean more likely to risk\n answerDiscount = 1#0.9\n answerNoise = 0.01#0.01\n answerLivingReward = -5\n return answerDiscount, answerNoise, answerLivingReward\n # If not possible, return 'NOT POSSIBLE'\n\ndef question3b():\n #low discount encourages the agent to get reward earlier(+1) than later (+10)\n #positive living reward makes the agent want to live longer\n #dont want to risk of jumping into pits\n answerDiscount = 0.2#0.2\n answerNoise = 0.01\n answerLivingReward =0 #0.5\n return answerDiscount, answerNoise, answerLivingReward\n # If not possible, return 'NOT POSSIBLE'\n\ndef question3c():\n #if there's no living penalty,then the agent would prefer (+10)\n #small noise lets the agent not worried about pits\n #reasonable discount will make the agent find a shortcut\n answerDiscount = 0.5#0.7,0.5 works\n answerNoise = 0.01\n answerLivingReward = 0\n return answerDiscount, answerNoise, answerLivingReward\n # If not possible, return 'NOT POSSIBLE'\n\ndef question3d():\n #no discount and low living penalty make the agent prefer (+10)\n #large noise increases the risk of jumping into pits\n answerDiscount = 1\n answerNoise = 0.3\n answerLivingReward = -0.2\n return answerDiscount, answerNoise, answerLivingReward\n # If not possible, return 'NOT POSSIBLE'\n\ndef question3e():\n #since living reward is very large, living longer brings more rewards\n answerDiscount = 1\n answerNoise = 0\n answerLivingReward = 100\n return answerDiscount, answerNoise, answerLivingReward\n # If not possible, return 'NOT POSSIBLE'\n\ndef question6():\n answerEpsilon = None\n answerLearningRate = None\n return answerEpsilon, answerLearningRate\n # If not possible, return 'NOT POSSIBLE'\n\nif __name__ == '__main__':\n print 'Answers to analysis questions:'\n import analysis\n for q in [q for q in dir(analysis) if q.startswith('question')]:\n response = getattr(analysis, q)()\n print ' Question %s:\\t%s' % (q, str(response))\n" }, { "alpha_fraction": 0.56691974401474, "alphanum_fraction": 0.5729935169219971, "avg_line_length": 32.40579605102539, "blob_id": "fb75f1c06abda70a6566b31024b43497081d8cd8", "content_id": "00e83eca8b7de8b7f261f43f2cb0cdf5c62fb3d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9220, "license_type": "no_license", "max_line_length": 93, "num_lines": 276, "path": "/qlearningAgents.py", "repo_name": "lakerrenhu/reinforcement-learning-project", "src_encoding": "UTF-8", "text": "# qlearningAgents.py\n# ------------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# ([email protected]) and Dan Klein ([email protected]).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel ([email protected]).\n\n\nfrom game import *\nfrom learningAgents import ReinforcementAgent\nfrom featureExtractors import *\n\nimport random,util,math\n\n#import csv\nQvector=[]\n\nclass QLearningAgent(ReinforcementAgent):\n \"\"\"\n Q-Learning Agent\n\n Functions you should fill in:\n - computeValueFromQValues\n - computeActionFromQValues\n - getQValue\n - getAction\n - update\n\n Instance variables you have access to\n - self.epsilon (exploration prob)\n - self.alpha (learning rate)\n - self.discount (discount rate)\n\n Functions you should use\n - self.getLegalActions(state)\n which returns legal actions for a state\n \"\"\"\n def __init__(self, **args):\n \"You can initialize Q-values here...\"\n ReinforcementAgent.__init__(self, **args)\n\n \"*** YOUR CODE HERE ***\"\n self.Qvalues = util.Counter()\n\n def getQValue(self, state, action):\n \"\"\"\n Returns Q(state,action)\n Should return 0.0 if we have never seen a state\n or the Q node value otherwise\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n #the initial value should be zero\n return self.Qvalues[(state,action)]\n \n # util.raiseNotDefined()\n\n\n def computeValueFromQValues(self, state):\n \"\"\"\n Returns max_action Q(state,action)\n where the max is over legal actions. Note that if\n there are no legal actions, which is the case at the\n terminal state, you should return a value of 0.0.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n #print(state)\n def largest(array,N):\n larg = array[0] # Initialial value \n i=1 # compare every element with \n while i<N: \n if array[i] > larg: \n larg = array[i] # current max\n i=i+1\n return larg\n Qvalue=[]\n sets=self.getLegalActions(state)\n if state!='TERMINAL_STATE':\n ts=0\n while ts<len(sets):\n t=sets[ts]\n #for t in sets:\n #Qvalue.append(self.getQValue(state,act))\n Qvalue.insert(len(Qvalue),self.getQValue(state,t))\n ts=ts+1\n if state==(1, 2):\n #print(state)\n #print((Qvalue))\n Qvector.append(Qvalue) #store Q-value for visualize the evolution\n #print(Qvector)\n return largest(Qvalue,len(Qvalue))\n else:\n #Qvalue.insert(len(Qvalue),0)\n #Qvector.append(Qvalue)\n #print(state)\n #print(Qvector)\n return 0\n \n # util.raiseNotDefined()\n\n def computeActionFromQValues(self, state):\n \"\"\"\n Compute the best action to take in a state. Note that if there\n are no legal actions, which is the case at the terminal state,\n you should return None.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n def largest(array,N):#find the largest one\n larg = array[0] # Initial value \n i=1 # compare every element with \n while i<N: \n if array[i] > larg: \n larg = array[i] # current max\n i=i+1\n return larg\n opt_policy= None\n if state!='TERMINAL_STATE':# if it's not terminal state \n sets=self.getLegalActions(state)\n Q_value=[]\n #get all Qvalue\n t1=0\n while t1<len(sets):\n ct=sets[t1] #get each state\n Q_value.insert(len(Q_value),self.getQValue(state,ct))\n t1=t1+1\n #Q_value.append(self.getQValue(state,act))\n t2=0\n while t2<len(sets):#get opt_policy=argmax(Qvalue)\n ct=sets[t2] #get each state\n tt=self.getQValue(state,ct)\n if tt==largest(Q_value,len(Q_value)):\n opt_policy=ct\n t2=t2+1\n return opt_policy\n else:\n return opt_policy\n \n #util.raiseNotDefined()\n\n def getAction(self, state):\n \"\"\"\n Compute the action to take in the current state. With\n probability self.epsilon, we should take a random action and\n take the best policy action otherwise. Note that if there are\n no legal actions, which is the case at the terminal state, you\n should choose None as the action.\n\n HINT: You might want to use util.flipCoin(prob)\n HINT: To pick randomly from a list, use random.choice(list)\n \"\"\"\n # Pick Action\n legalActions = self.getLegalActions(state)\n action = None\n \"*** YOUR CODE HERE ***\"\n s1=self.computeActionFromQValues(state)\n s2=random.choice(self.getLegalActions(state))\n s=util.flipCoin(self.epsilon)\n if state!='TERMINAL_STATE': # not terminal state\n action=s1 if s==False else s2\n #action=s\n #action=self.getPolicy(state)\n #else:\n #action=s2\n #action=random.choice(legalActions)\n return action\n else:\n return action # if terminal state\n \n #util.raiseNotDefined()\n\n #return action\n\n def update(self, state, action, nextState, reward):\n \"\"\"\n The parent class calls this to observe a\n state = action => nextState and reward transition.\n You should do your Q-Value update here\n\n NOTE: You should never call this function,\n it will be called on your behalf\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n \n s2= self.getValue(nextState)\n sample=self.discount*s2+reward\n s1= self.getQValue(state,action)\n self.Qvalues[(state,action)] = (1-self.alpha)*s1 + self.alpha*sample\n #self.Qvalues[(state,action)] = S1 + self.alpha*(sample-S1)\n #util.raiseNotDefined()\n\n def getPolicy(self, state):\n return self.computeActionFromQValues(state)\n\n def getValue(self, state):\n return self.computeValueFromQValues(state)\n\n\nclass PacmanQAgent(QLearningAgent):\n \"Exactly the same as QLearningAgent, but with different default parameters\"\n\n def __init__(self, epsilon=0.05,gamma=0.8,alpha=0.2, numTraining=0, **args):\n \"\"\"\n These default parameters can be changed from the pacman.py command line.\n For example, to change the exploration rate, try:\n python pacman.py -p PacmanQLearningAgent -a epsilon=0.1\n\n alpha - learning rate\n epsilon - exploration rate\n gamma - discount factor\n numTraining - number of training episodes, i.e. no learning after these many episodes\n \"\"\"\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n QLearningAgent.__init__(self, **args)\n\n def getAction(self, state):\n \"\"\"\n Simply calls the getAction method of QLearningAgent and then\n informs parent of action for Pacman. Do not change or remove this\n method.\n \"\"\"\n action = QLearningAgent.getAction(self,state)\n self.doAction(state,action)\n return action\n\n\nclass ApproximateQAgent(PacmanQAgent):\n \"\"\"\n ApproximateQLearningAgent\n\n You should only have to overwrite getQValue\n and update. All other QLearningAgent functions\n should work as is.\n \"\"\"\n def __init__(self, extractor='IdentityExtractor', **args):\n self.featExtractor = util.lookup(extractor, globals())()\n PacmanQAgent.__init__(self, **args)\n self.weights = util.Counter()\n\n def getWeights(self):\n return self.weights\n\n def getQValue(self, state, action):\n \"\"\"\n Should return Q(state,action) = w * featureVector\n where * is the dotProduct operator\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()\n\n def update(self, state, action, nextState, reward):\n \"\"\"\n Should update your weights based on transition\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()\n\n def final(self, state):\n \"Called at the end of each game.\"\n # call the super-class final method\n PacmanQAgent.final(self, state)\n\n # did we finish training?\n if self.episodesSoFar == self.numTraining:\n # you might want to print your weights here for debugging\n \"*** YOUR CODE HERE ***\"\n pass\n" } ]
4
kanak3699/Visualizing-a-Decision-Tree
https://github.com/kanak3699/Visualizing-a-Decision-Tree
4e24be576b7300f6867298b86bf6a4d0306bab62
fa75c384a3c4dc437f119d5308ab43f5863b4871
24dd840eb214a63c4feacba778b508ed22609124
refs/heads/master
2021-08-16T06:40:41.880451
2020-03-25T00:47:31
2020-03-25T00:47:31
133,874,673
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7912458181381226, "alphanum_fraction": 0.804713785648346, "avg_line_length": 36.125, "blob_id": "209d8d4e6175aedc42c73b7afab122a926220605", "content_id": "c932907002d531f1032a556e4209b19bb6b82c57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 297, "license_type": "no_license", "max_line_length": 133, "num_lines": 8, "path": "/README.md", "repo_name": "kanak3699/Visualizing-a-Decision-Tree", "src_encoding": "UTF-8", "text": "# Visualizing-a-Decision-Tree\nThis is a practice code from Google Developers.\n\n## Getting Started\n* The code is for personal practice from Google Developers on getting started with Machine Learning with python programming language.\n\n## More Information\nhttps://www.youtube.com/watch?v=tNa99PG8hR8\n" }, { "alpha_fraction": 0.5330219864845276, "alphanum_fraction": 0.5703802704811096, "avg_line_length": 7.910714149475098, "blob_id": "70566fa6a7714223ff6ae41443d661bd21589286", "content_id": "e5673f008c01d233cfb827c21fa8cb7675c08c66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1499, "license_type": "no_license", "max_line_length": 82, "num_lines": 168, "path": "/Visualizing a Decision Tree.py", "repo_name": "kanak3699/Visualizing-a-Decision-Tree", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\nfrom sklearn.datasets import load_iris\n\n\n# In[2]:\n\n\niris = load_iris()\n\n\n# In[4]:\n\n\nprint(iris.feature_names)\n\n\n# In[5]:\n\n\nprint(iris.target_names)\n\n\n# In[7]:\n\n\nprint(iris.data[0])\n\n\n# In[8]:\n\n\nprint(iris.target[0])\n\n\n# In[13]:\n\n\nfor i in range(len(iris.target)):\n print(\"Example %d: label %s, features %s\" % (i, iris.target[i], iris.data[i]))\n\n\n# In[17]:\n\n\nimport numpy as np\n\n\n# In[15]:\n\n\niris = load_iris()\n\n\n# In[18]:\n\n\ntest_idx = [0,50,100]\n\n\n# In[19]:\n\n\ntrain_target =np.delete(iris.target, test_idx)\n\n\n# In[20]:\n\n\ntrain_data = np.delete(iris.data, test_idx, axis=0)\n\n\n# In[21]:\n\n\ntest_target = iris.target[test_idx]\n\n\n# In[23]:\n\n\ntest_data = iris.data[test_idx]\n\n\n# In[24]:\n\n\nfrom sklearn import tree\n\n\n# In[25]:\n\n\nclf = tree.DecisionTreeClassifier()\n\n\n# In[26]:\n\n\nclf.fit(train_data, train_target) \n\n\n# In[28]:\n\n\nprint(test_target)\n\n\n# In[29]:\n\n\nprint(clf.predict(test_data))\n\n\n# In[39]:\n\n\nimport pydotplus\n\n\n# In[30]:\n\n\nfrom sklearn.externals.six import StringIO\n\n\n# In[32]:\n\n\nimport pydot\n\n\n# In[49]:\n\n\nimport graphviz\n\n\n# In[33]:\n\n\ndot_data = StringIO()\n\n\n# In[34]:\n\n\ntree.export_graphviz(clf,\n out_file=dot_data,\n feature_names=iris.feature_names,\n class_names=iris.target_names,\n filled=True, rounded=True,\n impurity=False)\n\n\n# In[52]:\n\n\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue())\n\n\n# In[ ]:\n\n\ngraph.write_pdf(\"iris.pdf\")\n\n" } ]
2
TishkoffLab/TF_Binding_scores
https://github.com/TishkoffLab/TF_Binding_scores
a969e87877059a9246fd65721e3a73753d52bc06
eaa5306023ff1ca1e2d34f9528d7ceaa49a9ddb5
d5d9497d9a4b94c39b21658637c0ba54df948e59
refs/heads/master
2020-12-23T03:49:58.807188
2020-03-19T14:55:19
2020-03-19T14:55:19
237,023,559
0
0
null
2020-01-29T16:01:53
2020-02-18T16:46:10
2020-03-19T14:55:19
Jupyter Notebook
[ { "alpha_fraction": 0.5899548530578613, "alphanum_fraction": 0.5985327363014221, "avg_line_length": 40.74528121948242, "blob_id": "e21119f93aa6e6601574a8d737b5ee90024f49ec", "content_id": "546723b10acfeb8e4bf8a5317197fb12fdf473aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8860, "license_type": "no_license", "max_line_length": 207, "num_lines": 212, "path": "/generate_backgroundH_forTFs.py", "repo_name": "TishkoffLab/TF_Binding_scores", "src_encoding": "UTF-8", "text": "import sys\nfrom pandas import *\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot\nimport random\nfrom scipy.stats import norm\nimport os\nfrom argparse import ArgumentParser\nimport pybedtools\nimport pdb\nimport math\nimport time\n\nparser = ArgumentParser()\n# parser.add_argument(\"-i\", \"--input_genes\", dest=\"input_gene_file\",\n# \t\t\t\t\thelp=\"input file containing the list of TF names, one per row\")\nparser.add_argument(\"-m\", \"--matrix_loc\", dest=\"matrix_loc\",\n\t\t\t\t\thelp=\"full path of the folder that contains the PWM matrix files\")\nparser.add_argument(\"-o\", \"--outname\", dest=\"outname\",\n\t\t\t\t\thelp=\"the name of the file to save the sequence scores to\")\nparser.add_argument(\"-f\", \"--refchrmfastaloc\", dest=\"ref_fasta_loc\",\n\t\t\t\t\thelp=\"location of the reference fasta files (should just be a fasta per chromosome) to use for getting the reference sequences\")\nparser.add_argument(\"-b\", \"--bg_frac_file\", dest=\"bgfrac_file\",\n help=\"file containing the background frequency of A/C/T/G, for each autosomal chromosome.\")\nparser.add_argument(\"-r\", \"--reps\", dest=\"reps\",\n help=\"number of replicate background binding scores to generate\")\nparser.add_argument(\"-g\", \"--genomesize\", dest=\"genome_size_file\",\n help=\"file containing the chromosme names and sizes, to pull random sequences from\")\n\ndef read_JASPAR_transfac_pfms(infile):\n pfms_file = open(infile,'r')\n pfms_info = {}\n seq_len = 0\n for line in pfms_file:\n line = line.split('\\n')[0]\n if(len(line.split(' ')) > 1):\n line = line.split(' ')\n if(line[0] == 'DE'):\n pfms_info['Matrix_ID'] = line[1]\n pfms_info['Matrix_Name'] = line[2]\n seq_len = 0\n elif(line[0] == 'CC'):\n temp = line[1].split(':')\n pfms_info[temp[0]] = temp[1]\n if(seq_len > 0):\n pfms_info['TF_len'] = seq_len\n elif(len(line.split('\\t')) > 1):\n line = line.split('\\t')\n if(line[0] == 'PO'):\n curr_matorder = line[1:]\n else:\n curr_vals = {}\n for n,v in enumerate(line[1:]):\n curr_vals[curr_matorder[n]] = float(v)+1\n pfms_info[int(line[0])] = curr_vals\n seq_len = int(line[0])\n else:\n pass\n pfms_file.close()\n return pfms_info\n\ndef get_matrix_byTF(tfname,info_dicts):\n matrix_dict_touse = None\n for i in info_dicts:\n if(i['Matrix_Name'] == tfname):\n matrix_dict_touse = i\n break\n if(matrix_dict_touse == None):\n print('Could not find a PWM for Transcription Factor {0}'.format(tfname))\n return None\n matrix_aslist = []\n for i in range(1,matrix_dict_touse['TF_len']+1):\n matrix_aslist.append(matrix_dict_touse[i])\n return matrix_aslist\n\n\n\ndef get_lnPWM_from_fracPWM(fracPWM,bgfreqs):\n lnPWM = []\n bgfreqs_dict = {x:bgfreqs['frac_{0}'.format(x)].values[0] for x in 'ACTG'}\n for en in fracPWM:\n temp_matrix = {}\n for b in 'ACTG':\n f = float(en[b])/bgfreqs_dict[b]\n temp_matrix[b] = -np.log(f)\n lnPWM.append(temp_matrix)\n return lnPWM\n\ndef get_fracPWM_from_matrix(pwm):\n fracPWM = []\n for en in pwm:\n temp_matrix = {}\n curr_totcount = sum([float(x) for x in en.values()])\n for b in 'ACTG':\n if(f == 0.0):\n temp_matrix[b] = 1/curr_totcount\n else:\n temp_matrix[b] = float(en[b])/curr_totcount\n fracPWM.append(temp_matrix)\n return fracPWM\n\ndef get_matrix_counts(pwm):\n pos_counts = []\n for en in pwm:\n temp = [float(x) for x in en.values()]\n pos_counts.append(sum(temp))\n return pos_counts\n\ndef get_matrix_scores(pwm,seq):\n seqval_list = []\n for n,b in enumerate(seq):\n try:\n seqval_list.append(float(pwm[n][b]))\n except:\n if(b not in 'ACTG'):\n print('Sequence contains a letter, {0}, that is not A/C/G/T at position {1}'.format(b,n))\n return None\n else:\n continue\n return seqval_list\n\n\ndef calculate_bgH(seq,ln_pwm,bgfreqs):\n currscore_ln = get_matrix_scores(ln_pwm,seq)\n Y = compute_Y(seq,bgfreqs)\n H = currscore_ln - Y\n return np.sum(H)\n\n\ndef get_random_bgseqs(slen,reps,fastaloc,chrmsizes,seqinfo_file=None):\n # x = pybedtools.BedTool()\n\tchrms_touse = list(chrmsizes['chrom'])\n\tbgseq_list = []\n\tif(seqinfo_file is not None):\n\t\tgenseqs_df = read_csv(seqinfo_file,delimiter='\\t')\n\t\tfor r,seq in genseqs_df.iterrows():\n\t\t\tchrmfile = '{0}/chr{1}.fa'.format(fastaloc,seq['Chrm'])\n\t\t\tcurr_seq = pybedtools.BedTool.seq('chr{0}:{1}-{2}'.format(seq['Chrm'],seq['Start'],seq['End']),chrmfile)\n\t\t\tbgseq_list.append([seq['Chrm'],seq['Start'],seq['End'],curr_seq.upper()])\n\twhile(len(bgseq_list) < int(reps)):\n\t\tis_valid_seq = True\n\t\ttry:\n\t\t\tcurr_chrm = random.randint(1,22)\n\t\t\tcurr_start = random.randint(1,chrmsizes.loc[chrmsizes['chrom'] == 'chr{0}'.format(curr_chrm)]['size'].values[0])\n\t\t\t#TODO check for end being over the size of the chrm\n\t\t\tcurr_end = curr_start + slen\n\t\t\tchrmfile = '{0}/chr{1}.fa'.format(fastaloc,curr_chrm)\n\t\t\tcurr_seq = pybedtools.BedTool.seq('chr{0}:{1}-{2}'.format(curr_chrm,curr_start,curr_end),chrmfile).upper()\n\t\t\tfor b in curr_seq:\n\t\t\t\tif(b not in 'ACTG'):\n\t\t\t\t\tis_valid_seq = False\n\t\t\t\t\tcontinue\n\t\t\tif(is_valid_seq):\n\t\t\t\tbgseq_list.append([curr_chrm,curr_start,curr_end,curr_seq])\n\t\texcept:\n\t\t\tcontinue\n\t\t\t# bgseq_list.append({'chrm':curr_chrm,'start':curr_start,'end':curr_end,'seq':curr_seq.upper()})\n\treturn bgseq_list\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n bgfrac_df = read_csv(args.bgfrac_file,delimiter='\\t')\n chrmsizes_df = read_csv(args.genome_size_file,delimiter='\\t')\n transfac_matrix_list = os.listdir(args.matrix_loc)\n\n outfile = open(args.outname,'w')\n outfile.write('Average Background H score for each TF. Number of replicates: {0}\\nTF_name\\tBG Z score\\n'.format(args.reps))\n bg_H_by_TF = {}\n outfile.close()\n for f in transfac_matrix_list:\n \tstart = time.time()\n curr_JASPARmatrix = read_JASPAR_transfac_pfms('{0}/{1}'.format(args.matrix_loc,f))\n print('starting calculation for TF {0}'.format(curr_JASPARmatrix['Matrix_Name']))\n curr_matrix = []\n for i in range(1,curr_JASPARmatrix['TF_len']+1):\n curr_matrix.append(curr_JASPARmatrix[i])\n try:\n bgseqs = get_random_bgseqs(curr_JASPARmatrix['TF_len'],args.reps,args.ref_fasta_loc,chrmsizes_df,'bgseqs_info_v1/{0}.{1}reps.random_bgseq.info'.format(curr_JASPARmatrix['Matrix_Name'],args.reps))\n except:\n bgseqs = get_random_bgseqs(curr_JASPARmatrix['TF_len'],args.reps,args.ref_fasta_loc,chrmsizes_df)\n # outfile_currTF = open('bgseqs_info/{0}.{1}reps.random_bgseq.info'.format(curr_JASPARmatrix['Matrix_Name'],args.reps),'w')\n # outfile_currTF.write('Chrm\\tStart\\tEnd\\n')\n bg_H_list = []\n curr_fracPWM = get_fracPWM_from_matrix(curr_matrix)\n curr_lnfracPWM_bychrm = []\n for n in range(1,23):\n bgfreqs_n = bgfrac_df.loc[bgfrac_df['Chrm'] == str(n)][['frac_A','frac_C','frac_G','frac_T']]\n curr_lnfracPWM_bychrm.append(get_lnPWM_from_fracPWM(curr_fracPWM,bgfreqs_n))\n print('starting to calculate H scores for bg seqs, for TF {0}'.format(curr_JASPARmatrix['Matrix_Name']))\n time_allseqs = []\n for s in bgseqs:\n seqstart = time.time()\n curr_seq = s[3]\n curr_lnfracPWM = curr_lnfracPWM_bychrm[s[0]-1]\n curr_H = np.sum(get_matrix_scores(curr_lnfracPWM,curr_seq))\n bg_H_list.append(curr_H)\n # outfile_currTF.write('{0}\\t{1}\\t{2}\\n'.format(s[0],s[1],s[2]))\n seqend = time.time()\n time_allseqs.append(seqend-seqstart)\n print('finished H calculations for all bg seqs, average time taken = {0}'.format(np.average(time_allseqs)))\n currTF_output_df = DataFrame(bgseqs,columns=['Chrm','Start','End','seq'])\n currTF_output_df.to_csv('bgseqs_info/{0}.{1}reps.random_bgseq.info'.format(curr_JASPARmatrix['Matrix_Name'],args.reps),sep='\\t',columns=['Chrm','Start','End'],index=False)\n curr_z = np.sum([math.exp(-x) for x in bg_H_list])\n outfile = open(args.outname,'a')\n # bg_H_by_TF[curr_matrix['Matrix_Name']] = sum(curr_z)\n outfile.write('{0}\\t{1}\\n'.format(curr_JASPARmatrix['Matrix_Name'],curr_z))\n # bg_H_by_TF[curr_matrix['Matrix_Name']] = bg_H_list\n # outfile_currTF.close()\n outfile.close()\n end = time.time()\n print('Finished calculation of bg Z score for TF {0}; time taken = {1}'.format(curr_JASPARmatrix['Matrix_Name'],(end - start)))\n \n \n\n\n\n" }, { "alpha_fraction": 0.5850945711135864, "alphanum_fraction": 0.6067852973937988, "avg_line_length": 39.84090805053711, "blob_id": "0a2406b3971e560f00059d34ffa0d62b38018190", "content_id": "f1571577ebb8c0fdbbd987dcb419c8452027a5c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1798, "license_type": "no_license", "max_line_length": 390, "num_lines": 44, "path": "/get_PWMscore_genenames_multisnpfile.sh", "repo_name": "TishkoffLab/TF_Binding_scores", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\n\nsnp_file=\"\"\nscript_path='/home/hautj/TF_binding'\noutname=\"\"\nbedfile_prefix=\"/home/hautj/TF_binding/JASPAR2020_hg19_bedfiles_bychrm/JASPAR2020_hg19.converted\"\ngenomes_loc=\"/home/hautj/TF_binding/hg19_refgenomes\"\n\nprint_usage() {\n printf \"Usage: ...\"\n}\n\nwhile getopts 'o:s:p:b:g:' flag; do\n case \"${flag}\" in\n o) outname=\"${OPTARG}\" ;;\n s) snp_file=\"${OPTARG}\" ;;\n p) script_path=\"${OPTARG}\" ;;\n b) bedfile_prefix=\"${OPTARG}\" ;;\n g) genomes_loc=\"${OPTARG}\" ;;\n *) print_usage\n exit 1 ;;\n esac\ndone\n\nlcount=0\nwhile read line; do\n #Skip the first line, since it's the header \n if [[ \"${lcount}\" -eq 0 ]]; then\n lcount=1\n else\n curr_pos=(`echo \"${line}\" | cut -d$'\\t' -f 1 | cut -d$':' -f 2`)\n curr_chrm=(`echo \"${line}\" | cut -d$'\\t' -f 1 | cut -d$':' -f 1`)\n curr_ref=(`echo \"${line}\" | cut -d$'\\t' -f 2`)\n curr_alt=(`echo \"${line}\" | cut -d$'\\t' -f 3`)\n curr_outname=\"${outname}.chrm${curr_chrm}.pos${curr_pos}\"\n orig_bedfile=\"${bedfile_prefix}.chrm${curr_chrm}.bed\"\n #Looping through the bedfile (containing the TFs found on that chromosome), and add them to a file.\n awk -v pos=\"$curr_pos\" 'BEGIN{OFS=\"\\t\"}($2 <= pos && $3 >= pos) {print $2,$3,$4,$6} $2 > pos {exit}' \"${orig_bedfile}\" > \"${curr_outname}.TF_genes\"\n echo \"found TF genes, starting score calculation for snp ${curr_chrm}:${curr_pos}\"\n python ${script_path}/get_PWMscores.py -o \"${curr_outname}.PWM_scores\" -i \"${curr_outname}.TF_genes\" -m \"${script_path}/JASPAR2020_CORE_vertebrates_non-redundant_pfms_transfac\" -p \"${curr_chrm}:${curr_pos}\" -r \"${curr_ref}\" -a \"${curr_alt}\" -c \"${genomes_loc}/chr${curr_chrm}.fa\" -b \"${script_path}/ACTG_count.all_chrms.fractions.txt\" -z \"${script_path}/backgroundZ_forTFs.1000reps.txt\"\n fi\ndone <\"${snp_file}\"\n\n" }, { "alpha_fraction": 0.5795795917510986, "alphanum_fraction": 0.5930930972099304, "avg_line_length": 29.953489303588867, "blob_id": "a26ed91a0195f14b0f0a3c595ca7b3d4d7fefd85", "content_id": "2d63e4120eda08702a2b01168433b7b46b0dbc6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1332, "license_type": "no_license", "max_line_length": 419, "num_lines": 43, "path": "/get_PWMscore_genenames_multisnpfile.binding_thresh.sh", "repo_name": "TishkoffLab/TF_Binding_scores", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\n\nsnp_file=\"\"\nscript_path='/home/hautj/TF_binding'\noutname=\"\"\ngenomes_loc=\"/home/hautj/TF_binding/hg19_refgenomes\"\ntfs_touse=\"\"\nbinding_thresh=\"0.01\"\n\nprint_usage() {\n printf \"Usage: ...\"\n}\n\nwhile getopts 'o:s:p:g:t:f:' flag; do\n case \"${flag}\" in\n o) outname=\"${OPTARG}\" ;;\n s) snp_file=\"${OPTARG}\" ;;\n p) script_path=\"${OPTARG}\" ;;\n g) genomes_loc=\"${OPTARG}\" ;;\n t) tfs_touse=\"${OPTARG}\" ;;\n f) binding_thresh=\"${OPTARG}\" ;;\n *) print_usage\n exit 1 ;;\n esac\ndone\n\nargflags_string=''\n\nif [[ $tfs_touse != \"\" ]]; then\n argflags_string+='-t \"${tfs_touse}\"'\nfi\n\n\nwhile read line; do\n curr_pos=(`echo \"${line}\" | cut -d$'\\t' -f 3`)\n curr_chrm=(`echo \"${line}\" | cut -d$'\\t' -f 2`)\n curr_ref=(`echo \"${line}\" | cut -d$'\\t' -f 4`)\n curr_alt=(`echo \"${line}\" | cut -d$'\\t' -f 5`)\n curr_outname=\"${outname}.chrm${curr_chrm}.pos${curr_pos}\"\n python ${script_path}/get_PWMscores.py -o \"${curr_outname}.above_threshold.PWM_scores\" -m \"${script_path}/JASPAR2020_CORE_vertebrates_non-redundant_pfms_transfac\" -p \"${curr_chrm}:${curr_pos}\" -r \"${curr_ref}\" -a \"${curr_alt}\" -c \"${genomes_loc}/chr${curr_chrm}.fa\" -b \"${script_path}/ACTG_count.all_chrms.fractions.txt\" -z \"${script_path}/backgroundZ_forTFs.1000reps.txt\" -f \"${binding_thresh}\" #\"${argflags_string}\"\ndone <\"${snp_file}\"\n\n" }, { "alpha_fraction": 0.7168743014335632, "alphanum_fraction": 0.7327293157577515, "avg_line_length": 26.625, "blob_id": "305b976d6a188a09c301deb8ab6cdaca9706695b", "content_id": "011b135855e0f19fd1975e51062ce0394f47cb54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 883, "license_type": "no_license", "max_line_length": 106, "num_lines": 32, "path": "/example/README.md", "repo_name": "TishkoffLab/TF_Binding_scores", "src_encoding": "UTF-8", "text": "## Running the example\n\nFor the standard operation:\n\n```\n./get_PWMscore_genenames_multisnpfile.sh -s example/example_SNPs.input\\\n\t -o example/example_SNPs.test.output\\\n\t -b example/JASPAR2020_hg19.converted.subset.example \\\n\t -p ${PWD} \\\n\t -g ${PWD}/example \n\n```\n\nUsing the functionality of just a single sequence:\n\n```\npython get_PWMscores.py -s \"AAGACATTTGAAAATTATCTA\"\\\n\t -o example/example_seq.test.output\\\n\t -m ${PWD}/JASPAR2020_CORE_vertebrates_non-redundant_pfms_transfac \\\n\t -z ${PWD}/backgroundZ_forTFs.1000reps.txt \\\n\t -b ${PWD}/ACTG_count.all_chrms.fractions.txt\n```\n\nRunning all potential binding orientations/directions/positions for a specific SNP and specific set of TFs\n\n```\n./get_PWMscore_genenames_multisnpfile.binding_thresh.sh -s example/example_SNPs.binding_thresh.input\\\n\t -o example/example_bindingthreshold.test.output\\\n\t -p ${PWD} \\\n\t -g ${PWD}/example \n\n```" }, { "alpha_fraction": 0.7224880456924438, "alphanum_fraction": 0.7511961460113525, "avg_line_length": 40.599998474121094, "blob_id": "1d4ae9911f24c193c26321c3f1a339ff63e66340", "content_id": "9768b11b6cc509e0d5d9fefd5b03bc64ce3bb70e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 209, "license_type": "no_license", "max_line_length": 186, "num_lines": 5, "path": "/run_example.sh", "repo_name": "TishkoffLab/TF_Binding_scores", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -e\n\n./get_PWMscore_genenames_multisnpfile.sh -s example/example_SNPs.input -o example/example_SNPs.test.output -b example/JASPAR2020_hg19.converted.subset.example -p ${PWD} -g ${PWD}/example \n" }, { "alpha_fraction": 0.5983812808990479, "alphanum_fraction": 0.6031208038330078, "avg_line_length": 52.7529411315918, "blob_id": "6fc92ff5c93dc91cd98ab8d002980c19c8b39f37", "content_id": "da841d35e4fd673fa73d2860a69423bd74220acb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27429, "license_type": "no_license", "max_line_length": 324, "num_lines": 510, "path": "/get_PWMscores.py", "repo_name": "TishkoffLab/TF_Binding_scores", "src_encoding": "UTF-8", "text": "import sys\nfrom pandas import *\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot\nimport random\nfrom scipy.stats import norm\nimport os\nfrom argparse import ArgumentParser\nimport pybedtools\nimport pdb\nimport math\nimport time\n\nparser = ArgumentParser()\nparser.add_argument(\"-i\", \"--input_genes\", dest=\"input_gene_file\",\n\t\t\t\t\thelp=\"input file containing the list of TF gene names, one per row\")\nparser.add_argument(\"-s\", \"--sequence\", dest=\"sequence\",\n\t\t\t\t\thelp=\"sequence to compute score for, A/C/T/G\")\nparser.add_argument(\"-t\", \"--tfname\", dest=\"tf_tocheck\",\n help=\"name of a specfic transcription factor, or a file containing any number of TFs (one per line). If this argument is supplied, then the script only calculates the score for that TF. Must supply a sequence as well.\")\nparser.add_argument(\"-m\", \"--matrix_loc\", dest=\"matrix_loc\",\n\t\t\t\t\thelp=\"full path of the folder that contains the PWM matrix files\")\nparser.add_argument(\"-o\", \"--outname\", dest=\"outname\",\n\t\t\t\t\thelp=\"the name of the file to save the sequence scores to\")\nparser.add_argument(\"-r\", \"--refallele\", dest=\"ref_al\",\n\t\t\t\t\thelp=\"reference allele for the snp of interest, A/C/T/G\")\nparser.add_argument(\"-a\", \"--altallele\", dest=\"alt_al\",\n\t\t\t\t\thelp=\"alternate allele for the snp of interest, A/C/T/G\")\nparser.add_argument(\"-p\", \"--position\", dest=\"position\",\n\t\t\t\t\thelp=\"position, in bp, for the snp of interest\")\nparser.add_argument(\"-c\", \"--refchrmfasta\", dest=\"ref_fasta_file\",\n\t\t\t\t\thelp=\"reference fasta file (should just be a single chromosome) to use for getting the reference sequence\")\nparser.add_argument(\"-b\", \"--bg_frac_file\", dest=\"bgfrac_file\",\n help=\"file containing the background frequency of A/C/T/G, for each autosomal chromosome.\")\nparser.add_argument(\"-z\", \"--bg_zscore_file\", dest=\"bgZscore_file\",\n help=\"file containing the background Z scores for each TF, precalculated using a significant number of replicates\")\nparser.add_argument(\"-f\", \"--tf_cutoff\", dest=\"tfpbind_cutoff\",\n help=\"the cutoff for significant pbinding score. If this is provided, the script will check the snp against all tfs and orientations, then save only the results that are above the threshold.\")\n\n#Reads in the JASPAR PWM file (transfac formatted)\n# infile (str): the PWM file to read in\n#Returns:\n# pfms_info (dict): dictionary containig the information about the PWM, from the file (plus an entry with the length of the PWM)\ndef read_JASPAR_transfac_pfms(infile):\n pfms_file = open(infile,'r')\n pfms_info = {}\n seq_len = 0\n for line in pfms_file:\n line = line.split('\\n')[0]\n if(len(line.split(' ')) > 1):\n line = line.split(' ')\n if(line[0] == 'DE'):\n pfms_info['Matrix_ID'] = line[1]\n pfms_info['Matrix_Name'] = line[2]\n seq_len = 0\n elif(line[0] == 'CC'):\n temp = line[1].split(':')\n pfms_info[temp[0]] = temp[1]\n if(seq_len > 0):\n pfms_info['TF_len'] = seq_len\n elif(len(line.split('\\t')) > 1):\n line = line.split('\\t')\n if(line[0] == 'PO'):\n curr_matorder = line[1:]\n else:\n curr_vals = {}\n for n,v in enumerate(line[1:]):\n curr_vals[curr_matorder[n]] = float(v)+1\n pfms_info[int(line[0])] = curr_vals\n seq_len = int(line[0])\n else:\n pass\n pfms_file.close()\n return pfms_info\n\n#Loops through the info_dicts list (of PWM matrix file info), and returns the PWM matrix dict for the given TF\n#Inputs:\n# tfname (str): name of the transcription factor\n# info_dicts (list): made by looping over all the JASPAR matrix files; this is a list of all of those matrices as dicts\n#Returns:\n# matrix_dict_touse (dict): the dictionary containing the PWM for the given TF\ndef get_matrix_byTF(tfname,info_dicts):\n matrix_dict_touse = None\n for i in info_dicts:\n if(i['Matrix_Name'] == tfname):\n matrix_dict_touse = i\n break\n if(matrix_dict_touse == None):\n print('Could not find a PWM for Transcription Factor {0}'.format(tfname))\n return None\n matrix_aslist = []\n for i in range(1,matrix_dict_touse['TF_len']+1):\n matrix_aslist.append(matrix_dict_touse[i])\n return matrix_aslist\n\n#Given a matrix dict for a TF, containing the PWM of the counts for each base in the sequence, returns just the PWM with each position entry being recalculated as a fraction of the count at that position\n#Inputs:\n# matrix_dict (dict): the dictionary containing the PWM for a given TF, in addition to the other data about that TF\n#Returns:\n# PWM_dict (dict): a dicitonary where each key is a position relative to the TF (1-indexed) and each value is a dictionary with keys A/C/G/T and values equal to the raw count divided by the total counts for all four bases at that position.\ndef get_fracPWM_from_matrix(pwm):\n fracPWM = []\n for en in pwm:\n temp_matrix = {}\n curr_totcount = sum([float(x) for x in en.values()])\n for b in 'ACTG':\n if(f == 0.0):\n temp_matrix[b] = 1/curr_totcount #TODO: Move check for 0 entry from reading in matrix to here\n else:\n temp_matrix[b] = float(en[b])/curr_totcount\n fracPWM.append(temp_matrix)\n return fracPWM\n\n#Given a fractional PWM (made by get_fracPWM_from_matrix) and a set of background frequencies of the four bases, calculate a -log PWM\n#Inputs:\n# fracPWM (dict): PWM where each entry is a fraction of the counts for each base\n# bgfreqs (DataFrame): a dataframe with a single row, containing columns frac_A/frac_C/frac_G/frac_T which has the fraction of the chromosome/genome corrisponding to that base\n#Returns:\n# lnPWM_dict (dict): PWM where each entry is the fracPWM entry, divided by the background base fraction, then taken the negative natural log of it\ndef get_lnPWM_from_fracPWM(fracPWM,bgfreqs):\n lnPWM = []\n bgfreqs_dict = {x:bgfreqs['frac_{0}'.format(x)].values[0] for x in 'ACTG'}\n for en in fracPWM:\n temp_matrix = {}\n for b in 'ACTG':\n f = float(en[b])/bgfreqs_dict[b]\n temp_matrix[b] = -np.log(f) #TODO speed this up; take sum of fracs and then log the whole thing\n lnPWM.append(temp_matrix)\n return lnPWM\n\n#For a given sequence, returns the complementary sequence\n#Inputs:\n# seq (str): sequence of A/C/T/G\n#Returns:\n# new_seq (str): sequence of A/C/T/G complementary to the original sequence\ndef get_complseq(seq):\n new_seq = []\n for b in seq:\n if(b == 'A'):\n new_seq.append('T')\n elif(b == 'T'):\n new_seq.append('A')\n elif(b == 'G'):\n new_seq.append('C')\n elif(b == 'C'):\n new_seq.append('G')\n else:\n print('Base pair not A/C/T/G! {0}'.format(b))\n return ''.join(new_seq)\n\n#Given a sequence, replaces the allele at a specified position with the given allele\n#Inputs:\n# fullseq (str): sequence of A/C/T/G\n# position (int): position of the allele to be replaced, relative to the length of the input sequence (so it must be <= len(fullseq))\n# allele (str): A/C/T/G, to replace the one at the position in fullseq\n#Returns:\n# new_seq (str): fullseq, with the new allele at the given position\ndef make_seq(fullseq,position,allele):\n new_seq = ''\n for n,b in enumerate(fullseq):\n try:\n if(n==position):\n new_seq = ''.join([new_seq,allele])\n else:\n new_seq = ''.join([new_seq,b])\n except:\n if(b not in 'ACTG'):\n print('Sequence contains a letter, {0}, that is not A/C/G/T at position {1}'.format(b,n))\n return None\n else:\n continue\n return new_seq.upper()\n\n#For a given PWM and sequence length, return the sum of the counts for all four bases at each position\n#Inputs:\n# pwm (dict): position weight matrix, with or without the additional info from the matrix file\n# seqlen (int): length of the sequence, so that we can loop over the PWM\n#Returns:\n# pos_counts (list): list of counts at each position\ndef get_matrix_counts(pwm):\n pos_counts = []\n for en in pwm:\n temp = [float(x) for x in en.values()]\n pos_counts.append(sum(temp))\n return pos_counts\n\n#For a given PWM and sequence, calculates the score at each position\n#Inputs:\n# pwm (dict): position weight matrix, with or without the additional info from the matrix file\n# seq (str): A/C/G/T sequence\n#Returns:\n# seqval_list (list): list of the values at each position given the specific base in seq\ndef get_matrix_scores(pwm,seq):\n seqval_list = []\n for n,b in enumerate(seq):\n try:\n seqval_list.append(float(pwm[n][b]))\n except:\n if(b not in 'ACTG'):\n print('Sequence contains a letter, {0}, that is not A/C/G/T at position {1}'.format(b,n))\n return None\n else:\n continue\n return seqval_list\n\n# def calculate_bindingP(hscore,bg_z_score,tf_name):\n\n# return math.exp(-hscore)/bgZ_score\n\n# def ge\n\n# def calculate_normed_bindingP(hscore,bg_z_df,tf_name):\n# try:\n# bgZ_touse = bg_z_df.loc[bg_z_df['TF_name'] == tf_name]['BG Z score'].values[0]\n# except:\n# print('Could not find background Z score for TF {0}'.format(tf_name))\n# return -1\n# return math.exp(-hscore)/bgZ_touse\ndef get_revseq_foroutput(seq,pos,tf_len):\n new_seq = ''.join(list(reversed(seq)))\n return [new_seq,(tf_len-pos)]\n\ndef get_seq_combos(seq,al,tf_len):\n seqs_list = []\n complseqs_list = []\n for num in range(tf_len):\n newseq = ''\n curr_ref_start_pos = tf_len-num-1\n curr_ref_end_pos = curr_ref_start_pos+tf_len\n curr_seq = seq[curr_ref_start_pos:curr_ref_end_pos]\n tempseq = ''\n for n,b in enumerate(curr_seq):\n if((n+curr_ref_start_pos) == (tf_len-1)):\n tempseq = ''.join([tempseq,al])\n else:\n tempseq = ''.join([tempseq,b])\n seqs_list.append([tempseq,num])\n complseqs_list.append([get_complseq(tempseq),num])\n return seqs_list,complseqs_list\n\ndef get_scoredict_entry(seq,pwm,lnfracPWM,bgZ_score,bgfreqs,tfname,tfpbind_cutoff=None,seq_pos=None):\n # pdb.set_trace()\n curr_scoredict = None\n try:\n rawscore_list = get_matrix_scores(pwm,seq)\n pos_counts = get_matrix_counts(pwm)\n tot_count = sum(pos_counts)\n score_ln = get_matrix_scores(lnfracPWM,seq)\n curr_fracscore = sum([rawscore_list[x]/pos_counts[x] for x in range(len(pos_counts))])\n curr_H = np.sum(score_ln)\n curr_bindingp = math.exp(-curr_H)/bgZ_score #calculate_bindingP(curr_H,bgZ_score,tfname)\n curr_normed_bindingp = math.exp(-curr_H)/(bgZ_score+math.exp(-curr_H)) \n if(tfpbind_cutoff is not None):\n if(curr_bindingp >= float(tfpbind_cutoff)):\n curr_scoredict = {'tf_name':tfname,'raw_score':sum(rawscore_list),'tf_len':len(pwm),'counts_perpos':min(pos_counts),\n 'fraction_score':curr_fracscore,'H':curr_H,'bindingP':curr_bindingp,'bindingP_normed':curr_normed_bindingp,'sequence':seq,'motif_pos':seq_pos}\n else:\n curr_scoredict = {'tf_name':tfname,'raw_score':sum(rawscore_list),'tf_len':len(pwm),'counts_perpos':min(pos_counts),\n 'fraction_score':curr_fracscore,'H':curr_H,'bindingP':curr_bindingp,'bindingP_normed':curr_normed_bindingp,'sequence':seq}\n except:\n print('Could not get score dict for given PWM from TF {0}! Check inputs.'.format(tfname))\n return None\n return curr_scoredict\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n bgfrac_df = read_csv(args.bgfrac_file,delimiter='\\t')\n\n if(args.sequence is not None):\n sequence = args.sequence\n\n #Read in the matrix files and make dict entries for each one\n transfac_matrix_list = os.listdir(args.matrix_loc)\n infodicts_list = []\n for f in transfac_matrix_list:\n curr_infodict = read_JASPAR_transfac_pfms('{0}/{1}'.format(args.matrix_loc,f))\n infodicts_list.append(curr_infodict)\n\n score_dict_bytf ={}\n tfs_to_check = []\n if(args.tf_tocheck is not None):\n try:\n tf_list_file = open(args.tf_tocheck,'r')\n for line in tf_list_file:\n tfs_to_check.append(line.split('\\n')[0])\n tf_list_file.close()\n except:\n tfs_to_check.append(args.tf_tocheck)\n else:\n for i in infodicts_list:\n tfs_to_check.append(i['Matrix_Name'])\n # except:\n # for i in infodicts_list:\n # tfs_to_check.append(i['Matrix_Name'])\n bg_z_df = read_csv(args.bgZscore_file,delimiter='\\t',skiprows=1)\n\n bgfreqs = bgfrac_df.loc[bgfrac_df['Chrm'] == 'Total'][['frac_A','frac_C','frac_G','frac_T']]\n for tf in tfs_to_check:\n try:\n bgZ_score = bg_z_df.loc[bg_z_df['TF_name'] == tf]['BG Z score'].values[0]\n except:\n print('Could not find background Z score for TF {0}'.format(tf))\n break\n curr_matrix = get_matrix_byTF(tf,infodicts_list)\n curr_fracPWM = get_fracPWM_from_matrix(curr_matrix)\n curr_lnfracPWM = get_lnPWM_from_fracPWM(curr_fracPWM,bgfreqs)\n\n curr_sd = get_scoredict_entry(sequence,curr_matrix,curr_lnfracPWM,bgZ_score,bgfreqs,tf)\n if(curr_sd is not None):\n score_dict_bytf[tf] = curr_sd\n #Writing the PWM scores to the output file\n outfile = open(args.outname,'w')\n outfile.write('Scores for Given Transcription Factors for sequence {0}, as a fraction of the total count \\nTF_Name\\tPWM Fraction Score\\tTF Length\\tTF Counts per position\\tH\\tP binding\\n'.format(sequence))\n for tf,scores in sorted(score_dict_bytf.items(), key=lambda k_v: k_v[1]['bindingP'],reverse=True):\n outfile.write('{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\n'.format(tf,scores['fraction_score'],scores['tf_len'],scores['counts_perpos'],scores['H'],scores['bindingP'],scores['bindingP_normed']))\n outfile.close()\n elif(args.tfpbind_cutoff is not None):\n # pdb.set_trace()\n tpbindcutoff = float(args.tfpbind_cutoff)\n position = int(args.position.split(':')[1])\n chromosome = int(args.position.split(':')[0])\n #Read in the matrix files and make dict entries for each one\n transfac_matrix_list = os.listdir(args.matrix_loc)\n infodicts_list = []\n for f in transfac_matrix_list:\n curr_infodict = read_JASPAR_transfac_pfms('{0}/{1}'.format(args.matrix_loc,f))\n infodicts_list.append(curr_infodict)\n bg_z_df = read_csv(args.bgZscore_file,delimiter='\\t',skiprows=1)\n try:\n bgfreqs = bgfrac_df.loc[bgfrac_df['Chrm'] == str(chromosome)][['frac_A','frac_C','frac_G','frac_T']]\n except:\n bgfreqs = bgfrac_df.loc[bgfrac_df['Chrm'] == 'Total'][['frac_A','frac_C','frac_G','frac_T']]\n\n infodicts_list_touse = []\n if(args.tf_tocheck is not None):\n tfs_to_check = []\n try:\n tf_list_file = open(args.tf_tocheck,'r')\n for line in tf_list_file:\n tfs_to_check.append(line.split('\\n')[0])\n tf_list_file.close()\n except:\n tfs_to_check.append(args.tf_tocheck)\n for i in infodicts_list:\n if(i['Matrix_Name'] in tfs_to_check):\n infodicts_list_touse.append(i)\n else:\n infodicts_list_touse = infodicts_list\n \n #Creating the final dictionary containing the values for each TF, with the Ref/Alt alleles\n sig_score_dicts = []\n for i in infodicts_list_touse:\n start = time.time()\n curr_matrix = []\n for n in range(1,i['TF_len']+1):\n curr_matrix.append(i[n])\n fracPWM = get_fracPWM_from_matrix(curr_matrix)\n lnfracPWM = get_lnPWM_from_fracPWM(fracPWM,bgfreqs)\n ref_full_forward_seq = pybedtools.BedTool.seq('chr{0}:{1}-{2}'.format(chromosome,(position-len(curr_matrix)+1),(position+len(curr_matrix)-1)),args.ref_fasta_file).upper()\n ref_full_reverse_seq = ''.join(list(reversed(ref_full_forward_seq)))\n curr_forward_ref_seqlist,curr_forward_compl_ref_seqlist = get_seq_combos(ref_full_forward_seq,args.ref_al,len(curr_matrix))\n curr_reverse_ref_seqlist,curr_reverse_compl_ref_seqlist = get_seq_combos(ref_full_reverse_seq,args.ref_al,len(curr_matrix))\n try:\n bgZ_score = bg_z_df.loc[bg_z_df['TF_name'] == i['Matrix_Name']]['BG Z score'].values[0]\n except:\n print('Could not find background Z score for TF {0}'.format(tf_name))\n break\n for s in curr_forward_ref_seqlist:\n curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])\n if(curr_scoredict is not None):\n curr_scoredict['orientation'] = '+'\n curr_scoredict['direction'] = 'forward'\n curr_scoredict['allele'] = 'ref'\n sig_score_dicts.append(curr_scoredict)\n for s in curr_forward_compl_ref_seqlist:\n curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])\n if(curr_scoredict is not None):\n curr_scoredict['orientation'] = '-'\n curr_scoredict['direction'] = 'forward'\n curr_scoredict['allele'] = 'ref'\n sig_score_dicts.append(curr_scoredict)\n for s in curr_reverse_ref_seqlist:\n curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])\n if(curr_scoredict is not None):\n curr_scoredict['orientation'] = '+'\n curr_scoredict['direction'] = 'reverse'\n curr_scoredict['allele'] = 'ref'\n new_seq,new_pos = get_revseq_foroutput(curr_scoredict['sequence'],curr_scoredict['motif_pos'],curr_scoredict['tf_len']-1)\n curr_scoredict['sequence'] = new_seq\n curr_scoredict['motif_pos'] = new_pos \n sig_score_dicts.append(curr_scoredict)\n for s in curr_reverse_compl_ref_seqlist:\n curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])\n if(curr_scoredict is not None):\n curr_scoredict['orientation'] = '-'\n curr_scoredict['direction'] = 'reverse'\n curr_scoredict['allele'] = 'ref'\n new_seq,new_pos = get_revseq_foroutput(curr_scoredict['sequence'],curr_scoredict['motif_pos'],curr_scoredict['tf_len']-1)\n curr_scoredict['sequence'] = new_seq\n curr_scoredict['motif_pos'] = new_pos \n sig_score_dicts.append(curr_scoredict)\n\n curr_forward_alt_seqlist,curr_forward_compl_alt_seqlist = get_seq_combos(ref_full_forward_seq,args.alt_al,len(curr_matrix))\n curr_reverse_alt_seqlist,curr_reverse_compl_alt_seqlist = get_seq_combos(ref_full_reverse_seq,args.alt_al,len(curr_matrix))\n for s in curr_forward_alt_seqlist:\n curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])\n if(curr_scoredict is not None):\n curr_scoredict['orientation'] = '+'\n curr_scoredict['direction'] = 'forward'\n curr_scoredict['allele'] = 'alt'\n sig_score_dicts.append(curr_scoredict)\n for s in curr_forward_compl_alt_seqlist:\n curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])\n if(curr_scoredict is not None):\n curr_scoredict['orientation'] = '-'\n curr_scoredict['direction'] = 'forward'\n curr_scoredict['allele'] = 'alt'\n sig_score_dicts.append(curr_scoredict)\n for s in curr_reverse_alt_seqlist:\n curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])\n if(curr_scoredict is not None):\n curr_scoredict['orientation'] = '+'\n curr_scoredict['direction'] = 'reverse'\n curr_scoredict['allele'] = 'alt'\n new_seq,new_pos = get_revseq_foroutput(curr_scoredict['sequence'],curr_scoredict['motif_pos'],curr_scoredict['tf_len']-1)\n curr_scoredict['sequence'] = new_seq\n curr_scoredict['motif_pos'] = new_pos \n sig_score_dicts.append(curr_scoredict)\n for s in curr_reverse_compl_alt_seqlist:\n curr_scoredict = get_scoredict_entry(s[0],curr_matrix,lnfracPWM,bgZ_score,bgfreqs,i['Matrix_Name'],tpbindcutoff,s[1])\n if(curr_scoredict is not None):\n curr_scoredict['orientation'] = '-'\n curr_scoredict['direction'] = 'reverse'\n curr_scoredict['allele'] = 'alt'\n new_seq,new_pos = get_revseq_foroutput(curr_scoredict['sequence'],curr_scoredict['motif_pos'],curr_scoredict['tf_len']-1)\n curr_scoredict['sequence'] = new_seq\n curr_scoredict['motif_pos'] = new_pos \n sig_score_dicts.append(curr_scoredict)\n # pdb.set_trace()\n end = time.time()\n # print('time taken to calculate all sequences for tf {0} = {1}'.format(i['Matrix_Name'],(end-start)))\n if(len(sig_score_dicts) > 0):\n outfile = open(args.outname,'w')\n outfile.write('Scores for all Transcription Factors above bindingP score {0}\\nTF_Name\\tPWM Fraction Score\\tTF Length\\tTF Counts per position\\tH\\tP binding\\tAllele\\tOrientation\\tDirection\\tPosition in Motif\\tBinding Sequence\\n'.format(args.tfpbind_cutoff))\n for scores in sorted(sig_score_dicts, key=lambda k: k['bindingP'],reverse=True):\n outfile.write('{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\t{9}\\t{10}\\n'.format(scores['tf_name'],scores['fraction_score'],scores['tf_len'],scores['counts_perpos'],scores['H'],scores['bindingP'],scores['allele'],scores['orientation'],scores['direction'],scores['motif_pos'],scores['sequence']))\n outfile.close()\n else:\n position = int(args.position.split(':')[1])\n chromosome = int(args.position.split(':')[0])\n\n #Reading in the TF_genes file, made by the bash script, which has the TF names, positions, and strandedness\n gene_df = read_csv('{0}'.format(args.input_gene_file),header=None,delimiter='\\t')\n gene_df.columns = ['pos_start','pos_end','tf_name','strand']\n\n #Read in the matrix files and make dict entries for each one\n transfac_matrix_list = os.listdir(args.matrix_loc)\n infodicts_list = []\n for f in transfac_matrix_list:\n curr_infodict = read_JASPAR_transfac_pfms('{0}/{1}'.format(args.matrix_loc,f))\n infodicts_list.append(curr_infodict)\n\n #Getting the reference sequence that contains all of the TF genes within it, then add the new start/stop coordinates relative to full ref seq to the dataframe\n ref_pos_end = max(gene_df['pos_end'])\n ref_pos_start = min(gene_df['pos_start'])\n ref_full_seq = pybedtools.BedTool.seq('chr{0}:{1}-{2}'.format(chromosome,ref_pos_start,ref_pos_end),args.ref_fasta_file)\n updated_pos = (position-ref_pos_start)\n gene_df['relative_start'] = gene_df['pos_start']-ref_pos_start\n gene_df['relative_end'] = gene_df['pos_end']-ref_pos_start\n\n bg_z_df = read_csv(args.bgZscore_file,delimiter='\\t',skiprows=1)\n # pdb.set_trace()\n #Creating the final dictionary containing the values for each TF, with the Ref/Alt alleles\n score_dict_bytf ={}\n for i,g in gene_df.iterrows():\n curr_relative_pos = abs(updated_pos-g['relative_start'])\n curr_refseq = make_seq(ref_full_seq[g['relative_start']:(g['relative_end']+1)],curr_relative_pos,args.ref_al)\n curr_altseq = make_seq(ref_full_seq[g['relative_start']:(g['relative_end']+1)],curr_relative_pos,args.alt_al)\n if(g['strand'] == '-'):\n curr_refseq = get_complseq(curr_refseq)\n curr_altseq = get_complseq(curr_altseq)\n curr_matrix = get_matrix_byTF(g['tf_name'],infodicts_list)\n try:\n bgfreqs = bgfrac_df.loc[bgfrac_df['Chrm'] == str(chromosome)][['frac_A','frac_C','frac_G','frac_T']]\n except:\n bgfreqs = bgfrac_df.loc[bgfrac_df['Chrm'] == 'Total'][['frac_A','frac_C','frac_G','frac_T']]\n # pdb.set_trace()\n curr_fracPWM = get_fracPWM_from_matrix(curr_matrix)\n curr_lnfracPWM = get_lnPWM_from_fracPWM(curr_fracPWM,bgfreqs)\n\n try:\n bgZ_score = bg_z_df.loc[bg_z_df['TF_name'] == g['tf_name']]['BG Z score'].values[0]\n except:\n print('Could not find background Z score for TF {0}'.format(tf_name))\n\n curr_scoredict_ref = get_scoredict_entry(curr_refseq,curr_matrix,curr_lnfracPWM,bgZ_score,bgfreqs,g['tf_name'])\n curr_scoredict_alt = get_scoredict_entry(curr_altseq,curr_matrix,curr_lnfracPWM,bgZ_score,bgfreqs,g['tf_name'])\n curr_scoredict = {'ref_fraction_score':curr_scoredict_ref['fraction_score'],'alt_fraction_score':curr_scoredict_alt['fraction_score'],\n 'tf_len':curr_scoredict_ref['tf_len'],'counts_perpos':curr_scoredict_ref['counts_perpos'],'H (REF)':curr_scoredict_ref['H'],'H (ALT)':curr_scoredict_alt['H'],\n 'BindingP (REF)':curr_scoredict_ref['bindingP'],'BindingP (ALT)':curr_scoredict_alt['bindingP']}\n score_dict_bytf[g['tf_name']] = curr_scoredict\n\n #Writing the PWM scores to the output file\n outfile = open(args.outname,'w')\n outfile.write('Scores for Transcription Factors Containing SNP at {0} on chromosome {1}, as a fraction of the total count \\nTF_Name\\tPWM Fraction Score (REF allele)\\tPWM Fraction Score (ALT allele)\\tTF Length\\tTF Counts per position\\tH (REF)\\tH (ALT)\\tBinding P (REF)\\tBinding P (ALT)\\n'.format(position,chromosome))\n for tf,scores in sorted(score_dict_bytf.items(), key=lambda k_v: k_v[1]['alt_fraction_score'],reverse=True):\n outfile.write('{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\n'.format(tf,scores['ref_fraction_score'],scores['alt_fraction_score'],scores['tf_len'],\n scores['counts_perpos'],scores['H (REF)'],scores['H (ALT)'],scores['BindingP (REF)'],scores['BindingP (ALT)']))\n outfile.close()\n \n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7681862115859985, "alphanum_fraction": 0.7694794535636902, "avg_line_length": 54.23214340209961, "blob_id": "74656c1e7062538282ddcb1a08fe7cb0c81ef80b", "content_id": "a193fbd8d515e4f026f032efa1077eab9ea5bd20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3093, "license_type": "no_license", "max_line_length": 385, "num_lines": 56, "path": "/README.md", "repo_name": "TishkoffLab/TF_Binding_scores", "src_encoding": "UTF-8", "text": "## Transcription Factor Binding Scores\n\nThis repository contains the scripts that are used to calculate the binding affinity of a given sequence, or of a given Reference/Alternative allele found with GWAS.\n\nIf given a snp, the scripts will use the set of BED files containing TF start/stop binding sites from the JASPAR database to find the TF genes that the SNP falls within. This script is get_PWMscore_genenames.sh\n\nThen, using the downloaded JASPAR database of Position Weight Matrix files (transfac formatted), it will calculate the score for each TF, taking the sequence from the hg19 reference chromosome and replacing the base at the target SNP position with the REF and ALT alleles. The output is a tab-seperated file with a row for each TF that the SNP falls within, and the following columns: \nTF_Name, PWM Fraction Score (REF allele), PWM Fraction Score (ALT allele), TF Length, TF Counts per position, H (REF), Hprime (ALT)\n\n### Scripts:\n\n\nget_PWMscore.py\n\nThis script takes a particular SNP of interest, the list of TF names that corrisponds to that SNP, and calculates the scores from the PWMs of each TF.\n```\nInput flags:\n-i --input_genes\n\t\t\t\tinput file containing the list of TF gene names, one per row\n-m --matrix_loc\n\t\t\t\tfull path of the folder that contains the PWM matrix files\n-o --outname\n\t\t\t\tthe name of the file to save the sequence scores to\n-r --refallele\n\t\t\t\treference allele for the snp of interest, A/C/T/G\n-a --altallele\n\t\t\t\talternate allele for the snp of interest, A/C/T/G\n-p --position\n\t\t\t\tposition, in bp, for the snp of interest\n-c --refchrmfasta\n\t\t\t\treference fasta file (should just be for a single chromosome) to use for getting the reference sequence\n-b --bg_frac_file\n\t\t\t\tfile containing the background frequency of A/C/T/G, for each autosomal chromosome\n-z --bg_zscore_file\n\t\t\t\tfile containing the background Z scores for each TF, precalculated using a significant number of replicates\n-f --tf_cutoff\n\t\t\t\tthe cutoff for significant pbinding score. If this is provided, the script will check the snp against all tfs and orientations, then save only the results that are above the threshold.\n\nNote: bg_frac_file was made using a seperate script, with frequencies calculated from reference genome hg_19 and is in this repository as \"ACTG_count.all_chrms.fractions.txt\".\n\n```\nget_PWMscore_genenames_multisnpfile.sh\n\nThis script runs the python script on each of the SNPs that we have found through GWAS. It finds the TFs from the premade JASPAR bedfile that overlap with each particular SNP and makes a file containing that info, and then runs get_PWMscore.py with the requisite input flags.\n\n```\nInput flags:\n-o outname\n\t\t\tName to use for the output files (<outname>.TF_genes and <outname>.PWM_scores)\n\n-s snp_file\n\t\t\tFile containing the SNPs that will be analyzed. First line is the column names, followed by one line per SNP. Each line must have the SNP name (chr:pos format), the reference allele, and the alternative allele\n-p script_path\n\t\t\tThe path where the script is located. This folder must contain get_PWMscore.py, the folder containing \n\n```\n" } ]
7
NQ31/scrapy_project
https://github.com/NQ31/scrapy_project
198ac5385bf522b2eb172e197eeb5365c688a479
0d04fcfc570ff7adb3a78ec0ce9c467fc82281e1
dfa2731d22b0efd33396c4a03752470f41f28db1
refs/heads/master
2022-12-31T03:24:43.460420
2020-10-19T15:14:00
2020-10-19T15:14:00
305,419,430
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4905897080898285, "alphanum_fraction": 0.4968632459640503, "avg_line_length": 32.125, "blob_id": "0390b59cdb2d0fa388641e6028a9488dc85ea140", "content_id": "8e58c5b16118baa5de51f085b2ba1b9a38ae6941", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 797, "license_type": "no_license", "max_line_length": 75, "num_lines": 24, "path": "/qiubaipro/qiubaipro/spiders/test2.py", "repo_name": "NQ31/scrapy_project", "src_encoding": "UTF-8", "text": "import scrapy\n\nfrom qiubaipro.items import QiubaiproItem\nclass Test2Spider(scrapy.Spider):\n name = 'test2'\n # allowed_domains = ['https://www.qiushibaike.com/']\n start_urls = ['https://www.qiushibaike.com/']\n\n def parse(self, response):\n li_list = response.xpath('//*[@id=\"content\"]/div/div[2]/div/ul/li')\n all_data = []\n for li in li_list:\n name = li.xpath('./div/div/a/span/text()')[0].extract()\n text = li.xpath('./div/a/text()')[0].extract()\n # print(name + \":\" + text)\n # dict = {\n # \"name\": name,\n # \"text\": text\n # }\n # all_data.append(dict)\n item=QiubaiproItem()\n item['name']= name\n item['text']=text\n yield item\n\n\n" }, { "alpha_fraction": 0.5465281009674072, "alphanum_fraction": 0.5588096380233765, "avg_line_length": 40.45098114013672, "blob_id": "62d0295e0ddc27d52b5a2006bd0ec45687553d18", "content_id": "d68c7b69fbc8e805bb1a8a4f8782da1c7bd22f7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2179, "license_type": "no_license", "max_line_length": 137, "num_lines": 51, "path": "/mzitu/mzitu/spiders/mzi.py", "repo_name": "NQ31/scrapy_project", "src_encoding": "UTF-8", "text": "import scrapy\nfrom mzitu.items import MzituItem\n\nclass MziSpider(scrapy.Spider):\n name = 'mzi'\n # allowed_domains = ['www.xxx.com']\n start_urls = ['https://www.mzitu.com/']\n #第几页\n def parse(self, response):\n page_num=response.xpath('/html/body/div[2]/div[1]/div[3]/div/a[4]/text()').extract_first()\n for i in range(0,4):\n if i+1==1:\n url='https://www.mzitu.com/'\n else:\n url='https://www.mzitu.com/page/%s/'%(i+1)\n # print('第%s页 --'%i,url)\n yield scrapy.Request(url=url,callback=self.page_parse,meta={'ref':url})\n #获取各个图集url\n def page_parse(self,response):\n\n fef=response.meta['ref']\n li_list=response.xpath('//div[@class=\"postlist\"]/ul/li')\n for li in li_list[0:10]:\n tuji_url=li.xpath('./a/@href').extract_first()\n tuji_title=li.xpath('./span[1]/a/text()').extract_first()\n yield scrapy.Request(url=tuji_url,headers={'referer':fef},callback=self.tuji_parse,meta={'tuji_url':tuji_url,'ref':tuji_url})\n #获取每个图集的页数\n def tuji_parse(self,response):\n item=MzituItem()\n ref=response.meta['ref']\n tuji_url=response.meta['tuji_url']\n tuji_page_num=response.xpath('/html/body/div[2]/div[1]/div[4]/a[5]/span/text()').extract_first()\n for i in range(int(tuji_page_num)):\n if i+1==1:\n url=tuji_url\n else:\n url=tuji_url+'/%s'%(i+1)\n item['img_referer']=url\n # print('图集第%s页 -url--'%i,url)\n yield scrapy.Request(url=url,headers={'referer':ref},callback=self.img_parse,meta={'item':item})\n #下载图集的图片\n def img_parse(self,response):\n item=response.meta['item']\n img_url=response.xpath('/html/body/div[2]/div[1]/div[3]/p/a/img/@src').extract_first()\n img_path=response.xpath('/html/body/div[2]/div[1]/div[3]/p/a/img/@alt').extract_first()\n item['img_path']=img_path\n # print(img_url)\n item['img_url']=img_url\n # print(item['img_url'])\n # print(item['img_path'])\n yield item\n\n\n\n" }, { "alpha_fraction": 0.548410952091217, "alphanum_fraction": 0.553584635257721, "avg_line_length": 35.56756591796875, "blob_id": "399de15d621f698253f8997bc8560f2b411e42f5", "content_id": "71393328572903e02dfbac7f63ce1f4af927dc72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1423, "license_type": "no_license", "max_line_length": 94, "num_lines": 37, "path": "/pian/pian/spiders/bizhi.py", "repo_name": "NQ31/scrapy_project", "src_encoding": "UTF-8", "text": "import scrapy\n\nfrom pian.items import PianItem\nclass BizhiSpider(scrapy.Spider):\n name = 'bizhi'\n # allowed_domains = ['www.xxx.com']\n start_urls = ['http://www.netbian.com/meinv/']\n\n def parse(self,response):\n page_num=response.xpath('//*[@id=\"main\"]/div[4]/a[8]/text()').extract_first()\n #获取各个页的网址\n for i in range(5):\n if i+1==1:\n url='http://www.netbian.com/meinv/'\n else:\n url='http://www.netbian.com/meinv/index_%s.htm'%(i+1)\n yield scrapy.Request(url=url,callback=self.parse_page)\n def parse_page(self, response):\n item = PianItem()\n li_list=response.xpath('//div[@class=\"list\"]/ul/li')\n #获取当前页面是第几页\n page=response.xpath('//*[@id=\"main\"]/div[4]/b/text()').extract_first()\n item['mulu']='第%s页'%(page)\n #获取壁纸的原图地址\n for li in li_list:\n try:\n geren_url='http://www.netbian.com'+li.xpath('./a/@href').extract_first()\n except:\n continue\n yield scrapy.Request(url=geren_url, callback=self.parse_detail,meta={'item':item})\n\n def parse_detail(self,response):\n item = response.meta['item']\n #获取图片地址\n img_url=response.xpath('//div[@class=\"pic\"]/p/a/img/@src').extract_first()\n item['url']=img_url\n yield item\n" }, { "alpha_fraction": 0.6571242809295654, "alphanum_fraction": 0.6577748656272888, "avg_line_length": 29.760000228881836, "blob_id": "9fe19bb0d1e0c9a5b84abbfbcfbaa2e881a5a21b", "content_id": "599a2de4a28c49edd5374d242ed9313f93a1abe6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1955, "license_type": "no_license", "max_line_length": 66, "num_lines": 50, "path": "/pian/pian/pipelines.py", "repo_name": "NQ31/scrapy_project", "src_encoding": "UTF-8", "text": "# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\n#导入相应的模块\nfrom scrapy.pipelines.images import ImagesPipeline\nimport scrapy\nfrom . import settings\nimport os\n# class PianPipeline:\n# def process_item(self, item, spider):\n# return item\n\nclass PianImgPipeline(ImagesPipeline):\n # 该方法用来对图片url发起请求\n def get_media_requests(self, item, info):\n print('开始下载')\n #在这里需要把item传给file_path方法进行处理。不按图片分页存放的话,可以不写meta参数\n return scrapy.Request(item['url'],meta={'item':item})\n #该方法是用来设置图片的下载路径以及图片的名字\n def file_path(self, request, response=None, info=None):\n item=request.meta['item']\n #分类文件夹,\n wenjianjia=item['mulu']\n '''\n 根目录,也就是settings文件下创建的存储图片根目录\n 注意:根目录的设置的时候,不要加“./”,否则下面创建文件夹的时候,会自动创建一个根目录名字的文件夹\n '''\n img_source=settings.IMAGES_STORE\n #图片存放的文件夹路径\n img_path = os.path.join(img_source, wenjianjia)\n #判断文件夹存放的位置是否存在,不存在则新建文件夹\n if not os.path.exists(img_path):\n os.makedirs(img_path)\n #更改图片名字\n url=request.url\n url=url.split('/')[-1]\n file_name=url\n #图片存放路径\n image_path=os.path.join(wenjianjia,file_name)\n #返回图片的存放路径\n\n return image_path\n def item_completed(self, results, item, info):\n print('下载完成')\n return item" }, { "alpha_fraction": 0.6572327017784119, "alphanum_fraction": 0.6580188870429993, "avg_line_length": 31.64102554321289, "blob_id": "92daae83e7111e1fa9cedb073e7691f576fd9196", "content_id": "2f8a099a9d1bf82790e585064ffd7355ecc0a7db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1300, "license_type": "no_license", "max_line_length": 105, "num_lines": 39, "path": "/mzitu/mzitu/pipelines.py", "repo_name": "NQ31/scrapy_project", "src_encoding": "UTF-8", "text": "# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\nfrom scrapy.pipelines.images import ImagesPipeline\nimport scrapy\nfrom . import settings\nimport os\n# class MzituPipeline:\n# def process_item(self, item, spider):\n# return item\nclass myPipeline(ImagesPipeline):\n def get_media_requests(self, item, info):\n print('下载开始')\n return scrapy.Request(item['img_url'],headers={'referer':item['img_referer']},meta={'item':item})\n def file_path(self, request, response=None, info=None):\n item=request.meta['item']\n #获取目录\n floder=item['img_path']\n source_path=settings.IMAGES_STORE\n #路径\n img_path=os.path.join(source_path,floder)\n if not os.path.exists(img_path):\n os.makedirs(img_path)\n\n url = request.url\n url = url.split('/')[-1]\n img_name=url\n img_file_path=os.path.join(floder,img_name)\n print(img_file_path)\n\n return img_file_path\n def item_completed(self, results, item, info):\n print('下载结束')\n return item" } ]
5
tagplay/django-uuid-pk
https://github.com/tagplay/django-uuid-pk
bbc6fd92759631345cb09bef371a7571bd7e3681
b9b333e920b4af45a56e3c9900a98e521dd0c8b7
06a61f025faffba78d737fdf29ab3b7b2b467714
refs/heads/master
2021-01-16T21:04:33.265997
2013-10-07T14:59:53
2013-10-07T14:59:53
21,210,838
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4341614842414856, "alphanum_fraction": 0.4521739184856415, "avg_line_length": 26.288135528564453, "blob_id": "cd42655210a12134a30579364ac7e0b31fb4d57a", "content_id": "86c64d765cd8672fd5fc514e0196c95d49ba164d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1610, "license_type": "permissive", "max_line_length": 99, "num_lines": 59, "path": "/django_uuid_pk/tests/settings.py", "repo_name": "tagplay/django-uuid-pk", "src_encoding": "UTF-8", "text": "import os\n\nSITE_ID = 1\nSTATIC_URL = '/static/'\nSECRET_KEY =';pkj;lkj;lkjh;lkj;oi'\ndb = os.environ.get('DBENGINE', None)\nif db == 'pg':\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'django_uuid_pk',\n 'HOST': '127.0.0.1',\n 'PORT': '',\n 'USER': 'postgres',\n 'PASSWORD': '',\n 'OPTIONS': {\n 'autocommit': True, # same value for all versions of django (is the default in 1.6)\n }}}\nelif db == 'mysql':\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'django_uuid_pk',\n 'HOST': '127.0.0.1',\n 'PORT': '',\n 'USER': 'aa',\n 'PASSWORD': ''}}\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'django_uuid_pk.sqlite',\n 'HOST': '',\n 'PORT': ''}}\n\nINSTALLED_APPS = ('django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django_uuid_pk.tests')\n\nALLOWED_HOSTS = ('127.0.0.1',)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'simple': {\n 'format': '%(levelname)-8s: %(asctime)s %(name)10s: %(funcName)40s %(message)s'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n },\n}\n" }, { "alpha_fraction": 0.5761904716491699, "alphanum_fraction": 0.6095238327980042, "avg_line_length": 16.14285659790039, "blob_id": "d233ff8630009eb765ec5749e524f9f357d0cf2c", "content_id": "fa35d27cf126cf8490a09dbc151cdd1800af15bd", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 840, "license_type": "permissive", "max_line_length": 56, "num_lines": 49, "path": "/tox.ini", "repo_name": "tagplay/django-uuid-pk", "src_encoding": "UTF-8", "text": "[tox]\nenvlist =\n d14,d15,d16,trunk\n\n[pytest]\nDJANGO_SETTINGS_MODULE=django_uuid_pk.tests.settings\nnorecursedirs = data .tox\naddopts =\n -p no:cacheprovider\n -p no:cache\n -p no:runfailed\n -p no:xdist\n -p no:pep8\n --tb=short\n --capture=no\n\npython_files=django_uuid_pk/tests/test*.py\npep8ignore = * ALL\n\n[testenv]\nwhitelist_externals = make\n /bin/sh\n /usr/bin/mysql\n /usr/bin/psql\nchangedir={toxinidir}\n\ncommands =\n make install-deps init-db ci -f {toxinidir}/Makefile\n\n\n[testenv:d14]\nbasepython = python2.7\nsetenv =\n DJANGO=1.4.x\n\n[testenv:d15]\nbasepython = python2.7\nsetenv =\n DJANGO=1.5.x\n\n[testenv:d16]\nbasepython = python2.7\nsetenv =\n DJANGO=1.6.x\n\n[testenv:trunk]\nbasepython = python2.7\nsetenv =\n DJANGO=dev\n" }, { "alpha_fraction": 0.6326963901519775, "alphanum_fraction": 0.634819507598877, "avg_line_length": 17.84000015258789, "blob_id": "c3765255e0fbefb010ee9c38b77ba03c8c801a5c", "content_id": "667e9ed2e9df83061cd052c380236894b6d62ba7", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "permissive", "max_line_length": 78, "num_lines": 25, "path": "/conftest.py", "repo_name": "tagplay/django-uuid-pk", "src_encoding": "UTF-8", "text": "import os\nimport sys\nfrom django.conf import settings\n\n\ndef pytest_configure(config):\n if not settings.configured:\n os.environ['DJANGO_SETTINGS_MODULE'] = 'django_uuid_pk.tests.settings'\n\n\n\ndef runtests(args=None):\n import pytest\n\n if not args:\n args = []\n\n if not any(a for a in args[1:] if not a.startswith('-')):\n args.append('django_uuid_pk/tests')\n\n sys.exit(pytest.main(args))\n\n\nif __name__ == '__main__':\n runtests(sys.argv)\n" }, { "alpha_fraction": 0.6931818127632141, "alphanum_fraction": 0.6931818127632141, "avg_line_length": 28.33333396911621, "blob_id": "82ff626e697a07fe274d85221e426ba6a76c646f", "content_id": "418bc4c33f1ea5ab19de284f4c74dfea43d192d8", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "permissive", "max_line_length": 40, "num_lines": 3, "path": "/django_uuid_pk/tests/__init__.py", "repo_name": "tagplay/django-uuid-pk", "src_encoding": "UTF-8", "text": "# from __future__ import absolute_import\n# from .tests import *\n# from .models import *\n" }, { "alpha_fraction": 0.6372950673103333, "alphanum_fraction": 0.6547130942344666, "avg_line_length": 36.519229888916016, "blob_id": "c995c99dace3c8b5c075429c60c0b68e35e4a5aa", "content_id": "2268ef1d3cba47c7562ec825daafc04f3fdf583b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1952, "license_type": "permissive", "max_line_length": 112, "num_lines": 52, "path": "/Makefile", "repo_name": "tagplay/django-uuid-pk", "src_encoding": "UTF-8", "text": "VERSION=2.0.0\nBUILDDIR=~build\nDJANGO_14=django==1.4.8\nDJANGO_15=django==1.5.4\nDJANGO_16=https://www.djangoproject.com/m/releases/1.6/Django-1.6b4.tar.gz\nDJANGO_DEV=git+git://github.com/django/django.git\n\nmkbuilddir:\n\tmkdir -p ${BUILDDIR}\n\ninstall-deps:\n\tpip install -r django_uuid_pk/requirements.pip python-coveralls coverage\n\ntest:\n\tpy.test -vv\n\nclean:\n\trm -fr ~build dist wfp_foodnet.egg-info .coverage .pytest MEDIA_ROOT MANIFEST *.egg .cache\n\tfind . -name __pycache__ -prune | xargs rm -rf\n\tfind . -name \"*.py?\" -prune | xargs rm -rf\n\trm -rf pep8.out flake.out coverage.xml pytest.xml\n\nfullclean: clean\n\tfind . -name *.sqlite -prune | xargs rm -rf\n\trm -fr .tox\n\ncoverage:\n\tpy.test --cov=django_uuid_pk --cov-config=django_uuid_pk/tests/.coveragerc\nifdef BROWSE\n\tfirefox ${BUILDDIR}/coverage/index.html\nendif\n\n\ninit-db:\n\t@sh -c \"if [ '${DBENGINE}' = 'mysql' ]; then mysql -e 'DROP DATABASE IF EXISTS django_uuid_pk;'; fi\"\n\t@sh -c \"if [ '${DBENGINE}' = 'mysql' ]; then pip install MySQL-python; fi\"\n\t@sh -c \"if [ '${DBENGINE}' = 'mysql' ]; then mysql -e 'create database IF NOT EXISTS django_uuid_pk;'; fi\"\n\n\t@sh -c \"if [ '${DBENGINE}' = 'pg' ]; then psql -c 'DROP DATABASE IF EXISTS django_uuid_pk;' -U postgres; fi\"\n\t@sh -c \"if [ '${DBENGINE}' = 'pg' ]; then psql -c 'CREATE DATABASE django_uuid_pk;' -U postgres; fi\"\n\t@sh -c \"if [ '${DBENGINE}' = 'pg' ]; then pip install -q psycopg2; fi\"\n\nci: mkbuilddir\n\t@sh -c \"if [ '${DJANGO}' = '1.4.x' ]; then pip install ${DJANGO_14}; fi\"\n\t@sh -c \"if [ '${DJANGO}' = '1.5.x' ]; then pip install ${DJANGO_15}; fi\"\n\t@sh -c \"if [ '${DJANGO}' = '1.6.x' ]; then pip install ${DJANGO_16}; fi\"\n\t@sh -c \"if [ '${DJANGO}' = 'dev' ]; then pip install ${DJANGO_DEV}; fi\"\n\t@pip install coverage\n\t@python -c \"from __future__ import print_function;import django;print('Django version:', django.get_version())\"\n\t@echo \"Database:\" ${DBENGINE}\n\n\tpy.test -vv --cov=django_uuid_pk --cov-report=html --cov-config=.coveragerc\n\n" }, { "alpha_fraction": 0.7326453924179077, "alphanum_fraction": 0.7439024448394775, "avg_line_length": 27.810810089111328, "blob_id": "cfbdb6e30e762f2355433798e80921ceeaae2722", "content_id": "b69d314a3f309995a3b4fbc6da45c8c7a531c2d4", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1066, "license_type": "permissive", "max_line_length": 73, "num_lines": 37, "path": "/django_uuid_pk/tests/models.py", "repo_name": "tagplay/django-uuid-pk", "src_encoding": "UTF-8", "text": "import uuid\nfrom django.db import models\nfrom django_uuid_pk.fields import UUIDField\n\n\nclass ModelUUIDField(models.Model):\n uuid1 = UUIDField(version=1, auto=True)\n uuid3 = UUIDField(namespace=uuid.NAMESPACE_URL, version=3, auto=True)\n uuid4 = UUIDField(version=4, auto=True)\n uuid5 = UUIDField(namespace=uuid.NAMESPACE_URL, version=5, auto=True)\n\nclass AutoUUIDFieldModel(models.Model):\n uuid = UUIDField(auto=True)\n\n\nclass ManualUUIDFieldModel(models.Model):\n uuid = UUIDField(auto=False)\n\n\nclass NamespaceUUIDFieldModel(models.Model):\n uuid = UUIDField(auto=True, namespace=uuid.NAMESPACE_URL, version=5)\n\n\nclass BrokenNamespaceUUIDFieldModel(models.Model):\n uuid = UUIDField(auto=True, namespace='lala', version=5)\n\n\nclass PrimaryKeyUUIDFieldModel(models.Model):\n uuid = UUIDField(primary_key=True)\n #char = models.CharField(max_length=10, null=True)\n\nclass BrokenPrimaryKeyUUIDFieldModel(models.Model):\n uuid = UUIDField(primary_key=True)\n unique = models.IntegerField(unique=True)\n\n def __repr__(self):\n return {}\n" }, { "alpha_fraction": 0.6322047114372253, "alphanum_fraction": 0.6696820855140686, "avg_line_length": 33.85585403442383, "blob_id": "79302412d241a1ce77d21f3418730b3b9a7221fb", "content_id": "0a260ce737b2d878746e24a12061e613b5db395c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3869, "license_type": "permissive", "max_line_length": 107, "num_lines": 111, "path": "/django_uuid_pk/tests/tests.py", "repo_name": "tagplay/django-uuid-pk", "src_encoding": "UTF-8", "text": "import json\nimport uuid\nfrom django.core.serializers import serialize\n\nfrom django.db import IntegrityError\nfrom django.test import TestCase\nimport pytest\nfrom django_uuid_pk.fields import StringUUID\n\nfrom django_uuid_pk.tests.models import (AutoUUIDFieldModel, ManualUUIDFieldModel, NamespaceUUIDFieldModel,\n BrokenNamespaceUUIDFieldModel, PrimaryKeyUUIDFieldModel,\n BrokenPrimaryKeyUUIDFieldModel, ModelUUIDField)\n\n\n\ndef assertJSON(data):\n try:\n json.loads(data)\n except ValueError:\n raise\n\n\n\[email protected]_db\nclass UUIDFieldTestCase(TestCase):\n def test_protocols(self):\n obj = ModelUUIDField.objects.create()\n self.assertTrue(isinstance(obj.uuid1, uuid.UUID))\n self.assertTrue(isinstance(obj.uuid3, uuid.UUID))\n self.assertTrue(isinstance(obj.uuid4, uuid.UUID))\n self.assertTrue(isinstance(obj.uuid5, uuid.UUID))\n\n def test_auto_uuid4(self):\n obj = AutoUUIDFieldModel.objects.create()\n self.assertTrue(obj.uuid)\n self.assertEquals(len(obj.uuid), 32)\n #self.assertTrue(isinstance(obj.uuid, uuid.UUID))\n self.assertEquals(obj.uuid.version, 4)\n\n def test_raises_exception(self):\n self.assertRaises(IntegrityError, ManualUUIDFieldModel.objects.create)\n\n def test_manual(self):\n obj = ManualUUIDFieldModel.objects.create(uuid=uuid.uuid4())\n self.assertTrue(obj)\n self.assertEquals(len(obj.uuid), 32)\n #self.assertTrue(isinstance(obj.uuid, uuid.UUID))\n self.assertEquals(obj.uuid.version, 4)\n\n def test_namespace(self):\n obj = NamespaceUUIDFieldModel.objects.create()\n self.assertTrue(obj)\n self.assertEquals(len(obj.uuid), 32)\n #self.assertTrue(isinstance(obj.uuid, uuid.UUID))\n self.assertEquals(obj.uuid.version, 5)\n\n def test_broken_namespace(self):\n self.assertRaises(ValueError, BrokenNamespaceUUIDFieldModel.objects.create)\n\n def test_wrongvalue(self):\n obj = PrimaryKeyUUIDFieldModel.objects.create()\n with pytest.raises(ValueError):\n obj.uuid = 1\n\n def test_assign1(self):\n obj = PrimaryKeyUUIDFieldModel.objects.create()\n obj.uuid = uuid.UUID('5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4')\n obj.save()\n assert str(obj.uuid) == '5b27d1bde7c346f3aaf211e4d32f60d4'\n #assert obj.uuid == '5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4'\n assert obj.uuid == uuid.UUID('5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4')\n\n def test_assign2(self):\n obj = PrimaryKeyUUIDFieldModel.objects.create()\n obj.uuid = '5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4'\n obj.save()\n assert str(obj.uuid) == '5b27d1bde7c346f3aaf211e4d32f60d4'\n\n\n def test_primary_key(self):\n obj = PrimaryKeyUUIDFieldModel.objects.create()\n assert obj.pk\n\n obj = PrimaryKeyUUIDFieldModel()\n assert not obj.pk\n\n # reset primary key if save() fails\n BrokenPrimaryKeyUUIDFieldModel.objects.create(unique=1)\n obj = BrokenPrimaryKeyUUIDFieldModel(unique=1)\n with pytest.raises(IntegrityError):\n obj.save()\n assert not obj.pk\n\n def test_serialize(self):\n obj = PrimaryKeyUUIDFieldModel.objects.create()\n obj.uuid = uuid.UUID(\"2e9280cfdc8e42bdbf0afa3043acaa7e\")\n obj.save()\n serialized = serialize('json', PrimaryKeyUUIDFieldModel.objects.all())\n assertJSON(serialized)\n\n #def test_json(self):\n # obj = PrimaryKeyUUIDFieldModel.objects.create()\n # obj.save()\n # serialized = json.dumps(obj)\n # assertJSON(serialized)\n\n #deserialized = json.loads(serialized, object_hook=registry.object_hook)\n #\n #print 111, deserialized\n #\n #assert PrimaryKeyUUIDField(**deserialized).uuid == obj.uuid\n" }, { "alpha_fraction": 0.7611026167869568, "alphanum_fraction": 0.7611026167869568, "avg_line_length": 37.411766052246094, "blob_id": "cf65025b81e24ad3e4727293321aca3cffb7a358", "content_id": "fb7bc04ab73a64d2ee5cfdd689a153d25d55b92a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 653, "license_type": "permissive", "max_line_length": 91, "num_lines": 17, "path": "/README.rst", "repo_name": "tagplay/django-uuid-pk", "src_encoding": "UTF-8", "text": "django-uuid-pk\n==============\n\n\n.. image:: https://secure.travis-ci.org/saxix/django-uuid-pk.png?branch=develop\n :target: http://travis-ci.org/saxix/django-django-uuid-pk/\n\n\ndjango uuidfield that can be used as primary key\n\nThe reason of this project is that any other UUDField implementation that I found\ndoes not reflect the expected behaviour when used as primary-key,\nif the save() fails for any reason the value of the field persist so that checking `obj.pk`\ncannot be used anymore to check if the object is stored or not.\n\nThe code is mainly based on the dcramer's implementation that can be found at\nhttps://github.com/dcramer/django-uuidfield\n" } ]
8
paolapilar/juegos
https://github.com/paolapilar/juegos
42ecc096da22b7f38615e1e2ba47a7ca25055d16
3dcaedf1a89e06f3a773e707cdb776cabb06459e
0e2a772e2d8ba247711d0bbdbd485ae4c0d72d7d
refs/heads/master
2020-06-09T03:53:57.906088
2019-06-24T02:59:24
2019-06-24T02:59:24
193,365,557
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.487226277589798, "alphanum_fraction": 0.510948896408081, "avg_line_length": 29.44444465637207, "blob_id": "9a3007d419a268cbc7d828d18d86e880f5298272", "content_id": "7b1c3146fb92a6637f7d9e76db04236d2a21daa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 88, "num_lines": 18, "path": "/collectables.py", "repo_name": "paolapilar/juegos", "src_encoding": "UTF-8", "text": "\nimport pygame\nimport base\n\nclass Apple( base.Entity ) :\n\n def __init__( self, i, j, cellSize, canvasWidth, canvasHeight ) :\n super( Apple, self ).__init__( i, j, 1, 1, cellSize, canvasWidth, canvasHeight )\n\n self._color = ( 255, 255, 0 )\n self._alive = True\n\n def draw( self, canvas ) :\n _xleft = self._x - 0.5 * self._cellSize \n _ytop = self._y - 0.5 * self._cellSize\n\n pygame.draw.rect( canvas, \n self._color, \n (_xleft, _ytop, self._w, self._h) )" }, { "alpha_fraction": 0.8072289228439331, "alphanum_fraction": 0.8072289228439331, "avg_line_length": 40.5, "blob_id": "5dad83eb9e1e7b11dfc74ec1d59a06fadff9cffb", "content_id": "036bb4d3bfbc9475bc8b0caf8d23c374da9762c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 84, "license_type": "no_license", "max_line_length": 73, "num_lines": 2, "path": "/README.md", "repo_name": "paolapilar/juegos", "src_encoding": "UTF-8", "text": "# juegos\nAlgunos juegos hechos para el curso de programación de videojuegos @ ucsp\n" }, { "alpha_fraction": 0.4847426116466522, "alphanum_fraction": 0.49685534834861755, "avg_line_length": 33.620967864990234, "blob_id": "7caf06f48ffc267f2484a6f5fdb47226db7797b4", "content_id": "a655d183ad55dabf88f22fefedb01b30ebecf214", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4293, "license_type": "no_license", "max_line_length": 100, "num_lines": 124, "path": "/snake.py", "repo_name": "paolapilar/juegos", "src_encoding": "UTF-8", "text": "\nimport pygame\nimport base\n\nfrom collections import deque\n\nclass SnakePart( base.Entity ) :\n \n def __init__( self, i, j, color, cellSize, canvasWidth, canvasHeight ) :\n super( SnakePart, self ).__init__( i, j, 1, 1, cellSize, canvasWidth, canvasHeight )\n\n self.color = color\n self.lasti = i\n self.lastj = j\n\n def draw( self, canvas ) :\n _xleft = self._x - 0.5 * self._cellSize \n _ytop = self._y - 0.5 * self._cellSize\n\n pygame.draw.rect( canvas, \n self.color, \n (_xleft, _ytop, self._w, self._h) )\t\n\nclass Snake( base.Entity ) :\n\n def __init__( self, i, j, cellSize, canvasWidth, canvasHeight ) :\n super( Snake, self ).__init__( i, j, 1, 1, cellSize, canvasWidth, canvasHeight )\n\n self._bodyParts = [ SnakePart( i, j, ( 50, 50, 50 ), cellSize, canvasWidth, canvasHeight ) ]\n self._speed = 800.\n self._direction = 'left'\n self._displacement = 0.0\n self._frameTime = 0.001\n\n self._nx = int( canvasWidth / cellSize )\n self._ny = int( canvasHeight / cellSize )\n\n self._alive = True\n\n def alive( self ) :\n return self._alive\n\n def head( self ) :\n return self._bodyParts[0]\n\n def tail( self ) :\n return self._bodyParts[-1]\n\n def setDirection( self, direction ) :\n if len( self._bodyParts ) > 1 :\n # chequear si quieren ir a la direccion contraria\n if ( self._direction == 'left' and direction == 'right' or\n self._direction == 'right' and direction == 'left' or\n self._direction == 'up' and direction == 'down' or\n self._direction == 'down' and direction == 'up' ) :\n # mantener la misma direccion\n self._direction = self._direction\n else :\n # cambiar la direction\n self._direction = direction\n else :\n self._direction = direction\n\n def grow( self ) :\n _i = self.tail().lasti\n _j = self.tail().lastj\n\n _newPart = SnakePart( _i, _j, \n ( 50, 50, 50 ), \n self._cellSize, \n self._canvasWidth, \n self._canvasHeight )\n self._bodyParts.append( _newPart )\n\n def update( self ) :\n self._displacement = self._displacement + self._speed * self._frameTime\n if self._displacement > self._cellSize :\n self.head().lasti = self.head().i\n self.head().lastj = self.head().j\n # mover una casilla en la direccion adecuada\n if self._direction == 'up' :\n self.head().j += 1\n elif self._direction == 'down' :\n self.head().j -= 1\n elif self._direction == 'right' :\n self.head().i += 1\n elif self._direction == 'left' :\n self.head().i -= 1\n\n for k in range( 1, len( self._bodyParts ) ) :\n self._bodyParts[k].lasti = self._bodyParts[k].i\n self._bodyParts[k].lastj = self._bodyParts[k].j\n\n self._bodyParts[k].i = self._bodyParts[k-1].lasti\n self._bodyParts[k].j = self._bodyParts[k-1].lastj\n\n # resetear el acumulador\n self._displacement = 0.0\n\n if self.head()._x > 800. and self._direction == 'right' :\n self.head().i = 0\n \n if self.head()._x < 0. and self._direction == 'left' :\n self.head().i = self._nx\n\n if self.head()._y > 600. and self._direction == 'down' :\n self.head().j = self._ny\n \n if self.head()._y < 0. and self._direction == 'up' :\n self.head().j = 0\n \n for k in range( len( self._bodyParts ) ) :\n self._bodyParts[k].update()\n\n for i in range( 1, len( self._bodyParts ) ) :\n if self.head().hit( self._bodyParts[i] ):\n self._alive = False\n\n def draw( self, canvas ) :\n for k in range( len( self._bodyParts ) ) :\n self._bodyParts[k].draw( canvas )\n \n ## # la misma forma de iterar\n ## for bodyPart in self._bodyParts :\n ## \t bodyPart.draw( canvas )" }, { "alpha_fraction": 0.5226320028305054, "alphanum_fraction": 0.5565800666809082, "avg_line_length": 26.744186401367188, "blob_id": "031e722d2dbb8d17e4b0ebbbee3d25304f8e146c", "content_id": "6854f74e715d1d78a875a1a27ced1351a8efe7fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2386, "license_type": "no_license", "max_line_length": 84, "num_lines": 86, "path": "/screen.py", "repo_name": "paolapilar/juegos", "src_encoding": "UTF-8", "text": "\nimport pygame\nimport world\n\nclass Text( object ) :\n\n def __init__( self, x, y, message, size, color ) :\n super( Text, self).__init__()\n\n self._message = message\n self._textFont = pygame.font.Font( None, size )\n self._textSurface = self._textFont.render( message, True, color )\n self._textRect = self._textSurface.get_rect()\n self._textRect.center = ( x, y )\n\n def draw( self, canvas ) :\n canvas.blit( self._textSurface, self._textRect )\n\nclass Screen( object ) :\n\n def __init__( self, canvas, backgroundColor ) :\n super( Screen, self ).__init__()\n\n self._canvas = canvas\n self._backgroundColor = backgroundColor\n self._texts = []\n\n self._keys = None\n\n def setKeys( self, keys ) :\n self._keys = keys\n\n def addText( self, text ) :\n self._texts.append( text )\n\n def draw( self ) :\n self._canvas.fill( self._backgroundColor )\n\n for i in range( len( self._texts ) ) :\n self._texts[i].draw( self._canvas )\n\n def update( self ) :\n pass\n\nclass MenuScreen( Screen ) :\n\n def __init__( self, canvas ) :\n super( MenuScreen, self ).__init__( canvas, ( 255, 255, 0 ) )\n\n self._textTitle = Text( 100, 100, 'SNAKE', 50, ( 0, 0, 0 ) )\n self._textPlay = Text( 100, 400, 'PLAY', 40, ( 255, 255, 255 ) )\n\n self.addText( self._textTitle )\n self.addText( self._textPlay )\n\nclass GameOverScreen( Screen ) :\n\n def __init__( self, canvas ) :\n super( GameOverScreen, self ).__init__( canvas, ( 0, 0, 0 ) )\n\n self._textGameOver = Text( 100, 100, 'GAME OVER :(', 50, ( 255, 0, 255 ) )\n self._textContinue = Text( 100, 400, 'Continue???', 40, ( 255, 255, 255 ) )\n\n self.addText( self._textGameOver )\n self.addText( self._textContinue )\n\nclass GameScreen( Screen ) :\n\n def __init__( self, canvas, canvasWidth, canvasHeight ) :\n super( GameScreen, self ).__init__( canvas, ( 255, 255, 255 ) )\n\n self._world = world.World( 40, canvasWidth, canvasHeight )\n\n def draw( self ) :\n super( GameScreen, self ).draw()\n\n self._world.draw( self._canvas )\n\n def update( self ) :\n self._world.setKeys( self._keys )\n self._world.update()\n\n def lose( self ) :\n return self._world.lose()\n\n def win( self ) :\n return self._world.win()" }, { "alpha_fraction": 0.45003244280815125, "alphanum_fraction": 0.4656067490577698, "avg_line_length": 33.7593994140625, "blob_id": "23cc2559ec5e25e8ad00f376d8c7085b5060b716", "content_id": "41c91f17e3d42d1e0b8b9e78605abce0441b3c71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9246, "license_type": "no_license", "max_line_length": 130, "num_lines": 266, "path": "/world.py", "repo_name": "paolapilar/juegos", "src_encoding": "UTF-8", "text": "\nimport math\nimport random\nimport pygame\n\nfrom base import Entity\nfrom snake import Snake\nfrom collectables import Apple\n\nclass Obstacle( Entity ) :\n\n def __init__( self, i, j, di, dj, cellSize, canvasWidth, canvasHeight ) :\n super( Obstacle, self ).__init__( i, j, di, dj, cellSize, canvasWidth, canvasHeight )\n\n self._color = ( 255, 0, 0 )\n\n def draw( self, canvas ) :\n _xleft = self._x - 0.5 * self._cellSize \n _ytop = self._y - 0.5 * self._cellSize\n\n pygame.draw.rect( canvas, \n self._color, \n (_xleft, _ytop, self._w, self._h) )\n\nclass World( object ) :\n\n def __init__( self, cellSize, canvasWidth, canvasHeight, level = 1 ) :\n super( World, self ).__init__()\n\n self._cellSize = cellSize\n self._canvasWidth = canvasWidth\n self._canvasHeight = canvasHeight\n \n self._level = level\n\n self._nx = int( self._canvasWidth / self._cellSize )\n self._ny = int( self._canvasHeight / self._cellSize )\n\n self._maxLives = 4\n self._numLives = 4\n\n self._snake = Snake( int( self._nx / 2. ), \n int( self._ny / 2. ),\n self._cellSize,\n self._canvasWidth,\n self._canvasHeight )\n\n self._gameWin = False\n self._gameOver = False\n self._keys = None\n\n self._points = 0\n\n self._font = pygame.font.Font( None, 40 )\n\n self._obstacles = []\n self._occupied = []\n self._apples = []\n\n self._createObstacles()\n self._createWalls()\n\n for obstacle in self._obstacles :\n self._occupied.append( ( obstacle.i, obstacle.j ) )\n\n self._createApples( 1 )\n\n if self._level == 1 :\n self._snake._speed = 800.\n elif self._level == 2 :\n self._snake._speed = 2100.\n elif self._level == 3 :\n self._snake._speed = 2100.\n\n def _createObstacles( self ) :\n if self._level == 1 :\n return\n elif self._level == 2 :\n while len( self._obstacles ) < 5 :\n _i = random.randint(0, self._nx)\n _j = random.randint(0, self._ny)\n if _i == int( self._nx / 2 ) and _j == int( self._ny / 2 ) :\n continue\n self._obstacles.append( Obstacle( _i, _j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n\n elif self._level == 3 :\n while len( self._obstacles ) < 10 :\n _i = random.randint(0, self._nx)\n _j = random.randint(0, self._ny)\n if _i == int( self._nx / 2 ) and _j == int( self._ny / 2 ) :\n continue\n self._obstacles.append( Obstacle( _i, _j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n\n def _createWalls( self ) :\n if self._level == 1 :\n return\n elif self._level == 2 :\n for i in range( self._nx ) :\n self._obstacles.append( Obstacle( i, 0, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n self._obstacles.append( Obstacle( i, self._ny - 1, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n \n for j in range( self._ny ) :\n self._obstacles.append( Obstacle( 0, j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n self._obstacles.append( Obstacle( self._nx - 1, j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n elif self._level == 3 :\n for i in range( self._nx ) :\n if i == int( self._nx / 2 ) :\n continue\n\n self._obstacles.append( Obstacle( i, 0, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n self._obstacles.append( Obstacle( i, self._ny - 1, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n \n for j in range( self._ny ) :\n if j == int( self._ny / 2 ) :\n continue\n\n self._obstacles.append( Obstacle( 0, j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n self._obstacles.append( Obstacle( self._nx - 1, j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n\n def _createApples( self, maxApples = 20 ) :\n\n while True :\n _i = random.randint( 2, self._nx - 2 )\n _j = random.randint( 2, self._ny - 2 )\n _canCreate = True\n\n for _occupiedPosition in self._occupied :\n _ioccupied = _occupiedPosition[0]\n _joccupied = _occupiedPosition[1]\n\n if _i == _ioccupied and _j == _joccupied :\n _canCreate = False\n break\n\n if _canCreate :\n self._apples.append( Apple( _i, _j, self._cellSize, self._canvasWidth, self._canvasHeight ) )\n\n if len( self._apples ) >= maxApples :\n break\n\n def setKeys( self, keys ) :\n self._keys = keys\n\n def restart( self ) :\n self._points = 0\n self._snake = Snake( int( self._nx / 2. ), \n int( self._ny / 2. ),\n self._cellSize,\n self._canvasWidth,\n self._canvasHeight )\n\n if self._level == 1 :\n self._snake._speed = 800.\n elif self._level == 2 :\n self._snake._speed = 2100.\n elif self._level == 3 :\n self._snake._speed = 2100.\n\n self._apples = []\n self._obstacles = []\n self._occupied = []\n\n self._createObstacles()\n self._createWalls()\n\n for obstacle in self._obstacles :\n self._occupied.append( ( obstacle.i, obstacle.j ) )\n\n self._createApples( 1 )\n\n def _drawGrid( self, canvas ) :\n for i in range( self._nx ) :\n xline = ( i + 1 ) * self._cellSize\n pygame.draw.line( canvas, \n ( 0, 0, 0 ),\n ( xline, 0 ), \n ( xline, self._canvasHeight ),\n 1 )\n\n for j in range( self._ny ) :\n yline = ( j + 1 ) * self._cellSize\n pygame.draw.line( canvas, \n ( 0, 0, 0 ),\n ( 0, yline ), \n ( self._canvasWidth, yline ),\n 1 )\n\n def _drawScore( self, canvas ) :\n _textSurface = self._font.render( 'Puntaje: %d - Vidas: %d' % ( self._points, self._numLives ),\n True,\n ( 0, 0, 255 ) )\n\n _textSurface.get_rect().center = ( 30, 30 )\n\n canvas.blit( _textSurface, _textSurface.get_rect() )\n\n def draw( self, canvas ) :\n self._drawGrid( canvas )\n self._snake.draw( canvas )\n\n for obstacle in self._obstacles :\n obstacle.draw( canvas )\n\n for apple in self._apples :\n apple.draw( canvas )\n\n self._drawScore( canvas )\n \n def update( self ) :\n if self._keys :\n if self._keys['up'] == True :\n self._snake.setDirection( 'up' )\n elif self._keys['down'] == True :\n self._snake.setDirection( 'down' )\n elif self._keys['right'] == True :\n self._snake.setDirection( 'right' )\n elif self._keys['left'] == True :\n self._snake.setDirection( 'left' )\n\n self._snake.update()\n\n for obstacle in self._obstacles :\n obstacle.update()\n if self._snake.head().hit( obstacle ) :\n self._snake._alive = False\n\n if not self._snake.alive() :\n self._numLives = self._numLives - 1\n if self._numLives >= 1 :\n self.restart()\n else :\n self._gameOver = True\n return\n\n for i in range( len( self._apples ) ) :\n self._apples[i].update()\n if self._snake.head().hit( self._apples[i] ) :\n self._apples[i]._alive = False\n self._snake.grow()\n self._points = self._points + 1\n self._createApples( 1 )\n if self._level == 1 and self._points >= 5 :\n self._level = 2\n self._numLives = 4\n self._points = 0\n self.restart()\n elif self._level == 2 and self._points >= 10 :\n self._level = 3\n self._numLives = 4\n self._points = 0\n self.restart()\n elif self._level == 3 and self._points >= 15 :\n self._gameWin = True\n return\n\n _newApples = []\n for apple in self._apples :\n if apple._alive :\n _newApples.append( apple )\n\n self._apples = _newApples\n\n def lose( self ) :\n return self._gameOver\n\n def win( self ) :\n return self._gameWin" }, { "alpha_fraction": 0.4907591640949249, "alphanum_fraction": 0.4936025142669678, "avg_line_length": 35.25773239135742, "blob_id": "7e1276450dec0bc0db991381a7ae0228a2a8ac75", "content_id": "c8521d26e301b48feccc378287396d682b76e3bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3517, "license_type": "no_license", "max_line_length": 99, "num_lines": 97, "path": "/main.py", "repo_name": "paolapilar/juegos", "src_encoding": "UTF-8", "text": "\nimport pygame\nimport random\nimport time\n\nfrom snake import Snake\nfrom collectables import Apple\n\nimport screen\n\nclass Game :\n\n def __init__( self ) :\n pygame.init()\n self._canvasWidth = 800\n self._canvasHeight = 600\n self._canvas = pygame.display.set_mode( ( self._canvasWidth, self._canvasHeight ) )\n self._gameExit = False\n self._keys = { 'up' : False, \n 'down' : False, \n 'right' : False, \n 'left' : False,\n 'enter' : False,\n 'escape' : False }\n\n self._screen = screen.MenuScreen( self._canvas )\n self._screenName = 'menu'\n\n def _getEvents( self ) :\n for event in pygame.event.get() :\n if event.type == pygame.QUIT :\n self._gameExit = True\n elif event.type == pygame.KEYDOWN :\n if event.key == pygame.K_UP :\n self._keys['up'] = True\n elif event.key == pygame.K_DOWN :\n self._keys['down'] = True\n elif event.key == pygame.K_RIGHT :\n self._keys['right'] = True\n elif event.key == pygame.K_LEFT :\n self._keys['left'] = True\n elif event.key == pygame.K_RETURN :\n self._keys['enter'] = True\n elif event.key == pygame.K_ESCAPE :\n self._keys['escape'] = True\n elif event.type == pygame.KEYUP :\n if event.key == pygame.K_UP :\n self._keys['up'] = False\n elif event.key == pygame.K_DOWN :\n self._keys['down'] = False\n elif event.key == pygame.K_RIGHT :\n self._keys['right'] = False\n elif event.key == pygame.K_LEFT :\n self._keys['left'] = False\n elif event.key == pygame.K_RETURN :\n self._keys['enter'] = False\n elif event.key == pygame.K_ESCAPE :\n self._keys['escape'] = False\n\n def _updateScreen( self ) :\n self._screen.setKeys( self._keys )\n self._screen.update()\n self._screen.draw()\n\n if self._screenName == 'menu' and self._keys['enter'] == True :\n self._screen = screen.GameScreen( self._canvas, self._canvasWidth, self._canvasHeight )\n self._screenName = 'game'\n\n elif self._screenName == 'game' and self._screen.lose() :\n self._screen = screen.GameOverScreen( self._canvas )\n self._screenName = 'gameover'\n\n elif self._screenName == 'game' and self._screen.win() :\n self._screen = screen.MenuScreen( self._canvas ) \n self._screenName = 'menu'\n\n elif self._screenName == 'gameover' and self._keys['enter'] == True :\n self._screen = screen.GameScreen( self._canvas, self._canvasWidth, self._canvasHeight )\n self._screenName = 'game'\n\n elif self._screenName == 'gameover' and self._keys['escape'] == True :\n self._screen = screen.MenuScreen( self._canvas )\n self._screenName = 'menu'\n\n def run( self ) :\n \n while not self._gameExit :\n self._getEvents()\n self._updateScreen()\n\n # actualizar el canvas\n pygame.display.update()\n # esperar un ratito\n time.sleep( 0.001 )\n\nif __name__ == '__main__' :\n _game = Game()\n _game.run()" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 30.81818199157715, "blob_id": "e3dd05089affa1f54a6d352a2959761ae0a978db", "content_id": "042dd0f6c72720e49a5b5c7c1099ef8b22eeb997", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 62, "num_lines": 11, "path": "/utils.py", "repo_name": "paolapilar/juegos", "src_encoding": "UTF-8", "text": "\nimport math\n\ndef grid2screen( i, j, cellSize, canvasWidth, canvasHeight ) :\n x = ( i + 0.5 ) * cellSize\n y = canvasHeight - ( j + 0.5 ) * cellSize\n return x, y\n\ndef screen2grid( x, y, cellSize, canvasWidth, canvasHeight ) :\n i = math.floor( x / cellSize - 0.5 )\n j = math.floor( ( canvasHeight - y ) / cellSize - 0.5 )\n return i, j" }, { "alpha_fraction": 0.43322649598121643, "alphanum_fraction": 0.4535256326198578, "avg_line_length": 30.200000762939453, "blob_id": "80564435292770baad0299f840a65e9cbabdcd19", "content_id": "88bd74a8abb4c38e6ce3f726705992877fd0a7b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1872, "license_type": "no_license", "max_line_length": 121, "num_lines": 60, "path": "/base.py", "repo_name": "paolapilar/juegos", "src_encoding": "UTF-8", "text": "\nimport math\nimport utils\n\nclass Entity( object ) :\n\n def __init__( self, i, j, di, dj, cellSize, canvasWidth, canvasHeight ) :\n super( Entity, self ).__init__()\n\n self.i = i\n self.j = j\n\n self._cellSize = cellSize\n self._canvasWidth = canvasWidth\n self._canvasHeight = canvasHeight\n self._di = di\n self._dj = dj\n\n self._x, self._y = utils.grid2screen( i, j, cellSize, canvasWidth, canvasHeight )\n self._w = di * cellSize\n self._h = dj * cellSize\n\n self._xc = self._x + self._cellSize * ( math.floor( ( self._di - 1 ) / 2. ) + 0.5 if self._di % 2 == 0 else 0.0 )\n self._yc = self._y + self._cellSize * ( math.floor( ( self._dj - 1 ) / 2. ) + 0.5 if self._dj % 2 == 0 else 0.0 )\n\n def x( self ) :\n return self._x\n\n def y( self ) :\n return self._y\n\n def xc( self ) :\n return self._xc\n\n def yc( self ) :\n return self._yc\n\n def w( self ) :\n return self._w\n\n def h( self ) :\n return self._h\n\n def update( self ) :\n self._x, self._y = utils.grid2screen( self.i, self.j, \n self._cellSize, \n self._canvasWidth, \n self._canvasHeight )\n\n self._xc = self._x + self._cellSize * ( math.floor( ( self._di - 1 ) / 2. ) + 0.5 if self._di % 2 == 0 else 0.0 )\n self._yc = self._y + self._cellSize * ( math.floor( ( self._dj - 1 ) / 2. ) + 0.5 if self._dj % 2 == 0 else 0.0 )\n\n def hit( self, other ) :\n _dx = abs( self._xc - other.xc() )\n _dy = abs( self._yc - other.yc() )\n\n if ( _dx < ( self._w / 2. ) + ( other.w() / 2. ) and\n _dy < ( self._h / 2. ) + ( other.h() / 2. ) ) :\n return True\n else :\n return False" } ]
8
JoanJaraBosch/web-personal-django
https://github.com/JoanJaraBosch/web-personal-django
665fcb601a81328cbe5565b16d45637f8a55466b
54779d1b26cdcd569dce20f432974a891b7bd7c1
29426d60fe2df72748579385cbf60f9e3022ff2f
refs/heads/master
2022-12-19T04:21:08.873906
2020-03-24T20:26:13
2020-03-24T20:26:13
203,875,051
0
0
null
2019-08-22T21:19:39
2020-03-24T20:26:17
2022-12-08T06:04:04
JavaScript
[ { "alpha_fraction": 0.5497835278511047, "alphanum_fraction": 0.5688311457633972, "avg_line_length": 29.394737243652344, "blob_id": "c646e35d67bbdb2710fd7621f80706915df52d90", "content_id": "6870988c415d9cbb504d8cb08ae2482e0e4ae525", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1159, "license_type": "no_license", "max_line_length": 90, "num_lines": 38, "path": "/webpersonal/portafolio/migrations/0002_auto_20190822_2247.py", "repo_name": "JoanJaraBosch/web-personal-django", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2019-08-22 20:47\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('portafolio', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='project',\n name='createdDate',\n field=models.DateTimeField(auto_now_add=True, verbose_name='Data de creació'),\n ),\n migrations.AlterField(\n model_name='project',\n name='description',\n field=models.TextField(verbose_name='Desccripció'),\n ),\n migrations.AlterField(\n model_name='project',\n name='image',\n field=models.ImageField(upload_to='projects', verbose_name='Imatge'),\n ),\n migrations.AlterField(\n model_name='project',\n name='title',\n field=models.CharField(max_length=200, verbose_name='Títol'),\n ),\n migrations.AlterField(\n model_name='project',\n name='updatedDate',\n field=models.DateTimeField(auto_now=True, verbose_name='Data dactualització'),\n ),\n ]\n" }, { "alpha_fraction": 0.49470898509025574, "alphanum_fraction": 0.6957672238349915, "avg_line_length": 16.18181800842285, "blob_id": "cf067d1c12db5840f242efb956834a8f5295b6e8", "content_id": "4c44cfc867e3d24f86fee4b58a162331339422c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 378, "license_type": "no_license", "max_line_length": 24, "num_lines": 22, "path": "/webpersonal/requirements.txt", "repo_name": "JoanJaraBosch/web-personal-django", "src_encoding": "UTF-8", "text": "astroid==2.2.5\ncertifi==2019.6.16\ncolorama==0.4.1\ndj-database-url==0.5.0\nDjango==2.2.10\ngunicorn==19.9.0\nisort==4.3.21\nlazy-object-proxy==1.4.1\nmccabe==0.6.1\nPillow==6.1.0\npsycopg2==2.7.4\npylint==2.3.1\npylint-django==2.0.11\npylint-plugin-utils==0.5\npython-decouple==3.1\npytz==2019.2\nsix==1.12.0\nsqlparse==0.3.0\ntyped-ast==1.3.4\nwhitenoise==4.1.3\nwincertstore==0.2\nwrapt==1.11.2\n" }, { "alpha_fraction": 0.6867470145225525, "alphanum_fraction": 0.6907630562782288, "avg_line_length": 40.55555725097656, "blob_id": "9009fc1ddea393573e1951b7e713858654fdf2e6", "content_id": "39e8f009cab411cd41f5b6b2e659f1a62fcac2a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 752, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/webpersonal/portafolio/models.py", "repo_name": "JoanJaraBosch/web-personal-django", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Project(models.Model):\n title = models.CharField(max_length=200, verbose_name = 'Títol')\n moreinfo = models.URLField(null=True, blank=True,verbose_name = 'Mes Informació')\n description = models.TextField(verbose_name = 'Desccripció')\n image = models.ImageField(verbose_name = 'Imatge', upload_to = 'projects')\n createdDate = models.DateTimeField(auto_now_add=True, verbose_name = 'Data de creació')\n updatedDate = models.DateTimeField(auto_now=True, verbose_name = 'Data dactualització')\n\n class Meta:\n verbose_name = 'Projecte'\n verbose_name_plural = 'Projectes'\n ordering = [\"-createdDate\"]\n\n def __str__(self):\n return self.title" }, { "alpha_fraction": 0.5302013158798218, "alphanum_fraction": 0.5872483253479004, "avg_line_length": 24.913043975830078, "blob_id": "e16b97f1ccfcd5a7a299d91cacadb2c6313bab81", "content_id": "a465241de979e2ab1b7c5326586a93370a95688e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 598, "license_type": "no_license", "max_line_length": 82, "num_lines": 23, "path": "/webpersonal/portafolio/migrations/0005_auto_20190822_2252.py", "repo_name": "JoanJaraBosch/web-personal-django", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2019-08-22 20:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('portafolio', '0004_auto_20190822_2251'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='project',\n name='description',\n field=models.TextField(verbose_name='Desccripció'),\n ),\n migrations.AlterField(\n model_name='project',\n name='moreinfo',\n field=models.CharField(max_length=200, verbose_name='Mes Informació'),\n ),\n ]\n" }, { "alpha_fraction": 0.552293598651886, "alphanum_fraction": 0.5853211283683777, "avg_line_length": 29.33333396911621, "blob_id": "f0a7d754e097365df67195ab22fc615b5b788a3f", "content_id": "0d13274000f314bc969b7f5af8ffad398c54ffd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 548, "license_type": "no_license", "max_line_length": 92, "num_lines": 18, "path": "/webpersonal/core/templates/core/contact.html", "repo_name": "JoanJaraBosch/web-personal-django", "src_encoding": "UTF-8", "text": "{% extends 'core/base.html' %}\n{% block title %}Contacte{% endblock %}\n\n{% block headers %}\n<h1>Contacte</h1>\n<span class=\"subheading\">Assessoria</span>\n{% endblock %}\n{% block background %}{% load static %} {% static 'core/img/contact-bg.jpg' %}{% endblock %}\n{% block content %}\n<div class='row project'>\n <div class=\"col-lg-8 col-md-10 mx-auto\">\n <p>Per qualsevol dubte contacteu amb mi:</p>\n <br>\n <p><b>Número:</b> +34 692698058</p>\n <p><b>Honoraris:</b> 60€/h (preu base)</p>\n </div>\n</div>\n{% endblock %}" }, { "alpha_fraction": 0.5652173757553101, "alphanum_fraction": 0.6264821887016296, "avg_line_length": 24.299999237060547, "blob_id": "85061bff0254c536131df9c2923c4ff3829f7606", "content_id": "aed4da09717d896deb30d8fe0d259e5fd52de184", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 507, "license_type": "no_license", "max_line_length": 101, "num_lines": 20, "path": "/webpersonal/portafolio/migrations/0003_project_moreinfo.py", "repo_name": "JoanJaraBosch/web-personal-django", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2019-08-22 20:50\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('portafolio', '0002_auto_20190822_2247'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='project',\n name='moreinfo',\n field=models.TextField(default=django.utils.timezone.now, verbose_name='Mes Informació'),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5568720102310181, "alphanum_fraction": 0.6090047359466553, "avg_line_length": 22.44444465637207, "blob_id": "6e2ab1b86215f61f718a6148169095f7d3da3bac", "content_id": "e4fcd3b4bdd0716c58f4b384f1b91560ddf021a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "no_license", "max_line_length": 79, "num_lines": 18, "path": "/webpersonal/portafolio/migrations/0004_auto_20190822_2251.py", "repo_name": "JoanJaraBosch/web-personal-django", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.4 on 2019-08-22 20:51\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('portafolio', '0003_project_moreinfo'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='project',\n name='description',\n field=models.CharField(max_length=200, verbose_name='Desccripció'),\n ),\n ]\n" } ]
7
birkin/ezb_dbprx
https://github.com/birkin/ezb_dbprx
439620dbc99e060395ae76df4d832276300a8dc2
05aee03d2f2d5f724460c21c957ca1efeac0d190
1cbae856c8d1976322f39fe1907c5d1d1a6497a7
refs/heads/master
2021-01-01T06:54:51.960573
2014-06-19T12:54:02
2014-06-19T12:54:02
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6248793005943298, "alphanum_fraction": 0.630480945110321, "avg_line_length": 35.716312408447266, "blob_id": "7383614402ec869a29a117270e1ee22ef6e78192", "content_id": "45fa14c0c00e899945174decf6975540cab045bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5177, "license_type": "no_license", "max_line_length": 152, "num_lines": 141, "path": "/proxy_app.py", "repo_name": "birkin/ezb_dbprx", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport datetime, json, os\nimport flask\nfrom ezb_dbprx.config import settings\nfrom ezb_dbprx.utils import logger_setup, db_handler\nfrom flask.ext.basicauth import BasicAuth # http://flask-basicauth.readthedocs.org/en/latest/\n\n\n## setup\napp = flask.Flask(__name__)\nlog = logger_setup.setup_logger()\n#\napp.config['BASIC_AUTH_USERNAME'] = settings.BASIC_AUTH_USERNAME\napp.config['BASIC_AUTH_PASSWORD'] = settings.BASIC_AUTH_PASSWORD\nbasic_auth = BasicAuth(app)\n\n\n## experimentation ##\n\n\[email protected]( '/hello1/', methods=['GET'] )\ndef hi_a():\n \"\"\" Tests simple json response return. \"\"\"\n return flask.jsonify( {'hello': 'world'} )\n\n\[email protected]( '/hello2/', methods=['GET'] )\ndef hi_b():\n \"\"\" Tests logging. \"\"\"\n log.info( u'hi there' )\n return flask.jsonify( {'hello': 'world2'} )\n\n\[email protected]( '/basic_auth/', methods=['GET'] )\n@basic_auth.required\ndef try_basic_auth():\n \"\"\" Tests basic-auth. \"\"\"\n log.info( u'in proxy_app.try_basic_auth()' )\n return flask.jsonify( {'got': 'through'} )\n\n\[email protected]( '/forbidden/', methods=['GET'] )\ndef try_forbidden():\n \"\"\" Tests forbidden response. \"\"\"\n log.debug( u'in proxy_app.try_forbidden()' )\n return flask.abort( 403 )\n\n\[email protected]( '/post_test/', methods=['POST'] )\ndef handle_post():\n \"\"\" Tests perceiving params response return. \"\"\"\n value_a = flask.request.form['key_a'].strip()\n return flask.jsonify( {u'key_a': value_a} )\n\n\n## real work ##\n\n\[email protected]( u'/my_ip/', methods=['GET'] )\ndef show_ip():\n \"\"\" Returns ip.\n Note: this was a test, but could be useful for debugging. \"\"\"\n ip = flask.request.remote_addr\n log.debug( u'in proxy_app.show_ip(); remote_addr, `%s`' % ip )\n return flask.jsonify( {u'client_ip': ip} )\n\n\[email protected]( u'/search_new_request/', methods=['GET'] )\n@basic_auth.required\ndef search():\n \"\"\" Searches for new requests. \"\"\"\n client_ip = flask.request.remote_addr\n if not client_ip in settings.LEGIT_IPS.keys():\n log.debug( u'- in proxy_app.search_new_request(); client_ip `%s` not in LEGIT_IPS; returning forbidden' % client_ip )\n return flask.abort( 403 )\n db = db_handler.DB_Handler( log )\n result_list = db.search_new_request()\n return_dict = {\n u'request_type': u'search_new_request',\n u'datetime': unicode( datetime.datetime.now() ),\n u'result': result_list }\n return flask.jsonify( return_dict )\n\n\[email protected]( u'/update_request_status/', methods=['POST'] )\n@basic_auth.required\ndef update_request_status():\n \"\"\" Updates db request status. \"\"\"\n log.debug( u'- in proxy_app.update_request_status(); starting' )\n client_ip = flask.request.remote_addr\n log.debug( u'- in proxy_app.update_request_status(); client_ip, `%s`' % client_ip )\n if not client_ip in settings.LEGIT_IPS.keys():\n log.debug( u'- in proxy_app.update_request_status(); returning forbidden' )\n return flask.abort( 403 )\n log.debug( u'- in proxy_app; update_request_status(); ip legit' )\n log.debug( u'- in proxy_app; update_request_status(); flask.request.form.keys(), %s' % sorted(flask.request.form.keys()) )\n db_id = flask.request.form[u'db_id'] # flask will return a '400 - Bad Request' if getting a value fails\n status = flask.request.form[u'status']\n try:\n assert status in [ u'in_process', u'processed' ] # never changing it to its original 'not_yet_processed'\n assert db_id.isdigit()\n except Exception as e:\n log.error( u'- in proxy_app; update_request_status(); params grabbed; keys good but value(s) bad; db_id, `%s`; status, `%s`' % (db_id, status) )\n return flask.abort( 400, u'Bad data.' )\n log.debug( u'- in proxy_app; update_request_status(); params grabbed & data is valid' )\n db = db_handler.DB_Handler( log )\n result_dict = db.update_request_status( db_id, status )\n assert result_dict.keys() == [ u'status_update_result' ]\n return_dict = {\n u'request_type': u'update_request_status',\n u'db_id': db_id,\n u'requested_new_status': status,\n u'datetime': unicode( datetime.datetime.now() ),\n u'result': result_dict[ u'status_update_result' ]\n }\n return flask.jsonify( return_dict )\n\n\[email protected]( u'/add_history_note/', methods=['POST'] )\n@basic_auth.required\ndef add_history_note():\n \"\"\" Adds history note. \"\"\"\n log.debug( u'- in proxy_app.add_history_note(); starting' )\n if not flask.request.remote_addr in settings.LEGIT_IPS.keys():\n log.debug( u'- in proxy_app.add_history_note(); returning forbidden for ip, `%s`' % flask.request.remote_addr )\n return flask.abort( 403 )\n ( db_id, db_h ) = ( flask.request.form[u'db_id'], db_handler.DB_Handler(log) ) # flask will return a '400 - Bad Request' if getting a value fails\n result dbh.add_history_note( db_id )\n return_dict = {\n u'request_type': u'add_history_note', u'db_id': db_id,\n u'datetime': unicode( datetime.datetime.now() ), u'result': result }\n return flask.jsonify( return_dict )\n\n\n\n# if __name__ == '__main__':\n# if os.getenv('DEVBOX') == 'true':\n# app.run( host='0.0.0.0', debug=True )\n# else:\n# app.run()\n" }, { "alpha_fraction": 0.5854083895683289, "alphanum_fraction": 0.5881474018096924, "avg_line_length": 45.155174255371094, "blob_id": "9a61cda0aa791965805d31e9e9e7acae3af2e994", "content_id": "ad523cb21c625521f7cffd38955a3bc3d854718b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8032, "license_type": "no_license", "max_line_length": 167, "num_lines": 174, "path": "/utils/db_handler.py", "repo_name": "birkin/ezb_dbprx", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\" Handles db connection and executes sql. \"\"\"\n\nimport datetime, json, os, pprint, random, sys\nimport MySQLdb\nfrom ezb_dbprx.config import settings\n\n\nclass DB_Handler(object):\n\n def __init__(self, file_logger ):\n \"\"\" Sets up basics. \"\"\"\n self.db_host = settings.DB_HOST\n self.db_port = settings.DB_PORT\n self.db_username = settings.DB_USERNAME\n self.db_password = settings.DB_PASSWORD\n self.db_name = settings.DB_NAME\n self.connection_object = None # populated during queries\n self.cursor_object = None # populated during queries\n self.file_logger = file_logger\n self.key_mapper = { # converts database fields into more generic keys\n u'alt_edition': u'preference_alternate_edition', # needed?\n u'barcode': u'patron_barcode',\n u'bibno': u'item_bib_number', # needed?\n u'created': u'db_create_date',\n u'email': u'patron_email',\n u'eppn': u'patron_shib_eppn',\n u'firstname': u'patron_name_first',\n u'group': u'patron_shib_group',\n u'id': u'db_id',\n u'isbn': u'item_isbn',\n u'lastname': u'patron_name_last',\n u'loc': u'libary_location', # needed?\n u'name': u'patron_name_firstlast',\n u'patronId': u'patron_id', # needed?\n u'pref': u'preference_quick', # needed?\n u'request_status': u'db_request_status',\n u'sfxurl': u'item_openurl',\n u'staffnote': u'staff_note',\n u'title': u'item_title',\n u'volumes': u'item_volumes',\n u'wc_accession': u'item_worldcat_id'\n }\n\n ## execute_sql() ##\n\n def execute_sql(self, sql):\n \"\"\" Executes sql; returns tuple of row-dicts.\n Example return data: ( {row1field1key: row1field1value, row1field2key: row1field2value}, {row2field1key: row2field1value, row2field2key: row2field2value} )\n Called by self.search_new_request(), self.update_request_status(), and self.update_history_note() \"\"\"\n try:\n self._setup_db_connection()\n if not self.cursor_object:\n return\n self.cursor_object.execute( sql )\n dict_list = self.cursor_object.fetchall() # really a tuple of row-dicts\n dict_list = self._unicodify_resultset( dict_list )\n return dict_list\n except Exception as e:\n message = u'in db_handler.execute_sql(); error: %s' % unicode( repr(e).decode(u'utf8', u'replace') )\n self.file_logger.error( message )\n return None\n finally:\n self._close_db_connection()\n\n def _setup_db_connection( self ):\n \"\"\" Sets up connection; populates instance attributes.\n Called by execute_sql() \"\"\"\n self.file_logger.debug( u'in db_handler._setup_db_connection(); starting' )\n try:\n self.connection_object = MySQLdb.connect(\n host=self.db_host, port=self.db_port, user=self.db_username, passwd=self.db_password, db=self.db_name )\n self.file_logger.debug( u'in db_handler._setup_db_connection(); connection-object set' )\n self.cursor_object = self.connection_object.cursor(MySQLdb.cursors.DictCursor)\n return\n except Exception as e:\n message = u'in db_handler._setup_db_connection(); error: %s' % unicode( repr(e).decode(u'utf8', u'replace') )\n self.file_logger.error( message )\n\n def _unicodify_resultset( self, dict_list ):\n \"\"\" Takes tuple of row-dicts;\n Makes true list and ensures all keys and values are unicode;\n Returns list of type-corrected dicts.\n Called by execute_sql() \"\"\"\n result_list = []\n for row_dict in dict_list:\n new_row_dict = {}\n for key,value in row_dict.items():\n if type(value) == datetime.datetime:\n value = unicode(value)\n new_row_dict[ unicode(key) ] = unicode(value)\n result_list.append( new_row_dict )\n return result_list\n\n def _close_db_connection( self ):\n \"\"\" Closes db connection.\n Called by execute_sql() \"\"\"\n try:\n self.cursor_object.close()\n self.connection_object.close()\n return\n except Exception as e:\n message = u'in db_handler._close_db_connection(); error: %s' % unicode( repr(e).decode(u'utf8', u'replace') )\n self.file_logger.error( message )\n\n ## search_new_request() ##\n\n def search_new_request( self ):\n \"\"\" Returns json string of list of dicts on find, empty-list on no-find.\n Called by: proxy_app.search_new_request() \"\"\"\n sql = settings.SEARCH_SQL\n self.file_logger.debug( u'in db_handler.search_new_request; sql, %s' % sql )\n raw_dict_list = self.execute_sql( sql )\n self.file_logger.debug( u'in db_handler.search_new_request; raw_dict_list, %s' % raw_dict_list )\n return_val = []\n if raw_dict_list:\n if len( raw_dict_list ) > 0:\n return_val = self._massage_raw_data( raw_dict_list )\n return return_val\n\n def _massage_raw_data( self, raw_dict_list ):\n \"\"\" Makes keys more generic.\n Returns list of updated dicts\n Called by search_new_request() .\n Possible TODO: add None to self.key_mapper if item isn't needed; test for that here and don't return it. \"\"\"\n updated_list = []\n for entry in raw_dict_list:\n massaged_dict = {}\n for (key, value) in entry.items():\n new_key = self.key_mapper[key]\n massaged_dict[new_key] = value\n updated_list.append( massaged_dict )\n return updated_list\n\n ## update_request_status ##\n\n def update_request_status( self, db_id, status ):\n \"\"\" Updates request table status field.\n Called by proxy_app.update_request_status() \"\"\"\n ## update the status\n update_sql = settings.UPDATE_REQUEST_STATUS_SQL_PATTERN % ( status, db_id )\n self.file_logger.debug( u'in db_handler.update_request_status(); update_sql, %s' % update_sql )\n try:\n self.execute_sql( update_sql )\n except Exception as e:\n self.file_logger.error( u'in db_handler.update_request_status(); problem executing update; exception: %s' % e )\n return { u'status_update_result': u'status_update_failed_on_exception' }\n ## confirm the update was successful\n confirmation_sql = settings.CONFIRM_REQUEST_STATUS_SQL_PATTERN % db_id\n self.file_logger.debug( u'in db_handler.update_request_status(); confirmation_sql, %s' % confirmation_sql )\n try:\n result_dict_list = self.execute_sql( confirmation_sql )\n self.file_logger.debug( u'in db_handler.update_request_status; result_dict_list, %s' % result_dict_list )\n if result_dict_list[0][u'request_status'] == status:\n return { u'status_update_result': u'status_updated' }\n else:\n return { u'status_update_result': u'status_confirmation_failed' }\n except Exception as e:\n self.file_logger.error( u'in db_handler.update_request_status(); problem executing confirmation; exception: %s' % e )\n return { u'status_update_result': u'status_confirmation_failed_on_exception' }\n\n ## add history note ##\n\n def add_history_entry( self, request_id ):\n \"\"\" Creates history table record.\n Called by proxy_app.add_history_note() \"\"\"\n add_history_sql = settings.CREATE_HISTORY_ENTRY_PATTERN % request_id\n self.file_logger.debug( u'in db_handler.add_history_entry(); add_history_sql, %s' % add_history_sql )\n result = self.execute_sql( sql )\n self.file_logger.debug( u'in db_handler.add_history_entry(); result, `%s`' % result )\n return\n\n # end class DB_Handler()\n\n" }, { "alpha_fraction": 0.6393442749977112, "alphanum_fraction": 0.6393442749977112, "avg_line_length": 22.13793182373047, "blob_id": "229e4d6d29dab3861c9a461e5e8750dace62fb7c", "content_id": "15168920730bb18e6f73f17559bd9454bc12fcb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 671, "license_type": "no_license", "max_line_length": 125, "num_lines": 29, "path": "/README.md", "repo_name": "birkin/ezb_dbprx", "src_encoding": "UTF-8", "text": "### Purpose\n\nTest project...\n\nExperiment: this flask app would create a lightweight api to a database.\n\nThis would allow another experimental project to run queries and updates more generically than being tied to direct db calls.\n\n\n### Notes\n\n- This app assumes a project structure like:\n\n some_enclosing_directory/\n ezb_dbprx/\n config/\n proxy_app.py\n env_ezb_dbprx/\n\n\n- This app ssumes an entry in our existing apache .conf file like:\n\n <Directory /path/to/ezb_dbprx>\n Order allow,deny\n Allow from all\n </Directory>\n WSGIScriptAlias /path/to/ezb_dbprx/config/wsgi.py\n\n---\n" }, { "alpha_fraction": 0.6653061509132385, "alphanum_fraction": 0.680272102355957, "avg_line_length": 35.75, "blob_id": "f8f8bdd4e3f64166e81cea17a8e4a951679dc81a", "content_id": "14f9f7dd2e7e83bbb768a2c5c4b914a38e75821d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": "no_license", "max_line_length": 106, "num_lines": 20, "path": "/utils/logger_setup.py", "repo_name": "birkin/ezb_dbprx", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\" Handles log setup. \"\"\"\n\nimport logging, os\nimport logging.handlers\nfrom ezb_dbprx.config import settings\n\n\ndef setup_logger():\n \"\"\" Returns a logger to write to a file. \"\"\"\n filename = u'%s/ezb_dbprx.log' % settings.LOG_DIR\n formatter = logging.Formatter( u'[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s' )\n logger = logging.getLogger( u'ezb_dbprx' )\n level_dict = { u'debug': logging.DEBUG, u'info':logging.INFO }\n logger.setLevel( level_dict[settings.LOG_LEVEL] )\n file_handler = logging.handlers.RotatingFileHandler( filename, maxBytes=(5*1024*1024), backupCount=1 )\n file_handler.setFormatter( formatter )\n logger.addHandler( file_handler )\n return logger\n" }, { "alpha_fraction": 0.7107142806053162, "alphanum_fraction": 0.7114285826683044, "avg_line_length": 44.16128921508789, "blob_id": "033d8fee99d06b33600be690c02f3b14fb0e4450", "content_id": "e624a953b7c8d6da9ff62fb285b63b2c99f81c79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1400, "license_type": "no_license", "max_line_length": 166, "num_lines": 31, "path": "/config/settings.py", "repo_name": "birkin/ezb_dbprx", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport json, os\n\n\n## db access\nDB_HOST = unicode( os.environ.get(u'ezb_dbprx__DB_HOST') )\nDB_PORT = int( unicode(os.environ.get(u'ezb_dbprx__DB_PORT')) )\nDB_USERNAME = unicode( os.environ.get( u'ezb_dbprx__DB_USERNAME') )\nDB_PASSWORD = unicode( os.environ.get(u'ezb_dbprx__DB_PASSWORD') )\nDB_NAME = unicode( os.environ.get( u'ezb_dbprx__DB_NAME') )\n\n## db sql\nSEARCH_SQL = unicode( os.environ.get( u'ezb_dbprx__SEARCH_SQL') ) # for db_handler.DB_Handler.search_new_request()\nUPDATE_REQUEST_STATUS_SQL_PATTERN = unicode( os.environ.get( u'ezb_dbprx__UPDATE_REQUEST_STATUS_SQL_PATTERN') ) # for db_handler.DB_Handler.update_request_status()\nCONFIRM_REQUEST_STATUS_SQL_PATTERN = unicode( os.environ.get( u'ezb_dbprx__CONFIRM_REQUEST_STATUS_SQL_PATTERN') ) # for db_handler.DB_Handler.update_request_status()\nCREATE_HISTORY_ENTRY_PATTERN = unicode( os.environ.get( u'ezb_dbprx__CREATE_HISTORY_ENTRY_SQL_PATTERN') ) # for db_handler.DB_Handler.add_history_entry()\n\n\n## file-logger\nLOG_DIR = unicode( os.environ.get(u'ezb_dbprx__LOG_DIR') )\nLOG_LEVEL = unicode( os.environ.get(u'ezb_dbprx__LOG_LEVEL') )\n\n## basic auth\nBASIC_AUTH_USERNAME = unicode( os.environ.get(u'ezb_dbprx__BASIC_AUTH_USERNAME') )\nBASIC_AUTH_PASSWORD = unicode( os.environ.get(u'ezb_dbprx__BASIC_AUTH_PASSWORD') )\n\n## other\nLEGIT_IPS = json.loads( unicode(os.environ.get(u'ezb_dbprx__LEGIT_IPS')) )\n\n# end\n" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.7017543911933899, "avg_line_length": 18, "blob_id": "5f98802adf338437dd83adaa98e4d15e92154223", "content_id": "389dab73b56d4a81dd6bb80ffff28bafbacd269b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 57, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/config/requirements.txt", "repo_name": "birkin/ezb_dbprx", "src_encoding": "UTF-8", "text": "Flask-BasicAuth==0.2.0\nFlask==0.10.1\nMySQL-python==1.2.3\n" } ]
6
sebflipper/tingbot-screen-brightness
https://github.com/sebflipper/tingbot-screen-brightness
d61cfea3f945db2623b777c119825834f47127af
aebc6d3980759982faed37d2cab6ce8b1aa517af
ff38002d003eb9f8b74f71fb9ddb282f94d8bb6a
refs/heads/master
2020-03-23T15:33:37.104317
2018-07-20T20:49:58
2018-07-21T07:11:39
141,756,555
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.75, "alphanum_fraction": 0.7564102411270142, "avg_line_length": 35.70588302612305, "blob_id": "7843f968182bab055969490f2f805a9dab237946", "content_id": "8cff5121a4bce99ca24dc82b2bb4fa5ee55facfc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 624, "license_type": "permissive", "max_line_length": 138, "num_lines": 17, "path": "/README.md", "repo_name": "sebflipper/tingbot-screen-brightness", "src_encoding": "UTF-8", "text": "# Tingbot Screen Brightness\n\nSets the brightness of your [Tingbot](http://tingbot.com/) screen.\n\n* **Right button** Increase brightness\n* **Right middle button** Turn screen on\n* **Left middle button** Turn screen off\n* **Left button** Decrease brightness\n\n![Screenshot](screenshot.png)\n\n## Installation\n\n1. Download and install [Tide](https://github.com/tingbot/tide/releases/).\n2. Download the latest version of [Screen Brightness.tingapp](https://github.com/sebflipper/tingbot-screen-brightness/archive/master.zip).\n3. Open `Screen Brightness.tingapp` with Tide.\n4. Run in the simulator or send to your Tingbot and open.\n" }, { "alpha_fraction": 0.5829145908355713, "alphanum_fraction": 0.6221105456352234, "avg_line_length": 24.538461685180664, "blob_id": "f95b811ba181b3142188385ba2e59c50c42cb019", "content_id": "ad2bd57495d8cd6cdd32e30086caa5795dfc8efd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "permissive", "max_line_length": 77, "num_lines": 39, "path": "/Screen Brightness.tingapp/main.py", "repo_name": "sebflipper/tingbot-screen-brightness", "src_encoding": "UTF-8", "text": "import tingbot\nfrom tingbot import *\n\ndef update():\n screen.fill(color='black')\n screen.brightness = tingbot.app.settings['brightness']\n \n if tingbot.app.settings['brightness'] > 0:\n screen.text('-', font_size=15, xy=(10,15))\n screen.text('off', font_size=15, xy=(50,15))\n screen.text('on', font_size=15, xy=(275,15))\n screen.text('+', font_size=15, xy=(310,15))\n \n screen.text('Brightness\\n %i%%' % tingbot.app.settings['brightness'])\n\n@left_button.press\ndef press():\n if tingbot.app.settings['brightness'] > 0:\n tingbot.app.settings['brightness'] -= 10\n update()\n \n@midleft_button.press\ndef press():\n tingbot.app.settings['brightness'] = 0\n update()\n \n@midright_button.press\ndef press():\n tingbot.app.settings['brightness'] = 100\n update()\n\n@right_button.press\ndef press():\n if tingbot.app.settings['brightness'] < 100:\n tingbot.app.settings['brightness'] += 10\n update()\n\nupdate()\ntingbot.run()" } ]
2
byambaa1982/combine_tables
https://github.com/byambaa1982/combine_tables
a815a130d89407a375b29573a5bc0c62c6a05ba2
c4dd40ca6ac25167ddfa43cbeda450accd6d6d14
019230ae78644c6fbd4f84c20fe5c897b842d308
refs/heads/master
2021-03-10T08:55:38.740495
2020-04-08T05:00:50
2020-04-08T05:00:50
246,440,745
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6150173544883728, "alphanum_fraction": 0.6310763955116272, "avg_line_length": 29.719999313354492, "blob_id": "bf40e68844809e52882d2278a943b138d9fd402e", "content_id": "2b7ddb28097561089302cffa646a08f751713a32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2304, "license_type": "no_license", "max_line_length": 114, "num_lines": 75, "path": "/main.py", "repo_name": "byambaa1982/combine_tables", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\n# ------- Read CSV data ----------\n\n\n# stop=pd.read_csv('Arkiv/stops.txt')\nstop_times=pd.read_csv('Arkiv/stop_times.txt')\n# calendar=pd.read_csv('Arkiv/calendar.txt')\ncalendar_dates=pd.read_csv('Arkiv/calendar_dates.txt')\ntrips=pd.read_csv('Arkiv/trips.txt')\n\n# ----------Conditional Subset ----------\n\nnew_calendar_dates=calendar_dates[(calendar_dates.date>20200225 )& (calendar_dates.date<20200301)]\n\n\n\n#----------- remove useless columns from calendar data----------------------------\n\nnew_csv=stop_times.iloc[0:,0:5]\n\n\n# ---------------Merge them on service_id and make a new column named \"unique_trip_id\"\na=trips\nb=new_calendar_dates\nc=pd.merge(a, b, on='service_id', how='left')\nc['unique_trip_id']=c.index+1\ne=stop_times\nf=pd.merge(c, e, on='trip_id', how='left')\ndf=f\n\n\n\n\n# result['unique_trip_id'] = result.groupby(['trip_id','end_date']).ngroup()\n# result=result.sort_values(by=['unique_trip_id', 'stop_sequence'])\n\n\n# unique_trip_id=1\n# new=[]\n# for i in range(0,len(my_list)-1):\n# if my_list[i] == my_list[i+1]:\n# new.append(unique_trip_id)\n# else:\n# unique_trip_id+=1\n# new.append(unique_trip_id)\n\n# -------- Make int into string and combine two column on new columns-------\n\n\ndf['unique_trip_id']=df['unique_trip_id'].map(lambda x: x+1)\ndf['first']=df['unique_trip_id'].map(lambda x: str(x))\ndf['second']=df['stop_sequence'].map(lambda x: str(x))\ndf['first_date']=df['start_date'].map(lambda x: str(x))\ndf['second_date']=df['end_date'].map(lambda x: str(x))\ndf['unique_sub_trip_id']= df[['first', 'second']].apply(lambda x: '.'.join(x), axis=1)\ndf['arrival_time']= df[['second_date', 'arrival_time']].apply(lambda x: ' '.join(x), axis=1)\ndf['departure_time']= df[['first_date', 'departure_time']].apply(lambda x: ' '.join(x), axis=1)\n\n# --------- Rerange data ---------------\n\ndf=df[['unique_trip_id','unique_sub_trip_id','trip_id','stop_id','stop_sequence','arrival_time','departure_time']]\n\nunique_trip_id_list=df.unique_trip_id.unique().tolist()\n\ndf_list=[]\nfor i in unique_trip_id_list:\n df1 = df.loc[df['unique_trip_id'] == i]\n df1['arrival_time'] = df1['arrival_time'].shift(-1)\n df1['stop_sequence'] = df1['stop_sequence'].shift(-1)\n df_list.append(df1)\nfinal_result=pd.concat(df_list)\n\nfinal_result.to_csv('result.csv')\n" }, { "alpha_fraction": 0.6479880809783936, "alphanum_fraction": 0.6769001483917236, "avg_line_length": 35.46739196777344, "blob_id": "83413a695343f939282102a6e55de559879f99cc", "content_id": "d7ec218df41cb4f70adbd8eedde794871e3b1b9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3355, "license_type": "no_license", "max_line_length": 248, "num_lines": 92, "path": "/README.md", "repo_name": "byambaa1982/combine_tables", "src_encoding": "UTF-8", "text": "# Combining multitables and some data engineering\n\n## Goal\nOne of my customers sent me the following message on fiverr: `I need help creating one table/CSV-file from a few different tables (in GTFS-format).` \nThere are three different tables. `trips.txt calendar.txt and stop_times.txt`. The goal is that we needed to combine them and made some data engineering. \n\n### Calendar dates\n\n index | service_id | date | exception_type\n-------- | ------------- | -------- | --------------\n0 | 1 | 20200205 | 1\n1 | 1 \t | 20200206 | 1\n2 | 1 | 20200207 | 1\n3 | 1 | 20200212 | 1\n\n\n### Trips\n\n![trips](/images/trips.png)\n\n### Stop times\n\n![stop times](/images/stop_times.png)\n\n### My goal\n\n![Final](/images/final.png)\n\nThe following is all the tables we need to do:\n```python\nstop_times=pd.read_csv('Arkiv/stop_times.txt')\ncalendar_dates=pd.read_csv('Arkiv/calendar_dates.txt')\ntrips=pd.read_csv('Arkiv/trips.txt')\n```\nOur customer needs only trip data between Feb 25, 2020 and March 01, 2020. So we need to select calender dates Dataframe by condition on date column. \n```python \nnew_calendar_dates=calendar_dates[(calendar_dates.date>20200225 )& (calendar_dates.date<20200301)]\n```\nAlso, We need only the first five columns from `Stop times`. \n```python\nnew_csv=stop_times.iloc[0:,0:5]\n```\nAfter that I merged them on service_id and make a new column named \"unique_trip_id\"\n```python\na=trips\nb=new_calendar_dates\nc=pd.merge(a, b, on='service_id', how='left')\nc['unique_trip_id']=c.index+1\ne=stop_times\nf=pd.merge(c, e, on='trip_id', how='left')\ndf=f\n```\nWe cannot combine columns have intiger data. We have to turn `int` values into `str` values before combine.\n```python\ndf['unique_trip_id']=df['unique_trip_id'].map(lambda x: x+1)\ndf['first']=df['unique_trip_id'].map(lambda x: str(x))\ndf['second']=df['stop_sequence'].map(lambda x: str(x))\ndf['first_date']=df['start_date'].map(lambda x: str(x))\ndf['second_date']=df['end_date'].map(lambda x: str(x))\ndf['unique_sub_trip_id']= df[['first', 'second']].apply(lambda x: '.'.join(x), axis=1)\ndf['arrival_time']= df[['second_date', 'arrival_time']].apply(lambda x: ' '.join(x), axis=1)\ndf['departure_time']= df[['first_date', 'departure_time']].apply(lambda x: ' '.join(x), axis=1)\n```\nAs our customer's wish, we have to rearrange Pandas column sequence. \n```python\ndf=df[['unique_trip_id','unique_sub_trip_id','trip_id','stop_id','stop_sequence','arrival_time','departure_time']]\n```\nThe most challenging part of this task is that we have to shift down `arrival_time` and `stop_sequence` by one. But it is not a whole dataset but it consists many subsets by `unique_trip_id`. We have to use `shift` method on each subset seprately. \n\n```python\nunique_trip_id_list=df.unique_trip_id.unique().tolist()\n\ndf_list=[]\nfor i in unique_trip_id_list:\n df1 = df.loc[df['unique_trip_id'] == i]\n df1['arrival_time'] = df1['arrival_time'].shift(-1)\n df1['stop_sequence'] = df1['stop_sequence'].shift(-1)\n df_list.append(df1)\nfinal_result=pd.concat(df_list)\n```\nHere we go. This is our file to deliver to the customer. \n```python\nfinal_result.to_csv('result.csv')\n```\nIf you have anything to ask, please contact me clicking following link? \n\n + www.fiverr.com/coderjs\nPlease connect me in linkedin: \n + https://www.linkedin.com/in/byamba-enkhbat-026722162/\n\t\n\nThank you\n" }, { "alpha_fraction": 0.7138686180114746, "alphanum_fraction": 0.741605818271637, "avg_line_length": 24.407407760620117, "blob_id": "1b9919d3127e59ad962d62ada3b1f111da85af31", "content_id": "ca5185ba5e7073182642c8b020575e3c30808d75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 685, "license_type": "no_license", "max_line_length": 98, "num_lines": 27, "path": "/test.py", "repo_name": "byambaa1982/combine_tables", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\nstop=pd.read_csv('Arkiv/stops.txt')\nstop_times=pd.read_csv('Arkiv/stop_times.txt')\ncalendar=pd.read_csv('Arkiv/calendar.txt')\ncalendar_dates=pd.read_csv('Arkiv/calendar_dates.txt')\ntrips=pd.read_csv('Arkiv/trips.txt')\n\nprint(stop.shape)\nprint(stop_times.shape)\nprint(calendar.shape)\nprint(calendar_dates.shape)\nprint(trips.shape)\n\n# ----------Conditional Subset ----------\n\nnew_calendar_dates=calendar_dates[(calendar_dates.date>20200225 )& (calendar_dates.date<20200301)]\n\nprint(new_calendar_dates.date.min())\nprint(new_calendar_dates.date.max())\nprint(new_calendar_dates.shape)\n\n\ntrips=trips.iloc[0:,1:3]\nprint(trips.head())\nprint(trips.shape)" } ]
3
darshanime/scrapy-tutorials
https://github.com/darshanime/scrapy-tutorials
82909d0d41e940a2bc13563071eda365e2dace9a
a68744b78daab73c4fe54bcfb31ef12f60a7ebb1
9afe0426abaca54300df3b2b56c970c2536bf8fc
refs/heads/master
2016-08-11T13:11:06.499085
2015-09-28T20:37:13
2015-09-28T20:37:13
43,314,329
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6583333611488342, "alphanum_fraction": 0.6583333611488342, "avg_line_length": 19.16666603088379, "blob_id": "09eba4288706ca043e547cde08123d81519eb9c4", "content_id": "f986b3f6a84673e86fd4720184cfec526e87084d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "permissive", "max_line_length": 30, "num_lines": 6, "path": "/cardekho/cardekho/items.py", "repo_name": "darshanime/scrapy-tutorials", "src_encoding": "UTF-8", "text": "from scrapy import Item, Field\n\nclass CardekhoItem(Item):\n title = Field()\n price = Field()\n distance = Field()" }, { "alpha_fraction": 0.5664085745811462, "alphanum_fraction": 0.5872543454170227, "avg_line_length": 49.89393997192383, "blob_id": "47ab63961ecd01528ed28dc82fe553ab31bba177", "content_id": "339bf374511782e2974bba4b36552c7f81b28381", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3358, "license_type": "permissive", "max_line_length": 204, "num_lines": 66, "path": "/housing/housing/spiders/housing_spider.py", "repo_name": "darshanime/scrapy-tutorials", "src_encoding": "UTF-8", "text": "from housing.items import HousingItemBuy\nfrom scrapy import Spider\nfrom scrapy.http.request import Request\n\n#To parse the JSON received\nimport json\n\nclass HousingSpider(Spider):\n name = \"housing\"\n allowed_domains = [\"housing.com\"]\n custom_settings = {'USER_AGENT' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'}\n \n\n def start_requests(self):\n #We have 1080 pages to fetch\n for count in range(1,1081):\n \n print \"Getting page : %s\" %count\n \n yield Request(\"https://buy.housing.com/api/v1/buy/index/filter?poly=f97f947ffae6408ac295&results_per_page=30&p=\" + str(count) + \"&resale_total_count=30045&np_total_count=2329\", self.parse_buy)\n\n \n def parse_buy(self, response):\n \n #Since the response is purely JSON\n text = response.body\n\n #Parsing it using the builtin json utility\n parsed_json = json.loads(text)\n \n #For each entry, we will store all the information we defined earlier in items.py\n #The parsed json can be read as a dict. Examining the JSON, we can easily navigate \n #to where we have the data we need.\n\n for iter in range(30):\n item = HousingItemBuy()\n item['ad_price'] = parsed_json[\"hits\"][iter][\"formatted_price\"]\n item['ad_url'] = parsed_json[\"hits\"][iter][\"inventory_canonical_url\"]\n item['ad_title'] = parsed_json[\"hits\"][iter][\"title\"]\n item['ad_coordinates'] = parsed_json[\"hits\"][iter][\"location_coordinates\"]\n item['ad_date_added'] = parsed_json[\"hits\"][iter][\"date_added\"]\n item['ad_area'] = parsed_json[\"hits\"][iter][\"inventory_configs\"][0][\"area\"]\n item['ad_bedrooms'] = parsed_json[\"hits\"][iter][\"inventory_configs\"][0][\"number_of_bedrooms\"]\n item['ad_toilets'] = parsed_json[\"hits\"][iter][\"inventory_configs\"][0][\"number_of_toilets\"]\n item['ad_contact_persons_number'] = parsed_json[\"hits\"][iter][\"contact_persons_info\"][0][\"contact_no\"]\n item['ad_contact_persons_id'] = parsed_json[\"hits\"][iter][\"contact_persons_info\"][0][\"profile_id\"]\n item['ad_contact_persons_name'] = parsed_json[\"hits\"][iter][\"contact_persons_info\"][0][\"name\"]\n \n #Some entries do not have the ad_city/ad_locality variable. \n try:\n item['ad_city'] = parsed_json[\"hits\"][iter][\"display_city\"][0]\n except :\n item['ad_city'] = \"None given\"\n \n try:\n item['ad_locality'] = parsed_json[\"hits\"][iter][\"display_city\"][1]\n except :\n item['ad_locality'] = \"None given\"\n \n item['ad_gas_pipeline'] = parsed_json[\"hits\"][iter][\"inventory_amenities\"][\"has_gas_pipeline\"]\n item['ad_lift'] = parsed_json[\"hits\"][iter][\"inventory_amenities\"][\"has_lift\"]\n item['ad_parking'] = parsed_json[\"hits\"][iter][\"inventory_amenities\"][\"has_parking\"]\n item['ad_gym'] = parsed_json[\"hits\"][iter][\"inventory_amenities\"][\"has_gym\"]\n item['ad_swimming_pool'] = parsed_json[\"hits\"][iter][\"inventory_amenities\"][\"has_swimming_pool\"]\n item['ad_id'] = parsed_json[\"hits\"][iter][\"id\"]\n yield item" }, { "alpha_fraction": 0.5594855546951294, "alphanum_fraction": 0.5594855546951294, "avg_line_length": 26.086956024169922, "blob_id": "e0238280d5b1597c3d8b8af38a4a256e3b19d206", "content_id": "7eb086f9d3ad3097c29e4b1732a03eff69d69d70", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "permissive", "max_line_length": 59, "num_lines": 23, "path": "/housing/housing/items.py", "repo_name": "darshanime/scrapy-tutorials", "src_encoding": "UTF-8", "text": "from scrapy import Item, Field\n\nclass HousingItemBuy(Item):\n ad_id = Field()\n ad_title = Field()\n ad_price = Field()\n ad_area = Field()\n ad_url = Field()\n ad_date_added = Field()\n ad_coordinates = Field()\n ad_bedrooms = Field()\n ad_toilets = Field()\n ad_gas_pipeline = Field()\n ad_lift = Field()\n ad_parking = Field() \n ad_gym = Field()\n ad_swimming_pool = Field()\n ad_city = Field()\n ad_locality = Field()\n ad_contact_persons_name = Field()\n ad_contact_persons_number = Field()\n ad_contact_persons_id = Field()\n count = Field()" }, { "alpha_fraction": 0.584269642829895, "alphanum_fraction": 0.6219903826713562, "avg_line_length": 50.95833206176758, "blob_id": "060d4e724e22ed3fcb1a41ada6677c63bea9584a", "content_id": "a1fc05a4781f554e379a67697b98cc9ff6ff0f30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1246, "license_type": "permissive", "max_line_length": 162, "num_lines": 24, "path": "/cardekho/cardekho/spiders/cardekho_spider.py", "repo_name": "darshanime/scrapy-tutorials", "src_encoding": "UTF-8", "text": "from cardekho.items import CardekhoItem\nfrom scrapy import Spider\nfrom scrapy.http.request import Request\n\nclass CardekhoSpider(Spider):\n name = \"cardekho\"\n allowed_domains = [\"http://www.cardekho.com\"]\n start_urls = [\"http://www.cardekho.com/used-cars+in+mumbai-all/\"]\n \n #This is to not get redirected by CarDekho. We are identifying ourselves as a web-browser. \n custom_settings = {'USER_AGENT' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'}\n \n def start_requests(self):\n #There are 162 pages, we are asking Scrapy to get us all of them.\n for i in range(162):\n yield Request(\"http://www.cardekho.com/used-cars+in+mumbai-all/\" + str(i), self.parse)\n\n def parse(self, response):\n for sel in response.xpath('/html/body/main/div/div[2]/div[2]/div[9]/form/ul/li'):\n item = CardekhoItem()\n item ['title'] = sel.xpath('div[1]/div[2]/div[1]/a/text()').extract()\n item ['price'] = sel.xpath('div[1]/div[3]/div[1]/text()').extract()\n item ['distance'] = sel.xpath('div[1]/div[2]/div[3]/ul/li[1]/div[2]/span/text()').extract() \n yield item" }, { "alpha_fraction": 0.8227847814559937, "alphanum_fraction": 0.8227847814559937, "avg_line_length": 38.5, "blob_id": "03ae24ce8d99d31c9afb9abf2b0dab86d72ec27c", "content_id": "3849abde87fe23477c321e4b19a780d425be24d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 79, "license_type": "permissive", "max_line_length": 59, "num_lines": 2, "path": "/README.md", "repo_name": "darshanime/scrapy-tutorials", "src_encoding": "UTF-8", "text": "# scrapy-tutorials\nCode for the Scrapy Tutorial series on darshanime.github.io\n" }, { "alpha_fraction": 0.5632411241531372, "alphanum_fraction": 0.5928853750228882, "avg_line_length": 35.21428680419922, "blob_id": "541e04705ab2469d37ff621b6d5439d570d23b98", "content_id": "a76ea228649b137f4da79bc18e4dbc1ea1bd4992", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "permissive", "max_line_length": 73, "num_lines": 14, "path": "/scrapy 101/scrapy101/spiders/dmoz.py", "repo_name": "darshanime/scrapy-tutorials", "src_encoding": "UTF-8", "text": "from scrapy.spiders import BaseSpider\nfrom scrapy101.items import Scrapy101Item\n\nclass Scrapy101Spider(BaseSpider):\n name = \"dmoz\"\n allowed_domains = [\"dmoz.org/\"]\n start_urls = [\"http://www.dmoz.org/\"]\n \n def parse(self, response):\n for div in response.xpath('/html/body/div[3]/div[3]/div[1]/div'):\n for entry in div.xpath('span'):\n item = Scrapy101Item()\n item['title'] = entry.xpath('a/text()').extract()\n print item['title']" }, { "alpha_fraction": 0.7105262875556946, "alphanum_fraction": 0.75, "avg_line_length": 18.25, "blob_id": "32ab82e407283241f1aa7c7e0467b6d22b6801bd", "content_id": "6d182da03cf0be2c463a53cb775254ca8473dfa9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "permissive", "max_line_length": 30, "num_lines": 4, "path": "/scrapy 101/scrapy101/items.py", "repo_name": "darshanime/scrapy-tutorials", "src_encoding": "UTF-8", "text": "from scrapy import Item, Field\n\nclass Scrapy101Item(Item):\n title = Field()" } ]
7
prakashpatil1430/Fashionproject
https://github.com/prakashpatil1430/Fashionproject
8c15fb94a61e55909e04c61fbbd8361923b93090
7e4a8d579b66cac896ec99aa3c3bdca2665646ff
e7c99a07a68365307a99e0bd6f4a81a28f5e5afd
refs/heads/main
2023-08-13T18:28:44.920230
2021-09-26T17:32:14
2021-09-26T17:32:14
410,614,138
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5315126180648804, "alphanum_fraction": 0.5735294222831726, "avg_line_length": 25.44444465637207, "blob_id": "65f382b4a4441a24dae94fbec4b838a8f4ce3e80", "content_id": "49c0335ef4f1bd05934374a9ec452125b50d2db0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 131, "num_lines": 18, "path": "/fashion/migrations/0003_alter_product_category.py", "repo_name": "prakashpatil1430/Fashionproject", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.6 on 2021-09-25 07:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fashion', '0002_cart_orderplaced_product'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='product',\n name='category',\n field=models.CharField(choices=[('TS', 'Tshirts'), ('W', 'Watches'), ('P', 'Perfumes'), ('S', 'Shoes')], max_length=2),\n ),\n ]\n" }, { "alpha_fraction": 0.7444579005241394, "alphanum_fraction": 0.7444579005241394, "avg_line_length": 64.2631607055664, "blob_id": "36edbecee4d5dbf560f1c02a6f0b7d3d9c332d06", "content_id": "a83a0f3646c7c8b57b6f0f86f76448caf8c6714f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2481, "license_type": "no_license", "max_line_length": 249, "num_lines": 38, "path": "/fashion/urls.py", "repo_name": "prakashpatil1430/Fashionproject", "src_encoding": "UTF-8", "text": "\nfrom django.urls import path\n# from.views import address,add_to_cart,mobile,checkout,orders,ProductView,ProductDetailView,CustomerRegistrationView,ProfileView,show_cart,laptop,fashion_top,fashion_bottom,gym_product,home_decor,plus_cart,minus_cart,remove_cart,payment_done,orders\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n# from django.contrib.auth import views as auth_views\nfrom fashion.views import HomeView,perfume_view,product_view,shoes_view,watch_view,tshirt_view,ProductDetailView,add_to_cart,CustomerRegistrationView,ProfileView,address,show_cart,remove_cart,checkout,orders\nfrom django.contrib.auth import views as auth_views\nfrom .forms import LoginForm,MyPasswordChangeForm\n# ,MyPasswordResetForm,MySetPasswordForm\n\nurlpatterns = [\n path('',HomeView.as_view(),name='home'),\n path('alldata/',product_view,name=\"alldata\"),\n path('perfume/',perfume_view,name=\"perfume\"),\n path('perfume/<slug:data>/',perfume_view,name=\"perfume\"),\n path('watches/',watch_view,name=\"watches\"),\n path('watches/<slug:data>/',watch_view,name=\"watches\"),\n path('tshirts/',tshirt_view,name=\"tshirts\"),\n path('tshirts/<slug:data>/',tshirt_view,name=\"tshirts\"),\n path('shoes/',shoes_view,name=\"shoes\"),\n path('shoes/<slug:data>/',shoes_view,name=\"shoes\"),\n path('product-detail/<int:pk>',ProductDetailView.as_view(),name=\"product-detail\"),\n path('add-to-cart/',add_to_cart,name=\"add-to-cart\"),\n path('cart/',show_cart,name='cart'),\n path('removecart/<int:pk>/',remove_cart,name='removecart'),\n path('profile/',ProfileView.as_view(),name=\"profile\"),\n path('address/',address,name=\"address\"),\n path('orders/',orders,name=\"orders\"),\n path('regestration/',CustomerRegistrationView.as_view(),name=\"customerregistration\"),\n path('login/', auth_views.LoginView.as_view(template_name='fashion/login.html',authentication_form=LoginForm), name='login'),\n path('logout/', auth_views.LogoutView.as_view(next_page='login') ,name='logout'),\n path('passwordchange/',auth_views.PasswordChangeView.as_view(template_name='fashion/passwordchange.html',form_class=MyPasswordChangeForm,success_url='/passwordchangedone/'),name=\"passwordchange\"),\n path('passwordchangedone/', auth_views.PasswordChangeDoneView.as_view(template_name='fashion/passwordchangedone.html'), name='passwordchangedone'),\n path('checkout/',checkout,name='checkout'),\n\n \n \n]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n" }, { "alpha_fraction": 0.6254838705062866, "alphanum_fraction": 0.6352688074111938, "avg_line_length": 37.2716064453125, "blob_id": "ed2db1ab1c7a357d3a813adb0155f3f6e38b7978", "content_id": "6bcdd80422b2b42061f7013fc5bea3d7b61ca3b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9300, "license_type": "no_license", "max_line_length": 152, "num_lines": 243, "path": "/fashion/views.py", "repo_name": "prakashpatil1430/Fashionproject", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views import View\nfrom .models import Product, Customer, Cart, OrderPlaced\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom .forms import CustomerRegistrationForm, CustomerProfileForm\nfrom django.contrib import messages\nfrom django.db.models import Q\n\n# Create your views here.\n\n\nclass HomeView(View):\n def get(self, request):\n all_product = Product.objects.all()\n totalitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n return render(request, 'fashion/index2.html', context={'data': all_product, 'totalitem': totalitem})\n\n\ndef product_view(request, data=None):\n all_product = Product.objects.all()\n return render(request, 'fashion/index2.html', {'data': all_product})\n\n\nclass ProductDetailView(View):\n\n def get(self, request, pk):\n totalitem = 0\n product = Product.objects.get(pk=pk)\n item_already_in_cart = False\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n item_already_in_cart = Cart.objects.filter(\n Q(product=product.id) & Q(user=request.user)).exists()\n return render(request, 'fashion/productdetail.html', {'product': product, 'totalitem': totalitem, 'item_already_in_cart': item_already_in_cart})\n\n\ndef perfume_view(request, data=None):\n\n all_product = Product.objects.all()\n totalitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n if data == None:\n perfume = Product.objects.filter(category='P')\n elif data == 'Below1000':\n perfume = Product.objects.filter(\n category='P').filter(discounted_price__lt=1000)\n elif data == 'Above1000':\n perfume = Product.objects.filter(\n category='P').filter(discounted_price__gt=1000)\n\n return render(request, 'fashion/index2.html', {'perfume': perfume, 'totalitem': totalitem, 'data': all_product})\n\n\ndef tshirt_view(request, data=None):\n all_product = Product.objects.all()\n totalitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n if data == None:\n tshirts = Product.objects.filter(category='TS')\n elif data == 'm-tshirt':\n tshirts = Product.objects.filter(category='TS').filter(brand=data)\n elif data == 'w-tshirt':\n tshirts = Product.objects.filter(category='TS').filter(brand=data)\n elif data == 'Below1000':\n tshirts = Product.objects.filter(\n category='TS').filter(discounted_price__lt=1000)\n elif data == 'Above1000':\n tshirts = Product.objects.filter(\n category='TS').filter(discounted_price__gt=1000)\n return render(request, 'fashion/index2.html', {'tshirts': tshirts, 'totalitem': totalitem, 'data': all_product})\n\ndef watch_view(request, data=None):\n all_product = Product.objects.all()\n totalitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n if data == None:\n watches = Product.objects.filter(category='W')\n elif data == 'm-watch':\n watches = Product.objects.filter(category='W').filter(brand=data)\n elif data == 'w-match':\n tshirts = Product.objects.filter(category='W').filter(brand=data)\n elif data == 'Below1000':\n watches = Product.objects.filter(\n category='W').filter(discounted_price__lt=1000)\n elif data == 'Above1000':\n watches = Product.objects.filter(\n category='W').filter(discounted_price__gt=1000)\n return render(request, 'fashion/index2.html', {'watches': watches, 'totalitem': totalitem, 'data': all_product})\n\n\n\ndef shoes_view(request, data=None):\n all_product = Product.objects.all()\n totalitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n\n if data == None:\n shoes = Product.objects.filter(category='S')\n elif data == 'man-shoes':\n shoes = Product.objects.filter(category='S').filter(brand=data)\n elif data == 'women-shoes':\n shoes = Product.objects.filter(category='S').filter(brand=data)\n elif data == 'Above1000':\n shoes = Product.objects.filter(\n category='S').filter(discounted_price__gt=1000)\n elif data == 'Below1000':\n shoes = Product.objects.filter(\n category='S').filter(discounted_price__lt=1000)\n\n return render(request, 'fashion/index2.html', {'shoes': shoes, 'totalitem': totalitem, 'data': all_product})\n\n\ndef add_to_cart(request):\n if request.user.is_authenticated:\n user = request.user\n product_id = request.GET.get('prod_id')\n product = Product.objects.get(id=product_id)\n Cart(user=user, product=product).save()\n return redirect('/cart')\n else:\n return redirect('/login')\n\n\ndef remove_cart(request, pk):\n user = request.user\n product = Product.objects.get(pk=pk)\n c = Cart.objects.get(Q(product=product) & Q(user=user))\n c.delete()\n return redirect('/cart')\n\n\nclass CustomerRegistrationView(View):\n\n def get(self, request):\n form = CustomerRegistrationForm()\n return render(request, 'fashion/customer_reg.html', {'form': form})\n\n def post(self, request):\n form = CustomerRegistrationForm(request.POST)\n if form.is_valid():\n messages.success(\n request, 'Congratulations!! Registered Successfully.')\n form.save()\n return render(request, 'fashion/customer_reg.html', {'form': form})\n\n\nclass ProfileView(View):\n def get(self, request):\n totalitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n form = CustomerProfileForm()\n return render(request, 'fashion/profile.html', {'form': form, 'active': 'btn-primary', 'totalitem': totalitem})\n\n def post(self, request):\n totalitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n form = CustomerProfileForm(request.POST)\n if form.is_valid():\n usr = request.user\n name = form.cleaned_data['name']\n locality = form.cleaned_data['locality']\n city = form.cleaned_data['city']\n state = form.cleaned_data['state']\n zipcode = form.cleaned_data['zipcode']\n reg = Customer(user=usr, name=name, locality=locality,\n city=city, state=state, zipcode=zipcode)\n reg.save()\n messages.success(\n request, 'Congratulations!! Profile Updated Successfully.')\n return render(request, 'fashion/profile.html', {'form': form, 'active': 'btn-primary', 'totalitem': totalitem})\n\n\ndef checkout(request):\n if request.user.is_authenticated:\n user = request.user\n addr = Customer.objects.filter(user=user)\n cart_items = Cart.objects.filter(user=user)\n amount = 0.0\n shipping_amount = 70\n total_amount = 0.0\n cart_product = [p for p in Cart.objects.all() if p.user == user]\n if cart_product:\n for p in cart_product:\n tempamount = (p.quantity * p.product.discounted_price)\n amount = amount+tempamount\n total_amount = amount + shipping_amount\n return render(request, 'fashion/checkout.html', {'addr': addr, 'cart_items': cart_items, 'total_amount': total_amount})\n else:\n return redirect('/login')\n\n\ndef address(request):\n totalitem = 0\n if request.user.is_authenticated:\n totalitem = len(Cart.objects.filter(user=request.user))\n addr = Customer.objects.filter(user=request.user)\n return render(request, 'fashion/address.html', {'addr': addr, 'active': 'btn-primary', 'totalitem': totalitem})\n\n\ndef show_cart(request):\n if request.user.is_authenticated:\n user = request.user\n cart = Cart.objects.filter(user=user)\n amount = 0.0\n shipping_amount = 70\n total_amount = 0.0\n cart_product = [p for p in Cart.objects.all() if p.user == user]\n if cart_product:\n for p in cart_product:\n tempamount = (p.quantity * p.product.discounted_price)\n amount = amount+tempamount\n total_amount = amount + shipping_amount\n return render(request, 'fashion/addtocart.html', {'carts': cart, 'amount': amount, 'total_amount': total_amount})\n else:\n return render(request, 'fashion/emptycart.html')\n else:\n return redirect('/login')\n\n\ndef orders(request):\n user = request.user\n customer_id = user.id\n print(customer_id)\n cartid = Cart.objects.filter(user=user)\n customer = Customer.objects.get(id=customer_id)\n for cid in cartid:\n OrderPlaced(user=user, customer=customer,\n product=cid.product, quantity=cid.quantity).save()\n # print(\"Order Saved\")\n cid.delete()\n # print(\"Cart Item Deleted\")\n return redirect(\"/orders\")\n\n op = OrderPlaced.objects.filter(user=request.user)\n return render(request, 'fashion/orders.html', {'order_placed': op})\n" } ]
3
001001matheus001001/Minecraft-python
https://github.com/001001matheus001001/Minecraft-python
69d401cc98352e3fc55ca5f778312998c5320d1d
1633201b62181297c173b61eb63ac200a6ee9c53
e70869b0fdc4e1ba6b933f74c937f9014841beb1
refs/heads/master
2021-09-05T21:41:09.824336
2018-01-31T06:40:33
2018-01-31T06:40:33
119,645,921
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7130177617073059, "alphanum_fraction": 0.715976357460022, "avg_line_length": 21.600000381469727, "blob_id": "dff7cf5bcb060710a13bde2786203a9f4a87540a", "content_id": "095dfe8dd599b6373b37228998a3eda18b3b5b32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 41, "num_lines": 15, "path": "/teleportpreciso.py", "repo_name": "001001matheus001001/Minecraft-python", "src_encoding": "UTF-8", "text": "# Conectar ao Minecraft\nfrom mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\n# String para variaveis de 3D\n\nx = input(\"localização desejada para x \")\ny = input(\"localização desejada para y \")\nz = input(\"localização desejada para z \")\n\n# Mudar a posição do jogador\n\nmc.player.setPos(x, y, z)\n\nprint(\"Fim de locomoção ->\", x, y ,z)" }, { "alpha_fraction": 0.7218044996261597, "alphanum_fraction": 0.7293233275413513, "avg_line_length": 43.33333206176758, "blob_id": "879bdbe6a1691e3acdde967f3c92965f6cb903cd", "content_id": "11518dd3ba728aa04aaef7c4764d66c50bc4c7eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 133, "license_type": "no_license", "max_line_length": 73, "num_lines": 3, "path": "/README.md", "repo_name": "001001matheus001001/Minecraft-python", "src_encoding": "UTF-8", "text": "# Minecraft-python\n# Dependencia Python3 + mcpi.minecraft + minecraft upado no python ! ! ! \n# Vou postar todos que criar ate mais .\n" }, { "alpha_fraction": 0.7069892287254333, "alphanum_fraction": 0.7123655676841736, "avg_line_length": 23.866666793823242, "blob_id": "e673a506df7f12a7a65764f7cd91d5c5c046d3d1", "content_id": "cf90d505162b6cc92c9ce86f1a6385d1c419f1bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 51, "num_lines": 15, "path": "/CriaBlocos.py", "repo_name": "001001matheus001001/Minecraft-python", "src_encoding": "UTF-8", "text": "# Conectar ao Minecraft\nfrom mcpi.minecraft import Minecraft\nmc = Minecraft.create()\n\n# String para variaveis de 3D\n\nbloco = input(\"Numero do bloco desejado:\")\n\nx = input(\"localização desejada para: x \")\ny = input(\"localização desejada para: y \")\nz = input(\"localização desejada para: z \")\n\nmc.setBlock(x, y, z, bloco)\n\nprint(\"Fim de script obs: cria 1 bloco por vez ! \")" } ]
3
JiriPapousek/facebook-analysis
https://github.com/JiriPapousek/facebook-analysis
6568759982db47b818ed2808846cb11ec2050c73
fb600ac09fa9142d8b86e5f941969394394360f5
092e811b26d9e0cfbef1f2779a965bc52a16aeb2
refs/heads/master
2021-09-01T14:12:29.815184
2017-12-27T11:28:34
2017-12-27T11:28:34
109,899,115
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5249901413917542, "alphanum_fraction": 0.5461001992225647, "avg_line_length": 31.633047103881836, "blob_id": "b22bd9e950cbc66c206a1e815e1ac5fa7728c13f", "content_id": "de9d095456503047e50bf177add1a0123f5ca286", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15223, "license_type": "no_license", "max_line_length": 86, "num_lines": 466, "path": "/analysis.py", "repo_name": "JiriPapousek/facebook-analysis", "src_encoding": "UTF-8", "text": "from os import listdir\nimport matplotlib.pyplot as plt\nimport pylab\nimport operator\nimport numpy as np\nimport sys\nimport calendar\n\n\ndef clear_data(first_tag, last_lag, text):\n \"\"\"\n This function returns string between first_tag and last_tag in text. It also\n returns changed text so that it will not include this string and tags.\n \"\"\"\n first_index = text.find(first_tag) + len(first_tag)\n last_index = text.find(last_lag)\n result = text[first_index:last_index]\n text = text[last_index + len(last_lag):len(text)]\n return result, text\n\n\ndef messages_to_a_list():\n \"\"\"\n This function makes a list of data from the html files of conversations,so\n that it would be easier to work with those data later.\n \"\"\"\n chat_names = listdir(sys.argv[1] + \"/messages\")\n final_list = []\n\n for chat_name in chat_names:\n file = open(sys.argv[1] + \"/messages/\" + chat_name)\n conversation = file.read()\n file.close()\n conversation = conversation.split(\"<div class=\\\"message\\\">\")\n people_in_conversation, conversation[0] = clear_data(\n \"Konverzace s&nbsp;\", \"</title>\", conversation[0])\n people_in_conversation = people_in_conversation.split(\",\")\n final_list.append([people_in_conversation, []])\n conversation.pop(0)\n for message in conversation:\n \"\"\"\n Finds a name of user who sent the message, time of sending and\n message itself, afterwards it gets rid of all tags around and\n appends the result as a new value to the list.\n \"\"\"\n clear_name, message = clear_data(\n \"<span class=\\\"user\\\">\", \"</span>\", message)\n clear_time, message = clear_data(\n \"<span class=\\\"meta\\\">\", \"</span>\", message)\n clear_text, message = clear_data(\"<p>\", \"</p>\", message)\n final_list[len(final_list) -\n 1][1].append([clear_name, clear_time, clear_text])\n\n return final_list\n\n\ndef identify_the_owner():\n \"\"\"\n This function returns the full name of owner of the account.\n \"\"\"\n file = open(sys.argv[1] + \"/index.htm\")\n profile = file.read()\n file.close()\n result, profile = clear_data(\"<h1>\", \"</h1>\", profile)\n return result\n\n\ndef count_sent_vs_received_messsages(data, number_of_results):\n \"\"\"\n This function counts all received and sent messages in every face to face\n conversation.\n \"\"\"\n final = []\n for conversation in data:\n if len(conversation[0]) == 1:\n final.append([conversation[0], 0, 0, 0])\n for message in conversation[1]:\n final[len(final) - 1][3] += 1\n if message[0] == identify_the_owner():\n final[len(final) - 1][1] += 1\n else:\n final[len(final) - 1][2] += 1\n final = sorted(final, key=operator.itemgetter(3))[::-1]\n\n names = []\n my_messages = []\n others_messages = []\n for i in range(number_of_results):\n names.append(final[i][0][0])\n my_messages.append(final[i][1])\n others_messages.append(final[i][2])\n print(names)\n print(my_messages)\n print(others_messages)\n return names, my_messages, others_messages\n\ndef show_sent_vs_received_messages(data, number_of_results):\n \"\"\"\n This function shows the top results of received and sent messages in\n every face to face conversation in the bar chart.\n \"\"\"\n result = count_sent_vs_received_messsages(data, number_of_results)\n names = result[0]\n my_messages = result[1]\n others_messages = result[2]\n\n plt.figure(figsize=(10, 6))\n plt.title(\"Sent and received messages in the most used conversations\")\n plt.bar(\n np.arange(len(my_messages)),\n my_messages,\n width=0.4,\n align='edge',\n alpha=0.7,\n color='r',\n label=\"Sent messages\")\n plt.bar(\n np.arange(len(others_messages)) + 0.4,\n others_messages,\n width=0.4,\n align='edge',\n alpha=0.7,\n color='b',\n label=\"Received messages\")\n plt.legend()\n plt.xticks(np.arange(len(names))+0.4, names, rotation=90)\n plt.ylabel(\"Number of messages\")\n plt.xlim(0, number_of_results)\n plt.tight_layout()\n pylab.savefig(\"sent_vs_received.png\")\n plt.show()\n\n\ndef count_word(data, word, person):\n \"\"\"\n The function returns the list of all messages including certain word written\n by specified person.\n \"\"\"\n word_number = 0\n for conversation in data:\n for message in conversation[1]:\n if word in message[2] and message[0] == person:\n print(str(conversation[0]) + \" \" +\n message[1] + \" \" + message[2])\n word_number += 1\n return word_number\n\n\ndef clear_time(str):\n \"\"\"\n Takes plain time string as an argument and converts it to the list of\n separated values.\n \"\"\"\n minutes = int(str[str.find(\":\") + 1:str.find(\":\") + 3])\n if str[str.find(\":\") - 2] != \" \":\n hours = int(str[str.find(\":\") - 2:str.find(\":\")])\n else:\n hours = int(str[str.find(\":\") - 1:str.find(\":\")])\n day = int(str[0:str.find(\".\")])\n month = str.split(\" \")[1]\n year = int(str.split(\" \")[2])\n return [hours, minutes, day, month, year]\n\ndef count_messages_throughout_a_day(data):\n \"\"\"\n The function counts all sent and received messages messages in every\n minute of a day.\n \"\"\"\n my_daily_messages = [0] * 60 * 24\n others_daily_messages = [0] * 60 * 24\n for conversation in data:\n if len(conversation[0]) == 1:\n for message in conversation[1]:\n t = clear_time(message[1])\n time = 60 * t[0] + t[1]\n if message[0] == identify_the_owner():\n my_daily_messages[time] += 1\n else:\n others_daily_messages[time] += 1\n return my_daily_messages, others_daily_messages\n\ndef show_messages_throughout_a_day(data):\n \"\"\"\n The function shows all sent and received messages messages in every\n minute of a day in a plot chart.\n \"\"\"\n result = count_messages_throughout_a_day(data)\n my_daily_messages = result[0]\n others_daily_messages = result[1]\n\n plt.figure(figsize=(10, 6))\n plt.title(\"Sent and received messages throughout a day\")\n plt.ylabel(\"Number of messages\")\n plt.plot(\n np.arange(len(my_daily_messages)),\n my_daily_messages,\n color='b',\n alpha=0.7,\n label=\"Sent messages\")\n plt.plot(\n np.arange(len(others_daily_messages)),\n others_daily_messages,\n color='r',\n alpha=0.7,\n label=\"Received messages\")\n plt.legend(loc='upper left')\n times = [\n \"0:00\",\n \"3:00\",\n \"6:00\",\n \"9:00\",\n \"12:00\",\n \"15:00\",\n \"18:00\",\n \"21:00\"]\n plt.xticks([180 * i for i in range(8)], times)\n plt.ylabel(\"Number of messages\")\n plt.xlim(0, 1440)\n plt.tight_layout()\n pylab.savefig(\"messages_throughout_a_day.png\")\n plt.show()\n\ndef count_men_vs_women(data):\n \"\"\"\n This function counts all sent and received messages to men and women\n separately.\n \"\"\"\n sent_to_women = 0\n sent_to_men = 0\n received_from_women = 0\n received_from_men = 0\n for conversation in data:\n if len(conversation[0]) == 1:\n for message in conversation[1]:\n name = conversation[0][0]\n if message[0] == identify_the_owner():\n if name[len(name) - 3:len(name)] == \"ová\":\n sent_to_women += 1\n else:\n sent_to_men += 1\n else:\n if name[len(name) - 3:len(name)] == \"ová\":\n received_from_women += 1\n else:\n received_from_men += 1\n return sent_to_men, sent_to_women, received_from_men, received_from_women\n\ndef show_men_vs_women(data):\n \"\"\"\n This function shows all sent and received messages to men and women\n separately in the bar chart.\n \"\"\"\n\n result = count_men_vs_women(data)\n sent_to_men = result[0]\n sent_to_women = result[1]\n received_from_men = result[2]\n received_from_women = result[3]\n\n plt.figure(figsize=(10, 6))\n plt.title(\"Exchanged messages with men and women\")\n plt.bar(np.arange(2), [sent_to_men, sent_to_women],\n color='r', width=0.4, alpha=0.7, label=\"Sent messages\")\n plt.bar(np.arange(2) + 0.40, [received_from_men, received_from_women],\n color='b', width=0.4, alpha=0.7, label=\"Received messages\")\n plt.legend(loc='upper left')\n plt.xticks(np.arange(2)+0.2, [\"Men\", \"Women\"])\n plt.ylabel(\"Number of messages\")\n pylab.savefig(\"men_vs_women.png\")\n plt.show()\n\ndef count_who_starts_conversation(data, number_of_results):\n \"\"\"\n This function counts the messages starting conversations sent by me vs.\n those sent by someone else.\n \"\"\"\n final = []\n list_of_greetings = [\n \"zdravíčko\",\n \"ahoj\",\n \"čau\",\n \"čus\",\n \"nazdar\",\n \"nazdárek\",\n \"dobrý den\"]\n for conversation in data:\n if len(conversation[0]) == 1:\n final.append([conversation[0][0], 0, 0, 0])\n conversation[1] = conversation[1][::-1]\n previous_message = conversation[1][0]\n previous_time = clear_time(previous_message[1])\n for i in range(1, len(conversation[1])):\n message = conversation[1][i]\n time = clear_time(message[1])\n if time[2] != previous_time[2]:\n if time[3] != previous_time[3] or time[4] != previous_time[4] or (\n time[2] - previous_time[2]) != 1:\n if message[0] == identify_the_owner():\n final[len(final) - 1][1] += 1\n else:\n final[len(final) - 1][2] += 1\n final[len(final) - 1][3] += 1\n else:\n for greeting in list_of_greetings:\n if message[2].lower().find(greeting) != -1:\n if message[0] == identify_the_owner():\n final[len(final) - 1][1] += 1\n else:\n final[len(final) - 1][2] += 1\n final[len(final) - 1][3] += 1\n previous_time = time\n\n final = sorted(final, key=operator.itemgetter(3))[::-1]\n names = []\n me = []\n them = []\n for i in range(number_of_results):\n names.append(final[i][0] + \" \")\n me.append(final[i][1])\n them.append(final[i][2])\n return names, me, them\n\ndef show_who_starts_conversation(data, number_of_results):\n \"\"\"\n This function creates the bar chart showing the rates of messages starting\n the conversation compared on basis of who sent that message.\n \"\"\"\n result = count_who_starts_conversation(data, number_of_results)\n names = result[0]\n me = result[1]\n them = result[2]\n\n plt.figure(figsize=(10, 6))\n plt.title(\"Who starts the conversation first\")\n plt.bar(\n np.arange(len(me)),\n me,\n width=0.4,\n align=\"edge\",\n alpha=0.7,\n color=\"r\",\n label=\"Me\")\n plt.bar(\n np.arange(len(them)) + 0.4,\n them,\n width=0.4,\n align=\"edge\",\n alpha=0.7,\n color=\"b\",\n label=\"Other person\")\n plt.legend()\n plt.xticks(np.arange(len(names))+0.4, names, rotation=90)\n plt.ylabel(\"Number of openings\")\n plt.xlim(0, number_of_results)\n plt.tight_layout()\n pylab.savefig(\"who_starts_the_conversation.png\")\n plt.show()\n\n\ndef count_msgs_throughout_a_year(data, year):\n \"\"\"\n This function returns all messages in year by month, separated on messages\n sent by the account owner and received by him.\n \"\"\"\n months_my_messages = {}\n months_others_messages = {}\n for conversation in data:\n for message in conversation[1]:\n time = clear_time(message[1])\n if time[4] == year:\n if message[0] == identify_the_owner():\n if time[3] in months_my_messages:\n months_my_messages[time[3]] += 1\n else:\n months_my_messages[time[3]] = 0\n else:\n if time[3] in months_others_messages:\n months_others_messages[time[3]] += 1\n else:\n months_others_messages[time[3]] = 0\n my_messages = []\n others_messages = []\n months = [\n \"leden\",\n \"únor\",\n \"březen\",\n \"duben\",\n \"květen\",\n \"červen\",\n \"červenec\",\n \"srpen\",\n \"září\",\n \"říjen\",\n \"listopad\",\n \"prosinec\"]\n for month in months:\n if month in months_my_messages:\n my_messages.append(months_my_messages[month])\n else:\n my_messages.append(0)\n if month in months_others_messages:\n others_messages.append(months_others_messages[month])\n else:\n others_messages.append(0)\n\n return my_messages, others_messages\n\n\ndef show_msgs_throughout_a_year(data):\n \"\"\"\n This function draws a chart of sent and received messages by month\n throughout several years.\n \"\"\"\n sent = []\n received = []\n for year in [i + 2014 for i in range(4)]:\n result = count_msgs_throughout_a_year(data, year)\n sent.append(result[0])\n received.append(result[1])\n\n colors = [\"r\", \"b\", \"g\", \"m\"]\n\n plt.figure(figsize=(10, 6))\n plt.title(\"Sent and received messages by month in last years\")\n\n color_lines = []\n for i in range(4):\n color_lines.append(plt.plot(\n np.arange(12),\n sent[i],\n ls=\"solid\",\n color=colors[i],\n alpha=0.8,\n label=str(2014 + i))[0])\n plt.plot(\n np.arange(12),\n received[i],\n ls=\"dashed\",\n color=colors[i],\n alpha=0.8)\n\n black_lines = []\n black_lines.append(plt.plot([], [], color=\"#000000\", ls=\"solid\")[0])\n black_lines.append(plt.plot([], [], color=\"#000000\", ls=\"dashed\")[0])\n\n colors_legend = plt.legend(color_lines,\n [str(i + 2014) for i in range(4)],\n loc=\"upper left\")\n plt.legend(black_lines,\n [\"Sent messages\",\"Received messages\"],\n loc=\"upper right\")\n plt.gca().add_artist(colors_legend)\n\n plt.xticks(np.arange(13) - 1, calendar.month_name, rotation=70)\n plt.xlim(0, 11)\n plt.ylabel(\"Number of messages\")\n plt.tight_layout()\n pylab.savefig(\"msgs_throughout_by_month.png\")\n plt.show()\n\n\nshow_msgs_throughout_a_year(messages_to_a_list())\nshow_who_starts_conversation(messages_to_a_list(), 15)\nshow_men_vs_women(messages_to_a_list())\nshow_sent_vs_received_messages(messages_to_a_list(), 15)\nshow_messages_throughout_a_day(messages_to_a_list())" }, { "alpha_fraction": 0.8285714387893677, "alphanum_fraction": 0.8285714387893677, "avg_line_length": 51.5, "blob_id": "658b6e0cccf08199cb0967ffc91c2b6b63594df5", "content_id": "260234f296d1d1bd2702ebaabfaf9a75d70edf00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 105, "license_type": "no_license", "max_line_length": 84, "num_lines": 2, "path": "/README.md", "repo_name": "JiriPapousek/facebook-analysis", "src_encoding": "UTF-8", "text": "# facebook-analysis\nMakes an analysis of Facebook conversations on base of data from downloaded archive.\n" } ]
2
sdotson/udacity-machine-learning-nanodegree
https://github.com/sdotson/udacity-machine-learning-nanodegree
ad62e22f4c96322b19d1aa12193d59c1573b0e94
0f91015650a5fe6850f0f434c227dba954770722
af565f3f167529c4e0a2f77189d06e1b07e71e0a
refs/heads/master
2022-11-10T00:47:10.738133
2020-06-26T21:03:59
2020-06-26T21:03:59
273,815,726
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8028169274330139, "alphanum_fraction": 0.8028169274330139, "avg_line_length": 93.66666412353516, "blob_id": "f652aff2549415163fce9a88d0870b9498f58a66", "content_id": "b3884718201b6ecbb7b5f8f7c56ff95e8f4e1b95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 568, "license_type": "no_license", "max_line_length": 292, "num_lines": 6, "path": "/README.md", "repo_name": "sdotson/udacity-machine-learning-nanodegree", "src_encoding": "UTF-8", "text": "# Udacity Machine Learning Nano-degree projects\n\nHere are the projects I've completed over the course of Udacity's Machine Learning nano-degree program.\n\n- [Finding donors](finding-donors) - Used supervised learning methods to determine who is a likely donor from a dataset.\n- [Classifying flowers](classifying-flowers) - A command line application that uses a pretrained torchvision model architecture of your choice to train the model on a given dataset, save a checkpoint for that trained model, and then use that model later to predict the class of a given image.\n" }, { "alpha_fraction": 0.7503828406333923, "alphanum_fraction": 0.751148521900177, "avg_line_length": 30.095237731933594, "blob_id": "4c8c671400cb5f583a20ffd35eb68f67b8b8be00", "content_id": "6f3d0e69ff7dc1af8d214977cd6f821ae2fef235", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1306, "license_type": "permissive", "max_line_length": 85, "num_lines": 42, "path": "/classifying-flowers/predict.py", "repo_name": "sdotson/udacity-machine-learning-nanodegree", "src_encoding": "UTF-8", "text": "# third party imports\nimport argparse\nimport json\n\n# local imports\nfrom model import predict, load_checkpoint\nfrom utils import determine_device\nfrom validation import validate_predict_args\n\n# CLI defaults\nTOP_K_DEFAULT = 1\n\n# configure argument parser\nparser = argparse.ArgumentParser(description=\"Trains model and saves checkpoint\")\nparser.add_argument(\"image_path\", help=\"the path for the image you wish to classify\")\nparser.add_argument(\"checkpoint\", help=\"the model checkpoint you would like to use\")\nparser.add_argument(\"--category_names\")\nparser.add_argument(\"--gpu\", action=\"store_true\")\nparser.add_argument(\"--top_k\", type=int, default=TOP_K_DEFAULT)\n\n# parse and validate args\nargs = parser.parse_args()\nvalidate_predict_args(args)\n\n# Getting category to name mapping\ncat_to_name = None\nif args.category_names:\n with open(args.category_names, \"r\") as f:\n cat_to_name = json.load(f)\n\n# use gpu if available and requested in args\ndevice = determine_device(args.gpu)\nprint(\"Using device {}...\".format(device.type))\n\nprint(\"Loading checkpoint...\")\nmodel = load_checkpoint(args.checkpoint, device)\n\nprint(\"Predicting class for image...\")\nchart_data = predict(args.image_path, model, device, cat_to_name, args.top_k)\n\nprint(\"Printing chart of classes and probabilities...\")\nprint(chart_data)\n" }, { "alpha_fraction": 0.6670753955841064, "alphanum_fraction": 0.6670753955841064, "avg_line_length": 36.068180084228516, "blob_id": "f0cc5814cc597bbe5e8b03bbf54354764a8b4994", "content_id": "bb0b3d2d29a7f5866c2616b4b1f657a73286420b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1631, "license_type": "permissive", "max_line_length": 83, "num_lines": 44, "path": "/classifying-flowers/validation.py", "repo_name": "sdotson/udacity-machine-learning-nanodegree", "src_encoding": "UTF-8", "text": "from os import path\nimport torch\nfrom torchvision import models\n\n# validates train.py args\ndef validate_train_args(args):\n # check cuda\n if args.gpu and torch.cuda.is_available() == False:\n # we don't want to throw sand in the user's face\n # but let them know we are falling back to CPU\n print(\"GPU is not enabled for this device, falling back to CPU\")\n\n # check data_directory existance\n if path.exists(args.data_directory) == False:\n raise ValueError(\n \"data directory does not exist: {}\".format(args.data_directory)\n )\n\n # check save_dir existance\n if args.save_dir and path.exists(args.save_dir) == False:\n raise ValueError(\"save directory does not exist: {}\".format(args.save_dir))\n\n\n# validates predict.py args\ndef validate_predict_args(args):\n # check cuda\n if args.gpu and torch.cuda.is_available() == False:\n # we don't want to throw sand in the user's face\n # but let them know we are falling back to CPU\n print(\"GPU is not enabled for this device, falling back to CPU\")\n\n # check data_directory existance\n if path.exists(args.image_path) == False:\n raise ValueError(\"image path does not exist: {}\".format(args.image_path))\n\n # check checkpoint existance\n if path.exists(args.checkpoint) == False:\n raise ValueError(\"checkpoint does not exist: {}\".format(args.checkpoint))\n\n # check category names existance\n if args.category_names and path.exists(args.category_names) == False:\n raise ValueError(\n \"category names does not exist: {}\".format(args.category_names)\n )\n" }, { "alpha_fraction": 0.7057453393936157, "alphanum_fraction": 0.722437858581543, "avg_line_length": 28.609195709228516, "blob_id": "0bdf2b891e18119d0b6c33045d442e781e0762fd", "content_id": "a12fd917189b373899426abacc32dc59d8eeb6fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2576, "license_type": "permissive", "max_line_length": 84, "num_lines": 87, "path": "/classifying-flowers/train.py", "repo_name": "sdotson/udacity-machine-learning-nanodegree", "src_encoding": "UTF-8", "text": "# third party imports\nimport argparse\nimport os\nimport torch\nfrom torchvision import models\n\n# local imports\nfrom model import create_dataloaders, create_model, train_model\nfrom utils import determine_device\nfrom validation import validate_train_args\n\n# CLI defaults\nHIDDEN_UNITS_DEFAULT = 2048\nARCH_DEFAULT = \"vgg16\"\nLEARNING_RATE_DEFAULT = 0.001\nEPOCHS_DEFAULT = 8\n\n# other settings\nBATCH_SIZE = 60\nDROPOUT_PROBABILITY = 0.5\nARCH_CHOICES = [\n \"vgg16\",\n \"vgg16_bn\",\n \"vgg11\",\n \"vgg11_bn\",\n \"vgg13\",\n \"vgg13_bn\",\n \"vgg19\",\n \"vgg19_bn\",\n \"densenet121\",\n \"densenet161\",\n \"densenet169\",\n \"densenet201\",\n]\n\n# configure argument parser\nparser = argparse.ArgumentParser(description=\"Trains model and saves checkpoint\")\nparser.add_argument(\"data_directory\", help=\"the directory for the training data\")\nparser.add_argument(\"--arch\", choices=ARCH_CHOICES, default=ARCH_DEFAULT)\nparser.add_argument(\"--gpu\", action=\"store_true\")\nparser.add_argument(\"--learning_rate\", type=float, default=LEARNING_RATE_DEFAULT)\nparser.add_argument(\"--save_dir\")\nparser.add_argument(\"--epochs\", type=int, default=EPOCHS_DEFAULT)\nparser.add_argument(\"--hidden_units\", type=int, default=HIDDEN_UNITS_DEFAULT)\n\n# parse CLI args\nargs = parser.parse_args()\n\n# do some additional validation on args\nvalidate_train_args(args)\n\n# get dataloaders and class_to_idx map\nprint(\"Creating dataloaders...\")\ndataloaders, class_to_idx = create_dataloaders(args.data_directory, BATCH_SIZE)\n\n# use gpu if available and requested in args\ndevice = determine_device(args.gpu)\nprint(\"Using device {}...\".format(device.type))\n\nprint(\"Creating model...\")\ntraining_directory = args.data_directory + \"/train/\"\noutput_units_size = sum(\n [os.path.isdir(training_directory + i) for i in os.listdir(training_directory)]\n)\nmodel, input_size = create_model(\n args.arch, args.hidden_units, DROPOUT_PROBABILITY, output_units_size, device\n)\n\n# train the model in place\nprint(\"Training model...\")\ntrain_model(model, dataloaders, args.epochs, args.learning_rate, device)\n\n# save checkpoint\nprint(\"Saving checkpoint...\")\ncheckpoint = {\n \"arch\": args.arch,\n \"batch_size\": BATCH_SIZE,\n \"class_to_idx\": class_to_idx,\n \"dropout_probability\": DROPOUT_PROBABILITY,\n \"hidden_size\": args.hidden_units,\n \"input_size\": input_size,\n \"output_size\": output_units_size,\n \"state_dict\": model.state_dict(),\n}\nsave_path = args.save_dir + \"/checkpoint.pth\" if args.save_dir else \"checkpoint.pth\"\ntorch.save(checkpoint, save_path)\nprint(\"Done. Checkpoint has been saved at {}\".format(save_path))\n" }, { "alpha_fraction": 0.6909871101379395, "alphanum_fraction": 0.6909871101379395, "avg_line_length": 28.125, "blob_id": "7b7a55fc31d04f9f952f90abe0a4db0426115fe5", "content_id": "4f68b6d27a72359926a8d1246a4e678e8eed74ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "permissive", "max_line_length": 75, "num_lines": 8, "path": "/classifying-flowers/utils.py", "repo_name": "sdotson/udacity-machine-learning-nanodegree", "src_encoding": "UTF-8", "text": "import torch\n\n\ndef determine_device(gpu_flag_enabled):\n \"\"\"Determine device given gpu flag and the availability of cuda\"\"\"\n return torch.device(\n \"cuda\" if torch.cuda.is_available() and gpu_flag_enabled else \"cpu\"\n )\n" }, { "alpha_fraction": 0.6032524108886719, "alphanum_fraction": 0.6140565872192383, "avg_line_length": 32.25185012817383, "blob_id": "a51c1701687499b0a580fad8ce3adf9238ee170f", "content_id": "96b4f72b41f33f3eee8cbd86d01076459dd23970", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8978, "license_type": "permissive", "max_line_length": 108, "num_lines": 270, "path": "/classifying-flowers/model.py", "repo_name": "sdotson/udacity-machine-learning-nanodegree", "src_encoding": "UTF-8", "text": "import argparse\nfrom collections import OrderedDict\nfrom torchvision import datasets, models, transforms\nimport torch\nfrom torch import nn, optim\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nimport time\n\n\ndef create_dataloaders(data_directory, batch_size):\n \"\"\"Create dataloaders for training, validation, and test data.\"\"\"\n means = [0.485, 0.456, 0.406]\n std_deviations = [0.229, 0.224, 0.225]\n image_size = 224\n rotation = 30\n\n train_dir = data_directory + \"/train\"\n valid_dir = data_directory + \"/valid\"\n test_dir = data_directory + \"/test\"\n\n train_transform = transforms.Compose(\n [\n transforms.RandomRotation(rotation),\n transforms.RandomResizedCrop(image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(means, std_deviations),\n ]\n )\n\n test_transform = transforms.Compose(\n [\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize(means, std_deviations),\n ]\n )\n\n train_dataset = datasets.ImageFolder(train_dir, transform=train_transform)\n test_dataset = datasets.ImageFolder(test_dir, transform=test_transform)\n valid_dataset = datasets.ImageFolder(valid_dir, transform=test_transform)\n\n dataloaders = {\n \"train\": torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size, shuffle=True\n ),\n \"test\": torch.utils.data.DataLoader(test_dataset, batch_size=batch_size),\n \"valid\": torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size),\n }\n\n class_to_idx = train_dataset.class_to_idx\n\n return dataloaders, class_to_idx\n\n\ndef create_classifier(\n input_size, hidden_units_size, dropout_probability, output_units_size\n):\n \"\"\"Create and return classifier.\"\"\"\n return nn.Sequential(\n OrderedDict(\n [\n (\"fc1\", nn.Linear(input_size, hidden_units_size)),\n (\"relu\", nn.ReLU()),\n (\"dropout\", nn.Dropout(p=dropout_probability)),\n (\"fc2\", nn.Linear(hidden_units_size, output_units_size)),\n (\"output\", nn.LogSoftmax(dim=1)),\n ]\n )\n )\n\n\ndef determine_classifier_input_size(classifier):\n \"\"\"Return input size for classifier\"\"\"\n is_classifier_sequential = isinstance(\n classifier, torch.nn.modules.container.Sequential\n )\n input_size = (\n classifier[0].in_features\n if is_classifier_sequential\n else classifier.in_features\n )\n return input_size\n\n\ndef create_model(\n arch, hidden_units_size, dropout_probability, output_units_size, device\n):\n \"\"\"Create pretrained model with custom classifier for given architecture.\"\"\"\n model = getattr(models, arch)(pretrained=True)\n\n for param in model.parameters():\n param.requires_grad = False\n\n # define new classifier\n input_size = determine_classifier_input_size(model.classifier)\n model.classifier = create_classifier(\n input_size, hidden_units_size, dropout_probability, output_units_size\n )\n model.to(device)\n\n return model, input_size\n\n\ndef train_model(model, dataloaders, epochs, learning_rate, device):\n \"\"\"Train model and periodically log validation stats.\"\"\"\n images_trained = 0\n print_every = 5\n running_loss = 0\n\n optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)\n criterion = nn.NLLLoss()\n train_start = time.time()\n for epoch in range(epochs):\n model.train()\n\n for inputs, labels in dataloaders[\"train\"]:\n images_trained += 1\n inputs, labels = inputs.to(device), labels.to(device)\n\n optimizer.zero_grad()\n\n logps = model.forward(inputs)\n loss = criterion(logps, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if images_trained % print_every == 0:\n model.eval()\n model.to(device)\n\n with torch.no_grad():\n accuracy = 0\n validation_loss = 0\n for images, labels in dataloaders[\"valid\"]:\n images, labels = images.to(device), labels.to(device)\n\n logps = model.forward(images)\n validation_loss += criterion(logps, labels).item()\n\n ps = torch.exp(logps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n\n print(\n f\"Epoch {epoch+1}/{epochs} (image {images_trained}).. \"\n f\"Train loss: {running_loss/print_every:.3f}.. \"\n f\"Validation loss: {validation_loss/len(dataloaders['valid']):.3f}.. \"\n f\"validation accuracy: {accuracy/len(dataloaders['valid']):.3f}\"\n )\n\n running_loss = 0\n model.train()\n print(\"Training completed in {} seconds\".format(time.time() - train_start))\n\n\ndef process_image(image_path):\n \"\"\" Scale, crop, and normalize a PIL image for a PyTorch model and \n return as Torch tensor.\n \"\"\"\n with Image.open(image_path) as image:\n shortest_side_length = 256\n is_width_bigger = image.size[0] > image.size[1]\n new_size = (\n [image.size[0], shortest_side_length]\n if is_width_bigger\n else [shortest_side_length, image.size[1]]\n )\n\n # return image with new size\n resized_image = image.resize(new_size)\n width, height = resized_image.size\n\n # determine center crop bounding box\n crop_size = 224\n left = (width - crop_size) / 2\n upper = (height - crop_size) / 2\n right = (width + crop_size) / 2\n lower = (height + crop_size) / 2\n\n # crop the image\n cropped_image = resized_image.crop((left, upper, right, lower))\n\n # transform to numpy array\n np_image = np.array(cropped_image)\n\n # squish and normalize\n np_image_squished = np_image / 255\n means = np.array([0.485, 0.456, 0.406])\n std_deviations = np.array([0.229, 0.224, 0.229])\n normalized_image = (np_image_squished - means) / std_deviations\n\n # we need to change order of dimensions to meet pytorch's expectations\n transposed_image = np.transpose(normalized_image, (2, 0, 1))\n return torch.from_numpy(transposed_image)\n\n\ndef predict(image_path, model, device, cat_to_name, top_k):\n \"\"\" Predict the class (or classes) of an image using a trained deep learning model.\n \"\"\"\n predict_start = time.time()\n model.to(device)\n\n processed_image = process_image(image_path)\n\n # needs to be a float or computer gets angry with me\n image_float = processed_image.float().unsqueeze(0)\n\n # run image through model\n model.eval()\n model_output = model.forward(image_float.to(device))\n predictions = torch.exp(model_output)\n\n # top predictions and top labels\n top_preds, top_labels = predictions.topk(top_k)\n\n # need to detach in order to call numpy\n top_preds = top_preds.detach()\n\n if device.type != \"cpu\":\n top_preds = top_preds.cpu()\n\n top_preds = top_preds.numpy().tolist()\n top_labels = top_labels.tolist()\n\n data = {\"class\": pd.Series(model.class_to_idx)}\n\n # if there is cat_to_name translation dict around, we can add the flower_name column\n if cat_to_name:\n data[\"flower_name\"] = pd.Series(cat_to_name)\n\n chart_data = pd.DataFrame(data)\n chart_data = chart_data.set_index(\"class\")\n chart_data = chart_data.iloc[top_labels[0]]\n chart_data[\"probabilities\"] = top_preds[0]\n\n print(\n \"Processing and prediction completed in {} seconds\".format(\n time.time() - predict_start\n )\n )\n\n return chart_data\n\n\ndef load_checkpoint(checkpoint_path, device):\n \"\"\"Load checkpoint at checkpoint_path with device and return pretrained model with custom classifier.\"\"\"\n # Below is a solution for loading checkpoint saved on a gpu device and I believe vice versa\n # https://discuss.pytorch.org/t/on-a-cpu-device-how-to-load-checkpoint-saved-on-gpu-device/349\n checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)\n model = getattr(models, checkpoint[\"arch\"])(pretrained=True)\n\n input_size = determine_classifier_input_size(model.classifier)\n model.classifier = create_classifier(\n input_size,\n checkpoint[\"hidden_size\"],\n checkpoint[\"dropout_probability\"],\n checkpoint[\"output_size\"],\n )\n model.load_state_dict(checkpoint[\"state_dict\"])\n model.class_to_idx = checkpoint[\"class_to_idx\"]\n model.to(device)\n\n return model\n" } ]
6
therealpeterpython/gimp-average-layers
https://github.com/therealpeterpython/gimp-average-layers
22a99d5da88ef5dbffdc8aa74095afc9f976da8d
0082a996b51287142a1d67e7445ef652b4942021
94123c15fb0475b99204aa69a6a838e483f2b57e
refs/heads/master
2020-04-24T04:05:18.887349
2019-02-27T09:56:01
2019-02-27T09:56:01
171,691,580
6
1
null
2019-02-20T14:48:50
2018-12-05T20:49:11
2018-11-14T02:09:27
null
[ { "alpha_fraction": 0.71199631690979, "alphanum_fraction": 0.7330586314201355, "avg_line_length": 28.917808532714844, "blob_id": "7b1ab7b738d02f73c47a967a058bc9c0b2aa4139", "content_id": "11ef26df5d1e091ebf33f46aea42edbf3f49c0e4", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2184, "license_type": "permissive", "max_line_length": 125, "num_lines": 73, "path": "/README.md", "repo_name": "therealpeterpython/gimp-average-layers", "src_encoding": "UTF-8", "text": "## gimp-average-layers\n\nThis GIMP plugin merges all layers in an image by taking an average value of each pixel. Useful for noise reduction.\n\n![Example](/example.png?raw=true \"Example\")\n\n\n### Installing\n\n#### Windows\n\n1. Move this plugin into the `%appdata%/GIMP/2.8/plug-ins/` directory.\n2. Restart GIMP.\n\n\n#### Linux\n\n1. Move this plugin into the `~/.gimp-2.8/plug-ins/` directory.\n2. `chmod +x ~/.gimp-2.8/plug-ins/average-layers.py`\n3. Restart GIMP.\n\n\n#### Mac OS X\n\n1. Move this plugin into the `~/Library/Application\\ Support/GIMP/2.8/plug-ins/` directory.\n2. `chmod +x ~/Library/Application\\ Support/GIMP/2.8/plug-ins/average-layers.py`\n3. Restart GIMP.\n\n\n### Usage\n\n1. Select `File -> Open as Layers...` to open the images you wish to blend.\n2. Select `Filters -> Combine -> Average Layers ...`\n3. Choose the average function\n4. Wait...\n\n\n### Problems\n\nThe biggest problem is the speed. The mean average works different from the others and is thereby really fast.\nIf you choose one of the other average functions you maybe have to wait a long time. \nThe plugin works with pixel regions and the hole process is slow for the mean and very slow for the mode function.\n\nAnother issue is the fact that you can not cancel the execution properly.\n\nThe script works on every channel and takes it values from there independetly. It would be better to work\nwith the hole pixel to avoid creating new color combinations.\n\nIf you have a solution to this problems just let me now.\n\n\n### Roadmap\n\nI am planing to implement the range average and my own generalized mode average. \nIf it is possible i will speeding the algorithm up. \nJust use the selection, not the whole image. \n\n### Changes\n\nThe [original function][1] was made created by Oona Räisänen. [John Goodliff][2] added some features like an undo group and progress bar. \nI have restructured everything to implement different kinds of average functions.\n\n\n### Author & Licensing\n\nMade by Simon Filter (2019, public domain)\n\n[Changes][2] were made by John Goodliff (2018). \n[Original function][1] was created by Oona Räisänen (2012-2015, public domain).\n\n\n[1]: https://github.com/windytan/gimp-average-layers\n[2]: https://github.com/jerboa88/gimp-average-layers\n" }, { "alpha_fraction": 0.6036435961723328, "alphanum_fraction": 0.6114513278007507, "avg_line_length": 31.65999984741211, "blob_id": "14444c45789e1e6a51371cf338d6aa33a0b816b6", "content_id": "b0a69ef0b605bb0c31350a2f7b8c299c1391023f", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6532, "license_type": "permissive", "max_line_length": 142, "num_lines": 200, "path": "/average-layers.py", "repo_name": "therealpeterpython/gimp-average-layers", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom gimpfu import *\nfrom array import array\nimport time\nimport sys\n\nimport itertools\nimport operator\n\nfrom collections import Counter\n\n# Not sure if get_mode() or get_mode1() is faster\n# but it looks like get_mode is despite its length the faster one\n\ndef get_mode1(lst):\n return Counter(lst).most_common(1)[0][0]\n\n\n# Returns the mode of the list\ndef get_mode(lst):\n # get an iterable of (item, iterable) pairs\n SL = sorted((x, i) for i, x in enumerate(lst))\n groups = itertools.groupby(SL, key=operator.itemgetter(0))\n # auxiliary function to get \"quality\" for an item\n def _auxfun(g):\n item, iterable = g\n count = 0\n min_index = len(lst)\n for _, where in iterable:\n count += 1\n min_index = min(min_index, where)\n # print 'item %r, count %r, minind %r' % (item, count, min_index)\n return count, -min_index\n # pick the highest-count/earliest item\n return max(groups, key=_auxfun)[0]\n\n\n# Returns the median of the list as input type if the list has an odd length\n# or the mean between the two middle elements as float\ndef get_median(lst):\n n = len(lst)\n h = n//2\n lst.sort()\n if n % 2:\n return lst[h]\n else:\n return sum(lst[h-1:h+1])/2.0\n\n\n# Returns the mean of the list as float\ndef get_mean(list):\n return sum(list) / float(len(list))\n\n\n# Returns the visible layers of the image as list\ndef get_visible_layers(img):\n pdb.gimp_message(\"Get visible layers\")\n\n gimp.progress_init('Getting visible layers')\n layers = img.layers\n layers_vis = []\n for layer in layers:\n if pdb.gimp_item_get_visible(layer):\n if not pdb.gimp_item_is_group(layer):\n pdb.gimp_layer_add_alpha(layer)\n layers_vis.append(layer)\n gimp.progress_update(1)\n if len(layers_vis) == 0:\n pdb.gimp_message(\"No visible layer found!\")\n gimp.quit()\n\n pdb.gimp_message(\"Got visible layers\")\n return layers_vis\n\n\n# Calculates the mean layer of the image\n# identically to the original script\ndef calc_mean(img):\n layers_vis = get_visible_layers(img)\n pdb.gimp_message(\"mean\")\n\n # Set oppacity of visible layers\n layers_left = len(layers_vis)\n gimp.progress_init('Setting layer opacities')\n for layer in layers_vis:\n layer.opacity = 100.0 / layers_left\n layers_left -= 1\n gimp.progress_update((len(layers_vis) - layers_left) / len(layers_vis))\n\n gimp.progress_init('Merging layers')\n pdb.gimp_image_merge_visible_layers(img, CLIP_TO_IMAGE)\n gimp.progress_update(1)\n\n\n# Calculates the average layer with the given average function 'avrg_fnc' of the image\n# It just takes the visible layers into account\ndef calc_avrg(img, avrg_fnc):\n try:\n pdb.gimp_message(\"Calc average\")\n image_x = img.width\n image_y = img.height\n layers_arrays = []\n num_channels = 0\n layers_vis = get_visible_layers(img)\n\n # get pixel arrays\n # layers_arrays contains the arrays of the layers\n # an array contains the pixel values of one layer as [pixel1_r, pixel1_g, pixel1_b, pixel1_A, pixel2_r, ...]\n gimp.progress_init('Getting pixel values')\n for i,layer in enumerate(layers_vis):\n layer_rgn = layer.get_pixel_rgn(0, 0, image_x, image_y, False, False)\n layers_arrays.append(array(\"B\", layer_rgn[:, :]))\n num_channels = len(layer_rgn[0,0]) # Not pretty in this loop but it works\n gimp.progress_update((i+1) / float(len(layers_vis)))\n\n # create the merge layer and the destination pixel region\n merged_layer = pdb.gimp_layer_new(img, image_x, image_y, RGB_IMAGE, \"merged\", 100, NORMAL_MODE)\n pdb.gimp_layer_add_alpha(merged_layer)\n pdb.gimp_image_insert_layer(img, merged_layer, None, 0)\n dest_rgn = merged_layer.get_pixel_rgn(0, 0, image_x, image_y, True, True)\n dest_array = array(\"B\", \"\\x00\" * (image_x * image_y * num_channels))\n\n pdb.gimp_message(\"Doing the hard work\")\n t = time.time()\n\n # process the arrays in this manner\n # its faster than actual write out the for loops\n averaged_values = [int(avrg_fnc([arr[i] for arr in layers_arrays])) for i in range(len(layers_arrays[0]))]\n dest_array = array('B',averaged_values)\n\n\n pdb.gimp_message(str(time.time() - t))\n pdb.gimp_message(\"Hard work done!\")\n\n # add dest_array to the dest_rgn\n dest_rgn[:,:] = dest_array.tostring() # deprecated in Python 3\n\n # Write out changes\n merged_layer.flush()\n merged_layer.merge_shadow(1)\n merged_layer.update(0, 0, image_x, image_y)\n pdb.gimp_image_merge_visible_layers(img, CLIP_TO_IMAGE)\n pdb.gimp_message(\"Calced average\")\n\n except:\n # Print the exception details in gimp\n exc_type, exc_obj, exc_tb = sys.exc_info()\n pdb.gimp_message(\"Type: \" +str(exc_type)+\"\\nLine: \" +str(exc_tb.tb_lineno))\n\n\ndef average_layers(img, average):\n try:\n pdb.gimp_image_undo_group_start(img)\n\n if(average == \"mean\"):\n calc_mean(img) # faster than calc_avrg(img, get_mean)\n\n elif(average == \"median\"):\n pdb.gimp_message(\"median\")\n calc_avrg(img, get_median)\n\n elif(average == \"mode\"):\n pdb.gimp_message(\"mode\")\n calc_avrg(img, get_mode)\n\n elif(average == \"gmode\"):\n pdb.gimp_message(\"gmode\")\n pdb.gimp_message(\"Not implemented yet!\")\n #calc_avrg(img, get_gmode)\n\n elif(average == \"range\"):\n pdb.gimp_message(\"range\")\n pdb.gimp_message(\"Not implemented yet!\")\n #calc_avrg(img, get_range)\n\n pdb.gimp_message(\"finished\")\n pdb.gimp_image_undo_group_end(img)\n except:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n pdb.gimp_message(\"Type: \" +str(exc_type)+\"\\nLine: \" +str(exc_tb.tb_lineno))\n\nregister(\n 'python_fu_average_layers',\n 'Merge all layers together using an average value for each pixel',\n 'Merge all layers together using an average value for each pixel',\n 'Simon Filter',\n 'Simon Filter',\n '2019',\n 'Average layers ...',\n '*',\n [\n (PF_IMAGE, \"image\", \"takes current image\", None),\n (PF_RADIO, \"average\", \"Set kind of average\", \"mean\",((\"Mean (fast)\", \"mean\"), (\"Median (slow)\", \"median\"), (\"Mode (slow!)\", \"mode\"))),\n ],\n [],\n average_layers, menu=\"<Image>/Filters/Combine\"\n)\n\n\nmain()\n" } ]
2
gausszh/sae_site
https://github.com/gausszh/sae_site
593a9531236313d402eb20bb0446105f299cec97
992c29b3f6ca8561c76908b0efdb16a3d4e69775
5fb8dc452c286754486007b18de554316bfc7b27
refs/heads/master
2016-08-03T17:05:31.747341
2014-11-18T13:33:26
2014-11-18T13:33:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5724637508392334, "alphanum_fraction": 0.5978260636329651, "avg_line_length": 14.333333015441895, "blob_id": "471315e8128427de0c4c0af56eaf091f5c3464ae", "content_id": "04e9801f2e3c158b5f693a2d0eb7ba4a304f9159", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "no_license", "max_line_length": 93, "num_lines": 18, "path": "/utils/filters.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "# coding=utf8\n\"\"\"\njinja2的过滤器\n\"\"\"\nimport markdown\n\n\ndef md2html(md):\n \"\"\"\n @param {unicode} md\n @return {unicode html}\n \"\"\"\n return markdown.markdown(md, ['extra', 'codehilite', 'toc', 'nl2br'], safe_mode=\"escape\")\n\n\nJINJA2_FILTERS = {\n 'md2html': md2html,\n}\n" }, { "alpha_fraction": 0.6292406916618347, "alphanum_fraction": 0.6429725289344788, "avg_line_length": 21.509090423583984, "blob_id": "de2986a854be979b9a10e841c99012592680b6cb", "content_id": "7527696c7a64c63f7bcb4796134a975726a0f243", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1264, "license_type": "no_license", "max_line_length": 81, "num_lines": 55, "path": "/models/base.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "#coding=utf8\n\"\"\"\n基础类--用户信息\n\"\"\"\n\nfrom sqlalchemy import (\n MetaData, Table, Column, Integer, BigInteger, Float, String, Text, DateTime,\n ForeignKey, Date, UniqueConstraint)\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom models import sae_engine\nfrom models import create_session\n\nBase = declarative_base()\nmetadata = MetaData()\n\n\nclass User(Base):\n\n \"\"\"\n 发布历史日志\n \"\"\"\n\n __tablename__ = 'user'\n __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}\n\n id = Column(Integer, primary_key=True)\n open_id = Column(String(45), nullable=False, index=True)\n token = Column(String(64), nullable=False, index=True)\n name = Column(String(45))\n email = Column(String(60))\n address = Column(String(150))\n tel = Column(String(15))\n school = Column(String(45))\n create_time = Column(DateTime)\n\n def is_authenticated(self):\n return True\n\n def is_active(self):\n return True\n\n def is_anonymous(self):\n return False\n\n def get_id(self):\n return unicode(self.id)\n\n def __repr__(self):\n return '<User %r>' % (self.name)\n\n\nif __name__ == '__main__':\n Base.metadata.create_all(bind=sae_engine)\n" }, { "alpha_fraction": 0.5517241358757019, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 5.55555534362793, "blob_id": "1a0b299707e5b2a2fcdb4b236025672dfd1e0f82", "content_id": "dc49214e4a83c4c3800e4c7922324ebba8ff70ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 94, "license_type": "no_license", "max_line_length": 10, "num_lines": 9, "path": "/README.md", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "sae_site\n========\n\n部署在sae上的代码\n\n\n## todo\n1. 权限认证\n2. 加个评论功能吧" }, { "alpha_fraction": 0.650109589099884, "alphanum_fraction": 0.6508400440216064, "avg_line_length": 27.54166603088379, "blob_id": "fd04bc87c836b759cf1d84891c6c6a5a6015b4e8", "content_id": "d7b9102edca3cd268840d7a5d223a5cbf80b7f64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1369, "license_type": "no_license", "max_line_length": 86, "num_lines": 48, "path": "/views/base.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "#coding=utf8\n\nimport datetime\nfrom flask import Blueprint, request, jsonify, render_template, redirect\nimport flask_login \nimport weibo as sinaweibo\n\nfrom models.base import create_session, User\nfrom utils import user_cache\nfrom configs import settings\n\n\nbp_base = Blueprint('base', __name__, url_prefix='/base')\n\n\n@bp_base.route('/weibo/login/')\ndef weibo_login():\n api = sinaweibo.Client(settings.API_KEY,settings.API_SECRET,settings.REDIRECT_URI)\n code = request.args.get('code')\n try:\n api.set_code(code)\n except Exception, e:\n return redirect('/blog/')\n\n sinainfo = api.token\n user = user_cache.get_user(sinainfo.get('uid'), format='object')\n if user:\n flask_login.login_user(user, remember=True)\n else:\n user = User()\n user.open_id = sinainfo.get('uid')\n user.token = sinainfo.get('access_token')\n userinfo = api.get('users/show', uid=sinainfo.get('uid'))\n user.name = userinfo.get('name')\n user.address = userinfo.get('location')\n user.create_time = datetime.datetime.now()\n session = create_session()\n session.add(user)\n session.commit()\n flask_login.login_user(user, remember=True)\n session.close()\n return redirect('/blog/')\n\n\n@bp_base.route('/logout/')\ndef logout():\n flask_login.logout_user()\n return redirect('/blog/')" }, { "alpha_fraction": 0.5121951103210449, "alphanum_fraction": 0.707317054271698, "avg_line_length": 15.5, "blob_id": "31b8f5c4b050650557d408aecee81ad9d79d854d", "content_id": "4d48810fe2f1fb5c876f7065193b3c1faeedb731", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 164, "license_type": "no_license", "max_line_length": 19, "num_lines": 10, "path": "/requirements.txt", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "flask==0.10.1\ngunicorn==19.1.1\nMarkupSafe==0.23\nredis==2.10\nsimplejson==3.6.3\nMySQL-python==1.2.3\nSQLAlchemy==0.9.8\nFlask-Login==0.2.11\nrequests==2.2.1\nPyments==1.6" }, { "alpha_fraction": 0.6538461446762085, "alphanum_fraction": 0.65625, "avg_line_length": 22.959999084472656, "blob_id": "ec852cba228a8973c428a619ee11e7f0704b7a35", "content_id": "37cde90bb687ca9cb4525875ca167f87609767d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1248, "license_type": "no_license", "max_line_length": 59, "num_lines": 50, "path": "/flask_app.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n# coding=utf8\r\n\r\n\r\nfrom flask import Flask, render_template, g\r\nimport flask_login\r\n\r\nfrom configs import settings\r\nfrom utils.filters import JINJA2_FILTERS\r\nfrom utils import user_cache\r\nfrom views import blog, base, security\r\n\r\n\r\ndef create_app(debug=settings.DEBUG):\r\n app = Flask(__name__)\r\n app.register_blueprint(blog.bp_blog)\r\n app.register_blueprint(base.bp_base)\r\n app.register_blueprint(security.bp_security)\r\n app.jinja_env.filters.update(JINJA2_FILTERS)\r\n app.debug = debug\r\n app.secret_key = \"gausszh\"\r\n\r\n @app.route('/')\r\n def index():\r\n return render_template('index.html')\r\n\r\n @app.before_request\r\n def check_user():\r\n g.user = flask_login.current_user\r\n\r\n login_manager = flask_login.LoginManager()\r\n login_manager.setup_app(app)\r\n\r\n @login_manager.user_loader\r\n def load_user(userid):\r\n user = user_cache.get_user(userid, format='object')\r\n return user\r\n\r\n login_manager.unauthorized = blog.list\r\n # login_manager.anonymous_user = AnonymousUserMixin\r\n\r\n return app\r\n\r\napp = create_app(settings.DEBUG)\r\n\r\n\r\nif __name__ == '__main__':\r\n host = settings.APP_HOST\r\n port = settings.APP_PORT\r\n app.run(host=host, port=port)\r\n" }, { "alpha_fraction": 0.7817796468734741, "alphanum_fraction": 0.7881355881690979, "avg_line_length": 26.823530197143555, "blob_id": "30fbb1ccf6cdfcaa2be45279609e511b29ac2a68", "content_id": "4f055fe77e396e0cbbe9348e1f0e95af41f2747e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 472, "license_type": "no_license", "max_line_length": 82, "num_lines": 17, "path": "/models/__init__.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "#coding=utf-8\n\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom configs import settings\nsae_engine = create_engine(settings.DB_SAE_URI+'?charset=utf8', encoding='utf-8', \n\tconvert_unicode=True, pool_recycle=settings.DB_POOL_RECYCLE_TIMEOUT, \n\techo=settings.DB_ECHO)\n\ncreate_session = sessionmaker(autocommit=False, autoflush=False, \n bind=sae_engine)\n\n\nBase = declarative_base()" }, { "alpha_fraction": 0.6500508785247803, "alphanum_fraction": 0.6551373600959778, "avg_line_length": 24.86842155456543, "blob_id": "cdc3d96be39435e164cf07aff9aad5374cddf683", "content_id": "2507699bab67c53e67549262f7e0f15dffad967c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 987, "license_type": "no_license", "max_line_length": 66, "num_lines": 38, "path": "/utils/__init__.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "#coding=utf8\n\nimport datetime\nimport redis\n\nimport flask_login\n\nfrom models.base import User, create_session\nfrom utils import user_cache\nfrom configs import settings\n\n\ndef AnonymousUserMixin():\n '''\n This is the default object for representing an anonymous user.\n '''\n session = create_session()\n user = User()\n count = user_cache.get_anonymous_count()\n anonymouser_id = 1000 + count\n user.open_id = 'anonymous%s' % anonymouser_id\n user.name = u'游客%s' % anonymouser_id\n user.token = ''\n user.create_time = datetime.datetime.now()\n session.add(user)\n session.commit()\n user_cache.incr_anonymous_count()\n flask_login.login_user(user, remember=True)\n session.close()\n return user\n\nredis_pool = redis.ConnectionPool(host=settings.REDIS_IP, \n port=settings.REDIS_PORT, \n db=settings.REDIS_DB)\n\n\ndef redis_connection():\n return redis.Redis(connection_pool=redis_pool)\n" }, { "alpha_fraction": 0.6828358173370361, "alphanum_fraction": 0.6865671873092651, "avg_line_length": 21.33333396911621, "blob_id": "92ecfac5b04713dfe59804c1a3e296912626ce47", "content_id": "803a9cc70ec6ef2c06bb2e862d21027197778792", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 58, "num_lines": 12, "path": "/utils/blog_cache.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "# coding=utf8\nfrom configs import settings\nfrom utils import redis_connection\n\n\nAPP = \"blog\"\n\n\ndef set_draft_blog(uid, markdown):\n _cache = redis_connection()\n key = str(\"%s:draft:blog:%s\" % (APP, uid))\n _cache.set(key, markdown, settings.DRAFT_BLOG_TIMEOUT)\n" }, { "alpha_fraction": 0.5991501212120056, "alphanum_fraction": 0.6515581011772156, "avg_line_length": 20.393939971923828, "blob_id": "023b4ffc780d33d669b6ec30a2b0c7aba00f4b6a", "content_id": "cca1e093eb1e401ede6e2bc33cad0879471e1423", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 710, "license_type": "no_license", "max_line_length": 75, "num_lines": 33, "path": "/configs/settings_dev.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "#coding=utf8\nimport os\n\n# system setting\nDEBUG = True\nAPP_HOST = '127.0.0.1'\nAPP_PORT = 7020\nSTORAGE_BUCKET_DOMAIN_NAME = 'blogimg'\n\n# database\n\nif os.environ.get('SERVER_SOFTWARE'):#线上\n\timport sae\n\tDB_SAE_URI = 'mysql://%s:%s@%s:%s/database_name' % (sae.const.MYSQL_USER, \n \tsae.const.MYSQL_PASS, sae.const.MYSQL_HOST, sae.const.MYSQL_PORT)\n\tDB_POOL_RECYCLE_TIMEOUT = 10\n\tDB_ECHO = True\nelse:\n\tDB_SAE_URI = 'mysql://user:[email protected]:3306/database_name'\n\t# DB_SAE_URI = 'sqlite:////database.db'\n\tDB_POOL_RECYCLE_TIMEOUT = 10\n\tDB_ECHO = True\n\n# cache\nREDIS_HOST = \"127.0.0.1\"\nREDIS_PORT = 6379\nREDIS_DB = 1\nCACHE_TIMEOUT = 3\n\n# app\nAPI_KEY = '***'\nAPI_SECRET = '****'\nREDIRECT_URI = 'http://****'\n" }, { "alpha_fraction": 0.5805805921554565, "alphanum_fraction": 0.5832499265670776, "avg_line_length": 25.758928298950195, "blob_id": "c58a4e3a1c1cf1265122bd247bf6be3a1ab79925", "content_id": "3720322e6121dc28d4ffe3047afc4dda99376096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3047, "license_type": "no_license", "max_line_length": 79, "num_lines": 112, "path": "/utils/user_cache.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "# coding=utf8\n\ntry:\n import simplejson as json\nexcept Exception:\n import json\nimport datetime\nfrom sqlalchemy.sql import or_\n\nfrom models.base import create_session, User\nfrom models.blog import BlogArticle\nfrom configs import settings\nfrom utils import redis_connection\n# import sae.kvdb\n\nAPP = \"base\"\n\n\ndef get_user(uid, format=\"json\"):\n _cache = redis_connection()\n key = str(\"%s:user:%s\" % (APP, uid))\n userinfo = _cache.get(key)\n new = False\n if not userinfo:\n session = create_session()\n userinfo = session.query(User).filter(or_(User.id == uid,\n User.open_id == uid)).first()\n userinfo = orm2json(userinfo)\n _cache.set(key, json.dumps(userinfo), settings.CACHE_TIMEOUT)\n new = True\n session.close()\n if not new:\n userinfo = json.loads(userinfo)\n\n if format == 'object' and userinfo:\n user = User()\n for k in userinfo:\n setattr(user, k, userinfo.get(k))\n userinfo = user\n return userinfo or None\n\n\ndef delete_user(uid):\n _cache = redis_connection()\n key = str(\"%s:user:%s\" % (APP, uid))\n _cache.delete(key)\n\n\ndef get_anonymous_count():\n _cache = redis_connection()\n key = \"%s:anonymous:count\" % APP\n count = _cache.get(key)\n if not count:\n session = create_session()\n count = session.query(User).filter(\n User.open_id.startswith(\"anonymous\")).count()\n _cache.set(key, count, settings.CACHE_TIMEOUT)\n session.close()\n return int(count)\n\n\ndef incr_anonymous_count():\n _cache = redis_connection()\n key = \"%s:anonymous:count\" % APP\n count = get_anonymous_count()\n _cache.set(key, count + 1, settings.CACHE_TIMEOUT)\n\n\ndef get_blog(blog_id):\n \"\"\"\n 获取博客的数据\n \"\"\"\n _cache = redis_connection()\n key = str(\"%s:blog:%s\" % (APP, blog_id))\n bloginfo = _cache.get(key)\n new = False\n if not bloginfo:\n session = create_session()\n bloginfo = session.query(BlogArticle).filter_by(id=blog_id).first()\n bloginfo = orm2json(bloginfo)\n _cache.set(key, json.dumps(bloginfo), settings.CACHE_TIMEOUT)\n new = True\n session.close()\n if not new:\n bloginfo = json.loads(bloginfo)\n return bloginfo\n\n\ndef delete_blog(blog_id):\n _cache = redis_connection()\n key = str(\"%s:blog:%s\" % (APP, blog_id))\n _cache.delete(key)\n\n\ndef orm2json(orm):\n \"\"\"\n 将sqlalchemy返回的对象转换为可序列话json类型的对象\n \"\"\"\n def single2py(instance):\n d = {}\n if instance:\n keys = instance.__dict__.keys()\n for key in keys:\n if key.startswith('_'):\n continue\n value = getattr(instance, key)\n d[key] = isinstance(value, datetime.datetime) and \\\n value.strftime('%Y-%m-%d %H:%M:%S') or value\n return d\n if isinstance(orm, list):\n return [single2py(ins) for ins in orm]\n return single2py(orm)\n" }, { "alpha_fraction": 0.7423469424247742, "alphanum_fraction": 0.7448979616165161, "avg_line_length": 20.83333396911621, "blob_id": "865022b820614b97b4a822bd37df6cb7e2b08d16", "content_id": "3ac49467c520129298c2563358fcb50b8cf80d5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 69, "num_lines": 18, "path": "/views/security.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "# coding=utf8\n\"\"\"\n学web安全用到的一些页面\n\"\"\"\nfrom flask import Blueprint, render_template\nfrom sae.storage import Bucket\n\nfrom configs import settings\n\n\nbp_security = Blueprint('security', __name__, url_prefix='/security')\nbucket = Bucket(settings.STORAGE_BUCKET_DOMAIN_NAME)\nbucket.put()\n\n\n@bp_security.route('/wanbo/video/')\ndef wanbo_video():\n return render_template('security/wanbo_video.html')" }, { "alpha_fraction": 0.57880699634552, "alphanum_fraction": 0.5805043578147888, "avg_line_length": 29.937984466552734, "blob_id": "d789ddece88e920f0428869b4c015190f85af30d", "content_id": "566aa7ec743bb76314a5e8f9bba8f021f52c7871", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4190, "license_type": "no_license", "max_line_length": 80, "num_lines": 129, "path": "/views/blog.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "# coding=utf8\r\n\r\nimport datetime\r\nimport urllib\r\nfrom flask import Blueprint, request, jsonify, render_template, g\r\nimport flask_login\r\nfrom sae.storage import Bucket\r\n\r\nfrom models.blog import create_session, BlogArticle\r\nfrom utils.blog_cache import set_draft_blog\r\nfrom configs import settings\r\n\r\n\r\nbp_blog = Blueprint('blog', __name__, url_prefix='/blog')\r\nbucket = Bucket(settings.STORAGE_BUCKET_DOMAIN_NAME)\r\nbucket.put()\r\n\r\n\r\n@bp_blog.route('/')\r\n@bp_blog.route('/list/')\r\ndef list():\r\n session = create_session()\r\n blogs = session.query(BlogArticle).order_by(BlogArticle.update_time.desc())\\\r\n .all()\r\n session.close()\r\n return render_template('blog/blog_list.html', blogs=blogs)\r\n\r\n\r\n@bp_blog.route('/delete/<int:blog_id>/', methods=['POST'])\r\n@flask_login.login_required\r\ndef delete(blog_id):\r\n session = create_session()\r\n blog = session.query(BlogArticle).filter_by(id=blog_id).first()\r\n if blog.create_by == g.user.id:\r\n blog.is_active = 0\r\n session.commit()\r\n session.close()\r\n return jsonify(ok=True, data={'blog_id': blog_id})\r\n session.close()\r\n return jsonify(ok=False, reason=u'数据错误')\r\n\r\n\r\n@bp_blog.route('/draft/', methods=['POST'])\r\n@flask_login.login_required\r\ndef draft():\r\n \"\"\"\r\n 保存未上传的文章为草稿\r\n \"\"\"\r\n form = request.form\r\n markdown = form.get('markdown', '')\r\n set_draft_blog(flask_login.current_user.id, markdown)\r\n return jsonify(ok=True)\r\n\r\n\r\n@bp_blog.route('/edit/<int:blog_id>/', methods=['GET', 'POST'])\r\n@bp_blog.route('/edit/', methods=['GET', 'POST'])\r\n@flask_login.login_required\r\ndef edit(blog_id=0):\r\n if request.method == 'GET':\r\n if blog_id == 0:\r\n blog = None\r\n else:\r\n session = create_session()\r\n blog = session.query(BlogArticle).filter_by(id=blog_id).first()\r\n session.close()\r\n return render_template('blog/blog_edit.html', blog=blog)\r\n\r\n if request.method == 'POST':\r\n form = request.form\r\n markdown = form.get('markdown')\r\n title = form.get('title')\r\n blog_id = form.get('blog_id')\r\n if markdown and title and (len(markdown.strip()) * \r\n len(title.strip()) > 0):\r\n\r\n session = create_session()\r\n now = datetime.datetime.now()\r\n # blog_id belong to this user\r\n if blog_id:\r\n blog = session.query(BlogArticle).filter_by(id=blog_id).first()\r\n if not blog_id or not blog:\r\n blog = BlogArticle()\r\n blog.create_by = flask_login.current_user.id\r\n blog.create_time = now\r\n blog.is_active = 1\r\n blog.update_time = now\r\n blog.title = title\r\n blog.markdown = markdown\r\n session.add(blog)\r\n session.commit()\r\n blog_id = blog.id\r\n session.close()\r\n return jsonify(ok=True, data={'blog_id': blog_id})\r\n return jsonify(ok=False, reason=u'数据错误')\r\n\r\n\r\n@bp_blog.route('/view/<int:blog_id>/')\r\ndef view_blog(blog_id):\r\n session = create_session()\r\n query = session.query(BlogArticle).filter_by(id=blog_id)\r\n if not flask_login.current_user.is_active():\r\n query = query.filter_by(is_active=1)\r\n blog = query.first()\r\n session.close()\r\n return render_template('blog/blog_view.html', blog=blog)\r\n\r\n\r\n@bp_blog.route('/files/', methods=['POST'])\r\n@flask_login.login_required\r\ndef save_file():\r\n \"\"\"\r\n 存储上传的图片\r\n \"\"\"\r\n files_name = request.files.keys()\r\n ret = []\r\n for fn in files_name:\r\n # 暂未做安全校验 PIL\r\n img_file = request.files.get(fn)\r\n bucket.put_object(fn, img_file)\r\n link = bucket.generate_url(fn)\r\n ret.append({'name': fn, 'link': link})\r\n http_files_link = request.form.keys()\r\n for fn in http_files_link:\r\n http_link = request.form.get(fn)\r\n img_file = urllib.urlopen(http_link)\r\n bucket.put_object(fn, img_file)\r\n link = bucket.generate_url(fn)\r\n ret.append({'name': fn, 'link': link})\r\n return jsonify(ok=True, data=ret)\r\n\r\n\r\n" }, { "alpha_fraction": 0.6902390718460083, "alphanum_fraction": 0.695219099521637, "avg_line_length": 26.135135650634766, "blob_id": "f52869923beb670ca50eb15a4219d5d331268f16", "content_id": "ffc9c682f55b9acdef188c243f076a2f8af0fcc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1016, "license_type": "no_license", "max_line_length": 81, "num_lines": 37, "path": "/models/blog.py", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#coding=utf8\n\nimport datetime\nfrom sqlalchemy import (\n MetaData, Table, Column, Integer, BigInteger, Float, String, Text, DateTime,\n ForeignKey, Date, UniqueConstraint)\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.declarative import declarative_base\n\nfrom models import sae_engine\nfrom models import create_session\n\nBase = declarative_base()\nmetadata = MetaData()\n\n\nclass BlogArticle(Base):\n\n \"\"\"\n 发布历史日志\n \"\"\"\n\n __tablename__ = 'blog_article'\n __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}\n\n id = Column(Integer, primary_key=True)\n title = Column(String(50))\n markdown = Column(Text)\n html = Column(Text)\n create_by = Column(Integer, index=True, nullable=False)\n create_time = Column(DateTime, nullable=False)\n update_time = Column(DateTime, index=True, nullable=False,)\n is_active = Column(Integer, nullable=False, default=1)\n\nif __name__ == '__main__':\n Base.metadata.create_all(bind=sae_engine)\n" }, { "alpha_fraction": 0.5831385254859924, "alphanum_fraction": 0.591759204864502, "avg_line_length": 27.168724060058594, "blob_id": "cf0edb8a7b66519ba1e782a5f7a5b3c297aa1ff6", "content_id": "e9e176ab67b9c45bec5ebf381a313c5858159e7c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 7268, "license_type": "permissive", "max_line_length": 97, "num_lines": 243, "path": "/static/js/blog/edit.js", "repo_name": "gausszh/sae_site", "src_encoding": "UTF-8", "text": "(function (win){\n\tvar blog = blog || {};\n\tvar IMG_STORAGE = \"img_storage\";\n\n\tblog.init = function(){\n\t\tlocalStorage.setItem(IMG_STORAGE, '');\n\t\tif ( $(\"#blog_id\").val().length === 0 ) {\n\t\t\t$(\"#blog_id\").val(localStorage['blog_id']);\n\t\t}\n\t\tif ( $(\"#title\").val().length === 0 ) {\n\t\t\t$(\"#title\").val(localStorage['title']);\n\t\t}\n\t\t\n\n\t};\n\n\tblog.img_storage = function(){\n\t\tvar img_storage_str = localStorage.getItem(IMG_STORAGE);\n\t\tif (img_storage_str == undefined || img_storage_str.length < 1){\n\t\t\treturn {}\n\t\t}\n\t\treturn JSON.parse(img_storage_str);\n\t};\n\n\tblog.set_img = function(img_sha1, link){\n\t\tvar img = blog.img_storage();\n\t\timg[img_sha1] = link;\n\t\tlocalStorage.setItem(IMG_STORAGE, JSON.stringify(img));\n\t}\n\n\t/**\n\t * 发布文章,发布前要先发送图片,并将文章缓存到localStorage\n\t * @param {event} e \n\t */\n\tblog.submit = function(e){\n\t\t$('#upload').attr('disabled',true);\n\t\t$('#upload').val('发送中。。。');\n\t\tblog.search_img(true);\n\t\teditor.save(true, true);\n\t\tvar filename = editor.settings.file.name;\n\t\tvar markdown = editor.exportFile(filename,'text',true)\n\t\tvar title = $(\"#title\").val().trim();\n\t\tif (title.length * markdown.length > 0){\n\t\t\t$.post('/blog/edit/', {\n\t\t\t\t\t'blog_id': $(\"#blog_id\").val(),\n\t\t\t\t\t'title': title,\n\t\t\t\t\t'markdown': markdown},\n\t\t\t\t\tfunction(ret, status){\n\t\t\t\t\t\t$(\"#blog_id\").val(ret.data.blog_id);\n\t\t\t\t\t\t$('#upload').attr('disabled',false);\n\t\t\t\t\t\t$('#upload').val('submit');\n\t\t\t\t\t\t// localStorage.setItem('title', title);\n\t\t\t\t\t\t// localStorage.setItem('blog_id', ret.data.blog_id);\n\t\t\t\t\t\t// var storage = JSON.parse(editor._storage[editor._previewDraftLocation + \n\t\t\t\t\t\t// \teditor.settings.localStorageName]);\n\t\t\t\t\t\t\n\t\t\t\t\t\t// storage[filename] = editor._defaultFileSchema();\n\n\t\t\t\t\t\t// editor._storage[editor._previewDraftLocation + editor.settings.localStorageName] = \n\t\t\t\t\t\t// \teditor._storage[editor.settings.localStorageName] = \n\t\t\t\t\t\t// \t\tJSON.stringify({filename: editor._defaultFileSchema()});\n\t\t\t\t\t\t// editor.open();\n\t\t\t\t\t\t// $(\"#title\").val('');\n\t\t\t\t\t\t// $(\"#blog_id\").val('');\n\t\t\t\t\t\t$(\"#message\").show();\n\t\t\t\t\t\tsetTimeout(function(){ $(\"#message\").hide();}, 2000);\n\t\t\t\t\t},\n\t\t\t\t\t'json');\n\t\t}else{\n\t\t\t$('#upload').attr('disabled',false);\n\t\t\t$('#upload').val('submit');\n\t\t\talert('数据不完整');\n\t\t\treturn false;\n\t\t}\n\n\t};\n\t/**\n\t * 遍历编辑框中的图片,将其发送到服务器上。\n\t * @param {is_submit} 是否要提交文章了。意味着,如果是则需要将所有未完成上传的都上传一次,且ajax用同步的方式\n\t * @return {type} [description]\n\t */\n\tblog.search_img = function( is_submit ){\n\t\tvar imgs = $('img', $(editor.editor));\n\t\tfor (var img_index = 0; img_index < imgs.length; img_index++) {\n\t\t\tvar img = $(imgs[img_index]);\n\t\t\tvar src = img.attr('src');\n\n\t\t\t//非我的域名的图片都需要转换\n\t\t\tif (src.indexOf('http://gausszh') === 0 || src.indexOf('http://localhost') === 0){\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tvar img_sha1 = CryptoJS.SHA1(src).toString();//SHA256(base64_img);\n\t\t\timg.attr('class', img_sha1);\n\t\t\tvar img_storage = blog.img_storage();\n\t\t\t//正在上传或者已将上传过的则不重复了\n\t\t\tif ( img_storage[img_sha1] !== undefined && !is_submit) {\n\t\t\t\tif ( img_storage[img_sha1].length > 0) {\n\t\t\t\t\timg.attr('src', img_storage[img_sha1]);\n\t\t\t\t}\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\t\n\t\t\tblog.set_img(img_sha1, '');\n\t\t\tvar\tform = new FormData();\n\t\t\tif (src.indexOf('http') === 0){\n\t\t\t\tform.append(img_sha1, src)\n\t\t\t} else {\n\t\t \tvar img_type = src.slice(src.indexOf('data:') + 5,src.indexOf(';'))\n\t\t\t\tvar base64_img = src.slice(src.indexOf('base64,') + 7);\n\t\t\t\tform.append(img_sha1, blog.str_to_blob(base64_img, img_type));\n\t\t\t}\n\n\t\t\t// 提示用户,目前在上传哦\n\t\t\timg.hide();\n\t\t\tvar progress_tag = document.createElement('p');\n\t\t\tprogress_tag.className = img_sha1;\n\t\t\t$(progress_tag).insertAfter(img);\n\t\t\tvar progress_f = function ( event ) {\n\t\t\t\tif (event.lengthComputable) {\n\t\t\t\t\tvar percentComplete = event.loaded / event.total * 100;\n\t\t\t\t\tvar klass = arguments.callee.klass;\n\t\t\t\t\tvar progress_tag = $('p.' + klass, $(editor.editor));\n\t\t\t\t\tprogress_tag.innerHTML = '正在上传....'+ percentComplete + '%'\n\n\t\t\t\t} \n\t\t\t};\n\t\t\tprogress_f.klass = img_sha1;\n\n\t\t\t$.ajax({\n\t\t\t\turl:'/blog/files/', \n\t\t\t\ttype:'POST',\n\t\t\t\tdata:form,processData:false,\n\t\t\t\tcontentType: false,\n\t\t\t\tcontext: progress_tag,\n\t\t\t\txhrFields: {\n\t\t\t\t\tonprogress: progress_f\n\t\t\t\t},\n\t\t\t\tasync: !is_submit,\n\t\t\t\tsuccess: function(ret,status){\n\t\t\t\t\tif (ret.ok){\n\t\t\t\t\t\tprogress_tag.innerHTML = '';\n\t\t\t\t\t\tfor (var i = 0; i < ret.data.length; i++) {\n\t\t\t\t\t\t\tblog.set_img(ret.data[i].name, ret.data[i].link);\n\t\t\t\t\t\t\tvar img = $('img.' + ret.data[i].name, $(editor.editor));\n\t\t\t\t\t\t\timg.attr('src', ret.data[i].link);\n\t\t\t\t\t\t\timg.show();\n\t\t\t\t\t\t};\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\tprogress_tag.innerHTML = '上传失败';\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t});\n\t\t};\n\t};\n\t/**\n\t * 复制粘贴图片,chrome\n\t * @param {event} e \n\t */\n\tblog.send_img = function(e){\n\t\tvar clip = e.originalEvent.clipboardData || e.clipboardData;\n\t\tif ( !/image/.test(clip.types[0])) {\n\t\t\treturn true;\n\t\t}\n\t\tvar items = clip.items;\n\t\tif ( items ) {\n\t\t\tvar img_blob = items[0].getAsFile();\n\t\t\tvar rd = new FileReader();\n\t\t\trd.onload = function ( e ) {\n\t\t\t\tvar base64_img = rd.result;\n\t\t\t\teditor.editorIframeDocument.execCommand(\"insertHTML\",false,'<img src=\"' + base64_img + '\">');\n\t\t\t}\n\t\t\trd.readAsDataURL(img_blob);\n\n\t\t\te.preventDefault();\n\t\t}\n\t\t// var\tform = new FormData();\n\n\t\t// form.append(img_sha1, img_blob)\n\n\t}\n\n \t$(editor.editor).bind('paste', blog.send_img);\n\t/**\n\t * 将字符串装换为Blob类型\n\t * @param {string} str [需要被装换的字符串]\n\t * @param {string} type [生成的Blob数据的 类型,比如 image/png]\n\t * @return {Blob} [装换后的Blob 类型数据]\n\t (*/\n\tblog.str_to_blob = function (str, type) {\n\t\tvar\tbin_str = atob(str);\n\t\tvar\tarray = new Uint8Array(new ArrayBuffer(bin_str.length));\n\t\tfor(var i = 0; i < bin_str.length; i++) {\n\t\t\tarray[i] = bin_str.charCodeAt(i);\n\t\t}\n\t\tvar\tdv = new DataView(array.buffer);\n\t\tvar\tblob_file = new Blob([dv], {'type': type});\n\n\t\treturn blob_file;\n\n\t}\n\n\tblog.upload_temp_blog = function () {\n\t\tvar imgs = blog.img_storage();\n\t\tvar keys = Object.keys(imgs);\n\t\tfor (var i = 0; i < keys.length; i++) {\n\t\t\tif ( imgs[keys[i]] == '' || imgs[keys[i]] == undefined ) {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\tvar filename = editor.settings.file.name;\n\t\tvar markdown = editor.exportFile(filename,'text',true)\n\t\tvar md_sha1 = CryptoJS.SHA1(markdown).toString();\n\t\tif (localStorage.markdown === undefined) {\n\t\t\tlocalStorage.markdown = md_sha1;\n\t\t}\n\t\tif ( localStorage.markdown === md_sha1) {\n\t\t\treturn false;\n\t\t}\n\t\tlocalStorage.markdown = md_sha1;\n\t\tvar title = $(\"#title\").val().trim();\n\t\tif (title.length * markdown.length > 0){\n\t\t\t$.post('/blog/edit/', {\n\t\t\t\t\t'blog_id': $(\"#blog_id\").val(),\n\t\t\t\t\t'title': title,\n\t\t\t\t\t'markdown': markdown},\n\t\t\t\t\tfunction(ret, status){\n\t\t\t\t\t\t$(\"#blog_id\").val(ret.data.blog_id);\n\t\t\t\t\t\t$('#upload').attr('disabled',false);\n\t\t\t\t\t\t$('#upload').val('submit');\n\t\t\t\t\t},\n\t\t\t\t\t'json');\n\t\t}\n\t}\n\t//定期扫描编辑区的图片\n\twindow.setInterval(blog.search_img, editor.settings.file.autoSave);\n\t//定期将编辑的内容保存到服务器草稿中\n\twindow.setInterval(blog.upload_temp_blog, editor.settings.file.autoSave);\n\twindow.blog = blog;\n\n})(window)" } ]
15
cgddrd/maartech-test
https://github.com/cgddrd/maartech-test
8dbc6a511f95dadb97df62035cb878e0d8429926
a362baf383d375339c7453a2f5d3855f9a3503ff
de04ec2506b86aec5c857bb155c99a86f37b5414
refs/heads/main
2023-04-30T15:10:18.675672
2021-05-21T17:38:34
2021-05-21T17:38:34
369,610,178
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7258687019348145, "alphanum_fraction": 0.7258687019348145, "avg_line_length": 22.484848022460938, "blob_id": "6bac074ed8894ccf5090a6834262301f8f2b3f42", "content_id": "d65e096542e9df9c35ddfb03b656578c130e3364", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 777, "license_type": "no_license", "max_line_length": 151, "num_lines": 33, "path": "/README.md", "repo_name": "cgddrd/maartech-test", "src_encoding": "UTF-8", "text": "# Maartech Technical Test Submission\n\n**Author: Connor Goddard**\n\n## Description\nThis Python application allows users to import the contents of CSV data files located in a target folder to dedicated tables in a PostgreSQL database. \n\n## Usage\n\nTo get started, type the following in the terminal/Command Prompt:\n\n```\npip install -r requirements.txt\n\npython ./run.py --help\n```\n\n## Configuration\n\nYou can specify database connection settings and the target folder path via a YAML configuration file (default: `./config.yml). \n\nThe structure of this configuration file should be as follows:\n\n```\ndb:\n host: <HOST_NAME>\n port: <PORT_NUMBER>\n database: <DATABASE_NAME>\n user: <USER_NAME>\n password: <PASSWORD>\n\ntarget_path: <FILE_PATH_TO_FOLDER_CONTAINING_CSV_FILES>\n```\n\n\n" }, { "alpha_fraction": 0.4848484992980957, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 15.5, "blob_id": "f4a838a7291ca95e54c31c3e96799de07fad3496", "content_id": "4c5c427da47abce13cfcdb95f0cb581dc760226a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 66, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/requirements.txt", "repo_name": "cgddrd/maartech-test", "src_encoding": "UTF-8", "text": "click==8.0.1\ngreenlet==1.1.0\npsycopg2-binary==2.8.6\nPyYAML==5.4.1\n" }, { "alpha_fraction": 0.6454260349273682, "alphanum_fraction": 0.6485599279403687, "avg_line_length": 45.54166793823242, "blob_id": "af46dadbd455bb8ce45b17298e97b41a6709e191", "content_id": "e418165b1e010c1c26d3c8c1c6ede451f05ab84f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6701, "license_type": "no_license", "max_line_length": 223, "num_lines": 144, "path": "/run.py", "repo_name": "cgddrd/maartech-test", "src_encoding": "UTF-8", "text": "# MAARTech technical test submission.\n# Author: Connor Goddard\n# First Published: 2021-05-20 \n\n# Submission notes:\n# - For this task, I've made a two key assumptions: 1) we only need to support CSV file types; and 2) that it's a requirement to have the ORIGINAL/RAW data AS CONTAINED IN THE DATA FILES imported into the database tables.\n# \n# - I've made the decision NOT to transform the data and build new feature columns (e.g. combine the 'lat' and 'long' columns into a single GIS 'POINT' column) because in my experience, \n# you would typically want to make sure the RAW data is imported 'as-is', and then apply such transformations across the 'raw' tables \n# to curate new 'analytics' tables once the data is available in the database. This same reasoning led me to choose to NOT convert \n# the hexadecimal representation of OSM tag values into plaintext. Again, this could be done as part of a downstream process, with the original data preserved.\n# \n# - I recognise that the data contained in the input files appears to be OpenStreetMap data, so it is possible that instead of connecting to and querying the database directly from Python, \n# we could potentially make use of the official 'osm2pgsql' tool (https://osm2pgsql.org/) which could automate much of the table schema creation and unpacking. (This could even be called dynamically via a Python script.)\n# \n# - In terms of database credentials, in a production envrionment, we'd want to load the credentials in from a secure location at runtime (i.e. ideally from a secrets manager, \n# but at the very least from a 'secure' configuration file - excluded from version control). \n# \n# - I could have used SQLAlchemy to provide the connection to the database (SQLAlchemy is a popular and well-established library for working with RDBMS databases in Python), however,\n# because I wanted to take particular advantage of the 'COPY FROM' syntax supported by PostgresSQL, using SQL Alchemy would have been in some ways redundant, because I would have needed to \n# access the underlying engine (psycopg2) in order to use the 'copy_expert()' function (i.e. it was more efficient just to import and use the psycopg2 library directly in this case).\n# \n# - I felt that building Python classes/objects in this situation was a little bit overkill, so kept everything contained inside a single script file with core functionality split out to dedicated functions. \n# Obviously if the scope of the application was to grow (e.g. to parse and import different data file types), then abstracting certain logic (e.g. to load/parse these different file types) to dedicated \n# class files would be a reasonable option.\n# \n# - In terms of evolving this application, I would like to add the ability to define the table schema directly from CSV header structure.\n\n\nimport click\nfrom psycopg2 import connect, sql\nfrom pathlib import Path\nimport yaml\nimport logging\n\n# I'm a fan of using decorators for readabililty.\[email protected]()\[email protected]('--config', default='./config.yml', help='The path to the config file.')\ndef run(config):\n \"\"\"Imports data from CSV files into a series of PostgresSQL tables (one table per file).\"\"\"\n\n logging.info('Application started.')\n\n db_conn = None\n\n # Personally, I prefer YAML format in defining configuration files vs. the standard 'INI' format provided by Python. I find it cleaner.\n config = read_yaml_config(config)\n\n files = list_files(config['target_path'])\n\n try:\n # Use the '**' syntax to flatten the dictionary into key-value pairs that can be passed into as parameters into psycopg2.connect().\n db_conn = connect(**config['db'])\n\n for file in files:\n import_file_to_database(file, db_conn)\n\n logging.info('Import complete.')\n\n except Exception:\n logging.error('An error occurred whilst importing data files into the database', exc_info=1)\n\n finally:\n if db_conn is not None:\n db_conn.close()\n\ndef read_yaml_config(config_path):\n\n try:\n with open(config_path) as file:\n # We use safe_load() here to help prevent execution of any arbitary code embedded in the YAML file. \n yaml_file = yaml.safe_load(file)\n return yaml_file\n except Exception:\n logging.error('Failed to load YAML config file.', exc_info=1)\n\ndef list_files(search_folder:str):\n\n pattern = \"*.csv\"\n\n directory = Path(search_folder)\n\n # Return a list of all files that match the pattern in the search folder.\n return [csvFile for csvFile in directory.glob(pattern)]\n\ndef import_file_to_database(file_path:str, conn):\n \n file_name = Path(file_path).stem\n\n try:\n\n logging.info('Importing file {} into database table {}.'.format(file_path, file_name))\n\n with conn.cursor() as cur:\n \n # First, attempt to create the table if it doesn't already exist.\n query = sql.SQL(\"\"\"\n \n CREATE TABLE IF NOT EXISTS {table_name} (\n \n osm_id INTEGER PRIMARY KEY,\n area NUMERIC NOT NULL, \n lon NUMERIC NOT NULL, \n lat NUMERIC NOT NULL, \n tags JSONB, \n osm_type VARCHAR(25) NOT NULL, \n p_tag_value TEXT, \n city TEXT, \n postcode TEXT, \n address TEXT, \n street TEXT, \n has_way BOOLEAN NOT NULL,\n shop_type TEXT,\n derived_shared_area NUMERIC,\n derived_way_area NUMERIC,\n parent_way INTEGER, \n shared_divisor INTEGER, \n area_sq_foot NUMERIC NOT NULL\n )\n \n \"\"\").format(table_name = file_name)\n\n cur.execute(query)\n cur.commit()\n\n with open(file_path, 'r') as f: \n\n # Second, use the PgSQL 'COPY' feature to efficiently copy the contects of the CSV file into the table. (This can scale to millions of rows.) - https://www.postgresql.org/docs/current/sql-copy.html\n query = sql.SQL(\"\"\"\n COPY {table_name} FROM stdin WITH CSV HEADER\n DELIMITER as ','\n \"\"\").format(table_name = file_name)\n\n cur.copy_expert(sql=query, file=f)\n\n except Exception: \n logging.error('Failed to import file {} into database table {}'.format(file_path, file_name), exc_info=1)\n\n finally:\n if cur is not None:\n cur.close()\n\nif __name__ == '__main__':\n run()" } ]
3
crysthianzaar/DjangoChallenge
https://github.com/crysthianzaar/DjangoChallenge
632cacae84e5b617e371e500c279c0baece9dc80
bc41d34dcd667dd01c482caa88f4bcbe1b0342a4
8086a03678f65dacd6c32cc85466f33dce7c2ada
refs/heads/main
2023-06-05T07:56:18.219291
2022-09-21T00:21:14
2022-09-21T00:21:14
376,643,709
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6887871623039246, "alphanum_fraction": 0.6887871623039246, "avg_line_length": 42.79999923706055, "blob_id": "6fea4a1f640fca3955ba77ac79e0fac3b8448c8f", "content_id": "deb3d3aaf88f7f0caa8136a5b6888a7f8cae31e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 149, "num_lines": 10, "path": "/APIcore/urls.py", "repo_name": "crysthianzaar/DjangoChallenge", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n path('api/', include('rest_auth.urls')),\n path('api/', include('articles.urls')), #This is not good practice, I realized the need for this conflict only at the end of the delivery, sorry.\n path('api/admin/', include('articles.urls')),\n path('api/sign-up/', include('rest_auth.registration.urls')),\n path('admin/', admin.site.urls),\n]" }, { "alpha_fraction": 0.6453700065612793, "alphanum_fraction": 0.6645774245262146, "avg_line_length": 37.33333206176758, "blob_id": "55695af55a98f2065cfcd52af317168fbb38e5ac", "content_id": "e66b9c927d90693f99e4621df3a7c2b087a7f2dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4952, "license_type": "no_license", "max_line_length": 224, "num_lines": 129, "path": "/README.md", "repo_name": "crysthianzaar/DjangoChallenge", "src_encoding": "UTF-8", "text": "# Django Challenge\n## _prerequisites:_\n\n- Python 3\n- Git\n- Docker\n- Docker Compose\n\n### STAR PROJECT IN DEVELOPMENT: \n- Clone this project\n- Run Docker Compose: `docker-compose up`\n- Make makemigrations: `docker-compose exec web python manage.py makemigrations`\n- Make Migrate: `docker-compose exec web python manage.py migrate`\n- Create a superuser: `docker-compose exec web python manage.py createsuperuser`\n\n### DEPLOY PROJECT IN PRODUCTION:\n- Set `DEBUG = False` in `APIcore\\settings.py`\n- Add the server domain to the `ALLOWED_HOSTS` in `APIcore\\settings.py`\n- Docker Compose: `docker-compose -f docker-compose.yml -f production.yml up -d` [More Information](https://docs.docker.com/compose/production/)\n- Createsuperuser in production\n### _Features Delivered:_\n\n- Login API: `/api/login/`\n- Sign-up API: `/api/sign-up/`\n- CRUD `/api/admin/authors/`\n- CRUD `/api/admin/articles/`\n- `/api/articles/?category=:slug`\n- `/api/articles/:id/`\n## API documentation\n- Authentication Method in Postman: `Basic Auth: username and password` [Example](https://i.ibb.co/8bgycqH/imagem-2021-06-17-100118.png) \n\n[![Run in Postman](https://run.pstmn.io/button.svg)](https://app.getpostman.com/run-collection/eca4f5e62ae19caea527)\n\n| EndPoint | Authentication | Parameters | Methods | Comments | \n|----------------|---------------|---------------|----------------|-----------|\n| `/api/login/` | All | username , password | POST | - \n| `/api/sign-up/`| All | username, email, password1, password1 | POST | -\n| `/api/logout/` | IsAuthenticated | | POST | -\n| `/api/user/` | IsAdmin | | GET | -\n| `/api/admin/authors/` | IsAdmin | name, picture | GET, POST, PUT , DELETE |\n| `/api/admin/articles/` | IsAdmin | category, author,title, summary, firstParagraph, body | GET, POST, PUT , DELETE |\n| `/api/articles/?category=:slug` | IsAdmin | category | GET | partially reached the demand, authentication is for admins only\n| `/api/articles/:id/` | IsAdmin | id | GET | Partially reached the demand, authentication is for admins only\n\n\n### Final remarks:\n- The authentication and CRUD Admin part has ended.\n- The query part for authenticated and unauthenticated users was partially terminated as a change had to be made to the original requirements\n- There are some differences from the initial requirements, such as endpoint change, author is not an object and there are no differences between an authenticated and unauthenticated user in the api `/api/all/articles/:id/` \n-----\n\n\n## Description\n\n**Challenge goal**: The purpose of this challenge is to give an overall understanding of a backend application. You’ll be implementing a simplified version of a news provider API. The concepts that you’re going to apply are:\n\n- REST architecture;\n- Authentication and permissions;\n- Data modeling and migrations;\n- PostgreSQL database;\n- Query optimization;\n- Serialization;\n- Production builds (using Docker).\n\n**Target level**: This is an all around challenge that cover both juniors and experience devs based on the depth of how the concepts were applied.\n\n**Final accomplishment**: By the end of this challenge you’ll have a production ready API.\n\n## Acceptance criteria\n\n- Clear instructions on how to run the application in development mode\n- Clear instructions on how to run the application in a Docker container for production\n- A good API documentation or collection\n- Login API: `/api/login/`\n- Sign-up API: `/api/sign-up/`\n- Administrator restricted APIs:\n - CRUD `/api/admin/authors/`\n - CRUD `/api/admin/articles/`\n- List article endpoint `/api/articles/?category=:slug` with the following response:\n```json\n[\n {\n \"id\": \"39df53da-542a-3518-9c19-3568e21644fe\",\n \"author\": {\n \"id\": \"2d460e48-a4fa-370b-a2d0-79f2f601988c\",\n \"name\": \"Author Name\",\n \"picture\": \"https://picture.url\"\n },\n \"category\": \"Category\",\n \"title\": \"Article title\",\n \"summary\": \"This is a summary of the article\"\n },\n ...\n]\n```\n- Article detail endpoint `/api/articles/:id/` with different responses for anonymous and logged users:\n\n **Anonymous**\n ```json\n {\n \"id\": \"39df53da-542a-3518-9c19-3568e21644fe\",\n \"author\": {\n \"id\": \"2d460e48-a4fa-370b-a2d0-79f2f601988c\",\n \"name\": \"Author Name\",\n \"picture\": \"https://picture.url\"\n },\n \"category\": \"Category\",\n \"title\": \"Article title\",\n \"summary\": \"This is a summary of the article\",\n \"firstParagraph\": \"<p>This is the first paragraph of this article</p>\"\n }\n ```\n\n **Logged user**\n ```json\n {\n \"id\": \"39df53da-542a-3518-9c19-3568e21644fe\",\n \"author\": {\n \"id\": \"2d460e48-a4fa-370b-a2d0-79f2f601988c\",\n \"name\": \"Author Name\",\n \"picture\": \"https://picture.url\"\n },\n \"category\": \"Category\",\n \"title\": \"Article title\",\n \"summary\": \"This is a summary of the article\",\n \"firstParagraph\": \"<p>This is the first paragraph of this article</p>\",\n \"body\": \"<div><p>Second paragraph</p><p>Third paragraph</p></div>\"\n }\n ```\n\n" }, { "alpha_fraction": 0.6616256833076477, "alphanum_fraction": 0.6748582124710083, "avg_line_length": 30.117647171020508, "blob_id": "920db239adf632069a90fd38988d97dedb6ab243", "content_id": "b6b350edff80e178691a73f23b17025aaea08a56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1058, "license_type": "no_license", "max_line_length": 90, "num_lines": 34, "path": "/articles/models.py", "repo_name": "crysthianzaar/DjangoChallenge", "src_encoding": "UTF-8", "text": "from django.db import models\nimport uuid\nfrom django.utils.text import slugify\n\nSTATUS_CHOICES = (\n ('draft', 'Draft'),\n ('published', 'Published'),\n)\n\nclass Author(models.Model):\n name = models.CharField(max_length=250)\n picture = models.URLField()\n\n def __str__(self):\n return self.name\n \nclass Articles(models.Model):\n category = models.CharField(max_length=150)\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n title = models.CharField(max_length=150)\n summary = models.CharField(max_length=250)\n firstParagraph = models.TextField()\n body = models.TextField()\n status = models.CharField(max_length = 10, choices = STATUS_CHOICES, default ='draft')\n slug = models.SlugField(auto_created= True)\n published_at = models.DateTimeField(auto_now_add = True)\n updated = models.DateTimeField(auto_now = True)\n\n def save(self, *args, **kwargs):\n self.category = slugify(self.category)\n super(Articles, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.title\n" }, { "alpha_fraction": 0.6836734414100647, "alphanum_fraction": 0.6836734414100647, "avg_line_length": 29.153846740722656, "blob_id": "20bd848e746f6ba389c3f7f6886ea52042b132a0", "content_id": "52af5858d4dfaa0a4df507c1ae17766e480865d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "no_license", "max_line_length": 98, "num_lines": 13, "path": "/articles/serializers.py", "repo_name": "crysthianzaar/DjangoChallenge", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import Articles, Author\n\n\nclass AuthorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Author\n fields = '__all__'\n\nclass ArticleSerializer(serializers.ModelSerializer):\n class Meta:\n model = Articles\n fields = ['id','category', 'author', 'title', 'summary','firstParagraph','body','status',]\n" }, { "alpha_fraction": 0.6987577676773071, "alphanum_fraction": 0.6987577676773071, "avg_line_length": 31.299999237060547, "blob_id": "8025a01ae453f7dbbf3a9e28b20c3662804505e7", "content_id": "f57d0b8cee4bf805996ee63af8196a4f17bb7ce1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 70, "num_lines": 10, "path": "/articles/urls.py", "repo_name": "crysthianzaar/DjangoChallenge", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import AuthorDetail,AuthorList, ArticleDetail, ArticleList\n\n\nurlpatterns = [\n path('authors/<int:pk>', AuthorDetail.as_view()),\n path('authors/', AuthorList.as_view()),\n path('articles/', ArticleList.as_view()),\n path('articles/<slug:slug>', ArticleDetail.as_view()),\n]" }, { "alpha_fraction": 0.6834085583686829, "alphanum_fraction": 0.6927201151847839, "avg_line_length": 35.515464782714844, "blob_id": "0275083476f89b3fdf2c348b7b7f5d3718372dd4", "content_id": "aeb4614077d576b847069096777161b27a2be446", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3544, "license_type": "no_license", "max_line_length": 79, "num_lines": 97, "path": "/articles/views.py", "repo_name": "crysthianzaar/DjangoChallenge", "src_encoding": "UTF-8", "text": "from django.utils.text import slugify\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import permission_classes\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated, AllowAny \nfrom rest_framework import status, generics\nfrom django.http import Http404\nfrom .serializers import ArticleSerializer, AuthorSerializer\nfrom .models import Author, Articles\n\n\n@permission_classes([IsAdminUser])\nclass AuthorDetail(APIView):\n\n def get_object(self, pk):\n try:\n return Author.objects.get(pk=pk)\n except Author.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n author = self.get_object(pk)\n serializer = AuthorSerializer(author)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n author = self.get_object(pk)\n serializer = AuthorSerializer(author, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n author = self.get_object(pk)\n author.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n@permission_classes([IsAdminUser])\nclass AuthorList(APIView):\n def get(self, request, format=None):\n author = Author.objects.all()\n serializer = AuthorSerializer(author, many=True)\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = AuthorSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@permission_classes([IsAdminUser])\nclass ArticleList(generics.ListAPIView):\n filter_backends = [DjangoFilterBackend]\n model = Articles\n serializer_class = ArticleSerializer\n\n queryset = Articles.objects.all()\n serializer_class = ArticleSerializer\n filter_backends = [DjangoFilterBackend]\n filterset_fields = ['category']\n\n def post(self, request, format=None):\n serializer = ArticleSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@permission_classes([IsAdminUser])\nclass ArticleDetail(APIView):\n\n def get_object(self, slug):\n try:\n return Articles.objects.get(slug=slug)\n except Articles.DoesNotExist:\n raise Http404\n\n def get(self, request, slug, format=None):\n articles = self.get_object(slug)\n serializer = ArticleSerializer(articles)\n return Response(serializer.data)\n\n def put(self, request, slug, format=None):\n articles = self.get_object(slug)\n serializer = ArticleSerializer(articles, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, slug, format=None):\n articles = self.get_object(slug)\n articles.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n" } ]
6
Joohee-Park/DeepQA
https://github.com/Joohee-Park/DeepQA
6fb7c75dda356a6345c9480d5b56af562698bc74
f0feb0ee8fc06e9f63e3f4bedde09ed9ad74078e
a5cd49d966e6f417bd660ad396c4b7c43d4f187a
refs/heads/master
2020-04-17T18:39:28.923605
2016-08-29T02:09:06
2016-08-29T02:09:06
66,529,568
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.680232584476471, "alphanum_fraction": 0.6991279125213623, "avg_line_length": 26.157894134521484, "blob_id": "c02247837f1f9a6d67a4e6f2dbbf451e2c213ecf", "content_id": "55e7dfdc8a4575b78f789ee6915b4c37d1ed90a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2064, "license_type": "no_license", "max_line_length": 74, "num_lines": 76, "path": "/main.py", "repo_name": "Joohee-Park/DeepQA", "src_encoding": "UTF-8", "text": "# import keras\nimport lib.tensor as tensor\nimport lib.data as data\nimport os\n\nfrom keras.layers import Input, Dense, LSTM\nfrom keras.models import Model, load_model\nimport numpy as np\n\nembeDim = 98\nmaxlen = 500\nnb_epoch = 50\nrnnDim = 128\nanswerDim = 50\nbatch_size = 100\n\nROOT_DIR = os.path.dirname(__file__)\n# [0] Prepare answer dictionary\nprint(\"[0] Prepare Answer Dictionary\")\nansToidx, idxToans = data.answer_dict()\n\n# [1] Prepare Training Data\nprint(\"[1] Prepare Training Data\")\n_training_sentence, _training_answer = data.prepareTrainingData()\n\n# [1.1] Cut the residual training data to fit batch size\ntraining_size = _training_answer.shape[0]\ntraining_size -= (training_size % batch_size)\n\ntraining_sentence = _training_sentence[0:training_size,:,:]\ntraining_answer = _training_answer[0:training_size,:]\n\nprint(\"[1] Number of training instances is \" + str(training_size))\n\nprint(\"[1] Training Label sanity check : \" , end=\"\")\n\nif np.sum(np.sum(training_answer)) == training_size:\n print(\"PASSED\")\nelse:\n print(\"FAILED\")\n exit()\n\n# [2] Define DeepQA Models\nprint(\"[2] Define DeepQA Models\")\n\nMODEL_PATH = \"Model/model.h5\"\nif os.path.exists(MODEL_PATH):\n print(\"[2] Trained model already exist. Load the existing model\")\n DeepQA = load_model(MODEL_PATH)\nelse:\n print(\"[2] Trained model not found. Start to build a fresh model\")\n\n sentence = Input(batch_shape=(batch_size, maxlen, embeDim))\n e1 = LSTM(rnnDim, activation='tanh', return_sequences=True)(sentence)\n e2 = LSTM(rnnDim, activation='tanh')(e1)\n prediction = Dense(answerDim, activation='softmax')(e2)\n\n DeepQA = Model(sentence, prediction)\n\n print(DeepQA.summary())\n\n DeepQA.compile(optimizer='adadelta', loss='rms', metrics=['accuracy'])\n\n# [3] Train the Model\nprint(\"[3] Train the Model\")\nDeepQA.fit(training_sentence, training_answer,\n shuffle=True,\n nb_epoch=nb_epoch,\n batch_size=batch_size)\n\nif os.path.exists(MODEL_PATH):\n os.remove(MODEL_PATH)\nDeepQA.save(MODEL_PATH)\nprint(\"[3] Successfully save the Model\")\n\n# [4] Test\n" }, { "alpha_fraction": 0.5537415146827698, "alphanum_fraction": 0.5588435530662537, "avg_line_length": 34.011905670166016, "blob_id": "20340e29e0c643013cd2765439b0d0947e23bfdc", "content_id": "14cd5f40aa122808751637fdb653e6f3fad104fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2940, "license_type": "no_license", "max_line_length": 117, "num_lines": 84, "path": "/lib/data.py", "repo_name": "Joohee-Park/DeepQA", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport lib.tensor as tensor\n\n# Answers in this dictionary doesn't have white space\ndef answer_dict():\n ROOT_DIR = os.path.dirname(os.path.dirname(__file__))\n\n try:\n f = open(ROOT_DIR + \"/Data/answer_candidates.txt\", \"r\", encoding=\"utf-8\")\n except:\n print(\"Can not find /Data/answer_candidates.txt \")\n return\n\n ansToidx = {}\n idxToans = {}\n for index, _answer in enumerate(f.readlines()):\n answer = tensor.preprocess(_answer.replace(\"\\n\", \"\")).replace(\" \",\"\")\n ansToidx[answer] = index\n idxToans[index] = answer\n\n return ansToidx, idxToans\n\n#This function converts .txt data into 3-d tensors\ndef toCorpusTensor(file_list):\n\n for file_name in file_list :\n f = open(file_name, \"r\", encoding=\"utf-8\")\n ans2idx, idx2ans = answer_dict()\n\n sentenceTensorList = []\n answerTensorList = []\n\n for line in f.readlines():\n # Read a line\n try:\n _title, sentence = line.replace(\"\\n\",\"\").split(\"\\t\")\n except :\n continue\n\n title = tensor.preprocess(_title)\n\n hit_flag = False\n for entry in ans2idx:\n if title in entry or entry in title :\n\n # Make sentence tensor\n try:\n sentenceTensor = tensor.toTensor(sentence)\n except Exception as es :\n #print(\"sentence error : \" + str(es) + \" \" + str(sentence))\n continue\n\n # Make answer tensor\n try:\n title = tensor.preprocess(_title)\n answerTensor = tensor.toAnswerTensor(ans2idx[entry])\n except Exception as ae:\n #print(\"answer error : \" + str(ae) + \" \" + str(sentence))\n continue\n\n hit_flag = True\n\n # Append to the tensors to each list if both tensors have no problem\n if hit_flag:\n answerTensorList.append(answerTensor)\n sentenceTensorList.append(sentenceTensor)\n\n length = len(answerTensorList)\n if length == 0 :\n return\n answerTensor = np.zeros((length, answerTensorList[0].shape[0]))\n sentenceTensor = np.zeros((length, sentenceTensorList[0].shape[0], sentenceTensorList[0].shape[1]))\n for i in range(length):\n answerTensor[i,:] = answerTensorList[i][:]\n sentenceTensor[i,:,:] = sentenceTensorList[i][:,:]\n\n return sentenceTensor, answerTensor\n\ndef prepareTrainingData():\n TRAINING_DATA_DIR = os.path.dirname(os.path.dirname(__file__)) + \"/Data/Training\"\n file_list = [ TRAINING_DATA_DIR + \"/\" + file for file in os.listdir(TRAINING_DATA_DIR) if file.endswith(\".txt\") ]\n print(\"[1] Number of Training Text file is \" + str(len(file_list)))\n return toCorpusTensor(file_list)" }, { "alpha_fraction": 0.44340425729751587, "alphanum_fraction": 0.45872339606285095, "avg_line_length": 44.19230651855469, "blob_id": "34cc755dcdc438895eb28a4f8b85e6d1900f9b81", "content_id": "3d1a8483272b7761b117905747f9cd4ba3d4571b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1391, "license_type": "no_license", "max_line_length": 152, "num_lines": 26, "path": "/lib/korean.py", "repo_name": "Joohee-Park/DeepQA", "src_encoding": "UTF-8", "text": "# This function returns stream of unfolded Korean alphabet given regular Korean sentences.\n# e.g 자연어처리 -> ㅈㅏㅇㅕㄴㅇㅓㅊㅓㄹㅣ\n\ndef unfold(sentence):\n\n first = [ \"ㄱ\", \"ㄲ\", \"ㄴ\", \"ㄷ\", \"ㄸ\", \"ㄹ\", \"ㅁ\", \"ㅂ\", \"ㅃ\", \"ㅅ\", \"ㅆ\", \"ㅇ\", \"ㅈ\", \"ㅉ\", \"ㅊ\", \"ㅋ\", \"ㅌ\", \"ㅍ\", \"ㅎ\" ]\n middle = [ \"ㅏ\", \"ㅐ\", \"ㅑ\", \"ㅒ\", \"ㅓ\", \"ㅔ\", \"ㅕ\", \"ㅖ\", \"ㅗ\", \"ㅘ\", \"ㅙ\", \"ㅚ\", \"ㅛ\", \"ㅜ\",\"ㅝ\", \"ㅞ\", \"ㅟ\", \"ㅠ\", \"ㅡ\", \"ㅢ\", \"ㅣ\" ]\n last = [ \"\", \"ㄱ\", \"ㄲ\", \"ㄳ\", \"ㄴ\", \"ㄵ\", \"ㄶ\", \"ㄷ\", \"ㄹ\", \"ㄺ\", \"ㄻ\", \"ㄼ\", \"ㄽ\", \"ㄾ\", \"ㄿ\", \"ㅀ\", \"ㅁ\", \"ㅂ\", \"ㅄ\", \"ㅅ\", \"ㅆ\", \"ㅇ\", \"ㅈ\", \"ㅊ\", \"ㅋ\", \"ㅌ\", \"ㅍ\", \"ㅎ\" ]\n\n result = []\n for letter in sentence :\n\n if 0xAC00 <= ord(letter) and ord(letter) <= 0xD7AF:\n korean_value = ord(letter) - 0xAC00\n last_index = int(korean_value % 28 )\n middle_index = int(((korean_value - last_index) / 28) % 21)\n first_index = int((((korean_value - last_index) / 28) - middle_index) / 21)\n result.append(first[first_index])\n result.append(middle[middle_index])\n result.append(last[last_index])\n else :\n # 한글 모아쓰기의 범위가 아닌 것들은 그냥 그대로 리턴\n # e.g ㅋㅋㅋ, abc\n result.append(letter)\n\n return \"\".join(result)\n" }, { "alpha_fraction": 0.4849397540092468, "alphanum_fraction": 0.5491195321083069, "avg_line_length": 33.79838562011719, "blob_id": "f41ccd974ef39279590e5ef34e2b4652c4ecb7b8", "content_id": "1b43f1bf76903e1c4228003e1c71877acd78b3e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4432, "license_type": "no_license", "max_line_length": 125, "num_lines": 124, "path": "/lib/tensor.py", "repo_name": "Joohee-Park/DeepQA", "src_encoding": "UTF-8", "text": "import re\nimport numpy as np\nimport lib.korean as korean\n\n_embedim = 98\n_maxlen = 500\n_answerDim = 50\n\ndict = { \"ㄱ\":0, \"ㄲ\":1, \"ㄴ\":2, \"ㄷ\":3, \"ㄸ\":4, \"ㄹ\":5, \"ㅁ\":6, \"ㅂ\":7, \"ㅃ\":8, \"ㅅ\":9, \"ㅆ\":10, \"ㅇ\":11, \"ㅈ\":12, \"ㅉ\":13,\n \"ㅊ\":14, \"ㅋ\":15, \"ㅌ\":16, \"ㅍ\":17, \"ㅎ\":18, \"ㅏ\":19, \"ㅐ\":20, \"ㅑ\":21, \"ㅒ\":22, \"ㅓ\":23, \"ㅔ\":24,\n \"ㅕ\":25, \"ㅖ\":26, \"ㅗ\":27, \"ㅘ\":28, \"ㅙ\":29, \"ㅚ\":30, \"ㅛ\":31, \"ㅜ\":32,\"ㅝ\":33, \"ㅞ\":34, \"ㅟ\":35, \"ㅠ\":36, \"ㅡ\":37,\n \"ㅢ\":38, \"ㅣ\":39, \"ㄳ\":40, \"ㄵ\":41, \"ㄶ\":42, \"ㄺ\":43, \"ㄻ\":44, \"ㄼ\":45, \"ㄽ\":46, \"ㄾ\":47, \"ㄿ\":48, \"ㅀ\":49, \"ㅄ\":50,\n \"a\":51, \"b\":52, \"c\":53, \"d\":54, \"e\":55, \"f\":56, \"g\":57, \"h\":58, \"i\":59, \"j\":60, \"k\":61, \"l\":62, \"m\":63, \"n\":64,\n \"o\":65, \"p\":66, \"q\":67, \"r\":68, \"s\":69, \"t\":70, \"u\":71, \"v\":72, \"w\":73, \"x\":74, \"y\":75, \"z\":76, \"!\":77, \"\\\"\":78,\n \"?\":79, \".\":80, \",\":81, \"-\":82, \":\":83, \"~\":84, \"%\":85, \"\\'\":86, \"0\":87, \"1\":88, \"2\":89, \"3\":90, \"4\":91, \"5\":92,\n \"6\":93, \"7\":94, \"8\":95, \"9\":96, \" \":97 }\n\n# This function converts sentences into tensors\n# Tensor can express : Korean Alphabet, English Alphabet (lower case), Numbers and punctuation marks\n# Its dimension is 98\ndef toTensor(sentence):\n # Input : Normal sentence e.g \"나는 밥을 먹었다.\"\n # Output : 300 X 98 tensor\n embedim = _embedim\n maxlen = _maxlen\n\n stage_1 = preprocess(sentence)\n stage_2 = korean.unfold(stage_1)\n\n tindex = [97] * maxlen\n for i, letter in enumerate(stage_2) :\n if not letter in dict :\n continue\n else :\n tindex[i] = dict[letter]\n\n tensor = np.zeros((maxlen,embedim))\n for i in range(len(tindex)):\n tensor[i][tindex[i]] = 1\n\n return tensor\n\n# This function converts tensor into sentenes\n# Input dimension : 300 X 98\n# Output : Corresponding sentence\ndef toSentence(tensor):\n\n embedim = _embedim\n maxlen = _maxlen\n if tensor.shape != (maxlen, embedim) :\n print(\"Tensor dimension doesn't match to (\" + str(embedim) + \", \" + str(maxlen) + \"), given :\" + str(tensor.shape))\n return\n\n inv_dict = {v:k for k,v in dict.items()}\n\n result = \"\"\n # Check the tensor and lookup in the inverted dictionary\n for i in range(maxlen):\n tindex = 0\n for j in range(embedim):\n if tensor[i][j] != 0 :\n tindex = j\n break\n result += inv_dict[tindex]\n\n return result\n\ndef toAnswerTensor(index):\n resultTensor = np.zeros(_answerDim)\n resultTensor[index] = 1\n return resultTensor\n\n# This function preprocesses the sentence\n# 1. It removes the irregular space\n# 2. If it does not end with \".\", then append it\n# 3. It removes the characters between parenthesis\n# 4. If it contains out-of-scope characters, it removes it\ndef preprocess(sentence):\n\n if len(sentence) < 1:\n return sentence\n\n # [1] It removes the characters between parenthesis : (),[],{}\n # ? in regex means \"greedy\"\n sentence = re.sub(r\"\\(.*?\\)\",\"\", sentence)\n sentence = re.sub(r\"\\[.*?\\]\",\"\", sentence)\n sentence = re.sub(r\"\\{.*?\\}\",\"\", sentence)\n\n # [2] If it contains out-of-scope characters, remove it\n # Korean Syllable : \\uAC00-\\uD7AF\n # Korean Alphabet : \\u1100-\\u11FF\n # Alphanumeric and punctuation marks : \\u0021-\\u007E\n sentence = re.sub('[^\\uAC00-\\uD7AF\\u1100-\\u11FF\\u0021-\\u007E ]+',\"\",sentence)\n\n # [3] Some Preprocessing\n # Replace various whitespace into normal space\n sentence = re.sub('[\\s]+',\" \",sentence)\n # Convert to lower-case\n sentence = sentence.lower()\n\n # If out-of-string-index error occurs, just ignore it\n try:\n # [3] It removes start-space\n if sentence[0] == ' ':\n sentence = sentence[1:]\n\n except:\n return sentence\n\n return sentence\n\n# Split the sentence out of text corpus\n# Returns the list of sentence\ndef splitSentence(text):\n result = []\n # Do not split the sentence for the case of \"0.3\"\n text = re.sub(r'([0-9])\\.([0-9])',r'\\1@\\2',text)\n # Do not split the sentence for the case of \"e.g\", \"cf.\", \"St.\", \"st.\", \"s.t\"\n text = text.replace(\"e.g\", \"e@g\").replace(\"cf.\", \"cf@\").replace(\"St.\", \"St@\").replace(\"st.\", \"st@\").replace(\"s.t\", \"s@t\")\n for line in text.split(\".\"):\n line = line.replace(\"@\",\".\")\n if len(line) > 0:\n result.append(line)\n return result\n\n" } ]
4
valbertovc/blog_django_bootstrap_ajax
https://github.com/valbertovc/blog_django_bootstrap_ajax
4af390decdfadf6feb9e16e3a046cc4df7a3f486
738a352d78d4a11bf782cd2b7a7bbc8126fa1087
421877575dff90841714c851e0153d09146eeae2
refs/heads/master
2018-01-11T20:36:21.792058
2015-12-12T16:51:09
2015-12-12T16:51:09
43,775,679
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7172632813453674, "alphanum_fraction": 0.7186225652694702, "avg_line_length": 39.87036895751953, "blob_id": "ca2835a1e09ba8630cda43311c569e72a822fe29", "content_id": "5133117778ff53ae599ab25e913ddfb9f4a1548d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2207, "license_type": "permissive", "max_line_length": 171, "num_lines": 54, "path": "/blog_django_bootstrap_ajax/urls.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom core.views import PostListView, ContactView\nfrom core.models import Post\nfrom core.models import Category\nfrom core.models import Tag\nfrom accounts.models import UserProfile\nfrom django.views.generic import TemplateView\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom ajax_select import urls as ajax_select_urls\n\n#Import dajaxice modules\nfrom dajaxice.core import dajaxice_autodiscover, dajaxice_config\ndajaxice_autodiscover()\n\n\nurlpatterns = patterns('',\n url(r'^ajax_select/', include(ajax_select_urls)),\n url(r'^admin/', include(admin.site.urls)),\n\n #Rich textarea editor\n (r'^ckeditor/', include('ckeditor_uploader.urls')),\n\n #About page\n url(r'^blog/about$', 'core.views.about', name='about'),\n\n #Contact page\n url(r'^blog/contact$', ContactView.as_view(), name='contact'),\n\n #Posts page (home)\n url(r'^$', PostListView.as_view(), name='post-list'),\n url(r'^blog/(?P<pk>[0-9]+)/$', 'core.views.view_post', name='post-detail'),\n url(r'^blog/category/$', ListView.as_view(model=Category), name='category-list'),\n url(r'^blog/category/(?P<slug>[\\w -.?]+)/*$', DetailView.as_view(model=Category, slug_field='name', context_object_name='category'), name='category-detail'),\n\n url(r'^accounts/profile/(?P<slug>[\\w-]+)/$', DetailView.as_view(model=UserProfile, slug_field='user__username', context_object_name='profile'), name='profile-detail'),\n\n url(r'^blog/tag/(?P<slug>[\\w -.?]+)/$', DetailView.as_view(model=Tag, slug_field='name', context_object_name='tag'), name='tag-detail'),\n url(r'^blog/tag/$', ListView.as_view(model=Tag), name='tag-list'),\n\n #Configure Dajaxice urls\n url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),\n\n) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n\nurlpatterns += staticfiles_urlpatterns()\n" }, { "alpha_fraction": 0.5055487155914307, "alphanum_fraction": 0.5228113532066345, "avg_line_length": 28, "blob_id": "3e4d6f94a18c26be3ab1c443b59bb78fee6cf22d", "content_id": "aa3af21bca1b2f91493e81b558a139e9ed858d85", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 811, "license_type": "permissive", "max_line_length": 76, "num_lines": 28, "path": "/core/templates/core/category_detail.html", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}\n{% load humanize %}\n{% block title %} {{ category.name }} {% endblock %}\n\n{% block page_header %}\n <div class=\"row header header-{{category.color}} content-underlay\">\n <div class=\"container\">\n <h1 class=\"text-{{category.color}}\">{{category.name}}</h1>\n </div>\n </div>\n{% endblock %}\n\n{% block content %}\n <div class=\"row\">\n \t{% for post in category.posts.all %}\n <div class=\"col-xs-12 col-sm-6 col-md-4\">\n <div class=\"thumbnail\">\n <img src=\"http://placehold.it/460x200\" alt=\"\">\n <div class=\"caption\">\n <h3>{{post.title}}</h3>\n <p>{{post.resume|safe}}</p>\n <p><a href=\"{{post.get_absolute_url}}\">Continue lendo...</a></p>\n </div>\n </div>\n </div>\n \t{% endfor %}\n </div>\n{% endblock %}" }, { "alpha_fraction": 0.594345211982727, "alphanum_fraction": 0.5955356955528259, "avg_line_length": 27.243698120117188, "blob_id": "d91955af605f71197a270c5f69961939e3729273", "content_id": "05b009752a13a93e526efaf7a9aefcacf0b8b026", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3360, "license_type": "permissive", "max_line_length": 46, "num_lines": 119, "path": "/accounts/choices.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "SOCIAL_CHOICES = {\n 'adn': 'adn',\n 'android': 'android',\n 'angellist': 'angellist',\n 'apple': 'apple',\n 'behance': 'behance',\n 'behance-square' : 'behance-square',\n 'bitbucket': 'bitbucket',\n 'bitbucket-square' : 'bitbucket-square',\n 'bitcoin (alias)': 'bitcoin (alias)',\n 'btc': 'btc',\n 'buysellads' : 'buysellads',\n 'cc-amex': 'cc-amex',\n 'cc-discover': 'cc-discover',\n 'cc-mastercard': 'cc-mastercard',\n 'cc-paypal': 'cc-paypal',\n 'cc-stripe': 'cc-stripe',\n 'cc-visa': 'cc-visa',\n 'codepen': 'codepen',\n 'connectdevelop' : 'connectdevelop',\n 'css3' : 'css3',\n 'dashcube' : 'dashcube',\n 'delicious': 'delicious',\n 'deviantart' : 'deviantart',\n 'digg' : 'digg',\n 'dribbble' : 'dribbble',\n 'dropbox': 'dropbox',\n 'drupal' : 'drupal',\n 'empire' : 'empire',\n 'facebook' : 'facebook',\n 'facebook-f (alias)' : 'facebook-f (alias)',\n 'facebook-official': 'facebook-official',\n 'facebook-square': 'facebook-square',\n 'flickr' : 'flickr',\n 'forumbee' : 'forumbee',\n 'foursquare' : 'foursquare',\n 'ge (alias)' : 'ge (alias)',\n 'git': 'git',\n 'git-square' : 'git-square',\n 'github' : 'github',\n 'github-alt' : 'github-alt',\n 'github-square': 'github-square',\n 'gittip (alias)' : 'gittip (alias)',\n 'google' : 'google',\n 'google-plus': 'google-plus',\n 'google-plus-square' : 'google-plus-square',\n 'google-wallet': 'google-wallet',\n 'gratipay' : 'gratipay',\n 'hacker-news': 'hacker-news',\n 'html5': 'html5',\n 'instagram': 'instagram',\n 'ioxhost': 'ioxhost',\n 'joomla' : 'joomla',\n 'jsfiddle' : 'jsfiddle',\n 'lastfm' : 'lastfm',\n 'lastfm-square': 'lastfm-square',\n 'leanpub': 'leanpub',\n 'linkedin' : 'linkedin',\n 'linkedin-square': 'linkedin-square',\n 'linux': 'linux',\n 'maxcdn' : 'maxcdn',\n 'meanpath' : 'meanpath',\n 'medium' : 'medium',\n 'openid' : 'openid',\n 'pagelines': 'pagelines',\n 'paypal' : 'paypal',\n 'pied-piper' : 'pied-piper',\n 'pied-piper-alt' : 'pied-piper-alt',\n 'pinterest': 'pinterest',\n 'pinterest-p': 'pinterest-p',\n 'pinterest-square' : 'pinterest-square',\n 'qq' : 'qq',\n 'ra (alias)' : 'ra (alias)',\n 'rebel': 'rebel',\n 'reddit' : 'reddit',\n 'reddit-square': 'reddit-square',\n 'renren' : 'renren',\n 'sellsy' : 'sellsy',\n 'share-alt': 'share-alt',\n 'share-alt-square' : 'share-alt-square',\n 'shirtsinbulk' : 'shirtsinbulk',\n 'simplybuilt': 'simplybuilt',\n 'skyatlas' : 'skyatlas',\n 'skype': 'skype',\n 'slack': 'slack',\n 'slideshare' : 'slideshare',\n 'soundcloud' : 'soundcloud',\n 'spotify': 'spotify',\n 'stack-exchange' : 'stack-exchange',\n 'stack-overflow' : 'stack-overflow',\n 'steam': 'steam',\n 'steam-square' : 'steam-square',\n 'stumbleupon': 'stumbleupon',\n 'stumbleupon-circle' : 'stumbleupon-circle',\n 'tencent-weibo': 'tencent-weibo',\n 'trello' : 'trello',\n 'tumblr' : 'tumblr',\n 'tumblr-square': 'tumblr-square',\n 'twitch' : 'twitch',\n 'twitter': 'twitter',\n 'twitter-square' : 'twitter-square',\n 'viacoin': 'viacoin',\n 'vimeo-square' : 'vimeo-square',\n 'vine' : 'vine',\n 'vk' : 'vk',\n 'wechat (alias)' : 'wechat (alias)',\n 'weibo': 'weibo',\n 'weixin' : 'weixin',\n 'whatsapp' : 'whatsapp',\n 'windows': 'windows',\n 'wordpress': 'wordpress',\n 'xing' : 'xing',\n 'xing-square': 'xing-square',\n 'yahoo': 'yahoo',\n 'yelp' : 'yelp',\n 'youtube': 'youtube',\n 'youtube-play' : 'youtube-play',\n 'youtube-square' : 'youtube-square',\n}" }, { "alpha_fraction": 0.6439732313156128, "alphanum_fraction": 0.6439732313156128, "avg_line_length": 24.253520965576172, "blob_id": "fa7cf7b2000e67b7d515f49e185dacab149cbe51", "content_id": "5618e59691d8953dbac3c32064008eb2c9b393df", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1792, "license_type": "permissive", "max_line_length": 57, "num_lines": 71, "path": "/core/ajax.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "import json\nfrom dajaxice.decorators import dajaxice_register\nfrom core.models import Post, Comment\n\n@dajaxice_register\ndef recommend_post(request, pk):\n post = Post.objects.get(pk=pk)\n user = request.user\n \n if user.userprofile.is_post_recommended(post):\n user.posts_recommended.remove(post)\n user.save()\n\n else:\n user.posts_recommended.add(post)\n user.save()\n \n total = str(len(post.recommends.all()))\n\n return json.dumps({'total': total, 'pk': post.pk})\n\n@dajaxice_register\ndef recommend_comment(request, pk):\n comment = Comment.objects.get(pk=pk)\n user = request.user\n \n if user.userprofile.is_comment_recommended(comment):\n user.comments_recommended.remove(comment)\n user.save()\n\n else:\n user.comments_recommended.add(comment)\n user.save()\n \n total = str(len(comment.recommends.all()))\n\n return json.dumps({'total': total, 'pk': comment.pk})\n\n@dajaxice_register\ndef like_comment(request, pk):\n comment = Comment.objects.get(pk=pk)\n user = request.user\n \n if user.userprofile.is_comment_liked(comment):\n user.comments_liked.remove(comment)\n user.save()\n\n else:\n user.comments_liked.add(comment)\n user.save()\n \n total = str(len(comment.likes.all()))\n\n return json.dumps({'total': total, 'pk': comment.pk})\n\n@dajaxice_register\ndef unlike_comment(request, pk):\n comment = Comment.objects.get(pk=pk)\n user = request.user\n \n if user.userprofile.is_comment_unliked(comment):\n user.comments_unliked.remove(comment)\n user.save()\n\n else:\n user.comments_unliked.add(comment)\n user.save()\n \n total = str(len(comment.unlikes.all()))\n\n return json.dumps({'total': total, 'pk': comment.pk})" }, { "alpha_fraction": 0.6734610795974731, "alphanum_fraction": 0.6767224073410034, "avg_line_length": 35.07352828979492, "blob_id": "f5b563f7c751246c8be74cb0faf0ec8c0b8b9f72", "content_id": "3485f196db88a77c74a10325147effb42809b0d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2454, "license_type": "permissive", "max_line_length": 82, "num_lines": 68, "path": "/accounts/models.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\nfrom __future__ import unicode_literals\nfrom django.db import models\nfrom django.db.models import signals\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom accounts.choices import SOCIAL_CHOICES\nfrom django.conf import settings\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField('auth.User', related_name='userprofile')\n site = models.URLField()\n bio = models.TextField()\n picture = models.ImageField(upload_to='profiles', blank=True)\n\n def __unicode__(self):\n return u'Profile of user: %s' % self.user.username\n\n def get_absolute_url(self):\n\t\treturn reverse('profile-detail', kwargs={'slug': self.user.username})\n\n def is_post_recommended(self, post):\n ids = []\n for post_recommended in self.user.posts_recommended.all():\n ids.append(post_recommended.pk)\n return post.pk in ids\n\n def is_comment_recommended(self, comment):\n ids = []\n for comment_recommended in self.user.comments_recommended.all():\n ids.append(comment_recommended.pk)\n return comment.pk in ids\n\n def is_comment_liked(self, comment):\n ids = []\n for comment_liked in self.user.comments_liked.all():\n ids.append(comment_liked.pk)\n return comment.pk in ids\n\n def is_comment_unliked(self, comment):\n ids = []\n for comment_unliked in self.user.comments_unliked.all():\n ids.append(comment_unliked.pk)\n return comment.pk in ids\n\n# from django.dispatch import receiver\n# from django.db.models.signals import post_save\n# #faz com que, todo usuário tenha um profile\n# @receiver(post_save, sender=User)\n# def create_user_profile(sender, instance, created, **kwargs):\n# if created:\n# UserProfile.objects.get_or_create(user=instance)\n\nclass SocialNetwork(models.Model):\n icon = models.CharField(max_length=30, choices=SOCIAL_CHOICES.items())\n name = models.CharField(max_length=50)\n url = models.URLField()\n users = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Social',\n through_fields=('social_network', 'user'))\n\n def __unicode__(self):\n return self.name\n\nclass Social(models.Model):\n profile = models.CharField(max_length=100)\n social_network = models.ForeignKey(SocialNetwork)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='social_set')\n" }, { "alpha_fraction": 0.6172301173210144, "alphanum_fraction": 0.6357688307762146, "avg_line_length": 31.75, "blob_id": "b3fbf27b461579c01766ee8e83f8fa7eff302cc4", "content_id": "68cde652adb3343481ba1e0d84879a4b8fadee05", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 917, "license_type": "permissive", "max_line_length": 149, "num_lines": 28, "path": "/core/migrations/0007_auto_20150320_2156.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('core', '0006_auto_20150306_2203'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='comment',\n name='recommends',\n field=models.ManyToManyField(related_query_name='comment_recommended', related_name='comments_recommended', to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='post',\n name='recommends',\n field=models.ManyToManyField(related_query_name='post_recommended', related_name='posts_recommended', to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.7303522825241089, "alphanum_fraction": 0.7303522825241089, "avg_line_length": 27.384614944458008, "blob_id": "e7080b2027e6cc47c6c4e541c19a755b60227258", "content_id": "bbdf316a1c26792d7938534e4e099f2a9b0de625", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 738, "license_type": "permissive", "max_line_length": 59, "num_lines": 26, "path": "/core/templatetags/filters.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "from django import template\nregister = template.Library()\n\[email protected]\ndef is_post_recommended(user, post):\n if not user.is_authenticated():\n return False\n return user.userprofile.is_post_recommended(post)\n\[email protected]\ndef is_comment_recommended(user, comment):\n if not user.is_authenticated():\n return False\n return user.userprofile.is_comment_recommended(comment)\n\[email protected]\ndef is_comment_liked(user, comment):\n if not user.is_authenticated():\n return False\n return user.userprofile.is_comment_liked(comment)\n\[email protected]\ndef is_comment_unliked(user, comment):\n if not user.is_authenticated():\n return False\n return user.userprofile.is_comment_unliked(comment)\n" }, { "alpha_fraction": 0.7057245373725891, "alphanum_fraction": 0.7262969613075256, "avg_line_length": 27.421052932739258, "blob_id": "57795734c669c1d516b1d6f3cd7d9c9c9122ccda", "content_id": "ab9516259ded05f7d0ae46797b7e66a0dbb5cdee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1129, "license_type": "permissive", "max_line_length": 220, "num_lines": 38, "path": "/README.md", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# Blog em Django + JQuery + Bootstrap + Font-awesome #\r\n\r\nEste projeto foi construído apenas com intuito de aprendizado. O objetivo dele foi aprender sobre a tecnologia Python e Django. Além disso, aproveitei a oportunidade para adicionar misturar outros recursos. Versão: 0.1.0\r\n\r\n* Project example: https://github.com/valbertovc/blog_django_bootstrap_ajax\r\n* Demo: http://djangobootstrapajax.valberto.com/ (access: user_test user_test)\r\n\r\n## Conteúdo ##\r\n\r\n* Tecnologias utilizadas\r\n* Funcionalidades\r\n* Recursos do Django\r\n\r\n## Tecnologias utilizadas ##\r\n\r\n* Linguagem de programação: Python 2.7.6\r\n* Desenvolvimento web: Django 1.7.0\r\n* JavaScript: JQuery 1.11.2\r\n* Icones: Font-awesome 4.3.0\r\n* Animações: animate.css\r\n* Front-end: Bootstrap 3.3.2\r\n* WYSIWYG editor: tinymce 4.1\r\n* Ajax: Dajaxice 0.7\r\n\r\n## Instalação das dependências ##\r\n\r\n```shell\r\n$ pip install git+git://github.com/renyi/django-pageviews.git\r\n$ pip install django-wysiwyg\r\n$ pip install django-ckeditor\r\n$ pip install django-dajaxice\r\n$ pip install django-dajax\r\n$ pip install Pillow\r\n```\r\n\r\n## Funcionalidades ##\r\n\r\n##Recursos do Django##\r\n" }, { "alpha_fraction": 0.6819620132446289, "alphanum_fraction": 0.6819620132446289, "avg_line_length": 26.478260040283203, "blob_id": "ade43d680abce3b97640e4cf5a8d0442fb63de46", "content_id": "831812a188e2b050ba58f5ae682d13be65b839fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 632, "license_type": "permissive", "max_line_length": 59, "num_lines": 23, "path": "/core/lookups.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "from ajax_select import register, LookupChannel\nfrom .models import Tag, Category\n\n@register('tags')\nclass TagsLookup(LookupChannel):\n model = Tag\n\n def get_query(self, q, request):\n return self.model.objects.filter(name__icontains=q)\n\n def format_item_display(self, item):\n return u\"<span class='tag'>%s</span>\" % item.name\n\n\n@register('categories')\nclass CategoriesLookup(LookupChannel):\n model = Category\n\n def get_query(self, q, request):\n return self.model.objects.filter(name__icontains=q)\n\n def format_item_display(self, item):\n return u\"<span class='tag'>%s</span>\" % item.name\n" }, { "alpha_fraction": 0.5333333611488342, "alphanum_fraction": 0.7090908885002136, "avg_line_length": 17.33333396911621, "blob_id": "a3543329710512558fb7cf2755017ed00e4183d9", "content_id": "ae010a3aa3f34c54fcb73c34851137f17fc0b27b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 165, "license_type": "permissive", "max_line_length": 23, "num_lines": 9, "path": "/requirements.txt", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "Django==1.8.4\ndjango-ckeditor==5.0.2\ndjango-dajax==0.9.2\ndjango-dajaxice==0.7\ndjango-pageviews==1.0.0\ndjango-wysiwyg==0.7.1\nPillow==3.0.0\npytz==2015.6\nwheel==0.24.0\n" }, { "alpha_fraction": 0.6082589030265808, "alphanum_fraction": 0.6272321343421936, "avg_line_length": 31, "blob_id": "6b7920c94dea060fce29ed8faba41eec0867fd8d", "content_id": "382eb5176766a3a00ae980ae43cec260af4f8b36", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 896, "license_type": "permissive", "max_line_length": 141, "num_lines": 28, "path": "/core/migrations/0008_auto_20150330_2022.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('core', '0007_auto_20150320_2156'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='comment',\n name='like',\n field=models.ManyToManyField(related_query_name='comment_liked', related_name='comments_liked', to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='comment',\n name='unlike',\n field=models.ManyToManyField(related_query_name='comment_unliked', related_name='comments_unliked', to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.7553648352622986, "alphanum_fraction": 0.7553648352622986, "avg_line_length": 22.399999618530273, "blob_id": "8b1d6954317e222969d9c5c51aff13bb4d1e2a72", "content_id": "c242a53b41a25b026bcc3bb5071a14cf9b0ccfd7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "permissive", "max_line_length": 51, "num_lines": 10, "path": "/core/templatetags/active_menu.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "from django import template\nfrom django.core.urlresolvers import reverse\nimport re\n\nregister = template.Library()\n\[email protected]\ndef active(request, url):\n path = reverse(url)\n return 'active' if path in request.path else ''" }, { "alpha_fraction": 0.7422037124633789, "alphanum_fraction": 0.7442827224731445, "avg_line_length": 24.3157901763916, "blob_id": "6322bba2e6561149424f020ef44ea105eeb43e07", "content_id": "1129c4289ec35f2181c67b23ea2e26bb298bb16c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "permissive", "max_line_length": 39, "num_lines": 19, "path": "/core/admin.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib import admin\nfrom core.models import Post\nfrom core.models import Category\nfrom core.models import Comment\nfrom core.models import Tag\nfrom core.forms import PostForm\n\[email protected](Post)\nclass PostAdmin(admin.ModelAdmin):\n # raw_id_fields = ('tags',)\n form = PostForm\n class Meta:\n model = Post\n\nadmin.site.register(Category)\nadmin.site.register(Comment)\nadmin.site.register(Tag)\n" }, { "alpha_fraction": 0.5246583223342896, "alphanum_fraction": 0.5389186143875122, "avg_line_length": 32, "blob_id": "f841c38de800476ad107d8157e71680772639107", "content_id": "5455fbddb300992386a4f816cb4d8c3e41dd2bc5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1683, "license_type": "permissive", "max_line_length": 114, "num_lines": 51, "path": "/accounts/migrations/0004_auto_20150306_2227.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('accounts', '0003_auto_20150228_2022'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Social',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('profile', models.CharField(max_length=100)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SocialNetwork',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('icon', models.CharField(max_length=30)),\n ('name', models.CharField(max_length=50)),\n ('url', models.URLField()),\n ('users', models.ManyToManyField(to=settings.AUTH_USER_MODEL, through='accounts.Social')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='social',\n name='social_network',\n field=models.ForeignKey(to='accounts.SocialNetwork'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='social',\n name='user',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.6418764591217041, "alphanum_fraction": 0.6449275612831116, "avg_line_length": 34.9315071105957, "blob_id": "5b7ca3179610577a0eb7a18a879f789bec793df4", "content_id": "bf1de373b3fd3c48856630afe6f99af978d480d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2622, "license_type": "permissive", "max_line_length": 76, "num_lines": 73, "path": "/core/views.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import redirect, render_to_response, get_object_or_404\nfrom django.shortcuts import render_to_response\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import FormView\nfrom django.template import RequestContext\n\nfrom core.models import Category, Post, Tag\nfrom core.forms import PostForm, CommentForm, ContactForm\n\nclass ContactView(FormView):\n template_name = 'core/contact.html'\n form_class = ContactForm\n success_url = '/blog/contact'\n\n def form_valid(self, form):\n form.send_email()\n return super(ContactView, self).form_valid(form)\n\nclass PostListView(ListView):\n\n model = Post\n paginate_by = 5\n\n def get_context_data(self, **kwargs):\n context = super(PostListView, self).get_context_data(**kwargs)\n context['category_list'] = Category.objects.all()\n context['tag_list'] = Tag.objects.all()\n return context\n\ndef about(request):\n template='core/about.html'\n username = settings.BLOG_OWNER_USER_NAME\n context = {'owner': User.objects.get(username=username)}\n return render_to_response(template, context,\n context_instance=RequestContext(request))\n\n\n@user_passes_test(lambda u: u.is_superuser)\ndef add_post(request):\n form = PostForm(request.POST or None)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect(post)\n return render_to_response('core/add_post.html', \n { 'form': form },\n context_instance=RequestContext(request))\n\ndef view_post(request, pk):\n post = get_object_or_404(Post, pk=pk)\n form = CommentForm(request.POST or None)\n recommend_post_form = CommentForm(request.POST or None)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.publisher = request.user\n comment.post = post\n comment.save()\n return redirect(request.path)\n form.initial['publisher'] = request.user.pk\n return render_to_response('core/post_detail.html',\n {\n 'post': post,\n 'form': form,\n },\n context_instance=RequestContext(request))" }, { "alpha_fraction": 0.7098727822303772, "alphanum_fraction": 0.7192610502243042, "avg_line_length": 34.12765884399414, "blob_id": "13a273654b23b17ba7da705b2084f0767060ba27", "content_id": "6b8a5e2f311f510cf1c810beb937543839ec61ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3311, "license_type": "permissive", "max_line_length": 121, "num_lines": 94, "path": "/core/models.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\nfrom __future__ import unicode_literals\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom ckeditor.fields import RichTextField\nfrom django.conf import settings\n\nCOR_CHOICES = (\n\t('success', 'Verde'),\n\t('info', 'Azul'),\n\t('default', 'Cinza'),\n\t('danger', 'Vermelho'),\n\t('warning', 'Amarelo'),\n)\n\nclass Category(models.Model):\n\tclass Meta:\n\t\tverbose_name = 'Categoria'\n\t\tverbose_name_plural = 'Categorias'\n\n\tname = models.CharField(max_length=255)\n\tcolor = models.CharField(max_length=15, choices=COR_CHOICES)\n\n\tdef __unicode__(self):\n\t\treturn self.name\n\n\tdef percent_representation(self):\n\t\ttotal = float(Post.objects.all().count())\n\t\tnum = float(self.posts.count())\n\t\tif total == 0.0:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn ((num/total) * 100)\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('category-detail', kwargs={'slug': self.name})\n\nclass Tag(models.Model):\n\tname = models.TextField(verbose_name='Nome', max_length=50)\n\tcolor = models.CharField(max_length=15, choices=COR_CHOICES)\n\n\tdef __unicode__(self):\n\t\treturn self.name\n\n\tdef percent_representation(self):\n\t\ttotal = float(Post.objects.all().count())\n\t\tnum = float(self.posts.count())\n\t\tif total == 0.0:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn ((num/total) * 100)\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('tag-detail', kwargs={'slug': self.name})\n\nclass Post(models.Model):\n\tclass Meta:\n\t\tverbose_name = 'Artigo'\n\t\tverbose_name_plural = 'Artigos'\n\n\ttitle = models.CharField(max_length=100, verbose_name='título')\n\tcontent = RichTextField(verbose_name='conteúdo')\n\tpublished_at = models.DateTimeField(verbose_name='publicação', auto_now_add=True)\n\tcategory = models.ForeignKey(Category, related_name=\"posts\", related_query_name=\"post\")\n\tpublisher = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=\"posts\", related_query_name=\"post\")\n\ttags = models.ManyToManyField(Tag, related_name=\"posts\", related_query_name=\"post\")\n\trecommends = models.ManyToManyField(User, related_name='posts_recommended', related_query_name='post_recommended')\n\n\tdef __unicode__(self):\n\t\treturn \"#{} - {} by {}\".format(self.pk, self.title, self.publisher)\n\n\tdef resume(self):\n\t\treturn \" \".join(self.content.split()[:60]) + '...'\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('post-detail', kwargs={'pk': self.pk})\n\nclass Comment(models.Model):\n\tclass Meta:\n\t\tverbose_name = 'Comentário'\n\t\tverbose_name_plural = 'Comentários'\n\n\ttext = models.TextField(verbose_name='deixe um comentário')\n\tapproved = models.BooleanField(default=False, verbose_name='aprovado')\n\tpublisher = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='leitor')\n\tpublished_at = models.DateTimeField(verbose_name='publicação', auto_now_add=True)\n\tpost = models.ForeignKey(Post, verbose_name='post', related_name=\"comments\", related_query_name=\"coment\")\n\trecommends = models.ManyToManyField(User, related_name='comments_recommended', related_query_name='comment_recommended')\n\tlikes = models.ManyToManyField(User, related_name='comments_liked', related_query_name='comment_liked')\n\tunlikes = models.ManyToManyField(User, related_name='comments_unliked', related_query_name='comment_unliked')\n\n\tdef __unicode__(self):\n\t\treturn '{}... by {} em {}'.format(self.texto[:15], self.owner, self.publicacao)\n" }, { "alpha_fraction": 0.6387283205986023, "alphanum_fraction": 0.6387283205986023, "avg_line_length": 31.40625, "blob_id": "01613749054fb419bdb09bbe74ae1bbe35db21a7", "content_id": "9391b32d738543a741a0696ad1405f5fc7691004", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1038, "license_type": "permissive", "max_line_length": 79, "num_lines": 32, "path": "/static/scripts.js", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "$( document ).ready(function() {\n $('[data-toggle=\"tooltip\"]').tooltip();\n\n $('i[data-toggle=\"collapse\"]').click(function (e) \n { $(this).toggleClass('fa-plus fa-minus'); \n });\n\n $('i[data-animation]').hover(\n function(){ $(this).addClass('animated ' + $(this).data('animation')) }, \n function(){ $(this).removeClass('animated ' + $(this).data('animation')) }\n );\n});\n\nfunction recommend_comment(data){\n $('#recommend_comment_'+data.pk).toggleClass('fa-star fa-star-o');\n $('#total_recommends_comment_'+data.pk).html(data.total);\n}\n\nfunction recommend_post(data){\n $('#recommend_post_'+data.pk).toggleClass('fa-star fa-star-o');\n $('#total_recommends_post_'+data.pk).html(data.total);\n}\n\nfunction like_comment(data){\n $('#like_comment_'+data.pk).toggleClass('fa-thumbs-up fa-thumbs-o-up');\n $('#total_like_comment_'+data.pk).html(data.total);\n}\n\nfunction unlike_comment(data){\n $('#unlike_comment_'+data.pk).toggleClass('fa-thumbs-down fa-thumbs-o-down');\n $('#total_unlike_comment_'+data.pk).html(data.total);\n}\n\n" }, { "alpha_fraction": 0.5382513403892517, "alphanum_fraction": 0.5437158346176147, "avg_line_length": 40.82857131958008, "blob_id": "10ad405b8df69b44e0badfc619f732c84845292c", "content_id": "b90d6d815f89f0be26c631cf1e34356dc48bbe6e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2928, "license_type": "permissive", "max_line_length": 179, "num_lines": 70, "path": "/core/migrations/0001_initial.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n ('color', models.CharField(max_length=15, choices=[('success', 'Verde'), ('info', 'Azul'), ('default', 'Cinza'), ('danger', 'Vermelho'), ('warning', 'Amarelo')])),\n ],\n options={\n 'verbose_name': 'Categoria',\n 'verbose_name_plural': 'Categorias',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('text', models.TextField(verbose_name='coment\\xe1rio')),\n ('approved', models.BooleanField(default=False, verbose_name='aprovado')),\n ('published_at', models.DateTimeField(auto_now_add=True, verbose_name='publica\\xe7\\xe3o')),\n ],\n options={\n 'verbose_name': 'Coment\\xe1rio',\n 'verbose_name_plural': 'Coment\\xe1rios',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=100, verbose_name='t\\xedtulo')),\n ('content', models.TextField(verbose_name='conte\\xfado')),\n ('published_at', models.DateTimeField(verbose_name='publica\\xe7\\xe3o')),\n ('category', models.ForeignKey(related_query_name='post', related_name='posts', to='core.Category')),\n ('publisher', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Artigo',\n 'verbose_name_plural': 'Artigos',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='comment',\n name='post',\n field=models.ForeignKey(related_query_name='coment', related_name='comments', verbose_name='post', to='core.Post'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='comment',\n name='publisher',\n field=models.ForeignKey(verbose_name='leitor', to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.5839895009994507, "alphanum_fraction": 0.5905511975288391, "avg_line_length": 27.22222137451172, "blob_id": "71bf235a2cf276d5bd631f0b6d291d17781fd35c", "content_id": "851fc0786e453c002c3a3f33cb3e7efa29bb15e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "permissive", "max_line_length": 114, "num_lines": 27, "path": "/core/migrations/0005_auto_20150302_2210.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0004_tag_color'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='publisher',\n field=models.ForeignKey(related_query_name='post', related_name='posts', to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='post',\n name='tags',\n field=models.ManyToManyField(related_query_name='post', related_name='posts', to='core.Tag'),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.5596465468406677, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 25.115385055541992, "blob_id": "9794d05b2b0df0a6930ced42c60b558a177322f8", "content_id": "564a8a9cd09900035cfbbec8de1f21f37b12b600", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "permissive", "max_line_length": 91, "num_lines": 26, "path": "/core/migrations/0002_auto_20150228_1957.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='comment',\n name='text',\n field=models.TextField(verbose_name='deixe um coment\\xe1rio'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='post',\n name='published_at',\n field=models.DateTimeField(auto_now_add=True, verbose_name='publica\\xe7\\xe3o'),\n preserve_default=True,\n ),\n ]\n" }, { "alpha_fraction": 0.7970802783966064, "alphanum_fraction": 0.7985401749610901, "avg_line_length": 28.65217399597168, "blob_id": "e85e1c2a4d306f5034bd764a73db2f4a79d88cbd", "content_id": "3e967c7b0aad5e66cbed0f8713169e7fa9b54d93", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 685, "license_type": "permissive", "max_line_length": 66, "num_lines": 23, "path": "/accounts/admin.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as DjangoUserAdmin\nfrom django.contrib.auth.models import User\n\nfrom accounts.models import UserProfile\nfrom accounts.models import Social\nfrom accounts.models import SocialNetwork\n\nclass UserSocialInline(admin.TabularInline):\n model = Social\n\nclass UserProfileInline(admin.TabularInline):\n model = UserProfile\n inlines = (UserSocialInline,)\n\nclass UserAdmin(DjangoUserAdmin):\n inlines = (UserProfileInline, UserSocialInline)\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(SocialNetwork)\n\n\n\n" }, { "alpha_fraction": 0.6983606815338135, "alphanum_fraction": 0.7036885023117065, "avg_line_length": 25.813186645507812, "blob_id": "57d7ca95a9d8d6cf8bfc8c5c4925bc5780e84a9b", "content_id": "7affe14fb3460cf87c0ba77d102b4f05acfb8e5b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2440, "license_type": "permissive", "max_line_length": 87, "num_lines": 91, "path": "/blog_django_bootstrap_ajax/settings.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\nfrom __future__ import unicode_literals\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nSECRET_KEY = '(hwaotjt^y49i_8f7&$vq5mzjw!wyb4=o6whr-s9!qhfs1t%=6'\n\nDEBUG = True\nTEMPLATE_DEBUG = True\nALLOWED_HOSTS = []\n\nINSTALLED_APPS = (\n 'core',\n 'accounts',\n 'django_wysiwyg',\n 'ckeditor',\n 'django.contrib.sites',\n 'pageviews',\n 'dajaxice',\n 'dajax',\n 'ajax_select',\n \n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n 'pageviews.middleware.PageViewsMiddleware',\n)\n\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'dajaxice.finders.DajaxiceFinder',\n)\n\nROOT_URLCONF = 'blog_django_bootstrap_ajax.urls'\n\nWSGI_APPLICATION = 'blog_django_bootstrap_ajax.wsgi.application'\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.core.context_processors.request\",\n )\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\nLANGUAGE_CODE = 'pt-BR'\nTIME_ZONE = 'America/Recife'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nAUTH_PROFILE_MODULE = 'accounts.UserProfile'\nDJANGO_WYSIWYG_FLAVOR = \"tinymce\"\nCKEDITOR_UPLOAD_PATH = 'uploads/'\nCKEDITOR_JQUERY_URL = 'http://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'\nBLOG_OWNER_USER_NAME='valberto'\nSITE_ID=1\nSTATIC_URL = '/static/'\nSTATIC_ROOT = BASE_DIR + '/static/'\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale'),\n)\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n" }, { "alpha_fraction": 0.5387324094772339, "alphanum_fraction": 0.5721830725669861, "avg_line_length": 27.399999618530273, "blob_id": "f41ff1013685c59495370ccad2241e9693f5f9a4", "content_id": "543ea7f347bc9ba4564807e8cb68f59faff7ffb3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "permissive", "max_line_length": 189, "num_lines": 20, "path": "/core/migrations/0004_tag_color.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0003_auto_20150301_2019'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='tag',\n name='color',\n field=models.CharField(default='success', max_length=15, choices=[('success', 'Verde'), ('info', 'Azul'), ('default', 'Cinza'), ('danger', 'Vermelho'), ('warning', 'Amarelo')]),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5512820482254028, "alphanum_fraction": 0.5769230723381042, "avg_line_length": 38.5, "blob_id": "bc469298465a525235cd2b3b3ec447f45dae98cf", "content_id": "fa83e491c89d16b216361d5242530cdf3684c3be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "permissive", "max_line_length": 49, "num_lines": 2, "path": "/core/utils.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "def join_with_commas(frase):\n\treturn \", \".join(frase[:-1]) + \" e \" + frase[-1]" }, { "alpha_fraction": 0.44694244861602783, "alphanum_fraction": 0.45773380994796753, "avg_line_length": 28.289474487304688, "blob_id": "93730f18d8ac3157cab3d7ba7ccf00c70698fdd1", "content_id": "90425273df1e07c9ab217c3b2e651eb22f151f94", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1112, "license_type": "permissive", "max_line_length": 77, "num_lines": 38, "path": "/core/templates/core/post_list.html", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}\n{% block title %} Meu Blog {% endblock %}\n{% block page_header %}\n <div class=\"row header content-underlay\">\n <div class=\"container\">\n <h1>Meu Blog</h1>\n </div>\n </div>\n{% endblock %}\n{% block content %}\n\t\t<div class=\"row\">\n <div class=\"col-xs-12 col-sm-9\">\n \t\t {% include 'core/partials/_posts.html' %}\n \t\t</div>\n <div class=\"col-xs-12 col-sm-3\">\n <h4><i class=\"fa fa-flag fa-fw\"></i> Categorias</h4>\n <aside>\n <ul class=\"nav nav-pills nav-stacked\">\n {% for category in category_list %}\n <li>\n <a href=\"{{category.get_absolute_url}}\">{{category.name}}</a>\n </li>\n {% endfor %}\n </ul>\n </aside>\n <h4><i class=\"fa fa-tags fa-fw\"></i> Tags</h4>\n <aside>\n <ul class=\"nav nav-pills nav-stacked\">\n {% for tag in tag_list %}\n <li>\n <a href=\"{{tag.get_absolute_url}}\">{{tag.name}}</a>\n </li>\n {% endfor %}\n </ul>\n </aside>\n </div>\n </div>\n{% endblock %}" }, { "alpha_fraction": 0.6931818127632141, "alphanum_fraction": 0.7045454382896423, "avg_line_length": 28, "blob_id": "68b53efb6af4fb96fce06a7a3383e923b157c8c3", "content_id": "e0073e467e32e56c0ff7c06ceca9602dd34959c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "permissive", "max_line_length": 39, "num_lines": 3, "path": "/accounts/forms.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\nfrom __future__ import unicode_literals\nfrom django import forms\n\n" }, { "alpha_fraction": 0.6917539238929749, "alphanum_fraction": 0.6924083828926086, "avg_line_length": 33.727272033691406, "blob_id": "a097dd32f9ce6a2d621e04d09795cfa7eed408a8", "content_id": "ec18bc0d80c3edc177dbc7ebf6b35fdc97fdbf81", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1528, "license_type": "permissive", "max_line_length": 87, "num_lines": 44, "path": "/core/forms.py", "repo_name": "valbertovc/blog_django_bootstrap_ajax", "src_encoding": "UTF-8", "text": "# -*- coding:utf8 -*-\nfrom __future__ import unicode_literals\nfrom django import forms\nfrom django.contrib import admin\nfrom django.core.mail import BadHeaderError\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.core.mail import send_mail\nfrom core.models import Post, Comment\nfrom ajax_select.fields import AutoCompleteSelectMultipleField, AutoCompleteSelectField\n\n\nclass PostForm(forms.ModelForm):\n tags = AutoCompleteSelectMultipleField('tags', required=False, help_text=None)\n category = AutoCompleteSelectField('categories', required=False, help_text=None)\n\n class Meta:\n model = Post\n exclude = ['published_at', 'publisher']\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ['text']\n\n\nclass ContactForm(forms.Form):\n name = forms.CharField(help_text=\"Informe seu nome completo\")\n email = forms.EmailField(help_text=\"Seu e-mail para contato\")\n topic = forms.CharField(help_text=\"Assunto que deseja tratar\")\n message = forms.CharField(widget=forms.Textarea)\n\n def send_email(self):\n name = self.cleaned_data.get('name', '')\n subject = self.cleaned_data.get('topic', '')\n message = self.cleaned_data.get('message', '')\n from_email = self.cleaned_data.get('email', '')\n\n if subject and message and from_email:\n try:\n print \"email enviado\"\n except BadHeaderError:\n return HttpResponse('Invalid header found.')\n" } ]
27
ahawker/krustofsky
https://github.com/ahawker/krustofsky
89c8647fa949a09c3a9de524afc4f111fd8f8659
17f684258a9e206a167d096514c0c524a216396c
789331dac5f81138b7cf094b35ffe4aada7821f7
refs/heads/master
2020-04-17T10:08:10.899958
2019-01-19T00:20:04
2019-01-19T00:20:04
166,488,934
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.5844370722770691, "alphanum_fraction": 0.5877483487129211, "avg_line_length": 22.532466888427734, "blob_id": "72ecea0c838974ec12c9044314120e2ee9a6f6da", "content_id": "7993a3713a421ce8450ca09d2c7748dd59920299", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1812, "license_type": "permissive", "max_line_length": 92, "num_lines": 77, "path": "/import.py", "repo_name": "ahawker/krustofsky", "src_encoding": "UTF-8", "text": "\"\"\"\n import.py\n ~~~~~~~~~\n\n Run this script to convert social security popular baby names dataset to SQLite.\n\"\"\"\nimport glob\nimport io\nimport os\nimport sqlite3\nimport sys\n\n\nSCHEMA = \"\"\"\nCREATE TABLE IF NOT EXISTS names (\n year integer,\n name text,\n sex text,\n occurrences integer\n);\n\nCREATE INDEX IF NOT EXISTS names_year_idx ON names (year);\nCREATE INDEX IF NOT EXISTS names_name_idx ON names (name);\nCREATE INDEX IF NOT EXISTS names_sex_idx ON names (sex);\nCREATE INDEX IF NOT EXISTS names_occurrences_idx ON names (occurrences);\n\"\"\"\n\n\nINSERT = \"\"\"\nINSERT OR IGNORE INTO names (\n year,\n name,\n sex,\n occurrences\n) VALUES (\n :year,\n :name,\n :sex,\n :occurrences\n);\n\"\"\"\n\n\ndef data_generator():\n \"\"\"Generator function that yields dicts for each line in each data file\"\"\"\n for path in glob.glob('data/*.txt'):\n with io.open(path, 'r') as f:\n print('Processing file {}'.format(path))\n year = os.path.splitext(os.path.basename(path))[0].strip('yob')\n for line in f:\n line = line.strip()\n name, sex, occurrences = line.split(',')\n yield {\n 'year': int(year.lower()),\n 'name': name.lower(),\n 'sex': sex.lower(),\n 'occurrences': int(occurrences)\n }\n\n\ndef create_db(name):\n \"\"\"Create Sqlite DB using SCHEMA\"\"\"\n db = sqlite3.connect(name, check_same_thread=False, detect_types=sqlite3.PARSE_COLNAMES)\n db.executescript(SCHEMA)\n return db\n\n\ndef main(argv):\n \"\"\"Convert directory of text files to SQLite database\"\"\"\n db = create_db(argv[0])\n db.executemany(INSERT, data_generator())\n db.commit()\n db.close()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n" }, { "alpha_fraction": 0.670487105846405, "alphanum_fraction": 0.7010506391525269, "avg_line_length": 17.05172348022461, "blob_id": "4d37934e863f4dec5e8580b6b4b5736f85b8be3f", "content_id": "97d5ace62b405e6ed71e3a5daaa2358e5ddf06af", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1047, "license_type": "permissive", "max_line_length": 81, "num_lines": 58, "path": "/README.md", "repo_name": "ahawker/krustofsky", "src_encoding": "UTF-8", "text": "# krustofsky\n\nConvert the social security popular baby names dataset to SQLite for fun queries.\n\n## Status\n\nHacked together in 10m. Use at your own risk!\n\n## Dataset\n\nhttps://www.ssa.gov/oact/babynames/limits.html\n\n## Usage\n\n**Download the dataset**\n\n```bash\n$ wget -O names.zip https://www.ssa.gov/oact/babynames/names.zip\n$ unzip names.zip -d data/\n```\n\n**Creating the database**\n\n```bash\n$ git clone [email protected]:ahawker/krustofsky.git\n$ cd krustofsky\n$ python import.py names.db\n```\n\n**Running queries**\n\n```bash\n$ sqlite3 names.db\nSQLite version 3.16.0 2016-11-04 19:09:39\nEnter \".help\" for usage hints.\nsqlite> select sum(occurrences) from names;\nsum(occurrences)\n----------------\n348120517\n```\n\n```bash\nsqlite> .schema\nCREATE TABLE names (\n year integer,\n name text,\n sex text,\n occurrences integer\n);\nCREATE INDEX names_year_idx ON names (year);\nCREATE INDEX names_name_idx ON names (name);\nCREATE INDEX names_sex_idx ON names (sex);\nCREATE INDEX names_occurrences_idx ON names (occurrences);\n```\n\n## License\n\n[Apache 2.0](LICENSE)\n" } ]
2
techgnosis/volca_beats_remap
https://github.com/techgnosis/volca_beats_remap
fff557456056a2f1ad100c26f904a215af5cdc08
7f9ae88ee19cb2972c627a004f6495bd42909eeb
92f722937fd9ea37c51b582857fba9c8e4020cf2
refs/heads/master
2020-05-29T08:48:55.686277
2016-10-02T20:41:31
2016-10-02T20:41:31
69,778,227
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4719827473163605, "alphanum_fraction": 0.5495689511299133, "avg_line_length": 19.173913955688477, "blob_id": "5f5a84a2ccfe5d2f7db193798641f545a9f76d78", "content_id": "ea0915f77c6239b3422c3399fb64a9fac0fe300d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 52, "num_lines": 46, "path": "/remapper.py", "repo_name": "techgnosis/volca_beats_remap", "src_encoding": "UTF-8", "text": "import mido\n\n# Volca Beats has ridiculous note mappings\n# 36 - C2 - Kick\n# 38 - D2 - Snare\n# 43 - G2 - Lo Tom\n# 50 - D3 - Hi Tom\n# 42 - F#2 - Closed Hat\n# 46 - A#2 - Open Hat\n# 39 - D#2 - Clap\n# 75 - D#5 - Claves\n# 67 - G4 - Agogo\n# 49 - C#3 - Crash\n\n\nnote_mapping = {\n 48 : 36,\n 49 : 38,\n 50 : 43,\n 51 : 50,\n 52 : 42,\n 53 : 46,\n 54 : 39,\n 55 : 75,\n 56 : 67,\n 57 : 49\n}\n\n\nmido.set_backend('mido.backends.rtmidi')\n\ninport = mido.open_input('RemapInput', virtual=True)\n\noutputs = mido.get_output_names()\num_one = next(x for x in outputs if 'UM-ONE' in x)\n\noutport = mido.open_output(um_one, virtual=False)\n\nfor msg in inport:\n if msg.type in ['note_on','note_off']:\n # mido starts MIDI channels at 0\n if msg.channel == 1:\n if msg.note in note_mapping:\n new_note = note_mapping[msg.note]\n msg.note = new_note\n outport.send(msg)\n" } ]
1
liuchao012/myPythonWeb
https://github.com/liuchao012/myPythonWeb
10ace93fdcef7f5b29c23f129314a89de020f007
f8d90bc0b14a935f37a13d112173b73d3da8f303
77ac7092c651a4836510bad3ed20300645d4c081
refs/heads/master
2021-07-12T05:25:00.060872
2020-06-01T08:05:47
2020-06-01T08:05:47
138,588,485
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7528089880943298, "alphanum_fraction": 0.7528089880943298, "avg_line_length": 16.799999237060547, "blob_id": "0f1dd8c04e67456f8d60f0d836e8aa16c5069a1a", "content_id": "72e407e2886933996be6ffb6ed15bbac905af921", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/listsss/apps.py", "repo_name": "liuchao012/myPythonWeb", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass ListsssConfig(AppConfig):\n name = 'listsss'\n" }, { "alpha_fraction": 0.6148350834846497, "alphanum_fraction": 0.6201550364494324, "avg_line_length": 39.85714340209961, "blob_id": "ba1ba0df6f27f57d02a7506e610ee1e2ddaea2c6", "content_id": "19f5e74d291a20dd8be9d3e592b867518476077f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6827, "license_type": "no_license", "max_line_length": 87, "num_lines": 161, "path": "/test/listsss/tests_views.py", "repo_name": "liuchao012/myPythonWeb", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.urls import resolve\nfrom django.http import HttpRequest\nfrom django.template.loader import render_to_string\nfrom django.utils.html import escape\nfrom listsss.models import Item, List\nfrom listsss.views import home_page\nimport unittest\n\n\n# Create your tests here.\nclass HomePageTest(TestCase):\n def test_root_url_resolves_to_home_page_view(self):\n print(\"第x个测试通过了\")\n found = resolve('/')\n self.assertEqual(found.func, home_page)\n\n def test_home_page_return_correct_html(self):\n request = HttpRequest()\n resp = home_page(request)\n\n # 使用render_to_string ,django自带函数 生成string字符串,和渲染获取到的字符串对比\n #### 注释:这个没有办法解决,两次生成得tocken值是不相同的,所以先注释掉这个字段对应的断言\n expected_html = render_to_string('listsss/home.html', request=request)\n\n # .decode()将字符串转换成unicode\n # self.assertEqual(resp.content.decode(), expected_html)\n\n # self.assertTrue(resp.content.startswith(b'<html>'))\n self.assertIn(b\"<title>To-Do lists</title>\", resp.content)\n self.assertTrue(resp.content.endswith(b'</html>'))\n\n # def test_home_page_only_saves_items_when_necessary(self):\n # request = HttpRequest()\n # home_page(request)\n # self.assertEqual(Item.objects.count(), 0)\n\n # 中途这个用例不要了\n # def test_home_page_displays_all_list_items(self):\n # Item.objects.create(text='itemey 1')\n # Item.objects.create(text='itemey 2')\n #\n # req = HttpRequest()\n # rep = home_page(req)\n #\n # self.assertIn('itemey 1', rep.content.decode())\n # self.assertIn('itemey 2', rep.content.decode())\n\n\nclass ListViewTest(TestCase):\n # def test_home_page_displays_all_list_items(self):\n def test_home_page_displays_only_items_for_that_list(self):\n # list_ = List.objects.create()\n # Item.objects.create(text='itemey 1', list=list_)\n # Item.objects.create(text='itemey 2', list=list_)\n\n correct_list = List.objects.create()\n Item.objects.create(text='itemey 1', list=correct_list)\n Item.objects.create(text='itemey 2', list=correct_list)\n\n other_list = List.objects.create()\n Item.objects.create(text='other itemey 1', list=other_list)\n Item.objects.create(text='other itemey 2', list=other_list)\n\n # resp = self.client.get('/list/the-only-list-in-the-world/')\n resp = self.client.get('/list/%d/' % (correct_list.id,))\n\n self.assertContains(resp, 'itemey 1')\n self.assertContains(resp, 'itemey 2')\n self.assertNotContains(resp, 'other itemey 1')\n self.assertNotContains(resp, 'other itemey 2')\n\n def test_uses_list_template(self):\n # resp = self.client.get('/list/the-only-list-in-the-world/')\n list_ = List.objects.create()\n resp = self.client.get('/list/%d/' % (list_.id,))\n self.assertTemplateUsed(resp, 'listsss/list.html')\n\n def test_passes_correct_list_to_template(self):\n other_list = List.objects.create()\n correct_list = List.objects.create()\n resp = self.client.get('/list/%d/' % (correct_list.id,))\n self.assertEqual(resp.context['list'], correct_list)\n\n def test_can_save_a_POST_to_an_existing_list(self):\n other_list = List.objects.create()\n correct_list = List.objects.create()\n self.client.post('/list/%d/' % (correct_list.id,),\n data={'item_text': 'A new item for an existiong list'})\n self.assertEqual(Item.objects.count(), 1)\n new_item = Item.objects.first()\n self.assertEqual(new_item.text, 'A new item for an existiong list')\n self.assertEqual(new_item.list, correct_list)\n\n def test_POST_redirects_to_list_view(self):\n other_list = List.objects.create()\n correct_list = List.objects.create()\n resp = self.client.post('/list/%d/' % (correct_list.id,),\n data={'item_text': 'A new item for an existiong list'})\n self.assertRedirects(resp, '/list/%d/' % (correct_list.id,))\n\n def test_validation_errors_end_up_on_lists_page(self):\n list_ = List.objects.create()\n resp = self.client.post('/list/%d/'%(list_.id,), data={\"item_text\":''})\n self.assertEqual(resp.status_code, 200)\n self.assertTemplateUsed(resp, 'listsss/list.html')\n ex_error=escape('You cant have an empty list item')\n self.assertContains(resp, ex_error)\n\nclass NewListTest(TestCase):\n def test_saving_a_POST_request(self):\n self.client.post('/list/new', data={'item_text': 'A new list item'})\n self.assertEqual(Item.objects.count(), 1)\n new_item = Item.objects.first()\n self.assertEqual(new_item.text, 'A new list item')\n # requ = HttpRequest()\n # requ.method = 'POST'\n # requ.POST['item_text'] = 'A new list item'\n #\n # rep = home_page(requ)\n #\n # self.assertEqual(Item.objects.count(), 1)\n # new_item = Item.objects.first()\n # self.assertEqual(new_item.text, 'A new list item')\n #\n # # 下面这部分单独拿出去做一个 单独的单元测试\n # # self.assertIn('A new list item', rep.content.decode())\n # # post 请求后页面重定向\n # # self.assertEqual(rep.status_code, 302)\n # # self.assertEqual(rep['location'], '/')\n\n def test_redirects_after_POST(self):\n rep = self.client.post('/list/new', data={'item_text': 'A new list item'})\n # self.assertEqual(rep.status_code, 302)\n\n new_list = List.objects.first()\n self.assertRedirects(rep, '/list/%d/' % (new_list.id,))\n # django 的检查项\n # self.assertRedirects(rep, '/list/the-only-list-in-the-world/')\n # 这段重新修改\n\n # requ = HttpRequest()\n # requ.method = 'POST'\n # requ.POST['item_text'] = 'A new list item'\n #\n # rep = home_page(requ)\n # self.assertEqual(rep.status_code, 302)\n # self.assertEqual(rep['location'], '/list/the-only-list-in-the-world/')\n\n def test_validation_error_are_sent_back_to_home_page_template(self):\n resp = self.client.post('/list/new', data={'item_text': ''})\n self.assertEqual(resp.status_code, 200)\n self.assertTemplateUsed(resp, 'listsss/home.html')\n ex_error = escape(\"You cant have an empty list item\")\n print(resp.content.decode())\n self.assertContains(resp, ex_error)\n\n def test_invalid_list_items_arent_saved(self):\n self.client.post('/list/new', data={\"item_text\": ''})\n self.assertEqual(List.objects.count(), 0)\n self.assertEqual(Item.objects.count(), 0)\n\n" }, { "alpha_fraction": 0.6554192304611206, "alphanum_fraction": 0.6697341799736023, "avg_line_length": 25.405405044555664, "blob_id": "9559f3b004050ae674aafb4ce148885e9520c87c", "content_id": "e272e6da917ead8de63f794e422c58518ea28b70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1018, "license_type": "no_license", "max_line_length": 71, "num_lines": 37, "path": "/functional_tests/base.py", "repo_name": "liuchao012/myPythonWeb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/6/25 20:15\n# @Author : Mat\n# @Email : [email protected]\n# @File : functional_tests1.py\n# @Software: PyCharm\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom django.test import LiveServerTestCase\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nimport unittest\nfrom unittest import skip\n\n\nclass FunctionalTest(StaticLiveServerTestCase):\n\n #不知道为什么加上下面两个方法之后就报错了\n # @classmethod\n # def setUpClass(cls):\n # pass\n #\n # @classmethod\n # def tearDownClass(cls):\n # pass\n\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(3)\n\n def tearDown(self):\n self.driver.quit()\n\n def check_for_row_in_list_table(self, row_text):\n table = self.driver.find_element_by_id('id_list_table')\n rows = table.find_elements_by_tag_name('tr')\n self.assertIn(row_text, [row.text for row in rows])\n\n" }, { "alpha_fraction": 0.6439725160598755, "alphanum_fraction": 0.6458463668823242, "avg_line_length": 33.0638313293457, "blob_id": "768aa3379d37a2432f3a7d4646912aade4bd2fe4", "content_id": "4fce98f99d07523d88ac6f70fe238a7eabf3c1c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1601, "license_type": "no_license", "max_line_length": 76, "num_lines": 47, "path": "/test/listsss/tests_models.py", "repo_name": "liuchao012/myPythonWeb", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom django.urls import resolve\nfrom django.http import HttpRequest\nfrom django.template.loader import render_to_string\nfrom listsss.models import Item, List\nfrom listsss.views import home_page\nimport unittest\nfrom django.core.exceptions import ValidationError\n\nclass ListAndItemModelsTest(TestCase):\n def test_saving_and_retrieving_items(self):\n list_ = List()\n list_.save()\n\n first_item = Item()\n first_item.text = 'The first (ever) list item'\n first_item.list = list_\n first_item.save()\n\n second_item = Item()\n second_item.text = 'Item the second'\n second_item.list = list_\n second_item.save()\n\n saved_liat = List.objects.first()\n self.assertEqual(saved_liat, list_)\n\n saved_items = Item.objects.all()\n self.assertEqual(saved_items.count(), 2)\n\n first_save_item = saved_items[0]\n second_save_item = saved_items[1]\n self.assertEqual(first_save_item.text, 'The first (ever) list item')\n self.assertEqual(first_save_item.list, list_)\n self.assertEqual(second_save_item.text, 'Item the second')\n self.assertEqual(second_save_item.list, list_)\n\n def test_cannot_save_empty_list_items(self):\n list_=List.objects.create()\n item = Item(list= list_, text='')\n with self.assertRaises(ValidationError):\n item.save()\n item.full_clean()\n\n def test_get_absolute_url(self):\n list_ = List.objects.create()\n self.assertEqual(list_.get_absolute_url(), '/list/%d/'%(list_.id,))\n" }, { "alpha_fraction": 0.6235294342041016, "alphanum_fraction": 0.6235294342041016, "avg_line_length": 32.75640869140625, "blob_id": "92931faf6215b889cde9039ab094148495a47337", "content_id": "a99dc2212162aa217d5a366a9e1854716011ac6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2955, "license_type": "no_license", "max_line_length": 101, "num_lines": 78, "path": "/listsss/views.py", "repo_name": "liuchao012/myPythonWeb", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect # redirect是python的重定向方法\nfrom django.http import HttpResponse\nfrom listsss.models import Item, List\nfrom django.core.exceptions import ValidationError\n\n\n# Create your views here.\ndef home_page(request):\n # return HttpResponse(\"<html><title>To-Do lists</title></html>\")\n # if (request.method=='POST'):\n # return HttpResponse(request.POST['item_text'])\n\n # 新加了页面这里就可以删除了\n # if (request.method == 'POST'):\n # new_item_text = request.POST['item_text']\n # Item.objects.create(text=new_item_text)\n # return redirect('/list/the-only-list-in-the-world/')\n\n ##第二种方法\n # else:\n # new_item_text = ''\n # return render(request, 'listsss/home.html', {'new_item_text':new_item_text})\n\n ##第一种方法\n # item = Item()\n # item.text = request.POST.get('item_text', '')\n # item.save()\n # return render(request, 'listsss/home.html', {'new_item_text':request.POST.get('item_text','')})\n\n # 这里首页不用展示相关的数据了\n # items_list = Item.objects.all()\n # return render(request, 'listsss/home.html', {'items_list': items_list})\n return render(request, 'listsss/home.html')\n\n\ndef view_list(request, list_id):\n error = None\n list_ = List.objects.get(id=list_id)\n if request.method == 'POST':\n try:\n item = Item.objects.create(text=request.POST['item_text'], list=list_)\n item.full_clean()\n item.save()\n #简化\n #return redirect('/list/%d/' % (list_.id,))\n return redirect(list_)\n except ValidationError:\n item.delete() # 不知道为什么要加这一步,书里面没有这步骤,书上说抓取到这个错误就不会存到数据库里面了,可还是存进去了\n error = 'You cant have an empty list item'\n return render(request, 'listsss/list.html', {'list': list_, 'error': error})\n\n\ndef new_list(request):\n list_ = List.objects.create()\n item = Item.objects.create(text=request.POST['item_text'], list=list_)\n try:\n item.full_clean()\n item.save()\n except ValidationError:\n list_.delete()\n item.delete() # 不知道为什么要加这一步,书里面没有这步骤,书上说抓取到这个错误就不会存到数据库里面了,可还是存进去了\n error = 'You cant have an empty list item'\n return render(request, 'listsss/home.html', {\"error\": error})\n # 重新定义到有效地址\n # return redirect('/list/the-only-list-in-the-world/')\n # 去除硬编码\n # return redirect('/list/%d/' % (list_.id,))\n return redirect('view_list', list_.id)\n\n\ndef add_item(request, list_id):\n list_ = List.objects.get(id=list_id)\n Item.objects.create(text=request.POST['item_text'], list=list_)\n return redirect('/list/%d/' % (list_.id,))\n\n\nclass home_page_class():\n pass\n\n\n" }, { "alpha_fraction": 0.647794783115387, "alphanum_fraction": 0.6546762585639954, "avg_line_length": 37.51807403564453, "blob_id": "f17ceb2a99f584fdaa6385e2e7693f41132064b8", "content_id": "d1623667f715774edb18a62554ca582d940577a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3633, "license_type": "no_license", "max_line_length": 163, "num_lines": 83, "path": "/functional_tests/test_simple_list_creation.py", "repo_name": "liuchao012/myPythonWeb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/6/25 20:15\n# @Author : Mat\n# @Email : [email protected]\n# @File : functional_tests1.py\n# @Software: PyCharm\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom django.test import LiveServerTestCase\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nimport unittest\nfrom unittest import skip\nfrom .base import FunctionalTest\n\n\nclass NewVisitorTest(FunctionalTest):\n\n def test_can_start_a_list_and_retrieve_it_later(self):\n # 类继承LiveServerTestCase 后将不使用实际部署的localhost 地址,使用 django提供的self.live_server_url地址\n # self.driver.get(\"http://localhost:8000\")\n self.driver.get(self.live_server_url)\n\n # 发现页面上显示的 TO-DO 字样\n self.assertIn('To-Do', self.driver.title)\n header_text = self.driver.find_element_by_tag_name('h1').text\n self.assertIn('To-Do', header_text)\n\n # 应用邀请输入一个代办事项\n inputbox = self.driver.find_element_by_id('id_new_item')\n self.assertEqual(inputbox.get_attribute('placeholder'), 'Enter a to-do item')\n\n # 在输入框中输入购买孔雀羽毛\n inputbox.send_keys('Buy peacock feathers')\n\n # 点击回车后页面更新\n # 代办事项中显示 ‘1:Buy peacock feathers’\n inputbox.send_keys(Keys.ENTER)\n\n edith_list_url = self.driver.current_url\n self.assertRegex(edith_list_url, '/list/.+?')\n\n table = self.driver.find_element_by_id('id_list_table')\n rows = table.find_elements_by_tag_name('tr')\n # self.assertTrue(any(row.text == '1:Buy peacock feathers' for row in rows), 'New to-do item did not appear in table - - its text was:\\n%s' % (table.text))\n\n # 页面又显示了一个文本框,可以输入其他代办事项\n # 输入‘Use peacock feathers to make a fly’\n inputbox = self.driver.find_element_by_id('id_new_item')\n inputbox.send_keys('Use peacock feathers to make a fly')\n inputbox.send_keys(Keys.ENTER)\n table = self.driver.find_element_by_id('id_list_table')\n rows = table.find_elements_by_tag_name('tr')\n self.assertIn('1:Buy peacock feathers', [row.text for row in rows])\n self.assertIn('2:Use peacock feathers to make a fly', [row.text for row in rows])\n\n ##我们需要新打开一个浏览器,并且不让cookice相互干扰\n # 让录入的清单不会被别人看到\n self.driver.quit()\n\n # 其他人访问页面看不到刚才录入的清单\n self.driver = webdriver.Firefox()\n self.driver.get(self.live_server_url)\n page_text = self.driver.find_element_by_tag_name('body').text\n self.assertNotIn('Buy peacock feathers', page_text)\n self.assertNotIn('make a fly', page_text)\n\n # 他输入了新的代办事项,创建了一个新的代办清单\n inputbox = self.driver.find_element_by_id('id_new_item')\n inputbox.send_keys('Buy milk')\n inputbox.send_keys(Keys.ENTER)\n\n # 他获得了一个属于他自己的url\n francis_list_url = self.driver.current_url\n self.assertRegex(edith_list_url, '/list/.+?')\n self.assertNotEquals(francis_list_url, edith_list_url)\n\n # 这个页面还是没有其他人的清单\n # 但是这个页面包含他自己的清单\n page_text = self.driver.find_element_by_tag_name('body').text\n self.assertNotIn('Buy peacock feathers', page_text)\n self.assertIn('Buy milk', page_text)\n # self.fail('Finisth the test')\n" }, { "alpha_fraction": 0.6660305261611938, "alphanum_fraction": 0.6965649127960205, "avg_line_length": 33.93333435058594, "blob_id": "3794e1d65e28f5265b0fb32f8f464b6415625784", "content_id": "eb9ead9243b2315440a86f511a5d96c13841ad98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1098, "license_type": "no_license", "max_line_length": 98, "num_lines": 30, "path": "/functional_tests/tests_layout_and_styling.py", "repo_name": "liuchao012/myPythonWeb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/6/25 20:15\n# @Author : Mat\n# @Email : [email protected]\n# @File : functional_tests1.py\n# @Software: PyCharm\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom django.test import LiveServerTestCase\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nimport unittest\nfrom unittest import skip\nfrom .base import FunctionalTest\n\nclass LayoutAndStylingTest(FunctionalTest):\n\n def test_layout_and_styling(self):\n self.driver.get(self.live_server_url)\n self.driver.set_window_size(1024, 768)\n\n # 查看页面元素居中\n inputbox = self.driver.find_element_by_id('id_new_item')\n self.assertAlmostEqual(inputbox.location['x'] + inputbox.size['width'] / 2, 512, delta=10)\n\n # 保存成功后,清单列表的输入框也居中\n inputbox.send_keys('testing')\n inputbox.send_keys(Keys.ENTER)\n inputbox = self.driver.find_element_by_id('id_new_item')\n self.assertAlmostEqual(inputbox.location['x'] + inputbox.size['width'] / 2, 512, delta=10)\n" }, { "alpha_fraction": 0.8571428656578064, "alphanum_fraction": 0.8571428656578064, "avg_line_length": 13, "blob_id": "6cb425f73b4e9efb0edc510fe2f20566b4037617", "content_id": "7c149f821012e8e0edf9200d49683efc96361f3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 36, "license_type": "no_license", "max_line_length": 13, "num_lines": 2, "path": "/README.md", "repo_name": "liuchao012/myPythonWeb", "src_encoding": "UTF-8", "text": "# myPythonWeb\npythonWeb编程实战\n" }, { "alpha_fraction": 0.6683453321456909, "alphanum_fraction": 0.6805755496025085, "avg_line_length": 38.71428680419922, "blob_id": "32d7505d327aacf1da3bbdbf34213b5abd6f92fd", "content_id": "6b16b30d2ac983ef610f9d4b7e0814a9420f8188", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1398, "license_type": "no_license", "max_line_length": 77, "num_lines": 35, "path": "/functional_tests/tests_list_item_validation.py", "repo_name": "liuchao012/myPythonWeb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/6/25 20:15\n# @Author : Mat\n# @Email : [email protected]\n# @File : functional_tests1.py\n# @Software: PyCharm\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom django.test import LiveServerTestCase\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nimport unittest\nfrom unittest import skip\nfrom .base import FunctionalTest\n\n\nclass ItemValidationTest(FunctionalTest):\n def test_cannot_add_empty_list_items(self):\n self.driver.get(self.live_server_url)\n self.driver.find_element_by_id('id_new_item').send_keys('\\n')\n error = self.driver.find_element_by_css_selector('.has-error')\n self.assertEqual(error.text, \"You cant have an empty list item\")\n\n self.driver.find_element_by_id('id_new_item').send_keys('Buy milk\\n')\n self.check_for_row_in_list_table('1:Buy milk')\n\n self.driver.find_element_by_id('id_new_item').send_keys('\\n')\n self.check_for_row_in_list_table('1:Buy milk')\n error = self.driver.find_element_by_css_selector('.has-error')\n self.assertEqual(error.text, \"You cant have an empty list item\")\n\n self.driver.find_element_by_id('id_new_item').send_keys('Buy tea\\n')\n self.check_for_row_in_list_table('1:Buy milk')\n self.check_for_row_in_list_table('2:Buy tea')\n self.fail(\"write me!\")\n" }, { "alpha_fraction": 0.6675900220870972, "alphanum_fraction": 0.7008309960365295, "avg_line_length": 18.052631378173828, "blob_id": "53ff034422bb2eeeac750989f65bec067c059a68", "content_id": "bc02c538f65bdd011f9bddcf1fadeaaad83cbfd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 519, "license_type": "no_license", "max_line_length": 52, "num_lines": 19, "path": "/functional_tests/__init__.py", "repo_name": "liuchao012/myPythonWeb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/6/28 17:06\n# @Author : Mat\n# @Email : [email protected]\n# @File : __init__.py.py\n# @Software: PyCharm\n\n\n'''\n\nfunctional_tests,中的文件需要已tests开头系统命令才能读取到测试用例并执行测试\n\n测试执行命令python manage.py test functional_tests,来完成功能测试\n\n如果执行 python manage.py test 那么django 将会执行 功能测试和单元测试\n\n如果想只运行单元测试则需要执行固定的app ,python manage.py test listsss\n\n'''" } ]
10
FernandoBontorin/spark-optimization-features
https://github.com/FernandoBontorin/spark-optimization-features
eb997a38c678b34e804503c4a6959ded20a9cc00
1937282247618c10e866705fe5dc01a74e5ec521
fc98618a952208a0f978b739772f3d86773ef476
refs/heads/master
2023-05-31T11:12:27.312341
2021-06-24T22:24:12
2021-06-24T22:24:12
379,179,307
5
0
null
null
null
null
null
[ { "alpha_fraction": 0.5920000076293945, "alphanum_fraction": 0.6364444494247437, "avg_line_length": 26.790122985839844, "blob_id": "c730c77fc8947ae51e3daa895a9774316f872aeb", "content_id": "9293472cd85a380008175d1ea790222b15e80b6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 2250, "license_type": "no_license", "max_line_length": 91, "num_lines": 81, "path": "/docker-compose.yaml", "repo_name": "FernandoBontorin/spark-optimization-features", "src_encoding": "UTF-8", "text": "version: '3'\nx-airflow-common:\n &airflow-common\n image: serasa-airflow-pyspark:2.0.2\n environment:\n &airflow-common-env\n AIRFLOW__CORE__EXECUTOR: LocalExecutor\n AIRFLOW__CORE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow@postgres/airflow\n AIRFLOW__CORE__FERNET_KEY: ''\n AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'true'\n AIRFLOW__CORE__LOAD_EXAMPLES: 'false'\n AIRFLOW__API__AUTH_BACKEND: 'airflow.api.auth.backend.basic_auth'\n volumes:\n - ./airflow/dags:/opt/airflow/dags\n - ./airflow/logs:/opt/airflow/logs\n - ./target/scala-2.11:/tmp/applications\n - ./data:/tmp/data\n user: \"${AIRFLOW_UID:-50000}:${AIRFLOW_GID:-50000}\"\n depends_on:\n - postgres\n\nservices:\n spark-master:\n image: bde2020/spark-master:2.4.5-hadoop2.7\n container_name: spark-master\n ports:\n - \"8080:8080\"\n - \"7077:7077\"\n environment:\n - INIT_DAEMON_STEP=setup_spark\n volumes:\n - ./data:/tmp/data\n - ./target/scala-2.11:/tmp/applications\n spark-worker-1:\n image: bde2020/spark-worker:2.4.5-hadoop2.7\n container_name: spark-worker-1\n depends_on:\n - spark-master\n ports:\n - \"8081:8081\"\n - \"62460:4040\"\n environment:\n - \"SPARK_MASTER=spark://spark-master:7077\"\n volumes:\n - ./data:/tmp/data\n - ./target/scala-2.11:/tmp/applications\n airflow-webserver:\n <<: *airflow-common\n container_name: airflow-webserver\n command: webserver\n deploy:\n resources:\n limits:\n cpus: '1'\n memory: 1Gb\n ports:\n - \"80:8080\"\n airflow-scheduler:\n <<: *airflow-common\n container_name: airflow-scheduler\n ports:\n - \"62461:4040\"\n command: scheduler\n restart: always\n airflow-init:\n <<: *airflow-common\n container_name: airflow-init\n command: version\n environment:\n <<: *airflow-common-env\n _AIRFLOW_DB_UPGRADE: 'true'\n _AIRFLOW_WWW_USER_CREATE: 'true'\n _AIRFLOW_WWW_USER_USERNAME: ${_AIRFLOW_WWW_USER_USERNAME:-airflow}\n _AIRFLOW_WWW_USER_PASSWORD: ${_AIRFLOW_WWW_USER_PASSWORD:-airflow}\n postgres:\n image: postgres:13\n container_name: postgres\n environment:\n POSTGRES_USER: airflow\n POSTGRES_PASSWORD: airflow\n POSTGRES_DB: airflow" }, { "alpha_fraction": 0.7402313351631165, "alphanum_fraction": 0.7602375745773315, "avg_line_length": 30.058252334594727, "blob_id": "faf2c0261238c5a0f666b2d3024aa43bbd9071fb", "content_id": "05e145face02fe2cc06db43f78462741af5e0c45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3199, "license_type": "no_license", "max_line_length": 158, "num_lines": 103, "path": "/README.md", "repo_name": "FernandoBontorin/spark-optimization-features", "src_encoding": "UTF-8", "text": "### About\n\n#### Description\n\nThis spark-optimization-features repo is a Spark Application developed in Scala to attribute creation\n\n#### Target\n\nIntroduce some data processing to be able to practice Spark resources optimize at Serasa Experian DataLab meetup\n\n### Links\n\n#### Kaggle Dataset\n\nhttps://www.kaggle.com/kartik2112/fraud-detection\n\n#### Sparklens\n\nProject [repository](https://github.com/qubole/sparklens) [package](https://repos.spark-packages.org/qubole/sparklens/0.3.2-s_2.11/sparklens-0.3.2-s_2.11.jar)\n\n### Application\n\n#### commands to test/build\n\nassembly create a fat-jar with all necessary dependencies\n\n```bash\nsbt clean\nsbt test\nsbt assembly\n```\n\n#### run\n\n```bash\nspark-submit --master yarn --conf spark.default.parallelism=50 \\\n --conf spark.sql.shuffle.partitions=50 \\\n --conf spark.sql.parquet.compression.codec=snappy \\\n --conf spark.dynamicAllocation.enabled=false \\\n --conf spark.network.timeout=360000 \\\n --conf spark.shuffle.service.enabled=false \\\n --conf spark.sql.autoBroadcastJoinThreshold=-1 \\\n --conf spark.port.maxRetries=100 \\\n --conf spark.yarn.maxAppAttempts=1 \\\n --conf spark.executor.extraJavaOptions=-XX:+UseG1GC \\\n --conf spark.extraListeners=com.qubole.sparklens.QuboleJobListener \\\n --conf spark.sparklens.data.dir=tmp/sparklens/ \\\n --jars https://repos.spark-packages.org/qubole/sparklens/0.3.2-s_2.11/sparklens-0.3.2-s_2.11.jar \\\n --num-executors 1 \\\n --executor-cores 1 \\\n --executor-memory 512M \\\n --driver-memory 1G \\\n --name \"spark-optimization-features\" \\\n --class com.github.fernandobontorin.jobs.FraudBookProcessor \\\n --queue root.default \\\n --deploy-mode cluster \\\n target/scala-2.11/spark-optimization-features-assembly-0.1.0-SNAPSHOT.jar \\\n --dataframes data/fraudTest.csv,data/fraudTrain.csv --output data/fraud_book_features\n```\n\n### Environment\nUp Applications\n```bash\ndocker build -f airflow/Dockerfile -t serasa-airflow-pyspark:2.0.2 .\ndocker-compose up\n```\nSet Spark-master connection\n```yaml\nconnId: spark.default\nhost: spark://spark-master\nport: 7077\n```\nOptional\n```yaml\nextra: {\"deploy-mode\":\"cluster\"}\n```\n\n### Optimization\n\n#### partitions\none of the biggest villains of distributed data processing is the shuffle\n read/write, causes cores to get bottleneck waiting IO operations,\n the more you avoid IO during the process the better the application performs. \nIn this case is important to pay attention to small files/partitions and\nlarger than HDFS block size\n\n#### GC Pressure\nControl memory allocation and core ratio, if the ratio of GB/core is wrong, the\ncomputation stages, waste much time on garbage collector instead of processing data,\nputting too much pressure on GC\n\n#### One plan (Code performance)\nIf possible, is very recommended reducing stages as more than possible, \nat data attributes creation, execute a unique final action\n\n#### Parallelize (Code performance)\nOn aggregation operations is very difficult to process in one plan,\n also, is very recommended orient your code to execute all final actions\n in parallel\n\n#### Buckets (Code performance)\nOn join operations, like join 2 datasets using a long primary key is recommended,\ncreate a bucket using part of primary key content, this helps Spark to organize\n" }, { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7942583560943604, "avg_line_length": 34, "blob_id": "1adf9baf2c82531ceebd11550e292118ab7446ae", "content_id": "13b9a2f4d4da766b7eb08944211f17bd7b5feacf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 209, "license_type": "no_license", "max_line_length": 76, "num_lines": 6, "path": "/airflow/Dockerfile", "repo_name": "FernandoBontorin/spark-optimization-features", "src_encoding": "UTF-8", "text": "FROM apache/airflow:2.0.2\nUSER root\nRUN apt update\nRUN apt install --no-install-recommends openjdk-11-jre-headless -yqq\nUSER airflow\nRUN pip3 install pyspark==2.4.5 apache-airflow-providers-apache-spark==1.0.3" }, { "alpha_fraction": 0.5910846590995789, "alphanum_fraction": 0.6065336465835571, "avg_line_length": 42.13888931274414, "blob_id": "498d156bad89f7683e97a2a097f9c3be857eade9", "content_id": "25559fb33237ee4080fba2f0fc1d702bd6a382aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6214, "license_type": "no_license", "max_line_length": 118, "num_lines": 144, "path": "/airflow/dags/spark_optimization_features.py", "repo_name": "FernandoBontorin/spark-optimization-features", "src_encoding": "UTF-8", "text": "from airflow import DAG\nfrom airflow.operators.dummy import DummyOperator\nfrom airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator\nfrom airflow.utils.dates import days_ago\n\nfraud_features_jar = \"/tmp/applications/spark-optimization-features-assembly-0.1.0-SNAPSHOT.jar\"\nsparklens_jar = \"https://repos.spark-packages.org/qubole/sparklens/0.3.2-s_2.11/sparklens-0.3.2-s_2.11.jar\"\n\nwith DAG(dag_id='spark_optimization_features', default_args={'owner': 'Airflow'}, schedule_interval=None,\n start_date=days_ago(1), tags=['fraud_features_set'], catchup=False, concurrency=1, max_active_runs=1) as dag:\n\n start = DummyOperator(task_id=\"start\")\n\n book_fraud = SparkSubmitOperator(\n task_id=\"book_fraud\",\n conn_id=\"spark.default\",\n name=\"Book Fraud\",\n application=fraud_features_jar,\n conf={\n \"spark.default.parallelism\": 200,\n \"spark.dynamicAllocation.enabled\": \"false\",\n \"spark.network.timeout\": 360000,\n \"spark.shuffle.service.enabled\": \"false\",\n \"spark.sql.autoBroadcastJoinThreshold\": -1,\n \"spark.port.maxRetries\": 10,\n \"spark.yarn.maxAppAttempts\": 1,\n \"spark.executor.extraJavaOptions\": \"-XX:+UseG1GC\",\n \"spark.extraListeners\": \"com.qubole.sparklens.QuboleJobListener\",\n \"spark.sparklens.data.dir\": \"/tmp/data/history/sparklens\"\n },\n jars=sparklens_jar,\n num_executors=1,\n executor_cores=1,\n executor_memory=\"512m\",\n driver_memory=\"1G\",\n java_class=\"com.github.fernandobontorin.jobs.FraudBookProcessor\",\n application_args=[\n \"--dataframes\",\n \"file:///tmp/data/fraudTest.csv,file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv,\"\n \"file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv\",\n \"--output\",\n \"file:///tmp/data/fraud_book_features\"\n ]\n )\n\n book_fraud_optimized = SparkSubmitOperator(\n task_id=\"book_fraud_optimized\",\n conn_id=\"spark.default\",\n name=\"Book Fraud Optimized\",\n application=fraud_features_jar,\n conf={\n \"spark.default.parallelism\": 1,\n \"spark.dynamicAllocation.enabled\": \"false\",\n \"spark.network.timeout\": 360000,\n \"spark.shuffle.service.enabled\": \"false\",\n \"spark.sql.autoBroadcastJoinThreshold\": -1,\n \"spark.port.maxRetries\": 10,\n \"spark.yarn.maxAppAttempts\": 1,\n \"spark.executor.extraJavaOptions\": \"-XX:+UseG1GC\",\n \"spark.extraListeners\": \"com.qubole.sparklens.QuboleJobListener\",\n \"spark.sparklens.data.dir\": \"/tmp/data/history/sparklens\"\n },\n jars=sparklens_jar,\n num_executors=1,\n executor_cores=1,\n executor_memory=\"512m\",\n driver_memory=\"1G\",\n java_class=\"com.github.fernandobontorin.jobs.FraudBookProcessor\",\n application_args=[\n \"--dataframes\",\n \"file:///tmp/data/fraudTest.csv,file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv,\"\n \"file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv\",\n \"--output\",\n \"file:///tmp/data/fraud_book_features\"\n ]\n )\n\n aggregation_fraud = SparkSubmitOperator(\n task_id=\"aggregation_fraud\",\n conn_id=\"spark.default\",\n name=\"Agg Fraud Set\",\n application=fraud_features_jar,\n conf={\n \"spark.default.parallelism\": 200,\n \"spark.dynamicAllocation.enabled\": \"false\",\n \"spark.network.timeout\": 360000,\n \"spark.shuffle.service.enabled\": \"false\",\n \"spark.sql.autoBroadcastJoinThreshold\": -1,\n \"spark.port.maxRetries\": 10,\n \"spark.yarn.maxAppAttempts\": 1,\n \"spark.executor.extraJavaOptions\": \"-XX:+UseG1GC\",\n \"spark.extraListeners\": \"com.qubole.sparklens.QuboleJobListener\",\n \"spark.sparklens.data.dir\": \"/tmp/data/history/sparklens\"\n },\n jars=sparklens_jar,\n num_executors=1,\n executor_cores=1,\n executor_memory=\"512m\",\n driver_memory=\"1G\",\n java_class=\"com.github.fernandobontorin.jobs.AggregationProcessor\",\n application_args=[\n \"--dataframes\",\n \"file:///tmp/data/fraudTest.csv,file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv,\"\n \"file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv\",\n \"--output\",\n \"file:///tmp/data/aggregation_fraud\"\n ]\n )\n\n aggregation_fraud_par = SparkSubmitOperator(\n task_id=\"aggregation_fraud_par\",\n conn_id=\"spark.default\",\n name=\"Agg Fraud Set Par\",\n application=fraud_features_jar,\n conf={\n \"spark.default.parallelism\": 200,\n \"spark.dynamicAllocation.enabled\": \"false\",\n \"spark.network.timeout\": 360000,\n \"spark.shuffle.service.enabled\": \"false\",\n \"spark.sql.autoBroadcastJoinThreshold\": -1,\n \"spark.port.maxRetries\": 10,\n \"spark.yarn.maxAppAttempts\": 1,\n \"spark.executor.extraJavaOptions\": \"-XX:+UseG1GC\",\n \"spark.extraListeners\": \"com.qubole.sparklens.QuboleJobListener\",\n \"spark.sparklens.data.dir\": \"/tmp/data/history/sparklens\"\n },\n jars=sparklens_jar,\n num_executors=1,\n executor_cores=1,\n executor_memory=\"512m\",\n driver_memory=\"1G\",\n java_class=\"com.github.fernandobontorin.jobs.ParAggregationProcessor\",\n application_args=[\n \"--dataframes\",\n \"file:///tmp/data/fraudTest.csv,file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv,\"\n \"file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv,file:///tmp/data/fraudTrain.csv\",\n \"--output\",\n \"file:///tmp/data/aggregation_fraud_par\"\n ]\n )\n\n end = DummyOperator(task_id=\"end\")\n\n start >> book_fraud >> book_fraud_optimized >> (aggregation_fraud, aggregation_fraud_par) >> end\n\n\n" } ]
4
shiodat/algorithms
https://github.com/shiodat/algorithms
d28f42d45bd4d6869d1b14207ac5a14120621063
81ec28375f8e16c4b72540720ff47b54a737169b
91c69933eef5033e4876344675dd80102530a78a
refs/heads/master
2020-03-23T17:47:07.071777
2018-07-23T10:06:30
2018-07-23T10:06:30
141,875,219
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46701163053512573, "alphanum_fraction": 0.47089263796806335, "avg_line_length": 23.15625, "blob_id": "998adf0a9d2b601735219418d33f691e09e68d73", "content_id": "b09c62bdc190d94e6353658bc21baad86692cc5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 773, "license_type": "no_license", "max_line_length": 54, "num_lines": 32, "path": "/src/union_find.py", "repo_name": "shiodat/algorithms", "src_encoding": "UTF-8", "text": "class UnionFind(object):\n\n def __init__(self, N):\n self.N = N\n self.parent = list(range(self.N))\n self.rank = [0] * self.N\n self.size = [1] * self.N\n\n def find(self, x):\n if self.parent[x] != x:\n self.parent[x] = self.find(self.parent[x])\n return self.parent[x]\n\n def unite(self, x, y):\n x = self.find(x)\n y = self.find(y)\n if x == y:\n return\n\n if self.rank[x] < self.rank[y]:\n x, y = y, x\n\n self.size[x] += self.size[y]\n self.parent[y] = x\n if self.rank[x] == self.rank[y]:\n self.rank[x] += 1\n\n def same(self, x, y):\n return self.find(x) == self.find(y)\n\n def count(self, x):\n return self.size[self.find(x)]\n" }, { "alpha_fraction": 0.48407337069511414, "alphanum_fraction": 0.5009652376174927, "avg_line_length": 19.323530197143555, "blob_id": "338c0b610389c2d0707b0a5f650d698b8e9dd29c", "content_id": "9890f8cc35144f56a189ab872bc71d3cb74d4f8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2072, "license_type": "no_license", "max_line_length": 66, "num_lines": 102, "path": "/memo.md", "repo_name": "shiodat/algorithms", "src_encoding": "UTF-8", "text": "# Memo\n\n## UnionFind\n\nhttps://abc040.contest.atcoder.jp/tasks/abc040_d\n```python\nfrom operator import itemgetter\n\nclass UnionFind(object):\n\n def __init__(self, N):\n self.N = N\n self.parent = list(range(self.N))\n self.rank = [0] * self.N\n self.size = [1] * self.N\n\n def find(self, x):\n if self.parent[x] != x:\n self.parent[x] = self.find(self.parent[x])\n return self.parent[x]\n\n def unite(self, x, y):\n x = self.find(x)\n y = self.find(y)\n if x == y:\n return\n\n if self.rank[x] < self.rank[y]:\n x, y = y, x\n\n self.size[x] += self.size[y]\n self.parent[y] = x\n if self.rank[x] == self.rank[y]:\n self.rank[x] += 1\n\n def same(self, x, y):\n return self.find(x) == self.find(y)\n\n def count(self, x):\n return self.size[self.find(x)]\n\n\nN, M = map(int, input().split())\nroads = [tuple(map(int, input().split())) for _ in range(M)]\nQ = int(input())\npeople = [[i] + list(map(int, input().split())) for i in range(Q)]\n\nroads.sort(key=itemgetter(2), reverse=True)\npeople.sort(key=itemgetter(2), reverse=True)\nanswer = [0] * Q\n\nuf = UnionFind(N + 1)\n\nstart = 0\nfor i, vi, wi in people:\n for j in range(start, M):\n aj, bj, yj = roads[j]\n if yj > wi:\n uf.unite(aj, bj)\n else:\n start = j\n break\n answer[i] = uf.count(vi)\n\nfor v in answer:\n print(v)\n```\n\n## DP\n\nhttps://abc040.contest.atcoder.jp/tasks/abc040_c\n\n```python\nN = int(input())\na = [int(ai) for ai in input().split()]\na.extend([float('inf'), float('inf')])\n\ndp = [0] * (N + 2)\ndp[1] = abs(a[1] - a[0])\nfor i in range(N):\n dp[i+2] = min(dp[i] + abs(a[i+2] - a[i]),\n dp[i+1] + abs(a[i+2] - a[i+1]))\nprint(dp[N-1])\n```\n\n## Prime Factors\n\n```python\n\ndef prime_factors(n):\n factors = []\n i = 2\n while i * i <= n:\n if n % i == 0:\n i += 1\n else:\n n //= i\n factors.append(i)\n if n > 1:\n factors.append(n)\n return factors\n```" } ]
2
wuljchange/interesting_python
https://github.com/wuljchange/interesting_python
1ee5f2474645267fca9eb4e391700dcb5db051e3
3fdf9f7f17f7b361be030bb4eadf7aab889b15fe
3978985f6cf08f141cb9d780be4f8b7b2347c16b
refs/heads/master
2020-05-02T18:47:18.087498
2020-03-09T04:02:08
2020-03-09T04:02:08
178,139,898
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5102827548980713, "alphanum_fraction": 0.5372750759124756, "avg_line_length": 20.027027130126953, "blob_id": "4f49118c28aa793c9f41459301cace88c2e79a55", "content_id": "45a917a0facfa949d786057ed6994fe701b57063", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1758, "license_type": "permissive", "max_line_length": 67, "num_lines": 74, "path": "/part-struct/test-heapq.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import heapq\n\n\nclass PriorityQueue:\n def __init__(self):\n self._queue = []\n self._index = 0\n\n def push(self, priority, item):\n heapq.heappush(self._queue, (-priority, self._index, item))\n self._index += 1\n\n def pop(self):\n return heapq.heappop(self._queue)[-1]\n\n\nclass Item:\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return self.name\n\n\nif __name__ == \"__main__\":\n nums = [1, 5, 2, 4, 3]\n a = heapq.nlargest(3, nums)\n print(a)\n b = heapq.nsmallest(3, nums)\n print(b)\n print(type(b))\n # 对集合进行堆排序放入列表中,返回值为 None\n c = heapq.heapify(nums)\n print(c)\n print(nums)\n nums2 = [1, 5, 2, 4, 3]\n # heappop,heappush\n d = heapq.heappop(nums2)\n print(d)\n e = heapq.heappop(nums2)\n print(e)\n print(nums2)\n heapq.heappush(nums2, 1)\n print(nums2)\n f = heapq.heappop(nums2)\n print(f)\n # deque 保留最后插入的 N 个元素,返回值是可迭代对象\n from collections import deque\n q = deque(maxlen=3)\n q.append(1)\n q.appendleft(2)\n print(q)\n q.appendleft(3)\n q.appendleft(4)\n print(q)\n q.append(5)\n print(type(q))\n a, *b, c = q\n print(a, b, c)\n # sorted 排序可迭代对象,通过切片,左闭右开,切记!\n nums3 = [1, 5, 3, 2]\n print(sorted(nums3)[1:])\n # re 模块\n t = 'asdf fjdk; afed, fjek,asdf, foo'\n import re\n # 多个分隔符,不保留分隔符分组\n f1 = re.split(r'[;,\\s]\\s*', t)\n # 多个分隔符,保留分隔符分组\n f2 = re.split(r'(;|,|\\s)\\s*', t)\n # 多个分隔符,不保留分隔符分组\n f3 = re.split(r'(?:;|,|\\s)\\s*', t)\n print(f1)\n print(f2)\n print(f3)\n" }, { "alpha_fraction": 0.45026177167892456, "alphanum_fraction": 0.48691099882125854, "avg_line_length": 20.240739822387695, "blob_id": "ab43e90c7a4000674da363c03cb09195a1deffd1", "content_id": "c2cfab2afbd9c6ca482f3e08828d7692d41fbf02", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1544, "license_type": "permissive", "max_line_length": 63, "num_lines": 54, "path": "/part-interview/test17.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-07 18:46\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test17.py\n# ----------------------------------------------\n\n\ndef test():\n # 在函数内部使用 global 声名全局变量\n global A\n A = 1\n print(A)\n\n\nif __name__ == \"__main__\":\n # input 函数与用户交互使用\n\n # 解释一下 python 中 pass 的作用。\n # 是空语句,是为了保持程序结构的完整性。不做任何事情,一般用做占位语句。\n\n # is == 的区别,== 是比较两个对象的 value 值是否相等,is 判断两个对象的 id 是否相等\n # python 对象包含3个基本要素,value id type\n\n # python 中的作用域,global,nonlocal 语句,全局作用域,在函数内部对函数外部的非全局变量的使用\n\n # 三元运算符的用法\n b = \"1\"\n a = \"0\" if b == \"0\" else \"1\"\n print(a)\n # enumerate 模块,遍历会带上索引\n for i, v in enumerate(range(1, 11)):\n print(i, v)\n # python 中的标准模块,functools collections logging\n test()\n A = 2\n print(A)\n # 断言成功继续往下执行,失败就报异常信息\n assert 1 == 1\n print(\"end\")\n # dir 用于查看对象的 属性和方法\n a = [1, 2, [1, 2]]\n b = a\n import copy\n c = copy.copy(a)\n d = copy.deepcopy(a)\n a[2].append(3)\n print(a, b , c, d)\n data = [1, 2, 3, 4]\n print(data[::-1])\n d = [1, 2, 3, 4]\n e = d\n print(id(d), id(e))" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5496794581413269, "avg_line_length": 26.130434036254883, "blob_id": "511e0cd89e735d957821316e306889f9e4f4e20a", "content_id": "316b1b037c916104c336fc38aee652633564a657", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 632, "license_type": "permissive", "max_line_length": 82, "num_lines": 23, "path": "/part-yaml/test-yaml.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from ruamel.yaml import YAML\n\n\nif __name__ == \"__main__\":\n # yaml文件解析\n with open('deployments.yaml') as fp:\n content = fp.read()\n yaml = YAML()\n print(content)\n content = yaml.load_all(content)\n print(type(content))\n data = []\n for c in content:\n data.append(c)\n print(data[0])\n c = data[0]\n tmp = c['spec']['template']['spec']['containers'][0]['args'][2]\n c['spec']['template']['spec']['containers'][0]['args'][2] = tmp.format('http')\n data[0] = c\n content = (d for d in data)\n print(content)\n with open('new.yaml', 'w') as f:\n yaml.dump_all(content, f)\n" }, { "alpha_fraction": 0.5318182110786438, "alphanum_fraction": 0.5318182110786438, "avg_line_length": 19, "blob_id": "29ba8c0d6b2fe514cef7ab95bf320aabc6f5b017", "content_id": "2460deb177f468b938d98eec2ad5f5b83ab315c5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "permissive", "max_line_length": 35, "num_lines": 11, "path": "/part-text/test-iofile.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import io\n\n\nif __name__ == \"__main__\":\n s = io.StringIO()\n s_byte = io.BytesIO()\n print('test', file=s, end=\"\\t\")\n s_byte.write(b'bytes')\n print(\"new\")\n print(s.getvalue())\n print(s_byte.getvalue())\n" }, { "alpha_fraction": 0.501324474811554, "alphanum_fraction": 0.5377483367919922, "avg_line_length": 24.820512771606445, "blob_id": "156590aea5da3a59905f548356abf44953afba94", "content_id": "a78729b4b3c060589bd31d3142e202eb5cb3974b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3682, "license_type": "permissive", "max_line_length": 78, "num_lines": 117, "path": "/part-interview/test06.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-01 13:05\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test06.py\n# ----------------------------------------------\nimport json\nfrom datetime import datetime\nfrom json import JSONEncoder\nfrom functools import wraps\n\n\nclass Man:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n\nclass ComplexEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, datetime):\n return o.strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n return super(ComplexEncoder, self).default(o)\n\n\ndef bb(n: int):\n def multi(args):\n return n*args\n return multi\n\n\n# 函数定义成装饰器的时候,建议加上 wraps,他能保留装饰器定义函数的原有名称和docstring\ndef dt(func):\n @wraps(func)\n def new(*args, **kwargs):\n res = func(args)\n return res\n return new\n\n\nif __name__ == \"__main__\":\n # 交换两个变量的值,可以直接赋值\n a, b = 1, 2\n print(a, b)\n print(id(a), id(b))\n a, b = b, a\n print(a, b)\n print(id(a), id(b))\n # read,readline,readlines\n # read 是直接读取整个文件内容\n # readline 是读取文件的一行内容,从最开始读起\n # readlines 是读取文件所有内容,按行分隔成 list,会把换行符也带上\n with open('test.txt', 'r') as f:\n # r1 = f.read()\n # print(\"r1 \"+r1)\n # r2 = f.readline()\n # print(\"r2 \"+r2)\n r3 = f.readlines()\n print(r3)\n # json 序列化支持的数据类型有哪些\n # 基本上 python3 的基本数据类型都支持\n d1 = {\"d1\": 1}\n d2 = {\"d2\": \"2\"}\n d3 = {\"d3\": dict()}\n d4 = {\"d4\": list()}\n d5 = {\"d5\": tuple()}\n d6 = {\"d6\": True}\n print(json.dumps(d1))\n # json 序列化对象支持 datetime 对象,定义一个函数或者类,把 datetime 对象转换成字符串即可\n # 一般自己定义的类是有 self.__dict__ 方法的\n m = Man(\"test\", 24)\n d7 = {\"d7\": m}\n print(json.dumps(d7, default=lambda obj: obj.__dict__))\n d8 = {\"d8\": datetime.now()}\n print(json.dumps(d8, cls=ComplexEncoder))\n # json 序列化的时候,遇到中文会默认转换成 unicode,要求让他保留中文格式\n d9 = {\"hello\": \"你好\"}\n print(json.dumps(d9))\n print(json.dumps(d9, ensure_ascii=False))\n # 合并文件信息,按顺序排列\n with open('test1.txt', 'r') as f1:\n t1 = f1.read()\n with open('test2.txt', 'r') as f2:\n t2 = f2.read()\n print(\"t1 \", t1)\n print(\"t2 \", t2)\n # 字符串属于可迭代对象,sorted 过后返回一个 list\n t = sorted(t1 + t2)\n print(\"t \", \"\".join(t))\n # 当前日期计算函数\n dt1 = \"20190530\"\n import datetime\n dt1 = datetime.datetime.strptime(dt1, \"%Y%m%d\")\n print(dt1)\n dt2 = dt1 + datetime.timedelta(days=5)\n print(dt2.strftime(\"%Y%m%d\"))\n import arrow\n dt1 = \"2019-05-30\"\n dt1 = arrow.get(dt1)\n print(dt1)\n dt2 = dt1.shift(days=+5)\n print(dt2.isoformat())\n # 1 行代码实现 1-100 之间的偶数\n # range 方法是左闭右开\n t = [i for i in range(1, 100) if i % 2 == 0]\n print(t)\n # with 语句的作用,用作上下文管理器,一般用于文件读写,方式没有及时关闭文件\n # 如果一个对象有 self.__enter__(self) 和 self.__exit__(self) 方法的话,可以用 with 做上下文管理器\n # python 计算一个文件中大写字母的个数\n with open('test1.txt', 'r') as f:\n t = f.read()\n print(t)\n l = [i for i in t if \"A\" <= i <= \"Z\"]\n print(l)\n print(len(l))" }, { "alpha_fraction": 0.5983606576919556, "alphanum_fraction": 0.6106557250022888, "avg_line_length": 26.16666603088379, "blob_id": "38ec4da85476c3e8c035544dccfbe47d0965d684", "content_id": "db92a584b898ac3f7750ab001a7714cb1336a27b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 488, "license_type": "permissive", "max_line_length": 67, "num_lines": 18, "path": "/part-data/test-closepackage.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\n\n\ndef urltemplate(template):\n def opener(**kwargs):\n return template.format_map(kwargs)\n # return urlopen(template.format_map(kwargs))\n return opener\n\n\nif __name__ == \"__main__\":\n url = urltemplate('http://www.baidu.com?name={name}&age={age}')\n print(url)\n test = 'http://www.kingsoft.com?name={name}&age={age}'\n s1 = test.format_map({'name': 'mac', 'age': 23})\n print(s1)\n s = url(name='Alex', age=23)\n print(s)" }, { "alpha_fraction": 0.5229358077049255, "alphanum_fraction": 0.5412843823432922, "avg_line_length": 23.81818199157715, "blob_id": "0a2f2aa5dd24a081d83c66088bf1158bd9eaa465", "content_id": "76601895eef83e8c73912bda5797f0de3d80f6a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 619, "license_type": "permissive", "max_line_length": 73, "num_lines": 22, "path": "/part-data/test-sqlite.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import sqlite3\n\n\nif __name__ == \"__main__\":\n data = [\n (1, 2, 3),\n (2, 3, 4),\n ]\n s = sqlite3.connect('database.db')\n # 给数据库建立游标,就可以执行sql查询语句了\n db = s.cursor()\n db.execute('create table wulj (name, number, rate)')\n print(db)\n s.commit()\n db.executemany('insert into wulj (?,?,?)', data)\n s.commit()\n for row in db.execute('select * from wulj'):\n print(row)\n number = 10\n # 用户输入参数用于交互查询,?代表占位符\n for row in db.execute('select * from wulj where num > ?', (number,)):\n print(row)" }, { "alpha_fraction": 0.3527980446815491, "alphanum_fraction": 0.39416059851646423, "avg_line_length": 26.433332443237305, "blob_id": "81546ff9643d129040f81a78c1bcaf06b2c975d6", "content_id": "6b646987147611c1bf582210b64c7f465de56e7c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 920, "license_type": "permissive", "max_line_length": 54, "num_lines": 30, "path": "/part-interview/test16.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-07 18:11\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test16.py\n# ----------------------------------------------\nimport re\n\n\nif __name__ == \"__main__\":\n # 使用正则表达式匹配地址\n s = \"www.baidu.com.jkjh\"\n if re.match(r'(.*).(.*).(.*)', s):\n print(\"pass\")\n r = re.findall(r'(.*)\\.(.*)\\.(.*)', s)\n print(r)\n s = \" 98 100 102 \"\n s = re.sub(r' (\\d+) (\\d+) (\\d+) ', r'\\3/\\2/\\1', s)\n print(s)\n # 正则匹配中 (.*) 和 (.*?) 的区别是一个是最长匹配,一个是最短匹配\n text = 'Computer says \"no.\" Phone says \"yes.\"'\n t1 = re.findall(r'\"(.*)\"', text)\n t2 = re.findall(r'\"(.*?)\"', text)\n print(t1)\n print(t2)\n # 匹配邮箱的正则表达式\n text1 = \"[email protected], [email protected],\"\n t3 = re.findall(r'\\s*(.*?)@(.*?).com,\\s*', text1)\n print(t3)" }, { "alpha_fraction": 0.39867842197418213, "alphanum_fraction": 0.43171805143356323, "avg_line_length": 22.28205108642578, "blob_id": "bead2a719742dad91c962f633b9fd872492e7bae", "content_id": "927fc0cb8f6d5211e36bb056cda948c3fe4de812", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 946, "license_type": "permissive", "max_line_length": 48, "num_lines": 39, "path": "/part-interview/test18.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-08 11:05\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test18.py\n# ----------------------------------------------\n\n\ndef search_2(data, l):\n \"\"\" 二分查找法 \"\"\"\n length = len(l)\n # 递归一定要写出退出条件\n if length <= 1:\n if length <= 0:\n return False\n elif data == l[0]:\n return 0, l[0]\n else:\n return False\n mid_index = int(length/2)\n mid = l[mid_index]\n if data > mid:\n f_index = mid_index + 1\n return search_2(data, l[f_index:])\n elif data < mid:\n return search_2(data, l[:mid_index])\n else:\n return mid_index, mid\n\n\nif __name__ == \"__main__\":\n data = 0\n l = [i for i in range(10)]\n if search_2(data, l):\n index, value = search_2(data, l)\n print(index, value)\n else:\n print(False)\n" }, { "alpha_fraction": 0.501954197883606, "alphanum_fraction": 0.5265215039253235, "avg_line_length": 27.44444465637207, "blob_id": "75530d891960b980a68f94031b470c4728ecdf68", "content_id": "80af72d8ef8ee20e96f52b3ff33aacb66736fa00", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1791, "license_type": "permissive", "max_line_length": 131, "num_lines": 63, "path": "/part-class/test-compare.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from functools import total_ordering\nimport re\n\n\nclass Room:\n def __init__(self, name, length, width):\n self.name = name\n self.length = length\n self.width = width\n self.squre_foot = self.length*self.width\n\n\n@total_ordering\nclass House:\n def __init__(self, name, style):\n self.name = name\n self.style = style\n self.rooms = list()\n\n @property\n def living_space_footage(self):\n return sum(r.squre_foot for r in self.rooms)\n\n def append_room(self, room):\n self.rooms.append(room)\n\n def __str__(self):\n return '{} area is {}, style is {}'.format(self.name, self.living_space_footage, self.style)\n\n def __eq__(self, other):\n return self.living_space_footage == other.living_space_footage\n\n def __lt__(self, other):\n return self.living_space_footage < other.living_space_footage\n\n\nif __name__ == \"__main__\":\n # a = Room('bed_room', 20, 30)\n # # b = Room('living_room', 30, 40)\n # # c = Room('kitchen_room', 10, 20)\n # # h = House('home', 'Asia')\n # # h1 = House('new-home', 'Europe')\n # # h.append_room(a)\n # # h.append_room(b)\n # # h1.append_room(c)\n # # if h1 > h:\n # # print('{} area > {}'.format(h1.living_space_footage, h.living_space_footage))\n # # else:\n # # print('{} area is {} and < {} area is {}'.format(h1.name, h1.living_space_footage, h.name, h.living_space_footage))\n # #\n # # data = [1, 3, 3, 2, 5, 7, 5, 4, 5]\n # # a = list({k:'' for k in data})\n # # print(a)\n s = re.compile(r'[0-9]+')\n if s.match('1'):\n print('yes')\n data = [1,2,3,5,7,8]\n new = [23, 45, 1]\n new.reverse()\n\n print(new)\n print(data+new)\n print(round(7/3, 2))" }, { "alpha_fraction": 0.5649717450141907, "alphanum_fraction": 0.5649717450141907, "avg_line_length": 21.25, "blob_id": "e7534f7c42d545ff44d7f0d9581c7aa15adb025d", "content_id": "43ad365df7777ee50d60ac0a7085e9e500e6410c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "permissive", "max_line_length": 38, "num_lines": 8, "path": "/part-text/test-newfile.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import array\n\n\nif __name__ == \"__main__\":\n # xt模式测试写入文件不能直接覆盖,只能写入到不存在的文件里面\n with open('test.file', 'xt') as f:\n f.write('test not exist')\n print(\"end\", end='#')" }, { "alpha_fraction": 0.4990758001804352, "alphanum_fraction": 0.5175600647926331, "avg_line_length": 23.636363983154297, "blob_id": "4476d9fd499ee7d8b4b686ccf64734001495fb9d", "content_id": "583ec1faa8c202a04d9a97475bb67fd02ebf2686", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 741, "license_type": "permissive", "max_line_length": 59, "num_lines": 22, "path": "/part-sort-alogrithm/test-shell.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# 希尔排序 时间复杂度是O(NlogN)\n# 又称缩小增量排序 首先设置一个基础增量d,对每间隔d的元素分组,然后对每个分组的元素进行直接插入排序\n# 然后缩小增量,用同样的方法,直到增量小于0时,排序完成\n\n\ndef shell_sort(data: list):\n n = len(data)\n gap = int(n / 2) # 设置基础增量\n # 当增量小于0时,排序完成\n while gap > 0:\n for i in range(gap, n):\n j = i\n while j >= gap and data[j-gap] > data[j]:\n data[j-gap], data[j] = data[j], data[j-gap]\n j -= gap\n gap = int(gap / 2)\n return data\n\n\nif __name__ == \"__main__\":\n t_data = [3, 2, 5, 4, 1]\n print(shell_sort(t_data))" }, { "alpha_fraction": 0.5528531074523926, "alphanum_fraction": 0.5603367686271667, "avg_line_length": 19.980392456054688, "blob_id": "e4dd3f1b83c1b72ab1ec7ab8fd8b3e73219bae8c", "content_id": "e80be73b2bfc1f6df8e121cbf6ce26706db5d429", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "permissive", "max_line_length": 44, "num_lines": 51, "path": "/part-thread/test_exchange.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from contextlib import contextmanager\nfrom collections import defaultdict\n\n\nclass Exchange:\n def __init__(self):\n self._subscribers = set()\n\n def attach(self, task):\n self._subscribers.add(task)\n\n def detach(self, task):\n self._subscribers.remove(task)\n\n @contextmanager\n def subscribe(self, *tasks):\n for task in tasks:\n self.attach(task)\n try:\n yield\n finally:\n for task in tasks:\n self.detach(task)\n\n def send(self, msg):\n for subscriber in self._subscribers:\n subscriber.send(msg)\n\n\nclass Task:\n def send(self, msg):\n print(msg)\n\n\n_changes = defaultdict(Exchange)\n\n\ndef get_change(name):\n return _changes[name]\n\n\nif __name__ == \"__main__\":\n data = {'new1': 1, 'new3': 2, 'new2': 3}\n # new = sorted(data.items())\n print(dict(sorted(data.items())))\n # exc = get_change('name')\n # task_a = Task()\n # task_b = Task()\n # with exc.subscribe(task_a, task_b):\n # exc.send('msg1')\n # exc.send('msg2')" }, { "alpha_fraction": 0.46550416946411133, "alphanum_fraction": 0.5155420899391174, "avg_line_length": 42.96666717529297, "blob_id": "2335d20e2910a275b8371db43e0cc5f0a82e0a6a", "content_id": "52e3b0c8737de4b093a74fe95e7d5c6b5464920c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1325, "license_type": "permissive", "max_line_length": 108, "num_lines": 30, "path": "/part-kafka/kafka-consumer.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-01-13 14:30\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : kafka-consumer.py\n# ----------------------------------------------\nfrom kafka import KafkaConsumer\nimport time\n\n\ndef start_consumer():\n consumer = KafkaConsumer('my_test_topic1',\n bootstrap_servers='kafka-0-0.kafka-0-inside-svc.kafka.svc.cluster.local:32010,'\n 'kafka-1-0.kafka-1-inside-svc.kafka.svc.cluster.local:32011,'\n 'kafka-2-0.kafka-2-inside-svc.kafka.svc.cluster.local:32012,'\n 'kafka-3-0.kafka-3-inside-svc.kafka.svc.cluster.local:32013,'\n 'kafka-4-0.kafka-4-inside-svc.kafka.svc.cluster.local:32014,'\n 'kafka-5-0.kafka-5-inside-svc.kafka.svc.cluster.local:32015')\n for msg in consumer:\n print(msg)\n print(\"topic = %s\" % msg.topic) # topic default is string\n print(\"partition = %d\" % msg.offset)\n print(\"value = %s\" % msg.value.decode()) # bytes to string\n print(\"timestamp = %d\" % msg.timestamp)\n print(\"time = \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(msg.timestamp/1000)))\n\n\nif __name__ == '__main__':\n start_consumer()\n" }, { "alpha_fraction": 0.5343074798583984, "alphanum_fraction": 0.5603557825088501, "avg_line_length": 27.10714340209961, "blob_id": "ee13ab13fcb63eeac87ba8d920224458ef9782fd", "content_id": "3ed7c6557f0a9e0f65ebfaeb26dbd0450fe28255", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1730, "license_type": "permissive", "max_line_length": 97, "num_lines": 56, "path": "/part-data/test-pandas.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='test.log',\n filemode='w')\n\n\nif __name__ == \"__main__\":\n datas = pd.read_csv('test.csv')\n print(datas)\n # 输出每一列的数据类型\n print(datas.dtypes)\n # 输出前几行,会自动把header输出,不算行\n print(datas.head(2))\n # 每一列都有什么特征\n print(datas.columns)\n # 输出csv文件有多少行和列,不算header\n print(datas.shape)\n # pandas.Series传递一个list为参数\n s = pd.Series([1, 2, 3, np.nan, 5, 6])\n print(s)\n dates = pd.date_range('20181201', periods=12)\n print(dates)\n da = np.random.randn(3, 4)\n print(da)\n # 传递一个np数组\n df = pd.DataFrame(data=np.random.randn(12, 6), index=dates, columns=list('ABCDEF'))\n print(df)\n # 传递一个dict对象\n df2 = pd.DataFrame({\"a\": [i for i in range(4)],\n \"b\": \"test\"})\n print(df2)\n # view head or tail 元素,head default n=5\n print(df.head())\n print(df.tail(2))\n # view index, columns, values\n print(df.index)\n print(df.columns)\n print(df.values)\n # describe 快速显示DataFrame的各项指标\n print(df.describe())\n # df.loc[] useful\n print(df.loc[dates[0]])\n print(df.loc[dates[0], ['A', 'B']])\n print(df.loc[dates[0]:dates[2], ['A', 'B', 'C']])\n print(df.iloc[0:2])\n print(df.iloc[0:2, 3:4])\n logging.info('new')\n df3 = df.copy()\n print(df3)\n print(df3.mean())\n print(df3.mean(1))\n" }, { "alpha_fraction": 0.46503496170043945, "alphanum_fraction": 0.5419580340385437, "avg_line_length": 18.066667556762695, "blob_id": "46a6e3b828dd6f4781843c8afa165b556bc56775", "content_id": "1e20bfaaadf29e481397e2b8321a0c2e739ec5d1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "permissive", "max_line_length": 45, "num_lines": 15, "path": "/part-data/test-partial.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from functools import partial\nimport math\n\n\ndef distance(p1, p2):\n x1, y1 = p1\n x2, y2 = p2\n return math.hypot(x2-x1, y2-y1)\n\n\nif __name__ == \"__main__\":\n points = [(1, 2), (3, 4), (7, 8), (5, 6)]\n pt = (5, 6)\n points.sort(key=partial(distance, pt))\n print(points)\n" }, { "alpha_fraction": 0.5210421085357666, "alphanum_fraction": 0.5377421379089355, "avg_line_length": 28.959999084472656, "blob_id": "5f62d206a5ae18e68dc28f113645ca246a7c9613", "content_id": "c80042a0845ab8e11222bdb31198c52b98652dff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1539, "license_type": "permissive", "max_line_length": 62, "num_lines": 50, "path": "/part-text/test-iter.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from itertools import dropwhile, islice\nfrom itertools import permutations, combinations\nfrom itertools import combinations_with_replacement\n\n\ndef parser(filename):\n with open(filename, 'rt') as f:\n for lineno, line in enumerate(f, 1):\n print(lineno, line)\n fields = line.split()\n try:\n count = float(fields[0])\n except ValueError as e:\n print('Lineno {} parser {}'.format(lineno, e))\n\n\nif __name__ == \"__main__\":\n l1 = [1, 2, 3, 4]\n l2 = [2, 3, 4, 5]\n a = [(x, y) for x, y in zip(l1, l2)]\n print(a)\n for index, (x, y) in enumerate(a):\n print(index, x, y)\n line_text = 'test new world'\n print(line_text.split())\n items = [1, 2, 3, 4]\n for i in enumerate(items):\n print(i)\n # 指定行号\n for i in enumerate(items, 2):\n print(i)\n # 允许同一个元素被选取,在一个元祖中\n cp = [i for i in combinations_with_replacement(items, 2)]\n p_test = [i for i in permutations(items, 2)]\n c_test = [i for i in combinations(items, 2)]\n print(p_test)\n print(c_test)\n print(cp)\n with open('../data-struct-algorithm/tmp/test') as f:\n r = f.readlines()\n print(r)\n st = ['#new', '#test', 'test']\n s = islice(st, 1, None)\n for s1 in s:\n print(s1)\n print(s)\n ret = list(filter(lambda x: x.startswith('#'), r))\n print(ret)\n for line in dropwhile(lambda x: x.startswith(\"#\"), f):\n print(line, end=\" \")" }, { "alpha_fraction": 0.5878594517707825, "alphanum_fraction": 0.6261980533599854, "avg_line_length": 18.625, "blob_id": "2ccce8d4fddfdaaf348a671e8e42835cc6363123", "content_id": "46eb5c4f47676cfce0ff2558edb21ecf5b0e9b0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "permissive", "max_line_length": 56, "num_lines": 16, "path": "/part-struct/test-groupby.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from operator import itemgetter\nfrom itertools import groupby\n\n\ndata = [\n {\"date\": 2019},\n {\"date\": 2018},\n {\"date\": 2020}\n]\ndata.sort(key=itemgetter('date'))\nprint(data)\nfor date, item in groupby(data, key=itemgetter('date')):\n print(date)\n print(item)\n for i in item:\n print(type(i), i)" }, { "alpha_fraction": 0.5316804647445679, "alphanum_fraction": 0.5371900796890259, "avg_line_length": 21.75, "blob_id": "78120593e2491991f0789af091003d7324ccb5d7", "content_id": "461a9e738596c51c92df53476d2c42d6feefc461", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 415, "license_type": "permissive", "max_line_length": 39, "num_lines": 16, "path": "/part-text/test-gzip.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import gzip\nimport bz2\n\n\nif __name__ == \"__main__\":\n # gzip作用于一个已经打开的二进制文件 new character\n f = open('file.gz', 'rb')\n with gzip.open(f, 'rb') as f:\n print(f.read())\n # with语句结束自动会关闭文件\n with gzip.open('file', 'wt') as f:\n f.read(\"test\")\n print(\"new line\")\n with bz2.open('file', 'wt') as f:\n f.read(\"test\")\n print(\"end\")" }, { "alpha_fraction": 0.5639880895614624, "alphanum_fraction": 0.5848214030265808, "avg_line_length": 27.04166603088379, "blob_id": "2545f3c3ebec310f2c8c0117ebc1f5b104bbdfae", "content_id": "16bcc9b3883640b25f381fada9cb2ee04c6e97ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1080, "license_type": "permissive", "max_line_length": 61, "num_lines": 24, "path": "/part-sort-alogrithm/test-bubble.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# 冒泡排序 该算法的事件复杂度未O(N^2)\n# 具体过程如下 首先遍历数组中的n个元素,对数组中的相邻元素进行比较,如果左边的元素大于右边的元素,则交换两个元素所在的\n# 位置,至此,数组的最右端的元素变成最大的元素,接着对剩下的n-1个元素执行相同的操作。\n\n\ndef bubble_sort(data: list):\n # 外面的循环控制内部循环排序的次数,例如5个数,只需要4次排序就行了\n for i in range(len(data)-1):\n change = False\n # 内部循环比较相邻元素,找到剩下元素的最大值放在数组的右边\n for j in range(len(data)-i-1):\n if data[j] > data[j+1]:\n data[j], data[j+1] = data[j+1], data[j]\n change = True\n # 当change=False时,说明没有交换的情况发生,说明该数组已经排序完成\n # 减少了循环的次数\n if not change:\n break\n return data\n\n\nif __name__ == \"__main__\":\n t_data = [5, 4, 3, 2, 1]\n print(bubble_sort(t_data))" }, { "alpha_fraction": 0.5093867182731628, "alphanum_fraction": 0.5244054794311523, "avg_line_length": 32.29166793823242, "blob_id": "66eec1dae0c6d4ab97d98532cde07c1044332f45", "content_id": "4f5a567a0dfaef10d18690f70bc77966d5fb81ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 847, "license_type": "permissive", "max_line_length": 99, "num_lines": 24, "path": "/part-plumbum/test01.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from plumbum import local, FG, BG, cli, SshMachine, colors\nfrom plumbum.cmd import grep, awk, wc, head, cat, ls, tail, sudo, ifconfig\n\n\nif __name__ == \"__main__\":\n ls = local[\"ls\"]\n print(ls())\n # 环境在linux\n # 管道符 pipe\n command = ls[\"-a\"] | awk['{if($2=\"100\") print $2}'] | wc[\"-l\"]\n print(command())\n # 重定向\n command = cat['test.file'] | head[\"-n\", 5]\n print(command())\n # 后台运行和当前终端运行\n command = (cat['test.file'] | grep[\"-v\", \"test\"] | (tail[\"-n\", 5] > \"out.file\")) & FG\n print(command())\n command = (awk['-F', '\\t', '{print $1, $2}', 'test.file'] | (head['-n', 5] >> 'out.file')) & BG\n print(command())\n # 嵌套命令\n command = sudo[ifconfig['-a']]\n command1 = (sudo[ifconfig[\"-a\"]] | grep[\"-i\", \"loop\"]) & FG\n print(command())\n print(command1())\n" }, { "alpha_fraction": 0.3946078419685364, "alphanum_fraction": 0.4264705777168274, "avg_line_length": 26.266666412353516, "blob_id": "4615c5c651d94b304b29c63fd4fc53c85c29728f", "content_id": "4a1911302c9949eed5f66f4610f16c0c65cddcc2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "permissive", "max_line_length": 62, "num_lines": 15, "path": "/part-requests/test-requests.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2019-11-08 11:42\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test-requests.py\n# ----------------------------------------------\nimport requests\n\n\nif __name__ == \"__main__\":\n url = \"https://cn.bing.com/\"\n resp = requests.get(\"https://\"+\"cn.bing.com\", verify=True)\n print(resp.status_code)\n print(resp.url)" }, { "alpha_fraction": 0.5070358514785767, "alphanum_fraction": 0.5292782783508301, "avg_line_length": 20.598039627075195, "blob_id": "fe52761b8d77fd09e1ad1a3de36a77157253e4e2", "content_id": "2ee3853a89abf07d8b93e01a579315163b8e3713", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2939, "license_type": "permissive", "max_line_length": 92, "num_lines": 102, "path": "/part-interview/test11.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-04 23:48\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test11.py\n# ----------------------------------------------\n\n\nclass Demo:\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\n def __call__(self, *args, **kwargs):\n \"\"\" 改变实例的状态 \"\"\"\n self.x, self.y = args\n\n\nclass Counter:\n def __init__(self, func):\n self.func = func\n self.count = 0\n\n def __call__(self, *args, **kwargs):\n self.count += 1\n return self.func(*args, **kwargs)\n\n\n@Counter\ndef foo(name):\n print(name)\n\n\nclass Test:\n # 静态属性\n x = 1\n y = 2\n\n def __init__(self, x=7, y=8):\n # 成员属性\n self.x = x\n self.y = y\n\n def normal_func(self):\n print(\"normal\", self.x, self.y)\n\n @staticmethod\n def static_func():\n print(\"static\", Test().x, Test().y)\n print(Test(3, 4).normal_func())\n\n @classmethod\n def class_func(cls):\n print(\"class\", Test.x, Test.y)\n print(cls(5, 6).normal_func())\n\n\nif __name__ == \"__main__\":\n data = \"sastfftsasdsh\"\n print(5/2)\n q = [True if x % 3 == 0 else -x for x in range(1, 101)]\n d = [True if data[i] == data[len(data)-i-1] else False for i in range(int(len(data)/2))]\n print(d)\n\n # 装饰器有什么作用\n # 用于给给现有函数增加额外功能,接收一个函数作为参数\n # 定义的装饰器函数可以带参数,函数本身也可以带参数\n\n # python 垃圾回收机制\n # 1. 引用计数器回收,引用计数器为0时,就会被解释器的 gc 回收\n # 2. 分代垃圾回收机制,对于对象的相互引用和循环引用,第一种回收方式时无法实现的,具体分为第一代,第二代,第三代\n # 第一代主要用于去除同一代中的相互索引和循环索引,存活的对象放入第二代,以此类推。\n\n # __call__ 使用\n # 可调用对象,对于类,函数,但凡是可以把()应用到一个对象上的情况都是可调用对象\n # 如果一个类中实现了 __call__ 函数,就可以将一个实例对象变成一个可调用对象\n demo = Demo(1, 2, 3)\n print(demo.x, demo.y)\n # 将实例对象当成函数调用,直接调用类中定义的 __call__ 函数,用于改变对象状态最直接优雅的方法\n demo(5, 6)\n print(demo.x, demo.y)\n\n for i in range(10):\n foo(i)\n print(foo.count)\n\n # 判断一个对象是函数还是方法\n # 与类和实例无绑定关系的function都是函数\n # 与类和实例有绑定关系的function都是方法\n\n # @staticmethod 和 @classmethod\n # @staticmethod 静态方法,与类和实例无关\n # @classmethod 类方法\n\n Test.static_func()\n Test.class_func()\n Test.x = 10\n t = Test(1, 2)\n print(t.x)\n print(Test.x)\n" }, { "alpha_fraction": 0.5515587329864502, "alphanum_fraction": 0.5755395889282227, "avg_line_length": 23.58823585510254, "blob_id": "5fd0567efdfa781a9632e0a079f8ef46fd873135", "content_id": "823c01f3850ce7397cd0a1b74684e02ab9717d24", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 601, "license_type": "permissive", "max_line_length": 61, "num_lines": 17, "path": "/part-sort-alogrithm/test-select.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# 选择排序 时间复杂度时O(N^2)\n# 具体过程如下 首先在n个元素的数组中找到最小值放在数组的最左端,然后在剩下的n-1个元素中找到最小值放在左边第二个位置\n# 以此类推,直到所有元素的顺序都已经确定\n\n\ndef select_sort(data: list):\n # 外部循环只需遍历n-1次\n for i in range(len(data)-1):\n for j in range(i+1, len(data)):\n if data[i] > data[j]:\n data[i], data[j] = data[j], data[i]\n return data\n\n\nif __name__ == \"__main__\":\n t_data = [5, 4, 3, 2, 1]\n print(select_sort(t_data))" }, { "alpha_fraction": 0.49152541160583496, "alphanum_fraction": 0.5275423526763916, "avg_line_length": 23.894737243652344, "blob_id": "48f0e1716b57727340534820c169621256973b23", "content_id": "ebd2b1ae51d6da1e02c2b45bbd62f56da104bf5e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 680, "license_type": "permissive", "max_line_length": 53, "num_lines": 19, "path": "/part-sort-alogrithm/test-insert.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# 插入排序 时间复杂度O(N^2)\n# 具体过程如下 每次循环往已经排好序的数组从后往前插入一个元素,第一趟比较两个元素的大小,第二趟插入元素\n# 与前两个元素进行比较,放到合适的位置,以此类推。\n\n\ndef insert_sort(data: list):\n for i in range(1, len(data)):\n key = data[i]\n # 相当于相邻元素进行比较,但是逻辑更清楚一点\n for j in range(i-1, -1, -1):\n if data[j] > key:\n data[j+1] = data[j]\n data[j] = key\n return data\n\n\nif __name__ == \"__main__\":\n t_data = [2, 4, 1, 5, 3, 5, 9, 10, 8, 7]\n print(insert_sort(t_data))" }, { "alpha_fraction": 0.5122615694999695, "alphanum_fraction": 0.5367847681045532, "avg_line_length": 27.230770111083984, "blob_id": "a53deaf23198740e827cfb3effb181c41d8bf606", "content_id": "dbc1f7e5492a4ea66eefa3692fecf09376435b67", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 427, "license_type": "permissive", "max_line_length": 81, "num_lines": 13, "path": "/part-jsane/test01.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import jsane\n\n\nif __name__ == \"__main__\":\n # jsane是一个json解析器\n # loads 解析一个json字符串\n j = jsane.loads('{\"name\": \"wulj\", \"value\": \"pass\"}')\n print(j.name.r())\n # from_dict 解析字典\n j2 = jsane.from_dict({'key': ['v1', 'v2', ['v3', 'v4', {'inner': 'value'}]]})\n print(j2.key[2][2].inner.r())\n # 当解析找不到key时,设置默认值\n print(j2.key.new.r(default=\"test\"))\n" }, { "alpha_fraction": 0.42378050088882446, "alphanum_fraction": 0.4466463327407837, "avg_line_length": 16.756755828857422, "blob_id": "ec4b7049d2aecb4bf7616dac9c6851ea2961ed59", "content_id": "9fe02d8ec92ed364a30a2f1b7cf71cceafd7e688", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 662, "license_type": "permissive", "max_line_length": 48, "num_lines": 37, "path": "/part-interview/test12.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-05 20:00\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test12.py\n# ----------------------------------------------\nfrom abc import abstractmethod, ABCMeta\n\n\nclass Interface(object):\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def test(self):\n pass\n\n def new(self):\n pass\n\n\nclass NewTest(Interface):\n def __init__(self):\n print(\"interface\")\n\n def test(self):\n print(\"test\")\n\n def new(self):\n print(\"new\")\n\n\nif __name__ == \"__main__\":\n print(\"test\")\n # nt = NewTest()\n # nt.test()\n # nt.new()" }, { "alpha_fraction": 0.45625001192092896, "alphanum_fraction": 0.48750001192092896, "avg_line_length": 20.399999618530273, "blob_id": "ec36bcce2aa077416b74794db2ddbd6bad0c4210", "content_id": "b41f9af09a8f08b9f7f803d40e13dbb6e2f9e6f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "permissive", "max_line_length": 28, "num_lines": 15, "path": "/part-data/test-scale.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "if __name__ == \"__main__\":\n x = 1234\n # 函数形式\n print(bin(x))\n print(oct(x))\n print(hex(x))\n # format形式,没有前缀 0b,0o,0x\n print(format(x, 'b'))\n print(format(x, 'o'))\n print(format(x, 'x'))\n #将进制的数据转换成整数字符串\n a = format(x, 'b')\n b = format(x, 'x')\n print(int(a, 2))\n print(int(b, 16))" }, { "alpha_fraction": 0.6122449040412903, "alphanum_fraction": 0.6163265109062195, "avg_line_length": 23.600000381469727, "blob_id": "5fce6f085ca33d51374dd51645e8083b1702ec71", "content_id": "667bd688256319c542844cce21d5b1d14c9abb9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "permissive", "max_line_length": 79, "num_lines": 10, "path": "/part-text/test-glob-fnmatch.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import glob\nimport fnmatch\nimport os.path\n\n\nif __name__ == \"__main__\":\n dir_path = '/root/tmp/test'\n path = '/root/tmp/test/*.py'\n pyfiles = glob.glob(path)\n pyfiles2 = [name for name in os.listdir(dir_path) if fnmatch(name, '*.py')]" }, { "alpha_fraction": 0.47567567229270935, "alphanum_fraction": 0.49369367957115173, "avg_line_length": 25.4761905670166, "blob_id": "e6c207654dbc0a08e1ca30a0d1d3bfadf0750b29", "content_id": "e3b354f0cd008637525bfb6a7b0f0dbdf2fe651d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "permissive", "max_line_length": 60, "num_lines": 21, "path": "/part-text/test-list.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import arrow\nimport re\nimport pdb\nimport tempfile\n\n\nif __name__ == \"__main__\":\n # print(arrow.now().shift(days=-1).format('YYYY-MM-DD'))\n # data = ['merge', '1', 'commit', 'merge']\n # data.remove('1')\n # print(data)\n # d = [{'code': 12}, {'code': 11}, {'code': 13}]\n # d.sort(key=lambda x: x['code'])\n # print(d)\n # s = ' --hello -world+ '\n # print(re.sub(\"[' ', '-', '+']\", '', s))\n with tempfile.NamedTemporaryFile('w+t') as f:\n print(f.name)\n f.write('hello world!')\n f.seek(0)\n print(f.read())" }, { "alpha_fraction": 0.52173912525177, "alphanum_fraction": 0.52173912525177, "avg_line_length": 14.333333015441895, "blob_id": "e5d5571a34c37fa63fd98fa4cd7598c053411012", "content_id": "c025c46d34ba93e2fa12f196ba1f6ee19a8352e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "permissive", "max_line_length": 30, "num_lines": 6, "path": "/part-text/test-re.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import re\n\n\ntext = '/* http new s */'\nr = re.compile(r'/\\*(.*?)\\*/')\nprint(r.findall(text))\n" }, { "alpha_fraction": 0.5920934677124023, "alphanum_fraction": 0.6064689755439758, "avg_line_length": 24.9069766998291, "blob_id": "ed4502d094fa2d9d565ea235127c3ad7f9ac3899", "content_id": "29c7922c1a3a95a6cfa409e887ab9070bbdd31cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1245, "license_type": "permissive", "max_line_length": 62, "num_lines": 43, "path": "/part-text/test-smtp.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# import smtplib\n# from email.mime.text import MIMEText\n# from email.header import Header\n#\n# # 第三方 SMTP 服务\n# mail_host = \"smtp.qq.com\" # 设置服务器\n# mail_user = \"\" # 用户名\n# mail_pass = \"XXXXXX\" # 口令\n#\n# sender = '[email protected]'\n# receivers = ['[email protected]'] # 接收邮件,可设置为你的QQ邮箱或者其他邮箱\n#\n# message = MIMEText('Python 邮件发送测试...', 'plain', 'utf-8')\n# message['From'] = Header(\"菜鸟教程\", 'utf-8')\n# message['To'] = Header(\"测试\", 'utf-8')\n#\n# subject = 'Python SMTP 邮件测试'\n# message['Subject'] = Header(subject, 'utf-8')\n#\n# try:\n# smtpObj = smtplib.SMTP()\n# smtpObj.connect(mail_host, 25) # 25 为 SMTP 端口号\n# smtpObj.login(mail_user, mail_pass)\n# smtpObj.sendmail(sender, receivers, message.as_string())\n# print(\"邮件发送成功\")\n# except smtplib.SMTPException:\n# print(\"Error: 无法发送邮件\")\n\nimport arrow\nimport json\nimport os\nfrom pathlib import Path\n\n\nif __name__ == \"__main__\":\n print(1)\n print(Path(__file__).resolve().parent)\n # with open('test.json', 'r') as config:\n # print(config)\n # print(type(json.load(config)))\n # print(arrow.now())\n # data = [1,2,3,4,5,6]\n # print(data[3:])" }, { "alpha_fraction": 0.5079365372657776, "alphanum_fraction": 0.5428571701049805, "avg_line_length": 18.6875, "blob_id": "7447eddeb52631245671c8cf45f6c255106bf15f", "content_id": "430867b0ba396a56c58400b5143edbb5dabcdacf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "permissive", "max_line_length": 44, "num_lines": 16, "path": "/part-data/test-decimal.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from decimal import Decimal, localcontext\n\n\ndef main(a, b):\n a = Decimal(a)\n b = Decimal(b)\n return a+b\n\n\nif __name__ == \"__main__\":\n sum = main('3.2', '4.3')\n # 使用上下文管理器更改输出的配置信息\n with localcontext() as ctx:\n ctx.prec = 3\n print(Decimal('3.2')/Decimal('2.3'))\n print(sum == 7.5)\n" }, { "alpha_fraction": 0.5630027055740356, "alphanum_fraction": 0.5790884494781494, "avg_line_length": 24.758621215820312, "blob_id": "d17bdcddac81d2125abdccc3a0b08cfc59c6220d", "content_id": "92c09ba9de5cd0add91f1704ed2830f17011c9d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1026, "license_type": "permissive", "max_line_length": 64, "num_lines": 29, "path": "/part-sort-alogrithm/test-quick.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# 快速排序 时间复杂度时O(NlogN)\n# 具体过程如下 采用一种分治递归的算法 从数组中任意选择一个数作为基准值,然后将数组中比基准值小的放在左边\n# 比基准值大的放在右边,然后对左右两边的数使用递归的方法排序\n\n\ndef partition(data, start, end):\n i = start - 1\n for j in range(start, end):\n # 刚开始以data[end]的值作为基准值\n if data[j] < data[end]:\n i += 1\n # 如果j所在的位置的值小于end,则i往前进一步,并与j的值交换,即将一个新的值加入到小于end的区域\n data[i], data[j] = data[j], data[i]\n i += 1\n data[i], data[end] = data[end], data[i]\n return i\n\n\ndef quick_sort(data: list, start, end):\n if start < end:\n mid = partition(data, start, end)\n quick_sort(data, start, mid-1)\n quick_sort(data, mid+1, end)\n return data\n\n\nif __name__ == \"__main__\":\n t_data = [5, 4, 3, 2, 1]\n print(quick_sort(t_data, 0, 4))" }, { "alpha_fraction": 0.42276424169540405, "alphanum_fraction": 0.46544715762138367, "avg_line_length": 27.941177368164062, "blob_id": "2ab3c8a14f38afa7972459560fa63d8757cb8958", "content_id": "9fc48abbe6d316e8a40b53e965ccb98b1309e7c4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "permissive", "max_line_length": 58, "num_lines": 17, "path": "/part-interview/test15.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-07 12:18\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test15.py\n# ----------------------------------------------\n\n\nif __name__ == \"__main__\":\n # filter 方法,func + iterator\n data = [i for i in range(1, 11)]\n print(list(filter(lambda x: x % 2 == 0, data)))\n # 什么是猴子补丁\n # 运行是动态替换模块的方法\n # python 是如何管理内存的,引用计数和分代垃圾回收的机制\n # 当退出 python3 时,是否会释放所有内存分配,答案时否定的,对于循环引用和相互引用的内存还不会释放\n" }, { "alpha_fraction": 0.5419501066207886, "alphanum_fraction": 0.5850340127944946, "avg_line_length": 25, "blob_id": "aa86d27fb8977eba345c04ae9e6f4731ff2399c1", "content_id": "119126d810e04f082b0d2763dbef56435a238024", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "permissive", "max_line_length": 57, "num_lines": 17, "path": "/part-text/test-set.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "if __name__ == \"__main__\":\n # list,dict,set是不可hash的\n # int,float,str,tuple是可以hash的\n data = [1, 2, '232', (2, 3)]\n data1 = [2, 3, '213', (2, 3)]\n\n # 两个list取补集,元素在data中,不在data1中\n diff_list = list(set(data).difference(set(data1)))\n print(diff_list)\n\n # 取交集\n inter_list = list(set(data).intersection(set(data1)))\n print(inter_list)\n\n # 取并集\n union_list = list(set(data).union(set(data1)))\n print(union_list)" }, { "alpha_fraction": 0.4797406792640686, "alphanum_fraction": 0.4991896152496338, "avg_line_length": 23.68000030517578, "blob_id": "77d9806e1c9372f32076dc89eeb02fd170adef1a", "content_id": "48d09ea3864ceccb6940fe78c8daf8dedd98966e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 617, "license_type": "permissive", "max_line_length": 79, "num_lines": 25, "path": "/part-text/bracket_expression.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import arrow\n\n\nbracket_dct = {'(': ')', '{': '}', '[': ']', '<': '>'}\n\n\ndef bracket(arg: str):\n match_stack = []\n for char in arg:\n if char in bracket_dct.keys():\n match_stack.append(char)\n elif char in bracket_dct.values():\n if len(match_stack) > 0 and bracket_dct[match_stack.pop()] == char:\n continue\n else:\n return False\n else:\n continue\n return True\n\n\nif __name__ == \"__main__\":\n test = '(12121){}dasda[oio{dad}232<asfsd>232]'\n print(arrow.now().format('YYYY-MM-DD HH:MM:SS'))\n print(bracket(test))\n" }, { "alpha_fraction": 0.6081632375717163, "alphanum_fraction": 0.6169096231460571, "avg_line_length": 25.384614944458008, "blob_id": "029dfcb9c6593aa8143429a013b30d009b7f9084", "content_id": "94ad84b19464786036a3922699675a0845f735f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1715, "license_type": "permissive", "max_line_length": 73, "num_lines": 65, "path": "/part-thread/thread_lock.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import threading\nfrom socket import socket, AF_INET, SOCK_STREAM\nfrom functools import partial\nfrom contextlib import contextmanager\n\n\n# State to stored info on locks already acquired\n_local = threading.local()\n\n\n@contextmanager\ndef acquire(*locks):\n locks = sorted(locks, key=lambda x: id(x))\n\n acquired = getattr(_local, 'acquire', [])\n if acquired and max(id(lock) for lock in acquired) >= id(locks[0]):\n raise RuntimeError('Lock order violation')\n\n acquired.extends(locks)\n _local.acquired = acquired\n\n try:\n for lock in locks:\n lock.acquire()\n yield\n finally:\n for lock in reversed(locks):\n lock.release()\n del acquired[-len(locks):]\n\n\nclass LazyConnection:\n def __init__(self, address, family=AF_INET, socket_type=SOCK_STREAM):\n self.address = address\n self.family = family\n self.socket_type = socket_type\n self.local = threading.local()\n\n def __enter__(self):\n if hasattr(self.local, 'sock'):\n raise RuntimeError('connection existed')\n self.local.sock = socket(self.family, self.socket_type)\n self.local.sock.connect(self.address)\n return self.local.sock\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.local.sock.close()\n del self.local.sock\n\n\ndef test(conn):\n with conn as c:\n c.send(b'test\\n')\n resp = b''.join(iter(partial(c.recv, 8192), b''))\n print(len(resp))\n\n\nif __name__ == \"__main__\":\n conn = LazyConnection((\"www.test.com\", 8081))\n t1 = threading.Thread(target=test, args=(conn,))\n t2 = threading.Thread(target=test, args=(conn,))\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n" }, { "alpha_fraction": 0.6490850448608398, "alphanum_fraction": 0.6490850448608398, "avg_line_length": 30, "blob_id": "d15df2a4f1e4b6886d3eda947c3038c437071927", "content_id": "695b407b2f890bab9830b10be89f3fae49b35962", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1037, "license_type": "permissive", "max_line_length": 103, "num_lines": 30, "path": "/part-text/test-path.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import os.path\nimport time\nimport glob\nimport fnmatch\n\n\nif __name__ == \"__main__\":\n dir_path = '/data/proc/log'\n file_name = [name for name in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, name))]\n dir_name = [name for name in os.listdir(dir_path) if os.path.isdir(os.path.join(dir_path, name))]\n pyfile = [name for name in os.listdir(dir_path) if name.endswith('.py')]\n path = '/data/prolog/log/test.log'\n print(os.path.basename(path))\n print(os.path.dirname(path))\n print(os.path.split(path))\n print(os.path.join('root', 'tmp', os.path.basename(path)))\n # 测试文件或者目录是否存在 指定类型判断\n if os.path.exists(path):\n print(True)\n os.path.isfile(path)\n os.path.isdir(path)\n # 测试是否是软连接\n os.path.islink(path)\n # 得到软连接的完整路径\n os.path.realpath(path)\n os.path.getsize(path)\n # 得到文件的创建时间\n os.path.getmtime(path)\n # 修改文件的创建时间\n time.ctime(os.path.getmtime(path))" }, { "alpha_fraction": 0.5515643358230591, "alphanum_fraction": 0.5589030385017395, "avg_line_length": 23.875, "blob_id": "94a90a3509395798941b32f988fbcd6f4342f157", "content_id": "17bb3ad8b5b6717bb334d6eec946d902ed4b9527", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2757, "license_type": "permissive", "max_line_length": 67, "num_lines": 104, "path": "/part-marshmallow/test-load&dump.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from marshmallow import Schema, fields, post_load, pprint\nfrom hashlib import md5\n\nsort_key = ['name', 'role']\n\n\nclass Actor(object):\n \"\"\"\n 创建actor基础类\n \"\"\"\n def __init__(self, name, role, grade):\n self.name = name\n self.role = role\n self.grade = grade\n\n def __str__(self):\n return '<Actor_str(name={self.name!r})>'.format(self=self)\n\n def __repr__(self):\n return '<Actor_repr(name={self.name!r})>'.format(self=self)\n\n def __eq__(self, other):\n bools = []\n for key in sort_key:\n bools.append(getattr(self, key) == getattr(other, key))\n return all(bools)\n\n @staticmethod\n def get_hash(self):\n source = ''.join([getattr(self, key) for key in sort_key])\n m = md5(source.encode('utf-8'))\n return m.hexdigest()\n\n\nclass Movie(object):\n \"\"\"\n 创建movie基础类\n \"\"\"\n def __init__(self, name, actors):\n self.name = name\n self.actors = actors\n\n # 重构内置的str函数\n def __str__(self):\n return '<Movie_str(name={self.name!r})>'.format(self=self)\n\n # 重构内置的repr函数\n def __repr__(self):\n return '<Movie_repr(name={self.name!r})>'.format(self=self)\n\n # 重构内置的 == 函数\n def __eq__(self, other):\n bools = []\n act1 = {actor.get_hash(): actor for actor in self.actors}\n act2 = {actor.get_hash(): actor for actor in other.actors}\n common_key = set(act1) & set(act2)\n for key in common_key:\n bools.append(act1.pop(key) == act2.pop(key))\n unique_count = len(act1.values()) + len(act2.values())\n bl = (self.name == other.name)\n return bl and all(bools) and (unique_count == 0)\n\n\nclass ActorScm(Schema):\n \"\"\"\n 创建actor schema基础类\n \"\"\"\n name = fields.Str()\n role = fields.Str()\n grade = fields.Int()\n\n @post_load\n def make_data(self, data):\n return Actor(**data)\n\n\nclass MovieScm(Schema):\n \"\"\"\n 创建movie schema基础类\n \"\"\"\n name = fields.Str()\n actors = fields.Nested(ActorScm, many=True)\n\n @post_load\n def make_data(self, data):\n return Movie(**data)\n\n\nif __name__ == \"__main__\":\n # 将字典反序列化为movie基础类\n actor1 = {'name': 'lucy', 'role': 'hero', 'grade': 9}\n actor2 = {'name': 'mike', 'role': 'boy', 'grade': 10}\n movie = {'name': 'green', 'actors': [actor1, actor2]}\n schema = MovieScm()\n ret = schema.load(movie)\n # print 输出类时,调用的是__str__函数\n print(ret)\n # pprint 输出类时,调用的是__repr__函数\n pprint(ret.data)\n\n # 将movie基础类序列化为字典\n schema = MovieScm()\n ret_dct = schema.dump(ret.data)\n pprint(ret_dct.data)\n\n\n" }, { "alpha_fraction": 0.5533769130706787, "alphanum_fraction": 0.5664488077163696, "avg_line_length": 22, "blob_id": "7f5a4dd2a15fd817c8ac4919edd05e9080f80bdc", "content_id": "9549951d8f7ad7f7cbbfd49d30de3d4026f064fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "permissive", "max_line_length": 59, "num_lines": 20, "path": "/part-data/test-callback.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "def async_apply(func, args, *, callback):\n result = func(*args)\n callback(result)\n\n\ndef make_handle():\n sequence = 0\n while True:\n result = yield\n sequence += 1\n print('[{}] result is {}'.format(sequence, result))\n\n\nif __name__ == \"__main__\":\n # 协程处理\n handle = make_handle()\n next(handle)\n add = lambda x, y: x+y\n async_apply(add, (2, 3), callback=handle.send)\n async_apply(add, (3, 4), callback=handle.send)" }, { "alpha_fraction": 0.5214446783065796, "alphanum_fraction": 0.5293453931808472, "avg_line_length": 19.159090042114258, "blob_id": "6317f6670f67fcb6820301e849f909ec2cb41208", "content_id": "5ba4c0f93da61b4cad82b67a6a63fa6d453206fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 978, "license_type": "permissive", "max_line_length": 61, "num_lines": 44, "path": "/part-text/data/test-copy-text.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from collections import OrderedDict\n\n\ndef dedupe(items):\n \"\"\"\n 删除一个迭代器中重复的元素,并保持顺序\n :param items: 迭代器\n :return:\n \"\"\"\n a = set()\n for item in items:\n if item not in a:\n yield item\n a.add(item)\n\n\n# 找出一个字符串中最长的没有重复字符的字段\ndef cutout(test: str):\n max_data = []\n for s in test:\n if s not in max_data:\n max_data.append(s)\n else:\n yield max_data\n max_data = []\n max_data.append(s)\n yield max_data\n\n\nif __name__ == \"__main__\":\n data = [1, 2, 2, 1, 4, 5, 4]\n print(list(dedupe(data)))\n\n # 简单方法\n order_dct = OrderedDict()\n for item in data:\n order_dct[item] = item\n print(list(order_dct.keys()))\n data = 'anmninminuc'\n for item in cutout(data):\n print(''.join(item))\n\n output = ''.join(max(cutout(data), key=lambda s: len(s)))\n print(output)" }, { "alpha_fraction": 0.46271511912345886, "alphanum_fraction": 0.590822160243988, "avg_line_length": 18.370370864868164, "blob_id": "53c1ba24d0ee7e398c7a81325e89924726b62306", "content_id": "9baedaf676c54139940f075f444103fac97f9b35", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 523, "license_type": "permissive", "max_line_length": 38, "num_lines": 27, "path": "/part-struct/test-compress.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from itertools import compress\nimport re\nimport arrow\n\n\naddresses = [\n '5412 N CLARK',\n '5148 N CLARK',\n '5800 E 58TH',\n '2122 N CLARK',\n '5645 N RAVENSWOOD',\n '1060 W ADDISON',\n '4801 N BROADWAY',\n '1039 W GRANVILLE',\n]\ncounts = [0, 3, 10, 4, 1, 7, 6, 1]\n\nnew = [n > 5 for n in counts]\n\nl = list(compress(addresses, new))\nprint(l)\ntest = '12 23, 34; 1213'\nprint(re.split(r'\\s*[,;\\s]\\s*', test))\n\nprint(arrow.now().isoformat())\nt = arrow.get('2018-12-01 10:23')\nprint(t.isoformat().split('.')[0])\n" }, { "alpha_fraction": 0.5063062906265259, "alphanum_fraction": 0.5387387275695801, "avg_line_length": 18.13793182373047, "blob_id": "4d8db510ecb903dfa3b7f6525ec700292ee44607", "content_id": "21a5ea9309ed1dc3f9378be7faa6ade4cb2d7a36", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "permissive", "max_line_length": 52, "num_lines": 29, "path": "/part-interview/test09.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-04 15:31\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test09.py\n# ----------------------------------------------\nimport pymysql\n\n# 打开数据库连接\ndb = pymysql.connect(\"host\", \"username\", \"pw\", \"db\")\n\n# 创建一个游标对象\ncursor = db.cursor()\n\n# 执行查询\ncursor.execute(\"select * from db.tb\")\n\n# 获取数据\ndata = cursor.fetchone()\nprint(data)\n\n# 关闭连接\ndb.close()\n\n# 数据库的三范式\n# 1. 确保每列保持原子性,每一列的数据都是不可分解的原子值,根据需求而定哈\n# 2. 确保表中的每列都和主键相关,不能只和一部分主键相关(主要针对联合主键而言)\n# 3. 确保每列都和主键直接相关,而不能间接相关\n" }, { "alpha_fraction": 0.5691056847572327, "alphanum_fraction": 0.6043360233306885, "avg_line_length": 20.764705657958984, "blob_id": "a44a715105b77ebedf78a0504546aa38910f34ca", "content_id": "9fc2d6f2b233f893191b0c381cff5845fb85bb8c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 479, "license_type": "permissive", "max_line_length": 35, "num_lines": 17, "path": "/part-data/test-random.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import random\n\n\nif __name__ == \"__main__\":\n values = [1, 2, 3, 4, 5]\n # 随机选取一个元素\n print(random.choice(values))\n # 随机选取几个元素且不重复\n print(random.sample(values, 3))\n # 打乱原序列中的顺序\n print(random.shuffle(values))\n # 生成随机整数,包括边界值\n print(random.randint(0, 10))\n # 生成0-1的小数\n print(random.random())\n # 获取N位随机数的整数\n print(random.getrandbits(10))" }, { "alpha_fraction": 0.517110288143158, "alphanum_fraction": 0.5969581604003906, "avg_line_length": 17.85714340209961, "blob_id": "2d1d47ab6569f49eb7676644d902bb9501b95b6c", "content_id": "ace3c362bebd65ef1741f086274328e456212db3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "permissive", "max_line_length": 31, "num_lines": 14, "path": "/part-data/test-hex.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import base64\nimport binascii\n\n\nif __name__ == \"__main__\":\n s = b'hello world!'\n # 2进制转换成16进制\n h = binascii.b2a_hex(s)\n print(h)\n # 16进制转换成2进制\n print(binascii.a2b_hex(h))\n h1 = base64.b16encode(s)\n print(h1)\n print(base64.b16decode(h1))" }, { "alpha_fraction": 0.5495818257331848, "alphanum_fraction": 0.561529278755188, "avg_line_length": 19.950000762939453, "blob_id": "49ed674624d7f2065d94ae175a85f2f673a0fb1b", "content_id": "3fe09df74793d0cb9fa7b4b319270e4538ed43cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 861, "license_type": "permissive", "max_line_length": 96, "num_lines": 40, "path": "/part-data/test-b2-struct.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from struct import Struct\n\n\ndef record_data(records, format, file):\n record_struct = Struct(format)\n for r in records:\n file.write(record_struct.pack(*r))\n\n\ndef read_data(format, f):\n \"\"\"\n 增量块的形式迭代\n :param format:\n :param f:\n :return:\n \"\"\"\n read_struct = Struct(format)\n chunks = iter(lambda: f.read(read_struct.size), b'')\n return (read_struct.unpack(chunk) for chunk in chunks)\n\n\ndef unpack_data(format, f):\n \"\"\"\n 全量迭代\n :param format:\n :param f:\n :return:\n \"\"\"\n unpack_data = Struct(format)\n return (unpack_data.unpack_from(f, offset) for offset in range(0, len(f), unpack_data.size))\n\n\nif __name__ == \"__main__\":\n records = [\n (1, 2, 3),\n (2, 3, 4),\n (3, 4, 5),\n ]\n with open('test.file', 'wb') as f:\n record_data(records, '<idd', f)" }, { "alpha_fraction": 0.43396225571632385, "alphanum_fraction": 0.4802744388580322, "avg_line_length": 26.761905670166016, "blob_id": "7f5abd591981b352e2e0898b16228509a0011a5b", "content_id": "56083326bdbe71bc50d141bd5844536449d80e01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 589, "license_type": "permissive", "max_line_length": 55, "num_lines": 21, "path": "/part-elasticsearch/test-elasticsearch.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2019-11-25 17:49\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test-elasticsearch.py\n# ----------------------------------------------\nfrom elasticsearch import Elasticsearch\nfrom ssl import create_default_context\n\n\nif __name__ == \"__main__\":\n context = create_default_context(cafile=\"./ca.crt\")\n es = Elasticsearch(\n ['10.100.51.164'],\n http_auth=('elastic', 'K6fgGGmOu359V4GY3TOw'),\n scheme=\"https\",\n port=9200,\n ssl_context=context\n )\n print(es.info())\n" }, { "alpha_fraction": 0.35104894638061523, "alphanum_fraction": 0.39300698041915894, "avg_line_length": 23.689655303955078, "blob_id": "15ab7077518f8e41597ec29f07c3c70bec7def3c", "content_id": "f1bfa883ff066e83aa9fe7318593667b36b1afbd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 799, "license_type": "permissive", "max_line_length": 48, "num_lines": 29, "path": "/part-interview/test04.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-01 12:33\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test04.py\n# ----------------------------------------------\nif __name__ == \"__main__\":\n # 字典操作\n dct = {\"a\": 1, \"b\": 2}\n a = dct.pop(\"a\")\n print(a)\n print(dct)\n del dct[\"b\"]\n print(dct)\n # 合并两个字典\n a = {\"a\": 1, \"b\": 2}\n b = {\"c\": 3, \"d\": 4}\n a.update(b)\n print(a)\n # 生成器的方式生成一个字典,dict 直接初始化 必须是元组的 list 形式才可以\n values = [1, 2, 3]\n keys = [\"a\", \"b\", \"c\"]\n dct = {k: v for k, v in zip(keys, values)}\n print(dct)\n dct2 = dict(zip(keys, values))\n dct3 = dict([(\"a\", 1), (\"b\", 2)])\n print(dct2)\n print(dct3)" }, { "alpha_fraction": 0.3858974277973175, "alphanum_fraction": 0.4205128252506256, "avg_line_length": 19.552631378173828, "blob_id": "37793f15965f53b801dca6186d85dea4efce89ac", "content_id": "a6f93d8033552d1183f01cf041696c879693b761", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 888, "license_type": "permissive", "max_line_length": 48, "num_lines": 38, "path": "/part-interview/test05.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-01 12:45\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test05.py\n# ----------------------------------------------\n\n\n# 定义一个生成器的函数,需要用到 yield\ndef my_generate(nums):\n for i in nums:\n yield i\n\n\nif __name__ == \"__main__\":\n # 一行代码搞定交换字典的 key value 值\n dct1 = {\"A\": 1, \"B\": 2}\n dct2 = {str(v): k for k, v in dct1.items()}\n print(dct2)\n # 实现 tuple 和 list 的转换\n a = (1, 2, 3)\n print(a)\n b = list(a)\n print(b)\n # 把 列表转换成生成器\n c = [i for i in range(3)]\n g = my_generate(c)\n print(g)\n # 遍历生成器\n for i in g:\n print(i, end=\" \")\n print(\"\")\n # 编码\n a = \"hello\"\n b = \"你好\"\n print(a.encode(\"utf-8\"))\n print(b.encode(\"utf-8\"))" }, { "alpha_fraction": 0.5846154093742371, "alphanum_fraction": 0.5902097821235657, "avg_line_length": 24.571428298950195, "blob_id": "43cd0d76ea2b1147f7a53385e66720b5e6d796d5", "content_id": "4e206d7bab07615cea43fdc5a928e089c4284b63", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 723, "license_type": "permissive", "max_line_length": 66, "num_lines": 28, "path": "/part-class/test-with.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from functools import partial\nfrom socket import socket, AF_INET, SOCK_STREAM\n\n\nclass LazyConnection:\n def __init__(self, address, family=AF_INET, type=SOCK_STREAM):\n self.address = address\n self.family = family\n self.type = type\n self.connections = []\n\n def __enter__(self):\n sock = socket(self.family, self.type)\n sock.connect(self.address)\n self.connections.append(sock)\n return sock\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.connections.pop().close()\n\n\nif __name__ == \"__main__\":\n conn = LazyConnection(('http://www.baidu.com', 80))\n # 嵌套使用conn\n with conn as s1:\n pass\n with conn as s2:\n pass" }, { "alpha_fraction": 0.3881579041481018, "alphanum_fraction": 0.46052631735801697, "avg_line_length": 18.125, "blob_id": "9887c649e4732b1353e5887324b3ed9a43a9407d", "content_id": "6b5621a22c498035f566cdbdc4754b8371dd3fba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "permissive", "max_line_length": 38, "num_lines": 8, "path": "/part-struct/upack-value.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "data = ['test', 90, 80, (1995, 8, 30)]\n\n\nif __name__ == \"__main__\":\n _, start, end, (_, _, day) = data\n print(start)\n print(end)\n print(day)" }, { "alpha_fraction": 0.465543657541275, "alphanum_fraction": 0.47830525040626526, "avg_line_length": 19.589473724365234, "blob_id": "8dbb43d47bd5b5e4430b64280cd92a63f79e2bba", "content_id": "f6e6582b87263ae26b23fbcff40eb49b83af876c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2541, "license_type": "permissive", "max_line_length": 66, "num_lines": 95, "path": "/part-interview/test14.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-06 10:58\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test14.py\n# ----------------------------------------------\n\n\nclass Demo(object):\n # 类的属性\n count = 0\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n print(\"__init__ 方法被执行\")\n\n def __new__(cls, *args, **kwargs):\n print(\"__new__ 方法被执行\")\n # 调用 cls 类才会执行 __init__ 方法\n return object.__new__(cls)\n\n def __str__(self):\n return \"str test\"\n\n def __repr__(self):\n return \"repr test\"\n\n def __del__(self):\n print(\"del\")\n\n def __getattribute__(self, item):\n # 属性访问拦截器\n if item == \"x\":\n return \"redirect x\"\n else:\n return object.__getattribute__(self, item)\n\n def __call__(self, *args, **kwargs):\n self.x, self.y = args\n print(\"__call__\")\n\n\ndef test(a):\n print(a)\n print(id(a))\n\n\nif __name__ == \"__main__\":\n # 列举你所知道的 python 的魔法方法及用途\n # python 有一些内置定义好的方法,这些方法在特定的时期会被自动调用\n # __init__ 函数,创建实例化对象为其赋值使用,是在 __new__ 之后使用,没有返回值\n # __new__ 是实例的构造函数,返回一个实例对象,__init__ 负责实例初始化操作,必须有返回值,返回一个实例对象\n d = Demo(1, 2)\n print(d)\n print(d.x)\n print(d.y)\n # 获取指定类的所有父类\n print(Demo.__bases__)\n d(3, 4)\n print(d.x)\n print(d.y)\n # print(type(d))\n # # 获取已知对象的类\n # print(d.__class__)\n # type 用于查看 python 对象类型\n print(type(d))\n # 对于可变数据类型和不可变数据类型有差异,可变数据类型用引用传参,不可变数据类型用传值\n # 不可变数据类型包括,数字,字符串,元组\n # 可变数据类型包括,列表,字典,集合\n a = 1\n print(a)\n print(id(a))\n test(a)\n\n a = [1, 2]\n print(a)\n print(id(a))\n test(a)\n\n a = {1, 2}\n print(a)\n print(id(a))\n test(a)\n\n # 简述 any(),all() 方法\n # any 数组中的所有元素只要有一个为 True 就返回 True\n if any([True, False]):\n print(\"any\")\n # all 数组中的所有元素只要有一个为 False 就返回 False\n if all([True, False]):\n print(\"all\")\n else:\n print(\"not all\")\n\n\n\n" }, { "alpha_fraction": 0.6072186827659607, "alphanum_fraction": 0.6135880947113037, "avg_line_length": 21.4761905670166, "blob_id": "4fd73d1a13862d0e26c6a1c8ee7f6f400febaa1b", "content_id": "8cc2435a9c853c0ce318718efd9d9015e26e808d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 503, "license_type": "permissive", "max_line_length": 68, "num_lines": 21, "path": "/part-class/test-iter-inial.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import collections\nimport bisect\n\n\nclass ItemSequence(collections.Sequence):\n def __init__(self, initial=None):\n self._items = sorted(initial) if initial is not None else []\n\n def __getitem__(self, item):\n return self._items[item]\n\n def __len__(self):\n return len(self._items)\n\n # bisect 插入item到有序队列里,并按照顺序排列\n def add(self, item):\n bisect.insort(self._items, item)\n\n\nif __name__ == \"__main__\":\n test = ItemSequence([1, 2, 3])" }, { "alpha_fraction": 0.4540117383003235, "alphanum_fraction": 0.49510762095451355, "avg_line_length": 21.217391967773438, "blob_id": "f54d08315d639171c8cbeb6a4193fdca1f82e641", "content_id": "919d9eec0215d74ac82c0590867c27d72d36158a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 517, "license_type": "permissive", "max_line_length": 50, "num_lines": 23, "path": "/part-sanic/test-sanic.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2019-11-07 18:50\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test-sanic.py\n# ----------------------------------------------\nfrom sanic import Sanic\nfrom sanic.response import json\nfrom pprint import pprint\n\n\napp = Sanic()\n\n\[email protected]('/', methods=['POST'])\nasync def bili_flv(request):\n pprint(request.raw_args)\n return json(True)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8821, debug=True)\n" }, { "alpha_fraction": 0.5179891586303711, "alphanum_fraction": 0.5372104644775391, "avg_line_length": 25.350648880004883, "blob_id": "85e60ac6dcd0c8bfa5ac9046f36c9b7d96542555", "content_id": "de7853e0e458697dc344b81784a6c0676477ba11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2185, "license_type": "permissive", "max_line_length": 101, "num_lines": 77, "path": "/part-interview/test10.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-04 16:38\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test10.py\n# ----------------------------------------------\nimport redis\nimport uuid\nimport time\nfrom threading import Thread\n\n\nredis_client = redis.Redis(host=\"127.0.0.1\", port=6379, username=\"test\", password=\"test\", db=0)\n\n\ndef acquire_lock(lock_name, acquire_time=10, time_out=10):\n \"\"\"\n :param lock_name: 锁名称\n :param acquire_time: 客户端等待获取锁的时间\n :param time_out: 锁的超时时间\n :return: True or False\n \"\"\"\n identifier = str(uuid.uuid4())\n end = time.time() + acquire_time\n lock = \"string:lock:\" + lock_name\n while time.time() < end:\n # 成功设置,则插入数据,返回1,否则已经有相同 key,返回0\n if redis_client.setnx(lock, identifier):\n # 设置 key 失效时间\n redis_client.expire(lock, time_out)\n # 获取成功,返回 identifier\n return identifier\n # 每次请求都更新锁名称的失效时间\n elif not redis_client.ttl(lock):\n redis_client.expire(lock, time_out)\n time.sleep(0.001)\n return False\n\n\ndef release_lock(lock_name, identifier):\n \"\"\"\n :param lock_name: 锁名称\n :param identifier: uid\n :return: True or False\n \"\"\"\n lock = \"string:lock:\" + lock_name\n pip = redis_client.pipeline(True)\n while True:\n try:\n pip.watch(lock)\n lock_value = redis_client.get(lock)\n if not lock_value:\n return True\n\n if lock_value.decode() == identifier:\n pip.multi()\n pip.delete(lock)\n pip.execute()\n return True\n pip.unwatch()\n break\n except redis.exceptions.WatchError:\n pass\n return False\n\n\ndef sec_kill():\n identifier = acquire_lock(\"resource\")\n print(Thread.getName(), \"acquire resource\")\n release_lock(\"resource\", identifier)\n\n\nif __name__ == \"__main__\":\n for i in range(50):\n t = Thread(target=sec_kill)\n t.start()\n" }, { "alpha_fraction": 0.4834224581718445, "alphanum_fraction": 0.5283422470092773, "avg_line_length": 25, "blob_id": "5b23a203ff5656937166807646e4b9e87b191c80", "content_id": "19defcdde90562323dc5ec0d555555613963fe41", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1011, "license_type": "permissive", "max_line_length": 92, "num_lines": 36, "path": "/part-text/test-yield.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from collections import Iterable\nimport random\nimport heapq\n\n\n# 处理嵌套列表\ndef flatten(items, ignore_types=(str, bytes)):\n for item in items:\n if isinstance(item, Iterable) and not isinstance(item, ignore_types):\n yield from flatten(item)\n else:\n yield item\n\n\nif __name__ == \"__main__\":\n l1 = [1, 2, 3, 4, 5, 10, 9]\n l2 = [2, 3, 4, 5, 8, 6, 11]\n for i in heapq.merge(l1, l2):\n print(i)\n print(\"end\")\n items = [1, 2, 3, [2, 3, 4], [5, 6, 7]]\n for i in flatten(items):\n print(i)\n # 改变输出的分割符和行尾符\n print(1, 2, sep=' ', end='#\\n')\n # str.join()只能连接字符串,非字符串的需要用sep方式隔开\n d = [\"wulj\", 1, 2]\n print(*d, sep=',')\n data = {'name1': ['vau1', 'vau2'], 'name2': ['vau1', 'vau2'], 'name3': ['vau1', 'vau2']}\n print(list(data.items()))\n k, v = random.choice(list(data.items()))\n data = {\n k: random.choice(v)\n }\n random.choice()\n print(data)" }, { "alpha_fraction": 0.6236754655838013, "alphanum_fraction": 0.6254919767379761, "avg_line_length": 26.272727966308594, "blob_id": "fc24eaf958a6ac093d7fd019aa38b1b074fdf634", "content_id": "b38878f28a0c524c4a8fa27ec9659052619d8437", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3409, "license_type": "permissive", "max_line_length": 90, "num_lines": 121, "path": "/part-marshmallow/test-load&dump2.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from marshmallow import Schema, fields, pprint, post_load, post_dump, ValidationError\nfrom datetime import datetime\n\n\nclass VideoLog(object):\n \"\"\"\n vlog基础类\n \"\"\"\n def __init__(self, **data):\n for k, v in data.items():\n setattr(self, k, v)\n\n def __str__(self):\n return '<VideoLog_str(name={self.name})>'.format(self=self)\n\n def __repr__(self):\n return '<VideoLog_repr(name={self.name})>'.format(self=self)\n\n\nclass User(object):\n \"\"\"\n user基础类\n \"\"\"\n def __init__(self, name, age, email, videos=None):\n self.name = name\n self.age = age\n self.email = email\n self.videos = videos or []\n\n def __str__(self):\n return '<User_str(name={self.name})>'.format(self=self)\n\n def __repr__(self):\n return '<User_repr(name={self.name})>'.format(self=self)\n\n\nclass VideoLogSchema(Schema):\n title = fields.Str(required=True)\n content = fields.Str(required=True)\n created_time = fields.DateTime()\n\n @post_load\n def make_data(self, data):\n return VideoLog(**data)\n\n\nclass UserSchema(Schema):\n name = fields.Str()\n age = fields.Int()\n email = fields.Email()\n videos = fields.Nested(VideoLogSchema, many=True)\n\n @post_load\n def make_data(self, data):\n return User(**data)\n\n\n# 继承前面定义好的schema类\nclass ProVideoSchema(VideoLogSchema):\n fans = fields.Nested(UserSchema, many=True)\n\n @post_load\n def make_data(self, data):\n return VideoLog(**data)\n\n\nclass TestAttributeSchema(Schema):\n new_name = fields.Str(attribute='name')\n age = fields.Int()\n email_addr = fields.Email(attribute='email')\n new_videos = fields.Nested(VideoLogSchema, many=True)\n\n @post_load\n def make_data(self, data):\n return User(**data)\n\n\n# 重构,隐式字段创建\nclass NewUserSchema(Schema):\n uppername = fields.Function(lambda obj: obj.name.upper())\n\n class Meta:\n fields = (\"name\", \"age\", \"email\", \"videos\", \"uppername\")\n\n\nif __name__ == \"__main__\":\n # 序列化为字典 example\n video = VideoLog(title='example', content='test', created_time=datetime.now())\n video_schema = VideoLogSchema()\n video_ret = video_schema.dump(video)\n pprint(video_ret.data)\n\n # 反序列化为类 example\n user_dct = {'name': 'wulj', 'age': 24, 'email': '[email protected]', 'videos': [video_ret.data]}\n user_schema = UserSchema()\n user_ret = user_schema.load(user_dct)\n pprint(user_ret.data)\n\n # 测试validate error\n test_video = {'title': 'test_validate'}\n try:\n print('test')\n schema = VideoLogSchema()\n ret = schema.load(test_video)\n pprint(ret.data)\n except ValidationError as err:\n print('error')\n pprint(err.valid_data)\n\n # 测试partial,处理required=True的\n partial_video = {'title': 'partial', 'created_time': datetime.now()}\n ret = VideoLogSchema().load(partial_video, partial=('content', ))\n print(ret)\n new_ret = VideoLogSchema(partial=('content', )).load(partial_video)\n new1_ret = VideoLogSchema(partial=True).load(partial_video)\n new2_ret = VideoLogSchema().load(partial_video, partial=True)\n\n # 测试attribute,指定属性名称\n test_user_attribute = User(name='attribute', age=23, email='[email protected]', videos=[])\n attribute_ret = TestAttributeSchema().dump(test_user_attribute)\n pprint(attribute_ret.data)\n\n\n\n" }, { "alpha_fraction": 0.4651394486427307, "alphanum_fraction": 0.4900398552417755, "avg_line_length": 30.40625, "blob_id": "fe13bbb4d55f4bbd7062ef2ed6cd9633593ba87e", "content_id": "96e9e3862fde815ec930e03b6fb991c6e7c41ab9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1020, "license_type": "permissive", "max_line_length": 107, "num_lines": 32, "path": "/part-interview/test07.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-03 20:58\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test07.py\n# ----------------------------------------------\nfrom pymongo import MongoClient\n\n\nclass PyMongoDemo:\n def __init__(self):\n self.client = MongoClient(\"mongodb://{username}:{password}@{host}:{port}\"\n .format(username=\"test\", password=\"test\", host=\"test\", port=27137))\n self.db = self.client.my_db # 数据库\n self.tb = self.db.tb # 表名\n\n def insert_data(self):\n users = [{\"name\": \"test\", \"age\": 10}, {\"name\": \"nb\", \"age\": 18}]\n self.tb.insert(users)\n\n def get_data(self):\n self.insertData()\n for data in self.tb.find():\n print(data)\n\n\nif __name__ == \"__main__\":\n m = PyMongoDemo()\n m.get_data()\n col = MongoClient(\"the_client\").get_database(\"the_db\").get_collection(\"the_col\")\n col.create_index([(\"field\", 1)], unique=False)" }, { "alpha_fraction": 0.35229358077049255, "alphanum_fraction": 0.3963302671909332, "avg_line_length": 23.81818199157715, "blob_id": "2718a577a8426ce3772de741631bb2565db7ba4e", "content_id": "9e339a5e5e761231956c0dc0f684e601d6a20570", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "permissive", "max_line_length": 48, "num_lines": 22, "path": "/part-interview/test20.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-08 12:13\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test20.py\n# ----------------------------------------------\nfrom collections import defaultdict\n\n\nif __name__ == \"__main__\":\n # 找出列表中重复的元素\n data = [1, 2, 3, 4, 3, 5, 5, 1]\n dct = defaultdict(list)\n for d in data:\n dct[str(d)].append(d)\n print(dct)\n for k, v in dct.items():\n if len(v) > 1:\n print(k)\n s = \"+++--++--\"\n print(\"\".join(sorted(s)))" }, { "alpha_fraction": 0.5474860072135925, "alphanum_fraction": 0.6312848925590515, "avg_line_length": 35, "blob_id": "b5103e17fdd6321ac77877c5d5170e6bbe1d8525", "content_id": "7f8ccd662791dd420493c27b412616e203caa5a2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "permissive", "max_line_length": 47, "num_lines": 5, "path": "/part-interview/test01.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# 使用lambda对list排序,正数在前,从小到大,负数在后,从大到小\n# lambda设置2个条件,先将小于0的排在后面,再对每一部分绝对值排序\ndata = [-5, 8, 0, 4, 9, -4, -20, -2, 8, 2, -4]\na = sorted(data, key=lambda x: (x < 0, abs(x)))\nprint(a)" }, { "alpha_fraction": 0.5717470645904541, "alphanum_fraction": 0.6080259680747986, "avg_line_length": 31.899999618530273, "blob_id": "11faa59f48ddcc25fb4fd74e78b268f29dfed956", "content_id": "703e319f211e2b85aa0b1c3bdac78c6c8a26d433", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6870, "license_type": "permissive", "max_line_length": 112, "num_lines": 150, "path": "/part-interview/test08.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-04 10:32\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test08.py\n# ----------------------------------------------\nimport redis\n\n\nif __name__ == \"__main__\":\n # redis 现有的数据类型\n # 1. String 二进制安全,可以包含任何数据,一个 key 对应一个 value\n # SET key value,GET key,DEL key\n # 2. Hash 数据类型,键值对集合,适合存储对象的属性\n # HMSET key field1 value1 field2 value2,HGET key field1\n # 3. List 数据类型,双向链表,消息队列\n # lpush key value,lrange key 0 10\n # 4. Set 数据类型,hash 表实现,元素不重复\n # sadd key value,smembers key\n # 5. zset 数据类型,有序集合\n # zadd key score member,排行榜,带权重的消息队列\n\n # python 连接 redis\n # 普通的连接方式\n redis_conn = redis.Redis(host=\"127.0.0.1\", port=6379, username=\"test\", password=\"test\", db=0)\n # 连接池的方式\n redis_pool = redis.ConnectionPool(host=\"127.0.0.1\", port=6379, username=\"test\", password=\"test\", db=0)\n redis_conn1 = redis.Redis(connection_pool=redis_pool)\n\n # String 字符串\n # set 操作,ex 过期时间(秒),px 过期时间(毫秒),nx (name 不存在时,当前操作才执行),xx (name 存在时,当前操作才执行)\n redis_conn.set(name=\"test\", value=\"test\", ex=\"300\", px=None, nx=True, xx=False)\n # get 操作\n v = redis_conn.get(\"test\")\n print(v)\n # mset 设置多个值\n redis_conn.mset({\"1\": 1, \"2\": 2})\n # mget 获取多个值\n m = redis_conn.mget([\"1\", \"2\"])\n # getset 给已有的键设置新值\n v = redis_conn.getset(\"1\", 2)\n # setrange 根据索引修改某个键的value值,返回的是值的长度\n lg = redis_conn.setrange(\"1\", 0, \"232\")\n # getrange 根据索引获取键的部分value值,当所给键不存在时,返回 b''\n v1 = redis_conn.getrange(\"key\", 1, 2)\n # strlen 获取value长度,如果没有key 返回 0\n lg1 = redis_conn.strlen(\"key\")\n # incr/decr,int 类型的值或者字符串的数值,默认为1\n v2 = redis_conn.incr(\"key\", amount=1)\n v3 = redis_conn.decr(\"key\", amount=1)\n # incrbyfloat,浮点数自增\n v4 = redis_conn.incrbyfloat(\"key\", amount=1.0)\n # append,追加字符串,如果不存在 key 就设置新值,返回value的长度\n lg2 = redis_conn.append(\"key\", \"666\")\n\n # List,在redis中,1个key对应一个列表\n # lpush/rpush,返回列表的大小,当键不存在时,创建新的列表\n lg3 = redis_conn.lpush(\"key\", 1, 2, \"test\")\n # lpushx/rpushx,当键不存在时,不添加也不创建新的列表\n lg4 = redis_conn.lpushx(\"key\", \"value\")\n # llen,获取所给key列表的大小\n lg5 = redis_conn.llen(\"key\")\n # linsert,在指定位置插入新值,ref_key 不存在就返回 0 ,否则就返回插入后list的长度\n lg6 = redis_conn.linsert(\"key\", \"AFTER\", \"ref_key\", \"value\")\n # lset 通过索引赋值,返回 boolean 值\n bl = redis_conn.lset(\"key\", 0, \"value\")\n # lindex 通过索引获取列表中的值\n v6 = redis_conn.lindex(\"key\", 0)\n # lrange,获取列表中的一段数据\n v7 = redis_conn.lrange(\"key\", 0, 5)\n # lpop/rpop 删除左边或者右边第一个值,返回被删除元素的值\n v8 = redis_conn.lpop(\"key\")\n # lrem 删除列表中N个相同的值,返回被删除元素的个数\n v9 = redis_conn.lrem(\"key\", \"value\", -2)\n # ltrim 删除列表范围外的所有元素\n v10 = redis_conn.ltrim(\"key\", 5, 6)\n # blpop 删除并返回列表最左边的值,返回一个元组 (key, value)\n v11 = redis_conn.blpop(\"key\")\n # rpoplpush 一个列表最右边的元素取出后添加到列表的最左边,返回取出的元素值\n v12 = redis_conn.rpoplpush(\"key1\", \"key2\")\n\n # Hash,value 值一个 map\n # hset,返回添加成功的个数\n v13 = redis_conn.hset(\"key\", \"key1\", \"value\")\n # hmset 添加多个键值对\n v14 = redis_conn.hmset(\"key\", {\"1\": 1, \"2\": 2})\n # hmget 获取多个键值对\n v15 = redis_conn.hmget(\"key\", [\"1\", \"2\"])\n # hget\n v16 = redis_conn.hget(\"key\", \"1\")\n # hgetall,获取所有的键值对\n v17 = redis_conn.hgetall(\"name\")\n # hlen 获取键值对的个数\n v18 = redis_conn.hlen(\"name\")\n # hkeys 获取所有的键\n v19 = redis_conn.hkeys(\"name\")\n # hvals 获取所有的value\n v20 = redis_conn.hvals(\"name\")\n # hexists 检查 hash 中是否存在某个 key\n v21 = redis_conn.hexists(\"name\", \"key\")\n # hdel 删除 hash 中的键值对\n v22 = redis_conn.hdel(\"name\", \"key1\", \"key2\")\n # hincrby 自增 hash 中的 value 值\n v23 = redis_conn.hincrby(\"name\", \"key\", -1)\n # hincrbyfloat\n v24 = redis_conn.hincrbyfloat(\"name\", \"key\", 1.0)\n # expire 设置某个键的过期时间\n v25 = redis_conn.expire(\"name\", \"key\")\n\n # Set\n # sadd 插入元素到集合中\n s = redis_conn.sadd(\"name\", \"1\", 3, 4)\n # scard 返回集合中元素的个数\n s1 = redis_conn.scard(\"name\")\n # smembers 获取集合中所有的元素\n s2 = redis_conn.smembers(\"name\")\n # srandmember 随机获取一个或者N个元素\n s3 = redis_conn.srandmember(\"name\", number=2)\n # sismember 判断一个值是否在集合中\n s4 = redis_conn.sismember(\"name\", \"value\")\n # spop 随机删除集合中的元素\n s5 = redis_conn.spop(\"name\")\n # srem 删除集合中的一个或者多个元素,返回删除元素的个数\n s6 = redis_conn.srem(\"name\", \"a\", \"b\")\n # smove 将集合中的一个元素移动到另一个集合中去\n s7 = redis_conn.smove(\"name1\", \"name2\", \"a\")\n # sdiff 两个集合求差集\n s8 = redis_conn.sdiff(\"name1\", \"name2\")\n # sinter 两个集合求交集\n s9 = redis_conn.sinter(\"name1\", \"name2\")\n # sunion 并集\n s10 = redis_conn.sunion(\"name1\", \"name2\")\n\n # Zset\n\n # redis 的事务\n # MULTI 开始事务,命令入队,EXEC 执行事务,DISCARD 放弃事务。\n # 与 mysql 事务的概念有所区别,不是原子性的,如果事务中途有命令失败,不会回滚,并继续往下执行。\n # redis 对于单个命令的执行是原子性的\n\n # 分布式锁是什么\n # 分布式锁主要用于分布式集群服务互斥共享累或者方法中的变量,对于单机应用而言,可以采用并行处理互斥\n # 分布式锁具备那些条件\n # 1. 在分布式系统环境下,一个方法在同一时间只能被一个机器的一个线程执行\n # 2. 高可用的获取锁与释放锁\n # 3. 高性能的获取锁与释放锁\n # 4. 具备可重入特性\n # 5. 具备锁失效机制,防止死锁\n # 6. 具备非阻塞锁特性,即没有获取到锁将直接返回获取锁失败" }, { "alpha_fraction": 0.6505050659179688, "alphanum_fraction": 0.6505050659179688, "avg_line_length": 28.117647171020508, "blob_id": "c9b46a30c44f5214b15afad41de9d52d24e86b78", "content_id": "6f20860ffc625754036b9d1d54d7cb22f5e29980", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 561, "license_type": "permissive", "max_line_length": 61, "num_lines": 17, "path": "/part-selenium/test01.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\nif __name__ == \"__main__\":\n # 加载浏览器\n browser = webdriver.Chrome()\n # 获取页面\n browser.get('https://www.baidu.com')\n print(browser.page_source)\n # 查找单个元素\n input_first = browser.find_element_by_id('q')\n input_second = browser.find_element_by_css_selector('#q')\n input_third = browser.find_element(By.ID, 'q')\n # 查找多个元素\n input_elements = browser.find_elements(By.ID, 'q')\n # 元素交互操作,搜索框查询\n" }, { "alpha_fraction": 0.5285252928733826, "alphanum_fraction": 0.5554359555244446, "avg_line_length": 30, "blob_id": "43198050350c62809613cb95c0a83462c5877b9e", "content_id": "5faf802c7f261251426e002c51ef8f22a5317572", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "permissive", "max_line_length": 67, "num_lines": 30, "path": "/part-struct/sort-dict.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from operator import itemgetter, attrgetter\n\n\nclass User:\n def __init__(self, uid, name):\n self.uid = uid\n self.name = name\n\n def get_name(self):\n return self.name\n\n\nif __name__ == \"__main__\":\n datas = [\n {'fname': 'Brian', 'lname': 'Jones', 'uid': 1003},\n {'fname': 'David', 'lname': 'Beazley', 'uid': 1002},\n {'fname': 'John', 'lname': 'Cleese', 'uid': 1001},\n {'fname': 'Big', 'lname': 'Jones', 'uid': 1004}\n ]\n row1 = sorted(datas, key=itemgetter('fname', 'lname'))\n print(row1)\n row2 = sorted(datas, key=lambda x: x['uid'])\n print(row2)\n users = [User(1, 'first'), User(3, 'second'), User(2, 'third')]\n row3 = sorted(users, key=attrgetter('uid', 'name'))\n min_user = min(users, key=attrgetter('uid'))\n max_user = max(users, key=lambda u: u.name)\n print(min_user.uid, min_user.name)\n print(max_user.uid, max_user.name)\n print(row3)" }, { "alpha_fraction": 0.4337121248245239, "alphanum_fraction": 0.47727271914482117, "avg_line_length": 24.14285659790039, "blob_id": "864f0b48e8f1bb64020c67462c4a679fc1ffcb8d", "content_id": "2765ea483df479e49dc1cd746c61bf1f735e030a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "permissive", "max_line_length": 71, "num_lines": 21, "path": "/part-interview/test13.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-05 20:43\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test13.py\n# ----------------------------------------------\nimport test10\n\n\nif __name__ == \"__main__\":\n # python3 高级特性,反射\n # 字符串返回映射到代码的一种机制,python3 提供了四个内置函数 getattr setattr hasattr delattr\n obj = getattr(test10, \"acquire_lock\")\n if hasattr(test10, \"acquire_lock\"):\n print(\"test\")\n else:\n print(\"new\")\n\n # metaclass 作用以及应用场景\n # 元类是一个创建类的类\n" }, { "alpha_fraction": 0.849056601524353, "alphanum_fraction": 0.849056601524353, "avg_line_length": 25.5, "blob_id": "67994445d2a2bf73c409802b4aeb8ddc325a375d", "content_id": "094135d305e6665181c6d0e09091d9b6b85281da", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 53, "license_type": "permissive", "max_line_length": 31, "num_lines": 2, "path": "/README.md", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# interesting_python\nsome interesting python program\n" }, { "alpha_fraction": 0.5242915153503418, "alphanum_fraction": 0.5283401012420654, "avg_line_length": 21.5, "blob_id": "89a1896368d8225b7cc84803c5bbdfead957b752", "content_id": "9aee467af3968c8bd882ecbf801abd4b6066d751", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 494, "license_type": "permissive", "max_line_length": 37, "num_lines": 22, "path": "/part-struct/test-deque.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from collections import deque\n\n\ndef search(lines, pattern, history):\n pre_lines = deque(maxlen=history)\n for line in lines:\n if pattern in line:\n pre_lines.append(line)\n return pre_lines\n\n\nif __name__ == \"__main__\":\n with open('tmp/test', 'r') as f:\n s = search(f, 'python', 5)\n print(s)\n s.append('python9')\n s.appendleft('python')\n s.pop()\n s.popleft()\n for line in s:\n print(line)\n print(\"end\")" }, { "alpha_fraction": 0.43022507429122925, "alphanum_fraction": 0.4771704077720642, "avg_line_length": 25.827587127685547, "blob_id": "57dd8eb4ab6187ca1fcbd8676c715cbbc5999919", "content_id": "b329988f8c701db40b260d6801509b4d8578c4d6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1555, "license_type": "permissive", "max_line_length": 63, "num_lines": 58, "path": "/part-text/test-tt.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from collections import Counter, defaultdict\nimport requests\nimport arrow\n\n\nclass Data:\n def __init__(self, data):\n self.data = data\n\n\nif __name__ == \"__main__\":\n url = 'http://10.100.51.45/rate/repair?start={}&end={}'\n start = '2019-04-01 23:10'\n end = '2019-04-07 23:10'\n ret = requests.get(url.format(start, end))\n print(ret.status_code)\n # dct = {}\n # d = Data('1')\n # dct['re'] = d\n # print(dct)\n # test = defaultdict()\n # data = [{'key1': 1}, {'key2': 2}]\n # for d in data:\n # test.update(d)\n # print(test)\n # dct = {'data': test}\n # for k, v in dct.items():\n # print(k)\n # for k1, v1 in v.items():\n # print(k1)\n # print(v1)\n # data = [1, 2, 3, 5, 5]\n # data2 = [2, 3, 4, 6, 2]\n # a = set(d for d in data)\n # print(a)\n # b = set(d for d in data2)\n # print(b)\n # print(a & b)\n # print(a - (a & b))\n # t = tuple(d for d in data)\n # for i in t:\n # print(i)\n # print(tuple(d for d in data))\n # link = 'http://jira.op.ksyun.com/browse/BIGDATA-614/test'\n # print(link.split('/')[-2])\n # print('http'.upper())\n # for dct in data:\n # for key in ('key1', 'key2'):\n # if key not in dct.keys():\n # dct[key] = 0\n # print(data)\n # ret = defaultdict(Counter)\n # data1 = {'name': {'key1': [1, 2, 3]}}\n # data2 = {'name': {'key1': [2, 3, 4]}}\n # for d in (data1, data2):\n # for name, data in d.items():\n # ret[name] += Counter(data)\n # print(ret)" }, { "alpha_fraction": 0.6043956279754639, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 19.33333396911621, "blob_id": "558b8a7c7ab1ff8b10118ab16884266e0c80558f", "content_id": "896797ddf003580561718688a1eee3f7829de160", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "permissive", "max_line_length": 49, "num_lines": 9, "path": "/part-text/test-fixed-record.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from functools import partial\n\n\n# 从指定文件按固定大小迭代\nwith open('file', 'rb') as f:\n re_size = 32\n records = iter(partial(f.read, re_size), b'')\n for r in records:\n print(r)" }, { "alpha_fraction": 0.4173728823661804, "alphanum_fraction": 0.42584747076034546, "avg_line_length": 15.892857551574707, "blob_id": "5bd1ae30a49ec841348248db428b854f5d78c1bb", "content_id": "0dd6e6d85dac62de52ae254fecc3966918501ec4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 472, "license_type": "permissive", "max_line_length": 30, "num_lines": 28, "path": "/part-struct/unpack-value2.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "records = [('foo', 1, 2),\n ('bar', 'hello'),\n ('foo', 3, 4),\n ]\n\n\ndef drop_first_last(grades):\n _, *middle, _ = grades\n return middle\n\n\ndef do_foo(x, y):\n print('foo', x, y)\n\n\ndef do_bar(s):\n print('bar', s)\n\n\nif __name__ == \"__main__\":\n for tag, *args in records:\n print(args)\n print(*args)\n if tag == 'foo':\n do_foo(*args)\n elif tag == 'bar':\n do_bar(*args)\n print(\"done\")" }, { "alpha_fraction": 0.5100671052932739, "alphanum_fraction": 0.5268456339836121, "avg_line_length": 18.933332443237305, "blob_id": "05874d0ac59abf7ce3a0ee80d4ff69839025dd36", "content_id": "991e93c27b9f67ea4805a369ee402b3f8b0a02c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "permissive", "max_line_length": 56, "num_lines": 15, "path": "/part-text/test-strip.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import re\nimport os\n\n\nif __name__ == \"__main__\":\n s = \" hello new world \\n\"\n # strip用于取出首尾指定字符\n print(s.strip())\n print(s.lstrip())\n print(s.rstrip())\n s = \"test ?\"\n s1 = s.replace('?', 'new')\n print(s1)\n s2 = re.sub('new', 'fresh', s1, flags=re.IGNORECASE)\n print(s2)" }, { "alpha_fraction": 0.5394300818443298, "alphanum_fraction": 0.553346574306488, "avg_line_length": 28.58823585510254, "blob_id": "4aa58fc171ce505d3b0b43187559cb3b8296df72", "content_id": "d07b211a436c2272a3b81cf8b6dc4615c7caa9e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1509, "license_type": "permissive", "max_line_length": 78, "num_lines": 51, "path": "/part-class/test-class.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "class Structure1:\n _fields = []\n\n def __init__(self, *args, **kwargs):\n if len(args) > len(self._fields):\n raise TypeError('Excepted {} arguments'.format(len(self._fields)))\n\n for name, value in zip(self._fields, args):\n setattr(self, name, value)\n\n for name in self._fields[len(args):]:\n setattr(self, name, kwargs.pop(name))\n\n if kwargs:\n raise TypeError('Invalid arguments {}'.format(','.join(kwargs)))\n\n\nclass Stock(Structure1):\n _fields = [\"name\", \"age\", \"career\"]\n\n\nclass Structure2:\n _fields = [\"name\", \"age\", \"career\"]\n\n def __init__(self, *args, **kwargs):\n if len(args) != len(self._fields):\n raise TypeError('Excepted {} arguments'.format(len(self._fields)))\n\n for name, value in zip(self._fields, args):\n setattr(self, name, value)\n\n extra_args = kwargs - self._fields\n for name in extra_args:\n setattr(self, name, kwargs.pop(name))\n\n if kwargs:\n raise TypeError('Invalid arguments {}'.format(','.join(kwargs)))\n\n\nif __name__ == \"__main__\":\n data = [\"test1\", \"test2\", \"name\"]\n kwargs = {\"name\": \"wulj\", \"age\": 23}\n print(kwargs.keys()-data)\n test_dict = {\"name\": \"value\", \"test\": \"new\"}\n print(','.join(test_dict))\n s1 = Stock(\"Alex\", 23, \"programmer\")\n print(s1.name, s1.age, s1.career)\n s2 = Stock(\"lucy\", age=22, career=\"teacher\")\n print(s2)\n s3 = Stock(\"Mary\", 23, \"player\", \"like\")\n print(s3)\n" }, { "alpha_fraction": 0.41115880012512207, "alphanum_fraction": 0.4721029996871948, "avg_line_length": 42.14814758300781, "blob_id": "bc19fbdef09fd83d82e219daf921bc1abdf6d40f", "content_id": "e912c020cd96c53ff20faf6f9847bda71299f224", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1171, "license_type": "permissive", "max_line_length": 108, "num_lines": 27, "path": "/part-kafka/kafka-producer.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2019-12-25 21:25\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : kafka-producer.py\n# ----------------------------------------------\nfrom kafka import KafkaProducer\nfrom time import sleep\n\n\ndef start_producer():\n producer = KafkaProducer(bootstrap_servers='kafka-0-0.kafka-0-inside-svc.kafka.svc.cluster.local:32010,'\n 'kafka-1-0.kafka-1-inside-svc.kafka.svc.cluster.local:32011,'\n 'kafka-2-0.kafka-2-inside-svc.kafka.svc.cluster.local:32012,'\n 'kafka-3-0.kafka-3-inside-svc.kafka.svc.cluster.local:32013,'\n 'kafka-4-0.kafka-4-inside-svc.kafka.svc.cluster.local:32014,'\n 'kafka-5-0.kafka-5-inside-svc.kafka.svc.cluster.local:32015')\n for i in range(0, 100000):\n msg = 'msg is ' + str(i)\n print(msg)\n producer.send('my_test_topic1', msg.encode('utf-8'))\n sleep(3)\n\n\nif __name__ == '__main__':\n start_producer()\n" }, { "alpha_fraction": 0.3913823962211609, "alphanum_fraction": 0.43177738785743713, "avg_line_length": 22.723403930664062, "blob_id": "ba0fac202a150722ce53716ed46c00e77decc7ac", "content_id": "fee58ba6130bd2f5ab628eaccec12fd6d9941ec8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1342, "license_type": "permissive", "max_line_length": 49, "num_lines": 47, "path": "/part-interview/test02.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-01 10:39\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test02.py\n# ----------------------------------------------\nif __name__ == \"__main__\":\n # gbk 和 utf-8 格式之间的转换\n # gbk 编码,针对于中文字符\n t1 = \"中国加油\"\n t1_gbk = t1.encode(\"gbk\")\n print(t1.encode(\"gbk\"))\n # utf-8 字符\n t2_utf = t1_gbk.decode(\"gbk\").encode(\"utf-8\")\n print(t2_utf)\n print(t1.encode(\"utf-8\"))\n # 正则切分字符串\n s1 = \"info : xiaoZhang 33 shandong\"\n import re\n # 非捕获分组\n c1 = re.compile(r'\\s*[:\\s]\\s*')\n l1 = re.split(c1, s1)\n print(l1)\n # 捕获分组\n c2 = re.compile(r'(\\s*:\\s*|\\s)')\n l2 = re.split(c2, s1)\n print(l2)\n # 如果仍需使用圆括号输出非捕获分组的话\n c3 = re.compile(r'(?:\\s*:\\s*|\\s)')\n l3 = re.split(c3, s1)\n print(l3)\n # 去除多余空格\n a = \"你好 中国 \"\n a = a.rstrip()\n print(a)\n # 字符串转换成小写\n b = \"sdsHOJOK\"\n print(b.lower())\n # 单引号 双引号 三引号的区别\n # 单引号 和 双引号 输出结果一样,都显示转义后的字符\n a = '-\\t-\\\\-\\'-%-/-\\n'\n b = \"-\\t-\\\\-\\'-%-/-\\n\"\n print(a)\n print(b)\n c = r\"-\\t-\\\\-\\'-%-/-\\n\"\n print(c)" }, { "alpha_fraction": 0.6191536784172058, "alphanum_fraction": 0.6213808655738831, "avg_line_length": 22.63157844543457, "blob_id": "36e70ccdf9b10cd054b9f40c692616c26bf07fc2", "content_id": "99fcf58c6c16087f25ae63bb6827dc667600d2b0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 487, "license_type": "permissive", "max_line_length": 43, "num_lines": 19, "path": "/part-text/test-enumerate.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\ncounter_words = defaultdict(list)\n\n\n# 定位文件中的每一行出现某个字符串的次数\ndef locate_word(test_file):\n with open(test_file, 'r') as f:\n lines = f.readlines()\n for num, line in enumerate(lines, 1):\n for word in line.split():\n counter_words[word].append(num)\n return counter_words\n\n\nif __name__ == \"__main__\":\n file = 'data/test.txt'\n ret = locate_word(file)\n print(ret.get('test', []))\n" }, { "alpha_fraction": 0.4857296645641327, "alphanum_fraction": 0.5018847584724426, "avg_line_length": 20.113636016845703, "blob_id": "2c7314acd2e35cfe883a521a8c23569bd160c8f6", "content_id": "de15ad00483005f670a03c9c67dc4de541722801", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2029, "license_type": "permissive", "max_line_length": 81, "num_lines": 88, "path": "/part-interview/test19.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-08 11:30\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test19.py\n# ----------------------------------------------\n# 单例模式的 N 种实现方法,就是程序在不同位置都可以且仅可以取到同一个实例\n\n\n# 函数装饰器实现\ndef singleton(cls):\n _instance = {}\n\n def inner():\n if cls not in _instance:\n # cls 作为 key,value 值为 cls 的实例化\n _instance[cls] = cls()\n return _instance[cls]\n return inner\n\n\n@singleton\nclass Cls(object):\n def __init__(self):\n print(\"__init__\")\n\n\n# 类装饰器实现\nclass Singleton:\n def __init__(self, cls):\n self._cls = cls\n self._instance = {}\n\n def __call__(self, *args, **kwargs):\n if self._cls not in self._instance:\n self._instance[self._cls] = self._cls()\n return self._instance[self._cls]\n\n\n@Singleton\nclass Cls2:\n def __init__(self):\n print(\"__init__2\")\n\n\n# 使用 new 关键字实现单例模式\nclass Singleton1(object):\n # 类属性,公共属性\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = object.__new__(cls, *args, **kwargs)\n return cls._instance\n\n def __init__(self):\n print(\"__init__3\")\n\n\n# 使用 metaclass 实现单例模式\nclass Singleton3(type):\n _instance = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instance:\n cls._instance[cls] = super(Singleton3, cls).__call__(*args, **kwargs)\n return cls._instance[cls]\n\n\nclass Singleton4(metaclass=Singleton3):\n def __init__(self):\n print(\"__init__4\")\n\n\nif __name__ == \"__main__\":\n c = Cls()\n d = Cls()\n print(id(c) == id(d))\n e = Cls2()\n f = Cls2()\n print(id(e) == id(f))\n g = Singleton1()\n h = Singleton1()\n print(id(g) == id(h))\n i = Singleton4()\n j = Singleton4()\n print(id(i) == id(j))" }, { "alpha_fraction": 0.40700218081474304, "alphanum_fraction": 0.4595186114311218, "avg_line_length": 17.31999969482422, "blob_id": "24027ecc91c68f16a686027f1323d8f5176b6733", "content_id": "3edf744d026597207ced37243c42a43a175dad92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 493, "license_type": "permissive", "max_line_length": 40, "num_lines": 25, "path": "/part-data/test-numpy.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\nif __name__ == \"__main__\":\n \"\"\"\n 使用numpy模块来对数组进行运算\n \"\"\"\n x = [1, 2, 3, 4]\n y = [5, 6, 7, 8]\n print(x+y)\n print(x*2)\n nx = np.array(x)\n ny = np.array(y)\n print(nx*2)\n print(nx+10)\n print(nx+ny)\n print(np.sqrt(nx))\n print(np.cos(nx))\n # 二维数组操作\n a = np.array([[1, 2, 3], [2, 3, 4]])\n # select row 1\n print(a[1])\n # select column 1\n print(a[:, 1])\n print(np.where(a > 1, a, 0))" }, { "alpha_fraction": 0.3927238881587982, "alphanum_fraction": 0.4458955228328705, "avg_line_length": 23.953489303588867, "blob_id": "a00fd09b88c74c5eb2ed3321d10d30886ef70565", "content_id": "a308b0a956c1733dc0c25e4d050b7e49000b4d04", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1208, "license_type": "permissive", "max_line_length": 48, "num_lines": 43, "path": "/part-interview/test03.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2020-03-01 11:28\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test03.py\n# ----------------------------------------------\nif __name__ == \"__main__\":\n # 对列表元素去重\n aList = [1, 2, 3, 2, 1]\n b = set(aList)\n print(list(b))\n # 2,太简单 不说了\n s1 = \"1,2,3\"\n print(s1.split(\",\"))\n # 3,找出两个列表中相同元素和不同元素\n a = [1, 2, 5, 3, 2]\n b = [4, 5, 6, 1, 2]\n common_l = list(set(a) & set(b))\n print(common_l)\n only_in_a = list(set(a) - set(common_l))\n only_in_b = list(set(b) - set(common_l))\n print(only_in_a)\n print(only_in_b)\n # 一行代码展开 list,nice\n a = [[1, 2], [3, 4], [5, 6]]\n b = [j for i in a for j in i]\n print(b)\n # numpy 实现,flatten 方法,然后转换成 list\n import numpy as np\n c = np.array(a).flatten().tolist()\n print(c)\n # 合并列表,list 可以用 extend 方法\n a = [1, 2, 3]\n b = [4, 5, 6]\n a.extend(b)\n print(a)\n # 打乱一个列表\n import random\n a = [1, 2, 3, 4, 5]\n random.shuffle(a)\n print(a)\n print(random.randint(1, 10))" }, { "alpha_fraction": 0.5044510364532471, "alphanum_fraction": 0.5222551822662354, "avg_line_length": 28.764705657958984, "blob_id": "94b49d5ac2338daa9cf31865e22aaa1fc0d3987e", "content_id": "65421357f6481f1d2c2e9e53a7489fd53c0f2406", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1011, "license_type": "permissive", "max_line_length": 62, "num_lines": 34, "path": "/part-yaml/test-file.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "if __name__ == \"__main__\":\n names = set()\n dct = {\"test\": \"new\"}\n data = ['wulinjiang1', 'test', 'test', 'wulinjiang1']\n print('\\n'.join(data))\n from collections import defaultdict\n data1 = defaultdict(list)\n # print(data1)\n # for d in data:\n # data1[d].append(\"1\")\n # print(data1)\n content = 'aydsad'\n for k, v in data1.items():\n print(k)\n content += '\\n'.join(v)\n print('\\n'.join(v))\n print(content)\n if data1:\n print(True)\n # dct = {\"test1\": \"wulinjiang1\",}\n # for i in range(3):\n # dct.update({'content': i})\n # print(dct)\n # for d in data:\n # names.add(d)\n # for name in names:\n # print(name)\n # with open('deployments.yaml') as fp:\n # content = fp.readlines()\n # print(content[25].format('http://www.baidu.com'))\n # content[25] = content[25].format('http://www.baidu.com')\n # with open('deployments.yaml', 'w') as fp:\n # for c in content:\n # fp.writeline" }, { "alpha_fraction": 0.4540390074253082, "alphanum_fraction": 0.47632312774658203, "avg_line_length": 20.176469802856445, "blob_id": "aaf3ed793c0c2a20a5b0f3d837c9373105321169", "content_id": "a4ae9a6a64953c203e9fde4430d6b26ad870ef8d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "permissive", "max_line_length": 46, "num_lines": 17, "path": "/part-struct/test-dict.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\n\nif __name__ == \"__main__\":\n d = {\n \"1\": 1,\n \"2\": 2,\n \"5\": 5,\n \"4\": 4,\n }\n print(d.keys())\n print(d.values())\n print(zip(d.values(), d.keys()))\n max_value = max(zip(d.values(), d.keys()))\n min_value = min(zip(d.values(), d.keys()))\n print(max_value)\n print(min_value)" }, { "alpha_fraction": 0.45382165908813477, "alphanum_fraction": 0.4888535141944885, "avg_line_length": 21.428571701049805, "blob_id": "568baa24099a6102d8cf050c30469b45152c0335", "content_id": "66b991533875f94ce5960a15a37a71d34c2bccd6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "permissive", "max_line_length": 51, "num_lines": 28, "path": "/part-sanic/test_g_10000.py", "repo_name": "wuljchange/interesting_python", "src_encoding": "UTF-8", "text": "# ----------------------------------------------\n# -*- coding: utf-8 -*-\n# @Time : 2019-11-07 18:50\n# @Author : 吴林江\n# @Email : [email protected]\n# @File : test-sanic.py\n# ----------------------------------------------\nfrom sanic import Sanic\nfrom sanic import response\nfrom pprint import pprint\n\n\napp = Sanic()\n\n\[email protected]('/', methods=['POST'])\nasync def g(request):\n data = request.json\n resp = []\n for k, v in data:\n for d in v:\n resp.append(sorted(d.items()))\n pprint(sorted(resp))\n return response.json(True)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=10000, debug=True)\n" } ]
81
opn7d/Lab2
https://github.com/opn7d/Lab2
b35852ccf0d6ade194be76b96abd986e48cd1f2b
59107abfe0b774038c259a959e9d9147dfe1021e
7f380706ec8ab9ddffee398c40972f426ed76de3
refs/heads/master
2020-09-28T13:55:12.408735
2019-12-09T09:47:17
2019-12-09T09:47:17
226,791,277
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7154185175895691, "alphanum_fraction": 0.7365638613700867, "avg_line_length": 35.6129035949707, "blob_id": "d0c3de2e8ea0573334d95cd2e87078062f243b58", "content_id": "8eda9b2c8e1b72eee583eaf7d761fcaee7c8b0e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1135, "license_type": "no_license", "max_line_length": 107, "num_lines": 31, "path": "/Question4", "repo_name": "opn7d/Lab2", "src_encoding": "UTF-8", "text": "from keras.models import Sequential\nfrom keras import layers\nfrom keras.preprocessing.text import Tokenizer\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\n\n# read the file\ndf = pd.read_csv('train.tsv',\n header=None,\n delimiter='\\t', low_memory=False)\n# labels columns\ndf.columns = ['PhraseID', 'SentenceID', 'Phrase', 'Sentiment']\nsentences = df['Phrase'].values\ny = df['Sentiment'].values\n\ntokenizer = Tokenizer(num_words=2000)\ntokenizer.fit_on_texts(sentences)\nsentences = tokenizer.texts_to_matrix(sentences)\n\nle = preprocessing.LabelEncoder()\ny = le.fit_transform(y)\nX_train, X_test, y_train, y_test = train_test_split(sentences, y, test_size=0.25, random_state=1000)\n\n# Number of features\n# print(input_dim)\nmodel = Sequential()\nmodel.add(layers.Dense(300, input_dim=2000, activation='relu'))\nmodel.add(layers.Dense(10, activation='softmax'))\nmodel.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['acc'])\nhistory=model.fit(X_train,y_train, epochs=5, verbose=True, validation_data=(X_test,y_test), batch_size=256)\n" } ]
1
jfstepha/minecraft-ros
https://github.com/jfstepha/minecraft-ros
c07b07c3657338b95409288266968c5b7cb614e2
30c8de66447dfdfa496a994a27ee705b56581b05
835166244307dabfe2270b176f23170a1b1ffe9a
refs/heads/master
2021-01-01T06:50:37.554367
2013-03-07T03:49:10
2013-03-07T03:49:10
34,567,810
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.36679109930992126, "alphanum_fraction": 0.4142126441001892, "avg_line_length": 32.87529373168945, "blob_id": "5924fbe656b6831018b4021a218fc5e945b5d60b", "content_id": "b2ed4c2d810643dff8072b20f3c3047b6964d430", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14466, "license_type": "no_license", "max_line_length": 180, "num_lines": 425, "path": "/src/octomap_2_minecraft.py", "repo_name": "jfstepha/minecraft-ros", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 18 23:52:09 2013\n\n@author: jfstepha\n\"\"\"\n\n# parts of this code borrowed from: \n\n# Minecraft save file creator from kinect images created by getSnapshot.py\n#\tBy: Nathan Viniconis\n#\n\n# in it, he said: \"You can use this code freely without any obligation to the original or myself\"\n\nfrom math import sqrt\nimport sys\nfrom pymclevel import mclevel\nfrom pymclevel.box import BoundingBox\nimport re\nimport argparse\nimport os\nimport yaml\n\n# Possible blocks in (Name, ID, (RGB1,RGB2,..),Data)\n\t#RGBs are used to color match. \npossibleBlocks = ( \t\\\n\t\t\t\t\t(\"Smooth Stone\", 1, (\t\\\n\t\t\t\t\t\t(125,125, 125),),0), \\\n\t\t\t\t\t(\"Dirt\", 3, (\t\\\n\t\t\t\t\t\t(133,96,66),),0), \\\n\t\t\t\t\t(\"Cobblestone\", 4, (\t\\\n\t\t\t\t\t\t(117,117,117),),0), \\\n\t\t\t\t\t(\"Wooden Plank\", 5, (\t\\\n\t\t\t\t\t\t(156,127,78),),0), \\\n\t\t\t\t\t(\"Bedrock\", 7, ( \\\n\t\t\t\t\t\t(83,83,83),),0),\t\\\n\t\t\t\t\t#(\"Lava\", 11, ( \\\n\t\t\t\t\t#\t(255,200,200),),0),\t\\\n\t\t\t\t\t(\"Sand\", 12, (\t\\\n\t\t\t\t\t\t(217,210,158),),0), \\\n\t\t\t\t\t(\"Gravel\", 13, ( \t\\\n\t\t\t\t\t\t(136, 126, 125),),0), \\\n\t\t\t\t\t(\"Gold Ore\", 14, (\t\\\n\t\t\t\t\t\t(143,139,124),),0),\t\\\n\t\t\t\t\t(\"Iron Ore\", 15, (\t\\\n\t\t\t\t\t\t(135,130,126),),0),\t\\\n\t\t\t\t\t(\"Coal Ore\", 16, (\t\\\n\t\t\t\t\t\t(115,115,115),),0),\t\\\n\t\t\t\t\t(\"Wood\", 17, (\t\\\n\t\t\t\t\t\t(154,125,77),),0), \\\n\t\t\t\t\t(\"Sponge\", 19, (\t\\\n\t\t\t\t\t\t(182,182,57),),0), \\\n\t\t\t\t\t#(\"Glass\", 20, (\t\\\n\t\t\t\t\t#\t(60,66,67),),0), \\\n\t\t\t\t\t(\"White Wool\", 35, (\t\\\n\t\t\t\t\t\t(221,221,221),),0),\t\\\n\t\t\t\t\t(\"Orange Wool\", 35, (\t\\\n\t\t\t\t\t\t(233,126,55),),1),\t\\\n\t\t\t\t\t(\"Magenta Wool\", 35, (\t\\\n\t\t\t\t\t\t(179,75,200),),2),\t\\\n\t\t\t\t\t(\"Light Blue Wool\", 35, (\t\\\n\t\t\t\t\t\t(103,137,211),),3),\t\\\n\t\t\t\t\t(\"Yellow Wool\", 35, (\t\\\n\t\t\t\t\t\t(192,179,28),),4),\t\\\n\t\t\t\t\t(\"Light Green Wool\", 35, (\t\\\n\t\t\t\t\t\t(59,187,47),),5),\t\\\n\t\t\t\t\t(\"Pink Wool\", 35, (\t\\\n\t\t\t\t\t\t(217,132,153),),6),\t\\\n\t\t\t\t\t(\"Dark Gray Wool\", 35, (\t\\\n\t\t\t\t\t\t(66,67,67),),7),\t\\\n\t\t\t\t\t(\"Gray Wool\", 35, (\t\\\n\t\t\t\t\t\t(157,164,165),),8),\t\\\n\t\t\t\t\t(\"Cyan Wool\", 35, (\t\\\n\t\t\t\t\t\t(39,116,148),),9),\t\\\n\t\t\t\t\t(\"Purple Wool\", 35, (\t\\\n\t\t\t\t\t\t(128,53,195),),10),\t\\\n\t\t\t\t\t(\"Blue Wool\", 35, (\t\\\n\t\t\t\t\t\t(39,51,153),),11),\t\\\n\t\t\t\t\t(\"Brown Wool\", 35, (\t\\\n\t\t\t\t\t\t(85,51,27),),12),\t\\\n\t\t\t\t\t(\"Dark Green Wool\", 35, (\t\\\n\t\t\t\t\t\t(55,76,24),),13),\t\\\n\t\t\t\t\t(\"Red Wool\", 35, (\t\\\n\t\t\t\t\t\t(162,44,42),),14),\t\\\n\t\t\t\t\t(\"Black Wool\", 35, (\t\\\n\t\t\t\t\t\t(26,23,23),),15),\t\\\n\t\t\t\t\t(\"Gold\", 41, (\t\\\n\t\t\t\t\t\t(249,236,77),),0), \\\n\t\t\t\t\t(\"Iron\", 42, (\t\\\n\t\t\t\t\t\t(230,230,230),),0),\t\\\n\t\t\t\t\t(\"TwoHalves\", 43, (\n\t\t\t\t\t\t(159,159,159),),0),\n\t\t\t\t\t(\"Brick\", 45, ( \\\n\t\t\t\t\t\t(155,110,97),),0), \\\n\t\t\t\t\t#(\"TNT\", 46, ( \\\n\t\t\t\t\t#\t(200,50,50),),0), \\\n\t\t\t\t\t(\"Mossy Cobblestone\", 48, (\t\\\n\t\t\t\t\t\t(90,108,90),),0), \\\n\t\t\t\t\t(\"Obsidian\", 49, (\t\\\n\t\t\t\t\t\t(20,18,29),),0),\t\\\n\t\t\t\t\t(\"Diamond Ore\", 56, (\t\\\n\t\t\t\t\t\t(129,140,143),),0), \\\n\t\t\t\t\t(\"Diamond Block\", 57, (\t\\\n\t\t\t\t\t\t(99,219,213),),0), \\\n\t\t\t\t\t(\"Workbench\", 58, (\t\\\n\t\t\t\t\t\t(107,71,42),),0), \\\n\t\t\t\t\t(\"Redstone Ore\", 73, (\t\\\n\t\t\t\t\t\t(132,107,107),),0),\t\\\n\t\t\t\t\t#(\"Ice\", 79, (\t\\\n\t\t\t\t\t#\t(125,173,255),),0),\t\\\n\t\t\t\t\t(\"Snow Block\", 80, (\t\\\n\t\t\t\t\t\t(239,251,251),),0),\t\\\n\t\t\t\t\t(\"Clay\", 82, (\t\\\n\t\t\t\t\t\t(158,164,176),),0),\t\\\n\t\t\t\t\t(\"Jukebox\", 84, (\t\\\n\t\t\t\t\t\t(107,73,55),),0),\t\\\n\t\t\t\t\t(\"Pumpkin\", 86, (\t\\\n\t\t\t\t\t\t(192,118,21),),0),\t\\\n\t\t\t\t\t(\"Netherrack\", 87, (\t\\\n\t\t\t\t\t\t(110,53,51),),0),\t\\\n\t\t\t\t\t(\"Soul Sand\", 88, (\t\\\n\t\t\t\t\t\t(84,64,51),),0),\t\\\n\t\t\t\t\t(\"Glowstone\", 89, (\t\\\n\t\t\t\t\t\t(137,112,64),),0)\t\\\n\t\t\t\t\t)\n\n\n# /////////////////////////////////////////////////////////////////////////////\t\t\n# Calculates distance between two HLS colors\n# /////////////////////////////////////////////////////////////////////////////\t\ndef getColorDist(colorRGB, blockRGB):\n # RGB manhatten distance\n return sqrt( pow(colorRGB[0]-blockRGB[0],2) + pow(colorRGB[1]-blockRGB[1],2) + pow(colorRGB[2]-blockRGB[2],2))\n\t\n\t\n# /////////////////////////////////////////////////////////////////////////////\t\t\n# For a given RGB color, determines which block should represent it\n# /////////////////////////////////////////////////////////////////////////////\t\ndef getBlockFromColor(RGB):\n # find the closest color\n smallestDistIndex = -1\n smallestDist = 300000\n curIndex = 0\n for block in possibleBlocks:\n for blockRGB in block[2]:\n curDist = getColorDist(RGB, blockRGB)\n \n if (curDist < smallestDist):\n smallestDist = curDist\n smallestDistIndex = curIndex\n \n curIndex = curIndex + 1\n if (smallestDistIndex == -1):\n return -1\n return possibleBlocks[smallestDistIndex]\n\n\n########################################################\n########################################################\nclass Octomap2Minecraft():\n########################################################\n########################################################\n ##########################################\n def __init__(self):\n ##########################################\n self.min_x = 1e99\n self.min_y = 1e99\n self.min_z = 1e99\n self.max_x = -1e99\n self.max_y = -1e99\n self.max_z = -1e99\n self.size_x = 0\n self.size_y = 0\n self.size_z = 0\n self.resolution = 0\n \n self.settings = {}\n \n ###############################################\n def read_settings(self, filename):\n ###############################################\n \n defaults = { \n \"level_name\" : \"robot_octo\",\n \"origin_x\" : 0,\n \"origin_y\" : 100,\n \"origin_z\" : 0,\n \"spawn_x\" : 246,\n \"spawn_y\" : 1,\n \"spawn_z\" : 77,\n \"oversize\" : 100,\n \"clear_height\" : 256,\n \"base_item\" : \"3:0\"}\n \n parser = argparse.ArgumentParser(description='Translate a ROS map to a minecraft world')\n parser.add_argument(\"--settings\", default=filename, dest=\"filename\")\n for setting in defaults.keys():\n parser.add_argument(\"--\"+setting, dest=setting)\n \n args = parser.parse_args()\n \n print( \"reading settings from %s\" % args.filename)\n \n stream = open(args.filename)\n settings_file = yaml.load(stream)\n \n for setting in defaults.keys():\n if vars(args)[setting] == None:\n if setting in settings_file:\n self.settings[ setting ] = settings_file[ setting ]\n else:\n self.settings[ setting ] = defaults[ setting ]\n else:\n self.settings[ setting ] = vars(args)[setting]\n \n print( \"settings: %s\" % (str(self.settings)))\n\n ##########################################\n def check_empty(self):\n ##########################################\n retval = False\n if self.min_x == 1e99:\n print \"no value for min_x found\"\n retval = True\n if self.min_y == 1e99:\n print \"no value for min_y found\"\n retval = True\n if self.min_z == 1e99:\n print \"no value for min_z found\"\n retval = True\n\n if self.max_x == -1e99:\n print \"no value for max_x found\"\n retval = True\n if self.max_y == -1e99:\n print \"no value for max_y found\"\n retval = True\n if self.max_z == -1e99:\n print \"no value for max_z found\"\n retval = True\n\n if self.size_x == 0:\n print \"no value for size_x found\"\n retval = True\n if self.size_y == 0:\n print \"no value for size_y found\"\n retval = True\n if self.size_z == 0:\n print \"no value for size_z found\"\n retval = True\n \n if self.resolution == 0:\n print \"no value for resolution found\"\n retval = True\n \n return retval\n \n \n ##########################################\n def read_input(self):\n ##########################################\n print \"starting\" \n firstline = True\n beforefirstblock = True\n linecount = 0\n actual_min = 256\n\n print \"opening file\"\n for line in sys.stdin:\n if firstline:\n firstline = False\n if re.match(\"^#octomap dump\", line) :\n print \"first line found\"\n else:\n print \"ERROR: First line is not \"\"#octomap dump\"\"\"\n exit(-1)\n \n if beforefirstblock:\n \n a = re.match(\"(\\w+): x (-?\\d+.?\\d*) y (-?\\d+.?\\d*) z (-?\\d+.?\\d*)\", line)\n if a:\n print(\"found values: %s\" % str(a.groups()))\n \n if (a.groups()[0] == 'min'):\n self.min_x = float(a.groups()[1])\n self.min_y = float(a.groups()[2])\n self.min_z = float(a.groups()[3])\n \n if (a.groups()[0] == 'max'):\n self.max_x = float(a.groups()[1])\n self.max_y = float(a.groups()[2])\n self.max_z = float(a.groups()[3])\n\n if (a.groups()[0] == 'size'):\n self.size_x = float(a.groups()[1])\n self.size_y = float(a.groups()[2])\n self.size_z = float(a.groups()[3])\n \n a = re.match(\"resolution: (-?\\d+.\\d+)\", line)\n if a:\n print(\"found resolution: %s\" % str(a.groups())) \n self.resolution = float(a.groups()[0])\n \n \n if re.match(\"^block\", line):\n if self.check_empty():\n print \"ERROR: not all values found!\"\n exit(-1)\n self.init_map()\n beforefirstblock = False\n \n if beforefirstblock == False:\n a = re.match(\"block (-?\\d+.?\\d*) (-?\\d+.?\\d*) (-?\\d+.?\\d*) \\((\\d+) (\\d+) (\\d+)\\) (-?\\d+.?\\d*)\", line)\n if a:\n linecount += 1\n if linecount % 1000 == 0 :\n print \"processed %d lines\" % linecount\n self.add_block(a.groups())\n else:\n print \"ERROR: line improperly formed: %s\" % line\n\n print(\"saving map\")\n self.level.saveInPlace() \n \n\n ###############################################\n def readBlockInfo(self, keyword):\n ###############################################\n blockID, data = map(int, keyword.split(\":\"))\n blockInfo = self.level.materials.blockWithID(blockID, data)\n return blockInfo\n \n ###############################################\n def create_map(self):\n ###############################################\n if (os.path.exists( self.settings[\"level_name\"])) :\n print(\"ERROR: %s directory already exists. Delete it or pick a new name\" % self.settings[\"level_name\"])\n sys.exit()\n if (os.path.exists( os.getenv(\"HOME\") + \"/.minecraft/saves/\" + self.settings[\"level_name\"])) :\n print(\"ERROR: Minecraft world %s already exists. Delete it (at ~/.minecraft/saves/%s) or pick a new name\" % (self.settings[\"level_name\"], self.settings[\"level_name\"]))\n sys.exit()\n print(\"creating map file\")\n os.system(\"pymclevel/mce.py \" + self.settings[\"level_name\"] + \" create\")\n\n ###############################################\n def init_map(self):\n ###############################################\n filename = self.settings[\"level_name\"]\n self.level = mclevel.fromFile(filename)\n self.level.setPlayerGameType(1, \"Player\")\n pos = [self.settings[\"spawn_x\"], self.settings[\"spawn_y\"], self.settings[\"spawn_z\"]]\n \n self.level.setPlayerPosition( pos )\n self.level.setPlayerSpawnPosition( pos )\n \n rows = self.size_x / self.resolution\n cols = self.size_y / self.resolution\n \n o_x = self.settings[\"origin_x\"]\n o_y = self.settings[\"origin_y\"]\n o_z = self.settings[\"origin_z\"]\n ovs = self.settings[\"oversize\"]\n \n box = BoundingBox( (o_x - ovs, o_y, o_z - ovs ), \n ( rows + ovs * 2, ovs, cols + ovs * 2))\n \n print(\"creating chunks\") \n chunksCreated = self.level.createChunksInBox( box )\n print(\"Created %d chunks\" % len( chunksCreated ) )\n \n print(\"filling air\")\n self.level.fillBlocks( box, self.level.materials.blockWithID(0,0) )\n print(\"filled %d blocks\" % box.volume )\n \n print(\"filling base layer\")\n box = BoundingBox( (o_x - ovs, o_y - 10, o_z - ovs ), \n ( rows + ovs * 2, 10, cols + ovs * 2))\n item = self.readBlockInfo( self.settings[\"base_item\"] )\n self.level.fillBlocks( box, item )\n print(\"filled %d blocks\" % box.volume )\n \n ###############################################\n def add_block(self, blk):\n ###############################################\n \n o_x = self.settings[\"origin_x\"]\n o_y = self.settings[\"origin_y\"]\n o_z = self.settings[\"origin_z\"]\n\n blk_size = float(blk[6]) / self.resolution \n \n x1 = (self.max_x - float(blk[0])) / self.resolution + o_x\n y1 = (float(blk[1]) - self.min_y) / self.resolution + o_y\n z1 = (float(blk[2]) - self.min_z) / self.resolution + o_z\n \n r = (int(blk[3]))\n g = (int(blk[4]))\n b = (int(blk[5]))\n \n box = BoundingBox( ( x1, y1, z1 ), (blk_size, blk_size, blk_size) )\n \n closest_block = getBlockFromColor( ( r,g,b))\n blockID = closest_block[1]\n data = closest_block[3]\n item = self.level.materials.blockWithID(blockID, data)\n \n self.level.fillBlocks( box, item )\n \n ###############################################\n def move_map(self):\n ###############################################\n print(\"moving to minecraft saves\")\n os.system(\"mv %s ~/.minecraft/saves/\" % self.settings[\"level_name\"])\n\n\n \nif __name__ == \"__main__\":\n o = Octomap2Minecraft()\n o.read_settings(\"map_octo.yaml\")\n o.create_map()\n o.read_input()\n o.move_map()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7727272510528564, "alphanum_fraction": 0.7727272510528564, "avg_line_length": 44, "blob_id": "e74f359f4b681603ec5cb4bde20dbfebe69d7c63", "content_id": "a5e709a671bc7cdc88cc312ecfc8157b743595ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 44, "license_type": "no_license", "max_line_length": 44, "num_lines": 1, "path": "/src/requirements.txt", "repo_name": "jfstepha/minecraft-ros", "src_encoding": "UTF-8", "text": "-e git+git://github.com/mcedit/pymclevel.git" }, { "alpha_fraction": 0.438178688287735, "alphanum_fraction": 0.448914110660553, "avg_line_length": 39.92424392700195, "blob_id": "e0df535963493a4af3c8499bed8c8b9d4d5daa84", "content_id": "69a5057de15f7711c390ce4f838e9021b70ebbac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8104, "license_type": "no_license", "max_line_length": 180, "num_lines": 198, "path": "/src/map_2d_2_minecraft.py", "repo_name": "jfstepha/minecraft-ros", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\nimport re\nimport numpy\nimport yaml\nimport sys\nimport argparse\ntry:\n from pymclevel import mclevel\n from pymclevel.box import BoundingBox\nexcept:\n print (\"\\nERROR: pymclevel could not be imported\")\n print (\" Get it with git clone git://github.com/mcedit/pymclevel.git\\n\\n\")\n raise\nimport os\n\n############################################################################\n############################################################################\nclass Map2d2Minecraft():\n############################################################################\n############################################################################\n ###############################################\n def __init__(self):\n ###############################################\n self.settings = {}\n\n ###############################################\n def readBlockInfo(self, keyword):\n ###############################################\n blockID, data = map(int, keyword.split(\":\"))\n blockInfo = self.level.materials.blockWithID(blockID, data)\n return blockInfo\n \n ###############################################\n def read_settings(self, filename):\n ###############################################\n \n defaults = { \n \"level_name\" : \"robot_map\",\n \"map_file\" : \"/home/jfstepha/ros_workspace/maps/map_whole_house_13_02_17_fixed.pgm\",\n \"occ_thresh\" : 200,\n \"empty_thresh\" : 250,\n \"empty_item\" : \"12:0\",\n \"empty_height\" : 1,\n \"occupied_item\" : \"5:0\",\n \"occupied_height\" : 15,\n \"unexplored_item\" : \"3:0\",\n \"origin_x\" : 0,\n \"origin_y\" : 100,\n \"origin_z\" : 0,\n \"spawn_x\" : 246,\n \"spawn_y\" : 1,\n \"spawn_z\" : 77,\n \"oversize\" : 100,\n \"clear_height\" : 256,\n \"do_ceiling\" : True,\n \"ceiling_item\" : \"89:0\"}\n \n parser = argparse.ArgumentParser(description='Translate a ROS map to a minecraft world')\n parser.add_argument(\"--settings\", default=filename, dest=\"filename\")\n for setting in defaults.keys():\n parser.add_argument(\"--\"+setting, dest=setting)\n \n args = parser.parse_args()\n \n print( \"reading settings from %s\" % args.filename)\n this_dir, this_file = os.path.split( os.path.realpath(__file__) )\n stream = open( os.path.join( this_dir, args.filename ) )\n settings_file = yaml.load(stream)\n \n for setting in defaults.keys():\n if vars(args)[setting] == None:\n if setting in settings_file:\n self.settings[ setting ] = settings_file[ setting ]\n else:\n self.settings[ setting ] = defaults[ setting ]\n else:\n self.settings[ setting ] = vars(args)[setting]\n \n print( \"settings: %s\" % (str(self.settings)))\n\n ###############################################\n def do_convert(self, image):\n ###############################################\n filename = self.settings[\"level_name\"]\n self.level = mclevel.fromFile(filename)\n self.level.setPlayerGameType(1, \"Player\")\n pos = [self.settings[\"spawn_x\"], self.settings[\"spawn_y\"], self.settings[\"spawn_z\"]]\n \n self.level.setPlayerPosition( pos )\n self.level.setPlayerSpawnPosition( pos )\n \n rows = image.shape[0]\n cols = image.shape[1]\n \n o_x = self.settings[\"origin_x\"]\n o_y = self.settings[\"origin_y\"]\n o_z = self.settings[\"origin_z\"]\n ovs = self.settings[\"oversize\"]\n \n box = BoundingBox( (o_x - ovs, o_y - ovs, o_z - ovs ), \n ( rows + ovs * 2, ovs * 2, cols + ovs * 2))\n \n print(\"creating chunks\") \n chunksCreated = self.level.createChunksInBox( box )\n print(\"Created %d chunks\" % len( chunksCreated ) )\n \n print(\"filling air\")\n self.level.fillBlocks( box, self.level.materials.blockWithID(0,0) )\n print(\"filled %d blocks\" % box.volume )\n \n print(\"filling base layer\")\n box = BoundingBox( (o_x - ovs, o_y - 10, o_z - ovs ), \n ( rows + ovs * 2, 10, cols + ovs * 2))\n item = self.readBlockInfo( self.settings[\"unexplored_item\"] )\n self.level.fillBlocks( box, item )\n print(\"filled %d blocks\" % box.volume )\n \n print(\"creating map\")\n\n for r in range( rows ):\n\n\n print(\" row %d / %d\" % (r, rows) );\n \n for c in range( cols ):\n x = o_x + r\n y = o_y\n z = o_z + c\n \n if image[rows-r-1,c] > self.settings[\"empty_thresh\"]:\n item = self.readBlockInfo( self.settings[\"empty_item\"])\n self.level.setBlockAt(x,y,z, item.ID)\n if self.settings[\"do_ceiling\"] :\n item = self.readBlockInfo( self.settings[\"ceiling_item\"])\n y2 = y + self.settings[\"occupied_height\"]\n self.level.setBlockAt(x,y2,z, item.ID)\n if image[rows-r-1,c] < self.settings[\"occ_thresh\"]:\n h = self.settings[\"occupied_height\"]\n item = self.readBlockInfo( self.settings[\"occupied_item\"])\n box = BoundingBox( (x,y,z),(1,h,1) )\n\n self.level.fillBlocks( box, item )\n print(\"saving map\")\n self.level.saveInPlace() \n \n print(\"done\")\n\n ###############################################\n def read_pgm(self, filename, byteorder='>'):\n ###############################################\n \"\"\"Return image data from a raw PGM file as numpy array.\n\n Format specification: http://netpbm.sourceforge.net/doc/pgm.html\n\n \"\"\"\n with open(filename, 'rb') as f:\n buffer = f.read()\n try:\n header, width, height, maxval = re.search(\n b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n except AttributeError:\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n return numpy.frombuffer(buffer,\n dtype='u1' if int(maxval) < 256 else byteorder+'u2',\n count=int(width)*int(height),\n offset=len(header)\n ).reshape((int(height), int(width)))\n \n ###############################################\n def create_map(self):\n ###############################################\n if (os.path.exists( self.settings[\"level_name\"])) :\n print(\"ERROR: %s directory already exists. Delete it or pick a new name\" % self.settings[\"level_name\"])\n sys.exit()\n if (os.path.exists( os.getenv(\"HOME\") + \"/.minecraft/saves/\" + self.settings[\"level_name\"])) :\n print(\"ERROR: Minecraft world %s already exists. Delete it (at ~/.minecraft/saves/%s) or pick a new name\" % (self.settings[\"level_name\"], self.settings[\"level_name\"]))\n sys.exit()\n print(\"creating map file\")\n os.system(\"pymclevel/mce.py \" + self.settings[\"level_name\"] + \" create\")\n ###############################################\n def move_map(self):\n ###############################################\n print(\"moving to minecraft saves\")\n os.system(\"mv %s ~/.minecraft/saves/\" % self.settings[\"level_name\"])\n\n\nif __name__ == \"__main__\":\n map2d2minecraft = Map2d2Minecraft()\n map2d2minecraft.read_settings(\"map_2d.yaml\")\n image = map2d2minecraft.read_pgm(map2d2minecraft.settings[\"map_file\"], byteorder='<')\n map2d2minecraft.create_map()\n map2d2minecraft.do_convert( image )\n map2d2minecraft.move_map()\n\n" }, { "alpha_fraction": 0.6284584999084473, "alphanum_fraction": 0.6352657079696655, "avg_line_length": 35.150794982910156, "blob_id": "301277f545054b14c5c066d8a0856845390542a7", "content_id": "60f71ea97026cf3c479b2c77fb1d5c9fe9a5f071", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4554, "license_type": "no_license", "max_line_length": 94, "num_lines": 126, "path": "/src/octomap_dump.cpp", "repo_name": "jfstepha/minecraft-ros", "src_encoding": "UTF-8", "text": "/*\n * OctoMap - An Efficient Probabilistic 3D Mapping Framework Based on Octrees\n * http://octomap.github.com/\n *\n * Copyright (c) 2009-2013, K.M. Wurm and A. Hornung, University of Freiburg\n * All rights reserved.\n * License: New BSD\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * Neither the name of the University of Freiburg nor the names of its\n * contributors may be used to endorse or promote products derived from\n * this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n */\n\n#include <octomap/octomap.h>\n#include <octomap/OcTree.h>\n#include <string.h>\n#include <octomap/ColorOcTree.h>\n\n\nusing namespace std;\nusing namespace octomap;\n\nvoid printUsage(char* self){\n std::cerr << \"\\nUSAGE: \" << self << \" input.ot [ depth ] [> output.txt ]\\n\\n\";\n\n std::cerr << \"This tool will convert the occupied voxels of a binary OctoMap \\n\"\n << \"file input.ot to a script for minecraft. The optional depth\"\n << \"parameter specifies a maximum depth to traverse the tree.\\n\\n\";\n\n std::cerr << \"WARNING: The output files will be quite large!\\n\\n\";\n\n exit(0);\n}\n\nint main(int argc, char** argv) {\n \n string scriptFilename = \"\";\n string otFilename = \"\";\n\n if ( (argc != 2 && argc != 3) || (argc > 1 && strcmp(argv[1], \"-h\") == 0)){\n printUsage(argv[0]);\n }\n\n otFilename = std::string(argv[1]);\n int depth=0;\n if (argc > 2) {\n depth = strtol(argv[2], NULL, 10);\n }\n \n std::ifstream infile(otFilename.c_str(), std::ios_base::in |std::ios_base::binary);\n if (!infile.is_open()) {\n cout << \"file \"<< otFilename << \" could not be opened for reading.\\n\";\n return -1;\n }\n //OcTree tree (0.1); // create empty tree with resolution 0.1\n AbstractOcTree* read_tree = AbstractOcTree::read(otFilename);\n ColorOcTree* tree = dynamic_cast<ColorOcTree*>(read_tree);\n cerr << \"opened file\" << endl;\n cerr << \"creating tree\" << endl;\n cerr << \"color tree read from \"<< otFilename <<\"\\n\"; \n \n cerr << \"walking the tree to get resolution \" << endl;\n\n double res = 999;\n for(ColorOcTree::leaf_iterator it = tree->begin( depth ), end=tree->end(); it!= end; ++it) {\n if(tree->isNodeOccupied(*it)){\n double size = it.getSize();\n if (size < res) {\n res = size;\n }\n }\n }\n\n cerr << \"writing parameters\" << endl;\n double min_x, min_y,min_z;\n double max_x, max_y,max_z;\n double size_x, size_y,size_z;\n \n tree->getMetricMin( min_x, min_y, min_z);\n cout << \"#octomap dump\\n\";\n cout << \"min: x \" << min_x << \" y \" << min_y << \" z \" << min_z << endl;\n tree->getMetricMax( max_x, max_y, max_z);\n cout << \"max: x \" << max_x << \" y \" << max_y << \" z \" << max_z << endl;\n tree->getMetricSize( size_x, size_y, size_z);\n cout << \"size: x \" << size_x << \" y \" << size_y << \" z \" << size_z << endl;\n cout << \"resolution: \" << res << endl;\n \n size_t count(0);\n \n cerr << \"dumping tree\" << endl;\n // std::ofstream outfile (scriptFilename.c_str());\n for(ColorOcTree::leaf_iterator it = tree->begin( depth ), end=tree->end(); it!= end; ++it) {\n if(tree->isNodeOccupied(*it)){\n count++;\n double size = it.getSize();\n cout << \"block \"\n << it.getX() << \" \" \n << it.getZ() << \" \"\n << it.getY() << \" \"\n << it->getColor() << \" \"\n << size << endl; \n }\n }\n\n\n}" } ]
4
Cryptek768/MacGyver-Game
https://github.com/Cryptek768/MacGyver-Game
e023db165a53ae1fd41170cd9310d67e579c4053
fcfa1734764a262efb6fb709b7c80f3ea3166e44
eafb65a2fbea369e0b4a6598d1732bc9b8798f70
refs/heads/master
2020-04-08T02:55:44.273516
2019-07-18T20:06:49
2019-07-18T20:06:49
158,954,500
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5199264287948608, "alphanum_fraction": 0.5291232466697693, "avg_line_length": 32.70212936401367, "blob_id": "a27b4d150fda1e8f618a839a72bb8da5f1601c3f", "content_id": "1f3e8d5effd0fc551c96b3cd7c4050a7e2a52b8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1632, "license_type": "no_license", "max_line_length": 73, "num_lines": 47, "path": "/Maze.py", "repo_name": "Cryptek768/MacGyver-Game", "src_encoding": "UTF-8", "text": "import pygame\r\nimport random\r\nfrom Intel import *\r\n\r\n#Classe du Niveau(placement des murs)\r\n\r\nclass Level:\r\n\r\n #Preparation de la classe\r\n def __init__(self, map_pool):\r\n self.map_pool = map_pool\r\n self.map_structure = []\r\n self.position_x = 0\r\n self.position_y = 0\r\n self.sprite_x = int(0 /30)\r\n self.sprite_y = int(0 /30)\r\n self.image_Macgyver = pygame.image.load(MacGyver).convert_alpha()\r\n self.image_Guardian = pygame.image.load(Guardian).convert_alpha()\r\n self.background = pygame.image.load(Background).convert()\r\n \r\n #Prépartion de la liste pour le fichier map\r\n def level(self):\r\n with open (self.map_pool, \"r\") as map_pool:\r\n level_structure = []\r\n for line in map_pool:\r\n line_level = []\r\n for char in line:\r\n if char != '/n':\r\n line_level.append(char)\r\n level_structure.append(line_level)\r\n self.map_structure = level_structure\r\n \r\n #Placement des murs\r\n def display_wall (self, screen):\r\n\r\n wall = pygame.image.load(Wall).convert_alpha()\r\n screen.blit(self.background, (0, 0))\r\n num_line = 0\r\n for ligne_horiz in self.map_structure:\r\n num_col = 0\r\n for ligne_verti in ligne_horiz:\r\n position_x = num_col * Sprite_Size\r\n position_y = num_line * Sprite_Size\r\n if ligne_verti == str(1):\r\n screen.blit(wall, (position_x, position_y))\r\n num_col +=1\r\n num_line +=1\r\n" }, { "alpha_fraction": 0.5846372842788696, "alphanum_fraction": 0.5945945978164673, "avg_line_length": 29.954545974731445, "blob_id": "199ea64dccc6d48aebbed8f1740cb77f25aad0c7", "content_id": "d708e1c3dd7f9bd46dd98b821a5d53aef5d9d0b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 704, "license_type": "no_license", "max_line_length": 70, "num_lines": 22, "path": "/Items.py", "repo_name": "Cryptek768/MacGyver-Game", "src_encoding": "UTF-8", "text": "import pygame\r\nimport random\r\nfrom Intel import *\r\n\r\n\r\n#Classe des placements d'objets\r\n\r\nclass Items:\r\n\r\n #Preparation de la classe\r\n def __init__(self, map_pool):\r\n self.item_needle = pygame.image.load(Object_N).convert_alpha()\r\n self.item_ether = pygame.image.load(Object_E).convert_alpha()\r\n self.item_tube = pygame.image.load(Object_T).convert_alpha()\r\n \r\n #Méthode de spawn des objets\r\n def items_spawn(self, screen):\r\n while items:\r\n rand_x = random.randint(0, 14)\r\n rand_y = random.randint(0, 14)\r\n if self.map_structure [rand_x][rand_y] == 0:\r\n screen.blit(self.image_(Object_N), (rand_x, rand_y))\r\n" }, { "alpha_fraction": 0.6909975409507751, "alphanum_fraction": 0.7007299065589905, "avg_line_length": 27.35714340209961, "blob_id": "f1f812f710948296d8186153aa6ba799414a805e", "content_id": "24077cf3407983707f6a22c9a612ee719a5ea65f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 411, "license_type": "no_license", "max_line_length": 48, "num_lines": 14, "path": "/Intel.py", "repo_name": "Cryptek768/MacGyver-Game", "src_encoding": "UTF-8", "text": "# Information des variables Global et des images\r\n\r\nSprite_Size_Level = 15\r\nSprite_Size = 30\r\nSize_Level = Sprite_Size_Level * Sprite_Size\r\n\r\nBackground = 'images/Background.jpg'\r\nWall = 'images/Wall.png'\r\nMacGyver = 'images/MacGyver.png'\r\nGuardian = 'images/Guardian.png'\r\nObject_N = 'images/Needle.png'\r\nObject_E = 'images/Ether.png'\r\nObject_T = 'images/Tube.png'\r\nitems = [\"Object_N\",\"Object_E\",\"Object_T\"]\r\n" }, { "alpha_fraction": 0.4268934726715088, "alphanum_fraction": 0.44228431582450867, "avg_line_length": 42.08928680419922, "blob_id": "3019c3912798ade7669b26f7d06f791391530657", "content_id": "0abb2befbe47fffea8718fc955477693f8b2fb23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2471, "license_type": "no_license", "max_line_length": 82, "num_lines": 56, "path": "/Characters.py", "repo_name": "Cryptek768/MacGyver-Game", "src_encoding": "UTF-8", "text": "import pygame\r\nfrom Intel import *\r\n\r\nclass Characters:\r\n\r\n def __init__(self, map_pool):\r\n self.map_pool = map_pool\r\n self.position_x = 0\r\n self.position_y = 0\r\n self.sprite_x = int(0 /30)\r\n self.sprite_y = int(0 /30)\r\n self.image_Macgyver = pygame.image.load(MacGyver).convert_alpha()\r\n self.image_Guardian = pygame.image.load(Guardian).convert_alpha()\r\n \r\n #Placement du Gardien\r\n def blit_mg(self, screen):\r\n screen.blit(self.image_Macgyver, (self.position_x, self.position_y))\r\n #Placement de Macgyver\r\n def blit_g(self, screen):\r\n num_line = 14\r\n for line in self.map_structure:\r\n num_col = 14\r\n for ligne_verti in line:\r\n position_x = num_col * Sprite_Size\r\n position_y = num_line * Sprite_Size\r\n if ligne_verti == str(3):\r\n screen.blit(self.image_Guardian, (position_x, position_y))\r\n else:\r\n if ligne_verti == str(3):\r\n self.available_tiles.append((num_col, num_line))\r\n \r\n #Méthode de déplacement de Macgyver(player)\r\n def move_mg(self, direction, screen):\r\n if direction == 'down':\r\n if self.sprite_y < (Sprite_Size_Level - 1):\r\n if self.map_structure[self.sprite_y+1][self.sprite_x] != '1':\r\n self.position_y += 30\r\n self.sprite_y += 1\r\n \r\n elif direction == 'up':\r\n if self.sprite_y > 0:\r\n if self.map_structure[self.sprite_y-1][self.sprite_x] != '1':\r\n self.position_y -= 30\r\n self.sprite_y -= 1\r\n \r\n elif direction == 'left':\r\n if self.sprite_x > 0: \r\n if self.map_structure[self.sprite_y][self.sprite_x-1] != '1':\r\n self.position_x -= 30\r\n self.sprite_x -= 1\r\n \r\n elif direction == 'right':\r\n if self.sprite_x < (Sprite_Size_Level - 1):\r\n if self.map_structure[self.sprite_y][self.sprite_x+1] != '1':\r\n self.position_x += 30\r\n self.sprite_x += 1\r\n" }, { "alpha_fraction": 0.5120663642883301, "alphanum_fraction": 0.5128205418586731, "avg_line_length": 34.83333206176758, "blob_id": "8c7e9bc9a23c58ffc4301ba11b5373c3d1fae6b8", "content_id": "1533116d9fbca4b703ac3c666b95a21e43ff2a8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1326, "license_type": "no_license", "max_line_length": 66, "num_lines": 36, "path": "/Main.py", "repo_name": "Cryptek768/MacGyver-Game", "src_encoding": "UTF-8", "text": "import pygame\r\nfrom Maze import *\r\nfrom Intel import *\r\nfrom Characters import *\r\nfrom Items import *\r\nfrom pygame import K_DOWN, K_UP, K_LEFT, K_RIGHT\r\n\r\n#Classe Main du jeux avec gestion des movements et l'affichage\r\nclass Master:\r\n\r\n def master():\r\n pygame.init()\r\n screen = pygame.display.set_mode((Size_Level, Size_Level))\r\n maze = Level(\"Map.txt\")\r\n maze.level()\r\n #Boucle de rafraichisement\r\n while 1:\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == K_DOWN:\r\n Characters.move_mg(maze, 'down', screen)\r\n if event.key == K_UP:\r\n Characters.move_mg(maze, 'up', screen)\r\n if event.key == K_LEFT:\r\n Characters.move_mg(maze, 'left', screen)\r\n if event.key == K_RIGHT:\r\n Characters.move_mg(maze, 'right', screen)\r\n maze.display_wall(screen)\r\n Characters.blit_mg(maze, screen)\r\n Characters.move_mg(maze, 'direction', screen)\r\n Characters.blit_g(maze, screen)\r\n Items. items_spawn(maze, screen)\r\n pygame.display.flip()\r\n \r\n if __name__ ==\"__main__\":\r\n master()\r\n" } ]
5
cnsr/social
https://github.com/cnsr/social
bd23ef50f943a46b771fc52d6b32c2bc964cae28
1d11424e5dba6cd9ff41a8e4641be1e635b533c5
00a7961c443a873cb371e57e5a81fd0cdcbd8bf6
refs/heads/master
2020-02-07T14:13:09.087222
2017-09-14T06:16:57
2017-09-14T06:16:57
99,278,976
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5822415351867676, "alphanum_fraction": 0.5850977897644043, "avg_line_length": 36.8051643371582, "blob_id": "6e973989da036c6e9518a353cbec061a07290525", "content_id": "5fb47d994ee5285ffb817b49434c80e4982abff5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16105, "license_type": "no_license", "max_line_length": 116, "num_lines": 426, "path": "/socialapp/views.py", "repo_name": "cnsr/social", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom socialapp.models import *\nfrom socialapp.forms import *\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.template import RequestContext\nfrom django.utils import timezone\nfrom unidecode import unidecode\nfrom django.db.models import Count\nimport json\nfrom django.http import HttpResponseRedirect, HttpResponse, HttpResponseServerError, JsonResponse\nfrom django.template import defaultfilters\n# Create your views here.\n\n\ndef index(request):\n if not request.user.is_authenticated:\n uf = UserForm()\n upf = ExtUserForm()\n if 'login' in request.POST:\n log_in(request)\n # return redirect('profile', request.user.id)\n elif 'register' in request.POST:\n register(request)\n # return redirect('profile', request.user.id)\n context = {'userform': uf, 'extuserform': upf}\n return render(request, 'index.html', context)\n else:\n try:\n return redirect('profile', request.user.id)\n except Exception as e:\n print(e)\n\n\ndef log_in(request):\n logout(request)\n username = password = ''\n if request.POST:\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return redirect('profile', request.user.id)\n else:\n messages.error(request, 'Wrong username or password.')\n\n\ndef register(request):\n if request.method == 'POST':\n uf = UserForm(request.POST)\n upf = ExtUserForm(request.POST, request.FILES)\n if uf.is_valid() * upf.is_valid():\n user = User.objects.create_user(username=uf.data['username'], password=uf.data['password'])\n userprofile = upf.save(commit=False)\n userprofile.user = user\n userprofile.save()\n user_tologin = authenticate(request, username=user.username, password=user.password)\n if user_tologin is not None:\n if user_tologin.is_active:\n messages.info(request, 'registration successful.')\n login(request, user_tologin)\n # return redirect('index')\n return redirect('profile', user_tologin.id)\n else:\n messages.error(request, uf.errors)\n messages.error(request, upf.errors)\n\n\ndef log_out(request):\n if request.user.is_active and request.user.is_authenticated:\n logout(request)\n return redirect('index')\n\n\ndef profile(request, u_id):\n try:\n user = User.objects.get(id=u_id)\n extuser = ExtUser.objects.get(user=user)\n posts = WallPost.objects.filter(page=user).order_by('-date')\n wallform = WallPostForm()\n commentform = WallPostCommentForm()\n if 'wall' in request.POST:\n wallform = WallPostForm(request.POST, request.FILES)\n if wallform.is_valid():\n wf = wallform.save(commit=False)\n wf.page = user\n wf.author = request.user\n wf.save()\n wallform = WallPostForm()\n if 'add' in request.POST:\n if request.user not in user.extuser.friend_requests.all():\n user.extuser.friend_requests.add(request.user)\n user.save()\n messages.info(request, 'Request has been sent!')\n else:\n messages.info(request, 'Request has been already been sent before.')\n if 'remove' in request.POST:\n request.user.extuser.friends.remove(user)\n user.extuser.friends.remove(request.user)\n request.user.extuser.friend_requests.add(user)\n messages.info(request, 'Friend has been removed')\n context = {'user': user,\n 'extuser': extuser,\n 'wallform': wallform,\n 'posts': posts,\n 'commentform': commentform}\n return render(request, 'profile.html', context)\n except ObjectDoesNotExist:\n messages.error(request, 'User does not exist')\n return redirect('index')\n\n\ndef profile_edit(request, u_id):\n try:\n user = User.objects.get(id=u_id)\n if request.user == user:\n extuser = ExtUser.objects.get(user=user)\n upf = ExtUserForm(instance=extuser)\n if request.method == 'POST':\n upf = ExtUserForm(request.POST or None, request.FILES or None, instance=extuser)\n if upf.is_valid():\n password = request.POST['password']\n user = authenticate(username=user.username, password=password)\n if user is not None:\n if user.is_active:\n upf.save()\n messages.info(request, 'User profile has been saved.')\n return redirect('profile', u_id)\n else:\n messages.error(request, 'Incorrect password.')\n else:\n messages.error(request, 'Form not valid.')\n context = {'upf': upf, 'extuser': extuser}\n return render(request, 'profile_edit.html', context)\n except ObjectDoesNotExist:\n messages.error(request, 'User does not exist')\n return redirect('index')\n\n\ndef search(request):\n form = SearchForm()\n context = {'results': None, 'form': form}\n if request.POST:\n query = dict(request.POST)\n query.pop('csrfmiddlewaretoken')\n query.pop('search')\n query = {k: v[0] for k, v in query.items() if v != ['']}\n if not query:\n messages.error(request, 'Search query is empty')\n context['results'] = None\n else:\n filtered = ExtUser.objects.filter(**query)\n context['results'] = filtered\n return render(request, 'search.html', context)\n\n\ndef allmessages(request):\n msgs = []\n for x in request.user.extuser.contacts.all():\n try:\n if x != request.user:\n m1 = Message.objects.filter(sender=request.user).filter(receiver=x)\n m2 = Message.objects.filter(sender=x).filter(receiver=request.user)\n # m3 = (m1 | m2).distinct().order_by('created_at')#.latest('created_at')\n if m2 or m1:\n msgs.append(x)\n except Message.DoesNotExist:\n pass\n context = {'msgs': msgs}\n return render(request, 'allmessages.html', context)\n\n\ndef usermessages(request, u1_id, u2_id):\n form = MessageForm()\n u1 = User.objects.get(id=u1_id)\n u2 = User.objects.get(id=u2_id)\n if request.user == u1:\n u_with = u2\n else:\n u_with = u1\n if request.user != u1 and request.user != u2:\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n else:\n m1 = Message.objects.filter(sender=u1).filter(receiver=u2)\n m2 = Message.objects.filter(sender=u2).filter(receiver=u1)\n msgs = (m1 | m2).distinct().order_by('-created_at')\n if request.POST:\n form = MessageForm(request.POST)\n if form.is_valid():\n if u2 not in u1.extuser.contacts.all():\n u1.extuser.contacts.add(u2)\n u1.save()\n if u1 not in u2.extuser.contacts.all():\n u2.extuser.contacts.add(u1)\n u2.save()\n msg = form.save(commit=False)\n msg.sender = u1\n msg.receiver = u2\n msg.save()\n #messages.info(request, 'Message have been sent.')\n form = MessageForm()\n context = {'msgs': msgs, 'form': form, 'uwith': u_with}\n return render(request, 'usermessages.html', context)\n\n\ndef friends(request, u_id):\n try:\n user = User.objects.get(id=u_id)\n friends = user.extuser.friends.all()\n context = {'friends': friends}\n return render(request, 'friends.html', context)\n except ObjectDoesNotExist:\n return redirect('index')\n\n\ndef friendrequests(request):\n frequests = request.user.extuser.friend_requests.all()\n if 'accept' in request.POST:\n f_id = request.POST.get('f_id')\n fr = User.objects.get(id=f_id)\n request.user.extuser.friend_requests.remove(fr)\n fr.extuser.friends.add(request.user)\n request.user.extuser.friends.add(fr)\n messages.info(request, 'User has been added as friend')\n if 'decline' in request.POST:\n f_id = request.POST.get('f_id')\n fr = User.objects.get(id=f_id)\n request.user.extuser.friend_requests.remove(fr)\n messages.info(request, 'Friend request has been declined')\n context = {'friendrequests': frequests}\n return render(request, 'friendrequests.html', context)\n\n\ndef community_creator(request):\n form = CommunityForm()\n if 'create' in request.POST:\n form = CommunityForm(request.POST, request.FILES)\n if form.is_valid():\n f = form.save(commit=False)\n f.creator = request.user\n f.save()\n # this doesnt seem right\n comm = Community.objects.all().latest('date')\n c_id = comm.id\n comm.admins.add(request.user)\n comm.members.add(request.user)\n comm.save()\n messages.info(request, 'Community has been created.')\n return redirect('community', c_id)\n else:\n messages.error(request, 'Form not valid')\n context = {'form': form}\n return render(request, 'community_creator.html', context)\n\n\ndef communities(request):\n if request.user.is_authenticated and request.user.extuser.communities.all():\n communities = request.user.extuser.communities.all().annotate(m_count=Count('members')).order_by('-m_count')\n else:\n communities = Community.objects.all().annotate(m_count=Count('members')).order_by('-m_count')\n context = {'communities': communities}\n return render(request, 'communities.html', context)\n\n\ndef communities_all(request):\n communities = Community.objects.all().annotate(m_count=Count('members')).order_by('-m_count')\n context = {'communities': communities}\n return render(request, 'communities.html', context)\n\n\ndef community(request, c_id):\n try:\n c = Community.objects.get(id=c_id)\n form = CommunityPostForm()\n commentform = CommunityPostCommentForm()\n context = {'c': c, 'form': form, 'p': None, 'commentform': commentform}\n try:\n context['p'] = CommunityPost.objects.filter(community=c).order_by('-date')\n except ObjectDoesNotExist:\n pass\n if 'join' in request.POST:\n if request.user not in c.members.all():\n c.members.add(request.user)\n request.user.extuser.communities.add(c)\n else:\n messages.error(request, 'Already a member.')\n if 'leave' in request.POST:\n if request.user in c.members.all():\n c.members.remove(request.user)\n request.user.extuser.communities.remove(c)\n else:\n messages.error(request, 'You are not member of the group.')\n if 'newpost' in request.POST:\n form = CommunityPostForm(request.POST, request.FILES)\n if form.is_valid():\n f = form.save(commit=False)\n f.author = request.user\n if request.user in c.admins.all():\n f.admin_post = True\n f.community = c\n f.save()\n messages.info(request, 'Post has been sent')\n return render(request, 'community.html', context)\n except ObjectDoesNotExist:\n messages.error(request, 'Community does not exist.')\n return redirect('communities')\n\n\ndef community_members(request, c_id):\n try:\n c = Community.objects.get(id=c_id)\n context = {'c': c}\n return render(request, 'community_members.html', context)\n except ObjectDoesNotExist:\n messages.error(request, 'Community does not exist.')\n return redirect('communities')\n\n\ndef feed(request):\n u = request.user\n c = u.extuser.communities.all()\n f = u.extuser.friends.all()\n feed = []\n for comm in c:\n try:\n posts = CommunityPost.objects.filter(community=comm).filter(admin_post=True)[:25]\n feed.extend(list(posts))\n except ObjectDoesNotExist:\n pass\n for friend in f:\n try:\n wall = WallPost.objects.filter(page=friend).filter(author=friend)[:10]\n feed.extend(list(wall))\n except ObjectDoesNotExist:\n pass\n # this is the only way this shit works, probably slow solution\n feed.sort(key=lambda x: x.date)\n feed.reverse()\n context = {'feed': feed}\n return render(request, 'feed.html', context)\n\n\ndef communities_search(request):\n form = CommunitySearchForm()\n if 'search' in request.POST:\n try:\n result = Community.objects.filter(name__icontains=request.POST['name'])\n except ObjectDoesNotExist:\n result = None\n else:\n result = None\n context = {'result': result, 'form': form}\n return render(request, 'community_search.html', context)\n\n\ndef ajax_like(request):\n\n if request.method == 'POST':\n eid = request.POST.get('id')\n uid = request.POST.get('user')\n user = User.objects.get(id=uid)\n action = request.POST.get('action')\n response_data = {}\n if action == 'wallpost':\n wp = WallPost.objects.get(id=eid)\n if user not in wp.rated.all():\n wp.rated.add(user)\n else:\n wp.rated.remove(user)\n response_data['count'] = wp.rated.count()\n elif action == 'commpost':\n cp = CommunityPost.objects.get(id=eid)\n if user not in cp.rated.all():\n cp.rated.add(user)\n else:\n cp.rated.remove(user)\n response_data['count'] = cp.rated.count()\n response_data['result'] = 'successful'\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\"\n )\n\n\ndef ajax_comment(request):\n if request.method == 'POST':\n eid = request.POST.get('id')\n uid = request.POST.get('user')\n user = User.objects.get(id=uid)\n content = request.POST.get('content')\n type = request.POST.get('type')\n response_data = {}\n if type == 'wall':\n wp = WallPost.objects.get(id=eid)\n x = WallPostComment.objects.create(wallpost=wp, author=user, content=content)\n x.save()\n wp.comments.add(x)\n response_data['date'] = defaultfilters.date(x.date, \"DATETIME_FORMAT\")\n elif type == 'comm':\n cp = CommunityPost.objects.get(id=eid)\n x = CommunityPostComment.objects.create(commpost=cp, author=user, content=content)\n x.save()\n cp.comments.add(x)\n response_data['date'] = defaultfilters.date(x.date, \"DATETIME_FORMAT\")\n response_data['result'] = 'successful'\n response_data['name'] = user.extuser.name + ' ' + user.extuser.surname\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\"\n )\n" }, { "alpha_fraction": 0.40551501512527466, "alphanum_fraction": 0.40713706612586975, "avg_line_length": 37.5625, "blob_id": "ffe45ea5b8a3cba5c73c99238e5912a743ff8289", "content_id": "3be0d023d6d121137658e3c62d35fa2ce11449cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1233, "license_type": "no_license", "max_line_length": 85, "num_lines": 32, "path": "/socialapp/templates/friends.html", "repo_name": "cnsr/social", "src_encoding": "UTF-8", "text": "{% extends 'index.html' %}\n{% block title %}Friends{% endblock %}\n\n{% block css %}\n {% load static %}\n <link type=\"text/css\" rel=\"stylesheet\" href=\"{% static 'css/profile.css' %}\">\n{% endblock %}\n\n{% block content %}\n {% if friends %}\n {% for friend in friends %}\n <div class=\"friend\">\n <a href=\"{% url 'profile' friend.id %}\">\n {% if friend.extuser.avatar %}\n <img class=\"mini\" src=\"{{ friend.extuser.preview.url }}\">\n {% else %}\n {% if friend.extuser.gender == 'M' %}\n <img class=\"mini\" src=\"{% static 'maleuser.png' %}\">\n {% elif friend.extuser.gender == 'F' %}\n <img class=\"mini\" src=\"{% static 'femaleuser.png' %}\">\n {% else %}\n <img class=\"mini\" src=\"{% static 'snowflakeuser.png' %}\">\n {% endif %}\n {% endif %}\n <p>{{ friend.extuser.name }}{{ friend.extuser.surname }}</p>\n </a>\n </div>\n {% endfor %}\n {% else %}\n <h2>No friends yet!</h2>\n {% endif %}\n{% endblock %}" }, { "alpha_fraction": 0.6273175477981567, "alphanum_fraction": 0.6427963972091675, "avg_line_length": 45.650794982910156, "blob_id": "ea50285dc66109acc341c76ddfd9e81c8ef44a83", "content_id": "d211e1f4157ada6ce5547443b8a348b0e256876d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5879, "license_type": "no_license", "max_line_length": 112, "num_lines": 126, "path": "/socialapp/models.py", "repo_name": "cnsr/social", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom imagekit.models import ImageSpecField\nfrom imagekit.processors import ResizeToFit\nfrom django.contrib.auth.models import User\nfrom django.core.validators import RegexValidator\nfrom django_countries.fields import CountryField\nfrom django.utils import timezone\n# Create your models here.\n\n\nclass ExtUser(models.Model):\n user = models.OneToOneField(User, null=True)\n friends = models.ManyToManyField(User, blank=True, related_name='friends')\n friend_requests = models.ManyToManyField(User, blank=True, related_name='friend_requests')\n contacts = models.ManyToManyField(User, blank=True, related_name='contacts')\n name = models.CharField(blank=False, null=False,\n max_length=100, default=None)\n surname = models.CharField(blank=False, null=False,\n max_length=100, default=None)\n phone_regex = RegexValidator(regex=r'^\\+?\\d{2}?\\d{9,13}$',\n message=\"Phone must be entered in +999999999 format, up to 15 digits allowed.\")\n phone = models.CharField(validators=[phone_regex], blank=False, null=False, max_length=15)\n country = CountryField(blank_label='(select country)')\n GENDER_CHOICES = (\n ('M', 'Male'),\n ('F', 'Female'),\n ('S', 'Special'),\n )\n gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default='S')\n avatar = models.ImageField(upload_to='images/avatars',\n null=True,\n blank=True)\n preview = ImageSpecField(source='avatar',\n processors=[ResizeToFit(250, 250)],\n format='JPEG',\n options={'quality': 80})\n communities = models.ManyToManyField('Community', blank=True)\n\n\nclass WallPost(models.Model):\n wall = models.BooleanField(default=True)\n page = models.ForeignKey(User, null=True)\n author = models.ForeignKey(User, null=True, related_name='author')\n content = models.TextField(max_length=1000, null=False, blank=True)\n image = models.ImageField(upload_to='images/post_images',\n null=True,\n blank=True)\n preview = ImageSpecField(source='image',\n processors=[ResizeToFit(150, 150)],\n format='JPEG',\n options={'quality': 80})\n date = models.DateTimeField(default=timezone.now, blank=True)\n rated = models.ManyToManyField(User, related_name='rated_wall')\n commentable = models.BooleanField(default=True)\n comments = models.ManyToManyField('WallPostComment', blank=True, related_name='pcomment')\n\n\nclass WallPostComment(models.Model):\n wallpost = models.ForeignKey(WallPost)\n author = models.ForeignKey(User, null=True)\n content = models.TextField(max_length=1000, null=False, blank=True)\n date = models.DateTimeField(default=timezone.now, blank=False)\n # rated = models.ManyToManyField(User, related_name='rated_wall_comment')\n\n\nclass Search(models.Model):\n name = models.CharField(max_length=100, null=True, blank=True)\n surname = models.CharField(max_length=100, null=True, blank=True)\n GENDER_CHOICES = (\n ('M', 'Male'),\n ('F', 'Female'),\n ('S', 'Special'),\n )\n gender = models.CharField(max_length=1, choices=GENDER_CHOICES, blank=True)\n country = CountryField(blank_label='(select country)', blank=True, null=True)\n\n\nclass Message(models.Model):\n sender = models.ForeignKey(User, related_name=\"sender\")\n receiver = models.ForeignKey(User, related_name=\"receiver\")\n msg_content = models.TextField(max_length=1000, null=False, blank=False)\n created_at = models.DateTimeField(default=timezone.now, blank=True)\n seen = models.BooleanField(default=False)\n\n\nclass Community(models.Model):\n creator = models.ForeignKey(User)\n admins = models.ManyToManyField(User, blank=True, related_name='admins')\n members = models.ManyToManyField(User, blank=True, related_name='members')\n name = models.CharField(max_length=100)\n about = models.TextField(max_length=1000)\n avatar = models.ImageField(upload_to='images/groupavatars',\n null=True,\n blank=True)\n preview = ImageSpecField(source='avatar',\n processors=[ResizeToFit(250, 250)],\n format='JPEG',\n options={'quality': 80})\n post_creation_privacy = models.BooleanField(default=False)\n date = models.DateTimeField(default=timezone.now, blank=True)\n\n\nclass CommunityPost(models.Model):\n wall = models.BooleanField(default=False)\n author = models.ForeignKey(User, null=True)\n admin_post = models.BooleanField(default=False)\n community = models.ForeignKey(Community)\n content = models.TextField(max_length=10000, null=True, blank=True)\n image = models.ImageField(upload_to='images/post_images',\n null=True,\n blank=True)\n preview = ImageSpecField(source='image',\n processors=[ResizeToFit(150, 150)],\n format='JPEG',\n options={'quality': 80})\n date = models.DateTimeField(default=timezone.now, blank=True)\n rated = models.ManyToManyField(User, blank=True, related_name='rated_comm')\n commentable = models.BooleanField(default=True)\n comments = models.ManyToManyField('CommunityPostComment', blank=True, related_name='ccomments')\n\n\nclass CommunityPostComment(models.Model):\n commpost = models.ForeignKey(CommunityPost)\n author = models.ForeignKey(User, null=True)\n content = models.TextField(max_length=1000, null=False, blank=False)\n date = models.DateTimeField(default=timezone.now, blank=True)\n\n" }, { "alpha_fraction": 0.49408984184265137, "alphanum_fraction": 0.4969267249107361, "avg_line_length": 37.47272872924805, "blob_id": "0d6f1a2434f2a57aa64be52588ec37cc87e5f398", "content_id": "d79fbc4f5815539468fe3dbd5a731e892193e068", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2115, "license_type": "no_license", "max_line_length": 128, "num_lines": 55, "path": "/socialapp/static/js/comment.js", "repo_name": "cnsr/social", "src_encoding": "UTF-8", "text": "$(document).ready(function() {\n $('.comment').on('click', function(event) {\n event.preventDefault();\n var form = $(this).closest('form');\n var content = form.find('#id_content')[0].value,\n postid = $(this).attr('data-postid'),\n type = $(this).attr('data-type'),\n user = $(this).attr('data-user');\n var father = $(this).closest('.wallpost_comments');\n sendAjax(user, postid, content, type, form);\n });\n $.ajaxSettings.traditional = true;\n function sendAjax(a,b,c,d,form) {\n $.ajax({\n url : \"/ajax_comment/\", // the endpoint\n type : \"POST\", // http method\n data : {user: a, id: b, content: c, type: d}, // data sent with the post request\n\n // handle a successful response\n success : function(json) {\n form.before('<div class=\"wallpost_comment\"><div class=\"wallpost_comment_img\"><b class=\"wallpost_comment_name\">'+\n json.name +'</b></div><div class=\"wallpost_comment_content\"><p class=\"wallpost_comment_content\">'+\n c + '</p><p>'+ json.date + '</p></div></div>');\n form.find('#id_content')[0].value = '';\n },\n\n // handle a non-successful response\n error : function(xhr,errmsg,err) {\n console.log(xhr.status + \": \" + xhr.responseText);\n // provide a bit more info about the error to the console\n }\n });\n };\n\n $(function () {\n $.ajaxSetup({\n headers: { \"X-CSRFToken\": getCookie(\"csrftoken\") }\n });\n });\n function getCookie(c_name)\n {\n if (document.cookie.length > 0)\n {\n c_start = document.cookie.indexOf(c_name + \"=\");\n if (c_start != -1)\n {\n c_start = c_start + c_name.length + 1;\n c_end = document.cookie.indexOf(\";\", c_start);\n if (c_end == -1) c_end = document.cookie.length;\n return unescape(document.cookie.substring(c_start,c_end));\n }\n }\n return \"\";\n };\n});" }, { "alpha_fraction": 0.6244869828224182, "alphanum_fraction": 0.6244869828224182, "avg_line_length": 19.30555534362793, "blob_id": "6a739d83597d6ae42b90550999f76ff7ece89d88", "content_id": "3be82e86bd666ce6bb31e0909d80731c80c60cad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1462, "license_type": "no_license", "max_line_length": 92, "num_lines": 72, "path": "/socialapp/forms.py", "repo_name": "cnsr/social", "src_encoding": "UTF-8", "text": "from socialapp.models import *\nimport django.forms as DF\nfrom django.forms.fields import DateTimeField\n\n\nclass UserForm(DF.ModelForm):\n\n class Meta:\n model = User\n fields = ['username', 'password']\n\n\nclass ExtUserForm(DF.ModelForm):\n\n class Meta:\n model = ExtUser\n exclude = ['user', 'friends', 'friend_requests', 'contacts', 'communities']\n\n\nclass WallPostForm(DF.ModelForm):\n\n class Meta:\n model = WallPost\n exclude = ['author', 'page', 'date', 'wall', 'rated', 'comments']\n\n\nclass SearchForm(DF.ModelForm):\n\n class Meta:\n model = Search\n fields = ['name', 'surname', 'country', 'gender']\n\n\nclass MessageForm(DF.ModelForm):\n\n class Meta:\n model = Message\n fields = ['msg_content']\n\n\nclass CommunityForm(DF.ModelForm):\n\n class Meta:\n model = Community\n fields = ['name', 'about', 'post_creation_privacy', 'avatar']\n\n\nclass CommunityPostForm(DF.ModelForm):\n\n class Meta:\n model = CommunityPost\n exclude = ['author', 'community', 'date', 'wall', 'admin_post', 'rated', 'comments']\n\n\nclass CommunitySearchForm(DF.ModelForm):\n\n class Meta:\n model = Community\n fields = ['name']\n\n\nclass WallPostCommentForm(DF.ModelForm):\n\n class Meta:\n model = WallPostComment\n fields = ['content']\n\n\nclass CommunityPostCommentForm(DF.ModelForm):\n class Meta:\n model = CommunityPostComment\n fields = ['content']\n" }, { "alpha_fraction": 0.41987180709838867, "alphanum_fraction": 0.41987180709838867, "avg_line_length": 21.35714340209961, "blob_id": "c8bfc428e0c6f3688d2a9f7fea28b917075d7bd4", "content_id": "9cd89c1751b2562c608a6d00d9896f44112136c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 312, "license_type": "no_license", "max_line_length": 43, "num_lines": 14, "path": "/socialapp/static/js/index.js", "repo_name": "cnsr/social", "src_encoding": "UTF-8", "text": "$(document).ready(function() {\n var $reg = $('.register');\n var $log = $('.login');\n $reg.hide();\n $log.hide();\n $('#login').on('click', function() {\n $reg.hide();\n $log.show();\n });\n $('#register').on('click', function() {\n $log.hide();\n $reg.show();\n });\n});" }, { "alpha_fraction": 0.4661061465740204, "alphanum_fraction": 0.4687335789203644, "avg_line_length": 33.61818313598633, "blob_id": "1b8f4d1b3310af9040db7e730dad5d91a7a0cf0b", "content_id": "76ce191cac67d30eb775760d17ba30e7d904583f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1903, "license_type": "no_license", "max_line_length": 91, "num_lines": 55, "path": "/socialapp/static/js/like.js", "repo_name": "cnsr/social", "src_encoding": "UTF-8", "text": "$(document).ready(function() {\n $('.like').on('click', function(event) {\n event.preventDefault();\n var $btn = $(this);\n var $id = $btn.attr('id').substring(3,);\n var $name = $btn.attr('name');\n var $action = $(this).parent().parent().attr('class');\n var $user = $(this).closest('.' + $action).attr('data-user');\n sendAjax($user, $id, $name, $action);\n $btn.text($btn.text() == 'Like' ? 'Unlike' : 'Like');\n //console.log($user, $id, $name, $action);\n });\n $.ajaxSettings.traditional = true;\n function sendAjax(a,b,c,d) {\n $.ajax({\n url : \"/ajax/\", // the endpoint\n type : \"POST\", // http method\n data : {user: a, id: b, name: c, action: d}, // data sent with the post request\n\n // handle a successful response\n success : function(json) {\n var $pattern = '[id$=' + d + b + ']';\n $($pattern).text('Likes: ' + json.count);\n },\n\n // handle a non-successful response\n error : function(xhr,errmsg,err) {\n console.log(xhr.status + \": \" + xhr.responseText);\n // provide a bit more info about the error to the console\n }\n });\n };\n\n $(function () {\n $.ajaxSetup({\n headers: { \"X-CSRFToken\": getCookie(\"csrftoken\") }\n });\n });\n function getCookie(c_name)\n {\n if (document.cookie.length > 0)\n {\n c_start = document.cookie.indexOf(c_name + \"=\");\n if (c_start != -1)\n {\n c_start = c_start + c_name.length + 1;\n c_end = document.cookie.indexOf(\";\", c_start);\n if (c_end == -1) c_end = document.cookie.length;\n return unescape(document.cookie.substring(c_start,c_end));\n }\n }\n return \"\";\n };\n\n});" }, { "alpha_fraction": 0.5245097875595093, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 16.7391300201416, "blob_id": "398019c64cad8a4af11c05e7c58819a9a991dcb5", "content_id": "c036573566cf40c5f0e01d34841b5eb743d8e0b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 408, "license_type": "no_license", "max_line_length": 27, "num_lines": 23, "path": "/requirements.txt", "repo_name": "cnsr/social", "src_encoding": "UTF-8", "text": "decorator==4.1.2\nDjango==1.11.4\ndjango-appconf==1.0.2\ndjango-countries==4.6.1\ndjango-el-pagination==3.1.0\ndjango-imagekit==4.0.1\nipython==6.1.0\nipython-genutils==0.2.0\njedi==0.10.2\nolefile==0.44\npexpect==4.2.1\npickleshare==0.7.4\npilkit==2.0\nPillow==4.2.1\nprompt-toolkit==1.0.15\nptyprocess==0.5.2\nPygments==2.2.0\npytz==2017.2\nsimplegeneric==0.8.1\nsix==1.10.0\ntraitlets==4.3.2\nUnidecode==0.4.21\nwcwidth==0.1.7\n" } ]
8
daphnejwang/MentoreeMatch
https://github.com/daphnejwang/MentoreeMatch
c8fee21184a2f54427d5af322f77179cdfd3447e
1e432b6b3aa6d323fa91bc8edd0e9f5727772f3e
24ddf3f98ca3dc6cc68c2d0ddbb2b6194fe2374c
refs/heads/master
2020-05-18T10:05:26.165672
2014-08-12T07:56:39
2014-08-12T07:56:39
21,997,202
0
1
null
2014-07-18T23:22:51
2014-07-18T23:26:20
2014-07-21T18:04:17
Python
[ { "alpha_fraction": 0.6921849846839905, "alphanum_fraction": 0.7224880456924438, "avg_line_length": 39.48387145996094, "blob_id": "503f1170c45249cbd702881b371511ea0e88f642", "content_id": "7740dacb7367205cbd66781eb943dd17c4ce2aac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1254, "license_type": "no_license", "max_line_length": 60, "num_lines": 31, "path": "/topic_seed.sql", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "BEGIN TRANSACTION;\nCREATE TABLE topics (\n\ttopic_id SERIAL NOT NULL, \n\ttitle VARCHAR(100),\n\tPRIMARY KEY (topic_id)\n);\nINSERT INTO \"topics\" VALUES(1,'Arts & Crafts');\nINSERT INTO \"topics\" VALUES(2,'Career & Business');\nINSERT INTO \"topics\" VALUES(3,'Community & Environment');\nINSERT INTO \"topics\" VALUES(4,'Education & Learning');\nINSERT INTO \"topics\" VALUES(5,'Fitness');\nINSERT INTO \"topics\" VALUES(6,'Food & Drinks');\nINSERT INTO \"topics\" VALUES(7,'Health & Well Being');\nINSERT INTO \"topics\" VALUES(8,'Language & Ethnic Identity');\nINSERT INTO \"topics\" VALUES(9,'Life Experiences');\nINSERT INTO \"topics\" VALUES(10,'Literature & Writing');\nINSERT INTO \"topics\" VALUES(11,'Motivation');\nINSERT INTO \"topics\" VALUES(12,'New Age & Spirituality');\nINSERT INTO \"topics\" VALUES(13,'Outdoors & Adventure');\nINSERT INTO \"topics\" VALUES(14,'Parents & Family');\nINSERT INTO \"topics\" VALUES(15,'Peer Pressure');\nINSERT INTO \"topics\" VALUES(16,'Pets & Animals');\nINSERT INTO \"topics\" VALUES(17,'Religion & Beliefs');\nINSERT INTO \"topics\" VALUES(18,'Self-improvement/Growth');\nINSERT INTO \"topics\" VALUES(19,'Sports & Recreation');\nINSERT INTO \"topics\" VALUES(20,'Support');\nINSERT INTO \"topics\" VALUES(21,'Tech');\nINSERT INTO \"topics\" VALUES(22,'Women');\n\n\nCOMMIT;" }, { "alpha_fraction": 0.5133333206176758, "alphanum_fraction": 0.6966666579246521, "avg_line_length": 14.789473533630371, "blob_id": "33b801b4e8ed48c4768b97094693abf00dd736e0", "content_id": "f2384f5272c8b2fcb554ebc183cf47db1d1c17b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 300, "license_type": "no_license", "max_line_length": 22, "num_lines": 19, "path": "/requirements.txt", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "Babel==1.3\nFlask==0.9\nFlask-Babel==0.9\nFlask-Mail==0.9.0\nFlask-OAuthlib==0.5.0\nFlask-SQLAlchemy==0.16\nFlask-WTF==0.8\nJinja2==2.6\nSQLAlchemy==0.7.9\nWTForms==1.0.2\nWerkzeug==0.8.3\nblinker==1.3\ngunicorn==19.1.0\noauthlib==0.6.3\npsycopg2==2.5.3\npytz==2014.4\nrequests==2.3.0\nspeaklater==1.3\nwsgiref==0.1.2\n" }, { "alpha_fraction": 0.5940476059913635, "alphanum_fraction": 0.6357142925262451, "avg_line_length": 22.36111068725586, "blob_id": "e1f5460a2769aaa90351249c30cbd95d10532dbf", "content_id": "3868226dcb11fd1b1c3ef7d2595249b228eb592d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 840, "license_type": "no_license", "max_line_length": 43, "num_lines": 36, "path": "/Project/topic_seed.py", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "import tabledef\nfrom tabledef import Topic\n\nTOPICS = {1: \"Arts & Crafts\",\n\t\t2: \"Career & Business\",\n\t\t3: \"Community & Environment\",\n\t\t4: \"Education & Learning\",\n\t\t5: \"Fitness\",\n\t\t6: \"Food & Drinks\",\n\t\t7: \"Health & Well Being\",\n\t\t8: \"Language & Ethnic Identity\",\n\t\t9: \"Life Experiences\",\n\t\t10: \"Literature & Writing\",\n\t\t11: \"Motivation\",\n\t\t12: \"New Age & Spirituality\",\n\t\t13: \"Outdoors & Adventure\",\n\t\t14: \"Parents & Family\",\n\t\t15: \"Peer Pressure\",\n\t\t16: \"Pets & Animals\",\n\t\t17: \"Religion & Beliefs\",\n\t\t18: \"Self-improvement/Growth\",\n\t\t19: \"Sports & Recreation\",\n\t\t20: \"Support\",\n\t\t21: \"Tech\",\n\t\t22: \"Women\"}\n\ndef seed_topic_table():\n\ttopics = []\n\tfor items in TOPICS:\n\t\ttopics.append(Topic(title=TOPICS[items]))\n\tprint \"~~~~~ TOPICS ~~~~~~~\"\n\tprint topics\n\ttabledef.dbsession.add_all(topics)\n\ttabledef.dbsession.commit()\n\nseed_topic_table()" }, { "alpha_fraction": 0.6951825022697449, "alphanum_fraction": 0.6975182294845581, "avg_line_length": 35.82795715332031, "blob_id": "989cde391fb98a97663d70cbcaa39e28eb172b7c", "content_id": "5ecb92af6327f268e1e16aac9266341f831957d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3425, "license_type": "no_license", "max_line_length": 207, "num_lines": 93, "path": "/Project/linkedin.py", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "from flask_oauthlib.client import OAuth\nfrom flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session\nimport jinja2\nimport tabledef\nfrom tabledef import *\nfrom sqlalchemy import update\nfrom xml.dom.minidom import parseString\nimport os\nimport urllib\nimport json\nfrom Project import app\nimport pdb\nfrom tabledef import User\n\noauth = OAuth(app)\n\nlinkedin = oauth.remote_app(\n 'linkedin',\n consumer_key='75ifkmbvuebxtg',\n consumer_secret='LAUPNTnEbsBu7axq',\n request_token_params={\n 'scope': 'r_fullprofile,r_basicprofile,r_emailaddress',\n 'state': 'RandomString',\n },\n base_url='https://api.linkedin.com/v1/',\n request_token_url=None,\n access_token_method='POST',\n access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',\n authorize_url='https://www.linkedin.com/uas/oauth2/authorization',\n)\n\n\ndef authorized(resp):\n if resp is None:\n return 'Access denied: reason=%s error=%s' % (\n request.args['error_reason'],\n request.args['error_description']\n )\n session['linkedin_token'] = (resp['access_token'], '')\n linkedin_json_string = linkedin.get('people/~:(id,first-name,last-name,industry,headline,site-standard-profile-request,certifications,educations,summary,specialties,positions,picture-url,email-address)')\n session['linkedin_id'] = linkedin_json_string.data['id']\n \n tabledef.import_linkedin_user(linkedin_json_string.data)\n return jsonify(linkedin_json_string.data)\n\n\[email protected]\ndef get_linkedin_oauth_token():\n return session.get('linkedin_token')\n\ndef change_linkedin_query(uri, headers, body):\n auth = headers.pop('Authorization')\n headers['x-li-format'] = 'json'\n if auth:\n auth = auth.replace('Bearer', '').strip()\n if '?' in uri:\n uri += '&oauth2_access_token=' + auth\n else:\n uri += '?oauth2_access_token=' + auth\n return uri, headers, body\n\ndef save_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics):\n tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=session['linkedin_id']).update({\n 'mentor': mentoree_choice,\n 'age':age_range,\n 'gender':gender_input,\n 'description':description_input,\n 'new_user':False})\n\n for topics in mentor_topics:\n mentor_selected_topics = tabledef.MentoreeTopic(topic_id = topics, mentor_id=session['linkedin_id'])\n tabledef.dbsession.add(mentor_selected_topics)\n return tabledef.dbsession.commit()\n\ndef update_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics):\n user = tabledef.dbsession.query(User).filter_by(linkedin_id=session['linkedin_id']).first()\n\n user.mentor = mentoree_choice\n user.age = age_range\n user.gender = gender_input\n user.description = description_input\n\n current_selected_topics = tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(mentor_id=session['linkedin_id']).all()\n for curr_topics in current_selected_topics:\n tabledef.dbsession.delete(curr_topics)\n # pdb.set_trace()\n for topics in mentor_topics:\n mentor_selected_topics = tabledef.MentoreeTopic(topic_id = topics, mentor_id=session['linkedin_id'])\n tabledef.dbsession.add(mentor_selected_topics)\n\n return tabledef.dbsession.commit()\n\nlinkedin.pre_request = change_linkedin_query\n" }, { "alpha_fraction": 0.6557851433753967, "alphanum_fraction": 0.6636363863945007, "avg_line_length": 36.24615478515625, "blob_id": "a448c5b0e289da7bd0858e3a8f9458c14ca653b3", "content_id": "038f7197e291f12120efd11d6d5e4502b5852292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2420, "license_type": "no_license", "max_line_length": 133, "num_lines": 65, "path": "/Project/email_module.py", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "import tabledef\nfrom tabledef import User, MentoreeTopic, Topic, Email\nimport requests\nimport sqlalchemy\nfrom sqlalchemy import update\nimport datetime\nfrom flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session\n\n# import pdb\n\ndef save_email_info_to_database(sender, mentor, subject, subject_body):\n today = datetime.datetime.now()\n email_info = tabledef.Email(sender_id=sender, receiver_id=mentor, subject=subject, text_body=subject_body, sent_date=today)\n print \"!!~~~!!^^^ email info\"\n print email_info\n tabledef.dbsession.add(email_info)\n return tabledef.dbsession.commit()\n\n\ndef send_email(sender_email, mentor_email, subject, subject_body):\n return requests.post(\n \"https://api.mailgun.net/v2/app27934969.mailgun.org/messages\",\n auth=(\"api\", \"key-21q1narswc35vqr1u3f9upn3vf6ncbb9\"),\n data={\"from\": sender_email,\n \"to\": mentor_email,\n \"subject\": subject,\n \"text\": subject_body})\n\ndef get_email_history_per_mentor(linkedin_id):\n email_hist = tabledef.dbsession.query(Email).filter_by(sender_id=session['linkedin_id']).filter_by(receiver_id=linkedin_id).all()\n return email_hist\n\ndef get_sent_email_history_per_sender():\n email_hist = tabledef.dbsession.query(Email).filter_by(sender_id=session['linkedin_id']).all()\n return email_hist\n\ndef get_email_history():\n email_hist = tabledef.dbsession.query(Email).filter_by(receiver_id=session['linkedin_id']).all()\n for mail in email_hist:\n print \"~!@#$%^&*( email history!! !@#$%^&\"\n print mail.subject\n return email_hist\n\ndef get_email_with_id(email_id):\n email_id = tabledef.dbsession.query(Email).filter_by(id=email_id).all()\n eid = email_id[0]\n return eid\n\ndef format_json(row):\n formatted_json_dict={}\n for column in row.__table__.columns:\n formatted_json_dict[column.name] = str(getattr(row, column.name))\n return formatted_json_dict\n\ndef delete_email(id):\n deleted_email=tabledef.dbsession.query(Email).filter_by(id=id).first()\n tabledef.dbsession.delete(deleted_email)\n tabledef.dbsession.commit()\n# return requests.post(\n# \"https://api.mailgun.net/v2/app27934969.mailgun.org/messages\",\n# auth=(\"api\", \"key-21q1narswc35vqr1u3f9upn3vf6ncbb9\"),\n# data={\"from\": \"Excited User <[email protected]>\",\n# \"to\": \"[email protected]\",\n# \"subject\": \"Hello\",\n# \"text\": \"Testing some Mailgun awesomness!\"})" }, { "alpha_fraction": 0.7425876259803772, "alphanum_fraction": 0.7452830076217651, "avg_line_length": 38.05263137817383, "blob_id": "0afac4d2ac3db15ea136feff1db3d66d792e7185", "content_id": "c8ed024e11219e05a85df659b9744e8996073064", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1484, "license_type": "no_license", "max_line_length": 111, "num_lines": 38, "path": "/Project/search.py", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "from flask_oauthlib.client import OAuth\nfrom flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session\nimport jinja2\nimport tabledef\nfrom tabledef import User, MentoreeTopic, Topic\nimport linkedin\nfrom xml.dom.minidom import parseString\nimport pdb\n# from Project import app\n\ndef search(searchtopics):\n\tsearch_results=tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(topic_id=searchtopics).all()\n\treturn search_results\n\ndef search_topic_display(searchtopics):\n\tsearch_results=tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(topic_id=searchtopics).all()\n\tsearch_topic = tabledef.dbsession.query(tabledef.Topic).filter_by(topic_id=search_results[0].topic_id).first()\n\n\tsearch_topic_title = search_topic.title\n\tprint search_topic_title\n\treturn search_topic_title\n\ndef mentor_detail_display(linkedin_id):\n\t# pdb.set_trace()\n\tment_data = tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=linkedin_id).first()\n\t# print \"!!~~~~~~~~~~~ment_data.positions[0].positions_title~~~~~~~~~~~~~~~~~~~~~~!!\"\n\t# print ment_data.positions[0].positions_title\n\t# ment_data.positions.positions_title\n\treturn ment_data\n\ndef mentor_personal_topics(linkedin_id):\n\t# pdb.set_trace()\n\tment_pers_topics = tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(mentor_id=linkedin_id).all()\n\t# for topics in ment_pers_topics:\n\t\t# print \"((((((~~~~~~~~~~~topics.topic_id~~~~~~~~~~~~~~~~~~~~~~))\"\n\t\t# print topics.topic_id\n\n\treturn ment_pers_topics\n" }, { "alpha_fraction": 0.31594255566596985, "alphanum_fraction": 0.3206689655780792, "avg_line_length": 38.582733154296875, "blob_id": "f93ad61992d3256b6e9e10ec9bc62f40338e91f3", "content_id": "0a54190cea15b3d3c3d02b1ff557be24f9fc277e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 5501, "license_type": "no_license", "max_line_length": 170, "num_lines": 139, "path": "/Project/templates/email_history.html", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n{% block body %}\n<html>\n {% if error %}\n <h2> {{ error }} </h2>\n {% else %}\n <html>\n <body>\n <form>\n <br>\n <div class=\"jumbotron jumbotron-sm\">\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm-12 col-lg-12\">\n <div class=\"col-sm-3 column\">\n </div>\n <div class=\"col-sm-9 column\">\n <h1 class=\"h1\">\n &nbsp;&nbsp;Email Inbox\n </h1>\n </div>\n </div>\n </div>\n </div>\n </div>\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-md-4\">\n <legend><span class=\"glyphicon glyphicon-globe\"></span>&nbsp;Email Received <a href=\"/email_sent_history\" class=\"btn\" type=\"button\">Sent</a></legend>\n\n <table class=\"table table-hover\">\n <thead>\n <tr>\n <th>\n #\n </th>\n <th>\n Date\n </th>\n <th>\n Subject\n </th>\n <th>\n From\n </th>\n </tr>\n </thead>\n <tbody>\n {% set count = 1 %}\n {% for emails in email_history %}\n <tr id=\"{{emails.id}}\">\n <td>\n {{count}}\n {% set count= count+1 %}\n </td>\n <td>\n {{emails.sent_date.strftime('%m/%d/%Y')}}\n </td>\n <td>\n {{emails.subject}}\n </td>\n <td>\n {{emails.sender.first_name}}&nbsp;{{emails.sender.last_name}}\n </td>\n </tr>\n {% endfor %}\n </tbody>\n </table>\n </div>\n <div class=\"col-md-8\">\n <div class=\"well well-sm\">\n <div class=\"row\">\n <div class=\"col-md-12\">\n <div class=\"email_contents\">\n <img src=\"static/img/mentor_logo_grey.png\" class=\"img-responsive center-block\">\n </div>\n </div>\n </div>\n </div>\n </div>\n </div>\n </div>\n </form>\n <script>\n $(document).ready(function (){\n $(\"#myTab .active\").removeClass(\"active\");\n $(\"#myTab .email\").addClass(\"active\");\n \n $(\"tbody tr\").click(function(){\n var emailId = $(this).attr('id');\n \n $.getJSON(\"/email_detail/\"+emailId,function(result){\n console.log(result);\n $(\".email_contents\").html(\n '<div class=\"container\">'+\n '<div class=\"row clearfix\">'+\n '<div class=\"col-md-7 column\">'+\n '<div class=\"list-group\">'+\n '<h5>From:&nbsp;'+result.sender.first_name+'&nbsp;'+result.sender.last_name+' </h5>'+\n '<div class=\"list-group-item\">'+\n '<h4>Subject:&nbsp;<small>'+result.subject+'</small></h4>'+\n '</div>'+\n '<div class=\"list-group-item\">'+\n '<h4 class=\"list-group-item-heading text-right\">'+\n '<small>'+result.sent_date+'</small></h4><br>'+\n '<p class=\"list-group-item-text\">'+\n result.text_body+\n '</p>'+\n '</div>'+\n '</div>'+\n '</div>'+\n '</div>'+\n '</div>'+\n '<div class=\"form-group\">\\\n <a class=\"btn btn-primary btn-large pull-right\" style=\"margin-left: 20px;\" href=\"/email/'+result.sender_id+'\">Reply</a>\\\n <a id=\"delete_email\" class=\"btn btn-primary btn-large pull-right\" href=\"/delete_email/'+result.id+'\">Delete</a>\\\n </div>'\n );\n\n $(\"#delete_email\").click(function(e){\n e.preventDefault();\n var href = $(this).attr('href');\n console.log(\"delete email!\", href);\n $.ajax(href).done(function(res_server){\n if (res_server == 'error'){\n return;\n }\n $(\".email_contents\").html('<img src=\"static/img/mentor_logo_grey.png\" class=\"img-responsive center-block\">');\n $('tr[id='+res_server+']').remove();\n });\n });\n });\n });\n });\n </script>\n {% endif %}\n </body>\n </html>\n {% endblock %}" }, { "alpha_fraction": 0.5993179678916931, "alphanum_fraction": 0.6044330596923828, "avg_line_length": 33.514705657958984, "blob_id": "e82ab37c2927c619485384e66ba512df90193588", "content_id": "5ca079cbe569b6301826e2f26e6eddb363458581", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2346, "license_type": "no_license", "max_line_length": 208, "num_lines": 68, "path": "/Project/mentorsearch.py", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "# from flask import Flask, render_template, redirect, request, flash, url_for, session\n# import jinja2\n# import tabledef\n# from tabledef import Users, MentorCareer, MentorSkills\n# from xml.dom.minidom import parseString\n# import os\n# import urllib\n\n# app = Flask(__name__)\n# app.secret_key = \"topsecretkey\"\n# app.jinja_env.undefined = jinja2.StrictUndefined\n\n# @app.route(\"/\")\n# def index():\n# print \"hello\"\n# return \"hello\"\n\n# @app.route(\"/login\", methods=[\"GET\"])\n# def get_userlogin():\n# error = None\n# f = urllib.urlopen(\"http://127.0.0.1:5000/login\")\n# print \"!~~~~!~~~~!\"\n# print f.read()\n# # url = os.environ['HTTP_HOST']\n# # xmlDoc = parseString(url)\n# # print xmlDoc\n# # linkedin_auth = {}\n# return render_template(\"login.html\", error = error)\n\n# @app.route(\"/login\", methods=[\"POST\"])\n# def login_user():\n# found_user = tabledef.dbsession.query(User).filter_by(email=request.form['email']).first()\n# print \"found user\", found_user\n# error = None\n# if found_user:\n# print \"User found\"\n# session['user'] = found_user.id\n# return redirect(\"/\")\n# else:\n# print \"User not found\"\n# #flash('Invalid username/password.')\n# error = \"Invalid Username\"\n# return render_template('login.html', error = error)\n# # return redirect(\"/\")\n\n# @app.route(\"/create_newuser\", methods=[\"GET\"])\n# def get_newuser():\n# return render_template(\"newuser.html\")\n\n# @app.route(\"/create_newuser\", methods=[\"POST\"])\n# def create_newuser():\n# # print \"SESSION\", tabledef.dbsession\n# user_exists = tabledef.dbsession.query(User).filter_by(email=request.form['email']).first()\n# print \"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\"\n# print \"USER EXISTS\", user_exists\n# if user_exists != None:\n# flash(\" User already exists. Please login\")\n# return redirect(\"/create_newuser\")\n# else: \n# user = User(email=request.form['email'], password= request.form['password'], age=request.form['age'], sex=request.form['sex'], occupation=request.form['occupation'], zipcode=request.form['zipcode'])\n# tabledef.dbsession.add(user)\n# tabledef.dbsession.commit()\n# flash(\"Successfully added new user!\")\n# return redirect(\"/\")\n\n\n# if __name__ == \"__main__\":\n# app.run(debug = True)" }, { "alpha_fraction": 0.720588207244873, "alphanum_fraction": 0.720588207244873, "avg_line_length": 45.064517974853516, "blob_id": "1bedf42e1e896aea997b9cbac925761ef772351e", "content_id": "d7c65eb85047a6f43ebf2786b7b950d5a00734c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1428, "license_type": "no_license", "max_line_length": 159, "num_lines": 31, "path": "/Project/endorsements.py", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "import tabledef\nfrom tabledef import User, MentoreeTopic, Topic, Email, Endorsement\nimport requests\nimport sqlalchemy\nfrom sqlalchemy import update\nimport datetime\nfrom flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session\n\n# import pdb\n\ndef save_endorsement_info_to_database(sender, mentor, endorsement_title, endorsement_body):\n today = datetime.datetime.now()\n endorsement_info = tabledef.Endorsement(sender_id=sender, receiver_id=mentor, title=endorsement_title, endorsements_text=endorsement_body, sent_date=today)\n print \"!!~~~!!^^^ endorsement_info info\"\n print endorsement_info\n tabledef.dbsession.add(endorsement_info)\n return tabledef.dbsession.commit()\n\ndef get_endorsement_info_per_mentor(linkedin_id):\n endorsement_hist = tabledef.dbsession.query(Endorsement).filter_by(receiver_id=linkedin_id).all()\n # for endorsements in endorsement_hist:\n # print \"!^^^^^^^^^^^^^^^^endorsement history!! ^^^^^^^^^^^^^^^^^^^^^\"\n # print endorsements.sender.picture_url\n return endorsement_hist\n\ndef get_endorsement_info_for_self():\n profile_endorsement_hist = tabledef.dbsession.query(Endorsement).filter_by(receiver_id=session['linkedin_id']).all()\n for endorsements in profile_endorsement_hist:\n print \"!^^^^^^^^^^^^^^^^endorsements_text!!^^^^^^^^^^^^^^^^\"\n print endorsements.endorsements_text\n return profile_endorsement_hist\n" }, { "alpha_fraction": 0.6979626417160034, "alphanum_fraction": 0.6988027691841125, "avg_line_length": 38.489627838134766, "blob_id": "f68adb747a22e2c2a3391b61fd2f9b4e5e35f220", "content_id": "291f8b9338af6838d308914ba386ce3950f1bbf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9522, "license_type": "no_license", "max_line_length": 131, "num_lines": 241, "path": "/Project/main.py", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "from flask_oauthlib.client import OAuth\nfrom flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session\nimport jinja2\nimport tabledef\nimport search\nfrom tabledef import User, MentoreeTopic, Topic\nimport linkedin\nfrom xml.dom.minidom import parseString\nfrom Project import app\nimport json\nfrom flask import redirect\nimport pagination\nimport email_module\nimport endorsements\n\napp.debug = True\napp.secret_key = 'iLoveHelloKitty'\n\n# Pagination\nPER_PAGE = 5\n\ndef url_for_other_page(page, mentee_topic_choice):\n args = dict(request.view_args.items() + request.args.to_dict().items()) \n args['page'] = page\n args['mentee_topic_choice'] = mentee_topic_choice\n return url_for(request.endpoint, **args)\napp.jinja_env.globals['url_for_other_page'] = url_for_other_page\n\n# LOGIN Pages\[email protected]('/login')\ndef login():\n return linkedin.linkedin.authorize(callback=url_for('get_linkedin_data', _external=True))\n\[email protected]('/logout')\ndef logout():\n session.pop('linkedin_token', None)\n return redirect(url_for('index'))\n\[email protected]('/login/authorized')\[email protected]_handler\ndef get_linkedin_data(resp): \n\n user_json = linkedin.authorized(resp)\n user_json = user_json.data\n user_string = json.loads(user_json)\n\n user = tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=user_string[\"id\"]).first()\n if user and user.new_user:\n return redirect(url_for('addinfo_page'))\n # print linkedin.authorize(callback=url_for('authorized', _external=True))\n\n return redirect(url_for('index'))\n\n# HOME & ACCOUNT CREATION Pages\[email protected]('/')\ndef homepage():\n return render_template('home_page.html')\n\[email protected]('/home')\ndef index():\n if 'linkedin_token' in session:\n me = linkedin.linkedin.get('people/~')\n jsonify(me.data)\n # linkedin_data = json.loads(linkedin_json_string)\n topics = tabledef.Topic.query.order_by(\"topic_id\").all()\n return render_template('index.html', topics=topics)\n return redirect(url_for('login'))\n\[email protected]('/additionalinfo', methods=[\"GET\"])\ndef addinfo_page():\n return render_template('additionalinfo.html')\n\[email protected]('/additionalinfo', methods=[\"POST\"])\ndef addinfo():\n mentoree_choice = request.form.get('mentoree-radios')\n age_range = request.form.get('agerange')\n gender_input = request.form.get('gender_radios')\n description_input = request.form.get('description')\n mentor_topics = request.form.getlist('mentortopics')\n\n linkedin.save_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics)\n # current_user = tabledef.dbsession.query(tabledef.User).filter_by(linkedintoken=session['linkedin_token']).first()\n return redirect(url_for('index'))\n\n\[email protected]('/home', defaults={'page': 1}, methods=[\"POST\"])\[email protected]('/home/page/<int:page>/<mentee_topic_choice>')\ndef search_results(page, mentee_topic_choice = None):\n mentee_topic_choice = mentee_topic_choice or request.form.get('searchtopics')\n print \"~~~~~~~~~~~~~~~~mentee_topic_choice\"\n print mentee_topic_choice\n mentor_data = search.search(mentee_topic_choice)\n if mentor_data:\n\n start_index = (page - 1) * (PER_PAGE)\n end_index = (page) * (PER_PAGE)\n\n ment_count = len(mentor_data)\n users = mentor_data[start_index:end_index]\n # users = mentor_data.paginate(page, PER_PAGE, False)\n\n if not users and page != 1:\n abort(404)\n pagination_per_page = pagination.Pagination(page, PER_PAGE, ment_count)\n search_topic = search.search_topic_display(mentee_topic_choice)\n return render_template('searchresults.html', search_topic_display=search_topic, \n pagination=pagination_per_page, users=users, mentee_topic_choice=mentee_topic_choice)\n messages = flash('Sorry! There are no mentors under this search topic')\n return redirect(url_for('index'))\n\n# MENTOR DETAIL PAGES\[email protected]('/mentor_detail/<linkedin_id>', methods=[\"GET\"])\ndef mentor_page(linkedin_id):\n ment_data = search.mentor_detail_display(linkedin_id)\n user_data = search.mentor_detail_display(session['linkedin_id'])\n endorsement_history = endorsements.get_endorsement_info_per_mentor(linkedin_id)\n\n return render_template('mentor_detail.html', ment_data=ment_data, user_data=user_data, endorsement_history=endorsement_history)\n\[email protected]('/mentor_detail', methods=[\"POST\"])\ndef add_endorsement():\n sender = session['linkedin_id']\n sender_data= search.mentor_detail_display(sender)\n\n mentor = request.form.get('mentor_id')\n print \"~~~~~~~~~~~~~~~~MENTOR ID on main\"\n print mentor\n mentor_data = search.mentor_detail_display(mentor)\n\n endorsement_title = request.form.get('endorsement_title')\n endorsement_body = request.form.get('endorsement_txt')\n\n endorsements.save_endorsement_info_to_database(sender, mentor, endorsement_title, endorsement_body)\n\n return redirect(url_for('mentor_page', linkedin_id=mentor))\n\n# SELF PROFILE PAGES\[email protected]('/profile', methods=[\"GET\"])\ndef self_page():\n if 'linkedin_id' in session:\n ment_data = search.mentor_detail_display(session['linkedin_id'])\n profile_endorsement_hist = endorsements.get_endorsement_info_for_self()\n return render_template('self_profile.html', ment_data=ment_data, profile_endorsement_hist=profile_endorsement_hist)\n return redirect(url_for('login'))\n\[email protected]('/profile', methods=[\"POST\"])\ndef update_self_page():\n if 'linkedin_id' in session:\n ment_data = search.mentor_detail_display(session['linkedin_id'])\n update_data = tabledef.update_linkedin_user()\n return render_template('self_profile.html', ment_data=ment_data)\n return redirect(url_for('self_page'))\n\[email protected]('/edit_profile', methods=[\"GET\"])\ndef mentor_page_update():\n if 'linkedin_id' in session:\n ment_data = search.mentor_detail_display(session['linkedin_id'])\n ment_pers_topics = search.mentor_personal_topics(session['linkedin_id'])\n topics = tabledef.Topic.query.order_by(\"topic_id\").all()\n return render_template('edit_self_profile.html', ment_data=ment_data, ment_pers_topics=ment_pers_topics, topics=topics)\n return redirect(url_for('login'))\n\[email protected]('/edit_profile', methods=[\"POST\"])\ndef mentor_page_update_post():\n mentoree_choice = request.form.get('mentoree-radios')\n age_range = request.form.get('agerange')\n gender_input = request.form.get('gender_radios')\n description_input = request.form.get('description')\n mentor_topics = request.form.getlist('mentortopics')\n\n linkedin.update_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics)\n return redirect(url_for('self_page'))\n\n# EMAIL FORM Page\[email protected]('/email/<linkedin_id>', methods=[\"GET\"])\ndef email_get(linkedin_id):\n ment_data = search.mentor_detail_display(linkedin_id)\n user_data = search.mentor_detail_display(session['linkedin_id'])\n email_history = email_module.get_email_history_per_mentor(linkedin_id)\n return render_template('email_form.html', ment_data=ment_data, user_data=user_data, email_history=email_history)\n\[email protected]('/email', methods=[\"POST\"])\ndef email_post():\n sender = session['linkedin_id']\n sender_data= search.mentor_detail_display(sender)\n sender_email = sender_data.email\n\n mentor = request.form.get('mentor_id')\n mentor_data = search.mentor_detail_display(mentor)\n mentor_email = mentor_data.email\n\n subject = request.form.get('subject')\n subject_body = request.form.get('message')\n\n email_module.save_email_info_to_database(sender, mentor, subject, subject_body)\n email_module.send_email(sender_email, mentor_email, subject, subject_body)\n\n messages = flash('Success! Your message has been sent successfully.')\n\n return redirect(url_for('email_get', linkedin_id=mentor, messages=messages))\n\n# EMAIL INBOX Page\[email protected]('/email_history', methods=[\"GET\"])\ndef email_history():\n user_data = search.mentor_detail_display(session['linkedin_id'])\n email_history = email_module.get_email_history()\n return render_template('email_history.html', user_data=user_data, email_history=email_history)\n\[email protected]('/email_sent_history', methods=[\"GET\"])\ndef email_sent_history():\n user_data = search.mentor_detail_display(session['linkedin_id'])\n email_history = email_module.get_sent_email_history_per_sender()\n return render_template('email_sent_history.html', user_data=user_data, email_history=email_history)\n\[email protected]('/email_detail/<email_id>', methods=[\"GET\"])\ndef email_detail(email_id):\n eid = email_module.get_email_with_id(email_id)\n email_selected = {}\n email_selected[\"id\"] = eid.id\n email_selected[\"receiver_id\"] = eid.receiver_id\n email_selected[\"sender_id\"] = eid.sender_id\n email_selected[\"sent_date\"] = eid.sent_date.strftime(\"%d/%m/%Y\")\n email_selected[\"subject\"] = eid.subject\n email_selected[\"text_body\"] = eid.text_body\n\n email_selected[\"sender\"] = {}\n email_selected[\"sender\"][\"first_name\"] = eid.sender.first_name\n email_selected[\"sender\"][\"last_name\"] = eid.sender.last_name\n\n return json.dumps(email_selected)\n\[email protected]('/delete_email/<int:id>', methods=[\"GET\"])\ndef delete_email(id):\n if 'linkedin_id' not in session:\n return 'error'\n email_module.delete_email(id)\n return str(id)\n\[email protected]('/about', methods=[\"GET\"])\ndef about_us():\n return render_template('about_us.html')\n\n\n " }, { "alpha_fraction": 0.43645185232162476, "alphanum_fraction": 0.4476321041584015, "avg_line_length": 47.3775520324707, "blob_id": "ec3eac46606b1948c7052a647c7f4152f0d569bb", "content_id": "199f48f857eac388f6a582eae1ed8c0061aedf9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 9485, "license_type": "no_license", "max_line_length": 171, "num_lines": 196, "path": "/Project/templates/mentor_detail.html", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n{% block body %}\n<html>\n {% if error %}\n <h2> {{ error }} </h2>\n {% else %}\n <div class=\"container\">\n <div class=\"row clearfix\">\n <div class=\"col-md-12 column\">\n <div class=\"page-header\">\n <h1>\n {{ment_data.first_name}}&nbsp;{{ment_data.last_name}}'s&nbsp;<small>Profile page</small>\n </h1>\n </div>\n <div class=\"row clearfix\">\n <div class=\"col-md-1 column\">\n </div>\n <div class=\"col-md-2 brd\">\n <div class=\"center-block placeholder\">\n {% if ment_data.picture_url != None %}\n <img alt=\"140x140\" src=\"{{ment_data.picture_url}}\"/>\n {% else %}\n <img alt=\"110x110\" src=\"http://pipsum.com/110x110.jpg\"/>\n {% endif %}\n </div>\n </div>\n <div class=\"col-md-4 column\">\n <blockquote>\n <p style=\"font-size:17px\"><b>Position:&nbsp;</b>\n {% if ment_data.positions|length > 0 %}\n {{ment_data.positions[0].positions_title}}\n {% endif %}\n </p>\n <p style=\"font-size:17px\"><b>Industry:&nbsp;</b>{{ment_data.industry}}</p>\n <a href=\"/email/{{ment_data.linkedin_id}}\" id=\"EmailMentor\" name=\"EmailMentor\" class=\"btn btn-sm btn-default\" role=\"button\">Email Me!</a>\n </blockquote>\n </div>\n <div class=\"col-md-5 column\">\n <blockquote>\n <p style=\"font-size:17px\"><b>Field of Study:&nbsp;</b>\n {% if ment_data.educations|length > 0 %}\n {{ment_data.educations[0].educations_field_of_study}}\n </p>\n <p style=\"font-size:17px\"><b>Degree:&nbsp;</b>{{ment_data.educations[0].educations_degree}}</p>\n {% endif %}\n <p style=\"font-size:17px\"><b>Age:&nbsp;</b>{{ment_data.age}}</p>\n </blockquote>\n </div>\n </div>\n <div class=\"col-sm-12\" contenteditable=\"false\" style=\"\">\n <div class=\"panel panel-default\">\n <div class=\"panel-heading\">{{ment_data.first_name}}'s Bio</div>\n {% if ment_data.summary != None %}\n <div class=\"panel-body\"> {{ment_data.summary}}\n {% else %}\n <div class=\"panel-body\"> <h4><small>This data is pulled from the user's Linkedin Profile. This user does not have a bio</small></h4>\n {% endif %}\n </div>\n </div>\n </div>\n <div class=\"col-sm-12\" contenteditable=\"false\" style=\"\">\n <div class=\"panel panel-default\">\n <div class=\"panel-heading\">What I would like to mentor on:</div>\n <div class=\"panel-body\"> \"{{ment_data.description}}\"\n </div>\n </div>\n <br>\n <div class=\"section subsection-reorder\" id=\"profile-experience\" style=\"display:block\">\n <div class=\"header\">\n <h1><small>Experience</small></h1>\n <hr style =\"border-width: 3px\" width=\"60%\" align=left>\n </div>\n <div class=\"content vcalendar\">\n <div>\n <div>\n {% if ment_data.positions|length > 0 %}\n {% for pos in ment_data.positions %}\n <div class=\"position first experience vevent vcard summary-current\" style=\"display:block\">\n <div class=\"postitle\">\n <h3 class=\"false\">\n <span class=\"title\">{{pos.positions_title}}</span>\n </h3>\n <h4><strong>\n <span class=\"org summary\">{{pos.positions_company_name}}</span>\n </strong>\n </h4>\n <p class=\"description\">Industry:&nbsp;{{pos.positions_industry}}</p>\n </div>\n <p class=\"period\">\n <abbr class=\"dtstart\" title=\"pos_start_date\">{{pos.positions_start_year}}</abbr>\n – <abbr class=\"dtstamp\" title=\"pos_end_date\">{{pos.positions_end_year}}</abbr>\n {% if pos.positions_end_year > 0 %}\n <span class=\"duration\"><span class=\"value-title\" title=\"year\"> </span>({{ pos.positions_end_year - pos.positions_start_year }} years)</span>\n {% endif %}\n </p>\n </div>\n {% endfor %}\n {% endif %}\n <br>\n <div id=\"background-education-container\" class=\"background-section\" data-li-template=\"education_section\">\n <div id=\"background-education\" class=\"background-education edit-default\">\n <h1><small>Education</small></h1>\n <hr style =\"border-width: 3px\" width=\"60%\" align=left>\n {% if ment_data.educations|length > 0 %}\n {% for edu in ment_data.educations %}\n <div id=\"education\" class=\"editable-item section-item\">\n <div id=\"education-view\">\n <div class=\"education first\">\n <header>\n <h4 class=\"summary fn org\">{{edu.educations_school_name}}</h4>\n <h5><span class=\"degree\">{{edu.educations_degree}}, </span><span class=\"major\">{{edu.educations_field_of_study}}</span></h5>\n </header>\n <span class=\"education-date\"><time>{{edu.educations_start_year}}</time><time> – {{edu.educations_end_year}}</time></span>\n </div>\n </div>\n </div>\n </div>\n </div>\n {% endfor %}\n {% endif %}\n <br>\n <br>\n <hr>\n <br>\n <br>\n <div class=\"col-sm-12\" contenteditable=\"false\" style=\"\">\n <div class=\"panel panel-default\">\n <div class=\"panel-heading\">My Endorsements:</div>\n <div class=\"panel-body\">\n <ul class=\"media-list\">\n {% for endorsements in endorsement_history %}\n <li class=\"media\">\n <a class=\"pull-left\" href=\"/mentor_detail/{{endorsements.sender.linkedin_id}}\">\n {% if endorsements.sender.picture_url != None %}\n <img class=\"media-object\" src=\"{{endorsements.sender.picture_url}}\">\n {% else %}\n <img alt=\"80x80\" src=\"http://pipsum.com/80x80.jpg\"/>\n {% endif %}\n </a>\n <div class=\"media-body\">\n <a href=\"/mentor_detail/{{endorsements.sender.linkedin_id}}\">\n <h2 class=\"media-heading\"><small>{{endorsements.sender.first_name}}&nbsp;{{endorsements.sender.last_name}}</small></h2></a>\n <h4 class=\"media-heading text-right\"><small>{{endorsements.sent_date.strftime('%m/%d/%Y')}}</small></h4>\n <h4 class=\"media-heading\">{{endorsements.title}}</h4>\n <p>{{endorsements.endorsements_text}}</p>\n </div>\n </li>\n <br>\n {% endfor %}\n <form method=\"POST\" action=\"/mentor_detail\">\n <li class=\"media\">\n <a class=\"pull-left\" href=\"/mentor_detail/{{user_data.linkedin_id}}\">\n {% if user_data.picture_url != None %}\n <img class=\"media-object\" alt=\"140x140\" src=\"{{user_data.picture_url}}\">\n {% else %}\n <img alt=\"80x80\" src=\"http://pipsum.com/80x80.jpg\"/>\n {% endif %}\n </a>\n <div class=\"media-body\">\n <div class=\"form-group\">\n <label for=\"endorsement_title\">\n Title:</label>\n <input type=\"text\" id=\"endorsement_title\" name=\"endorsement_title\"class=\"form-control\" placeholder=\"Please enter an endorsement title\">\n </div>\n <div class=\"form-group\">\n <label for=\"endorsement_txt\">\n Endorsement:</label>\n <textarea name=\"endorsement_txt\" id=\"endorsement_txt\" class=\"form-control\" rows=\"2\" style=\"width:100%\"\n placeholder=\"Please enter an endorsement here\"></textarea>\n </div>\n <div class=\"form-group\">\n <div class=\"col-md-12\">\n <button type=\"submit\" class=\"btn btn-primary pull-right\" id=\"btnEmail\">\n Submit</button>\n </div>\n </div>\n <input type = \"hidden\" name=\"mentor_id\" value=\"{{ment_data.linkedin_id}}\"/>\n </form>\n </div>\n </li>\n </ul>\n </div>\n </div>\n </div>\n </div>\n </div>\n </div>\n <script>\n $(document).ready(function (){\n $(\"#myTab .active\").removeClass(\"active\");\n $(\"#myTab .home\").addClass(\"active\");\n });\n </script>\n {% endif %}\n</html>\n{% endblock %}" }, { "alpha_fraction": 0.30849358439445496, "alphanum_fraction": 0.3153044879436493, "avg_line_length": 43.3136100769043, "blob_id": "dec3ded5e4a5df1c5e6d1beff567737d8c45e166", "content_id": "92cc7de192e041cebb1d55bf324fc5634418f315", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 7488, "license_type": "no_license", "max_line_length": 180, "num_lines": 169, "path": "/Project/templates/email_form.html", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n{% block body %}\n<html>\n {% if error %}\n <h2> {{ error }} </h2>\n {% else %}\n <html>\n <body>\n <form method=\"POST\" action=\"/email\">\n <br>\n <div class=\"jumbotron jumbotron-sm\">\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm-12 col-lg-12\">\n <div class=\"col-sm-1 column\">\n <div class=\"center-block placeholder\">\n {% if ment_data.picture_url != None %}\n <img alt=\"140x140\" src=\"{{ment_data.picture_url}}\"/>\n {% else %}\n <img alt=\"140x140\" src=\"http://pipsum.com/140x140.jpg\"/>\n {% endif %}\n </div>\n </div>\n <div class=\"col-sm-11 column\">\n <h1 class=\"h1\" style=\"text-align: center\">\n <a style=\"color:Black\" href=\"/mentor_detail/{{ment_data.linkedin_id}}\">Email: <small>{{ment_data.first_name}}&nbsp;{{ment_data.last_name}}</small></a>\n </h1>\n </div>\n </div>\n </div>\n </div>\n </div>\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-md-4\">\n <legend><span class=\"glyphicon glyphicon-globe\"></span> Emails Sent to:&nbsp;{{ment_data.first_name}}</legend>\n <table class=\"table\">\n <thead>\n <tr>\n <th>\n #\n </th>\n <th>\n Date\n </th>\n <th>\n Subject\n </th>\n <th>\n From\n </th>\n </tr>\n </thead>\n <tbody>\n {% set count = 1 %}\n {% for emails in email_history %}\n {% if loop.index <= 5 %}\n <tr id=\"{{emails.id}}\">\n <td>\n {{count}}\n {% set count= count+1 %}\n </td>\n <td>\n {{emails.sent_date.strftime('%m/%d/%Y')}}\n </td>\n <td>\n {{emails.subject}}\n </td>\n <td>\n {{emails.sender.first_name}}&nbsp;{{emails.sender.last_name}}\n </td>\n </tr>\n {% endif %}\n {% endfor %}\n </tbody>\n </table>\n </div>\n <div class=\"col-md-8\">\n <div class=\"well well-sm\">\n <div class=\"row\">\n <div class=\"col-md-12\">\n <div class=\"email_contents\">\n </div>\n </div>\n <div class=\"col-md-6\">\n <fieldset disabled>\n <div class=\"form-group\">\n <label for=\"mentor_name\">\n To:</label>\n <input type=\"text\" id=\"disabledTextInput\" class=\"form-control\" placeholder=\"{{ment_data.first_name}}&nbsp;{{ment_data.last_name}}\">\n </div>\n </fieldset>\n <fieldset disabled>\n <div class=\"form-group\">\n <label for=\"user_name\">\n From:</label>\n <input type=\"text\" id=\"disabledTextInput\" class=\"form-control\" placeholder=\"{{user_data.first_name}}&nbsp;{{user_data.last_name}}\">\n </div>\n </fieldset>\n <div class=\"form-group\">\n <label for=\"subject\">\n Subject</label>\n <input type=\"text\" class=\"form-control\" name=\"subject\" id=\"subject\" placeholder=\"Can you be my mentor?\" required=\"required\" />\n </div>\n </div>\n <div class=\"col-md-6\">\n <div class=\"form-group\">\n <label for=\"name\">\n Message</label>\n <textarea name=\"message\" id=\"message\" class=\"form-control\" rows=\"9\" cols=\"25\" required=\"required\"\n placeholder=\"Message\"></textarea>\n </div>\n </div>\n <div class=\"form-group\">\n <div class=\"col-md-12\">\n <button type=\"submit\" class=\"btn btn-primary pull-right\" id=\"btnEmail\">\n Send Message</button>\n </div>\n </div>\n </div>\n <input type = \"hidden\" name=\"mentor_id\" value=\"{{ment_data.linkedin_id}}\"/>\n </div>\n </div>\n </div>\n </div>\n </form>\n <script>\n function email_detail(result) {\n $(\".email_contents\").html(\n '<div class=\"container\">'+\n '<div class=\"row clearfix\">'+\n '<div class=\"col-md-7 column\">'+\n '<div class=\"list-group\">'+\n '<h5>From:&nbsp;'+result.sender.first_name+'&nbsp;'+result.sender.last_name+' </h5>'+\n '<div class=\"list-group-item\">'+\n '<h4>Subject:&nbsp;<small>'+result.subject+'</small></h4>'+\n '</div>'+\n '<div class=\"list-group-item\">'+\n '<h4 class=\"list-group-item-heading text-right\">'+\n '<small>'+result.sent_date+'</small></h4><br>'+\n '<p class=\"list-group-item-text\">'+\n result.text_body+\n '</p>'+\n '</div>'+\n '</div>'+\n '</div>'+\n '</div>'+\n '</div>')\n }\n\n $(document).ready(function (){\n $(\"#myTab .active\").removeClass(\"active\");\n $(\"#myTab .email\").addClass(\"active\");\n \n\n $(\"tbody tr\").click(function(){\n var emailId = $(this).attr('id');\n \n $.getJSON(\"/email_detail/\"+emailId,function(result){\n email_detail(result) \n })\n })\n });\n\n </script>\n {% endif %}\n </body>\n </html>\n {% endblock %}" }, { "alpha_fraction": 0.7448979616165161, "alphanum_fraction": 0.7448979616165161, "avg_line_length": 18.600000381469727, "blob_id": "f3abd6405bdba6abd02980cf5ffe38eccbe1ab4c", "content_id": "8057558dded616c4058b5eecccf54e8924c2e5cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 30, "num_lines": 5, "path": "/server.py", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "from Project import app\n# app.run(debug=True)\napp.run(debug=True)\n\napp.secret_key = 'development'\n" }, { "alpha_fraction": 0.6033433675765991, "alphanum_fraction": 0.6107387542724609, "avg_line_length": 37.29203414916992, "blob_id": "80039472ef43b1e400b3af2e91ae5a15f1a52be6", "content_id": "bea1dd84baf5c14ec7ace12b51bbb01af071da4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12981, "license_type": "no_license", "max_line_length": 96, "num_lines": 339, "path": "/Project/tabledef.py", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import Column, Integer, String, Boolean, Text, DateTime\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nimport pdb\nimport os\n\nDATABASE_URL = os.environ.get(\"DATABASE_URL\", \"sqlite:///mentoring.db\")\nengine = create_engine(DATABASE_URL, echo=False)\ndbsession = scoped_session(sessionmaker(bind=engine, autocommit=False, autoflush=False))\n\nBase = declarative_base()\nBase.query = dbsession.query_property()\n\n\n### Class declarations\nclass User(Base):\n __tablename__ = \"users\"\n\n # use linkedin ID, therefore never duplicating a user\n linkedin_id = Column(String(50), primary_key = True)\n linkedintoken = Column(String(50), nullable = True)\n new_user = Column(Boolean, nullable = True)\n first_name = Column(String(64), nullable = True)\n last_name = Column(String(64), nullable = True)\n email = Column(String(255), nullable = True)\n #~~~# Data From Additional Info Page\n mentor = Column (Boolean, nullable = True)\n age = Column(String(50), nullable = True)\n gender = Column(String(50), nullable = True)\n description = Column(String(1000), nullable = True)\n #~~~#\n industry = Column(String(64), nullable = True)\n headline = Column(String(100), nullable = True)\n picture_url = Column(String(200), nullable = True)\n certifications = Column(String(200), nullable = True)\n summary = Column(String(500), nullable=True)\n\n educations = relationship(\"Education\")\n positions = relationship(\"Position\")\n \ndef import_linkedin_user(data):\n user = User();\n # parsing siteStandardProfileRequest to get authToken\n user.linkedin_id = data.get('id',None)\n user.new_user = True\n token = data.get('siteStandardProfileRequest', None)\n if token != None:\n token_data = token['url']\n start = token_data.find('authToken=')+10\n end = token_data.find('=api', start)\n user.linkedintoken = token_data[start:end]\n\n user.first_name = data.get('firstName', None)\n user.last_name = data.get('lastName', None)\n user.email = data.get('emailAddress', None)\n user.industry = data.get('industry', None)\n user.headline = data.get('headline',None)\n\n \n educations = data.get('educations',None)\n education_models = []\n # pdb.set_trace()\n ed_values = educations.get('values',None)\n if ed_values != None:\n for entry in ed_values:\n education = Education()\n education.linkedin_id = user.linkedin_id\n if 'startDate' in entry:\n edstartyear = entry['startDate']['year']\n # print edstartyear\n education.educations_start_year = edstartyear\n if 'endDate' in entry:\n edendyear = entry['endDate']['year']\n # print edendyear\n education.educations_end_year = edendyear\n if 'schoolName' in entry:\n schlname = entry['schoolName']\n # print schlname\n education.educations_school_name = schlname\n if 'fieldOfStudy' in entry:\n edfield = entry['fieldOfStudy']\n # print edfield\n education.educations_field_of_study = edfield\n if 'degree' in entry:\n eddegree = entry['degree']\n # print eddegree\n education.educations_degree = eddegree\n education_models.append(education)\n\n positions = data.get('positions',None)\n position_models = []\n pos_values = positions.get('values',None)\n if pos_values != None:\n for entry in pos_values:\n position = Position()\n position.linkedin_id = user.linkedin_id\n if 'startDate' in entry:\n posstartyear = entry['startDate']['year']\n # print posstartyear\n position.positions_start_year = posstartyear\n if 'endDate' in entry:\n posendyear = entry['endDate']['year']\n # print posendyear\n position.positions_end_year = posendyear\n if 'title' in entry:\n postitle = entry['title']\n # print postitle\n position.positions_title = postitle\n if 'company' in entry:\n co_entry = entry['company']\n if 'name' in co_entry:\n print \"~~~~~~~~~~~~~~~~~~~~~~ company name\"\n print entry\n print entry['company']\n coname = entry['company']['name']\n print coname\n position.positions_company_name = coname\n position_models.append(position)\n\n cert = data.get('certifications',None)\n if cert != None:\n cert_name = cert['values'][0]['name']\n user.certifications = cert_name\n\n mentor_topics = MentoreeTopic()\n mentor_topics.linkedin_id = user.linkedin_id\n\n user.summary = data.get('summary',None)\n user.picture_url = data.get('pictureUrl', None)\n\n current_user_id = user.linkedin_id\n # print \"~~!!^_^!!~~\"\n existing_user = dbsession.query(User).filter_by(linkedin_id = current_user_id).first()\n if existing_user == None:\n dbsession.add(user)\n dbsession.add(mentor_topics)\n\n for model in education_models:\n # print \"model\"\n # print model\n dbsession.add(model)\n\n for models in position_models:\n dbsession.add(models)\n\n dbsession.commit()\n\n return user\n\ndef update_linkedin_user(data):\n user = dbsession.query(tabledef.User).filter_by(linkedin_id=session['linkedin_id']).first();\n # parsing siteStandardProfileRequest to get authToken\n user.linkedin_id = data.get('id',None)\n user.new_user = True\n token = data.get('siteStandardProfileRequest', None)\n if token != None:\n token_data = token['url']\n start = token_data.find('authToken=')+10\n end = token_data.find('=api', start)\n user.linkedintoken = token_data[start:end]\n\n user.first_name = data.get('firstName', None)\n user.last_name = data.get('lastName', None)\n user.email = data.get('emailAddress', None)\n user.industry = data.get('industry', None)\n user.headline = data.get('headline',None)\n\n \n educations = data.get('educations',None)\n education_models = []\n # pdb.set_trace()\n ed_values = educations.get('values',None)\n if ed_values != None:\n for entry in ed_values:\n education = Education()\n education.linkedin_id = user.linkedin_id\n if 'startDate' in entry:\n edstartyear = entry['startDate']['year']\n # print edstartyear\n education.educations_start_year = edstartyear\n if 'endDate' in entry:\n edendyear = entry['endDate']['year']\n # print edendyear\n education.educations_end_year = edendyear\n if 'schoolName' in entry:\n schlname = entry['schoolName']\n # print schlname\n education.educations_school_name = schlname\n if 'fieldOfStudy' in entry:\n edfield = entry['fieldOfStudy']\n # print edfield\n education.educations_field_of_study = edfield\n if 'degree' in entry:\n eddegree = entry['degree']\n # print eddegree\n education.educations_degree = eddegree\n education_models.append(education)\n\n positions = data.get('positions',None)\n position_models = []\n pos_values = positions.get('values',None)\n if pos_values != None:\n for entry in pos_values:\n position = Position()\n position.linkedin_id = user.linkedin_id\n if 'startDate' in entry:\n posstartyear = entry['startDate']['year']\n # print posstartyear\n position.positions_start_year = posstartyear\n if 'endDate' in entry:\n posendyear = entry['endDate']['year']\n # print posendyear\n position.positions_end_year = posendyear\n if 'title' in entry:\n postitle = entry['title']\n # print postitle\n position.positions_title = postitle\n if 'company' in entry:\n co_entry = entry['company']\n if 'name' in co_entry:\n print \"~~~~~~~~~~~~~~~~~~~~~~ company name\"\n print entry\n print entry['company']\n coname = entry['company']['name']\n print coname\n position.positions_company_name = coname\n position_models.append(position)\n\n cert = data.get('certifications',None)\n if cert != None:\n cert_name = cert['values'][0]['name']\n user.certifications = cert_name\n\n mentor_topics = MentoreeTopic()\n mentor_topics.linkedin_id = user.linkedin_id\n\n user.summary = data.get('summary',None)\n user.picture_url = data.get('pictureUrl', None)\n\n current_user_id = user.linkedin_id\n # print \"~~!!^_^!!~~\"\n existing_user = dbsession.query(User).filter_by(linkedin_id = current_user_id).first()\n if existing_user == None:\n dbsession.add(user)\n dbsession.add(mentor_topics)\n\n for model in education_models:\n # print \"model\"\n # print model\n dbsession.add(model)\n\n for models in position_models:\n dbsession.add(models)\n\n dbsession.commit()\n\n return user\n\nclass Education(Base):\n __tablename__=\"educations\"\n id = Column(Integer, primary_key=True)\n linkedin_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = True)\n # educations\n educations_start_year = Column(Integer, nullable = True)\n educations_end_year = Column(Integer, nullable = True)\n educations_school_name = Column(String(200), nullable = True)\n educations_field_of_study = Column(String(200), nullable = True)\n educations_degree = Column(String(200), nullable = True)\n\n # ment_user = relationship(\"User\", backref=backref(\"educations\", order_by=id))\n\nclass Position(Base):\n __tablename__=\"positions\"\n id = Column(Integer, primary_key=True)\n linkedin_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = True)\n positions_start_year = Column(Integer, nullable = True)\n positions_end_year = Column(Integer, nullable = True)\n positions_company_name = Column(String(200), nullable = True)\n positions_industry = Column(String(200), nullable = True)\n positions_title = Column(String(200), nullable = True)\n\n # ment_user = relationship(\"User\", backref=backref(\"positions\", order_by=id))\n\nclass MentoreeTopic(Base):\n __tablename__ = \"mentoree_topics\"\n id = Column(Integer, primary_key=True)\n topic_id = Column(Integer, ForeignKey('topics.topic_id'), nullable=True)\n mentor_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable=True)\n\n ment_user = relationship(\"User\", backref=backref(\"mentoree_topics\", order_by=id))\n\nclass Topic(Base):\n __tablename__ = \"topics\"\n topic_id = Column(Integer, primary_key=True)\n title = Column(String(100), nullable=True)\n\nclass Endorsement(Base):\n __tablename__ = \"endorsements\"\n id = Column(Integer, primary_key=True)\n sender_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = False)\n receiver_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = False)\n title = Column(String(100), nullable=True)\n endorsements_text = Column(String(500), nullable=True)\n sent_date = Column(DateTime, nullable=True)\n\n sender = relationship(\"User\", primaryjoin=\"User.linkedin_id==Endorsement.sender_id\")\n receiver = relationship(\"User\", primaryjoin=\"User.linkedin_id==Endorsement.receiver_id\")\n\nclass Email(Base):\n __tablename__ = \"emails\"\n id = Column(Integer, primary_key=True)\n sender_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = False)\n receiver_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable = False)\n subject = Column(String(100), nullable=True)\n text_body = Column(String(50000), nullable=True)\n sent_date = Column(DateTime, nullable=True)\n\n sender = relationship(\"User\", primaryjoin=\"User.linkedin_id==Email.sender_id\")\n receiver = relationship(\"User\", primaryjoin=\"User.linkedin_id==Email.receiver_id\")\n\nclass Quote(Base):\n __tablename__ = \"quotes\"\n id = Column(Integer, primary_key=True)\n quote_author = Column(String(100), nullable=True)\n quote = Column(String(10000), nullable=True)\n\ndef createTable():\n Base.metadata.create_all(engine)\n\ndef main():\n \"\"\"In case we need this for something\"\"\"\n pass\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5792636871337891, "alphanum_fraction": 0.5837715864181519, "avg_line_length": 29.953489303588867, "blob_id": "d0b250a91f706de331320f66e4398f79105bcd3a", "content_id": "88c794cc9a74deaacf82829dd05c9cced5d7ee07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1331, "license_type": "no_license", "max_line_length": 73, "num_lines": 43, "path": "/Project/email_.py", "repo_name": "daphnejwang/MentoreeMatch", "src_encoding": "UTF-8", "text": "#import tabledef\n#from tabledef import User, MentoreeTopic, Topic\nimport requests\nprint requests\n# import pdb\n\n\n# def send_message(recipient, subject, text):\n# return requests.post(\n# \"https://api.mailgun.net/v2/samples.mailgun.org/messages\",\n# auth=(\"api\", \"key-21q1narswc35vqr1u3f9upn3vf6ncbb9\"),\n# data={\"from\": \"Mentoree Match <[email protected]>\",\n# \"to\": recipient.email_address,\n# \"subject\": subject,\n# \"text\": \"Testing some Mailgun awesomness!\"})\n\ndef send_message():\n # pdb.set_trace()\n print dir(requests)\n x = requests.post(\n \"https://api.mailgun.net/v2/samples.mailgun.org/messages\",\n auth=(\"api\", \"key-21q1narswc35vqr1u3f9upn3vf6ncbb9\"),\n data={\"from\": \"Mentoree Match <[email protected]>\",\n \"to\": \"[email protected]\",\n \"subject\": \"testing email\",\n \"text\": \"Testing some Mailgun awesomness!\"})\n return 'hi'\n# key = 'YOUR API KEY HERE'\n# sandbox = 'YOUR SANDBOX URL HERE'\n# recipient = 'YOUR EMAIL HERE'\n\n# request_url = 'https://api.mailgun.net/v2/{0}/messages'.format(sandbox)\n# request = requests.post(request_url, auth=('api', key), data={\n# 'from': '[email protected]',\n# 'to': recipient,\n# 'subject': 'Hello',\n# 'text': 'Hello from Mailgun'\n# })\n\n# print 'Status: {0}'.format(request.status_code)\n# print 'Body: {0}'.format(request.text)\n\nsend_message()\n" } ]
15
JUNGEEYOU/QuickSort
https://github.com/JUNGEEYOU/QuickSort
65708afe935c33530d9b3b93be218dc303a50e0e
e514373aac945cada0d9a72b4e17bf63817134a0
16e5bc68d39c8e02f47cc0bdac83aeb5b6539f75
refs/heads/master
2020-06-06T11:59:01.741692
2019-06-19T14:34:14
2019-06-19T14:34:14
192,734,296
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4974619150161743, "alphanum_fraction": 0.5279187560081482, "avg_line_length": 23.625, "blob_id": "da8de3df268f3df1ee2d2452be047d310f0bf483", "content_id": "43ff8e0abbea72e5933c4bdc93b3930d0145fd90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 424, "license_type": "no_license", "max_line_length": 63, "num_lines": 16, "path": "/1_basic_quick_sort.py", "repo_name": "JUNGEEYOU/QuickSort", "src_encoding": "UTF-8", "text": "def quick_sort(array):\n \"\"\"\n 분할 정복을 이용한 퀵 정렬 재귀함수\n :param array:\n :return:\n \"\"\"\n if(len(array)<2):\n return array\n else:\n pivot = array[0]\n less = [i for i in array[1:] if i <= pivot]\n greater = [i for i in array[1:] if i > pivot]\n return quick_sort(less) + [pivot] + quick_sort(greater)\n\nexam1 = [4, 2, 1, 7, 10]\nprint(quick_sort(exam1))\n" }, { "alpha_fraction": 0.6301507353782654, "alphanum_fraction": 0.6452261209487915, "avg_line_length": 35.85185241699219, "blob_id": "2becaf02ac70036c937b8a3f5ed0aa2c0508dcb2", "content_id": "7f91786b57ea8b0a3a4f2702567f485c175136a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1721, "license_type": "no_license", "max_line_length": 137, "num_lines": 27, "path": "/README.md", "repo_name": "JUNGEEYOU/QuickSort", "src_encoding": "UTF-8", "text": "# 4. 퀵 정렬 \n## 4-1. 분할 정복\n> 분할 정복이란? 주어진 문제를 작은 사례로 나누고(Divide) 각각의 작은 문제들을 해결하여 정복 (Conquer)하며, 작은 사례에 대한 해답을 통합(Combine)하여 원래 사례의 해답을 구한다. 이런 분할 정복은 퀵 정렬에서 사용된다.\n\n- 분할(Divide): 문제 사례를 2개 이상으로 분리 한다.(재귀 단계)\n- 정복(Conquer): 문제가 더 분할이 가능하면(재귀 단계), 또 다시 분할. 그렇지 않으면 문제 푼다.\n- 통합(Combine): Conquer한 문제를 통합하여 원래의 문제의 답을 얻는다.\n* [분할 정복 문제. sum 함수 작성](https://github.com/JUNGEEYOU/QuickSort/blob/master/2_sum_function.py)\n* [분할 정복 문제. 가장 큰 수 찾기](https://github.com/JUNGEEYOU/QuickSort/blob/master/1_basic_quick_sort.py)\n---\n\n## 4-2. 퀵 정렬\n\n- 퀵 정렬 순서\n1. 분할(Divide): 피벗(기준 원소) 기준으로 왼쪽(피벗보다 작은 값) + 피벗 + 오른쪽(피벗보다 큰 값)으로 분리한다. \n2. 정복(Conquer): 문제가 분할 가능하면 재귀함수 호출, 그렇지 않으면 정렬한다. \n3. 통합(Combine): 정복한 문제를 통합한다. \n* [퀵 정렬 문제](https://github.com/JUNGEEYOU/QuickSort/blob/master/1_basic_quick_sort.py)\n\n---\n\n## 4-3. 빅오 표기법 복습\n\n> 퀵 정렬은 최선의 경우와 평균의 경우 O(n*logn)이며 최악의 경우 O(n^2) 이다.\n\n- 최선, 평균의 경우: O(n): 원소 비교 * O(logn) : 중간 값을 피벗으로 선택할 경우 logn 의 깊이가 나온다.\n- 최악의 경우: 가장 작은 혹은 가장 큰 값을 피벗으로 선택할 경우 O(n): 원소 비교 * O(n): 깊이\n" }, { "alpha_fraction": 0.41624364256858826, "alphanum_fraction": 0.46700507402420044, "avg_line_length": 13.142857551574707, "blob_id": "c1e4f2e9105bcd7b56708d623233e44550c4bcc9", "content_id": "f7b7ae08ec05a58a31c5f40b338b2d0ba6356748", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 41, "num_lines": 14, "path": "/2_sum_function.py", "repo_name": "JUNGEEYOU/QuickSort", "src_encoding": "UTF-8", "text": "def sum_func(arr):\n \"\"\"\n\n :param arr:\n :return:\n \"\"\"\n if len(arr) <1:\n return 0\n else:\n return arr[0] + sum_func(arr[1:])\n\n\narr1 = [1, 4, 5, 9]\nprint(sum_func(arr1))" }, { "alpha_fraction": 0.48051947355270386, "alphanum_fraction": 0.48051947355270386, "avg_line_length": 11.833333015441895, "blob_id": "8f8bbcaf885ebb6a424376ceaf464bf523cf259b", "content_id": "9266009eba46ea41b58059f8b36f0030e7999fec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 30, "num_lines": 6, "path": "/3_find_the_largest_num.py", "repo_name": "JUNGEEYOU/QuickSort", "src_encoding": "UTF-8", "text": "def find_the_largest_num(arr):\n \"\"\"\n\n :param arr:\n :return:\n \"\"\"\n" } ]
4
Terfno/tdd_challenge
https://github.com/Terfno/tdd_challenge
e57f042f08ca47e46e156902e01cb175166d88d0
9c10b364544245eb0757ae2e1a91f7c3bff6e541
51eac96b73986884c9fe0c52f9af62082493b342
refs/heads/master
2020-03-28T02:47:15.492541
2018-09-11T06:27:00
2018-09-11T06:27:00
147,596,466
0
0
null
2018-09-06T00:46:09
2018-09-07T08:26:07
2018-09-07T09:36:00
Python
[ { "alpha_fraction": 0.547968864440918, "alphanum_fraction": 0.6413137316703796, "avg_line_length": 36.32258224487305, "blob_id": "81f79a676af7bf1e80d2575160388d1a2f6a5220", "content_id": "cec0a61b302c753244ed14cc2f5da47529ccb6fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1157, "license_type": "no_license", "max_line_length": 103, "num_lines": 31, "path": "/test/calc_price.py", "repo_name": "Terfno/tdd_challenge", "src_encoding": "UTF-8", "text": "import sys\nimport io\nimport unittest\nfrom calc_price import Calc_price\nfrom di_sample import SomeKVSUsingDynamoDB\n\n\nclass TestCalculatePrice(unittest.TestCase):\n def test_calculater_price(self):\n calc_price = Calc_price()\n assert 24 == calc_price.calculater_price([10, 12])\n assert 62 == calc_price.calculater_price([40, 16])\n assert 160 == calc_price.calculater_price([100, 45])\n assert 171 == calc_price.calculater_price([50, 50, 55])\n assert 1100 == calc_price.calculater_price([1000])\n assert 66 == calc_price.calculater_price([20,40])\n assert 198 == calc_price.calculater_price([30,60,90])\n assert 40 == calc_price.calculater_price([11,12,13])\n\n def test_input_to_data(self):\n calc_price = Calc_price()\n \n input = io.StringIO('10,12,3\\n40,16\\n100,45\\n')\n calc_price.input_to_data(input)\n\n input = io.StringIO('1,25,3\\n40,16\\n\\n100,45\\n')\n calc_price.input_to_data(input)\n\n def test_calculater(self):\n calc_price = Calc_price()\n self.assertEqual(calc_price.calculater(io.StringIO('1,25,3\\n40,16\\n\\n100,45\\n')),[32,62,0,160])\n" }, { "alpha_fraction": 0.5319148898124695, "alphanum_fraction": 0.542553186416626, "avg_line_length": 17.799999237060547, "blob_id": "160b4e3ded52611eb5d11b914a994bd6cefe7d3f", "content_id": "048765317c2e0f2d589aae3a49870798671d9038", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/stack.py", "repo_name": "Terfno/tdd_challenge", "src_encoding": "UTF-8", "text": "class STACK():\n def isEmpty(self):\n return True\n def top(self):\n return 1\n" }, { "alpha_fraction": 0.476673424243927, "alphanum_fraction": 0.48377281427383423, "avg_line_length": 24.28205108642578, "blob_id": "c6ad238a1c26188cd49ac450325b757645177519", "content_id": "4421066cf6000d575208b001b134c454cd02b63c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 986, "license_type": "no_license", "max_line_length": 57, "num_lines": 39, "path": "/calc_price.py", "repo_name": "Terfno/tdd_challenge", "src_encoding": "UTF-8", "text": "import sys\n\nclass Calc_price():\n def calculater_price(self, values):\n round=lambda x:(x*2+1)//2\n sum = 0\n for value in values:\n sum += int(value)\n ans = sum * 1.1\n ans = int(round(ans))\n return ans\n\n def input_to_data(self, input):\n result = []\n lines = []\n input = input.read()\n input = input.split('\\n')\n for i in input:\n i = i.split(',')\n lines.append(i)\n lines.pop(-1)\n for i in lines:\n if i == [''] :\n result.append([])\n continue\n result.append(list(map(lambda x: int(x), i)))\n return result\n\n def calculater(self,input):\n result = []\n input = self.input_to_data(input)\n for i in input:\n result.append(self.calculater_price(i))\n \n return result\n\nif __name__ == '__main__':\n calc_price = Calc_price()\n print(calc_price.calculater(sys.stdin))\n" }, { "alpha_fraction": 0.6631944179534912, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 21.153846740722656, "blob_id": "63f8d1dccca159827808e6a3634d17131fa89b79", "content_id": "d0f3e0be26e925e38ff70bdac25fa5966110fdf5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 47, "num_lines": 13, "path": "/test/stack.py", "repo_name": "Terfno/tdd_challenge", "src_encoding": "UTF-8", "text": "import unittest\nfrom stack import STACK\n\nclass TestSTACK(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n stack=STACK()\n\n def test_isEmpty(self):\n self.assertEqual(stack.isEmpty(), True)\n\n def test_push_top(self):\n self.assertEqual(stack.top(),1)\n" } ]
4
ksoltan/robot_learning
https://github.com/ksoltan/robot_learning
9f4cd1693acac62ee0a51bbc5efb6cb531c09326
5c91445c7c60d34fb321acae8ee11f2e2d4719b3
c2b9d5b6395e325dbee0c30524d7af84a2e49ba1
refs/heads/master
2020-04-01T14:35:27.207922
2018-11-07T04:51:13
2018-11-07T04:51:13
153,300,348
0
0
null
2018-10-16T14:31:01
2018-10-16T03:03:09
2018-10-16T03:03:08
null
[ { "alpha_fraction": 0.8010083436965942, "alphanum_fraction": 0.8055774569511414, "avg_line_length": 150.11904907226562, "blob_id": "55ce5b8846c6f66b8fb7e00a01e1be5b078fdc94", "content_id": "04a58a494b99b1d039f50c15917c2a5e348816b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6367, "license_type": "no_license", "max_line_length": 777, "num_lines": 42, "path": "/README.md", "repo_name": "ksoltan/robot_learning", "src_encoding": "UTF-8", "text": "# Robot Learning: Person Detection\n## Project Goal\nThe goal of our project was to use the Neato robot's camera to interact with a human and play the game of tag. The game starts with a state machine that qualifies the Neato as either “it” or “not it.” The mode dictates whether or not the Neato is actively chasing the person in its view or driving away. Switching between the modes is toggled with the bump sensor.\n\nWe focused on the processing of camera data to locate a human in the image, outputting a position that the Neato would chase or avoid. The camera is first used to gather a training set of images to build a convolutional neural network for mapping a new image to a person’s position. This trained model would then be downloaded and used live on the Neato for locating the person and adjusting its linear and angular velocities appropriately. Due to challenges in collecting good data, our resulting model was not quite successful in calculating the position of the human in the image.\n\n# Approach\nTo identify the position of a person relative to the robot from an image, we trained a multi-layered neural network with four convolution/max-pool layers and a final flatten/dense layer to output an x and y coordinate. We collected a data set of images taken from the Neato's camera and the approximate center of mass of the person based on an average of laser scan points in the frame of view of the camera. Because the image and lidar sampling happened at different rates, before training, each image was paired with a scan result with a maximum timestamp difference of 0.025s. Additionally, the images were scaled down to 200 x 150 pixels instead of 680 x 480. You can find our training code [here](https://colab.research.google.com/drive/1UaE06H4dS8kt_A7o_D8_NWij7EhDyHtn).\n\nThe ground truth position of the person was taken as the average position of the laser scan points in the field of view of the camera which were less than 2 m away from the robot. Above, the red points are laser scan points, red arrows are the points contributing to person detection, and the green sphere represents the estimate of the person's position.\n![](https://github.com/ksoltan/robot_learning/blob/master/documentation/data_record_ml_tag_video.gif)\n\nThe resulting model was unfortunately not very accurate, even on its own training data. There was an extremely high variability in the predicted person positions, as seen in the graphs below:\n\n#### Training Data:\n![](https://github.com/ksoltan/robot_learning/blob/master/documentation/predicted_x_train.png)\n![](https://github.com/ksoltan/robot_learning/blob/master/documentation/predicted_y_train.png)\n\n#### Test Data:\n![](https://github.com/ksoltan/robot_learning/blob/master/documentation/predicted_x_test.png)\n![](https://github.com/ksoltan/robot_learning/blob/master/documentation/predicted_y_test.png)\n\nThe saliency map shows that the neural network was not identifying the key locations of the person in the image, explaining the model's discrepancy. The points of interest (more cyan) are in the correct general area, but are extremely dispersed:\n![](https://github.com/ksoltan/robot_learning/blob/master/documentation/saliency_many.png)\n\n## Design Decisions\nOne design decision we made was using the lidar to classify the position of the person instead of using a mouse to indicate the location of the person in the image. The mouse approach did not allow us to account for how the person's depth changes in the image as they move around the space. Additionally, outputting a position in the room's spatial coordinates had the additional advantage of a simpler conversion into angular and linear velocity commands to move the robot.\n\nAnother reason for using the lidar to label our training set was the goal of automating and parallelizing our image recording and classification. Both the data collection and labelling was done with ROS topics running on neato-hardware, which in an ideal world would make accurate data collection in large batches a fast and simple process.\n\n## Challenges\n- We started by training the model to identify a person in the image’s 2D space. However, bad mouse-tracking data and a generally poor data set for this type of relationship led to frustrating results that never made any valuable predictions.\n- We tried a couple different approaches to data logging and classifying the center of mass. This should have been much more simple but was not porting over from the warmup project as successfully as we would have hoped and led to delays.\n- We pivoted to focus 100% on the lidar-based classification and ran into many challenges related to image classification. A critical error was the “interpolated_scan” topic being unable to function correctly, creating the subsequent challenge of correlating lidar data and image data that was recorded at different frequencies.\n\n## Future Improvements\n- Experiment with different classification methods and tuning our dataset to be as clear and correlative to our labels as possible. Although we used a classic convolutional neural network approach, there may have been other parameters or paths to explore which could have worked better for our data.\n- If our model was functional, we would have dove further into the robot implementation. There are many design decisions and levels of depth we could have taken the on-robot implementation to. Having time to experiment with and develop this part of the project would have been another exciting challenge to handle.\n\n## Lessons Learned\n- Verifying and understanding large datasets. Building algorithms for both robust data collection, processing, and verification at the very beginning of the project would have saved us a ton of time at the end when we needed time to tune our neural net or rapidly take sets of data.\n- Being very deliberate with our decision making around the “black box” that is top-down machine learning. We definitely had a hard time locating failure modes between our different datasets, processing methods, and the neural net itself. Having a better understanding of how each of these pieces affect the performance of the model and what tuning knobs we have at our disposal would have encouraged us to tackle our data issues before our neural network problems and helped us to debug more efficiently, instead of running around in circles.\n" }, { "alpha_fraction": 0.6022794246673584, "alphanum_fraction": 0.6117115616798401, "avg_line_length": 41.94514846801758, "blob_id": "bb580689b020dee3fc82caaaf4cc6af77f5744de", "content_id": "5049fc804a96643e3883bb37e1cb6baaeb2cde58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10178, "license_type": "no_license", "max_line_length": 164, "num_lines": 237, "path": "/data_processing_utilities/scripts/ml_tag.py", "repo_name": "ksoltan/robot_learning", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom keras.models import load_model\nimport tensorflow as tensorflow\n\n# import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport math\n# import glob\n# from PIL import Image\n# from scipy.misc import imread, imresize\n\nimport rospy\nimport cv2 # OpenCV\nfrom sensor_msgs.msg import CompressedImage, LaserScan\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom geometry_msgs.msg import PoseArray, Point, PoseStamped, Pose, PoseWithCovarianceStamped\nfrom neato_node.msg import Bump\nimport tf\n\n# from data_processing_utilities.msgs import ImageScanStamped\n\nclass MLTag(object):\n # TODO: Add cmd_vel command based on where person supposedly is\n # TODO: Add logic if robot does not see person\n # TODO: Tag logic\n\n def __init__(self, model_name='convolutional_model_v5.h5'):\n rospy.init_node(\"ml_tag_node\")\n\n self.my_model = load_model(model_name)\n self.my_graph = tensorflow.get_default_graph()\n self.scan_ranges = []\n self.is_tagger = True # Switch state based on whether robot is tagging or running away\n self.got_scan = False\n self.ready_to_process = False\n\n self.camera_subscriber = rospy.Subscriber(\"/camera/image_raw/compressed\", CompressedImage, self.process_image)\n self.scan_subscriber = rospy.Subscriber(\"/scan\", LaserScan, self.process_scan)\n self.bump_subscriber = rospy.Subscriber(\"/bump\", Bump, self.process_bump)\n\n # Publisher for logging\n self.object_from_scan_publisher = rospy.Publisher(\"/object_from_scan\", PoseStamped, queue_size=10)\n\n # Transform\n self.tf_listener = tf.TransformListener()\n\n # Visuzaliations\n self.position_publisher = rospy.Publisher('/positions_pose_array', PoseArray, queue_size=10)\n self.position_pose_array = PoseArray()\n self.position_pose_array.header.frame_id = \"base_link\"\n\n # self.image_scan_publisher = rospy.Publisher('/image_scan_pose', ImageScanStamped, queue_size=10)\n # self.last_scan_msg = None\n # self.last_image_msg = None\n\n self.object_publisher = rospy.Publisher('/object_marker', Marker, queue_size=10)\n self.my_object_marker = Marker()\n self.my_object_marker.header.frame_id = \"base_link\"\n self.my_object_marker.color.a = 0.5\n self.my_object_marker.color.g = 1.0\n self.my_object_marker.type = Marker.SPHERE\n self.my_object_marker.scale.x = 0.25\n self.my_object_marker.scale.y = 0.25\n self.my_object_marker.scale.z = 0.25\n\n self.model_object_publisher = rospy.Publisher('/model_object_marker', Marker, queue_size=10)\n self.my_model_object_marker = Marker()\n self.my_model_object_marker.header.frame_id = \"base_link\"\n self.my_model_object_marker.color.a = 0.5\n self.my_model_object_marker.color.b = 1.0\n self.my_model_object_marker.type = Marker.SPHERE\n self.my_model_object_marker.scale.x = 0.25\n self.my_model_object_marker.scale.y = 0.25\n self.my_model_object_marker.scale.z = 0.25\n\n\n def process_image(self, compressed_image_msg):\n # Display compressed image:\n # http://wiki.ros.org/rospy_tutorials/Tutorials/WritingImagePublisherSubscriber\n #### direct conversion to CV2 ####\n # if(self.got_scan and not self.ready_to_process):\n np_arr = np.fromstring(compressed_image_msg.data, np.uint8)\n image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n\n # Show image\n cv2.imshow('cv_img', image_np)\n cv2.waitKey(2)\n\n # Resize image\n height, width = image_np.shape[:2]\n new_width = 200\n new_height = int(height * new_width * 1.0 / width)\n image_np_resized = cv2.resize(image_np, (new_width, new_height), interpolation = cv2.INTER_CUBIC)\n\n img_tensor = np.expand_dims(image_np_resized, axis=0) # Add 4th dimension it expects\n with self.my_graph.as_default():\n # Without using graph, it gives error: Tensor is not an element of this graph.\n # Could fix this by not doing image processing in the callback, and in the main run loop.\n # https://stackoverflow.com/questions/47115946/tensor-is-not-an-element-of-this-graph\n predicted = self.my_model.predict(img_tensor)\n # print(\"Model predict: x: {}, y:{}, theta: {}\".format(predicted[0][0], predicted[0][1], math.degrees(math.atan2(predicted[0][0], predicted[0][1]))))\n self.my_model_object_marker.pose.position.x = predicted[0][0]\n self.my_model_object_marker.pose.position.y = predicted[0][1]\n self.model_object_publisher.publish(self.my_model_object_marker)\n # self.last_image_msg = compressed_image_msg\n # self.got_scan = False\n # self.ready_to_process = True\n\n def process_scan(self, scan_msg):\n self.scan_ranges = scan_msg.ranges\n self.visualize_positions_in_scan()\n self.visualize_object_from_scan()\n # if(not self.ready_to_process):\n # self.scan_ranges = scan_msg.ranges\n # self.last_scan_msg = scan_msg\n # self.got_scan = True\n\n def process_bump(self, bump_msg):\n pass\n\n def find_poses_in_scan(self):\n # Use front field of view of the robot's lidar to detect a person's x, y offset\n field_of_view = 40\n maximum_range = 2 # m\n\n # Cycle through ranges and filter out 0 or too far away measurements\n # Calculate the x, y coordinate of the point the lidar detected\n poses = []\n\n for angle in range(-1 * field_of_view, field_of_view):\n r = self.scan_ranges[angle]\n # print(\"angle: {}, r = {}\".format(angle, r))\n if(r > 0 and r < maximum_range):\n try:\n # Confirm that transform exists.\n (trans,rot) = self.tf_listener.lookupTransform('/base_link', '/base_laser_link', rospy.Time(0))\n # Convert angle to radians. Adjust it to compensate for lidar placement.\n theta = math.radians(angle + 180)\n x_pos = r * math.cos(theta)\n y_pos = r * math.sin(theta)\n\n # Use transform for correct positioning in the x, y plane.\n p = PoseStamped()\n p.header.stamp = rospy.Time.now()\n p.header.frame_id = 'base_laser_link'\n\n p.pose.position.x = x_pos\n p.pose.position.y = y_pos\n\n p_model = PoseStamped()\n p_model.header.stamp = rospy.Time.now()\n p_model.header.frame_id = 'base_laser_link'\n\n p_model.pose.position.x = self.my_model_object_marker.pose.position.x\n p_model.pose.position.y = self.my_model_object_marker.pose.position.y\n\n\n p_base_link = self.tf_listener.transformPose('base_link', p)\n p_model_base_link = self.tf_listener.transformPose('base_link', p_model)\n # print(\"{}, {} at angle {}\".format(p_base_link.pose.position.x, p_base_link.pose.position.y, math.degrees(theta)))\n print(\"Lidar predict: x: {}, y:{}, theta: {}\".format(p_base_link.pose.position.x, p_base_link.pose.position.y, math.degrees(theta)))\n print(\"Lidar predict: x: {}, y:{}, theta: {}\".format(p_model_base_link.pose.position.x, p_model_base_link.pose.position.y, math.degrees(theta)))\n\n # Only care about the pose\n poses.append(p_base_link.pose)\n\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue\n # Return a list of poses (no header)\n return poses\n\n def find_object_from_scan(self):\n # Get the x, y coordinates of objects in the field of view\n poses = self.find_poses_in_scan()\n min_points_for_object = 3\n\n if(len(poses) < min_points_for_object):\n # Not enough points\n pose_stamped = PoseStamped()\n pose_stamped.header.stamp = rospy.Time.now()\n pose_stamped.header.frame_id = \"base_link\"\n self.object_from_scan_publisher.publish(pose_stamped)\n return (0, 0)\n\n # Not the most efficient list traversal (double), but we don't have that many values.\n center_of_mass = (sum([pose.position.x for pose in poses]) * 1.0 / len(poses),\n sum([pose.position.y for pose in poses]) * 1.0 / len(poses))\n\n pose_stamped = PoseStamped()\n pose_stamped.header.stamp = rospy.Time.now()\n pose_stamped.header.frame_id = \"base_link\"\n pose_stamped.pose.position.x = center_of_mass[0]\n pose_stamped.pose.position.y = center_of_mass[1]\n self.object_from_scan_publisher.publish(pose_stamped)\n\n return center_of_mass\n\n def visualize_positions_in_scan(self):\n poses = self.find_poses_in_scan()\n\n self.position_pose_array.poses = poses\n self.position_publisher.publish(self.position_pose_array)\n\n def visualize_object_from_scan(self):\n x, y = self.find_object_from_scan()\n\n self.my_object_marker.header.stamp = rospy.Time.now()\n self.my_object_marker.pose.position.x = x\n self.my_object_marker.pose.position.y = y\n\n self.object_publisher.publish(self.my_object_marker)\n\n def run(self):\n # while not rospy.is_shutdown():\n # if(self.ready_to_process):\n # self.visualize_positions_in_scan()\n # # Publish an image/scan msg\n # self.publish_image_scan()\n rospy.spin()\n\n # def publish_image_scan(self):\n # msg = ImageScanStamped()\n # msg.header.stamp = rospy.Time.now()\n # msg.image = self.last_image_msg\n # msg.scan = self.last_scan_msg\n # x, y = self.visualize_object_from_scan()\n # msg.pose.position.x = x\n # msg.pose.position.y = y\n # self.image_scan_publisher.publish(msg)\n # self.ready_to_process = False\n\nif __name__ == \"__main__\":\n tag = MLTag()\n tag.run()\n" }, { "alpha_fraction": 0.5411692261695862, "alphanum_fraction": 0.5547550320625305, "avg_line_length": 28.621952056884766, "blob_id": "a33330df6a8b4754c7248ac9007236ae95f054ee", "content_id": "b67976ca045c7f592c250755afc8debf1e3f3d69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4858, "license_type": "no_license", "max_line_length": 121, "num_lines": 164, "path": "/data_preparation/clean_process.py", "repo_name": "ksoltan/robot_learning", "src_encoding": "UTF-8", "text": "# Given a folder of images and a metadata.csv file, output an npz file with an imgs, spatial x, and spatial x dimensions.\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport glob\nimport math\nfrom PIL import Image\nfrom scipy.misc import imread, imresize\n\ndef process_scan(ranges):\n \"\"\"\n process a 360 point set of laser data in a certain viewing range.\n\n inputs: list of ranges from the laser scan\n output: lists of x and y points within viewing angle and range\n \"\"\"\n max_r = 1.5\n view_angle = int(70 / 2) # only look at points in the forwardmost 70 degs\n infront = range(-view_angle, view_angle)\n # ranges[0:int(view_angle/2)]+ranges[int(360-view_angle/2):360]\n xs = []\n ys = []\n\n # loop through and grab points in desired view range\n for i in range(-view_angle, view_angle):\n if ranges[i] != 0:\n theta = math.radians(90 + i)\n r = ranges[i]\n xf = r * math.cos(theta)\n yf = r * math.sin(theta)\n xs.append(xf)\n ys.append(yf)\n\n return(xs, ys)\n\ndef center_of_mass(x, y):\n \"\"\"\n compute the center of mass in a lidar scan.\n\n inputs: x and y lists of cleaned laser data\n output: spatial x and y coordinate of the CoM\n \"\"\"\n if len(x) < 4: # if below a threshold of grouped points\n return(np.inf, np.inf)\n else:\n x_cord = sum(x)/len(x)\n y_cord = sum(y)/len(y)\n\n plt.plot(x, y, 'ro')\n plt.plot(0,0, 'bo', markersize=15)\n plt.plot(x_cord, y_cord, 'go', markersize=15)\n plt.ylim(-2,2)\n plt.xlim(-2,2) # plt.show()\n return (x_cord, y_cord)\n\ndef resize_image(img_name):\n \"\"\"\n load and resize images for the final numpy array.\n\n inputs: filename of an image\n output: resized image as a numpy array\n \"\"\"\n # new size definition\n width = 200\n height = 150\n new_size = width, height\n\n img = Image.open(img_name, 'r')\n resize = img.resize(new_size)\n array = np.array(resize)\n return array\n\ndef find_corresponding_scan(image_time, scan_times, start_idx):\n max_tolerance = 0.015\n while start_idx < len(scan_times):\n diff = abs(scan_times[start_idx] - image_time)\n # print(\"Idx: {}, Diff: {}\".format(start_idx, abs(scan_times[start_idx] - image_time)))\n if diff < max_tolerance:\n return (start_idx, diff)\n start_idx += 1\n return None\n\n\nif __name__ == '__main__':\n # location definitions\n # # Katya\n data_path = '/home/ksoltan/catkin_ws/src/robot_learning/data_processing_utilities/data/'\n # Anil\n # data_path ='/home/anil/catkin_ws/src/comprobo18/robot_learning/data_processing_utilities/data/'\n folder_name = 'anil_shining_2'\n # folder_name = 'latest_person'\n\n path = data_path + folder_name + '/'\n metadata_csv = data_path + folder_name + '/' + 'metadata.csv'\n\n # image definitions\n os.chdir(path)\n filenames = glob.glob(\"*.jpg\")\n\n # pull from metadata\n array_form = np.genfromtxt(metadata_csv, delimiter=\",\")\n lidar_all = array_form[:,6:366]\n pic_times = array_form[:,0]\n lidar_times = array_form[:,-1]\n\n images = []\n object_xs = []\n object_ys = []\n\n i_s = []\n j_s = []\n # loop through all images\n for i in range(lidar_all.shape[0]-26):\n for j in range(i,i+25):\n delta = lidar_times[j]-pic_times[i]\n if abs(delta) < 0.025:\n i_s.append(i)\n j_s.append(j)\n\n # print('pic', i)\n # print('lid', j)\n # print('delta', delta)\n # print('------------------')\n break\n\n imgs_a = []\n xs_a = []\n ys_a = []\n\n for i in range(len(i_s)):\n img_ind = i_s[i]\n lid_ind = j_s[i]\n\n scan_now = lidar_all[lid_ind] # scan data for this index\n\n # process if scan isn't NaN (laser hasn't fired yet)\n if not np.isnan(scan_now[10]):\n points_x, points_y = process_scan(scan_now)\n xp, yp = center_of_mass(points_x, points_y)\n\n # only add if CoM is defined, AKA object is in frame\n if xp != np.inf:\n # print(pic_times[img_ind]-lidar_times[lid_ind], xp, yp, round(math.degrees(math.atan2(xp, yp)),2))\n\n # add image\n img_name = filenames[img_ind]\n img_np = resize_image(img_name)\n imgs_a.append(img_np)\n\n # add object position\n xs_a.append(xp)\n ys_a.append(yp)\n\n # verify\n # plt.show()\n\n plt.imshow(img_np)\n # plt.show()\n\n print(len(imgs_a))\n # save all data\n save_path = data_path + folder_name + '_data' '.npz'\n np.savez_compressed(save_path, imgs=imgs_a, object_x=xs_a, object_y=ys_a)\n" }, { "alpha_fraction": 0.6708805561065674, "alphanum_fraction": 0.6787270903587341, "avg_line_length": 27.674999237060547, "blob_id": "98ebe141e9213019813db836ca3d149e4f64dac4", "content_id": "8d20252df372baa21460090c4f242903656998f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2294, "license_type": "no_license", "max_line_length": 128, "num_lines": 80, "path": "/data_preparation/image_processing.py", "repo_name": "ksoltan/robot_learning", "src_encoding": "UTF-8", "text": "# Given a folder of images and a metadata.csv file, output an npz file with an imgs, mouse_x, and mouse_y columns.\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport glob\nfrom PIL import Image\nfrom scipy.misc import imread, imresize\n\nfolder_name = 'ball_dataset_classroom'\n\n# Katya\ndata_path = '/home/ksoltan/catkin_ws/src/robot_learning/data_processing_utilities/data/'\n# Anil\n# data_path ='/home/anil/catkin_ws/src/comprobo18/robot_learning/data_processing_utilities/data/'\n\npath = data_path + folder_name + '/'\n\nmetadata_name = 'metadata.csv'\n\nos.chdir(path)\nfilenames = glob.glob(\"*.jpg\")\n\n# Get size of images\nfilename = filenames[0]\nsample_img = Image.open(filename, 'r')\nprint(\"height: {}, width: {}, aspect: {}\".format(sample_img.height, sample_img.width, 1.0 * sample_img.height/sample_img.width))\naspect = 1.0 * sample_img.height / sample_img.width\nwidth = 200\nheight = int(width*aspect)\nnew_size = width, height\n\n\n# Create numpy array of all x and y mouse positions\nMETADATA_CSV = data_path + folder_name + '/' + metadata_name\ndf = pd.read_csv(METADATA_CSV, ',')[['image_file_name', 'object_from_scan_x', 'object_from_scan_y']]\nprint(df.head())\nprint(df.info())\n\nimages = []\nobject_xs = []\nobject_ys = []\n# Loop through lidar predicted object positions and save only those that do not contain 0, 0\nfor index in range(len(df.object_from_scan_x)):\n x = df.object_from_scan_x[index]\n y = df.object_from_scan_y[index]\n if(x == 0.0 and y == 0.0):\n continue\n\n # Add image\n img_name = filenames[index]\n img = Image.open(img_name, 'r')\n resize = img.resize(new_size)\n array = np.array(resize)\n images.append(array)\n\n # Add object position\n object_xs.append(x)\n object_ys.append(y)\n\n\n#\n# # plt.imshow(data)\n# # plt.show()\n# index = 0\n# images = []\n# # Create numpy array of resized images\n# for name in filenames:\n# img = Image.open(name, 'r')\n# resize = img.resize(new_size)\n# array = np.array(resize)\n# # images[:,:,:,index] = array\n# images.append(array)\n# index += 1\n\n\nSAVE_FILENAME = data_path + folder_name + '_data' '.npz'\nnp.savez_compressed(SAVE_FILENAME, imgs=images, object_x=object_xs, object_y=object_ys)\ntest_data = np.load(SAVE_FILENAME)\nprint(test_data['object_x'].shape)\n" }, { "alpha_fraction": 0.5490501523017883, "alphanum_fraction": 0.5664901733398438, "avg_line_length": 32.79999923706055, "blob_id": "64168601f436c2d0d75ee5a4b3d1fad03baccf1d", "content_id": "0f462700ed509f775d8c0b3496380cd4e8692b05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3211, "license_type": "no_license", "max_line_length": 109, "num_lines": 95, "path": "/data_preparation/lidar_processing.py", "repo_name": "ksoltan/robot_learning", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"quick script for trying to pull spatial x, y from metadata\"\"\"\n\nfrom __future__ import print_function\nfrom geometry_msgs.msg import PointStamped, PointStamped, Twist\nfrom std_msgs.msg import Header\nfrom neato_node.msg import Bump\nfrom sensor_msgs.msg import LaserScan\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport pandas as pd\nimport time, numpy, math, rospy, statistics\n\ndef process_scan(ranges):\n \"\"\" process a 360 point set of laser data in a certain viewing range \"\"\"\n max_r = 1.5\n view_angle = 80 # only look at points in the forwardmost 70 degs\n infront = ranges[0:int(view_angle/2)]+ranges[int(360-view_angle/2):360]\n\n xs = []\n ys = []\n\n # loop through and grab points in desired view range\n for i in range(len(ranges)):\n if i<len(infront):\n if infront[i] !=0 and infront[i] < max_r:\n if i >= view_angle/2:\n theta = math.radians(90-(view_angle-i))\n else:\n theta = math.radians(i+90)\n r = infront[i]\n xf = math.cos(theta)*r\n yf = math.sin(theta)*r\n xs.append(xf)\n ys.append(yf)\n\n return(xs, ys)\n\ndef center_of_mass(x, y):\n \"\"\" with arguments as lists of x and y values, compute center of mass \"\"\"\n if len(x) < 4: # if below a threshold of grouped points\n return(0, 0) # TODO pick a return value for poor scans\n\n x_cord = sum(x)/len(x)\n y_cord = sum(y)/len(y)\n plt.plot(x_cord, y_cord, 'go', markersize=15)\n return (x_cord, y_cord)\n\nif __name__ == '__main__':\n path = '/home/anil/catkin_ws/src/comprobo18/robot_learning/data_processing_utilities/data/'\n folder = 'mydataset'\n look_in = path+folder + '/' # final path for metadata\n\n filename = 'metadata.csv'\n file_csv = look_in + filename\n\n array_form = numpy.genfromtxt(file_csv, delimiter=\",\")\n lidar_all = array_form[:, 6:366]\n lidar_label = []\n\n ind = 0\n for i in range(lidar_all.shape[0]):\n scan_now = lidar_all[i,:]\n\n if not numpy.isnan(scan_now[10]):\n points_x, points_y = process_scan(scan_now)\n xp, yp = center_of_mass(points_x, points_y)\n\n if xp != 0:\n # lidar_label[ind,0] = i\n # lidar_label[ind,1] = xp\n # lidar_label[ind,2] = yp\n # ind += 1\n lidar_label.append([i, xp, yp])\n print(ind, i, xp, yp, math.degrees(math.atan2(xp, yp)))\n\n # plt.plot(points_x, points_y, 'ro')\n # plt.plot(0,0, 'bo', markersize=15)\n # plt.show()\n\n lidar_label = numpy.array(lidar_label)\n print(lidar_label[:,0])\n SAVE_FILENAME = path + folder + '.npz'\n numpy.savez_compressed(SAVE_FILENAME, indices=lidar_label[:,0], xs=lidar_label[:,1], ys=lidar_label[:,2])\n \"\"\"\n # loop through images and get spatial x and y\n for i in range(lidar_all.shape[0]):\n lidar_here = lidar_all[i,:]\n xs, ys = process_scan(lidar_here)\n xp, yp = center_of_mass(xs, ys)\n\n lidar_label[i,0] = xp\n lidar_label[i,1] = yp\n print(xp, yp)\n \"\"\"\n" } ]
5
DSGDSR/pykedex
https://github.com/DSGDSR/pykedex
573f41a7cf7ee436f5b8f04709d507f3eff80f5d
8bfb332a68c763340db8693bc6384a116ee7d720
e8c1459a9f7dbc82562a5d4883506ed8d4dcef38
refs/heads/master
2020-03-23T16:15:59.137606
2019-01-24T21:39:05
2019-01-24T21:39:05
141,801,198
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.3834453523159027, "alphanum_fraction": 0.38698267936706543, "avg_line_length": 37.202701568603516, "blob_id": "47dd11bf5b5947f9fae36c0fc3602876d29a256c", "content_id": "40a4abb1b30805407f01659ef62a3e97fc7a5a2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2827, "license_type": "no_license", "max_line_length": 131, "num_lines": 74, "path": "/main.py", "repo_name": "DSGDSR/pykedex", "src_encoding": "UTF-8", "text": "import sys, requests, json\nfrom io import BytesIO\nfrom PIL import Image \nfrom pycolors import *\nfrom funcs import *\n\n\nprint( pycol.BOLD + pycol.HEADER + \"Welcome to the pokedex, ask for a pokemon: \" + pycol.ENDC, end=\"\" )\npokemon = input()\n\nwhile True:\n response = getPokemon(pokemon)\n\n if response.status_code == 404:\n print( \"This pokemon name is not valid, try again: \", end=\"\" )\n pokemon = input()\n continue\n \n data = response.json()\n\n\n #############################################################\n ########################### IMAGE ###########################\n #############################################################\n #imgburl = \"https://assets.pokemon.com/assets/cms2/img/pokedex/full/\" + str(data[\"id\"]) + \".png\"\n imgburl = \"https://img.pokemondb.net/artwork/\" + str(data[\"name\"]) + \".jpg\"\n imgr = requests.get(imgburl)\n img = Image.open(BytesIO(imgr.content))\n w, h = img.size\n img.resize((w, h)).show()\n\n\n #############################################################\n ######################### BASE INFO #########################\n #############################################################\n print( \"\\n\" + pycol.BOLD + pycol.UNDERLINE + data[\"name\"].capitalize() + pycol.ENDC + \" (ID: \" + str(data[\"id\"]) + \")\" + \"\\n\" +\n \"Weight: \" + str(data[\"weight\"]/10) + \"kg\\n\" +\n \"Height: \" + str(data[\"height\"]/10) + \"m\\n\" +\n \"Base experience: \" + str(data[\"base_experience\"]) )\n ########################### TYPES ###########################\n types, abilities = [], []\n for t in data[\"types\"]:\n types.append(t[\"type\"][\"name\"])\n print( \"Types: \" + ', '.join(types) )\n ######################### ABILITIES #########################\n for a in data[\"abilities\"]:\n ab = a[\"ability\"][\"name\"]\n if a[\"is_hidden\"]:\n ab = ab + \" (hidden ab.)\"\n abilities.append(ab)\n print( \"Abilities: \" )\n for ab in abilities:\n print( \" - \" + ab.capitalize() )\n ########################### STATS ###########################\n print( \"Stats: \" )\n for s in data[\"stats\"]:\n print(getStrBar((s[\"stat\"][\"name\"] + \":\").ljust(17), s[\"base_stat\"]))\n ######################## EVOL CHAIN #########################\n print(\"Evolutions:\\n\" + \" \" + getEvolChain(data[\"id\"]))\n print()\n #############################################################\n\n\n\n #############################################################\n ######################## END OF LOOP ########################\n #############################################################\n print( \"Do you wanna ask for another pokemon? (Y/n) \", end=\"\" )\n answer = input()\n if answer == 'n':\n break\n else:\n print( \"Enter the pokemon name: \", end=\"\" )\n pokemon = input()\n" }, { "alpha_fraction": 0.7252174019813538, "alphanum_fraction": 0.7539130449295044, "avg_line_length": 38.68965530395508, "blob_id": "c6c391840b6740bf7410a5157534f80e3aa84e1c", "content_id": "14d67aa21495d2158b04b646e6e82469f55afe49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1152, "license_type": "no_license", "max_line_length": 279, "num_lines": 29, "path": "/README.MD", "repo_name": "DSGDSR/pykedex", "src_encoding": "UTF-8", "text": "# ![](/images/pokeball.png?raw=true \"pika pika\") Pykedex\n\n[![Issues](https://img.shields.io/github/issues/DSGDSR/pykedex.svg)](https://github.com/DSGDSR/pykedex/issues)\n[![Stars](https://img.shields.io/github/stars/DSGDSR/pykedex.svg)](https://github.com/DSGDSR/pykedex/)\n[![Tweet](https://img.shields.io/twitter/url/https/github.com/DSGDSR/pykedex.svg?style=social)](https://twitter.com/intent/tweet?text=Look%20what%20I%20just%20found%3A%20%F0%9F%94%B4%20pykedex%20for%20Python,%20pokedex%20with%20comands%20at%20https%3A//github.com/DSGDSR/pykedex)\n[![Follow on twitter](https://img.shields.io/twitter/follow/dsgdsrmc.svg?style=social&logo=twitter)](https://twitter.com/DSGDSRMC)\n\n\n\nA python simple command line program working as a pokedex, using [PokéAPI](https://pokeapi.co) and [Pokémon Database](https://pokemondb.net) for images and sprites.\n\nMaintainer: [DSGDSR](https://github.com/DSGDSR)\n\n# Installation and usage\nClone repo and execute:\n\n```\n$ clone http://github.com/DSGDSR/pykedex.git\n$ cd pykedex\n$ python main.py\n```\n\n# Features\n\n* Pokemon stats and type\n* Evolutions\n* Abilities\n* Pokemon be, weight and height\n* Previews the sprite" }, { "alpha_fraction": 0.5650602579116821, "alphanum_fraction": 0.575903594493866, "avg_line_length": 28.678571701049805, "blob_id": "6e557e73f788e6adbba56169a7a6ee8fb3bc91b0", "content_id": "afeb37e741e08fca06d874fbd3b32d25a21990e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 864, "license_type": "no_license", "max_line_length": 73, "num_lines": 28, "path": "/funcs.py", "repo_name": "DSGDSR/pykedex", "src_encoding": "UTF-8", "text": "import requests, math\n\ndef getPokemon(pokemon):\n return requests.get(\"http://pokeapi.co/api/v2/pokemon/\"+pokemon)\n\ndef getEvolChain(id):\n url = \"http://pokeapi.co/api/v2/pokemon-species/\" + str(id)\n resp = requests.get(url)\n data = resp.json()\n evol = requests.get(data[\"evolution_chain\"][\"url\"]).json()[\"chain\"]\n evols = evol[\"species\"][\"name\"].capitalize()\n while evol[\"evolves_to\"]:\n evol = evol[\"evolves_to\"][0]\n evols = evols + \" -> \" + evol[\"species\"][\"name\"].capitalize()\n return evols\n\n\ndef getStrBar(stat, base):\n # ▓▓▓▓▓▓▓▓░░░░░░░ \n num = math.ceil(base/20)\n stat = stat.capitalize()\n statStr = \" - \" + stat + \"▓\" * num + \"░\" * (10-num) + \" \" + str(base)\n return statStr\n\n\nif __name__ == \"__main__\":\n print(getStrBar(\"speed\", 90))\n #print(getPokemon(\"pikachu\"))" } ]
3
lily7630/lexy_blog
https://github.com/lily7630/lexy_blog
4c51c051235fd8fd60964866fc736a1086644d5b
8f6697cea9e20a859b8fb29dd0344b032b02ba7f
3319f696345a687a3b3ce7aa2cf40d0615c898c7
refs/heads/master
2020-04-07T01:45:11.974862
2018-11-18T14:44:29
2018-11-18T14:44:29
157,950,864
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5488371849060059, "alphanum_fraction": 0.6209302544593811, "avg_line_length": 21.63157844543457, "blob_id": "600ed10c7aceb3c0ddc4f50a826e10d1c1446a9c", "content_id": "fd382dede17434adeafaa5cb2847b4ec3f8e5e18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 68, "num_lines": 19, "path": "/li_blog/migrations/0004_auto_20181118_2003.py", "repo_name": "lily7630/lexy_blog", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-18 12:03\n\nimport ckeditor_uploader.fields\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('li_blog', '0003_auto_20181118_1906'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='article',\n name='content',\n field=ckeditor_uploader.fields.RichTextUploadingField(),\n ),\n ]\n" }, { "alpha_fraction": 0.7386363744735718, "alphanum_fraction": 0.7386363744735718, "avg_line_length": 16.600000381469727, "blob_id": "1c2ef393153ac993ab414f5bc66b7c3d4753f546", "content_id": "57fb33d143ff8c0a8902f13f4f30feb796fef926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/li_blog/apps.py", "repo_name": "lily7630/lexy_blog", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass LiBlogConfig(AppConfig):\n name = 'li_blog'\n" }, { "alpha_fraction": 0.5544554591178894, "alphanum_fraction": 0.6014851331710815, "avg_line_length": 20.263158798217773, "blob_id": "29d79e6c00a8efbe53b23afcb8bf800a39092534", "content_id": "440fd8ca6bd3de6b8f64b6506928cd597e4719c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 50, "num_lines": 19, "path": "/li_blog/migrations/0003_auto_20181118_1906.py", "repo_name": "lily7630/lexy_blog", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-18 11:06\n\nimport ckeditor.fields\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('li_blog', '0002_remove_article_link'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='article',\n name='content',\n field=ckeditor.fields.RichTextField(),\n ),\n ]\n" }, { "alpha_fraction": 0.7650273442268372, "alphanum_fraction": 0.7732240557670593, "avg_line_length": 35.5, "blob_id": "74764719553840da24cb3cc9ffa8eb9c38326581", "content_id": "7583d467fa4b92ddf9563b8be01c2ec800c01621", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 77, "num_lines": 10, "path": "/li_blog/views.py", "repo_name": "lily7630/lexy_blog", "src_encoding": "UTF-8", "text": "from django.shortcuts import render,get_object_or_404\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.urls import reverse\nfrom .models import Article\n\n# Create your views here.\ndef index(request):\n article_list = Article.objects.all().order_by('-publish_date')\n\n return render(request,'li_blog/index.html',{'article_list':article_list})\n\n" }, { "alpha_fraction": 0.5063291192054749, "alphanum_fraction": 0.5664557218551636, "avg_line_length": 17.58823585510254, "blob_id": "d803ad0190b101128ca7214c56398bbed172bce4", "content_id": "0861dad2603f59c588e1dd9f92f558bf05f16bb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "no_license", "max_line_length": 47, "num_lines": 17, "path": "/li_blog/migrations/0002_remove_article_link.py", "repo_name": "lily7630/lexy_blog", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.3 on 2018-11-17 10:43\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('li_blog', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='article',\n name='link',\n ),\n ]\n" }, { "alpha_fraction": 0.7070357799530029, "alphanum_fraction": 0.7174164056777954, "avg_line_length": 26.967741012573242, "blob_id": "d632021afcdf928e9399f8b4fe7833ac0dd3d316", "content_id": "f08dc77547d726bca30f9bd99b9239eb5f111a9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 867, "license_type": "no_license", "max_line_length": 67, "num_lines": 31, "path": "/li_blog/models.py", "repo_name": "lily7630/lexy_blog", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom ckeditor.fields import RichTextField\nfrom ckeditor_uploader.fields import RichTextUploadingField\n\n# Create your models here.\nclass Category(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return self.name\n\n\nclass Article(models.Model):\n title = models.CharField(max_length=100)\n publish_date = models.DateTimeField()\n # content = models.TextField()\n content = RichTextUploadingField()\n author = models.ForeignKey(User,on_delete=models.CASCADE)\n category = models.ForeignKey(Category,on_delete=models.CASCADE)\n tag = models.ManyToManyField(Tag,blank=True)\n\n def __str__(self):\n return self.title\n" }, { "alpha_fraction": 0.7714285850524902, "alphanum_fraction": 0.7714285850524902, "avg_line_length": 16.5, "blob_id": "0385f2430129d7e7979d8967f61d30040bb8299f", "content_id": "51bbea0112431c9e0c193a3e5ff8b51fb1f50d48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 49, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/README.md", "repo_name": "lily7630/lexy_blog", "src_encoding": "UTF-8", "text": "# lexy_blog\n基于 Python Django 的个人博客\n" } ]
7
tbohne/AoC18
https://github.com/tbohne/AoC18
4bb39de1962e25bd2154d61ab93da3168bc9d7c5
75cf3ac0f6db9a3e83fd694a93b7bc64d48c8983
62dfc91e73cf99767386c2d8304a6f2bb914ecb3
refs/heads/master
2020-04-08T20:53:24.097386
2018-12-08T12:18:24
2018-12-08T12:18:24
159,719,142
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48389095067977905, "alphanum_fraction": 0.5061957836151123, "avg_line_length": 28.88888931274414, "blob_id": "dfa65443e205192467c73f038612c003b686037b", "content_id": "1b9fc29b4d287ba45dfb70b109d55769da4708b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1614, "license_type": "no_license", "max_line_length": 73, "num_lines": 54, "path": "/day3/main.py", "repo_name": "tbohne/AoC18", "src_encoding": "UTF-8", "text": "import sys\nimport copy\n\ndef parse_info(claim):\n offsets = claim.strip().split(\"@\")[1].split(\":\")[0].split(\",\")\n inches_from_left = int(offsets[0].strip())\n inches_from_top = int(offsets[1].strip())\n\n dims = claim.strip().split(\"@\")[1].split(\":\")[1].split(\"x\")\n width = int(dims[0].strip())\n height = int(dims[1].strip())\n\n return (inches_from_left, inches_from_top, width, height)\n\ndef part_one(square, input):\n collision_cnt = 0\n\n for claim in input:\n info = parse_info(claim)\n\n for i in range(info[1], info[1] + info[3]):\n for j in range(info[0], info[0] + info[2]):\n if square[i][j] == \"#\":\n square[i][j] = \"X\"\n collision_cnt += 1\n elif square[i][j] == \".\":\n square[i][j] = \"#\"\n\n print(\"sol p1: \" + str(collision_cnt))\n return square\n\ndef part_two(filled_square, input):\n for claim in input:\n info = parse_info(claim)\n overlapping = False\n\n for i in range(info[1], info[1] + info[3]):\n if overlapping:\n break\n for j in range(info[0], info[0] + info[2]):\n if filled_square[i][j] == \"X\":\n overlapping = True\n break\n\n if not overlapping:\n print(\"sol p2: \" + claim.split(\"#\")[1].split(\"@\")[0].strip())\n\nif __name__ == '__main__':\n input = sys.stdin.readlines()\n lst = [\".\" for _ in range(0, 1000)]\n square = [copy.copy(lst) for _ in range(0, 1000)]\n\n filled_square = part_one(square, input)\n part_two(filled_square, input)\n" }, { "alpha_fraction": 0.46633341908454895, "alphanum_fraction": 0.4792708158493042, "avg_line_length": 28.57391357421875, "blob_id": "541a5e2b8f066424dfe7210a993270b64d7d3229", "content_id": "b133bdeae93e0ac9156e337d99e28fb909481b77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3401, "license_type": "no_license", "max_line_length": 166, "num_lines": 115, "path": "/day7/p2.py", "repo_name": "tbohne/AoC18", "src_encoding": "UTF-8", "text": "import sys\nimport copy\nfrom string import ascii_lowercase\n\ndef step_time(letter, sample):\n if not sample:\n return 60 + ord(letter) - 64\n else:\n return ord(letter) - 64\n\ndef get_names():\n names = dict()\n cnt = 0\n for i in ascii_lowercase:\n if cnt == len(input) - 1:\n break\n names[i.upper()] = []\n cnt += 1\n return names\n\ndef delete_item(item):\n for i in names.keys():\n if i == item:\n del names[i]\n break\n\n for i in names.keys():\n if item in names[i]:\n names[i].remove(item)\n\ndef get_waiting_lists(names):\n waiting_lists = []\n for i in names.keys():\n waiting_lists.append((names[i], i))\n return waiting_lists\n\ndef get_admissible_item(waiting_lists):\n\n tmp = copy.copy(waiting_lists)\n valid = False\n\n while not valid:\n valid = True\n if len(tmp) == 0:\n return None\n tmp_best = min(tmp)\n\n if len(tmp_best[0]) == 0:\n for w in workers:\n if w[2] == tmp_best[1]:\n valid = False\n else:\n valid = False\n\n if not valid:\n tmp.remove(tmp_best)\n\n return tmp_best[1]\n\nif __name__ == '__main__':\n\n input = sys.stdin.readlines()\n num_of_workers = 5\n sample = False\n names = get_names()\n workers = []\n\n for i in range(0, num_of_workers):\n # (idx, available, working item, time_left)\n workers.append((i, True, \"\", 0))\n\n for i in input:\n before = i.strip().split(\"must\")[0].split(\"Step\")[1].strip()\n after = i.strip().split(\"can\")[0].split(\"step\")[1].strip()\n names[after].append(before)\n\n time = 0\n\n while len(names.keys()) > 0:\n\n for w in workers:\n # worker available\n if w[1]:\n waiting_lists = get_waiting_lists(names)\n item = get_admissible_item(waiting_lists)\n if item == None:\n pass\n # print(\"no item available for worker\" + str(w[0]))\n else:\n workers[workers.index(w)] = (w[0], False, item, step_time(item, sample))\n # print(\"time \" + str(time) + \" worker\" + str(w[0]) + \" starts to work on item \" + str(item) + \" needs time: \" + str(step_time(item, sample)))\n # worker busy\n else:\n time_left = w[3] - 1\n if time_left != 0:\n workers[workers.index(w)] = (w[0], False, w[2], time_left)\n else:\n delete_item(str(w[2]))\n # print(\"time \" + str(time) + \" worker\" + str(w[0]) + \" finished working on item \" + str(w[2]))\n\n waiting_lists = get_waiting_lists(names)\n item = get_admissible_item(waiting_lists)\n\n if item == None:\n workers[workers.index(w)] = (w[0], True, \"\", 0)\n # print(\"no item available for worker\" + str(w[0]))\n else:\n workers[workers.index(w)] = (w[0], False, item, step_time(item, sample))\n # print(\"time \" + str(time) + \" worker\" + str(w[0]) + \" starts to work on item \" + str(item) + \" needs time: \" + str(step_time(item, sample)))\n\n continue\n\n time += 1\n\n print(\"sol p2: \" + str(time - 1))\n" }, { "alpha_fraction": 0.48859649896621704, "alphanum_fraction": 0.4964912235736847, "avg_line_length": 20.923076629638672, "blob_id": "d989952f73e51e24eb405175d682d87cebdb4b80", "content_id": "cab09e1da75525d1946746e1033545a100438262", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1140, "license_type": "no_license", "max_line_length": 68, "num_lines": 52, "path": "/day7/p1.py", "repo_name": "tbohne/AoC18", "src_encoding": "UTF-8", "text": "import sys\nimport copy\nimport string\nfrom string import ascii_lowercase\n\ndef get_names():\n names = dict()\n cnt = 0\n for i in ascii_lowercase:\n if cnt == len(input) - 1:\n break\n names[i.upper()] = []\n cnt += 1\n return names\n\ndef delete_item(item):\n for i in names.keys():\n if i == item:\n del names[i]\n break\n\n for i in names.keys():\n if item in names[i]:\n names[i].remove(item)\n\ndef parse_input():\n for i in input:\n before = i.strip().split(\"must\")[0].split(\"Step\")[1].strip()\n after = i.strip().split(\"can\")[0].split(\"step\")[1].strip()\n names[after].append(before)\n\nif __name__ == '__main__':\n\n input = sys.stdin.readlines()\n names = get_names()\n parse_input()\n order = []\n\n while len(names) > 0:\n\n deps = []\n for i in names.keys():\n deps.append(names[i])\n min_list = min(deps)\n\n for j in names.keys():\n if names[j] == min_list:\n order.append(j)\n delete_item(j)\n break\n\n print(\"sol p1: \" + \"\".join(order))\n" }, { "alpha_fraction": 0.4413457214832306, "alphanum_fraction": 0.4621514081954956, "avg_line_length": 24.804597854614258, "blob_id": "910693e1e9a419209d40130f269da7f592e86a96", "content_id": "d9578c2186bf71aa6bf6007103fbf8f8936d6ac3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2259, "license_type": "no_license", "max_line_length": 87, "num_lines": 87, "path": "/day8/main.py", "repo_name": "tbohne/AoC18", "src_encoding": "UTF-8", "text": "import sys\nimport copy\nimport string\nfrom string import ascii_lowercase\n\n# 42384 too low\n\nif __name__ == '__main__':\n\n input = sys.stdin.read().split()\n print(input)\n\n stack = []\n tree = []\n tmp_input = copy.copy(input)\n open_meta_data = 0\n idx = 0\n\n while len(tmp_input) > open_meta_data:\n\n print(\"len: \" + str(len(tmp_input)))\n print(\"need: \" + str(int(input[idx + 1]) + 2))\n print(\"open meta len: \" + str(open_meta_data))\n\n need = int(input[idx + 1]) + 2\n\n if need + open_meta_data > len(tmp_input):\n print(\"DONE\")\n break\n\n node = (input[idx], input[idx + 1], [])\n\n print(\"looking at: \" + str(node))\n\n # if len(tmp_input) <= open_meta_data:\n # print(\"len of rest: \" + str(len(tmp_input)))\n # print(\"open meta data: \" + str(open_meta_data))\n # print(\"current need: \" + str(node[1]))\n # print(\"DONE\")\n # break\n\n for i in range(0, len(tmp_input) - 1):\n if tmp_input[i] == node[0] and tmp_input[i + 1] == node[1]:\n tmp_idx = i\n\n if node[0] == '0':\n print(\"remove: \" + str(tmp_input[tmp_idx : (tmp_idx + 2 + int(node[1]))]))\n del tmp_input[tmp_idx : (tmp_idx + 2 + int(node[1]))]\n else:\n print(\"remove::: \" + str(tmp_input[tmp_idx : tmp_idx + 2]))\n del tmp_input[tmp_idx : tmp_idx + 2]\n\n # no childs\n if node[0] == '0':\n print(\"handle now\")\n print(node)\n\n for i in range(idx + 2, idx + 2 + int(node[1])):\n node[2].append(input[i])\n\n tree.append(node)\n\n else:\n open_meta_data += int(node[1])\n print(\"append to stack\")\n stack.append(node)\n print(node)\n\n idx += 2\n if node[0] == '0':\n idx += int(node[1])\n\n print(\"TODO: \" + str(tmp_input))\n\n for i in stack:\n node = (i[0], i[1], [])\n\n for j in range(0, int(i[1])): \n node[2].append(tmp_input[j])\n del tmp_input[0 : int(i[1])]\n tree.append(node)\n\n res = 0\n for i in tree:\n res += sum([int(x) for x in i[2]])\n\n print(\"sol p1: \" + str(res))\n\n \n" }, { "alpha_fraction": 0.5779221057891846, "alphanum_fraction": 0.5844155550003052, "avg_line_length": 25.399999618530273, "blob_id": "a853cdaeec7205fd3823e43ce2b8a178c1410c6c", "content_id": "f892efba471967ac70bfcdb3bea6e4cedb8b5997", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 924, "license_type": "no_license", "max_line_length": 101, "num_lines": 35, "path": "/day5/main.py", "repo_name": "tbohne/AoC18", "src_encoding": "UTF-8", "text": "import sys\nimport copy\nfrom string import ascii_lowercase\n\ndef remove_unit(tmp_input, idx):\n del tmp_input[idx]\n del tmp_input[idx]\n\ndef react_polymer(tmp_input):\n\n modified = True\n\n while modified:\n modified = False\n\n for i in range(0, len(tmp_input) - 1):\n if tmp_input[i] != tmp_input[i + 1] and tmp_input[i].lower() == tmp_input[i + 1].lower():\n modified = True\n remove_unit(tmp_input, i)\n break\n return tmp_input\n\nif __name__ == '__main__':\n input = sys.stdin.read().strip()\n polymer_lengths = []\n\n print(\"sol p1: \" + str(len(react_polymer(list(input)))))\n\n for unit_type in ascii_lowercase:\n\n tmp_input = list(input.replace(unit_type, \"\").replace(unit_type.upper(), \"\"))\n tmp_input = react_polymer(tmp_input)\n polymer_lengths.append(len(tmp_input))\n\n print(\"sol p2: \" + str(min(polymer_lengths)))\n" }, { "alpha_fraction": 0.4525139629840851, "alphanum_fraction": 0.4581005573272705, "avg_line_length": 22.866666793823242, "blob_id": "1b73b96a696d8f73d5c09e3680604f51337a1738", "content_id": "b341f6060f1077b972815b30a57f5e824304e421", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 716, "license_type": "no_license", "max_line_length": 50, "num_lines": 30, "path": "/day1/main.py", "repo_name": "tbohne/AoC18", "src_encoding": "UTF-8", "text": "import sys\n\nif __name__ == '__main__':\n\n input = sys.stdin.readlines()\n curr_freq = 0\n reached_twice = False\n list_of_freqs = []\n\n while not reached_twice:\n\n for change in input:\n\n sign = change[0]\n change = int(change.replace(sign, \"\"))\n\n if (sign == \"+\"):\n curr_freq += change\n else:\n curr_freq -= change\n\n if curr_freq in list_of_freqs:\n reached_twice = True\n print(\"sol p2: \" + str(curr_freq))\n break\n else:\n list_of_freqs.append(curr_freq)\n\n if len(list_of_freqs) == len(input):\n print(\"sol p1: \" + str(curr_freq))\n" }, { "alpha_fraction": 0.7747747898101807, "alphanum_fraction": 0.7837837934494019, "avg_line_length": 36, "blob_id": "3ef4ffbe14ef568d83e0e24828bd8d04a5dd23d2", "content_id": "20866d9cd1e80384683f5b61fd4dd0a73221df17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 222, "license_type": "no_license", "max_line_length": 88, "num_lines": 6, "path": "/README.md", "repo_name": "tbohne/AoC18", "src_encoding": "UTF-8", "text": "# AoC18\nMy intuitive solutions to this year's https://adventofcode.com/.\n\nJust for fun and getting comfortable with python (again).\n\nThe solutions are in no way optimized or particularly beautiful (often quick and dirty).\n" }, { "alpha_fraction": 0.44724467396736145, "alphanum_fraction": 0.47000187635421753, "avg_line_length": 29.210227966308594, "blob_id": "a5b3749a07d3948369365cba54418ea0ae9e60ac", "content_id": "3446f8ca36152adf818f09a9ccd93e75a093afd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5317, "license_type": "no_license", "max_line_length": 195, "num_lines": 176, "path": "/day6/main.py", "repo_name": "tbohne/AoC18", "src_encoding": "UTF-8", "text": "import sys\nimport copy\nfrom string import ascii_lowercase\n\ndef manhattan_dist(c1, c2):\n return abs(c1[1] - c2[1]) + abs(c1[0] - c2[0])\n\ndef part_two():\n\n total = 0\n for i in range(0, 1000):\n for j in range(0, 1000):\n sum = 0\n for c in coord_by_name.keys():\n sum += manhattan_dist((j, i), coord_by_name[c])\n if sum < 10000:\n total += 1\n\n print(\"sol p2: \" + str(total))\n\ndef part_one():\n\n for i in range(0, 1000):\n for j in range(0, 1000):\n\n if square[i][j] == \".\":\n min_dist = 99999\n name = \"\"\n collision_dist = min_dist\n for coords in list_of_coords:\n distance = abs(i - coords[1]) + abs(j - coords[0])\n if distance < min_dist:\n min_dist = distance\n name = coordinate_names[coords].lower()\n elif distance == min_dist:\n collision_dist = min_dist\n\n if collision_dist == min_dist:\n square[i][j] = \".\"\n else:\n square[i][j] = name\n\n area_cnt = dict()\n\n y_min = 2000\n x_min = 2000\n x_max = 0\n y_max = 0\n x_min_remove = []\n x_max_remove = []\n y_min_remove = []\n y_max_remove = []\n\n for c in list_of_coords:\n if c[0] <= x_min:\n x_min = c[0]\n x_min_remove.append(coordinate_names[c])\n for i in x_min_remove:\n if coord_by_name[i][0] > x_min:\n x_min_remove.remove(i)\n if c[0] >= x_max:\n x_max = c[0]\n x_max_remove.append(coordinate_names[c])\n for i in x_max_remove:\n if coord_by_name[i][0] < x_max:\n x_max_remove.remove(i)\n if c[1] <= y_min:\n y_min = c[1]\n y_min_remove.append(coordinate_names[c])\n for i in y_min_remove:\n if coord_by_name[i][1] > y_min:\n y_min_remove.remove(i)\n if c[1] >= y_max:\n y_max = c[1]\n y_max_remove.append(coordinate_names[c])\n for i in y_max_remove:\n if coord_by_name[i][1] < y_max:\n y_max_remove.remove(i)\n\n for i in coordinate_names.values():\n\n dist = abs(coord_by_name[i][1] - x_max)\n man_dists = []\n for j in coordinate_names.values():\n if coord_by_name[j][1] == x_max:\n man_dist = manhattan_dist((coord_by_name[i][0], x_max), coord_by_name[j])\n man_dists.append(man_dist)\n if min(man_dists) > dist:\n x_max_remove.append(i)\n\n dist = abs(coord_by_name[i][1] - x_min)\n man_dists = []\n for j in coordinate_names.values():\n if coord_by_name[j][1] == x_min:\n man_dist = manhattan_dist((coord_by_name[i][0], x_min), coord_by_name[j])\n man_dists.append(man_dist)\n if min(man_dists) > dist:\n x_min_remove.append(i)\n\n dist = abs(coord_by_name[i][0] - y_max)\n man_dists = []\n for j in coordinate_names.values():\n if coord_by_name[j][0] == y_max:\n man_dist = manhattan_dist((y_max, coord_by_name[i][1]), coord_by_name[j])\n man_dists.append(man_dist)\n if min(man_dists) > dist:\n y_max_remove.append(i)\n\n dist = abs(coord_by_name[i][0] - y_min)\n man_dists = []\n for j in coordinate_names.values():\n if coord_by_name[j][0] == y_min:\n man_dist = manhattan_dist((y_min, coord_by_name[i][1]), coord_by_name[j])\n man_dists.append(man_dist)\n if min(man_dists) > dist:\n y_min_remove.append(i)\n\n area_cnt[i] = 0\n\n for i in range(0, 1000):\n for j in range(0, 1000):\n\n if square[i][j].islower():\n if square[i][j].upper() not in x_max_remove and square[i][j].upper() not in x_min_remove and square[i][j].upper() not in y_max_remove and square[i][j].upper() not in y_min_remove:\n area_cnt[square[i][j].upper()] += 1\n\n max = 0\n caused_by = \"\"\n for i in area_cnt.keys():\n cnt = 0\n if i != 0:\n cnt = area_cnt[i] + 1\n\n if cnt > max:\n max = cnt\n caused_by = i\n\n print(caused_by + \": \" + str(max))\n\nif __name__ == '__main__':\n\n input = sys.stdin.readlines()\n\n test = dict()\n tmp_cnt = 0\n\n for c in ascii_lowercase:\n test[tmp_cnt] = c.upper()\n tmp_cnt += 1\n\n rest = len(input) - 26\n\n for c in ascii_lowercase:\n if rest > 0:\n rest -= 1\n test[tmp_cnt] = c.upper() + c.upper()\n tmp_cnt += 1\n\n cnt = 0\n lst = [\".\" for _ in range(0, 1000)]\n square = [copy.copy(lst) for _ in range(0, 1000)]\n\n list_of_coords = []\n coordinate_names = dict()\n coord_by_name = dict()\n\n for i in input:\n coords = (int(i.strip().split(\",\")[0]), int(i.strip().split(\",\")[1].strip()))\n list_of_coords.append(coords)\n square[coords[1]][coords[0]] = test[cnt]\n coordinate_names[coords] = test[cnt]\n coord_by_name[test[cnt]] = (coords[1], coords[0])\n cnt += 1\n\n part_one()\n part_two()\n" }, { "alpha_fraction": 0.5323383212089539, "alphanum_fraction": 0.5497512221336365, "avg_line_length": 25.799999237060547, "blob_id": "cabcd058df23ef267940bcb7d5a7e9dd79f40684", "content_id": "dfe428192a713bee6ceec0e9d96d6fc0e6a46464", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 804, "license_type": "no_license", "max_line_length": 88, "num_lines": 30, "path": "/day2/main.py", "repo_name": "tbohne/AoC18", "src_encoding": "UTF-8", "text": "import sys\n\ndef part_one(input):\n exactly_two = 0\n exactly_three = 0\n\n for boxID in input:\n letter_count = [boxID.count(letter) for letter in boxID]\n\n if 2 in letter_count:\n exactly_two += 1\n if 3 in letter_count:\n exactly_three += 1\n\n return exactly_two * exactly_three\n\ndef part_two(input):\n\n for boxID_one in input:\n for boxID_two in input:\n if boxID_one != boxID_two:\n equal_letters = [l1 for l1, l2 in zip(boxID_one, boxID_two) if l1 == l2]\n\n if len(boxID_one) - len(equal_letters) == 1:\n return \"\".join(equal_letters).strip()\n\nif __name__ == '__main__':\n input = sys.stdin.readlines()\n print(\"sol p1: \" + str(part_one(input)))\n print(\"sol p2: \" + part_two(input))\n" }, { "alpha_fraction": 0.5303388237953186, "alphanum_fraction": 0.552403450012207, "avg_line_length": 35.25714111328125, "blob_id": "4d3291d6acead00796c8ffa84b1729a5be01c799", "content_id": "3c50366a29d975cb503fd8cd4095ce62ebd46d82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2538, "license_type": "no_license", "max_line_length": 110, "num_lines": 70, "path": "/day4/main.py", "repo_name": "tbohne/AoC18", "src_encoding": "UTF-8", "text": "import sys\nfrom datetime import datetime\n\ndef calc_timespan(t1, t2):\n fmt = '%H:%M'\n return datetime.strptime(t2, fmt) - datetime.strptime(t1, fmt)\n\ndef parse_info():\n date = i.split(\"[\")[1].split(\"]\")[0].split(\" \")[0].strip()\n time = i.split(\"[\")[1].split(\"]\")[0].split(\" \")[1].strip()\n action = i.split(\"[\")[1].split(\"]\")[1].strip()\n return (date, time, action)\n\nif __name__ == '__main__':\n\n input = sys.stdin.readlines()\n input.sort()\n\n current_guard_id = \"\"\n start_sleeping = -1\n sleep_times = dict()\n sleeping_minutes = dict()\n\n for i in input:\n\n info = parse_info()\n\n if current_guard_id != \"\":\n if \"falls\" in i:\n start_sleeping = info[1]\n elif \"wakes\" in i:\n\n if not current_guard_id in sleep_times.keys():\n sleep_times[current_guard_id] = 0\n if not current_guard_id in sleeping_minutes.keys():\n sleeping_minutes[current_guard_id] = []\n\n time_to_add_in_minutes = int(str(calc_timespan(start_sleeping, info[1])).split(\":\")[0]) * 60\n time_to_add_in_minutes += int(str(calc_timespan(start_sleeping, info[1])).split(\":\")[1])\n start = int(start_sleeping.split(\":\")[1])\n end = int(info[1].split(\":\")[1]) - 1\n sleeping_minutes[current_guard_id].append(start)\n sleeping_minutes[current_guard_id].append(end)\n\n for idx in range(start + 1, start + time_to_add_in_minutes - 1):\n sleeping_minutes[current_guard_id].append(idx % 60)\n\n current_sleep_time = sleep_times[current_guard_id] + time_to_add_in_minutes\n sleep_times[current_guard_id] = int(current_sleep_time)\n\n if \"#\" in info[2]:\n current_guard_id = info[2].split(\"#\")[1].split(\"begins\")[0].strip()\n\n lazy_guard = max(sleep_times, key = sleep_times.get)\n\n # min, guard\n strategy1 = [max(sleeping_minutes[lazy_guard], key = sleeping_minutes[lazy_guard].count), int(lazy_guard)]\n # min, count, guard\n strategy2 = [0, 0, 0]\n\n for i in sleep_times.keys():\n tmp_min = max(sleeping_minutes[i], key = sleeping_minutes[i].count)\n\n if sleeping_minutes[i].count(tmp_min) > strategy2[1]:\n strategy2[0] = tmp_min\n strategy2[1] = sleeping_minutes[i].count(tmp_min)\n strategy2[2] = i\n\n print(\"sol p1: \" + str(strategy1[0] * strategy1[1]))\n print(\"sol p2: \" + str(int(strategy2[2]) * strategy2[0]))\n" } ]
10
w5688414/selfdriving_cv
https://github.com/w5688414/selfdriving_cv
45638b69c0083f1c3453db7527bc76220fdc65ef
885695468b97e4b46efc4325ab9c16b7d1e57f48
9e761fbd5297b4cf999163d59c9787abd48d8f7f
refs/heads/master
2020-03-24T16:27:53.891363
2018-08-19T15:33:49
2018-08-19T15:33:49
142,825,663
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.54802405834198, "alphanum_fraction": 0.5659513473510742, "avg_line_length": 37.786800384521484, "blob_id": "bda353e907ece1ecacd115f0698a9ca3193cc96f", "content_id": "8188d5d3baece3456c82b2f31c9d87e15bb662ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7642, "license_type": "no_license", "max_line_length": 103, "num_lines": 197, "path": "/carla-train/network_fine_tune.py", "repo_name": "w5688414/selfdriving_cv", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\n\ndef weight_ones(shape, name):\n initial = tf.constant(1.0, shape=shape, name=name)\n return tf.Variable(initial)\n\n\ndef weight_xavi_init(shape, name):\n initial = tf.get_variable(name=name, shape=shape,\n initializer=tf.contrib.layers.xavier_initializer())\n return initial\n\n\ndef bias_variable(shape, name):\n initial = tf.constant(0.1, shape=shape, name=name)\n return tf.Variable(initial)\n\n\nclass Network(object):\n\n def __init__(self, train_state):\n \"\"\" We put a few counters to see how many times we called each function \"\"\"\n self._count_conv = 0\n self._count_pool = 0\n self._count_bn = 0\n self._count_dropouts = 0\n self._count_activations = 0\n self._count_fc = 0\n self._count_lstm = 0\n self._count_soft_max = 0\n self._conv_kernels = []\n self._conv_strides = []\n self._weights = {}\n self._features = {}\n self._train_state = train_state\n\n \"\"\" Our conv is currently using bias \"\"\"\n\n def conv(self, x, kernel_size, stride, output_size, padding_in='SAME'):\n self._count_conv += 1\n\n filters_in = x.get_shape()[-1]\n shape = [kernel_size, kernel_size, filters_in, output_size]\n\n weights = weight_xavi_init(shape, 'W_c_' + str(self._count_conv))\n bias = bias_variable([output_size], name='B_c_' + str(self._count_conv))\n\n self._weights['W_conv' + str(self._count_conv)] = weights\n self._conv_kernels.append(kernel_size)\n self._conv_strides.append(stride)\n\n conv_res = tf.add(tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding=padding_in,\n name='conv2d_' + str(self._count_conv)), bias,\n name='add_' + str(self._count_conv))\n\n self._features['conv_block' + str(self._count_conv - 1)] = conv_res\n\n return conv_res\n\n def max_pool(self, x, ksize=3, stride=2):\n self._count_pool += 1\n return tf.nn.max_pool(x, ksize=[1, ksize, ksize, 1], strides=[1, stride, stride, 1],\n padding='SAME', name='max_pool' + str(self._count_pool))\n\n def bn(self, x):\n self._count_bn += 1\n return tf.contrib.layers.batch_norm(x, is_training=False,\n updates_collections=None, scope='bn' + str(self._count_bn))\n\n def activation(self, x):\n self._count_activations += 1\n return tf.nn.relu(x, name='relu' + str(self._count_activations))\n\n def dropout(self, x, prob=1):\n print (\"Dropout\", self._count_dropouts)\n self._count_dropouts += 1\n output = tf.nn.dropout(x, prob,\n name='dropout' + str(self._count_dropouts))\n return output\n\n def fc(self, x, output_size):\n self._count_fc += 1\n filters_in = x.get_shape()[-1]\n shape = [filters_in, output_size]\n\n weights = weight_xavi_init(shape, 'W_f_' + str(self._count_fc))\n bias = bias_variable([output_size], name='B_f_' + str(self._count_fc))\n\n return tf.nn.xw_plus_b(x, weights, bias, name='fc_' + str(self._count_fc))\n\n def conv_block(self, x, kernel_size, stride, output_size, padding_in='SAME', dropout_prob=None):\n print (\" === Conv\", self._count_conv, \" : \", kernel_size, stride, output_size)\n with tf.name_scope(\"conv_block\" + str(self._count_conv)):\n x = self.conv(x, kernel_size, stride, output_size, padding_in=padding_in)\n x = self.bn(x)\n if dropout_prob is not None:\n x = tf.cond(self._train_state,\n true_fn=lambda: self.dropout(x, dropout_prob),\n false_fn=lambda: x)\n\n return self.activation(x)\n\n def fc_block(self, x, output_size, dropout_prob=None):\n print (\" === FC\", self._count_fc, \" : \", output_size)\n with tf.name_scope(\"fc\" + str(self._count_fc + 1)):\n x = self.fc(x, output_size)\n if dropout_prob is not None:\n x = tf.cond(self._train_state,\n true_fn=lambda: self.dropout(x, dropout_prob),\n false_fn=lambda: x)\n self._features['fc_block' + str(self._count_fc + 1)] = x\n return self.activation(x)\n\n def get_weigths_dict(self):\n return self._weights\n\n def get_feat_tensors_dict(self):\n return self._features\n\n\ndef make_network():\n inp_img = tf.placeholder(tf.float32, shape=[None, 88, 200, 3], name='input_image')\n inp_speed = tf.placeholder(tf.float32, shape=[None, 1], name='input_speed')\n\n target_control = tf.placeholder(tf.float32, shape=[None, 3], name='target_control')\n #target_command = tf.placeholder(tf.float32, shape=[None, 4], name='target_command')\n train_state = tf.placeholder(tf.bool, shape=[], name='train_state')\n\n network_manager = Network(train_state)\n\n with tf.name_scope('Network'):\n xc = network_manager.conv_block(inp_img, 5, 2, 32, padding_in='VALID')\n print (xc)\n xc = network_manager.conv_block(xc, 3, 1, 32, padding_in='VALID')\n print (xc)\n\n xc = network_manager.conv_block(xc, 3, 2, 64, padding_in='VALID')\n print (xc)\n xc = network_manager.conv_block(xc, 3, 1, 64, padding_in='VALID')\n print (xc)\n\n xc = network_manager.conv_block(xc, 3, 2, 128, padding_in='VALID')\n print (xc)\n xc = network_manager.conv_block(xc, 3, 1, 128, padding_in='VALID')\n print (xc)\n\n xc = network_manager.conv_block(xc, 3, 1, 256, padding_in='VALID')\n print (xc)\n xc = network_manager.conv_block(xc, 3, 1, 256, padding_in='VALID')\n print (xc)\n\n x = tf.reshape(xc, [-1, int(np.prod(xc.get_shape()[1:]))], name='reshape')\n print (x)\n\n x = network_manager.fc_block(x, 512, dropout_prob=0.7)\n print (x)\n x = network_manager.fc_block(x, 512, dropout_prob=0.7)\n\n with tf.name_scope(\"Speed\"):\n speed = network_manager.fc_block(inp_speed, 128, dropout_prob=0.5)\n speed = network_manager.fc_block(speed, 128, dropout_prob=0.5)\n\n j = tf.concat([x, speed], 1)\n j = network_manager.fc_block(j, 512, dropout_prob=0.5)\n\n control_out = network_manager.fc_block(j, 256, dropout_prob=0.5)\n control_out = network_manager.fc_block(control_out, 256)\n control_out = network_manager.fc(control_out, 3)\n loss = tf.reduce_mean(tf.square(tf.subtract(control_out, target_control)))\n tf.summary.scalar('loss', loss)\n\n '''\n branch_config = [[\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"], \\\n [\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"]]\n\n branches = []\n losses = []\n for i in range(0, len(branch_config)):\n with tf.name_scope(\"Branch_\" + str(i)):\n branch_output = network_manager.fc_block(j, 256, dropout_prob=0.5)\n branch_output = network_manager.fc_block(branch_output, 256)\n branches.append(network_manager.fc(branch_output, len(branch_config[i])))\n losses.append(tf.square(tf.subtract(branches[i], target_control)))\n\n print (branch_output)\n\n losses = tf.convert_to_tensor(losses)\n losses = tf.reduce_mean(tf.transpose(losses, [1, 2, 0]), axis=1) * target_command;\n loss = tf.reduce_sum(losses)\n '''\n\n return {'loss': loss,\n 'train_state': train_state,\n 'inputs': [inp_img, inp_speed],\n 'labels': [target_control],\n 'outputs': [control_out]}\n\n" }, { "alpha_fraction": 0.6401785612106323, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 25.046510696411133, "blob_id": "2ef94215d2f9c72d82fa13043370dc7fc1f668ef", "content_id": "4ff953debdfab620ac715fdb4245b7b37d7acdfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 88, "num_lines": 43, "path": "/carla-train/h5_to_tfrecord.py", "repo_name": "w5688414/selfdriving_cv", "src_encoding": "UTF-8", "text": "import tensorflow as tf\n\nfrom tensorflow.python_io import TFRecordWriter\n\nimport numpy as np\nimport h5py\nimport glob\nimport os\nfrom tqdm import tqdm\n\nfrom IPython import embed\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\ninput_roots = '/data/dataTrain/val_*/'\noutput_name = '/data/dataTrain/val.tfrecords'\n\nwriter = TFRecordWriter(output_name)\n\nh5files = glob.glob(os.path.join(input_roots, '*.h5'))\n\nfor h5file in tqdm(h5files):\n try:\n data = h5py.File(h5file, 'r')\n for i in range(200):\n img = data['CameraRGB'][i]\n target = data['targets'][i]\n\n feature_dict = {'image': _bytes_feature(img.tostring()),\n 'targets': _float_feature(target)}\n\n example = tf.train.Example(features=tf.train.Features(feature=feature_dict))\n writer.write(example.SerializeToString())\n data.close()\n except:\n print('filename: {}'.format(h5file))\n\n\nwriter.close()\n" }, { "alpha_fraction": 0.5804427862167358, "alphanum_fraction": 0.6129151582717896, "avg_line_length": 32.44444274902344, "blob_id": "b4fedf50e00b63dbc297d9f6d72ced7debd03227", "content_id": "25737d7ae8b3e441712fe2b27a4d0ad9708c4f08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2710, "license_type": "no_license", "max_line_length": 100, "num_lines": 81, "path": "/carla-train/data_provider.py", "repo_name": "w5688414/selfdriving_cv", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\nimport glob\nimport os\nimport h5py\nfrom imgaug.imgaug import Batch, BatchLoader, BackgroundAugmenter\nimport imgaug.augmenters as iaa\nimport cv2\n\nfrom IPython import embed\n\nBATCHSIZE = 120\n\nst = lambda aug: iaa.Sometimes(0.4, aug)\noc = lambda aug: iaa.Sometimes(0.3, aug)\nrl = lambda aug: iaa.Sometimes(0.09, aug)\n\nseq = iaa.Sequential([\n rl(iaa.GaussianBlur((0, 1.5))),\n rl(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05), per_channel=0.5)),\n oc(iaa.Dropout((0.0, 0.10), per_channel=0.5)),\n oc(iaa.CoarseDropout((0.0, 0.10), size_percent=(0.08, 0.2),per_channel=0.5)),\n oc(iaa.Add((-40, 40), per_channel=0.5)),\n st(iaa.Multiply((0.10, 2.5), per_channel=0.2)),\n rl(iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)),\n], random_order=True)\n\n'''\ndef augmentation(imgs):\n return imgs\n'''\n\ndef parse_proto(example_proto):\n features = tf.parse_single_example(example_proto,\n features={'image': tf.FixedLenFeature([], tf.string),\n 'targets': tf.FixedLenSequenceFeature([], tf.float32, allow_missing=True)})\n image = tf.decode_raw(features['image'], tf.uint8)\n image = tf.reshape(image, [88, 200, 3])\n\n speed = features['targets'][10]\n target_control = features['targets'][0:3]\n target_command = features['targets'][24] % 4\n return image, speed[None], target_control, target_command\n\nclass DataProvider:\n def __init__(self, filename, session):\n dataset = tf.data.TFRecordDataset(filename)\n dataset = dataset.repeat().shuffle(buffer_size=2000).map(parse_proto).batch(BATCHSIZE)\n iterator = tf.data.Iterator.from_structure(dataset.output_types,\n dataset.output_shapes)\n dataset_init = iterator.make_initializer(dataset)\n session.run(dataset_init)\n\n self.dataset = dataset\n self.session = session\n self.next = iterator.get_next()\n\n def get_minibatch(self, augment = False):\n data = self.session.run(self.next)\n imgs = data[0].astype('float32')\n if augment:\n imgs = seq.augment_images(imgs)\n return Batch(images=imgs, data=data[1:])\n\n def show_imgs(self):\n batch = self.get_minibatch(True)\n for img in batch.images:\n cv2.imshow('img', img)\n cv2.waitKey(0)\n\n# Test tf.data & imgaug backgroud loader APIs\nif __name__ == '__main__':\n import time\n sess = tf.Session()\n dp = DataProvider('/mnt/AgentHuman/train.tfrecords', sess)\n\n while True:\n a = time.time()\n dp.get_minibatch()\n b = time.time()\n print(b-a)\n\n" }, { "alpha_fraction": 0.6266152858734131, "alphanum_fraction": 0.671843945980072, "avg_line_length": 39.40703582763672, "blob_id": "710e8f47cb3ea86061c377f2664ed594339194a8", "content_id": "689858b899d8ccf125e37f612fb819be1fc73ae8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9716, "license_type": "no_license", "max_line_length": 185, "num_lines": 199, "path": "/train_explaination.md", "repo_name": "w5688414/selfdriving_cv", "src_encoding": "UTF-8", "text": "\n# 主代码 train.py 函数\n 该代码用于训练imitation learning的网络,网络的输入为图像数据,speed和command,网络输出为steer gas brake。\n## 参数:\n- trainScratch = True # params[0]--True表示不重载训练参数;False表示重载训练参数 \n- dropoutVec # params[1] 输入矢量23维\n- image_cut=[115, 510] # params[2]\n- learningRate = 0.0002 # params[3] multiplied by 0.5 every 50000 mini batch\n- beta1 = 0.7 # params[4]\n- beta2 = 0.85 # params[5]\n- num_images # params[6] 图像数目:200*3289=657800\n- iterNum = 294000 # params[7]\n- batchSize = 120 -# params[8] size of batch \n- valBatchSize = 120 # params[9] size of batch for validation set\n- NseqVal = 5 # params[10] number of sequences to use for validation\n- epochs = 100 # params[11]\n- samplesPerEpoch = 500 # params[12]\n- L2NormConst = 0.001 # params[13]\n\nparams = [trainScratch, dropoutVec, image_cut, learningRate, beta1, beta2, num_images, iterNum, batchSize, valBatchSize, NseqVal, epochs, samplesPerEpoch, L2NormConst]\n\n\n- timeNumberFrames = 1 --- number of frames in each samples\n- prefSize = _image_size = (88, 200, 3) # 图像的大小88*200*3\n- memory_fraction=0.25 # 和内存有关的参数\n- cBranchesOutList = ['Follow Lane','Go Left','Go Right','Go Straight','Speed Prediction Branch'] #\n- controlInputs = [2,5,3,4] # Control signal, int ( 2 Follow lane, 3 Left, 4 Right, 5 Straight)\n- branchConfig = [[\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"]]\n\n\n## 配置GPU\nconfig = tf.ConfigProto(allow_soft_placement=True)\n## 准备数据\n - datasetDirTrain = '/home/ydx/AutoCarlaData/AgentHuman/SeqTrain/' # 训练集目录\n - datasetDirVal = '/home/ydx/AutoCarlaData/AgentHuman/SeqVal/' # 测试集目录\n - datasetFilesTrain = glob.glob(datasetDirTrain+'*ddd.h5') # 训练集文件总共3289\n - datasetFilesVal = glob.glob(datasetDirVal+'*.h5') # 测试集文件总共374\n### 网络输入数据\n - inputs: 维度为2的列表\n - input[0]=input_images 一维表示输入的图像inputImages 88*200*3;\n - input[1]=input_data \n input_data[0]: 一维表示输入的控制量input_control (维度为4的one-hot矢量) controlInputs = [2,5,3,4] # Control signal, int ( 2 Follow lane, 3 Left, 4 Right, 5 Straight), 比如[0 1 0 0]表示 Straighut\n input_data[1]: 测量量input_speed(一个维度)\n### 网络输出数据/网络标签值\n - targets:维度为2的列表\n targets[0]: 一维表示target_speed(维度为1)\n targets[1]: 一维表示target_control(维度为3)\n\n## 创建网络\n- netTensors = Net(branchConfig, params, timeNumberFrames, prefSize)\n - netTensors = {\n - 'inputs': inputs, \n - 'targets': targets,\n - 'params': params,\n - 'dropoutVec': dout, \n - 'output': controlOpTensors\n }\n- controlOpTensors= {\n - 'optimizers': contSolver, \n - 'losses': contLoss, 总的损失值\n - 'output': networkTensor, 网络的输出值\n - 'print': pr % 应该表示的是命令\n }\n## 训练网络\n- epoch: 运行次数\n- steps: 运行的第几个batch\n### 选择数据\n- cur_branch: 根据Branch的数目产生一个随机数(若有4个Branch则该随机数大于等于0小于等于3)确定想要训练哪一个Branch\n - cur_branch = np.random.choice(branch_indices)\n- 根据随机产生的cur_branch数值提取训练数据:\n - xs, ys = next(genBranch(fileNames=datasetFilesTrain,branchNum=controlInputs[cur_branch],batchSize=batchSize))\n### 增强数据\n××× 参考https://blog.csdn.net/u012897374/article/details/80142744×××\n\n- st = lambda aug: iaa.Sometimes(0.4, aug) # iaa.Sometimes 对batch中的一部分图片应用一部分Augmenters,剩下的图片应用另外的Augmenters。\n- oc = lambda aug: iaa.Sometimes(0.3, aug)\n- rl = lambda aug: iaa.Sometimes(0.09, aug)\n- seq = iaa.Sequential(\n - [rl(iaa.GaussianBlur((0, 1.5))), # 高斯 blur images with a sigma between 0 and 1.5\n - rl(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05), per_channel=0.5)), # add gaussian noise to images\n - oc(iaa.Dropout((0.0, 0.10), per_channel=0.5)), # randomly remove up to X% of the pixels\n - oc(iaa.CoarseDropout((0.0, 0.10), size_percent=(0.08, 0.2),per_channel=0.5)), # randomly remove up to X% of the pixels\n - oc(iaa.Add((-40, 40), per_channel=0.5)), # change brightness of images (by -X to Y of original value)\n - st(iaa.Multiply((0.10, 2.5), per_channel=0.2)), # change brightness of images (X-Y% of original value)\n - rl(iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)), # improve or worsen the contrast], \n - random_order=True)\n- xs = seq.augment_images(xs)\n### 输入数据\n- inputData.append(sess.run(tf.one_hot(ys[:, 24], 4))) # Command Control, 4 commands\n- inputData.append(ys[:, 10].reshape([batchSize, 1])) # Speed\n- feedDict = {\n - netTensors['inputs'][0]: xs, \n - netTensors['inputs'][1][0]: inputData[0],\n - netTensors['inputs'][1][1]: inputData[1], \n - netTensors['dropoutVec']: dropoutVec,\n - netTensors['targets'][0]: ys[:, 10].reshape([batchSize, 1]),\n - netTensors['targets'][1]: ys[:, 0:3]}\n### 训练\n_, p, loss_value = sess.run([contSolver, pr, contLoss], feed_dict=feedDict)\n### 策略\n - 每10个Batch输出训练结果\n - 每500个Batch输出验证结果\n - 每250个Batch保存Checkpoint结果\n - 每50000个Batch学习速率减半\n\n# 函数---Net\n## 输入\n - branchConfig # 4个Branch的\n - params # 参数\n - timeNumberFrames \n - prefSize # 图像大小\n## 输出\n tensors = {\n - 'inputs': inputs, \n - 'targets': targets,\n - 'params': params,\n - 'dropoutVec': dout,\n - 'output': controlOpTensors}\n## 重要中间变量\n - inputs: 维度为2的列表\n - input[0]=input_images 一维表示输入的图像inputImages 88*200*3;\n - input[1]=input_data \n - input_data[0]: 一维表示输入的控制量input_control (维度为4的one-hot矢量) controlInputs = [2,5,3,4] # Control signal, int ( 2 Follow lane, 3 Left, 4 Right, 5 Straight), 比如[0 1 0 0]表示 Straight\n - input_data[1]: 测量量input_speed(一个维度)\n - targets:维度为2的列表\n - targets[0]: 一维表示target_speed(维度为1)\n - targets[1]: 一维表示target_control(维度为3)\n## 引用函数\n - controlOpTensors = controlNet(inputs, targets, shapeInput, dout, branchConfig, params, scopeName='controlNET')\n### 函数-- controlNet\ncontrolOpTensors = controlNet(inputs, targets, shapeInput, dout, branchConfig, params, scopeName='controlNET')\n\n#### 输入:\n - inputs\n - targets\n - shapeInput\n - dout\n - branchConfig\n - params\n - scopeName='controlNET'\n#### 输出:\n - controlOpTensors= {\n - 'optimizers': contSolver, \n - 'losses': contLoss, 总的损失值\n - 'output': networkTensor, 网络的输出值\n - 'print': pr % 应该表示的是命令}\n#### 主要内容:\n - networkTensor = load_imitation_learning_network(inputs[0], inputs[1],shape[1:3], dropoutVec)\n - networkTensor: 返回值是和branchConfig个数相关的量;networkTensor的维度是和branchConfig维度一样; networkTensor 是Branch网络的输出值\n - 对比网络的输出networkTensor和标签值targets可以得到误差means:\n - [steer gas brake]类型的Branch的输出为:\n - part = tf.square(tf.subtract(networkTensor[i], targets[1]))\n - [speed]类型的Branch的输出为:\n - part = tf.square(tf.subtract(networkTensor[-1], targets[0]))\n - means = tf.convert_to_tensor(parts)\n - 根据输入的控制命令需要创建一个mask\n - mask = tf.convert_to_tensor(inputs[1][0])\n - 总的损失函数值为contLoss\n\n\n#### 函数---load_imitation_learning_network\nnetworkTensor = load_imitation_learning_network(inputs[0], inputs[1],shape[1:3], dropoutVec)\n##### 输入:\n - inputs[0] 输入图像shape=(?, 88, 200, 3)\n - inputs[1] 输入的控制指令和测量量 \n - input_data[0]: 一维表示输入的控制量input_control (维度为4)\n - input_data[1]: 测量量input_speed(一个维度)\n - shape[1:3] 图像大小[88, 200]\n - dropoutVec shape=(23,)\n##### 输出;\n - networkTensor: 返回值是和branchConfig个数相关的量;networkTensor的维度是和branchConfig维度一样。\n - 比如: branchConfig = [[\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"]]\n 那么networkTensor是维度为4的列表,因为branchConfig的元素都是三维的,因此networkTensor列表的每一个元素也都是三维的。\n\n##### 引用函数\n - network_manager = Network(dropout, tf.shape(x))\n##### 主要内容:构建网络结构\n- 输入图像88*200*3---经卷积和全连接层输出512维矢量 x\n- 输入speed ---经全连接层输出128维矢量 speed\n- 将两种输入量结合在一起,用变量j表示:j = tf.concat([x, speed], 1)\n- 变量j----经全连接层输出512维矢量\n- Branch网络:\n - branch_config:四个命令[[\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"], [\"Steer\", \"Gas\", \"Brake\"]]\n - 若branch_config=[\"Steer\", \"Gas\", \"Brake\"] 则输入j输出256维量\n - 若branch_config=\"Speed\" 则输入x输出256维量\n\n##### 函数---Network 函数提供了用于构建网络各层函数\nnetwork_manager = Network(dropout, tf.shape(x))\n###### 输入\n - dropout 23维初始化矢量\n - tf.shape(x) 图像形状\n###### 输出\n - network_manager\n\n# genBranch函数\ngenBranch(fileNames=datasetFilesTrain, branchNum=3, batchSize=200):\n- 输入: fileNames,branchNum=3(第几个Branch), batchSize=200\n- 输出: batchX, batchY\n - batchX维度:(batchSize, 88, 200, 3)\n - batchY维度:(batchSize, 28)\n \n" }, { "alpha_fraction": 0.5201356410980225, "alphanum_fraction": 0.5379397869110107, "avg_line_length": 37.01612854003906, "blob_id": "cd39a623b1c345d13ce5d1f31794de0fe3177990", "content_id": "85b7e42e39260e6c771b5df75210feb783573d1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2359, "license_type": "no_license", "max_line_length": 96, "num_lines": 62, "path": "/carla-train/train.py", "repo_name": "w5688414/selfdriving_cv", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\n\nfrom network import make_network\nfrom data_provider import DataProvider\nfrom tensorflow.core.protobuf import saver_pb2\n\nimport time\nimport os\n\nlog_path = './log'\nsave_path = './data'\n\nif __name__ == '__main__':\n\n with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:\n train_provider = DataProvider('/data/dataTrain/train.tfrecords', sess)\n val_provider = DataProvider('/data/dataTrain/val.tfrecords', sess)\n\n network = make_network()\n\n lr = 0.0001\n lr_placeholder = tf.placeholder(tf.float32, [])\n optimizer = tf.train.AdamOptimizer(learning_rate=lr_placeholder,\n beta1=0.7, beta2=0.85)\n optimizer = optimizer.minimize(network['loss'])\n\n sess.run(tf.global_variables_initializer())\n merged_summary_op = tf.summary.merge_all()\n\n saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V2)\n saver.restore(sess, os.path.join(save_path, 'step-7500.ckpt'))\n\n step = 0\n\n while True:\n if step % 50 == 0:\n val_batch = val_provider.get_minibatch()\n val_loss = sess.run(network['loss'],\n feed_dict={network['inputs'][0]: val_batch.images,\n network['inputs'][1]: val_batch.data[0],\n network['labels'][0]: val_batch.data[1]})\n print('VALIDATION--------loss: %.4f' % val_loss)\n if step % 500 == 0:\n model_path = os.path.join(save_path, 'step-%d.ckpt' % step)\n saver.save(sess, model_path)\n print(\"Checkpoint saved to %s\" % model_path)\n\n a = time.time()\n batch = train_provider.get_minibatch(augment=True)\n imgs = batch.images\n speed, target_control, _ = batch.data\n b = time.time()\n _, train_loss = sess.run([optimizer, network['loss']],\n feed_dict={network['inputs'][0]: imgs,\n network['inputs'][1]: speed,\n network['labels'][0]: target_control,\n lr_placeholder: lr})\n c = time.time()\n print('step: %d loss %.4f prepare: %.3fs gpu: %.3fs' % (step, train_loss, b-a, c-b))\n\n step += 1\n\n\n" }, { "alpha_fraction": 0.6268894076347351, "alphanum_fraction": 0.6821798086166382, "avg_line_length": 23.647058486938477, "blob_id": "4e77d50d1d4aa7fc82e33c1d158681e19b1b7b72", "content_id": "9b576d935fbebfad03fb88258d04742270b97335", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2566, "license_type": "no_license", "max_line_length": 91, "num_lines": 102, "path": "/README.md", "repo_name": "w5688414/selfdriving_cv", "src_encoding": "UTF-8", "text": "# selfdriving_cv\nthis is the repository of deep learning for self driving\n\n# linux tutorials\nUbuntu14.04下搜狗输入法安装:\nhttps://blog.csdn.net/u011006622/article/details/69281580\n\n记录:Ubuntu14.04 下 chrome的安装过程: https://blog.csdn.net/m0_37676373/article/details/78616715\n\nUbuntu 14.04右键终端的设置:https://www.linuxidc.com/Linux/2014-04/100498.htm\n\n# environment\n```\nubuntu 14.04\ntensorflow\ncarla\npython 2.7 (python 3.x not test)\n```\n\n## install carla python\n```\ncd PythonClient/\nsudo python setup.py install\n```\n# instructions\n```\nsudo pip install tensorflow\nsudo pip install scipy\nsudo pip install numpy=1.14.5\n./CarlaUE4.sh -windowed -ResX=640 -ResY=480\n```\n# run model\n```\npython run_CIL.py\n./CarlaUE4.sh -windowed -ResX=640 -ResY=480 -carla-server\n```\n# train model\n```\nsudo pip install keras\nsudo pip install imgaug\nsudo pip install opencv-python\npython train.py\n```\n\n## data instruction\n### input\nimages,measurements,commdand\n\nimages: data.rgb\n\nmeasurements: \n```\n targets[:,8]---Brake Noise, float\n targets[:,9]---Position X, float\n targets[:,10]---Position Y, float\n targets[:,10]---Speed, float\n targets[:,11]---Collision Other, float\n targets[:,12]---Collision Pedestrian, float\n targets[:,13]---Collision Car, float\n targets[:,14]---Opposite Lane Inter, float\n targets[:,15]---Sidewalk Intersect, float\n\n targets[:,21]---Orientation X, float\n targets[:,22]---Orientation Y, float\n targets[:,23]---Orientation Z, float\n```\ncommand: \n```\n targets[:,0]---Steer, float \n targets[:,1]---Gas, float\n targets[:,2]---Brake, float\n targets[:,3]---Hand Brake, boolean\n targets[:,4]---Reverse Gear, boolean\n targets[:,5]---Steer Noise, float\n targets[:,6]---Gas Noise, float\n targets[:,7]---Brake Noise, float\n targets[:,24]---High level command, int ( 2 Follow lane, 3 Left, 4 Right, 5 Straight)\n```\n\n### parameter\n targets[:,19]---Platform time, float\n targets[:,20]---Game Time, float\n targets[:,24]---Noise, Boolean ( If the noise, perturbation, is activated, (Not Used) )\n targets[:,25]---Camera (Which camera was used)\n targets[:,26]---Angle (The yaw angle for this camera)\n\n### output\naction: steering angle, acceleration\n```\n targets[:,16]---Acceleration X,float\n targets[:,17]---Acceleration Y, float\n targets[:,18]---Acceleration Z, float\n```\n\n\n# reference\n[CARLA Documentation][1]\n\n[Conditional Imitation Learning at CARLA][2]\n\n[1]:https://carla.readthedocs.io/en/latest/\n[2]:https://github.com/carla-simulator/imitation-learning\n" }, { "alpha_fraction": 0.6502857208251953, "alphanum_fraction": 0.7024762034416199, "avg_line_length": 25.795917510986328, "blob_id": "e18fe7e1cfcfe6968f247240260b6f452e0d840b", "content_id": "3e2946ad379c0a9d0aa6731079eb39808c81228f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3309, "license_type": "no_license", "max_line_length": 265, "num_lines": 98, "path": "/ucloud.md", "repo_name": "w5688414/selfdriving_cv", "src_encoding": "UTF-8", "text": "## remote operations\n- 服务器密码:t88888888\n\n```\nssh [email protected]\n或\nssh [email protected]\n\n上传本地文件到远程机器指定目录\nscp -r /home/ydx/Desktop/Auto\\ Vechicles/CORL2017ImitationLearningData.tar.gz [email protected]:/data\n从服务器上下载指定目录到本机:\nscp -r [email protected]:~/carla-train /home/eric/\n查看云端文件\ndf -h\n\nchmod –R 777 * :\n参数-R : 对目前目录下的所有档案与子目录进行相同的权限变更(即以递回的方式逐个变更)\n\n```\n## tensorboard 使用\n```\ntensorboard --logdir=/tmp --port=6005\nssh -L 6005:127.0.0.1:6005 [email protected]\nssh之后去浏览器打开127.0.0.1:6005\n```\n\n## vim 命令\n```\n yy 复制整行(nyy或者yny ,复制n行,n为数字); \n p 小写p代表贴至游标后(下),因为游标是在具体字符的位置上,所以实际是在该字符的后面 \n P 大写P代表贴至游标前(上) \n 整行的复制粘贴在游标的上(下)一行,非整行的复制则是粘贴在游标的前(后)\ndd:删除游标所在的一整行(常用)\nndd:n为数字。删除光标所在的向下n行,例如20dd则是删除光标所在的向下20行\n```\n\n## tmux 命令\n```\ntmux new -s demo # 新建一个名称为demo的会话\ntmux a -t demo # 进入到名称为demo的会话\nCtrl+b\td\t断开当前会话\nCtrl+b % 将当前面板平分为左右两块\n```\nhttps://blog.csdn.net/chenqiuge1984/article/details/80132042\n\n## ucloud 挂载云盘\n```\n mount /dev/vde /tenplus\n```\nhttps://docs.ucloud.cn/storage_cdn/udisk/userguide/format/linux\n\n## UAI Train训练平台\n```\nsudo python tf_tool.py pack \\\n --public_key=/HOySV2WKVkciASUmRP9dlLzVhiRTmSHz2mx9jHmmXdsehqAVrWOdA== \\\n\t\t\t--private_key=f0d85113ac1f17ff822e2f63dc195109280982fd \\\n\t\t\t--code_path=./code/ \\\n\t\t\t--mainfile_path=train.py \\\n\t\t\[email protected] \\\n\t\t\t--uhub_password=cmbjxX666 \\\n\t\t\t--uhub_registry=trytrain \\\n\t\t\t--uhub_imagename=trainjx \\\n --internal_uhub=true \\\n\t\t\t--ai_arch_v=tensorflow-1.1.0 \\\n\t\t\t--test_data_path=/data/test/data \\\n\t\t\t--test_output_path=/data/test/output \\\n\t\t\t--train_params=\"--max_step=2000\" \\\n\n创建docker\nsudo docker build -t test-cpu:uaitrain -f uaitrain-cpu.Dockerfile .\n\n本地运行\nsudo docker run -it -v /data/test/data/:/data/data -v /data/test/output/:/data/output test-cpu:uaitrain /bin/bash -c \"cd /data && /usr/bin/python /data/train.py --max_step=2000 --work_dir=/data --data_dir=/data/data --output_dir=/data/output --log_dir=/data/output\"\n\n上传镜像\nsudo docker push uhub.service.ucloud.cn/trytrain/trainjx:uaitrain\n\n上传数据集\n./filemgr-linux64 --action mput --bucket datasets --dir /home/eric/self-driving/docker/uai-sdk/examples/tensorflow/train/mnist_summary_1.1/data --trimpath /home/eric/self-driving/docker/uai-sdk/examples/tensorflow/train/mnist_summary_1.1\n```\n\n## reference\n[TensorFlow训练镜像打包][1]\n\n[使用UAI Train训练平台][2]\n\n[使用UFile管理工具上传下载数据][3]\n\n[vim 删除一整块,vim 删除一整行][4]\n\n[vi/vim复制粘贴命令][5]\n\n\n[1]: https://docs.ucloud.cn/ai/uai-train/guide/tensorflow/packing\n[2]: https://docs.ucloud.cn/ai/uai-train/tutorial/tf-mnist/train\n[3]: https://docs.ucloud.cn/ai/uai-train/base/ufile/files\n[4]: https://blog.csdn.net/chenyoper/article/details/78260007\n[5]: https://blog.csdn.net/lanxinju/article/details/5727262" }, { "alpha_fraction": 0.6557863354682922, "alphanum_fraction": 0.7566765546798706, "avg_line_length": 23.14285659790039, "blob_id": "9e03310a1f684381460cfa5850579e6472a73bc4", "content_id": "30839f0a6905ddb61dec2cae93a2d985f396bae8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 391, "license_type": "no_license", "max_line_length": 64, "num_lines": 14, "path": "/tensorflow.md", "repo_name": "w5688414/selfdriving_cv", "src_encoding": "UTF-8", "text": "# tensorflow notes\n\n\n\n# reference\n[【tensorflow】fine-tuning, 选择性加载ckpt部分权重][1]\n\n[tensorflow设置显存自适应和显存比例.][2]\n\n[tf.variable_scope和tf.name_scope的用法][3]\n\n[1]: https://blog.csdn.net/shwan_ma/article/details/78874881\n[2]: https://blog.csdn.net/threadroc/article/details/54849160\n[3]: https://blog.csdn.net/uestc_c2_403/article/details/72328815" }, { "alpha_fraction": 0.6768420934677124, "alphanum_fraction": 0.6989473700523376, "avg_line_length": 34.22222137451172, "blob_id": "38d474537db80d800d8334daea813c6e9519ebe1", "content_id": "992c3be07dd5123c14a33e50414b765b352fe65b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "no_license", "max_line_length": 85, "num_lines": 27, "path": "/carla-train/predict.py", "repo_name": "w5688414/selfdriving_cv", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport glob\nimport h5py\nimport numpy as np\nfrom network import make_network\n# read an example h5 file\ndatasetDirTrain = '/home/eric/self-driving/AgentHuman/SeqTrain/'\ndatasetDirVal = '/home/eric/self-driving/AgentHuman/SeqVal/'\ndatasetFilesTrain = glob.glob(datasetDirTrain+'*.h5')\ndatasetFilesVal = glob.glob(datasetDirVal+'*.h5')\nprint(\"Len train:{0},len val{1}\".format(len(datasetFilesTrain),len(datasetFilesVal)))\ndata = h5py.File(datasetFilesTrain[1], 'r')\nimage_input = data['rgb'][1]\ninput_speed =np.array([[100]])\nimage_input = image_input.reshape(\n (1, 88, 200, 3))\n\nwith tf.Session() as sess:\n network = make_network()\n saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(\"./data\")\n if ckpt:\n saver.restore(sess, ckpt)\n output=sess.run(network['outputs'], feed_dict={network['inputs'][0]:image_input,\n network['inputs'][1]: input_speed})\n print(output)\n sess.close()" } ]
9
rojoso/pydot
https://github.com/rojoso/pydot
e2c1b8b193a2cd22c14e0d99b8f2612d07c2ad94
15dbe74cb955e8ea6e5d03b0fcee6ea8e65f7094
14c25a22f2cd123dec58b4980d6660ffb23fb92a
refs/heads/master
2021-04-12T12:28:20.611496
2018-03-23T14:09:06
2018-03-23T14:09:06
126,493,686
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8205128312110901, "alphanum_fraction": 0.8205128312110901, "avg_line_length": 18.5, "blob_id": "a4632a760952559f9c802dc1f66094cbf658fa48", "content_id": "2c4d1b645dad3456c2b845127d53ebb7f6f72dde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "no_license", "max_line_length": 30, "num_lines": 2, "path": "/README.md", "repo_name": "rojoso/pydot", "src_encoding": "UTF-8", "text": "# pydot\n应用graphviz 建立图片之间的相似关系,-利用sift\n" }, { "alpha_fraction": 0.6794871687889099, "alphanum_fraction": 0.6820513010025024, "avg_line_length": 20.5, "blob_id": "75aaedef0943c2d496990c54c3a14e7dfc690b8e", "content_id": "0b982f6516572628a128b532d1738cc47fe392eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 68, "num_lines": 18, "path": "/auto-sift.py", "repo_name": "rojoso/pydot", "src_encoding": "UTF-8", "text": "from PIL import Image\nfrom numpy import *\nfrom pylab import *\nimport os\nimport sift\n\nimlist = os.listdir('pages')\nnbr_images = len(imlist)\nimlist_dir = [str('../pages/'+imlist[n]) for n in range(nbr_images)]\n\nimname = [imlist[n][:-4] for n in range(nbr_images)]\n\nos.mkdir('sifts')\n\nos.chdir('sifts')\n\nfor n in range(nbr_images):\n\tsift.process_image(imlist_dir[n],str(imname[n]+'.sift'))\n\n\n\n" } ]
2
JulianATA/tictactoe
https://github.com/JulianATA/tictactoe
1693f0687a23dcbc405eee4c65deed8491e84e9b
b0ce95778e95b566195644a8e3db275b16468236
a69b453b45006e30ab7632e7bcf8dad27fef695d
refs/heads/master
2020-04-12T11:00:07.164869
2018-12-20T03:23:39
2018-12-20T03:23:39
162,446,801
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4194842278957367, "alphanum_fraction": 0.4283667504787445, "avg_line_length": 25.2406005859375, "blob_id": "80c6448b0e3afbff5e336d42c17db622777d0ab0", "content_id": "2b53e0ef7de7a8a2ea222b667af53f4022ab9662", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3616, "license_type": "no_license", "max_line_length": 66, "num_lines": 133, "path": "/app.py", "repo_name": "JulianATA/tictactoe", "src_encoding": "UTF-8", "text": "from bottle import Bottle, route, run, request, abort, static_file\nfrom fsm import TocMachine\nimport jieba\nimport os\n\napp = Bottle()\n\njieba.load_userdict(\"dict.txt\")\nVERIFY_TOKEN = os.environ['VERIFY_TOKEN']\nPORT = os.environ['PORT']\nmachine = TocMachine(\n states=[\n 'start',\n 'gaming',\n 'PlayerWin',\n 'CpWin'\n ],\n transitions=[\n {\n 'trigger': 'Drop',\n 'source': 'start',\n 'dest': 'gaming',\n 'conditions': 'FirstDrop'\n },\n {\n 'trigger': 'Drop',\n 'source': 'gaming',\n 'dest': 'gaming',\n 'conditions': 'normal'\n },\n {\n 'trigger': 'Drop',\n 'source': 'gaming',\n 'dest':'CpWin',\n 'conditions': 'CpWinning'\n },\n {\n 'trigger': 'Drop',\n 'source': 'gaming',\n 'dest':'PlayerWin',\n 'conditions':'PlayerWining'\n },\n {\n 'trigger': 'new_game',\n 'source': [\n 'gaming',\n 'PlayerWin',\n 'CpWin'\n ],\n 'dest': 'start'\n }\n ],\n initial='start',\n auto_transitions=False,\n show_conditions=True,\n)\n\n\[email protected](\"/webhook\", method=\"GET\")\ndef setup_webhook():\n mode = request.GET.get(\"hub.mode\")\n token = request.GET.get(\"hub.verify_token\")\n challenge = request.GET.get(\"hub.challenge\")\n\n if mode == \"subscribe\" and token == VERIFY_TOKEN:\n print(\"WEBHOOK_VERIFIED\")\n return challenge\n else:\n abort(403)\n\n\[email protected](\"/webhook\", method=\"POST\")\ndef webhook_handler():\n body = request.json\n print('\\nFSM STATE: ' + machine.state)\n print('REQUEST BODY: ')\n print(body)\n id = body['entry'][0]['messaging'][0]['sender']['id']\n if body['object'] == \"page\":\n msg = body['entry'][0]['messaging'][0]['message']['text']\n seg_list = jieba.cut(msg)\n x = -1\n y = -1\n for word in seg_list:\n if word in ['左上','左上角']:\n x = 0\n y = 0\n if word in ['中間右邊', '中右', '右中']:\n x = 1\n y = 2\n if word in ['中間左邊', '中左']:\n x = 1\n y = 0\n if word in ['中間', '中央', '正中間']:\n x = 1\n y = 1\n if word in ['中上','中間上面']:\n x = 0\n y = 1\n if word in ['中下','中間下面']:\n x = 2\n y = 1\n if word in ['左下','左下角']:\n x = 0\n y = 2\n if word in ['右上', '右上角']:\n x = 0\n y = 2\n if word in ['右下','右下角']:\n x = 2\n y = 2\n if word in ['新遊戲','重新','new_game','重來','再一次']:\n machine.new_game()\n return 'OK'\n print(word)\n if machine.check(x,y):\n print(x,\" \",y)\n machine.status['id'] = id\n machine.fill_in(x,y)\n machine.eval()\n print(machine.status['table'])\n print(machine.status['Win'])\n machine.Drop()\n return 'OK'\n\n\[email protected]('/show-fsm', methods=['GET'])\ndef show_fsm():\n machine.get_graph().draw('fsm.png', prog='dot', format='png')\n return static_file('fsm.png', root='./', mimetype='image/png')\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=PORT, debug=True, reloader=True)\n" }, { "alpha_fraction": 0.7383592128753662, "alphanum_fraction": 0.7649667263031006, "avg_line_length": 16.346153259277344, "blob_id": "d9ac7b0097d74ee14f00f7855c7ca0f72ef58042", "content_id": "eaaa98cc633086367feafebe58b5a113c3e3a6d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 501, "license_type": "no_license", "max_line_length": 72, "num_lines": 26, "path": "/README.md", "repo_name": "JulianATA/tictactoe", "src_encoding": "UTF-8", "text": "# Tic-Tac-Toe gamebot\n### Features\n1.Using four state to implement response\n\n2.Using transitions package\n\n3.Implement Chat bot on MESSENGER based on FSM\n\n4.Created a gamebot which can interact with player\n\n5.A dictionary for key words of the game\n\n6.Parsing Chinese using jieba making the bot can analysis simple Chinese\n* 「我要下右下角」\n* 「右上角好了」\n* 「新遊戲」\n* 「正中央」\n\n7.Deployed by Heroku\n\nRun Locally\n\npython3 app.py\nngrok http 5000\n\nand webhook to messenger\n" }, { "alpha_fraction": 0.45100104808807373, "alphanum_fraction": 0.4695468842983246, "avg_line_length": 31.060810089111328, "blob_id": "22994919f3afd2c297f8c9041307dda948667479", "content_id": "f729fe6dcf932002dcfe1a71f016789c2de723de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4745, "license_type": "no_license", "max_line_length": 70, "num_lines": 148, "path": "/fsm.py", "repo_name": "JulianATA/tictactoe", "src_encoding": "UTF-8", "text": "from transitions.extensions import GraphMachine\n\nfrom utils import send_text_message \n\nclass TocMachine(GraphMachine):\n def __init__(self, **machine_configs):\n self.machine = GraphMachine(\n model=self,\n **machine_configs\n )\n self.table =[[0,0,0],[0,0,0],[0,0,0]]\n self.status ={'id':'','table':'NNN\\nNNN\\nNNN\\n','Win':''}\n def FirstDrop(self):\n cnt = 0\n for i in range(0,3):\n for j in range(0,3):\n cnt+= self.table[i][j]\n if cnt <= 3:\n return True\n else:\n return False\n def on_enter_start(self):\n self.table =[[0,0,0],[0,0,0],[0,0,0]]\n self.status ={'id':'','table':'NNN\\nNNN\\nNNN\\n','Win':''}\n\n def on_exit_start(self):\n sender_id = self.status['id']\n def normal(self):\n print('check if normal.')\n if self.status['Win'] == '' and not self.FirstDrop():\n print('normal')\n return True\n return False\n\n def CpWinning(self):\n print('check if Cp win')\n return self.status['Win'] == \"Cp\"\n \n def PlayerWining(self):\n print('check if Player win')\n return self.status['Win'] == \"Player\"\n\n def on_enter_gaming(self):\n print(\"I'm entering gaming\")\n sender_id = self.status['id']\n send_text_message(sender_id, self.status['table'])\n send_text_message(sender_id, \"let's find out who is smarter!\")\n \n def on_exit_gaming(self):\n print('Leaving gaming')\n \n def on_enter_PlayerWin(self):\n print(\"I'm entering PlWin\")\n sender_id = self.status['id']\n send_text_message(sender_id, self.status['table'])\n send_text_message(sender_id, \"Lucky you.\")\n self.new_game()\n\n def on_exit_PlayerWin(self):\n print('Leaving PlayerWin')\n\n def on_enter_CpWin(self):\n print(\"I'm entering CpWin\")\n sender_id = self.status['id']\n send_text_message(sender_id, self.status['table'])\n send_text_message(sender_id, \"You suck.\")\n send_text_message(sender_id, \"And useless.\")\n send_text_message(sender_id, \"And far cheaper than me!\")\n self.new_game()\n\n def on_exit_CpWin(self):\n print('Leaving CpWin')\n\n def check(self, x, y):\n if x in range(0,3):\n if y in range(0,3):\n if self.table[x][y] == 0:\n return True\n return False\n\n def fill_in(self,x,y):\n if x in range(0,3):\n if y in range(0,3):\n self.table[x][y] = 1\n\n def checkWin(self, x):\n cnt2 = 0\n cnt3 = 0\n for i in range(0,3):\n cnt0 = 0\n cnt1 = 0\n for j in range(0,3):\n if self.table[i][j] == x:\n cnt0 += 1\n if self.table[j][i] == x:\n cnt1 += 1\n if cnt1 == 3 or cnt0 == 3:\n return True\n if self.table[i][i] == x:\n cnt2 += 1\n if self.table[i][2-i] == x:\n cnt3 += 1\n if cnt2 == 3 or cnt3 == 3:\n return True\n return False\n def checkBlank(self):\n for i in range(0,3):\n for j in range(0,3):\n if self.table[i][j] == 0:\n return False\n return True\n def AIfill_in(self):\n for i in range(0,3):\n for j in range(0,3):\n if self.table[i][j] == 0:\n self.table[i][j] = 2\n return True\n def eval(self):\n sender_id = self.status['id']\n if self.checkWin(1):\n self.status['Win'] = 'Player'\n elif self.checkWin(2):\n self.status['Win'] = 'Cp'\n elif self.checkBlank():\n self.status['Win'] = 'Noone'\n self.status['table'] = ''\n for i in range(0,3):\n for j in range(0,3):\n if self.table[i][j] == 0:\n self.status['table'] += \"N\"\n elif self.table[i][j] == 1:\n self.status['table'] += \"O\"\n else:\n self.status['table'] += \"X\"\n self.status['table'] += '\\n'\n send_text_message(sender_id, self.status['table'])\n send_text_message(sender_id, \"what to do~~~ what to do~~~\")\n self.AIfill_in()\n self.status['table'] = ''\n for i in range(0,3):\n for j in range(0,3):\n if self.table[i][j] == 0:\n self.status['table'] += \"N\"\n elif self.table[i][j] == 1:\n self.status['table'] += \"O\"\n else:\n self.status['table'] += \"X\"\n self.status['table'] += '\\n'\n" } ]
3
enverbashirov/YOLOv3-mMwave-Radar
https://github.com/enverbashirov/YOLOv3-mMwave-Radar
56e9fd342ec386042846c638e89ebad428d550d4
fbff0c89def12f1377a318f5167a2f97394d2e2c
9007f77f0181040f17191b57935ab7bebc99c685
refs/heads/master
2023-07-18T07:22:03.974541
2021-05-25T01:19:23
2021-05-25T01:19:23
328,881,573
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5375000238418579, "alphanum_fraction": 0.574999988079071, "avg_line_length": 11.230769157409668, "blob_id": "5bef540ef6c2c1df9ea6b36f22d9b4da112ab617", "content_id": "0bab214bd97bdaa33f1fec60d4459b07ccc28015", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 54, "num_lines": 13, "path": "/test.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\nimport os\n\nl = [{'test': 0, 'test2': 1}, {'test': 3, 'test2': 4}]\n\nprint(l)\n\nfor i, j in enumerate(l):\n print(i)\n\n\nprint(l)\n\n" }, { "alpha_fraction": 0.5257889628410339, "alphanum_fraction": 0.5533443093299866, "avg_line_length": 49.85628890991211, "blob_id": "d060958ed9d395c694d07d5b22a1c1796713a50c", "content_id": "93a9f8d1208d58fdc6061102b7b1558d0e4cbe0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8492, "license_type": "no_license", "max_line_length": 149, "num_lines": 167, "path": "/dataprep/processing.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import os, shutil, gc\nfrom argparse import ArgumentParser\nfrom time import sleep\n\nimport h5py\nimport numpy as np\nimport scipy as sp\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy import io, signal\nfrom scipy.signal.windows import nuttall, taylor\n\nfrom .util import *\n\ndef proc(args):\n rawpath = f'dataset/{args.pathin}/chext'\n savepath = f'dataset/{args.pathout}/proc' if args.pathout else f'dataset/{args.pathin}/proc'\n print(f'[LOG] Proc | Starting: {args.pathin}')\n\n # Create the subsequent save folders\n # if os.path.isdir(savepath):\n # shutil.rmtree(savepath)\n if not os.path.isdir(savepath):\n os.makedirs(savepath + '/raw/')\n os.mkdir(savepath + '/denoised/')\n\n # # # PARAMETERS INIT # # #\n\n c0 = 1/np.sqrt(4*np.pi*1e-7*8.85e-12) # speed of light\n f_start = 76e9\n f_stop = 78e9\n # Tramp_up = 180e-6\n # Tramp_down = 32e-6\n Tp = 250e-6\n # T_int = 66.667e-3\n N = 512\n # N_frames = 1250\n N_loop = 256\n # Tx_power = 100\n kf = 1.1106e13\n BrdFuSca = 4.8828e-5\n fs = 2.8571e6\n fc = (f_start + f_stop)/2\n\n # # # CONFIGURE SIGNAL PROCESSING # # # \n\n # # Range dimension\n NFFT = 2**10 # number of fft points in range dim\n nr_chn = 16 # number of channels\n # fft will be computed using a hannng window to lower border effects\n win_range = np.broadcast_to(np.hanning(N-1), (N_loop, nr_chn, N-1)).T # integral of the window for normalization\n # print(win_range.shape)\n sca_win = np.sum(win_range[:, 0, 0])\n\n v_range = np.arange(NFFT)/NFFT*fs*c0/(2*kf) # vector of range values for each range bin\n\n r_min = 0 # min range considered\n r_max = 10 # max range considered\n\n arg_rmin = np.argmin(np.abs(v_range - r_min)) # index of the min range considered value\n arg_rmax = np.argmin(np.abs(v_range - r_max)) # index of the max range considered value\n vrange_ext = v_range[arg_rmin:arg_rmax+1] # vector of range values from rmin to rmax\n\n # # Doppler dimension\n NFFT_vel = 256 # number of fft points in angle dim\n win_vel = np.broadcast_to(np.hanning(N_loop).reshape(1, 1, -1), (vrange_ext.shape[0], nr_chn, N_loop))\n scawin_vel = np.sum(win_vel[0, 0, :])\n vfreq_vel = np.arange(-NFFT_vel/2, NFFT_vel/2)/NFFT_vel*(1/Tp) # vector of considered frequencies in Doppler dim\n v_vel = vfreq_vel*c0/(2*fc) # transform freqs into velocities\n v_vel = np.delete(v_vel, np.arange(124, 132)) # delete velocities close to 0\n\n # # Angle dimension\n NFFT_ant = 64 # number of fft points in angle dim\n win_ant = np.broadcast_to(taylor(nr_chn, nbar=20, sll=20).reshape(1,-1,1), (vrange_ext.shape[0], nr_chn, NFFT_vel))\n scawin_ant = np.sum(win_ant[0, :, 0])\n # win_ant = np.tile(win_ant, (len(vrange_ext), 1))\n # vang_deg = np.arcsin(2*np.arange(-NFFT_ant/2, NFFT_ant/2)/NFFT_ant)/np.pi*180 # vector of considered angles [-90, 90-dtheta]\n # print(vang_deg)\n # print(deg2rad_shift(vang_deg))\n \n # ant_idx = np.concatenate([np.arange(nr_chn), np.arange(nr_chn+1, 2*nr_chn)]) # indices of virtual antenna elements\n # ant_idx = np.arange(nr_chn)\n cal_data = io.loadmat('dataprep/calibration.mat')['CalData'] # load complex calibration weights for each antenna element \n cal_data = cal_data[:16] # keep weights for TX1 only\n mcal_data = np.broadcast_to(cal_data, (N-1, cal_data.shape[0], N_loop))\n \n # # # PROCESS THE RDA SLICES FOR EACH FRAME # # #\n # sequences = [1, 2, 3, 4, 5, 6] # this is just as an example, you should put here the ids of the sequences you want to process\n # sequences = range(0, len(os.listdir(rawpath))) # this is just as an example, you should put here the ids of the sequences you want to process\n for i, fname in enumerate(os.listdir(rawpath)):\n frawname = fname.split('.')[0]\n logprefix = f'[LOG] Proc | {i+1} / {len(os.listdir(rawpath))} {frawname}'\n print(f'{logprefix} {fname}', end='\\r')\n\n Data_orig = np.load(f'{rawpath}/{fname}')\n # print(f'{logprefix} Original data shape: {Data_orig.shape}', end='\\r') \n\n parts = [0, 1, 2, 3]\n SIDELOBE_LEVEL = 3\n LINTHR_HIGH = -97\n LINTHR_LOW = -107 \n\n for part in parts: # split processing in parts for memory, each track is split in 4\n savename = f'{args.saveprefix}_seq_{frawname.split(\"_\")[2]}_sub_{part}' \\\n if args.saveprefix else f'{frawname}_sub_{part}'\n logprefix = f'[LOG] Proc | {i*len(parts)+part+1} / {len(os.listdir(rawpath))*len(parts)} {frawname}'\n print(f'{logprefix} {savename}', end='\\r')\n\n Data = Data_orig[:, :, part*32000:(part+1)*32000] # each part has 32k blocks (128k/4)\n split_locs = np.arange(Data.shape[2], step=N_loop, dtype=np.int)[1:]\n Data = np.stack(np.split(Data, split_locs, axis=2)[:-1], axis=-1) # split data into a sequence of radar cubes\n print(f'{logprefix} Time-split \\t\\t\\t', end='\\r')\n \n nsteps = Data.shape[-1] # last dim is time\n rda_data = np.zeros((len(vrange_ext), NFFT_ant, NFFT_vel, nsteps), dtype=np.float32)\n raw_ra = np.zeros((len(vrange_ext), NFFT_ant, nsteps), dtype=np.float32)\n for j in range(nsteps): # loop on the timesteps\n print(f'{logprefix} Timestep: {j+1} \\t\\t\\t', end='\\r')\n RawRadarCube = Data[1:, :, :, j]\n # print(RawRadarCube.shape)\n # Range fft: window, calibration and scaling are applied\n range_profile = np.fft.fft(RawRadarCube*win_range*mcal_data, NFFT, axis=0)*BrdFuSca/sca_win\n rp_ext = range_profile[arg_rmin:arg_rmax+1] # extract only ranges of interest (0 to 10 m)\n # background subtraction for MTI\n rp_ext -= np.mean(rp_ext, axis=2, keepdims=True)\n # Doppler fft\n range_doppler = np.fft.fftshift(np.fft.fft(rp_ext*win_vel, NFFT_vel, axis=2)/scawin_vel, axes=2)\n # Angle fft\n range_angle_doppler = np.fft.fftshift(np.fft.fft(range_doppler*win_ant, NFFT_ant, axis=1)/scawin_ant, axes=1)\n \n # absolute value + 20log10 to compute power\n range_angle_doppler = 20*np.log10(np.abs(range_angle_doppler))\n\n # fig, ax = plt.subplots(1, 2)\n # ax[0].imshow(range_angle_doppler.max(2))\n # ax[1].imshow(range_angle_doppler.max(1))\n # plt.show()\n\n raw_ra[..., j] = range_angle_doppler.max(2) # store raw range-angle image\n\n # at this point you have the RDA representation and you can apply further denoising\n rdep_thr = np.linspace(LINTHR_HIGH, LINTHR_LOW, range_angle_doppler.shape[0]).reshape((-1, 1, 1))\n \n range_angle_doppler -= rdep_thr\n range_angle_doppler[range_angle_doppler < 0] = 0\n\n maxs = np.max(range_angle_doppler, axis=1).reshape(range_angle_doppler.shape[0], 1, range_angle_doppler.shape[2])\n # maxs = np.max(range_angle_doppler, axis=(0, 2)).reshape(1, range_angle_doppler.shape[1], 1)\n threshold = maxs - SIDELOBE_LEVEL\n range_angle_doppler[range_angle_doppler < threshold] = 0\n\n rda_data[..., j] = range_angle_doppler\n\n # fig, ax = plt.subplots(1, 2)\n # ax[0].imshow(range_angle_doppler.max(2))\n # ax[1].imshow(range_angle_doppler.max(1))\n # plt.show()\n\n print(f'{logprefix} Saving: {savename} \\t\\t\\t')\n np.save(f'{savepath}/denoised/{savename}.npy', rda_data)\n np.save(f'{savepath}/raw/{savename}.npy', raw_ra)\n\n del Data, rda_data, split_locs, raw_ra\n gc.collect()\n del Data_orig\n gc.collect()\n print('\\n')" }, { "alpha_fraction": 0.5111385583877563, "alphanum_fraction": 0.5309479832649231, "avg_line_length": 42.546268463134766, "blob_id": "d2c95b604277393b75fdf37ea4e9ffad7c7d8c3d", "content_id": "d41f8fe4de4654f2a416bfccabc35aff5a1368d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14589, "license_type": "no_license", "max_line_length": 145, "num_lines": 335, "path": "/dataprep/truth.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import os\n# import shutil, time, pickle\n# from argparse import ArgumentParser\n\n# import matplotlib\nimport matplotlib.patches as patches\nfrom matplotlib import pyplot as plt\n# from matplotlib import rc\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\n\n# from .channel_extraction import ChannelExtraction\nfrom .util import Cluster, deg2rad_shift, get_box\nfrom .kalman_tracker import KalmanTracker\n\ndef truth(args):\n action = 'save'\n rawpath = f'dataset/{args.pathin}/proc'\n savepath = f'dataset/{args.pathout}/final' if args.pathout else f'dataset/{args.pathin}/final'\n print(f'[LOG] Truth | Starting: {args.pathin}')\n\n # Create the subsequent save folders\n # if os.path.isdir(savepath):\n # shutil.rmtree(savepath)\n if not os.path.isdir(savepath):\n os.makedirs(savepath)\n \n for i, fname in enumerate(os.listdir(rawpath + '/denoised')):\n frawname = args.saveprefix if args.saveprefix else args.pathin\n frawname = f'{frawname}_ra_{fname.split(\"_\")[2]}{fname.split(\"_\")[4].split(\".\")[0]}'\n logprefix = f'[LOG] Truth | {i+1} / {len(os.listdir(rawpath + \"/denoised\"))}'\n print(f'{logprefix} {frawname}', end='\\r')\n \n # starting index in the loaded data\n start = 10\n # load RDA data, MUST have 4D shape: (N_range_bins, N_angle_bins, N_doppler_bins, N_timesteps)\n rda_data = np.load(f'{rawpath}/denoised/{fname}')[..., start:]\n raw_ra_seq = np.load(f'{rawpath}/raw/{fname}')[..., start:]\n \n # path where to save the resulting figures\n # initialize clustering/tracker parameters\n MAX_AGE = 10\n MIN_DET_NUMBER = 15\n MIN_PTS_THR = 30\n MIN_SAMPLES = 40\n EPS = 0.04\n thr = 20\n # assoc_score = 'Mahalanobis' # either 'IOU' or 'Mahalanobis'\n # CLASS_CONF_THR = 0.0\n\n # init radar parameters\n c0 = 1/np.sqrt(4*np.pi*1e-7*8.85e-12)\n f_start = 76e9\n f_stop = 78e9\n # Tramp_up = 180e-6\n # Tramp_down = 32e-6\n Tp = 250e-6\n # T_int = 66.667e-3\n # N = 512\n # N_loop = 256\n # Tx_power = 100\n kf = 1.1106e13\n # BrdFuSca = 4.8828e-5\n fs = 2.8571e6\n fc = (f_start + f_stop)/2\n\n # compute range angle doppler intervals\n NFFT = 2**10\n # nr_chn = 16\n v_range = np.arange(NFFT)/NFFT*fs*c0/(2*kf)\n r_min = 0.5\n r_max = 10\n arg_rmin = np.argmin(np.abs(v_range - r_min))\n arg_rmax = np.argmin(np.abs(v_range - r_max))\n vrange_ext = v_range[arg_rmin:arg_rmax+1]\n NFFT_ant = 64\n vang_deg = np.arcsin(2*np.arange(-NFFT_ant/2, NFFT_ant/2)/NFFT_ant)/np.pi*180\n NFFT_vel = 256\n vfreq_vel = np.arange(-NFFT_vel/2, NFFT_vel/2)/NFFT_vel*(1/Tp)\n v_vel = vfreq_vel*c0/(2*fc)\n\n # delta_r = vrange_ext[1] - vrange_ext[0]\n # delta_v = v_vel[1] - v_vel[0]\n # delta_a = vang_deg[1] - vang_deg[0]\n\n track_id_list = list(range(1000)) # list with possible track id numbers\n tracking_list = []\n\n # loop over the time-steps\n for timestep in range(rda_data.shape[-1]):\n print(f'{logprefix} {frawname} Timestep: {timestep} \\t\\t\\t', end='\\r')\n # select RDA map of the current time-step\n data = rda_data[..., timestep]\n data = data[arg_rmin:arg_rmax + 1]\n\n # plt.imshow(data.max(1))\n # plt.show()\n\n # compute normalized maps for DBSCAN\n norm_ang = (vang_deg - np.min(vang_deg)) / (np.max(vang_deg) - np.min(vang_deg))\n norm_vel = (v_vel - np.min(v_vel)) / (np.max(v_vel) - np.min(v_vel))\n norm_ran = (vrange_ext - np.min(vrange_ext)) / (np.max(vrange_ext) - np.min(vrange_ext))\n\n rav_pts = np.asarray(np.meshgrid(vrange_ext, vang_deg, v_vel, indexing='ij'))\n # print(rav_pts[1, :, :, 0])\n norm_rav_pts = np.asarray(np.meshgrid(norm_ran, norm_ang, norm_vel, indexing='ij'))\n\n # select values which are over the threshold\n raw_ra = raw_ra_seq[arg_rmin:arg_rmax + 1, :, timestep]\n\n full_indices = (data > thr)\n data[data < thr] = 0\n rav_pts = rav_pts[:, full_indices]\n\n power_values_full = data[full_indices]\n norm_rav_pts = norm_rav_pts[:, full_indices]\n rav_pts_lin = rav_pts.reshape(rav_pts.shape[0], -1)\n\n # save range and angle for tracking\n ra_totrack = np.copy(rav_pts_lin[:2, :])\n ra_totrack[1] = deg2rad_shift(ra_totrack[1])\n\n normrav_pts_lin = norm_rav_pts.reshape(norm_rav_pts.shape[0], -1)\n\n if rav_pts.shape[1] > MIN_SAMPLES:\n # apply DBSCAN on normalized RDA map\n labels = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit_predict(normrav_pts_lin.T)\n unique, counts = np.unique(labels, return_counts=True)\n if not len(unique):\n print('[WAR] Truth | DBSCAN found no clusters! Skipping frame.')\n continue\n else:\n print('[WAR] Truth | No points to cluster! Skipping frame.')\n continue\n\n # loop over the detected clusters \n detected_clusters = [] # list containing all the detected clusters\n for cluster_id in unique:\n if cluster_id == -1: # -1 is the label for noise in DBSCAN, skip it\n continue\n number = counts[unique == cluster_id]\n if number < MIN_PTS_THR:\n continue \n # initialize new cluster object and fill its fields\n new_cluster = Cluster(cluster_id)\n new_cluster.cardinality = number\n new_cluster.elements = ra_totrack[:, labels == cluster_id] # range and angle\n new_cluster.dopplers = rav_pts_lin[2, labels == cluster_id]\n\n w = np.squeeze(power_values_full[labels == cluster_id])\n weights = w/np.sum(w) # normalized powers\n new_cluster.center_polar = np.average(new_cluster.elements, weights=weights, axis=1).reshape(2, 1)\n new_cluster.center_cartesian = np.array([new_cluster.center_polar[0]*np.cos(new_cluster.center_polar[1]), \n new_cluster.center_polar[0]*np.sin(new_cluster.center_polar[1])], \n dtype=np.float64).reshape(-1, 1)\n new_cluster.box = get_box(new_cluster)\n detected_clusters.append(new_cluster)\n\n if not timestep: # happens only in the first time-step\n for cl in detected_clusters:\n tracking_list.append(KalmanTracker(id_=track_id_list.pop(0), \n s0=np.array([cl.center_cartesian[0], 0, cl.center_cartesian[1], 0], \n dtype=np.float64).reshape(-1,1)))\n tracking_list[-1].box = cl.box \n sel_tracking_list = np.copy(tracking_list)\n\n elif timestep: # happens in all other time-steps\n # prepare the data association building the cost matrix\n detected_centers = [x.center_cartesian for x in detected_clusters]\n prev_cartcenters = []\n prev_centers = []\n if len(tracking_list) > 0:\n for trk in tracking_list:\n prev_cartcenters.append(trk.xy)\n prev_centers.append(trk.rtheta)\n cost_matrix = np.zeros((len(detected_centers), len(prev_cartcenters)))\n for i in range(len(detected_centers)):\n for j in range(len(prev_cartcenters)):\n # cost is the Mahalanobis distance\n cost_matrix[i, j] = KalmanTracker.get_mahalanobis_distance(\n detected_centers[i] - prev_cartcenters[j], \n tracking_list[j].get_S()) \n cost_matrix = np.asarray(cost_matrix)\n\n # hungarian algorithm for track association\n matches, undet, _ = KalmanTracker.hungarian_assignment(cost_matrix)\n\n # handle matched tracks\n if len(matches) > 0:\n for detec_idx, track_idx in matches:\n # get observation, polar coords center of the detected cluster\n obs = detected_clusters[detec_idx].center_polar\n # get tracker object of the detection\n current_tracker = tracking_list[track_idx]\n # KF predict-update step\n current_tracker.predict()\n current_tracker.update(obs.reshape(2, 1))\n current_tracker.box = get_box(detected_clusters[detec_idx])\n current_tracker.hits += 1\n current_tracker.misses_number = 0 \n # imaging(current_tracker, detected_clusters[detec_idx], data, labels, full_indices.ravel())\n else:\n print('[WAR] Truth | No detections-tracks matches found! Skipping frame.')\n continue\n\n # deal with undetected tracks\n if len(undet) > 0:\n for track_idx in undet:\n old_tracker = tracking_list[track_idx]\n old_tracker.misses_number += 1\n # predict only as no obs is detected\n old_tracker.predict()\n old_tracker.box = get_box(None, \n c=old_tracker.xy, \n h=old_tracker.box[0],\n w=old_tracker.box[0])\n # filter out tracks outside room borders (ghost targets)\n tracking_list = [t for t in tracking_list if (t.xy[0] > -1.70) and (t.xy[0] < 2.30)] # kill tracks outside the room boundaries\n # select the valid tracks, i.e., the ones with less than the max. misses and enough hits\n sel_tracking_list = [t for t in tracking_list if (t.misses_number <= MAX_AGE) and (t.hits >= MIN_DET_NUMBER)]\n\n plot4train(f'{savepath}/{frawname}{int(4-len(str(timestep)))*\"0\"}{timestep}', \n data,\n raw_ra,\n sel_tracking_list, \n vrange_ext, \n vang_deg,\n args.reso, \n action)\n\n print(f'[LOG] Truth | Truth data ready: {savepath}')\n \n\ndef imaging(tracker, cluster, data, labels, full_indices):\n flat_data = np.copy(data.ravel())\n full_data = flat_data[full_indices]\n full_data[labels != cluster.label] = 0 \n flat_data[full_indices] = full_data\n flat_data = flat_data.reshape(data.shape)\n \n # print(flat_data.shape)\n ra = flat_data.max(2)\n rd = flat_data.max(1)\n plt.subplot(121)\n plt.imshow(rd, aspect='auto')\n plt.subplot(122)\n plt.imshow(ra, aspect='auto', extent=(np.pi, 0.25065, 0.5, 10))\n\n plt.scatter(tracker.rtheta[1], tracker.rtheta[0], marker='x', c='r')\n\n plt.colorbar()\n plt.show()\n plt.close()\n\ndef plot(path, data_points, ra, noisy_ramap, t_list, action, index, ranges, angles):\n boxes = np.array([kt.box for kt in t_list])\n\n angles = deg2rad_shift(angles)\n \n # ramap = data_points.mean(2)\n\n _, ax = plt.subplots(1, 2)\n ax[0].set_title('Point-cloud representation')\n ax[1].set_title('RA map image representation')\n ax[0].scatter(ra[1], ra[0], marker='.')#, c=labels)\n ax[1].imshow(noisy_ramap, aspect='auto')\n ax[0].set_xlabel(r'$\\theta$ [rad]')\n ax[0].set_ylabel(r'$R$ [m]')\n ax[0].set_xlim([0.25065, np.pi])\n ax[0].set_ylim([0.5, 10])\n ax[0].grid()\n for i in range(len(boxes)):\n # add real valued bb on point cloud plot\n add_bb(boxes[i], ax[0], t_list[i].id)\n # add pixel-level bb to ra image\n int_box = adjust_bb(boxes[i], ranges, angles)\n add_bb(int_box, ax[1], t_list[i].id)\n\n if action == 'save':\n plt.savefig(path + f'fig_{index}', format='png', dpi=300)\n plt.close()\n elif action == 'plot':\n plt.title(f'Frame {index}')\n plt.show()\n plt.close()\n\ndef plot4train(path, data_points, noisy_ramap, t_list, ranges, angles, reso=416, action='save'):\n boxes = np.array([kt.box for kt in t_list])\n\n angles = deg2rad_shift(angles)\n \n fig = plt.figure(figsize=(1, 1), dpi=reso, frameon=False)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off')\n ax.imshow(noisy_ramap, aspect='auto')\n\n\n w_scale = reso/len(angles)\n h_scale = reso/len(ranges)\n bbs = []\n for i in range(0,min(4, len(boxes))):\n # # add pixel-level bb to ra image\n bb = adjust_bb(boxes[i], ranges, angles, w_scale, h_scale)\n bbs.append(list(map(int, [bb[1][0], bb[0][0], bb[3][0], bb[2][0]])))\n # add_bb(bb, ax, t_list[i].id)\n\n if bbs and action == 'save':\n plt.savefig(f'{path}_{bbs}.png'.replace(' ', ''), format='png', dpi=reso)\n elif action == 'plot':\n plt.show()\n plt.close()\n\ndef add_bb(bb, ax, note):\n ax.add_patch(patches.Rectangle((bb[1] - bb[3]/2, bb[0] - bb[2]/2), # top left corner coordinates\n bb[3], # width\n bb[2], # height\n linewidth=1,\n edgecolor='r',\n facecolor='none'))\n\ndef adjust_bb(bb_real, r, a, w_scale = 1, h_scale = 1):\n '''\n this function is needed to map the bb obtained in real values to the image \n pixel coordinates without the bias introduced by non-uniform spacing of angle bins\n '''\n bb_ind = np.zeros(bb_real.shape[0])\n bb_ind[0] = np.argmin(np.abs(r - bb_real[0])) * h_scale\n bb_ind[1] = np.argmin(np.abs(a - bb_real[1])) * w_scale\n top = np.argmin(np.abs(r - (bb_real[0] - bb_real[2]/2)))\n bottom = np.argmin(np.abs(r - (bb_real[0] + bb_real[2]/2)))\n left = np.argmin(np.abs(a - (bb_real[1] + bb_real[3]/2)))\n right = np.argmin(np.abs(a - (bb_real[1] - bb_real[3]/2)))\n bb_ind[2] = np.abs(top - bottom) * h_scale\n bb_ind[3] = np.abs(left - right) * w_scale\n return bb_ind.reshape(-1, 1)\n\n" }, { "alpha_fraction": 0.5682182908058167, "alphanum_fraction": 0.5789192318916321, "avg_line_length": 41.5, "blob_id": "c68068772367221a7a198ef0dd4f625f52f9918d", "content_id": "7ac07858844a6d06d7bbd528058c3ddfe67fc1de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1869, "license_type": "no_license", "max_line_length": 136, "num_lines": 44, "path": "/dataprep/channel_extraction.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import h5py\nimport numpy as np\nimport os, shutil\n\ndef chext(args):\n rawpath = f'raw/{args.pathin}'\n savepath = f'dataset/{args.pathout}/chext' if args.pathout else f'dataset/{args.pathin}/chext'\n print(f'[LOG] ChExt | Starting: {args.pathin}')\n\n # Create the subsequent save folders\n # if os.path.isdir(savepath):\n # shutil.rmtree(savepath)\n if not os.path.isdir(savepath):\n os.makedirs(savepath)\n \n for i, fname in enumerate(os.listdir(rawpath)):\n logprefix = f'[LOG] ChExt | {i+1} / {len(os.listdir(rawpath))}'\n savename = f'{args.saveprefix}_seq_{i}' if args.saveprefix else f'{fname.split(\"_\")[0]}_seq_{fname.split(\"_\")[1].split(\".\")[0]}'\n print(f'{logprefix} fname', end='\\r')\n channel_extraction(\n f'{rawpath}/{fname}',\n savepath,\n savename,\n action='SAVE',\n logprefix=logprefix)\n print('\\n')\n\ndef channel_extraction(loadpath, savepath, savename, action, logprefix='', nr_chn=16):\n with h5py.File(loadpath, 'r+') as h5data:\n print(f'{logprefix} Initializing: {loadpath}', end='\\r')\n Data = np.zeros((h5data['Chn1'].shape[1], nr_chn, h5data['Chn1'].shape[0]), dtype=np.float32)\n for i in range(nr_chn):\n print(f'{logprefix} Extracting channel {i+1} \\t\\t\\t', end='\\r')\n channel = np.asarray(h5data['Chn{}'.format(i+1)])\n Data[:, i, :] = channel.T\n print(f'{logprefix} Finalizing {savepath}', end='\\r')\n if action == 'SAVE':\n print(f'{logprefix} Saving', end='\\r')\n np.save(f'{savepath}/{savename}', Data)\n print(f'{logprefix} Saved: {savepath}/{savename} Data shape: {Data.shape}')\n elif action == 'RETURN':\n return Data\n else:\n print(f'[ERR] ChExt | Invalid action, please select SAVE or RETURN')" }, { "alpha_fraction": 0.532779335975647, "alphanum_fraction": 0.5706371068954468, "avg_line_length": 28.94285774230957, "blob_id": "34eb464da09959eeed83b5036701142b86cbc4c3", "content_id": "80bf809c2dc86722e95ac6213e6b4a736d554db6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2166, "license_type": "no_license", "max_line_length": 96, "num_lines": 70, "path": "/dataprep/util.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import os\r\nimport shutil\r\nfrom dataclasses import dataclass, field\r\nfrom typing import List\r\nimport h5py\r\n\r\nimport matplotlib.patches as patches\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n@dataclass\r\nclass Cluster:\r\n # cluster object, contains detected cluster points and additional values\r\n label: int \r\n cardinality: int = 0\r\n elements: List = field(default_factory=list)\r\n dopplers: List = field(default_factory=list)\r\n center_polar: np.ndarray = np.empty((2, 1))\r\n center_cartesian: np.ndarray = np.empty((2, 1))\r\n box: np.ndarray = np.empty((4, 1))\r\n\r\ndef polar2cartesian(xp):\r\n # angles in rad\r\n return np.array([xp[0]*np.cos(xp[1]), xp[0]*np.sin(xp[1])], dtype=np.float64).reshape(-1, 1)\r\n\r\ndef cartesian2polar(xy):\r\n # angles in rad\r\n return np.array([np.sqrt(xy[0]**2 + xy[1]**2), np.arctan2(xy[1], xy[0])]).reshape(-1, 1)\r\n\r\ndef deg2rad_shift(angles):\r\n a = np.copy(angles)\r\n a = np.pi*a/180\r\n a = -a + np.pi/2\r\n return a\r\n\r\ndef shift_rad2deg(angles):\r\n a = np.copy(angles)\r\n a = -a + np.pi/2\r\n a = 180*a/np.pi\r\n return a\r\n\r\ndef get_box(cluster, c=None, h=0.5, w=0.3):\r\n if cluster is not None:\r\n r_ext = cluster.elements[0].max() - cluster.elements[0].min()\r\n # print(cluster.elements[1])\r\n a_ext = cluster.elements[1].max() - cluster.elements[1].min()\r\n out = np.array([cluster.center_polar[0].squeeze(),\r\n cluster.center_polar[1].squeeze(),\r\n r_ext,\r\n a_ext]).reshape(4, 1)\r\n return out\r\n else:\r\n return np.array([c[0], c[1], h, w]).reshape(4, 1)\r\n\r\ndef IOU_score(a, b):\r\n\t# returns the IOU score of the two input boxes\r\n x1 = max(a[0], b[0])\r\n y1 = max(a[1], b[1])\r\n x2 = min(a[2], b[2])\r\n y2 = min(a[3], b[3])\r\n width = x2 - x1\r\n height = y2 - y1\r\n if (width < 0) or (height < 0):\r\n return 0.0\r\n area_intersection = width*height\r\n area_a = (a[2] - a[0])*(a[3] - a[1])\r\n area_b = (b[2] - b[0])*(b[3] - b[1])\r\n area_union = area_a + area_b - area_intersection\r\n return area_intersection/area_union\r\n" }, { "alpha_fraction": 0.6380952596664429, "alphanum_fraction": 0.6469388008117676, "avg_line_length": 30.276596069335938, "blob_id": "c2fd3841be3603999d87dae967fcd930ca24a1ae", "content_id": "529e75cede3598df6d120ede26e39653927a9908", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1470, "license_type": "no_license", "max_line_length": 92, "num_lines": 47, "path": "/dataprep/__init__.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import argparse\nimport sys, gc\n\nfrom .channel_extraction import chext\nfrom .processing import proc\nfrom .truth import truth\n\ndef parse_arg():\n parser = argparse.ArgumentParser(description='Data preprocessing module', add_help=True)\n\n parser.add_argument('--pathin', type=str, required=True,\n help=\"Path for the input folder\")\n parser.add_argument('--pathout', type=str,\n help=\"Path for the output folder\")\n parser.add_argument('--saveprefix', type=str,\n help=\"Prefix for the save file\")\n\n parser.add_argument('--chext', action='store_true',\n help=\"Perform channel extraction\")\n parser.add_argument('--proc', action='store_true',\n help=\"Perform signal processing (FFT and denoising)\")\n parser.add_argument('--truth', action='store_true',\n help=\"Perform ground truth (clustering, tracking) bouding box calculations\")\n\n \n parser.add_argument('--objcount', type=int, default=1,\n help=\"Number of objects per image (default: 1)\")\n parser.add_argument('--reso', type=int, default=416,\n help=\"Input image resolution (def: 416)\")\n\n parser.add_argument('--v', type=int, default=0, \n help=\"Verbose (0 minimal (def), 1 normal, 2 all\")\n \n return parser.parse_args(sys.argv[2:])\n\ndef main():\n args = parse_arg()\n\n if args.chext:\n chext(args)\n gc.collect()\n if args.proc:\n proc(args)\n gc.collect()\n if args.truth:\n truth(args)\n gc.collect()\n" }, { "alpha_fraction": 0.5904726982116699, "alphanum_fraction": 0.5993353128433228, "avg_line_length": 36.61111068725586, "blob_id": "39a453783c6573b7ce2b306748bad240e2c85475", "content_id": "1a44b2c62dcad3e17574f432472c89e5f693dd51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5416, "license_type": "no_license", "max_line_length": 111, "num_lines": 144, "path": "/yolo/predict.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import torch\n# import torch.nn as nn\n# import torch.nn.functional as F\n# import torch.optim as optim\n# import torchvision\nimport torchvision.transforms as transforms\n\nimport os, sys \n# import pickle, time, random\n\nimport numpy as np\n# from PIL import Image\nimport argparse\n\nfrom .darknet import DarkNet\nfrom .dataset import *\nfrom .util import *\n\ndef parse_arg():\n parser = argparse.ArgumentParser(description='MmWaveYoLo Prediction module', add_help=True)\n\n parser.add_argument('--cfg', type=str, default='yolov3micro',\n help=\"Name of the network config (default: yolov3micro)\")\n parser.add_argument('--pathin', type=str,\n help=\"Path for the input folder (default: testset)\")\n parser.add_argument('--pathout', type=str,\n help=\"Path for the output folder\")\n parser.add_argument('--video', type=str, default='False',\n help=\"Create video after prediction (default: False)\")\n \n parser.add_argument('--datasplit', type=float, default=0, \n help=\"Dataset split percentage (default: 0 (single set))\")\n parser.add_argument('--seed', type=float, default=0, \n help=\"Seed for the random shuffling (default: 0, (no shuffle))\")\n parser.add_argument('--bs', type=int, default=8, \n help=\"Batch size (default: 8)\")\n parser.add_argument('--ckpt', type=str, default='10.0',\n help=\"Checkpoint name <'epoch'.'iteration'>\")\n\n parser.add_argument('--nms', type=float, default=0.5, \n help=\"NMS threshold (default: 0.5)\")\n parser.add_argument('--obj', type=float, default=0.5, \n help=\"Objectiveness threshold (default: 0.5)\")\n parser.add_argument('--iou', type=float, default=0.5, \n help=\"Intersection over Union threshold (default: 0.5)\")\n parser.add_argument('--reso', type=int, default=416,\n help=\"Input image resolution (default: 416)\")\n\n parser.add_argument('--v', type=int, default=0, \n help=\"Verbose (0 minimal (default), 1 normal, 2 all\")\n \n return parser.parse_args(sys.argv[2:])\n\ndef predict():\n torch.cuda.empty_cache()\n \n # CONSTANTS\n args = parse_arg()\n pathcfg = f\"cfg/{args.cfg}.cfg\"\n pathin = f\"dataset/{args.pathin}/final\"\n pathout = f\"results/{args.pathout}\"\n num_workers = 2\n\n # NETWORK\n darknet = DarkNet(pathcfg, args.reso, args.obj, args.nms)\n pytorch_total_params = sum(p.numel() for p in darknet.parameters() if p.requires_grad)\n print('# of params: ', pytorch_total_params)\n if args.v > 0:\n print(darknet.module_list)\n\n # IMAGE PREPROCESSING!!!\n transform = transforms.Compose([\n transforms.Resize(size=(args.reso, args.reso), interpolation=3),\n transforms.ToTensor()\n ])\n # ====================================================\n\n # Test data allocation\n _, testloader = getDataLoaders(pathin, transform, train_split=args.datasplit, batch_size=args.bs, \\\n num_workers=num_workers, collate_fn=collate, random_seed=args.seed)\n # ====================================================\n\n start_epoch = 2\n start_iteration = 0\n\n # LOAD A CHECKPOINT!!!\n start_epoch, start_iteration = args.ckpt.split('.')\n start_epoch, start_iteration, state_dict, _, _, _, _ = load_checkpoint(\n f'save/checkpoints/',\n int(start_epoch),\n int(start_iteration)\n )\n darknet.load_state_dict(state_dict)\n # ====================================================\n\n # Use GPU if available\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n darknet.to(device) # Put the network on device\n if args.v > 0:\n print(next(darknet.parameters()).device)\n\n # Create the subsequent save folders\n # if os.path.isdir(pathout):\n # shutil.rmtree(pathout)\n if not os.path.isdir(pathout):\n os.makedirs(pathout)\n\n # PREDICT\n print(f'[LOG] PREDICT | Test set: {len(testloader.dataset)}')\n darknet.eval() # set network to evaluation mode\n outcomes = np.zeros(4)\n predList = []\n countLabels = 0\n with torch.no_grad():\n for bidx, (paths, inputs, targets) in enumerate(testloader):\n inputs = inputs.to(device)\n predictions = darknet(inputs)\n\n for idx, path in enumerate(paths):\n print(f'[LOG] PREDICT | Predicting {(bidx*args.bs)+idx+1}/{len(testloader.dataset)}', end='\\r')\n savename = path.split('/')[-1].split('_')[2]\n \n try:\n prediction = predictions[predictions[:, 0] == idx]\n except Exception:\n prediction = torch.Tensor([])\n print(f'[ERROR] TEST | No prediction? {prediction}')\n\n tempL, _= correctness(prediction, targets[idx], reso=darknet.reso, iou_thresh=args.iou)\n predList.extend(tempL)\n countLabels += targets[idx].size(0)\n\n # draw_prediction(path, prediction, targets[idx], darknet.reso, \\\n # names=[''], pathout=f'{pathout}/preds', savename=f'{savename}.png')\n\n if args.video:\n animate_predictions(pathout, args.video)\n\n print(countLabels)\n predList = precision_recall(predList, countLabels)\n plot_precision_recall(predList, pathout=f'{pathout}/map', savename='')\n # plot_precision_recall(predList, pathout=f'{pathout}/map', savename=f'iou{args.iou}.png')\n \n # ====================================================\n" }, { "alpha_fraction": 0.5299039483070374, "alphanum_fraction": 0.5562441945075989, "avg_line_length": 35.56940460205078, "blob_id": "53306eaaa72d7188d0d752df140ae13267e89f96", "content_id": "8b7b485bd7c412db2eb238ac0e762610da443c21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12908, "license_type": "no_license", "max_line_length": 126, "num_lines": 353, "path": "/yolo/util.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "from __future__ import division\nimport torch\n\nimport os\nfrom operator import itemgetter\nimport numpy as np\nimport cv2\nfrom PIL import Image, ImageDraw\nimport matplotlib.pyplot as plt\n\ndef draw_prediction(img_path, prediction, target, reso, names, pathout, savename):\n \"\"\"Draw prediction result\n\n Args\n - img_path: (str) Path to image\n - prediction: (np.array) Prediction result with size [#bbox, 8]\n 8 = [batch_idx, x1, y1, x2, y2, objectness, cls_conf, class idx]\n - target: (np.array) Prediction result with size [#bbox, 5]\n 8 = [batch_idx, x1, y1, x2, y2, class idx]\n - reso: (int) Image resolution\n - names: (list) Class names\n - save_path: (str) Path to save prediction result\n \"\"\"\n img = Image.open(img_path).convert('RGB')\n w, h = img.size\n h_ratio = h / reso\n w_ratio = w / reso\n draw = ImageDraw.Draw(img)\n\n # Drawing targets (labels)\n try:\n for i in range(target.shape[0]):\n bbox = target[i, 0:4].numpy()\n bbox = xywh2xyxy(bbox, target=True)\n caption = f'truth #{i}'\n\n color = (255, 255, 255)\n x1, y1, x2, y2 = bbox[0]*w, bbox[1]*h, bbox[2]*w, bbox[3]*h\n draw.rectangle(((x1 * w_ratio, y1 * h_ratio, x2 * w_ratio, y2 * h_ratio)),\n outline=color, width=2)\n draw.rectangle((x1 * w_ratio, y2 * h_ratio + 15,\n x2 * w_ratio, y2 * h_ratio),\n fill=color)\n draw.text((x1 * w_ratio + 2, y2 * h_ratio),\n caption, fill='black')\n except Exception:\n print(f'[ERR] TEST | Could not draw target')\n\n # Drawing predictions\n try:\n for i in range(prediction.shape[0]):\n bbox = prediction[i, 1:5]\n conf = '%.2f' % prediction[i, -3]\n caption = f'pred {conf}'\n\n color = (0, 0, 255)\n x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]\n draw.rectangle(((x1 * w_ratio, y1 * h_ratio, x2 * w_ratio, y2 * h_ratio)),\n outline=color, width=int(1+prediction[i, -3]*5))\n draw.rectangle((x1 * w_ratio, y1 * h_ratio - 15,\n x2 * w_ratio, y1 * h_ratio),\n fill=color)\n draw.text((x1 * w_ratio + 2, y1 * h_ratio - 15),\n caption, fill='white')\n except Exception:\n print(f'[ERR] TEST | Could not draw prediction')\n\n # img.show()\n os.makedirs(pathout, exist_ok=True)\n img.save(f'{pathout}/{savename}')\n img.close()\n\ndef animate_predictions(path, savetype='gif'):\n fps = 5\n if savetype == 'gif':\n gif = []\n images = (Image.open(f'{path}/preds/{f}').copy() for f in sorted(os.listdir(f'{path}/preds')) if f.endswith('.png'))\n for image in images:\n gif.append(image)\n\n os.makedirs(path, exist_ok=True)\n gif[0].save(f'{path}/sequence.gif', save_all=True, \\\n optimize=False, append_images=gif[1:], loop=0, \\\n duration=int(1000/fps))\n print(f'[LOG] PREDICT | Prediction sequence saved as {path}/sequence.gif')\n elif savetype == 'avi':\n images = [img for img in sorted(os.listdir(f'{path}/preds')) if img.endswith(\".png\")]\n frame = cv2.imread(f'{path}/preds/{images[0]}')\n height, width, _ = frame.shape\n\n video = cv2.VideoWriter(f'{path}/sequence.avi', 0, fps, (width,height))\n\n for image in images:\n video.write(cv2.imread(f'{path}/preds/{image}'))\n\n cv2.destroyAllWindows()\n video.release()\n print(f'[LOG] PREDICT | Prediction sequence saved as {path}/sequence.avi')\n\ndef IoU(box1, box2):\n \"\"\" Compute IoU between box1 and box2 \"\"\"\n\n if box1.is_cuda == True:\n box1 = box1.cpu()\n if box2.is_cuda == True:\n box2 = box2.cpu()\n\n #Get the coordinates of bounding boxes\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[...,0], box1[...,1], box1[...,2], box1[...,3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[...,0], box2[...,1], box2[...,2], box2[...,3]\n \n #get the corrdinates of the intersection rectangle\n inter_rect_x1 = torch.max(b1_x1, b2_x1)\n inter_rect_y1 = torch.max(b1_y1, b2_y1)\n inter_rect_x2 = torch.min(b1_x2, b2_x2)\n inter_rect_y2 = torch.min(b1_y2, b2_y2)\n \n #Intersection area\n inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)\n\n #Union Area\n b1_area = (b1_x2 - b1_x1 + 1)*(b1_y2 - b1_y1 + 1)\n b2_area = (b2_x2 - b2_x1 + 1)*(b2_y2 - b2_y1 + 1)\n \n iou = inter_area / (b1_area + b2_area - inter_area)\n \n return iou\n\n# TP / FP / FN / TN calculations\ndef correctness(prediction, target, reso=416, iou_thresh=0.5):\n flagP = np.zeros([prediction.size(0), 2]) # Flag for predictions\n flagP[:,1] -= 1\n tempCor = np.zeros(4)\n flagT = np.zeros(target.size(0))-1\n tempList = []\n if prediction.size(0) != 0:\n for i, p in enumerate(prediction):\n for j, t in enumerate(target):\n iou = IoU(p[1:5], xywh2xyxy(t[0:4]*reso)).numpy()[0]\n if iou > flagP[i, 0]: \n flagP[i,:] = [iou, j]\n\n for i in range(flagP.shape[0]):\n if flagP[i,0] >= iou_thresh and flagT[int(flagP[i,1])] == -1:\n # True Positive: iou >= thresh\n tempCor[0] += 1\n flagT[int(flagP[i,1])] = 1\n tempList.append([f'{prediction[i, -3]:.2f}', flagP[i, 0], False])\n else:\n # False Positive: iou < thresh or duplicates\n tempCor[1] = 1\n tempList.append([f'{prediction[i, -3]:.2f}', flagP[i, 0], True])\n \n # False Negative\n if np.count_nonzero(flagP[:, 1] == -1) == prediction.size(0):\n tempCor[2] += 1\n\n return tempList, tempCor\n\n# Precision and recall calculations\ndef precision_recall(predList, countLabels):\n predList.sort(key = itemgetter(1), reverse=True) # Sort by IoU\n predList.sort(key = itemgetter(2)) # Sort by TP\n predList.sort(key = itemgetter(0), reverse=True) # Sort by objectiveness\n\n for i, l in enumerate(predList):\n temp = [0, 0, 0, 0]\n if l[2] == False: temp[0] = 1 # TP\n else: temp[1] = 1 # FP\n\n if i != 0:\n temp[0] += predList[i-1][3] # Cumulative TP\n temp[1] += predList[i-1][4] # Cumulative FP\n temp[2] = float(temp[0] / (temp[0] + temp[1])) # Precision\n temp[3] = float(temp[0] / countLabels) # Recall\n l.extend(temp)\n\n return predList\n\n# Drawing precision/recall curve\ndef plot_precision_recall(predList, pathout, savename=''):\n\n predArr = np.array(predList, dtype=np.float)\n \n # print(np.round(predArr[:,-2:], 2))\n fig, _= plt.subplots(2, 1, gridspec_kw={'height_ratios': [3, 1]})\n plt.subplot(2, 1, 1)\n plt.plot(predArr[:, -1], predArr[:, -2])\n plt.plot(np.round(predArr[:,-1], 2), np.round(predArr[:,-2], 2))\n plt.grid(True)\n plt.title(f'Precision/Recall graph ({savename})')\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n\n plt.subplot(2, 1, 2)\n plt.plot(predArr[:,0])\n ax = plt.gca()\n ax.axes.xaxis.set_visible(False)\n # ax.axes.yaxis.set_visible(False)\n plt.rcParams['axes.titley'] = 1.0 # y is in axes-relative coordinates.\n plt.rcParams['axes.titlepad'] = -14 # pad is in points...\n plt.title(f'Objectiveness score')\n\n if savename != '':\n os.makedirs(f'{pathout}/{savename}', exist_ok=True)\n plt.savefig(f'{pathout}/{savename}', dpi=100)\n print(f'[LOG] TRAIN | Precision/Recall graph save \\\"{pathout}/{savename}\\\"')\n else:\n plt.show()\n plt.close()\n\ndef xywh2xyxy(bbox, target=False):\n if target:\n xc, yc = bbox[0], bbox[1]\n half_w, half_h = bbox[2] / 2, bbox[3] / 2\n return [xc - half_w, yc - half_h, xc + half_w, yc + half_h]\n \n bbox_ = bbox.clone()\n if len(bbox_.size()) == 1:\n bbox_ = bbox_.unsqueeze(0)\n xc, yc = bbox_[..., 0], bbox_[..., 1]\n half_w, half_h = bbox_[..., 2] / 2, bbox_[..., 3] / 2\n bbox_[..., 0] = xc - half_w\n bbox_[..., 1] = yc - half_h\n bbox_[..., 2] = xc + 2 * half_w\n bbox_[..., 3] = yc + 2 * half_h\n return bbox_\n\n#Check if it is working!!!\ndef xyxy2xywh(bbox, target=False):\n if target:\n w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]\n xc, yc = bbox[0] + w/2, bbox[1] + h/2\n return [xc, yc, w, h]\n \n bbox_ = bbox.clone()\n if len(bbox_.size()) == 1:\n bbox_ = bbox_.unsqueeze(0)\n w, h = bbox_[..., 2] - bbox_[..., 0], bbox_[..., 3] - bbox_[..., 1]\n xc, yc = bbox_[..., 0] + w/2, bbox_[..., 1] + h/2\n\n bbox_[..., 0] = xc\n bbox_[..., 1] = yc\n bbox_[..., 2] = w\n bbox_[..., 3] = h\n return bbox_\n\ndef load_checkpoint(checkpoint_dir, epoch, iteration):\n \"\"\"Load checkpoint from path\n\n Args\n - checkpoint_dir: (str) absolute path to checkpoint folder\n - epoch: (int) epoch of checkpoint\n - iteration: (int) iteration of checkpoint in one epoch\n\n Returns\n - start_epoch: (int)\n - start_iteration: (int)\n - state_dict: (dict) state of model\n \"\"\"\n path = os.path.join(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n if not os.path.isfile(path):\n raise Exception(\"Checkpoint in epoch %d doesn't exist\" % epoch)\n\n checkpoint = torch.load(path)\n start_epoch = checkpoint['epoch']\n state_dict = checkpoint['state_dict']\n start_iteration = checkpoint['iteration']\n tlosses = checkpoint['tlosses']\n vlosses = checkpoint['vlosses']\n optimizer = checkpoint['optimizer']\n scheduler = checkpoint['scheduler']\n\n assert epoch == start_epoch, \"epoch != checkpoint's start_epoch\"\n assert iteration == start_iteration, \"iteration != checkpoint's start_iteration\"\n return start_epoch, start_iteration, state_dict, tlosses, vlosses, optimizer, scheduler\n\ndef save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):\n \"\"\"Save checkpoint to path\n\n Args\n - path: (str) absolute path to checkpoint folder\n - epoch: (int) epoch of checkpoint file\n - iteration: (int) iteration of checkpoint in one epoch\n - save_dict: (dict) saving parameters dict\n \"\"\"\n os.makedirs(checkpoint_dir, exist_ok=True)\n path = os.path.join(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n assert epoch == save_dict['epoch'], \"[ERROR] epoch != save_dict's start_epoch\"\n assert iteration == save_dict['iteration'], \"[ERROR] iteration != save_dict's start_iteration\"\n if os.path.isfile(path):\n print(\"[WARNING] Overwrite checkpoint in epoch %d, iteration %d\" %\n (epoch, iteration))\n try:\n torch.save(save_dict, path)\n except Exception:\n raise Exception(\"[ERROR] Fail to save checkpoint\")\n\n print(\"[LOG] Checkpoint %d.%d.ckpt saved\" % (epoch, iteration))\n\ndef parse_cfg(cfgfile):\n \"\"\"\n Takes a configuration file\n \n Returns a list of blocks. Each blocks describes a block in the neural\n network to be built. Block is represented as a dictionary in the list\n \n \"\"\"\n \n file = open(cfgfile, 'r')\n lines = file.read().split('\\n') # store the lines in a list\n lines = [x for x in lines if len(x) > 0] # get read of the empty lines \n lines = [x for x in lines if x[0] != '#'] # get rid of comments\n lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces\n file.close()\n \n block = {}\n blocks = []\n \n for line in lines:\n if line[0] == \"[\": # This marks the start of a new block\n if len(block) != 0: # If block is not empty, implies it is storing values of previous block.\n blocks.append(block) # add it the blocks list\n block = {} # re-init the block\n block[\"type\"] = line[1:-1].rstrip() \n else:\n key,value = line.split(\"=\") \n block[key.rstrip()] = value.lstrip()\n blocks.append(block)\n \n return blocks\n\ndef plot_losses(tlosses, vlosses=None, savepath=''):\n\n plt.plot(range(0, len(tlosses)), tlosses)\n if vlosses:\n plt.plot(range(0, len(vlosses)), vlosses)\n plt.legend(['Train loss', 'Valid loss'], loc='upper left')\n plt.title(f'Training and Validation loss ({len(tlosses)} Epochs) ')\n else:\n plt.legend(['Train loss'], loc='upper left')\n plt.title(f'Training loss ({len(tlosses)} Epochs) ')\n\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n\n if savepath != '':\n os.makedirs(savepath, exist_ok=True)\n plt.savefig(f'{savepath}/loss_{len(tlosses)}.png', dpi=100)\n print(f'[LOG] TRAIN | Loss graph save \\\"{savepath}/loss_{len(tlosses)}.png\\\"')\n else:\n plt.show()\n plt.close()" }, { "alpha_fraction": 0.4856632649898529, "alphanum_fraction": 0.4958883225917816, "avg_line_length": 39.9866943359375, "blob_id": "28977eba88ecd8b77924a0ea23163a540c453feb", "content_id": "2a94d73c4448203227f3900c56a6dbb3cc1539d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18484, "license_type": "no_license", "max_line_length": 128, "num_lines": 451, "path": "/yolo/darknet.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "from __future__ import division\n\nimport torch, torch.nn as nn, torch.nn.functional as F \n# from torch.autograd import Variable\nimport numpy as np\n# import cv2\n# from pprint import pprint\n\nfrom .util import *\n\n# =================================================================\n# MAXPOOL (with stride = 1, NOT SURE IF NEEDED)\nclass MaxPool1s(nn.Module):\n\n def __init__(self, kernel_size):\n super(MaxPool1s, self).__init__()\n self.kernel_size = kernel_size\n self.pad = kernel_size - 1\n\n def forward(self, x):\n padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode=\"replicate\")\n pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x)\n return pooled_x\n\n# EMPTY LAYER\nclass EmptyLayer(nn.Module):\n def __init__(self):\n super(EmptyLayer, self).__init__()\n\n# YOLO / PREDICTION LAYER\nclass YOLOLayer(nn.Module):\n def __init__(self, anchors, num_classes, reso, ignore_thresh):\n super(YOLOLayer, self).__init__()\n self.anchors = anchors\n self.num_classes = num_classes\n self.reso = reso\n self.ignore_thresh = ignore_thresh\n\n def forward(self, x, y_true=None):\n bs, _, gs, _ = x.size()\n stride = self.reso // gs # no pooling used, stride is the only downsample\n num_attrs = 5 + self.num_classes # tx, ty, tw, th, p0\n nA = len(self.anchors)\n scaled_anchors = torch.Tensor(\n [(a_w / stride, a_h / stride) for a_w, a_h in self.anchors]).cuda()\n\n # Re-organize [bs, (5+nC)*nA, gs, gs] => [bs, nA, gs, gs, 5+nC]\n x = x.view(bs, nA, num_attrs, gs, gs).permute(\n 0, 1, 3, 4, 2).contiguous()\n\n pred = torch.Tensor(bs, nA, gs, gs, num_attrs).cuda()\n\n pred_tx = torch.sigmoid(x[..., 0]).cuda()\n pred_ty = torch.sigmoid(x[..., 1]).cuda()\n pred_tw = x[..., 2].cuda()\n pred_th = x[..., 3].cuda()\n pred_conf = torch.sigmoid(x[..., 4]).cuda()\n if self.training == True:\n pred_cls = x[..., 5:].cuda() # softmax in cross entropy\n else:\n pred_cls = F.softmax(x[..., 5:], dim=-1).cuda() # class\n\n grid_x = torch.arange(gs).repeat(gs, 1).view(\n [1, 1, gs, gs]).float().cuda()\n grid_y = torch.arange(gs).repeat(gs, 1).t().view(\n [1, 1, gs, gs]).float().cuda()\n anchor_w = scaled_anchors[:, 0:1].view((1, nA, 1, 1))\n anchor_h = scaled_anchors[:, 1:2].view((1, nA, 1, 1))\n\n pred[..., 0] = pred_tx + grid_x\n pred[..., 1] = pred_ty + grid_y\n pred[..., 2] = torch.exp(pred_tw) * anchor_w\n pred[..., 3] = torch.exp(pred_th) * anchor_h\n pred[..., 4] = pred_conf\n pred[..., 5:] = pred_cls\n\n if not self.training:\n pred[..., :4] *= stride\n return pred.view(bs, -1, num_attrs)\n else:\n loss = YOLOLoss([bs, nA, gs], scaled_anchors, self.num_classes, pred, [pred_tx, pred_ty, pred_tw, pred_th])\n loss = loss(x, y_true.float())\n return loss\n\n# YOLOv3 Loss\nclass YOLOLoss(nn.Module):\n def __init__(self, shape, scaled_anchors, num_classes, pred, pred_t):\n super(YOLOLoss, self).__init__()\n self.bs = shape[0]\n self.nA = shape[1]\n self.gs = shape[2]\n self.scaled_anchors = scaled_anchors\n self.num_classes = num_classes\n self.predictions = pred\n self.pred_conf = pred[..., 4]\n self.pred_cls = pred[..., 5:]\n self.pred_tx = pred_t[0]\n self.pred_ty = pred_t[1]\n self.pred_tw = pred_t[2]\n self.pred_th = pred_t[3]\n\n def forward(self, x, y_true):\n gt_tx = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()\n gt_ty = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()\n gt_tw = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()\n gt_th = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()\n gt_conf = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()\n gt_cls = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()\n\n obj_mask = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()\n for idx in range(self.bs):\n for y_true_one in y_true[idx]:\n y_true_one = y_true_one.cuda()\n gt_bbox = y_true_one[:4] * self.gs\n gt_cls_label = int(y_true_one[4])\n\n gt_xc, gt_yc, gt_w, gt_h = gt_bbox[0:4]\n gt_i = gt_xc.long().cuda()\n gt_j = gt_yc.long().cuda()\n\n pred_bbox = self.predictions[idx, :, gt_j, gt_i, :4]\n ious = IoU(xywh2xyxy(pred_bbox), xywh2xyxy(gt_bbox))\n best_iou, best_a = torch.max(ious, 0)\n\n w, h = self.scaled_anchors[best_a]\n gt_tw[idx, best_a, gt_j, gt_i] = torch.log(gt_w / w)\n gt_th[idx, best_a, gt_j, gt_i] = torch.log(gt_h / h)\n gt_tx[idx, best_a, gt_j, gt_i] = gt_xc - gt_i.float()\n gt_ty[idx, best_a, gt_j, gt_i] = gt_yc - gt_j.float()\n gt_conf[idx, best_a, gt_j, gt_i] = best_iou\n gt_cls[idx, best_a, gt_j, gt_i] = gt_cls_label\n\n obj_mask[idx, best_a, gt_j, gt_i] = 1\n\n MSELoss = nn.MSELoss(reduction='sum')\n BCELoss = nn.BCELoss(reduction='sum')\n CELoss = nn.CrossEntropyLoss(reduction='sum')\n\n loss = dict()\n # Xc, Yc, W, H loss calculation\n loss['x'] = MSELoss(self.pred_tx * obj_mask, gt_tx * obj_mask)\n loss['y'] = MSELoss(self.pred_ty * obj_mask, gt_ty * obj_mask)\n loss['w'] = MSELoss(self.pred_tw * obj_mask, gt_tw * obj_mask)\n loss['h'] = MSELoss(self.pred_th * obj_mask, gt_th * obj_mask)\n\n # CLASS loss calculation\n # loss['cls'] = BCELoss(pred_cls * obj_mask, cls_mask * obj_mask)\n loss['cls'] = CELoss((self.pred_cls * obj_mask.unsqueeze(-1)).view(-1, self.num_classes),\n (gt_cls * obj_mask).view(-1).long())\n\n # OBJECTIVENESS loss calculation\n # loss['conf'] = MSELoss(self.pred_conf * obj_mask * 5, gt_conf * obj_mask * 5) + \\\n # MSELoss(self.pred_conf * (1 - obj_mask), gt_conf * (1 - obj_mask))\n lambda_noobj = 0.5\n loss['conf'] = BCELoss(self.pred_conf * obj_mask, (gt_conf * obj_mask).detach()) + \\\n lambda_noobj * BCELoss(self.pred_conf * (1 - obj_mask), (gt_conf * (1 - obj_mask)).detach())\n\n # pprint(loss)\n return loss\n\n# Non-Max Suppression\nclass NMSLayer(nn.Module):\n \"\"\"\n NMS layer which performs Non-maximum Suppression\n 1. Filter background\n 2. Get prediction with particular class\n 3. Sort by confidence\n 4. Suppress non-max prediction\n \"\"\"\n\n def __init__(self, conf_thresh=0.65, nms_thresh=0.55):\n \"\"\"\n Args:\n - conf_thresh: (float) fore-ground confidence threshold\n - nms_thresh: (float) nms threshold\n \"\"\"\n super(NMSLayer, self).__init__()\n self.conf_thresh = conf_thresh\n self.nms_thresh = nms_thresh\n\n def forward(self, x):\n \"\"\"\n Args\n x: (Tensor) prediction feature map, with size [bs, num_bboxes, 5 + nC]\n\n Returns\n predictions: (Tensor) prediction result with size [num_bboxes, [image_batch_idx, 4 offsets, p_obj, max_conf, cls_idx]]\n \"\"\"\n bs, _, _ = x.size()\n predictions = torch.Tensor().cuda()\n\n for idx in range(bs):\n pred = x[idx]\n\n try:\n non_zero_pred = pred[pred[:, 4] > self.conf_thresh]\n non_zero_pred[:, :4] = xywh2xyxy(non_zero_pred[:, :4])\n max_score, max_idx = torch.max(non_zero_pred[:, 5:], 1)\n max_idx = max_idx.float().unsqueeze(1)\n max_score = max_score.float().unsqueeze(1)\n non_zero_pred = torch.cat(\n (non_zero_pred[:, :5], max_score, max_idx), 1)\n classes = torch.unique(non_zero_pred[:, -1])\n except Exception: # no object predicted\n print('No object predicted')\n continue\n\n for cls in classes:\n cls_pred = non_zero_pred[non_zero_pred[:, -1] == cls]\n conf_sort_idx = torch.sort(cls_pred[:, 5], descending=True)[1]\n cls_pred = cls_pred[conf_sort_idx]\n max_preds = []\n while cls_pred.size(0) > 0:\n max_preds.append(cls_pred[0].unsqueeze(0))\n ious = IoU(max_preds[-1], cls_pred)\n cls_pred = cls_pred[ious < self.nms_thresh]\n\n if len(max_preds) > 0:\n max_preds = torch.cat(max_preds).data\n batch_idx = max_preds.new(max_preds.size(0), 1).fill_(idx)\n seq = (batch_idx, max_preds)\n predictions = torch.cat(seq, 1) if predictions.size(\n 0) == 0 else torch.cat((predictions, torch.cat(seq, 1)))\n\n return predictions\n# =================================================================\n\n# NETWORK\nclass DarkNet(nn.Module):\n def __init__(self, cfg, reso=416, thr_obj=0.5, thr_nms=0.5):\n super(DarkNet, self).__init__()\n self.blocks = parse_cfg(cfg)\n self.reso, self.thr_obj, self.thr_nms = reso, thr_obj, thr_nms\n self.net_info, self.module_list = self.create_modules(self.blocks)\n self.nms = NMSLayer(self.thr_obj, self.thr_nms)\n\n def forward(self, x, y_true=None, CUDA=False):\n modules = self.blocks[1:]\n predictions = torch.Tensor().cuda() if CUDA else torch.Tensor()\n outputs = dict() #We cache the outputs for the route layer\n loss = dict()\n\n for i, module in enumerate(modules):\n if module[\"type\"] == \"convolutional\" or module[\"type\"] == \"upsample\":\n x = self.module_list[i](x)\n outputs[i] = x\n\n elif module[\"type\"] == \"shortcut\":\n from_ = int(module[\"from\"])\n x = outputs[i-1] + outputs[i+from_]\n outputs[i] = x\n\n elif module[\"type\"] == \"route\":\n layers = module[\"layers\"]\n layers = [int(a) for a in layers]\n \n if (layers[0]) > 0:\n layers[0] = layers[0] - i\n \n if len(layers) == 1:\n x = outputs[i + (layers[0])]\n \n else:\n if (layers[1]) > 0:\n layers[1] = layers[1] - i\n \n map1 = outputs[i + layers[0]]\n map2 = outputs[i + layers[1]]\n x = torch.cat((map1, map2), 1)\n outputs[i] = x\n \n elif module[\"type\"] == 'yolo':\n if self.training == True:\n loss_part = self.module_list[i][0](x, y_true)\n for key, value in loss_part.items():\n value = value\n loss[key] = loss[key] + \\\n value if key in loss.keys() else value\n loss['total'] = loss['total'] + \\\n value if 'total' in loss.keys() else value\n else:\n x = self.module_list[i][0](x)\n predictions = x if len(predictions.size()) == 1 else torch.cat(\n (predictions, x), 1)\n \n outputs[i] = outputs[i-1] # skip\n \n # Print the layer information\n # print(i, module[\"type\"], x.shape)\n \n # return prediction result only when evaluated\n if self.training == True:\n return loss\n else:\n predictions = self.nms(predictions)\n return predictions\n\n def create_modules(self, blocks):\n net_info = blocks[0] #Captures the information about the input and pre-processing \n module_list = nn.ModuleList()\n in_channels = 3\n out_channels_list = []\n \n for index, block in enumerate(blocks[1:]):\n module = nn.Sequential()\n \n # Convolutional Layer\n if (block[\"type\"] == \"convolutional\"):\n activation = block[\"activation\"]\n try:\n batch_normalize = int(block[\"batch_normalize\"])\n bias = False\n except:\n batch_normalize = 0\n bias = True\n \n out_channels = int(block[\"filters\"])\n kernel_size = int(block[\"size\"])\n padding = (kernel_size - 1) // 2 if int(block[\"pad\"]) else 0\n stride = int(block[\"stride\"])\n conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias = bias)\n module.add_module(\"conv_{0}\".format(index), conv)\n \n if batch_normalize:\n bn = nn.BatchNorm2d(out_channels)\n module.add_module(\"batch_norm_{0}\".format(index), bn)\n\n if activation == \"leaky\":\n activn = nn.LeakyReLU(0.1, inplace = True)\n module.add_module(\"leaky_{0}\".format(index), activn)\n \n # Up Sample Layer\n elif (block[\"type\"] == \"upsample\"):\n stride = int(block[\"stride\"]) # = 2 in Yolov3\n upsample = nn.Upsample(scale_factor = stride, mode = \"nearest\")\n module.add_module(\"upsample_{}\".format(index), upsample)\n \n # Shortcut Layer\n elif block[\"type\"] == \"shortcut\":\n shortcut = EmptyLayer()\n module.add_module(\"shortcut_{}\".format(index), shortcut)\n\n # Route Layer\n elif (block[\"type\"] == \"route\"):\n route = EmptyLayer()\n module.add_module(\"route_{0}\".format(index), route)\n\n block[\"layers\"] = block[\"layers\"].split(',')\n start = int(block[\"layers\"][0])\n if len(block['layers']) == 1:\n start = int(block['layers'][0])\n out_channels = out_channels_list[index + start]\n elif len(block['layers']) == 2:\n start = int(block['layers'][0])\n end = int(block['layers'][1])\n out_channels = out_channels_list[index + start] + out_channels_list[end] \n \n # Yolo Layer\n elif block[\"type\"] == \"yolo\":\n mask = block[\"mask\"].split(\",\")\n mask = [int(x) for x in mask]\n \n anchors = block[\"anchors\"].split(\",\")\n anchors = [int(a) for a in anchors]\n anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors),2)]\n anchors = [anchors[i] for i in mask]\n \n num_classes = int(block['classes'])\n ignore_thresh = float(block['ignore_thresh'])\n\n prediction = YOLOLayer(anchors, num_classes, self.reso, ignore_thresh)\n module.add_module(\"prediction_{}\".format(index), prediction)\n \n module_list.append(module)\n in_channels = out_channels\n out_channels_list.append(out_channels)\n \n return (net_info, module_list)\n\n def load_weights(self, path, cutoff=None):\n \"\"\"Load darknet weights from disk.\n YOLOv3 is fully convolutional, so only conv layers' weights will be loaded\n Darknet's weights data are organized as\n 1. (optinoal) bn_biases => bn_weights => bn_mean => bn_var\n 1. (optional) conv_bias\n 2. conv_weights\n\n Args\n - path: (str) path to .weights file\n - cutoff: (optinoal, int)\n \"\"\"\n fp = open(path, 'rb')\n header = np.fromfile(fp, dtype=np.int32, count=5)\n weights = np.fromfile(fp, dtype=np.float32)\n fp.close()\n\n header = torch.from_numpy(header)\n\n ptr = 0\n for i, module in enumerate(self.module_list):\n block = self.blocks[i]\n\n if cutoff is not None and i == cutoff:\n print(\"Stop before\", block['type'], \"block (No.%d)\" % (i+1))\n break\n\n if block['type'] == \"convolutional\":\n batch_normalize = int(\n block['batch_normalize']) if 'batch_normalize' in block else 0\n conv = module[0]\n\n if batch_normalize > 0:\n bn = module[1]\n num_bn_biases = bn.bias.numel()\n\n bn_biases = torch.from_numpy(\n weights[ptr:ptr+num_bn_biases])\n bn_biases = bn_biases.view_as(bn.bias.data)\n bn.bias.data.copy_(bn_biases)\n ptr += num_bn_biases\n\n bn_weights = torch.from_numpy(\n weights[ptr:ptr+num_bn_biases])\n bn_weights = bn_weights.view_as(bn.weight.data)\n bn.weight.data.copy_(bn_weights)\n ptr += num_bn_biases\n\n bn_running_mean = torch.from_numpy(\n weights[ptr:ptr+num_bn_biases])\n bn_running_mean = bn_running_mean.view_as(bn.running_mean)\n bn.running_mean.copy_(bn_running_mean)\n ptr += num_bn_biases\n\n bn_running_var = torch.from_numpy(\n weights[ptr:ptr+num_bn_biases])\n bn_running_var = bn_running_var.view_as(bn.running_var)\n bn.running_var.copy_(bn_running_var)\n ptr += num_bn_biases\n\n else:\n num_biases = conv.bias.numel()\n conv_biases = torch.from_numpy(weights[ptr:ptr+num_biases])\n conv_biases = conv_biases.view_as(conv.bias.data)\n conv.bias.data.copy_(conv_biases)\n ptr = ptr + num_biases\n\n num_weights = conv.weight.numel()\n conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights])\n conv_weights = conv_weights.view_as(conv.weight.data)\n conv.weight.data.copy_(conv_weights)\n ptr = ptr + num_weights" }, { "alpha_fraction": 0.6201991438865662, "alphanum_fraction": 0.624466598033905, "avg_line_length": 24.10714340209961, "blob_id": "6ac67b092b732c89cde3907bcdd181b72c2f8040", "content_id": "53b781c275781a6efe77280930cf3cd525ccf8b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 703, "license_type": "no_license", "max_line_length": 80, "num_lines": 28, "path": "/__main__.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import argparse\nimport sys\n\nimport yolo\nimport dataprep\n\ndef parse_arg():\n parser = argparse.ArgumentParser(description='mmWave YOLOv3', add_help=True,\n usage='''python . <action> [<args>]\n\n Actions:\n train Network training module\n predict Object detection module\n dataprep Data preprocessing module\n '''\n )\n parser.add_argument('Action', type=str, help='Action to run')\n\n return parser.parse_args(sys.argv[1:2])\n\nargs = parse_arg()\n\nif args.Action == 'train' or args.Action == 'predict':\n yolo.main(args)\nelif args.Action == 'dataprep':\n dataprep.main()\nelse:\n print('Unknown action. Check \"python . --help\"')\n" }, { "alpha_fraction": 0.5450772643089294, "alphanum_fraction": 0.5688263773918152, "avg_line_length": 36.38793182373047, "blob_id": "a32327ddf6bb3c58e14715a49ddcaf3272097791", "content_id": "68459a70ffb868c7833c2e13cbdc3758ee519cf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4337, "license_type": "no_license", "max_line_length": 110, "num_lines": 116, "path": "/yolo/dataset.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import torch\nimport torch.utils.data\nfrom torch.utils.data.dataloader import default_collate\n# from torchvision import transforms\n\nimport os\n# import random\nimport numpy as np\nfrom PIL import Image\n\n# anchors_wh = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],\n# [59, 119], [116, 90], [156, 198], [373, 326]],\n# np.float32) / 416\n\nclass MmwaveDataset(torch.utils.data.Dataset):\n def __init__(self, data_dir, data_size = 0, transforms = None):\n files = sorted(os.listdir(data_dir))\n self.files = [f\"{data_dir}/{x}\" for x in files]\n \n if data_size < 0 or data_size > len(files):\n assert(\"Data size should be between 0 to number of files in the dataset\")\n \n if data_size == 0:\n data_size = len(files)\n \n self.data_size = data_size\n self.transforms = transforms\n \n def __len__(self):\n return self.data_size\n\n def __getitem__(self, idx):\n image_path = self.files[idx]\n image = Image.open(image_path)\n img_w, img_h = image.size\n \n image = self.preProcessImage(image)\n\n labels = [] # to make it array of bbs (for multiple bbs in the future)\n labels_str = image_path.split(\"_\")[-1]\n\n if \"[[\" in labels_str:\n labels_str = labels_str.split('[[')[1].split(']]')[0].split('],[')\n labels = np.zeros((4, 5))\n for i, l in enumerate(labels_str):\n label = np.zeros(5)\n label[:4] = np.array([int(a) for a in l.split(',')]) # [xc, yc, w, h]\n\n # Normalizing labels\n label[0] /= img_w #Xcenter\n label[1] /= img_h #Ycenter\n label[2] /= img_w #Width\n label[3] /= img_h #Height\n\n labels[i, :] = label\n else:\n labels_str = labels_str.split('[')[1].split(']')[0].split(',') # get the bb info from the filename\n labels = np.zeros((1, 5))\n labels[0, :4] = np.array([int(a) for a in labels_str]) # [xc, yc, w, h]\n \n if np.any(labels[0, :4] == 0):\n return image, None\n\n # Normalizing labels\n labels[0, 0] /= img_w #Xcenter\n labels[0, 1] /= img_h #Ycenter\n labels[0, 2] /= img_w #Width\n labels[0, 3] /= img_h #Height\n # labels[0, 4] = 0 # class label (0 = person)\n # print(torch.any(torch.isfinite(image) == False), labels)\n\n return image_path, image, labels\n\n #Image custom preprocessing if required\n def preProcessImage(self, image):\n image = image.convert('RGB')\n if self.transforms:\n return self.transforms(image)\n else:\n image = np.array(image)\n image = image.transpose(2,1,0)\n return image.astype(np.float32)\n\ndef collate(batch):\n batch = list(filter(lambda x:x[1] is not None, batch))\n return default_collate(batch) # Use the default method to splice the filtered batch data\n\ndef getDataLoaders(data_dir, transforms, train_split=0, batch_size=8, \\\n num_workers=2, collate_fn=collate, random_seed=0):\n \n if train_split < 0 or train_split > 1:\n raise Exception(f\"data_loader | Split ({train_split}) coefficient should be 0 < x < 1\")\n\n dataset = MmwaveDataset(data_dir=data_dir, transforms=transforms)\n shuffle = True if random_seed != 0 else False\n \n # Single Set\n if train_split == 0 or train_split == 1:\n return None, torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=shuffle, num_workers=num_workers, collate_fn = collate_fn)\n\n # Generate a fixed seed\n generator = torch.Generator()\n if random_seed != 0:\n generator.manual_seed(random_seed)\n\n train_size = int(train_split * len(dataset))\n test_size = len(dataset) - train_size\n \n trainset, testset = torch.utils.data.random_split(dataset, [train_size, test_size], generator=generator)\n\n # Train and Validation sets\n return torch.utils.data.DataLoader(trainset, batch_size=batch_size, \\\n shuffle=shuffle, num_workers=2, collate_fn = collate_fn), \\\n torch.utils.data.DataLoader(testset, batch_size=batch_size, \\\n shuffle=shuffle, num_workers=2, collate_fn = collate_fn)\n" }, { "alpha_fraction": 0.6036866307258606, "alphanum_fraction": 0.6036866307258606, "avg_line_length": 17, "blob_id": "e07217bccab3ab257354b08d42bfb08ac2440e02", "content_id": "741cda92fc84832c7adcf330c8496f6dadf330ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 34, "num_lines": 12, "path": "/yolo/__init__.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import gc\n\nfrom .train import train\nfrom .predict import predict\n\ndef main(args):\n gc.collect()\n if args.Action == 'train':\n train()\n elif args.Action == 'predict':\n predict()\n gc.collect()\n\n" }, { "alpha_fraction": 0.5549355745315552, "alphanum_fraction": 0.5680882334709167, "avg_line_length": 37.3979606628418, "blob_id": "200c4e1adc5ebd2d064bf31847aeddc65d63defc", "content_id": "e65e94fdf8cd633d61f9ae61dd3dc01f0964f521", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7527, "license_type": "no_license", "max_line_length": 99, "num_lines": 196, "path": "/yolo/train.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\n# import torch.nn.functional as F\nimport torch.optim as optim\n# import torchvision\nimport torchvision.transforms as transforms\n\n# import os, pickle, random\nimport time, sys\n\nimport numpy as np\n# from PIL import Image\nimport argparse\n\nfrom .darknet import DarkNet\nfrom .dataset import *\nfrom .util import *\n\ndef parse_arg():\n parser = argparse.ArgumentParser(description='mmWaveYoLov3 Training module', add_help=True)\n\n parser.add_argument('--cfg', type=str, default='yolov3micro',\n help=\"Name of the network config\")\n parser.add_argument('--pathin', type=str, default='trainset',\n help=\"Input dataset name\")\n\n parser.add_argument('--datasplit', type=float, default=0.8, \n help=\"Dataset split percentage (def: 0.8 (80 (train):20 (validation))\")\n parser.add_argument('--seed', type=float, default=42, \n help=\"Seed for the random shuffle (default: 42, 0 for no shuffling)\")\n parser.add_argument('--bs', type=int, default=8, \n help=\"Batch size (default: 8, 0 for single batch)\")\n parser.add_argument('--ckpt', type=str, default='0.0',\n help=\"Checkpoint name as <'epoch'.'iteration'>\")\n parser.add_argument('--ep', type=int, default=5,\n help=\"Total epoch number (default: 5)\")\n\n parser.add_argument('--lr', type=float, default=1e-5, \n help=\"Learning rate (default: 1e-5)\")\n parser.add_argument('--reso', type=int, default=416,\n help=\"Input image resolution (default: 416)\")\n\n parser.add_argument('--v', type=int, default=0, \n help=\"Verbose (0 minimal (default), 1 normal, 2 all\")\n \n return parser.parse_args(sys.argv[2:])\n\ndef train():\n torch.cuda.empty_cache()\n\n # CONSTANTS\n args = parse_arg()\n pathcfg = f\"cfg/{args.cfg}.cfg\"\n pathin = f\"dataset/{args.pathin}/final\"\n num_workers = 2\n\n # NETWORK\n darknet = DarkNet(pathcfg, args.reso)\n pytorch_total_params = sum(p.numel() for p in darknet.parameters() if p.requires_grad)\n print('# of params: ', pytorch_total_params)\n if args.v > 0:\n print(darknet.module_list)\n\n # LOAD A CHECKPOINT!!!\n start_epoch, start_iteration = [0, 0]\n tlosses, vlosses = [], []\n optimizer, scheduler = None, None\n start_epoch, start_iteration = [int(x) for x in args.ckpt.split('.')]\n if start_epoch != 0 and start_epoch != 0:\n start_epoch, start_iteration, state_dict, \\\n tlosses, vlosses, \\\n optimizer, scheduler = load_checkpoint(\n f'save/checkpoints/',\n int(start_epoch),\n int(start_iteration)\n )\n darknet.load_state_dict(state_dict)\n # ====================================================\n\n # OPTIMIZER & HYPERPARAMETERS\n if optimizer == None:\n # optimizer = optim.SGD(filter(lambda p: p.requires_grad, darknet.parameters()), \\\n # lr=args.lr, momentum=0.9, weight_decay=5e-4, nesterov=True)\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, darknet.parameters()), \\\n lr=args.lr, betas=[0.9,0.999], eps=1e-8, weight_decay=0, amsgrad=False)\n if scheduler == None:\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)\n\n # IMAGE PREPROCESSING!!!\n transform = transforms.Compose([\n # transforms.RandomResizedCrop(size=args.reso, interpolation=3),\n transforms.Resize(size=(args.reso, args.reso), interpolation=3),\n transforms.ColorJitter(brightness=1.5, saturation=1.5, hue=0.2),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor()\n ])\n # ====================================================\n\n # Train and Validation data allocation\n trainloader, validloader = getDataLoaders(pathin, transform, \\\n train_split=args.datasplit, batch_size=args.bs, \\\n num_workers=num_workers, collate_fn=collate, random_seed=args.seed)\n # ====================================================\n\n # Use GPU if available\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n if torch.cuda.device_count() > 1: # Use Multi GPU if available\n darknet = nn.DataParallel(darknet)\n darknet.to(device) # Put the network on device\n if args.v > 0:\n print(next(darknet.parameters()).device)\n\n # TRAIN\n print(f'[LOG] TRAIN | Training set: {len(trainloader.dataset)}')\n print(f'[LOG] TRAIN | Validation set: {len(validloader.dataset)}')\n print(f'[LOG] TRAIN | Starting to train from epoch {start_epoch} iteration {start_iteration}')\n if start_epoch > args.ep:\n print(f'[ERR] TRAIN | Total epochs ({args.ep}) is less then current epoch ({start_epoch})')\n return \n\n for epoch in range(start_epoch, args.ep):\n print(f'[LOG] TRAIN | Starting Epoch #{epoch+1}')\n darknet.train() # set network to training mode\n tloss, vloss = [], []\n start = time.time()\n\n for batch_idx, (_, inputs, targets) in enumerate(trainloader):\n optimizer.zero_grad() # clear the grads from prev passes\n inputs, targets = inputs.to(device), targets.to(device) # Images, Labels\n\n outputs = darknet(inputs, targets, device) # Loss\n outputs['total'].backward() # Gradient calculations\n \n tloss.append(outputs['total'].item())\n optimizer.step()\n\n end = time.time()\n\n # Latest iteration!\n if args.v == 1:\n print(f'x: {outputs[\"x\"].item():.2f} y: {outputs[\"y\"].item():.2f} ')\n elif args.v == 2:\n print(f'x: {outputs[\"x\"].item():.2f} y: {outputs[\"y\"].item():.2f} ' \\\n f'w: {outputs[\"w\"].item():.2f} h: {outputs[\"h\"].item():.2f} ' \\\n f'cls: {outputs[\"cls\"].item():.2f} ' \\\n f'conf: {outputs[\"conf\"].item()}')\n\n if (batch_idx % 100) == 99:\n print(f'[LOG] TRAIN | Batch #{batch_idx+1}\\\n Loss: {np.mean(tloss)}\\\n Time: {end - start}s')\n start = time.time()\n\n # Save train loss for the epoch\n tlosses.append(np.mean(tloss))\n\n scheduler.step()\n\n # VALIDATION\n with torch.no_grad():\n for batch_idx, (_, inputs, targets) in enumerate(validloader):\n inputs, targets = inputs.to(device), targets.to(device)\n\n voutputs = darknet(inputs, targets)\n vloss.append(voutputs['total'].item())\n\n # Validation loss!\n print(f'[LOG] VALID | Epoch #{epoch+1} \\\n Loss: {np.mean(vloss)}')\n \n # Save valid loss for the epoch\n vlosses.append(np.mean(vloss))\n # ====================================================\n\n if (epoch % 10) == 9:\n save_checkpoint(f'save/checkpoints/', epoch+1, 0, {\n 'epoch': epoch+1,\n 'iteration': 0,\n 'state_dict': darknet.state_dict(),\n 'tlosses': tlosses,\n 'vlosses': vlosses,\n 'optimizer': optimizer,\n 'scheduler': scheduler\n })\n plot_losses(tlosses, vlosses, f'save/losses')\n\n save_checkpoint(f'save/checkpoints/', epoch+1, 0, {\n 'epoch': epoch+1,\n 'iteration': 0,\n 'state_dict': darknet.state_dict(),\n 'tlosses': tlosses,\n 'vlosses': vlosses,\n 'optimizer': optimizer,\n 'scheduler': scheduler\n })\n plot_losses(tlosses, vlosses, f'save/losses')\n\n" }, { "alpha_fraction": 0.5024620294570923, "alphanum_fraction": 0.5291341543197632, "avg_line_length": 43.11111068725586, "blob_id": "44bb8dbda9e1c553ba4e66407b6d465852dacd13", "content_id": "621bbc75fb565a963b798f95794f22642b905c5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4874, "license_type": "no_license", "max_line_length": 127, "num_lines": 108, "path": "/dataprep/kalman_tracker.py", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "import matplotlib.animation as animation\r\nimport numpy as np\r\nimport scipy as sp\r\nfrom matplotlib import pyplot as plt\r\n\r\nclass KalmanTracker:\r\n\r\n def __init__(self, id_, s0=None, disable_rejection_check=False):\r\n # Filter-related parameters\r\n self.dt = 66.667e-3 # T_int of the radar TX\r\n # state transition matrix\r\n self.F = np.kron(np.eye(2), np.array([[1, self.dt], [0, 1]]))\r\n # # state-acceleration matrix\r\n self.G = np.array([0.5*(self.dt**2), self.dt]).reshape(2, 1)\r\n # # observation matrix\r\n self.H = np.array([[1, 0, 0, 0],\r\n [0, 0, 1, 0]])\r\n # measurement covariance matrix\r\n self.R = np.array([[0.5, 0], [0, 0.5]]) # [wagner2017radar]\r\n # initial state covariance\r\n self.P = 0.2*np.eye(4)\r\n # state noise variance\r\n self.sigma_a = 8 # [wagner2017radar]\r\n # state noise covariance\r\n self.Q = np.kron(np.eye(2), np.matmul(self.G, self.G.T)*self.sigma_a**2)\r\n self.n = self.F.shape[1]\r\n self.m = self.H.shape[1]\r\n # initial state\r\n self.s = np.zeros((self.n, 1)) if s0 is None else s0\r\n self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)\r\n self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)\r\n self.REJECT_THR = 4.605\r\n self.disable_rejection_check = disable_rejection_check\r\n ######################################################### \r\n # Tracker-related parameters\r\n self.misses_number = 0\r\n self.hits = 0\r\n self.id = id_\r\n self.box = np.array([])\r\n self.state_memory = []\r\n self.identity_label = 'UNK' # initialize as unknown cluster\r\n self.id_dict = {-1: 'UNK', 0: 'S1', 1: 'S2', 2:'S3', 3:'S4'}\r\n # self.id_dict = {-1: 'UNK', 0: 'JP', 1: 'FM', 2:'GP', 3:'RF'}\r\n\r\n def transform_obs(self, z):\r\n z_prime = np.array([z[0]*np.cos(z[1]), z[0]*np.sin(z[1])]).reshape(-1, 1)\r\n return z_prime\r\n\r\n def reject_obs(self, i, S):\r\n chi_squared = np.matmul(np.matmul(i.T, np.linalg.inv(S)), i)[0, 0]\r\n return chi_squared >= self.REJECT_THR\r\n\r\n def predict(self):\r\n # a_x = np.random.normal(0, self.sigma_a)\r\n # a_y = np.random.normal(0, self.sigma_a)\r\n self.s = np.matmul(self.F, self.s) \r\n # check that x has the correct shape\r\n assert self.s.shape == (self.n, 1)\r\n self.P = np.matmul(np.matmul(self.F, self.P), self.F.T) + self.Q\r\n self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)\r\n self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)\r\n return self.s, self.xy\r\n\r\n def update(self, z):\r\n z = self.transform_obs(z)\r\n # innovation\r\n y = z - np.matmul(self.H, self.s)\r\n S = np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R\r\n if (not self.reject_obs(y, S)) or self.disable_rejection_check:\r\n K = np.matmul(np.matmul(self.P, self.H.T), np.linalg.inv(S))\r\n self.s = self.s + np.matmul(K, y)\r\n assert self.s.shape == (self.n, 1)\r\n self.P = np.matmul(np.eye(self.n) - np.matmul(K, self.H), self.P)\r\n self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)\r\n self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)\r\n self.state_memory.append(self.xy)\r\n return self.s, self.xy\r\n else:\r\n self.state_memory.append(self.xy)\r\n return self.s, self.xy\r\n\r\n def get_S(self):\r\n return np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R\r\n \r\n @staticmethod\r\n def get_mahalanobis_distance(x, C):\r\n # returns Mahalanobis distance given the differece vector x and covariance C\r\n return np.matmul(np.matmul(x.T, np.linalg.inv(C)), x)[0, 0]\r\n \r\n @staticmethod\r\n def hungarian_assignment(score_matrix):\r\n # call the scipy implementation of Hungarian alg.\r\n det_idx, tr_idx = sp.optimize.linear_sum_assignment(score_matrix)\r\n unmatched, undetected = [], []\r\n for t in range(score_matrix.shape[1]):\r\n if t not in tr_idx:\r\n undetected.append(t)\r\n for d in range(score_matrix.shape[0]):\r\n if d not in det_idx:\r\n unmatched.append(d)\r\n matches = []\r\n for d, t in zip(det_idx, tr_idx):\r\n matches.append(np.array([d, t]).reshape(1, 2))\r\n if len(matches) == 0:\r\n matches = np.empty((0, 2), dtype=int)\r\n else:\r\n matches = np.concatenate(matches, axis=0)\r\n return matches, np.array(undetected), np.array(unmatched)\r\n\r\n" }, { "alpha_fraction": 0.6684647798538208, "alphanum_fraction": 0.7045980095863342, "avg_line_length": 40.71071243286133, "blob_id": "498eab948b2ae43cb00b5ea7fec584b435cd308e", "content_id": "ce235cb9b49f95b0df0aa86bed488bb1280269e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11797, "license_type": "no_license", "max_line_length": 220, "num_lines": 280, "path": "/README.md", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "YOLO-mMwave-Radar\n\n# README\n\n#### Usage\n\nHelp: `python . --help`\\\nData preprocessing: `python . dataprep --help`\\\ne.g `python . dataprep --pathin <in_dir> --pathout <out_dir> --chext --proc --truth`\\\nTraining: `python . train --help`\\\ne.g `python . predict --pathin testset3 --pathout test --ckpt 80.0 --nms 0.001 --obj 0.005 --video gif`\\\nPrediction: `python . predict --help`\\\ne.g `python . train --pathin trainset --datasplit 0.9 --ckpt 80.0 --ep 500`\\\n\n#### Some Sources\n\n[YOLOv3: An Incremental Improvement (paper)](https://arxiv.org/abs/1804.02767) \\\n[YOLOv3 PyTorch](https://github.com/ecr23xx/yolov3.pytorch/blob/master/src/layers.py) \\\n[YOLOv3 PyTorch (detection)](https://blog.paperspace.com/how-to-implement-a-yolo-object-detector-in-pytorch/) \\\n[PyTorch Network Tranining Tutorial](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html) \\\n[YOLOv3 Tensorflow](https://github.com/qqwweee/keras-yolo3/blob/master/yolo3/model.py) \\\n[YOLOv3 Tensorflow (alternative)](https://towardsdatascience.com/dive-really-deep-into-yolo-v3-a-beginners-guide-9e3d2666280e)\\\n\n#### FOLDER STRUCTURE\n```\n.\n├── ...\n├── cfg # DarkNet config files\n├── dataprep # LIBRARY: Preprocessing\n├── dataset # DATASETS\n│ ├── <set_name> `dataprep --pathin <set_name>\n│ │ ├── chext # Images after channel extraction `dataprep --chext`\n│ │ ├── final # Network-ready images `dataprep --truth`\n│ │ └── proc # Images after `dataprep --proc`\n├── raw # Raw dataset files (.h5)\n├── results # PREDICTIONS \n│ ├── <set_name> `predict --pathin <set_name>`\n│ │ └── pred # Images with predicted and true bbs\n├── save\n│ └── checkpoints # Model checkpoints\n├── yolo # LIBRARY: Object detection (train and predict)\n└── ...\n```\n\n#### Documentation\n\n- Network output params: (`Batch x No of BBs x BB attributes`)\n - `Batch Size`: number of images fed as a batch (e.g 8)\n - `No of BBs`: number of bounding boxes found for each image with full network config (e.g 10647 (usually))\n - `BB attributes`: (e.g 6) `bb_dims` (4) + `obj_score` (1) + `class_scores` (e.g 1 (number of objects)) \n\n\n## TODO & NOTES\n\n- Util (`yolo.util`)\n\t- mAP (`mean_average_precision()`)\n - [mAP with NMS](https://towardsdatascience.com/implementation-of-mean-average-precision-map-with-non-maximum-suppression-f9311eb92522) \\\n - [mAP](https://towardsdatascience.com/evaluating-performance-of-an-object-detection-model-137a349c517b) \\\n\t- mAP over epoch plot (`plot_mAP()`)\n- Hyperparameters check\n - [RayTune](https://pytorch.org/tutorials/beginner/hyperparameter_tuning_tutorial.html)\n\n#### Changes Required (temporal info)\n\n- Data loader (`yolo.dataset`)\n\t- Take sequence of images (instead of single image)\n\n- Model (`yolo.darknet`)\n\t- Add GRU layer\n\t- Combine GRU output with current timestep features\n\t- Add the new loss parameter\n\n- Network (`cfg\\yolov3micro.cfg`)\n - Add/modify GRU layer\n - Add/modify feedforward layer\n - Combine layer (GRU output with current timestep features)\n\t\n\n## ChangeLog\n\n25.05.2021 - EB - Version 1.3.1\n- Working on `plot_precision_recall()`\n- Implemented `correctness()` for TP/FP/FN calculations\n- Implemented `precision_recall()` for cumulative TP and FP, precision and recall calculations\n\n08.04.2021 - EB - Version 1.2\n- Images to avi\n- Fixed multi bb ground truth\n- Fixed folder structure to final version\n\n07.04.2021 - EB - Version 1.1\n- Images to gif\n - [x] Animating results\n- [x] Small truth bb issue may be existing (on w, h translation (matplotlib to PIL?))\n\n05.04.2021 - EB - Finalized dataprep\n- Fixed shuffling in `yolo.dataset`\n- Default learning rate is reduced to 1e-5 from 1e-4\n- `dataprep` is stable\n - `python . dataprep --help`\n\n31.03.2021 - EB - Version 1.0\n- Added `__main__`\n - Check `python . --help`\n - Example train run: `python . train --lr 0.00001 --ep10`\n - Example predict run: `python . predict --cfg test --pathout test/results --ckpt 3.0 --obj 0.2 --nms 0.5`\n - Example dataprep run: `python . data`\n- Renamed `custom.cfg` as `yolov3micro.cfg`\n- Removed class score (`cls`) from loss calculation as we have only 1 class\n- Changed objectiveness (`obj`) loss calculation from MSELoss to BCELoss\n - [x] ~~Objectiveness score loss calculation original uses binary cross entropy, we are using mean squared~~\n- Fixed bb calculation/scale issue\n - [x] ~~Total loss may be wrong (some inputs were skipping due to empty labels)~~\n- [x] On validation loss, keep history and add graphs\n - `yolo.util.plot_losses()`\n- Added some random image manipulations/transformations for training input\n - [x] Check the torchvision.transforms functionality\n- [x] Remove empty labelled data completely\n- Moved and renamed `dataprep.py` to `./dataprep` as `truth.py`\n- Fixed functionality of batch prediction\n\n25.03.2021 - EB - First version\n- Reintroducing class and class loss\n- `yolo.getDataLoaders()`: dataset allocation for train/val or single set\n - with `random_seed` parameter we can get the same shuffle everytime (useful for testing)\n- Validation is now carried out right after each epoch\n- [x] Output pipeline\n- [x] Apply Non-Max Suppression\n- [x] Detection (a working version)\n\n23.03.2021 - EB - custom network\n- Changed `lr` of `optim.SGD()` to 0.0001\n- [x] Reduce the network\n - Reduced number of layers from 106 to 52 (best we can do without reducing the `YOLO` layers)\n - Computation time is reduced by ~1/3\n- [x] Save the model and get weights for detection\n - `yolo.util.save_checkpoint()`, `yolo.util.load_checkpoint()` (for training)\n - `yolo.darknet.load_weights()` (for detections, still to be tested)\n- [x] Check if network output bounding box attributes are relative to the center of the prediction\n\n18.03.2021 - EB - Learning\n- Filtering the empty labels with `collate()` at `MmwaveDataset`\n- Removed 'class' score attribute from everywhere\n\n17.03.2021 - EB - Filtering empty labels\n- Added new `./checkpoints` folder for saving network training status\n- Loss is returning 'nan' after 2nd or 3rd iteration\n\n16.03.2021 - EB - Training\n- Label bounding box is fine now\n- Label class format should be fixed\n- Non-training seems to be working\n- [x] `YOLOLoss()`: Loss function\n- [x] `NMSLayer()`: Non-max suppression\n\n05.03.2021 - EB \n- Working `torch.autograd` and `loss.backward()`\n\n25.02.2021 - EB\n- [x] Network training (a working version)\n- [x] Input pipeline\n- Didn't remove classes after all. Now there is only 1 class (person)\n- Need some work on the network to raise the performance\n\n22.02.2021 - EB\n- Input doesn't match the parameter size for some reason\n- Rest of the input pipelining is done!\n\n16.02.2021 - EB\n- `yolotrain.py` not quite working at the moment, almost there\n- bb are a part of the filename now\n- Dataset shuffling for train and test sets\n\n15.02.2021 - EB\n- Pre-proccessing should be done.\n- Package `dataprep` is added. `dataprep.py` is the main data preparation file now.\n\n15.01.2021 - EB\n- Working on `train.py` which is the training module of the network.\\\n Added `detect.py` file which is used for input and output pipelining (taking input images, creating output images with bbs). Check `arg_parse()` function for input commands. Usage:\n`python detect.py --images dog-cycle-car.png --det det`\n\n13.01.2021 - EB\n- Added `Supporter` class in \"dataprep/utils.py\". Bounding box calculation for ground truth data is `label2bb()` and a function for plotting with/without BB is `plotRaw()`. Didn't compile the file, it should work though.\n\n\n07.04.2021 - EB - Version 1.1\n- Predictions to gif\n - [x] Animating results\n- [x] Small truth bb issue may be existing (on w, h translation (matplotlib to PIL?))\n\n05.04.2021 - EB - Finalized dataprep\n- Fixed shuffling in `yolo.dataset`\n- Default learning rate is reduced to 1e-5 from 1e-4\n- `dataprep` is stable\n - `python . dataprep --help`\n\n31.03.2021 - EB - Version 1.0\n- Added `__main__`\n - Check `python . --help`\n - Example train run: `python . train --lr 0.00001 --ep10`\n - Example predict run: `python . predict --cfg test --pathout test/results --ckpt 3.0 --obj 0.2 --nms 0.5`\n - Example dataprep run: `python . data`\n- Renamed `custom.cfg` as `yolov3micro.cfg`\n- Removed class score (`cls`) from loss calculation as we have only 1 class\n- Changed objectiveness (`obj`) loss calculation from MSELoss to BCELoss\n - [x] ~~Objectiveness score loss calculation original uses binary cross entropy, we are using mean squared~~\n- Fixed bb calculation/scale issue\n - [x] ~~Total loss may be wrong (some inputs were skipping due to empty labels)~~\n- [x] On validation loss, keep history and add graphs\n - `yolo.util.plot_losses()`\n- Added some random image manipulations/transformations for training input\n - [x] Check the torchvision.transforms functionality\n- [x] Remove empty labelled data completely\n- Moved and renamed `dataprep.py` to `./dataprep` as `truth.py`\n- Fixed functionality of batch prediction\n\n24.03.2021 - EB\n- Reintroducing class and class loss\n- `yolo.getDataLoaders()`: dataset allocation for train/val or single set\n - with `random_seed` parameter we can get the same shuffle everytime (useful for testing)\n- Validation is now carried out right after each epoch\n- [x] Output pipeline\n- [x] Apply Non-Max Suppression\n- [x] Detection (a working version)\n\n21.03.2021 - EB\n- Changed `lr` of `optim.SGD()` to 0.0001\n- [x] Reduce the network\n - Reduced number of layers from 106 to 52 (best we can do without reducing the `YOLO` layers)\n - Computation time is reduced by ~1/3\n- [x] Save the model and get weights for detection\n - `yolo.util.save_checkpoint()`, `yolo.util.load_checkpoint()` (for training)\n - `yolo.darknet.load_weights()` (for detections, still to be tested)\n- [x] Check if network output bounding box attributes are relative to the center of the prediction\n\n18.03.2021 - EB\n- Filtering the empty labels with `collate()` at `MmwaveDataset`\n- Removed 'class' score attribute from everywhere\n\n17.03.2021 - EB\n- Added new `./checkpoints` folder for saving network training status\n- Loss is returning 'nan' after 2nd or 3rd iteration\n\n16.03.2021 - EB\n- Label bounding box is fine now\n- Label class format should be fixed\n- Non-training seems to be working\n- [x] `YOLOLoss()`: Loss function\n- [x] `NMSLayer()`: Non-max suppression\n\n05.03.2021 - EB\n- Working `torch.autograd` and `loss.backward()`\n\n25.02.2021 - EB\n- [x] Network training (a working version)\n- [x] Input pipeline\n- Didn't remove classes after all. Now there is only 1 class (person)\n- Need some work on the network to raise the performance\n\n22.02.2021 - EB\n- Input doesn't match the parameter size for some reason\n- Rest of the input pipelining is done!\n\n16.02.2021 - EB\n- `yolotrain.py` not quite working at the moment, almost there\n- bb are a part of the filename now\n- Dataset shuffling for train and test sets\n\n15.02.2021 - EB\n- Pre-proccessing should be done.\n- Package `dataprep` is added. `dataprep.py` is the main data preparation file now.\n\n15.01.2021 - EB\n- Working on `train.py` which is the training module of the network.\\\n Added `detect.py` file which is used for input and output pipelining (taking input images, creating output images with bbs). Check `arg_parse()` function for input commands. Usage:\n`python detect.py --images dog-cycle-car.png --det det`\n\n13.01.2021 - EB\n- Added `Supporter` class in \"dataprep/utils.py\". Bounding box calculation for ground truth data is `label2bb()` and a function for plotting with/without BB is `plotRaw()`. Didn't compile the file, it should work though.\n" }, { "alpha_fraction": 0.7525083422660828, "alphanum_fraction": 0.7591972947120667, "avg_line_length": 58.20000076293945, "blob_id": "4df5f065dda8cc73ea702a1368d8f07c13a57677", "content_id": "dae93813592e7c87a8137fc0cee5ef0082289267", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 299, "license_type": "no_license", "max_line_length": 94, "num_lines": 5, "path": "/dataprep/readme.txt", "repo_name": "enverbashirov/YOLOv3-mMwave-Radar", "src_encoding": "UTF-8", "text": "\nUsage: \n1) Use processing.py to perform FFT and denoising on the raw radar data that you \nhave extracted using 'channel_extraction.py' (this file is in the shared folder with Alberto).\n2) Then call \"python main.py 'path to RDA folder'\" to perform clustering and \ntracking on a specific RDA map.\n\n\n" } ]
16
michelequinto/xUDP
https://github.com/michelequinto/xUDP
7aa76e75c3a980bf0a22f0ad5a9b2560a5269071
1572efea91a89e900c01020be236946af5f33372
7aa251323b556b4a6be3fcae99856d0dbce590a5
refs/heads/master
2020-12-31T04:17:29.036024
2018-03-16T15:32:06
2018-03-16T15:32:06
63,637,021
1
2
null
2016-07-18T21:07:29
2016-07-18T21:07:30
2014-10-03T16:21:56
VHDL
[ { "alpha_fraction": 0.43287035822868347, "alphanum_fraction": 0.44907405972480774, "avg_line_length": 39.70000076293945, "blob_id": "bc1158c76f883b73a0351f3553a3bf64968d816d", "content_id": "969b8326ed8515c8af807497a85f4a3f4f10d836", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 107, "num_lines": 10, "path": "/syn/xilinx/src/Manifest.py", "repo_name": "michelequinto/xUDP", "src_encoding": "UTF-8", "text": "files = [ \"xaui_init.vhd\",\n \"mdio/mdio.v\",\n \"mdio/mdio_ctrl.vhd\",\n \"vsc8486_init.vhd\",\n\t \"clk_wiz_v3_3_0.vhd\",\n \"xUDP_top.vhd\",\n __import__('os').path.relpath( __import__('os').environ.get('XILINX') ) + \"/verilog/src/glbl.v\" ]\n\nmodules = { \"local\" : [ \"../../../rtl/vhdl/ipcores/xilinx/xaui\"]}\n# \"../../../rtl/verilog/ipcores/xge_mac\" ]}\n \n" }, { "alpha_fraction": 0.5617021322250366, "alphanum_fraction": 0.5617021322250366, "avg_line_length": 40.47058868408203, "blob_id": "1545c84bc649555e07517958345bed65ce090ef4", "content_id": "d6ff7deb6f1da8404fa84f8505ad7f266613bd96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 705, "license_type": "no_license", "max_line_length": 84, "num_lines": 17, "path": "/bench/sv/FullDesign/tests/genericTest/Manifest.py", "repo_name": "michelequinto/xUDP", "src_encoding": "UTF-8", "text": "action = \"simulation\"\ninclude_dirs = [ \"../../environment\", \"../../sequences/\"]\n\nvlog_opt = '+incdir+' + \\\n__import__('os').environ.get('QUESTA_MVC_HOME') + '/questa_mvc_src/sv+' + \\\n__import__('os').environ.get('QUESTA_MVC_HOME') + '/questa_mvc_src/sv/mvc_base+' + \\\n__import__('os').environ.get('QUESTA_MVC_HOME') + '/include+' + \\\n__import__('os').environ.get('QUESTA_MVC_HOME') + '/examples/ethernet/common+' + \\\n__import__('os').environ.get('QUESTA_MVC_HOME') + '/questa_mvc_src/sv/ethernet/ '\n\ntop_module = \"top\"\nsim_tool = \"modelsim\"\n\nfiles = [\"src/genericTest.sv\"]\n\nmodules = { \"local\" : [ \"../../../../../syn/xilinx/src\",\n \"../../../../../rtl/verilog/ipcores/xge_mac/\" ] }\n" }, { "alpha_fraction": 0.7191079258918762, "alphanum_fraction": 0.7191079258918762, "avg_line_length": 60.407405853271484, "blob_id": "f0b4d70a1d1f6b74fbc92100654adc48bfa1f1e7", "content_id": "da8dcc0d89cb181310b36ee499c2bcc2a21205f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1659, "license_type": "no_license", "max_line_length": 97, "num_lines": 27, "path": "/rtl/verilog/ipcores/xge_mac/.svn/pristine/f0/f0b4d70a1d1f6b74fbc92100654adc48bfa1f1e7.svn-base", "repo_name": "michelequinto/xUDP", "src_encoding": "UTF-8", "text": "\nemacs -batch verilog/sync_clk_wb.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/sync_clk_xgmii_tx.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/sync_clk_core.v -l ../custom.el -f verilog-auto -f save-buffer\n\nemacs -batch verilog/wishbone_if.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/fault_sm.v -l ../custom.el -f verilog-auto -f save-buffer\n\nemacs -batch verilog/tx_stats_fifo.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/rx_stats_fifo.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/stats_sm.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/stats.v -l ../custom.el -f verilog-auto -f save-buffer\n\nemacs -batch verilog/rx_dequeue.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/rx_enqueue.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/rx_data_fifo.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/rx_hold_fifo.v -l ../custom.el -f verilog-auto -f save-buffer\n\nemacs -batch verilog/tx_dequeue.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/tx_enqueue.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/tx_data_fifo.v -l ../custom.el -f verilog-auto -f save-buffer\nemacs -batch verilog/tx_hold_fifo.v -l ../custom.el -f verilog-auto -f save-buffer\n\nemacs -batch verilog/xge_mac.v -l ../custom.el -f verilog-auto -f save-buffer\n\nemacs -batch examples/test_chip.v -l ../custom.el -f verilog-auto -f save-buffer\n\nemacs -batch ../tbench/verilog/tb_xge_mac.v -l ../../rtl/custom.el -f verilog-auto -f save-buffer\n" }, { "alpha_fraction": 0.5204081535339355, "alphanum_fraction": 0.6061224341392517, "avg_line_length": 60.25, "blob_id": "45ed436e8f580bb40325ad97ec679cbad5c73ea2", "content_id": "3374a78a8830c12d328cfb3f148f5f3864fa929e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "no_license", "max_line_length": 73, "num_lines": 8, "path": "/rtl/vhdl/ipcores/xilinx/xaui/Manifest.py", "repo_name": "michelequinto/xUDP", "src_encoding": "UTF-8", "text": "files = [ \"./xaui_v10_4.vhd\",\n \"./xaui_v10_4/simulation/demo_tb.vhd\",\n \"./xaui_v10_4/example_design/xaui_v10_4_gtx_wrapper_gtx.vhd\",\n \"./xaui_v10_4/example_design/xaui_v10_4_example_design.vhd\",\n \"./xaui_v10_4/example_design/xaui_v10_4_tx_sync.vhd\",\n \"./xaui_v10_4/example_design/xaui_v10_4_gtx_wrapper.vhd\",\n \"./xaui_v10_4/example_design/xaui_v10_4_block.vhd\",\n \"./xaui_v10_4/example_design/xaui_v10_4_chanbond_monitor.vhd\" ]\n" }, { "alpha_fraction": 0.4105839431285858, "alphanum_fraction": 0.4197080433368683, "avg_line_length": 26.399999618530273, "blob_id": "ae6268ba8f2b0ca712582483dbcf6aada5fd5c17", "content_id": "88fafbd3d15a8700d9e82439629b02658edaf17c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 36, "num_lines": 20, "path": "/rtl/vhdl/Manifest.py", "repo_name": "michelequinto/xUDP", "src_encoding": "UTF-8", "text": "files = [ \"utilities.vhd\",\n \"arp_types.vhd\",\n \"axi_types.vhd\",\n \"ipv4_types.vhd\",\n \"xUDP_Common_pkg.vhdl\",\n \"axi_tx_crossbar.vhd\",\n \"arp_REQ.vhd\",\n \"arp_RX.vhd\",\n \"arp_STORE_br.vhd\",\n \"arp_SYNC.vhd\",\n \"arp_TX.vhd\",\n \"arp.vhd\",\n \"IPv4_RX.vhd\",\n \"IPv4_TX.vhd\",\n \"IPv4.vhd\",\n \"IPv4_Complete_nomac.vhd\",\n \"UDP_RX.vhd\",\n \"UDP_TX.vhd\",\n \"UDP_Complete_nomac.vhd\",\n \"xge_mac_axi.vhd\"]\n" }, { "alpha_fraction": 0.5118343234062195, "alphanum_fraction": 0.517011821269989, "avg_line_length": 35.513511657714844, "blob_id": "a7c0c3f6fc3c8f8d7f6a5c68065a957906c16d9f", "content_id": "cc50393a6977fa8dc8be5eb04119c3a53b49f675", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1352, "license_type": "no_license", "max_line_length": 130, "num_lines": 37, "path": "/rtl/verilog/ipcores/xge_mac/Manifest.py", "repo_name": "michelequinto/xUDP", "src_encoding": "UTF-8", "text": "action = \"simulation\"\n\ninclude_dirs = [\"./include\"]\n\n#vlog_opt = '+incdir+' + \\\n#\"../../../../../rtl/verilog/ipcores/xge_mac/include\"\n#__import__('os').path.dirname(__import__('os').path.abspath(__import__('inspect').getfile(__import__('inspect').currentframe())))\n#os.path.abspath(__import__('inspect').getfile(inspect.currentframe())))\n\n\nfiles = [ \"./include/utils.v\",\n \"./include/CRC32_D64.v\",\n \"./include/CRC32_D8.v\", \n \"./verilog/tx_dequeue.v\",\n \"./verilog/sync_clk_core.v\",\n \"./verilog/generic_fifo.v\",\n \"./verilog/stats.v\",\n \"./verilog/rx_hold_fifo.v\",\n \"./verilog/tx_enqueue.v\",\n \"./verilog/rx_dequeue.v\",\n \"./verilog/sync_clk_wb.v\",\n \"./verilog/tx_data_fifo.v\",\n \"./verilog/fault_sm.v\",\n \"./verilog/generic_mem_small.v\",\n \"./verilog/wishbone_if.v\",\n \"./verilog/generic_mem_medium.v\",\n \"./verilog/meta_sync_single.v\",\n \"./verilog/stats_sm.v\",\n \"./verilog/rx_stats_fifo.v\",\n \"./verilog/tx_hold_fifo.v\",\n \"./verilog/rx_data_fifo.v\",\n \"./verilog/xge_mac.v\",\n \"./verilog/rx_enqueue.v\",\n \"./verilog/generic_fifo_ctrl.v\",\n \"./verilog/sync_clk_xgmii_tx.v\",\n \"./verilog/tx_stats_fifo.v\",\n \"./verilog/meta_sync.v\" ]\n\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 7.642857074737549, "blob_id": "8d388bdb02599d72b04d65e0f7b2efceeee248ba", "content_id": "d65bf71a62dca978e96f0e841bd9b8313194c149", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 120, "license_type": "no_license", "max_line_length": 32, "num_lines": 14, "path": "/README.md", "repo_name": "michelequinto/xUDP", "src_encoding": "UTF-8", "text": "xUDP\n====\n\n10G UDP stack\n\n\nDirectory Layout\n----------------\n\nAs per opencores recommendation:\n- doc\n- rtl\n- bench\n- syn" }, { "alpha_fraction": 0.6034720540046692, "alphanum_fraction": 0.6209385991096497, "avg_line_length": 47.91947937011719, "blob_id": "0d7da79ff553c46910ffb986870551945565403d", "content_id": "3dee7bfaf3b2981eeb4ab591dcfdd06b079b4a86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 18836, "license_type": "no_license", "max_line_length": 388, "num_lines": 385, "path": "/bench/sv/FullDesign/tests/RandomPacketsTest/Makefile", "repo_name": "michelequinto/xUDP", "src_encoding": "UTF-8", "text": "########################################\n# This file was generated by hdlmake #\n# http://ohwr.org/projects/hdl-make/ #\n########################################\n\n## variables #############################\nPWD := $(shell pwd)\n\nMODELSIM_INI_PATH := /opt/questa_sv_afv_10.4/questasim/bin/..\n\nVCOM_FLAGS := -quiet -modelsimini modelsim.ini\nVSIM_FLAGS :=\nVLOG_FLAGS := -quiet -modelsimini modelsim.ini \nVERILOG_SRC := ../../../../../rtl/verilog/ipcores/xge_mac/verilog/stats.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/rx_hold_fifo.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/sync_clk_core.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/tx_enqueue.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/rx_data_fifo.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/sync_clk_wb.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/CRC32_D8.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/rx_enqueue.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/meta_sync.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/sync_clk_xgmii_tx.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/generic_mem_small.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/tx_data_fifo.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/utils.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/generic_mem_medium.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/generic_fifo_ctrl.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/rx_stats_fifo.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/generic_fifo.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/wishbone_if.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/fault_sm.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/xge_mac.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/tx_stats_fifo.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/stats_sm.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/meta_sync_single.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/rx_dequeue.v \\\nsrc/RandomPacketsTest.sv \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/tx_dequeue.v \\\n../../../../../../../../../opt/Xilinx/14.7/ISE_DS/ISE/verilog/src/glbl.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/verilog/tx_hold_fifo.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/CRC32_D64.v \\\n\nVERILOG_OBJ := work/stats/.stats_v \\\nwork/rx_hold_fifo/.rx_hold_fifo_v \\\nwork/sync_clk_core/.sync_clk_core_v \\\nwork/tx_enqueue/.tx_enqueue_v \\\nwork/rx_data_fifo/.rx_data_fifo_v \\\nwork/sync_clk_wb/.sync_clk_wb_v \\\nwork/CRC32_D8/.CRC32_D8_v \\\nwork/rx_enqueue/.rx_enqueue_v \\\nwork/meta_sync/.meta_sync_v \\\nwork/sync_clk_xgmii_tx/.sync_clk_xgmii_tx_v \\\nwork/generic_mem_small/.generic_mem_small_v \\\nwork/tx_data_fifo/.tx_data_fifo_v \\\nwork/utils/.utils_v \\\nwork/generic_mem_medium/.generic_mem_medium_v \\\nwork/generic_fifo_ctrl/.generic_fifo_ctrl_v \\\nwork/rx_stats_fifo/.rx_stats_fifo_v \\\nwork/generic_fifo/.generic_fifo_v \\\nwork/wishbone_if/.wishbone_if_v \\\nwork/fault_sm/.fault_sm_v \\\nwork/xge_mac/.xge_mac_v \\\nwork/tx_stats_fifo/.tx_stats_fifo_v \\\nwork/stats_sm/.stats_sm_v \\\nwork/meta_sync_single/.meta_sync_single_v \\\nwork/rx_dequeue/.rx_dequeue_v \\\nwork/RandomPacketsTest/.RandomPacketsTest_sv \\\nwork/tx_dequeue/.tx_dequeue_v \\\nwork/glbl/.glbl_v \\\nwork/tx_hold_fifo/.tx_hold_fifo_v \\\nwork/CRC32_D64/.CRC32_D64_v \\\n\nVHDL_SRC := ../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_block.vhd \\\n../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4.vhd \\\n../../../../../syn/xilinx/src/xaui_init.vhd \\\n../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_gtx_wrapper_gtx.vhd \\\n../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/simulation/demo_tb.vhd \\\n../../../../../syn/xilinx/src/xUDP_top.vhd \\\n../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_example_design.vhd \\\n../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_chanbond_monitor.vhd \\\n../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_tx_sync.vhd \\\n../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_gtx_wrapper.vhd \\\n\nVHDL_OBJ := work/xaui_v10_4_block/.xaui_v10_4_block_vhd \\\nwork/xaui_v10_4/.xaui_v10_4_vhd \\\nwork/xaui_init/.xaui_init_vhd \\\nwork/xaui_v10_4_gtx_wrapper_gtx/.xaui_v10_4_gtx_wrapper_gtx_vhd \\\nwork/demo_tb/.demo_tb_vhd \\\nwork/xUDP_top/.xUDP_top_vhd \\\nwork/xaui_v10_4_example_design/.xaui_v10_4_example_design_vhd \\\nwork/xaui_v10_4_chanbond_monitor/.xaui_v10_4_chanbond_monitor_vhd \\\nwork/xaui_v10_4_tx_sync/.xaui_v10_4_tx_sync_vhd \\\nwork/xaui_v10_4_gtx_wrapper/.xaui_v10_4_gtx_wrapper_vhd \\\n\nLIBS := work\nLIB_IND := work/.work\n## rules #################################\nsim: sim_pre_cmd modelsim.ini $(LIB_IND) $(VERILOG_OBJ) $(VHDL_OBJ)\n$(VERILOG_OBJ) : modelsim.ini\n$(VHDL_OBJ): $(LIB_IND) modelsim.ini\n\nsim_pre_cmd:\n\t\t\n\nsim_post_cmd: sim\n\t\t\n\nmodelsim.ini: $(MODELSIM_INI_PATH)/modelsim.ini\n\t\tcp $< . 2>&1\nclean:\n\t\trm -rf ./modelsim.ini $(LIBS) transcript *.vcd *.wlf\n.PHONY: clean sim_pre_cmd sim_post_cmd\n\nwork/.work:\n\t(vlib work && vmap -modelsimini modelsim.ini work && touch work/.work )|| rm -rf work \n\nwork/stats/.stats_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/stats.v \\\nwork/rx_stats_fifo/.rx_stats_fifo_v \\\nwork/tx_stats_fifo/.tx_stats_fifo_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v \\\nwork/stats_sm/.stats_sm_v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/rx_hold_fifo/.rx_hold_fifo_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/rx_hold_fifo.v \\\nwork/generic_fifo/.generic_fifo_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/sync_clk_core/.sync_clk_core_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/sync_clk_core.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/tx_enqueue/.tx_enqueue_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/tx_enqueue.v \\\nwork/CRC32_D8/.CRC32_D8_v \\\nwork/utils/.utils_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v \\\nwork/CRC32_D64/.CRC32_D64_v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/rx_data_fifo/.rx_data_fifo_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/rx_data_fifo.v \\\nwork/generic_fifo/.generic_fifo_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/sync_clk_wb/.sync_clk_wb_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/sync_clk_wb.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v \\\nwork/meta_sync/.meta_sync_v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/CRC32_D8/.CRC32_D8_v: ../../../../../rtl/verilog/ipcores/xge_mac/include/CRC32_D8.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/include $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/rx_enqueue/.rx_enqueue_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/rx_enqueue.v \\\nwork/CRC32_D8/.CRC32_D8_v \\\nwork/utils/.utils_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v \\\nwork/CRC32_D64/.CRC32_D64_v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/meta_sync/.meta_sync_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/meta_sync.v \\\nwork/meta_sync_single/.meta_sync_single_v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/sync_clk_xgmii_tx/.sync_clk_xgmii_tx_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/sync_clk_xgmii_tx.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v \\\nwork/meta_sync/.meta_sync_v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/generic_mem_small/.generic_mem_small_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/generic_mem_small.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/tx_data_fifo/.tx_data_fifo_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/tx_data_fifo.v \\\nwork/generic_fifo/.generic_fifo_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/utils/.utils_v: ../../../../../rtl/verilog/ipcores/xge_mac/include/utils.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/include $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/generic_mem_medium/.generic_mem_medium_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/generic_mem_medium.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/generic_fifo_ctrl/.generic_fifo_ctrl_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/generic_fifo_ctrl.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/rx_stats_fifo/.rx_stats_fifo_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/rx_stats_fifo.v \\\nwork/generic_fifo/.generic_fifo_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/generic_fifo/.generic_fifo_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/generic_fifo.v \\\nwork/generic_fifo_ctrl/.generic_fifo_ctrl_v \\\nwork/generic_mem_small/.generic_mem_small_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v \\\nwork/generic_mem_medium/.generic_mem_medium_v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/wishbone_if/.wishbone_if_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/wishbone_if.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/fault_sm/.fault_sm_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/fault_sm.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/xge_mac/.xge_mac_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/xge_mac.v \\\nwork/stats/.stats_v \\\nwork/fault_sm/.fault_sm_v \\\nwork/tx_dequeue/.tx_dequeue_v \\\nwork/rx_hold_fifo/.rx_hold_fifo_v \\\nwork/tx_enqueue/.tx_enqueue_v \\\nwork/rx_data_fifo/.rx_data_fifo_v \\\nwork/sync_clk_wb/.sync_clk_wb_v \\\nwork/rx_dequeue/.rx_dequeue_v \\\nwork/rx_enqueue/.rx_enqueue_v \\\nwork/tx_hold_fifo/.tx_hold_fifo_v \\\nwork/sync_clk_xgmii_tx/.sync_clk_xgmii_tx_v \\\nwork/wishbone_if/.wishbone_if_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v \\\nwork/tx_data_fifo/.tx_data_fifo_v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/tx_stats_fifo/.tx_stats_fifo_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/tx_stats_fifo.v \\\nwork/generic_fifo/.generic_fifo_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/stats_sm/.stats_sm_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/stats_sm.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/meta_sync_single/.meta_sync_single_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/meta_sync_single.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/rx_dequeue/.rx_dequeue_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/rx_dequeue.v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/RandomPacketsTest/.RandomPacketsTest_sv: src/RandomPacketsTest.sv \\\n../../environment/xUDP_pkg.sv \\\n../../environment/clk_reset.sv \\\n../../environment/xUDP_config.sv \\\n../genericTest/src/genericTest.sv \\\n../genericTest/src/FullTest_pkg.sv \\\n../../environment/xUDP_env.sv \\\n../../environment/top.sv\n\t\tvlog -work work $(VLOG_FLAGS) -sv +incdir+../../environment+../../sequences+../genericTest/src+src +incdir+/opt/questa_verification_ip_10.3c/questa_mvc_src/sv+/opt/questa_verification_ip_10.3c/questa_mvc_src/sv/mvc_base+/opt/questa_verification_ip_10.3c/include+/opt/questa_verification_ip_10.3c/examples/ethernet/common+/opt/questa_verification_ip_10.3c/questa_mvc_src/sv/ethernet/ $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/tx_dequeue/.tx_dequeue_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/tx_dequeue.v \\\nwork/CRC32_D8/.CRC32_D8_v \\\nwork/utils/.utils_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v \\\nwork/CRC32_D64/.CRC32_D64_v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/glbl/.glbl_v: ../../../../../../../../../opt/Xilinx/14.7/ISE_DS/ISE/verilog/src/glbl.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../../../../../opt/Xilinx/14.7/ISE_DS/ISE/verilog/src $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/tx_hold_fifo/.tx_hold_fifo_v: ../../../../../rtl/verilog/ipcores/xge_mac/verilog/tx_hold_fifo.v \\\nwork/generic_fifo/.generic_fifo_v \\\n../../../../../rtl/verilog/ipcores/xge_mac/include/defines.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/verilog $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\nwork/CRC32_D64/.CRC32_D64_v: ../../../../../rtl/verilog/ipcores/xge_mac/include/CRC32_D64.v\n\t\tvlog -work work $(VLOG_FLAGS) +incdir+../../../../../rtl/verilog/ipcores/xge_mac/include+../../../../../rtl/verilog/ipcores/xge_mac/include $<\n\t\t@mkdir -p $(dir $@) && touch $@ \n\n\n\nwork/xaui_v10_4_block/.xaui_v10_4_block_vhd: ../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_block.vhd \\\nwork/xaui_v10_4/.xaui_v10_4_vhd \\\nwork/xaui_v10_4_chanbond_monitor/.xaui_v10_4_chanbond_monitor_vhd \\\nwork/xaui_v10_4_tx_sync/.xaui_v10_4_tx_sync_vhd \\\nwork/xaui_v10_4_gtx_wrapper/.xaui_v10_4_gtx_wrapper_vhd\n\t\tvcom $(VCOM_FLAGS) -work work $< \n\t\t@mkdir -p $(dir $@) && touch $@\n\n\nwork/xaui_v10_4/.xaui_v10_4_vhd: ../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4.vhd\n\t\tvcom $(VCOM_FLAGS) -work work $< \n\t\t@mkdir -p $(dir $@) && touch $@\n\n\nwork/xaui_init/.xaui_init_vhd: ../../../../../syn/xilinx/src/xaui_init.vhd\n\t\tvcom $(VCOM_FLAGS) -work work $< \n\t\t@mkdir -p $(dir $@) && touch $@\n\n\nwork/xaui_v10_4_gtx_wrapper_gtx/.xaui_v10_4_gtx_wrapper_gtx_vhd: ../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_gtx_wrapper_gtx.vhd\n\t\tvcom $(VCOM_FLAGS) -work work $< \n\t\t@mkdir -p $(dir $@) && touch $@\n\n\nwork/demo_tb/.demo_tb_vhd: ../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/simulation/demo_tb.vhd \\\nwork/xaui_v10_4_example_design/.xaui_v10_4_example_design_vhd\n\t\tvcom $(VCOM_FLAGS) -work work $< \n\t\t@mkdir -p $(dir $@) && touch $@\n\n\nwork/xUDP_top/.xUDP_top_vhd: ../../../../../syn/xilinx/src/xUDP_top.vhd \\\nwork/xaui_v10_4_block/.xaui_v10_4_block_vhd \\\nwork/xge_mac/.xge_mac_v\n\t\tvcom $(VCOM_FLAGS) -work work $< \n\t\t@mkdir -p $(dir $@) && touch $@\n\n\nwork/xaui_v10_4_example_design/.xaui_v10_4_example_design_vhd: ../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_example_design.vhd \\\nwork/xaui_v10_4_block/.xaui_v10_4_block_vhd\n\t\tvcom $(VCOM_FLAGS) -work work $< \n\t\t@mkdir -p $(dir $@) && touch $@\n\n\nwork/xaui_v10_4_chanbond_monitor/.xaui_v10_4_chanbond_monitor_vhd: ../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_chanbond_monitor.vhd\n\t\tvcom $(VCOM_FLAGS) -work work $< \n\t\t@mkdir -p $(dir $@) && touch $@\n\n\nwork/xaui_v10_4_tx_sync/.xaui_v10_4_tx_sync_vhd: ../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_tx_sync.vhd\n\t\tvcom $(VCOM_FLAGS) -work work $< \n\t\t@mkdir -p $(dir $@) && touch $@\n\n\nwork/xaui_v10_4_gtx_wrapper/.xaui_v10_4_gtx_wrapper_vhd: ../../../../../rtl/vhdl/ipcores/xilinx/xaui/xaui_v10_4/example_design/xaui_v10_4_gtx_wrapper.vhd \\\nwork/xaui_v10_4_gtx_wrapper_gtx/.xaui_v10_4_gtx_wrapper_gtx_vhd\n\t\tvcom $(VCOM_FLAGS) -work work $< \n\t\t@mkdir -p $(dir $@) && touch $@\n\n\n" } ]
8