repo_name
stringlengths
5
114
repo_url
stringlengths
24
133
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
directory_id
stringlengths
40
40
branch_name
stringclasses
209 values
visit_date
unknown
revision_date
unknown
committer_date
unknown
github_id
int64
9.83k
683M
star_events_count
int64
0
22.6k
fork_events_count
int64
0
4.15k
gha_license_id
stringclasses
17 values
gha_created_at
unknown
gha_updated_at
unknown
gha_pushed_at
unknown
gha_language
stringclasses
115 values
files
listlengths
1
13.2k
num_files
int64
1
13.2k
RoboBrainCode/Backend
https://github.com/RoboBrainCode/Backend
ca722b060014bc068464ed80c5b9a1bf8cf205aa
534c1c157fbf2a3ddad054247464c0e814edd247
121cdf2920efb0f23ae81d4f00b726dd6a406488
refs/heads/master
"2021-03-27T19:21:34.499178"
"2014-12-03T21:53:33"
"2014-12-03T21:53:33"
23,906,767
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.639837384223938, "alphanum_fraction": 0.6476964950561523, "avg_line_length": 34.642513275146484, "blob_id": "07e4dd385f8960c05cbd593335cba1cd93b743ab", "content_id": "75d4bfbd25fca5fe4e7aa71cb604bde49209d6d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7380, "license_type": "no_license", "max_line_length": 120, "num_lines": 207, "path": "/feed/views.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse\nfrom feed.models import BrainFeeds, ViewerFeed, GraphFeedback\nimport json\nimport numpy as np\nfrom django.core import serializers\nimport dateutil.parser\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.db.transaction import commit_on_success\n\n# This is a temporary function. It will be later moved to learning_plugins\ndef save_graph_feedback(request):\n\n _id_node = (request.GET.get('id','-1')) # default k=10\n _feedback_type = request.GET.get('feedback_type','')\n _node_handle = request.GET.get('node_handle','')\n _action_type = request.GET.get('action_type','')\n graph_feedback = GraphFeedback(\n id_node = _id_node,\n feedback_type = _feedback_type,\n node_handle = _node_handle,\n action_type = _action_type\n )\n graph_feedback.save()\n\n return HttpResponse(json.dumps(graph_feedback.to_json()), content_type=\"application/json\")\n \n\n# Returns k most recent feeds from BrainFeed table.\ndef return_top_k_feeds(request):\n # Number of feeds required\n top_k = int(request.GET.get('k','10')) # default k=10\n\n max_len = ViewerFeed.objects.count()\n upper_limit = min(max_len, top_k)\n\n feed_ids = list(ViewerFeed.objects.values_list('feedid', flat=True).order_by('id')[:upper_limit])\n\n brainfeeds_db = BrainFeeds.objects.filter(id__in=feed_ids)\n\n # Reordering brainfeeds from the DB in order of feed_ids in O(n)\n # s.t. 
feed_ids == [bf.id for bf in brainfeeds]\n feed_map_order = {feed_ids[i] : i for i in xrange(len(feed_ids))}\n brainfeeds = [0] * len(feed_ids)\n for bf in list(brainfeeds_db):\n brainfeeds[feed_map_order[bf.id]] = bf\n\n # Deleting entries from brainfeeds where brainfeeds == 0\n delete_entries = []\n for bf in brainfeeds:\n if bf == 0:\n delete_entries.append(0)\n\n for bf in delete_entries:\n brainfeeds.remove(bf)\n\n update_scores_top_k(brainfeeds)\n json_feeds = [feed.to_json() for feed in brainfeeds]\n\n return HttpResponse(json.dumps(json_feeds), content_type=\"application/json\")\n\n\n# This function allows infinite scrolling.\ndef infinite_scrolling(request):\n\n # Feeds already present\n current_feeds = int(request.GET.get('cur','10')) # default cur=10\n\n # Number of extra feeds required\n extra_feeds = int(request.GET.get('k','10')) # default k=10\n\n max_len = ViewerFeed.objects.count()\n upper_limit = min(max_len, current_feeds + extra_feeds)\n\n feed_ids = list(ViewerFeed.objects.values_list('feedid', flat=True).order_by('id')[current_feeds:upper_limit])\n\n brainfeeds_db = BrainFeeds.objects.filter(id__in=feed_ids)\n\n # Reordering brainfeeds from the DB in order of feed_ids in O(n)\n # s.t. 
feed_ids == [bf.id for bf in brainfeeds]\n feed_map_order = {feed_ids[i] : i for i in xrange(len(feed_ids))}\n brainfeeds = [0] * len(feed_ids)\n for bf in list(brainfeeds_db):\n brainfeeds[feed_map_order[bf.id]] = bf\n\n # Deleting entries from brainfeeds where brainfeeds == 0\n delete_entries = []\n for bf in brainfeeds:\n if bf == 0:\n delete_entries.append(0)\n\n for bf in delete_entries:\n brainfeeds.remove(bf)\n\n update_scores_scroll(brainfeeds, current_feeds, extra_feeds)\n json_feeds = [feed.to_json() for feed in brainfeeds]\n\n return HttpResponse(json.dumps(json_feeds), content_type=\"application/json\")\n\n@commit_on_success\ndef update_scores_top_k(brainfeeds):\n for feeds in brainfeeds:\n feeds.update_score = True\n feeds.log_normalized_feed_show += 1.0\n feeds.save()\n\n@commit_on_success\ndef update_scores_scroll(brainfeeds, current_feeds, extra_feeds):\n page_number = current_feeds/max(1.0,extra_feeds) + 1.0\n for feeds in brainfeeds:\n feeds.update_score = True\n feeds.log_normalized_feed_show += np.log10(1.0+page_number)\n feeds.save()\n\n# Filters feeds using the hash word\ndef filter_feeds_with_hashtags(request):\n\n hashword = request.GET.get('hashword')\n\n # Number of extra feeds required\n k = int(request.GET.get('k','10')) # default k=10\n\n if not hashword:\n error_response = {\n 'Error': 'hashword not provided.'\n }\n return HttpResponse(json.dumps(error_response), content_type='application/json')\n\n brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(hashtags__contains=hashword).order_by('-created_at')[:k]\n json_feeds = [feed.to_json() for feed in brain_feeds]\n return HttpResponse(json.dumps(json_feeds), content_type=\"application/json\")\n\n# Filters feeds with types\ndef filter_feeds_with_type(request):\n\n feedtype = request.GET.get('type')\n print(feedtype)\n # Number of extra feeds required\n k = int(request.GET.get('k','10')) # default k=10\n\n if not feedtype:\n error_response = {\n 'Error': 'type not provided.'\n }\n 
return HttpResponse(json.dumps(error_response), content_type='application/json')\n\n brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(source_text=feedtype).order_by('-created_at')[:k]\n json_feeds = [feed.to_json() for feed in brain_feeds]\n return HttpResponse(json.dumps(json_feeds), content_type=\"application/json\")\n\n\n# Return feeds created after datetime. Input time should be in ISO string format. It is them parsed to UTC format\ndef return_feeds_since(request):\n\n time_since = dateutil.parser.parse(request.GET.get('datetime'))\n\n # Number of extra feeds required\n k = int(request.GET.get('k','10')) # default k=10\n\n if not time_since:\n error_response = {\n 'Error': 'time_since not provided.'\n }\n return HttpResponse(json.dumps(error_response), content_type='application/json')\n\n brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(created_at__gte=time_since).order_by('-created_at')[:k]\n json_feeds = [feed.to_json() for feed in brain_feeds]\n return HttpResponse(json.dumps(json_feeds), content_type=\"application/json\")\n\n# Records upvotes for a feed\n@ensure_csrf_cookie\ndef upvotes_recorder(request):\n if request.method == 'GET':\n return HttpResponse('Ok')\n elif request.method == 'POST':\n payload = json.loads(request.body)\n feedid = payload['feedid']\n vote_dir = payload['vote']\n change = payload['change']\n\n if not feedid:\n error_response = {\n 'Error': 'No feedid provided'\n }\n return HttpResponse(json.dumps(error_response), content_type='application/json')\n if not vote_dir == -1 and not vote_dir == 1:\n error_response = {\n 'Error': 'voteid {0} not provided. 
Can only be 1 or -1'.format(vote_dir)\n }\n return HttpResponse(json.dumps(error_response), content_type='application/json')\n\n brain_feed = BrainFeeds.objects.get(id=feedid)\n votes = {}\n if vote_dir == 1:\n brain_feed.upvotes += 1\n if change:\n brain_feed.downvotes -= 1\n if vote_dir == -1:\n brain_feed.downvotes += 1\n if change:\n brain_feed.upvotes -= 1\n votes = {\n 'upvotes': max(brain_feed.upvotes, 0),\n 'downvotes': max(brain_feed.downvotes, 0)\n }\n brain_feed.save()\n\n return HttpResponse(json.dumps(votes), content_type='application/json')\n\n\n" }, { "alpha_fraction": 0.709197461605072, "alphanum_fraction": 0.709197461605072, "avg_line_length": 44.26530456542969, "blob_id": "d14395355614e2ad89ab649f4e6e14818c700edd", "content_id": "eeb7da43a02b9ea25ab53f8ba8b63a76f300a6d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2218, "license_type": "no_license", "max_line_length": 114, "num_lines": 49, "path": "/rest_api/serializer.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "from django.forms import widgets\nfrom rest_framework import serializers\nfrom feed.models import JsonFeeds\nfrom djangotoolbox.fields import ListField\n\nimport drf_compound_fields.fields as drf\nfrom datetime import datetime\n\nclass TagFieldS(serializers.Serializer):\n media = serializers.CharField(required=False) \n \n\t\nclass FeedSerializer(serializers.Serializer):\n pk = serializers.Field() # Note: `Field` is an untyped read-only field.\n feedtype = serializers.CharField(required=False)\n text = serializers.CharField(required=False)\n source_text = serializers.CharField(required=False)\n source_url = serializers.CharField(required=False)\n hashtags = serializers.CharField(required=False)\n created_at = serializers.DateTimeField(required=False)\n upvotes = serializers.IntegerField(required=False) \n media = drf.ListField(serializers.CharField(),required=False)# 
serializers.CharField(required=False,many=True)\n mediamap = drf.ListField(serializers.CharField(),required=False) \n mediatype = drf.ListField(serializers.CharField(),required=False)\n keywords = drf.ListField(serializers.CharField(),required=False)\n graphStructure = drf.ListField(serializers.CharField(),required=False)\n mediashow = drf.ListField(serializers.CharField(),required=False)\n username = serializers.CharField(required=False)\n\n def restore_object(self, attrs, instance=None):\n \"\"\"\n Create or update a new snippet instance, given a dictionary\n of deserialized field values.\n\n Note that if we don't define this method, then deserializing\n data will simply return a dictionary of items.\n \"\"\"\n if instance:\n # Update existing instance\n #instance.feedtype = attrs.get('feedtype', instance.feedtype)\n #instance.code = attrs.get('code', instance.code)\n #instance.linenos = attrs.get('linenos', instance.linenos)\n #instance.language = attrs.get('language', instance.language)\n #instance.style = attrs.get('style', instance.style)\n return instance\n\n # Create new instance\n attrs['created_at']=datetime.now()\n return JsonFeeds(**attrs)\n" }, { "alpha_fraction": 0.6905550956726074, "alphanum_fraction": 0.6992543339729309, "avg_line_length": 33, "blob_id": "0ba3c4a722f03018e19989eac27982b0fcee8e9c", "content_id": "3f8368d224498987ee4a220f846e172885d2d864", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2414, "license_type": "no_license", "max_line_length": 94, "num_lines": 71, "path": "/auth/auth.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse\nimport json\nfrom django.contrib.auth.models import User\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django import forms\nfrom django.contrib.auth import login, logout\nfrom django.contrib.auth import authenticate\nfrom base64 import 
b64decode\n\n\n@ensure_csrf_cookie\ndef create_user_rb(request):\n if request.method == 'GET':\n return HttpResponse('Ok')\n elif request.method == 'POST':\n payload = json.loads(request.body)\n username = payload['username']\n email = payload['email']\n password = payload['password']\n if email and User.objects.filter(email=email).exclude(username=username).count():\n return HttpResponse('This email address is already in use! Try logging in.', status=401)\n if email and User.objects.filter(email=email, username=username).count():\n return HttpResponse('This account already exists! Try logging in.', status=401)\n user = User.objects.create_user(username, email, password)\n user.save()\n return HttpResponse('Ok')\n\n@ensure_csrf_cookie\ndef login_rb(request):\n if request.user.is_authenticated():\n user = request.user\n user_data = {\n 'id': user.id,\n 'username': user.username,\n 'email': user.email,\n 'loggedin': 'True'\n };\n return HttpResponse(json.dumps(user_data), content_type='application/json')\n if request.method == 'GET':\n return HttpResponse('Ok')\n elif request.method == 'POST':\n decodedCredentials = b64decode(request.body)\n if not ':' in decodedCredentials:\n return HttpResponse('Not logged in', status=401)\n email, password = decodedCredentials.split(':')\n user = authenticateEmail(email, password)\n if not user:\n return HttpResponse('Invalid Credentials', status=401)\n user = authenticate(username=user.username, password=password)\n if not user:\n return HttpResponse('Invalid Credentials', status=401)\n login(request, user)\n\n user_data = {\n 'id': user.id,\n 'username': user.username,\n 'email': user.email\n };\n return HttpResponse(json.dumps(user_data), content_type='application/json')\n\ndef authenticateEmail(email=None, password=None):\n try:\n user = User.objects.get(email=email)\n if user.check_password(password):\n return user\n except User.DoesNotExist:\n return None\n\ndef logout_rb(request):\n logout(request)\n return 
HttpResponse('Logged Out')\n" }, { "alpha_fraction": 0.5608276128768921, "alphanum_fraction": 0.5663448572158813, "avg_line_length": 30.25, "blob_id": "9f6199b501d013cab3a11a2332e63002d35e5b92", "content_id": "3c645a94c5006b7ce6c26d419447dee257e04adf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3625, "license_type": "no_license", "max_line_length": 121, "num_lines": 116, "path": "/UpdateViewerFeeds/updateViewerFeed.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "import ConfigParser\nimport pymongo as pm\nfrom datetime import datetime \nimport numpy as np\nimport importlib\nimport sys\nsys.path.insert(0,'/var/www/Backend/Backend/')\n\ndef readConfigFile():\n \"\"\"\n Reading the setting file to use.\n Different setting files are used on Production and Test robo brain\n \"\"\"\n\n global setfile\n config = ConfigParser.ConfigParser()\n config.read('/tmp/backend_uwsgi_setting')\n env = config.get('uwsgi','env')\n setting_file_name = env.strip().split('.')[1]\n setfile = importlib.import_module(setting_file_name)\t\n\ndef establishConnection():\n \"\"\"\n Establishes connection to remote db\n \"\"\"\n \n global brain_feeds, viewer_feeds\n client = pm.MongoClient(host,port)\n db = client[dbname]\n brain_feeds = db['brain_feeds']\n viewer_feeds = db['viewer_feeds']\n\ndef viewerFeedsUpdate():\n \"\"\"\n Sorts Brain Feeds on Basis of score and pushes them to ViewerFeeds table\n \"\"\"\n\n feeds_ordered = brain_feeds.find().sort('score',pm.DESCENDING)\n overall_counter = 0\n feeds_to_push = []\n first_time = True\n\n for feeds in feeds_ordered:\n try:\n new_feed = {}\n new_feed['_id'] = overall_counter\n new_feed['feedid'] = feeds['_id'].__str__()\n feeds_to_push.append(new_feed)\n overall_counter += 1\n print \"{0} {1} {2}\".format(overall_counter,feeds['score'],feeds['source_url'])\n if overall_counter % 100 == 0:\n if first_time:\n viewer_feeds.drop()\n first_time = False\n 
viewer_feeds.insert(feeds_to_push)\n feeds_to_push = []\n except:\n print \"**************skipping*************\"\n\ndef viewerFeedsUpdate_deprecated():\n \"\"\"\n DEPRECATED\n Equally represent each project\n \"\"\"\n different_projects = brain_feeds.distinct('source_url')\n different_projects = sorted(different_projects,key=len) \n feeds_each_project = {}\n feeds_count = {}\n for url in different_projects:\n feeds_each_project[url] = brain_feeds.find({'source_url':url},{'created_at':1}).sort('created_at',pm.DESCENDING)\n feeds_count[url] = feeds_each_project[url].count()\n\n feeds_to_push = []\n overall_counter = 0\n level = 0\n first_time = True\n while True:\n toBreak = True\n remaining_projects = []\n for url in different_projects:\n if feeds_count[url] > level:\n print url\n new_feed = {}\n new_feed['_id'] = overall_counter\n new_feed['feedid'] = feeds_each_project[url][level]['_id'].__str__()\n feeds_to_push.append(new_feed)\n overall_counter += 1\n remaining_projects.append(url)\n toBreak = False\n if overall_counter % 100 == 0:\n if first_time:\n viewer_feeds.drop()\n first_time = False\n viewer_feeds.insert(feeds_to_push)\n feeds_to_push = []\n different_projects = remaining_projects\n\n if toBreak:\n break\n\n\n level += 1\n\nif __name__==\"__main__\":\n global host, dbname, port, setfile, brain_feeds, viewer_feeds\n\n # Reading the setting file for db address\n readConfigFile()\n host = setfile.DATABASES['default']['HOST']\n dbname = setfile.DATABASES['default']['NAME']\n port = int(setfile.DATABASES['default']['PORT'])\n \n # Extablishing connection to remote db \n establishConnection()\n \n viewerFeedsUpdate()\n" }, { "alpha_fraction": 0.5514403581619263, "alphanum_fraction": 0.7160493731498718, "avg_line_length": 17.769229888916016, "blob_id": "bc139e4bd9924bddaac2bcb0973f3b132398f562", "content_id": "f500b92d791c81f06df42a37e78c9eb2536afa8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 243, 
"license_type": "no_license", "max_line_length": 28, "num_lines": 13, "path": "/requirements.txt", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "Django==1.5.8\ndjango-mongodb-engine==0.5.1\ndjangorestframework==2.4.2\ndjangotoolbox==1.6.2\ndrf-compound-fields==0.2.2\ndjango-cors-headers==0.13\npymongo==2.7.2\npython-dateutil==2.2\nsix==1.7.3\nuWSGI==2.0.7\nFabric==1.10.0\nnltk==3.0.0\nboto==2.34.0" }, { "alpha_fraction": 0.7046979665756226, "alphanum_fraction": 0.7136465311050415, "avg_line_length": 36.125, "blob_id": "2a53702620c887bbeaa459346af6fcd42dbd48ac", "content_id": "26884d1856f75101b086260c2fa8f9d134da0eb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 894, "license_type": "no_license", "max_line_length": 78, "num_lines": 24, "path": "/rest_api/views.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "# Create your views here.\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom feed.models import JsonFeeds\nfrom rest_api.serializer import FeedSerializer\nfrom datetime import datetime \nfrom rest_framework import permissions\n\n\n@api_view(['GET', 'POST'])\ndef feed_list(request):\n #List all snippets, or create a new snippet.\n if request.method == 'GET':\n feeds = JsonFeeds.objects.all()[:25]\n serializer = FeedSerializer(feeds, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = FeedSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n" }, { "alpha_fraction": 0.6172146797180176, "alphanum_fraction": 0.6187620759010315, "avg_line_length": 30.524391174316406, "blob_id": "03488686fcce472dc01415bcbc52abae79136248", "content_id": 
"b830e49ffa0b388bfb3acaab36c9fab6573a8a20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5170, "license_type": "no_license", "max_line_length": 97, "num_lines": 164, "path": "/feed/models.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom djangotoolbox.fields import ListField\nfrom datetime import datetime\nfrom django.db.models.signals import post_save\nfrom queue_util import add_feed_to_queue\n#from feed.models import BrainFeeds\n\nclass GraphFeedback(models.Model):\n id_node = models.TextField()\n feedback_type = models.TextField()\n node_handle = models.TextField()\n action_type = models.TextField()\n\n def to_json(self):\n return {\"_id\":self.id,\n \"id_node\":self.id_node,\n \"feedback_type\":self.feedback_type,\n \"node_handle\":self.node_handle,\n \"action_type\":self.action_type\n }\n\n class Meta:\n db_table = \"graph_feedback\"\n\nclass BrainFeeds(models.Model):\n toshow = models.BooleanField(default=True)\n feedtype = models.TextField() #originally feedtype -> type\n text = models.TextField()\n source_text = models.TextField()\n source_url = models.TextField(db_index=True)\n meta = {'indexes':['source_url']}\n media = ListField()\n mediatype = ListField()\n created_at = models.DateTimeField(default=datetime.now())\n hashtags = models.TextField(db_index=True)\n meta = {'indexes':['hashtags']}\n upvotes = models.IntegerField(default=0)\n downvotes = models.IntegerField(default=0)\n jsonfeed_id = models.TextField()\n username = models.TextField()\n score = models.FloatField(default=0.0,db_index=True)\n meta = {'indexes':['score']}\n update_score = models.BooleanField(default=True,db_index=True)\n meta = {'indexes':['update_score']}\n log_normalized_feed_show = models.FloatField(default=1.0)\n\n\n def to_json(self):\n return {\"_id\":self.id,\n \"toshow\":self.toshow,\n \"feedtype\":self.feedtype,\n \"text\":self.text,\n 
\"source_text\":self.source_text,\n \"source_url\":self.source_url,\n \"media\":self.media,\n \"mediatype\":self.mediatype,\n \"created_at\":self.created_at.isoformat(),\n \"hashtags\":self.hashtags,\n \"upvotes\":self.upvotes,\n \"downvotes\":self.downvotes,\n \"jsonfeed_id\":self.jsonfeed_id,\n \"username\":self.username,\n \"score\":self.score,\n \"log_normalized_feed_show\":self.log_normalized_feed_show,\n \"update_score\":self.update_score\n }\n\n class Meta:\n db_table = 'brain_feeds'\n get_latest_by = 'created_at'\n\n\nclass JsonFeeds(models.Model):\n feedtype = models.TextField() #originally feedtype -> type\n text = models.TextField()\n source_text = models.TextField()\n source_url = models.TextField()\n mediashow = ListField()\n media = ListField()\n mediatype = ListField()\n mediamap = ListField()\n keywords = ListField()\n graphStructure = ListField()\n\n created_at = models.DateTimeField()\n hashtags = models.TextField(default=datetime.now, blank=True)\n meta = {'indexes':['hashtags']}\n upvotes = models.IntegerField(default=0)\n downvotes = models.IntegerField(default=0)\n username = models.TextField()\n\n def to_json(self):\n return {\"_id\":self.id,\n \"feedtype\":self.feedtype,\n \"text\":self.text,\n \"source_text\":self.source_text,\n \"source_url\":self.source_url,\n \"mediashow\":self.mediashow,\n \"media\":self.media,\n \"mediatype\":self.mediatype,\n \"mediamap\":self.mediamap,\n \"keywords\":self.keywords,\n \"graphStructure\":self.graphStructure,\n \"created_at\":self.created_at.isoformat(),\n \"hashtags\":self.hashtags,\n \"upvotes\":self.upvotes,\n \"downvotes\":self.downvotes,\n \"username\":self.username\n }\n\n class Meta:\n db_table = 'json_feeds'\n\ndef postSaveJson(**kwargs):\n instance = kwargs.get('instance')\n print \"Post Saving JsonFeed: \", instance.to_json()\n add_feed_to_queue(instance.to_json())\n\n #Saving JsonFeed to BrainFeed\n brain_feed = BrainFeeds(\n feedtype=instance.feedtype,\n text=instance.text,\n 
source_text=instance.source_text,\n source_url=instance.source_url,\n hashtags=instance.hashtags,\n jsonfeed_id=instance.id,\n username=instance.username\n )\n\n media = []\n mediatype = []\n\n for mediashow,_media,_mediatype in zip(instance.mediashow,instance.media,instance.mediatype):\n if mediashow.lower() == 'true':\n media.append(_media)\n mediatype.append(_mediatype)\n brain_feed.media = media\n brain_feed.mediatype = mediatype\n brain_feed.save()\n\n\n #Saving viewer feed\n \"\"\"\n numitem = ViewerFeed.objects.all().count()\n viewer_feed = ViewerFeed(\n id = numitem,\n feedid = brain_feed.id\n )\n viewer_feed.save()\n \"\"\"\n #Saving JsonFeed to GraphDB\n\npost_save.connect(postSaveJson, JsonFeeds)\n\nclass ViewerFeed(models.Model):\n feedid = models.TextField()\n id = models.IntegerField(db_index=True,primary_key=True)\n meta = {'indexes':['id']}\n\n def to_json(self):\n return {\"_id\":self.id,\"id\":self.id,\"feedid\":self.feedid}\n\n class Meta:\n db_table = 'viewer_feeds'\n" }, { "alpha_fraction": 0.6917040348052979, "alphanum_fraction": 0.7186098694801331, "avg_line_length": 26.875, "blob_id": "0822b64a26c2290bcc9aa546f4b35a3a799434d6", "content_id": "b51a586d8039b9d6270f6c32ccda2aefe0159842", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 892, "license_type": "no_license", "max_line_length": 56, "num_lines": 32, "path": "/conf/backend_uwsgi_production.ini", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "# robobrain_uwsgi_production.ini file\n[uwsgi]\n\n# Django-related settings\n# the base directory (full path)\nchdir = /var/www/Backend/\n# Django's wsgi file\nmodule = Backend.wsgi\n# Environment Variable\nenv = DJANGO_SETTINGS_MODULE=Backend.settings_production\n# PID file\npidfile = /tmp/robobrain-master.pid\n# Respawn processes taking more than 20 seconds\nharakiri = 20\n# Limit the project to 1 GB\nlimit-as = 1024\n# Respawn processes after serving 10000 
requests\nmax-requests = 10000\n# Background the process and log\ndaemonize = /var/log/uwsgi/robobrain.log\n\n# process-related settings\n# master\nmaster = true\n# maximum number of worker processes\nprocesses = 10\n# the socket (use the full path to be safe\nsocket = /tmp/backend.sock\n# ... with appropriate permissions - may be needed\nchmod-socket = 666\n# clear environment on exit\nvacuum = true\n" }, { "alpha_fraction": 0.7006688714027405, "alphanum_fraction": 0.7006688714027405, "avg_line_length": 48.83333206176758, "blob_id": "e078f33c986574a8e7dcd13099f874bdbd57cc80", "content_id": "02c4f2c64e2d39c1357d6e6d356451135d9b7134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 598, "license_type": "no_license", "max_line_length": 82, "num_lines": 12, "path": "/feed/urls.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, url\nfrom feed import views\n\nurlpatterns = patterns('',\n url(r'most_recent/', views.return_top_k_feeds, name='most_recent'),\n url(r'infinite_scroll/', views.infinite_scrolling, name='infinite_scrolling'),\n url(r'filter/', views.filter_feeds_with_hashtags, name='filter'),\n url(r'filter_type/', views.filter_feeds_with_type, name='filter_type'),\n url(r'since/', views.return_feeds_since, name='since'),\n url(r'upvotes/', views.upvotes_recorder, name='upvotes'),\n url(r'graph_feedback/', views.save_graph_feedback, name='graph_feedback'),\n)\n" }, { "alpha_fraction": 0.670634925365448, "alphanum_fraction": 0.670634925365448, "avg_line_length": 30.5, "blob_id": "e57886fc7556e7c5669f1c295f0392bbf9a11d79", "content_id": "b48b057781ce48c5a012735602de9145e92eaf56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "no_license", "max_line_length": 66, "num_lines": 8, "path": "/auth/urls.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", 
"text": "from django.conf.urls import patterns, url\nimport auth\n\nurlpatterns = patterns('',\n url(r'create_user/', auth.create_user_rb, name='create_user'),\n url(r'login/', auth.login_rb, name='login'),\n url(r'logout/', auth.logout_rb, name='logout')\n)\n" }, { "alpha_fraction": 0.7560521364212036, "alphanum_fraction": 0.7635009288787842, "avg_line_length": 46.56962203979492, "blob_id": "71c3fd75bc5518beadf54e0ab89e8693ea349620", "content_id": "f88d31f6feefb1ab89c7b4b9b1f53e8ce9d0d7fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3759, "license_type": "no_license", "max_line_length": 543, "num_lines": 79, "path": "/README.md", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "Backend\n==============\n\n### To setup local dev/testing Environment\n\nOver the time we received a lot of feedback on how the productivity of individual contributor increases if we they do not have to deal with systems and environment stuff (Dependency packages, confilicts due to multiple package manangers across multiple projects etc). So,we made a virtual box using Vagrant which has all the right dependency packages of the packages that are required for smooth development, testing and local deployment of Backend. In other words, all instructions mentioned in the docs for getting started will run smoothly.\n\n1. Install Vagrant: Download link can be found at https://www.vagrantup.com/downloads.html\n\n2. Download the Vagrant package.box file: Can be found here https://s3-us-west-2.amazonaws.com/feedmedia/package.box\n\n3. Follow the instrucitons to login to virtual box\n\n `$ vagrant box add testbox package.box` (First time only)\n\n `$ vagrant init testbox` (First time only)\n\n `$ vagrant up`\n\n `$ vagrant ssh`\n \n `$ git config --global user.name \"Your Name\"` (First time only)\n\n `$ git config --global user.email [email protected]` (First time only)\n \n\n4. 
Once you are inside the virtual box, you can proceed with the instructions avalable at https://github.com/RoboBrainCode/Docs/blob/master/README.md\n5. Once you finish develpment and testing, you can issue `vagrant shutdown` to exit the virtual environment. You can always comeback by using step 3. \n\n\n\n### Graph Structure\n1. Nodes have either the label :Concept or :Media. If they have :Media, they will also have a :Image, :Video or :Text label depending on what media type they represent.\n2. Edges store the source_url, source_text and keywords (an array of strings) properties. Each edge is labeled by its edge type. For example, an edge representing the spatially_distributed_as relationship will have the :SPATIALLY_DISTRIBUTED_AS label.\n3. Nodes store the handle name without the #. eg. {handle: 'shoe'}\n4. :Media nodes referred to in the source_text with a #$ will have the word following #$ as the handle name. eg. #$image -> {handle: 'image'}\n5. :Media nodes NOT referred to in the source_text will have the file name of the media as the handle name and the full url path of the media as mediapath. eg. 'aaa/bbb/shoe.jpg' -> {handle: 'shoe.jpg', mediapath: 'aaa/bbb/shoe.jpg'}\n\n### Setting up submodules\nOnce you pull or clone the Backend repo, you need to do:\n\n1. `cd Backend/`\n\n2. `git submodule init`\n\n3. `git submodule update`\n\n\nAny time a change to any of the submodules is pushed to the Backend repo, you\nneed to do `git submodule update`.\n\n### Setup Django with MongoDB\n* Follow the URL\n\nhttp://django-mongodb-engine.readthedocs.org/en/latest/topics/setup.html\n\n* Execute the following commands:\n\n1. sudo pip install git+https://github.com/django-nonrel/[email protected]\n\n2. sudo pip install git+https://github.com/django-nonrel/djangotoolbox\n\n3. sudo pip install git+https://github.com/django-nonrel/mongodb-engine\n\n4. 
sudo pip install djangorestframework\n\n\nThis will also install Django for you, further any Django project created will use MongoDB (and not the default sqlite3).\n\n### Possible errors\n* You might get an error (not necessarily) that DJANGO_SETTINGS_MODULE is not set. If this happens then follow the link\n\nhttp://2buntu.com/articles/1451/installing-django-and-mongodb-in-your-virtualenv/\n\nPS: There is no needs to setup virtualenv.\n\n### Update viewer feeds\n\nThe script inside directory UpdateViewerFeeds runs as a cron job and balances the frontend feeds. Giving equal importance to all the projects. The script import the settings file mentioned in manage.py. So if you change the settings filename in manage.py, then accordingly modify the updateViewerFeed.py file. \n" }, { "alpha_fraction": 0.6473379135131836, "alphanum_fraction": 0.6562994122505188, "avg_line_length": 36.19607925415039, "blob_id": "e705a6a388c0c2c72054124cf52952f4ecacc1c7", "content_id": "afc1b4773a0f5f3d424b88fd27448ddc9f28cd01", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1897, "license_type": "no_license", "max_line_length": 67, "num_lines": 51, "path": "/fabfile.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "from __future__ import with_statement\nfrom fabric.api import cd, env, local, settings, run, sudo\nfrom fabric.colors import green, red\nfrom fabric.contrib.console import confirm\n\ndef prod_deploy(user='ubuntu'):\n print(red('Deploying to production at robobrain.me...'))\n if not confirm('Are you sure you want to deploy to production?'):\n print(red('Aborting deploy.'))\n env.host_string = '54.149.21.165'\n env.key_filename = 'conf/www.pem'\n env.user = user\n env.shell = '/bin/zsh -l -c'\n with cd('/var/www/Backend'):\n # sudo('su - ubuntu')\n print(green('Checking out test...'))\n run('git checkout test')\n print(green('Pulling latest version of test...'))\n run('git pull origin test')\n 
print(green('Checking out production...'))\n run('git checkout production')\n print(green('Rebasing onto test...'))\n run('git rebase test')\n print(green('Pushing production upstream...'))\n run('git push origin production')\n print(green('Reloading server...'))\n sudo('uwsgi --reload /tmp/robobrain-master.pid')\n print(red('Done!'))\n\ndef test_deploy(user='ubuntu'):\n env.host_string = '54.148.225.192'\n env.key_filename = 'conf/www.pem'\n env.user = user\n env.shell = '/bin/zsh -l -c'\n print(red('Deploying to test at test.robobrain.me...'))\n with cd('/var/www/Backend'):\n print(green('Checking out master...'))\n run('git checkout master')\n print(green('Pulling latest version of master...'))\n run('git pull origin master')\n print(green('Checking out test...'))\n run('git checkout test')\n print(green('Rebasing onto master...'))\n run('git rebase master')\n print(green('Pulling latest version of test...'))\n run('git pull origin test')\n print(green('Push the latest version of test...'))\n run('git push origin test')\n print(green('Reloading server...'))\n sudo('uwsgi --reload /tmp/robobrain-master.pid')\n print(red('Done!'))\n" }, { "alpha_fraction": 0.7064846158027649, "alphanum_fraction": 0.7133105993270874, "avg_line_length": 31.55555534362793, "blob_id": "67a092f07d13d29c68195e8fd33642085ba174e0", "content_id": "8f12c2734a65dc2b97ef9811c26d0ba4451d7a90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 293, "license_type": "no_license", "max_line_length": 61, "num_lines": 9, "path": "/rest_api/urls.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, url\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nurlpatterns = patterns('rest_api.views',\n url(r'^feeds/$', 'feed_list'),\n #url(r'^snippets/(?P<pk>[0-9]+)$', 'snippet_detail'),\n)\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n" }, { "alpha_fraction": 
0.5577395558357239, "alphanum_fraction": 0.5765765905380249, "avg_line_length": 29.524999618530273, "blob_id": "b075e9183c147e3d54ce13ef2030c7420112c34c", "content_id": "235a5f3463e6d258b63c63898ba073e551d7a347", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1221, "license_type": "no_license", "max_line_length": 81, "num_lines": 40, "path": "/feed/queue_util.py", "repo_name": "RoboBrainCode/Backend", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport boto\nimport json\nimport traceback\nfrom boto.sqs.message import RawMessage\nfrom bson import json_util\n\nconn = boto.sqs.connect_to_region(\n \"us-west-2\", \n aws_access_key_id='AKIAIDKZIEN24AUR7CJA', \n aws_secret_access_key='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')\n\nfeed_queue = conn.create_queue('feed_queue')\n\ndef add_feed_to_queue(json_feed):\n m = RawMessage()\n try:\n m.set_body(json.dumps(json_feed, default=json_util.default))\n feed_queue.write(m)\n except Exception, e:\n print traceback.format_exc()\n print json_feed\n\nif __name__ == '__main__':\n add_feed_to_queue({\n \"username\" : \"arzav\",\n \"_id\": \"546e6a2f5caae434656bbc36\",\n \"feedtype\" : \"\",\n \"mediashow\" : [ ],\n \"text\" : \"#Simhat_Torah is a synonym of #Rejoicing_in_the_Law\",\n \"hashtags\" : \" simhat_torah rejoicing_in_the_law\", \n \"mediatype\" : [ ],\n \"source_url\" : \"http://wordnet.princeton.edu/\",\n \"source_text\" : \"WordNet\",\n \"mediamap\" : [ ],\n \"media\" : [ ],\n \"keywords\": [\"Simhat_Torah\",\"Rejoicing_in_the_Law\",\"synonym\",\"wordnet\"], \n \"upvotes\" : 0, \n \"graphStructure\": [\"#same_synset: #0 -> #1\", \"#same_synset: #1 -> #0\"]})\n" } ]
14
lammobile/PI
https://github.com/lammobile/PI
3e695dd283dbaa90b4247f97db66f555cf6803df
f3b75dc6b433e7ecc014f91eff79803df1d2fca6
3fa3cf515aed7422d9e5a8e7da6571f87764095c
refs/heads/master
"2020-02-27T15:26:13.055180"
"2016-07-31T09:42:24"
"2016-07-31T09:42:24"
64,542,924
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5839853882789612, "alphanum_fraction": 0.5986526012420654, "avg_line_length": 27.504140853881836, "blob_id": "2e3747d8f076cfaac0d9ea4503fb4b4a2f6afe48", "content_id": "3ed6b85ced5cd7d24d9f09d28b7ec9e09b4c24d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 28499, "license_type": "no_license", "max_line_length": 151, "num_lines": 966, "path": "/bkafis/bkafis/src/lib/bkafis/fingerprint.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tmatcher.c\r\n\tDescription: Function implementation of BKAFIS matcher\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n\t\r\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\r\n\t- add function DetectLDR\r\n\t- add params for DetectLDR into struct BkafisParams\r\n *********************************************************************/\r\n\r\n\r\n#include <fingerprint.h>\r\nint debug = 0;\r\n/**********************************************************************\r\n\tConvert from ISOTemplate 2005 format \r\n\tInput:\r\n\t\t\tImplicitly stored in static variable isoTemplate that is declared \r\n\t\t\tin ISOTemplate.c \r\n\tOutput:\r\n\t\t\tpointer to Fingerprint structure declared above \r\n\tUsage:\r\n\t\t\tin order to load the iso template from file call \r\n\t\t\tISORESULT LoadISOTemplate (ISOBYTE *path);\r\n\t\t\tthen in order to convert from the template into Fingerprint structure\r\n\t\t\tcall unsigned char ConvertISO2005Fingerprint(Fingerprint* finger);\r\n *********************************************************************/ \r\n\r\nISOBYTE ConvertISO2005Fingerprint(Fingerprint* finger)\r\n{\r\n\tif (finger==NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tISOWORD width, height,xRes,yRes;\r\n\tISOBYTE nViews;\r\n\tGetRecordHeader (&width,&height,&xRes,&yRes,&nViews);\r\n\tmemset(finger,0,sizeof(Fingerprint));\r\n\tfinger->width = width;\r\n\tfinger->height 
= height;\r\n\t\r\n\t/*\r\n\t#ifdef DEBUG\r\n\tprintf(\"Width=%d\\nHeight=%d\\n\",finger->width,finger->height);\r\n\t#endif\r\n\t*/\r\n\tif (debug)\r\n\t\tprintf(\"Width=%d\\tHeight=%d\\txRes=%d,\\tyRes=%d\\tnViews=%d\\n\",finger->width,finger->height,xRes,yRes,nViews);\r\n\t\r\n\tISOBYTE fingPos,nView,imprType,fingQuality,nMinutiae;\r\n\t/* GetFingerViewHeader (0,NULL,NULL,NULL,&quality,&nMinutiae);*/\r\n\tGetFingerViewHeader (0,&fingPos,&nView,&imprType,&fingQuality,&nMinutiae);\r\n\tif (debug)\r\n\t\tprintf(\"fingPos=%d\\tnView=%d\\timprType=%d\\tfingQuality=%d\\tnMinutiae=%d\\n\",fingPos,nView,imprType,fingQuality,nMinutiae);\r\n\tfinger->quality = fingQuality;\r\n\tfinger->nMinutiae = nMinutiae;\r\n\tMinutia** minutiae=malloc(sizeof(Minutia*)*finger->nMinutiae);\r\n\tif (minutiae == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tmemset(minutiae,0x00,sizeof(Minutia*)*finger->nMinutiae);\r\n\t\r\n\t/*\r\n\t#ifdef DEBUG\r\n\tprintf(\"Quality=%d\\nnMinutiae=%d\\n\",finger->quality,finger->nMinutiae);\r\n\t#endif\r\n\t*/\r\n\t\r\n\tunsigned char minI;\r\n\tISOBYTE type, angle;\r\n\tISOWORD x,y;\r\n\tISOBYTE quality;\r\n\tMinutia* min;\r\n\tfor (minI=0;minI<finger->nMinutiae;minI++){\r\n\t\t\r\n\t\tmin=malloc(sizeof(Minutia));\r\n\t\tif (min==NULL){\r\n\t\t\tCleanFingerprint(finger);\r\n\t\t\treturn ISO_GENERICERROR;\r\n\t\t}\r\n\t\tGetMinutiaeData(0,minI,&type,&x,&y,&angle,&quality);\r\n\t\tmin->x = x;\r\n\t\tmin->y = -y; /* in ISO 2005, y is positive */\r\n\t\tmin->angle = angle; /* in ISO 2005, angle is in range from 0-255, hence, convert it to range 0-32 - 1.40625*M_PI/180;*/\r\n\t\tmin->type = type;\r\n\t\tmin->quality = quality;\r\n\t\tmin->nNeighbors=0;\r\n\t\tmin->ldr = 0;\r\n\t\t\r\n\t\t/* if (debug)\r\n\t\tprintf(\"%d\\t%d\\t%d\\t%d\\t%d\\n\", \r\n\t\t\tmin->x,\r\n\t\t\tmin->y,\r\n\t\t\tmin->angle,\r\n\t\t\tmin->type,\r\n\t\t\tmin->quality\r\n\t\t\t); */\r\n\t\t\r\n\t\t\r\n\t\tminutiae[minI]=min;\r\n\t}\r\n\tfinger->minutiae= minutiae;\r\n\tISOBYTE 
index;\r\n\tISOBYTE nBlocks;\r\n\tindex = 0;\r\n\tGetExtendedDataHeader (index,&nBlocks);\r\n\tif (debug)\r\n\t\tprintf(\"nBlocks=%d\\n\",nBlocks);\r\n\tISOBYTE blkI;\r\n\tfor (blkI=0;blkI<nBlocks;blkI++){\r\n\t\tISOWORD typeID;\r\n\t\tISOWORD dataLength;\r\n\t\tISOBYTE *data;\r\n\t\tGetExtendedDataBlock (index,blkI,&typeID,&dataLength,NULL);\r\n\t\tif (debug)\r\n\t\t\tprintf(\"Extended data blkI=%d\\ttypeID=%d,dataLength=%d\\n\",blkI,typeID,dataLength);\r\n\t}\r\n\treturn ISO_SUCCESS;\t\r\n}\r\nchar ConvertFingerprintISO2005(Fingerprint* finger)\r\n{\r\n\tunsigned int n,i;\r\n\tn=finger->nMinutiae;\r\n\tInitRecordHeader (finger->width,finger->height,197,197,1);\r\n\tInitFingerViewHeader (0,0,0,0,finger->quality,n);\r\n\t\r\n\tfor (i=0;i<n;i++){\r\n\t\tMinutia* min = finger->minutiae[i];\r\n\t\tSetMinutiaeData (0,i,min->type,min->x,-min->y,min->angle,min->quality);\r\n\t}\r\n\tInitExtendedDataHeader (0,0);\t\t\r\n}\r\n\r\nISOBYTE SaveFingerprintText(char* path, Fingerprint* finger)\r\n{\r\n\tFILE *fp;\r\n\tunsigned char minI;\r\n\tunsigned char neighborI;\r\n\t\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tif ( (fp = fopen(path,\"w\")) == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\t/*\r\n\tfprintf(fp,\"Minutiae information:Width=%d\\tHeight=%d\\tQuality=%d\\tnMinutiae=%d\\n\", finger->width, finger->height, finger->quality, finger->nMinutiae);\r\n\tfprintf(fp,\"x\\ty\\tAngle\\tType\\tQuality\\tLDR\\t#Neighbors\\tIndex\\tEd\\tDra\\tOda\\tRidgeCount...\\n\");\r\n\t*/\r\n\t\r\n\tfor ( minI = 0; minI < finger->nMinutiae; minI++ )\r\n\t{\r\n\t\tif \t( finger->minutiae[minI] )\r\n\t\t\tfprintf\t(\tfp,\t\"%d\\t%d\\t%d\\t%d\\t%d\\t%d\"\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->x\t\t\t, \r\n\t\t\t\t\t\t-finger->minutiae[minI]->y\t\t\t, 
\r\n\t\t\t\t\t\tfinger->minutiae[minI]->angle\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->quality\t\t,\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tfinger->minutiae[minI]->ldr\t\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->nNeighbors\r\n\t\t\t\t\t);\r\n\t\tif (finger->minutiae[minI]->nNeighbors){\r\n\t\t\tfor ( neighborI = 0; neighborI < finger->minutiae[minI]->nNeighbors; neighborI++ )\r\n\t\t\t\t\r\n\t\t\t\t\tfprintf\t(\tfp, \"\\t%d\\t%f\\t%f\\t%d\"\t\t\t\t\t\t\t\t, \r\n\t\t\t\t\t\t\tfinger->minutiae[minI]->neighborIds[neighborI],\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tGetDistance(finger,minI,finger->minutiae[minI]->neighborIds[neighborI]),\r\n\t\t\t\t\t\t\tGetDra(finger,minI,finger->minutiae[minI]->neighborIds[neighborI]),\r\n\t\t\t\t\t\t\tGetOda(finger,minI,finger->minutiae[minI]->neighborIds[neighborI])\r\n\t\t\t\t\t\t\t); \r\n\t\t}\t\t\t\t\r\n\t\tfprintf(fp,\"\\n\");\r\n\t}\r\n\t\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nISOBYTE CleanFingerprint(Fingerprint* finger)\r\n{\r\n\tunsigned char i,j;\r\n\tif (finger->minutiae){\r\n\t\tfor (i=0;i<finger->nMinutiae;i++){\r\n\t\t\tif (finger->minutiae[i]) {\r\n\t\t\t\tfree(finger->minutiae[i]);\r\n\t\t\t}\r\n\t\t}\r\n\t\tfree(finger->minutiae);\r\n\t}\r\n\tif (finger->distances)\r\n\t\tfree(finger->distances);\r\n\tif (finger->dra)\r\n\t\tfree(finger->dra);\r\n\tif (finger->oda)\r\n\t\tfree(finger->oda);\r\n\tmemset(finger,0x00,sizeof(Fingerprint));\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nunsigned char SortMinutiaQuality(Fingerprint* finger)\r\n{\r\n\tunsigned char i, j;\r\n\t\r\n\t/*\t\tSorting minutiae by quality score in descending order\t*/\r\n \r\n\tISOBYTE quality1, quality2;\r\n\tfor (i = 0; i < finger->nMinutiae - 1; i++)\r\n for (j = finger->nMinutiae - 1; j > i; j--){\r\n\t\t\tquality1 = finger->minutiae[j]->quality;\r\n\t\t\tquality2 = finger->minutiae[j-1]->quality;\r\n\t\t\tif (quality1 > quality2){\r\n\t\t\t\tMinutia* \t\t\ttg = finger->minutiae[j];\r\n finger->minutiae[j] = finger->minutiae[j - 1];\r\n 
finger->minutiae[j-1] = tg;\r\n\t\t\t}\r\n\t\t}\r\n\t\r\n\t\r\n}\r\n\r\n/* FindDirectionalNeighbours */\r\nchar\tFindDirectionalNeighbors(Fingerprint* finger, unsigned char centerI)\r\n{\r\n\tunsigned char\tmaxNeighbors = MAX_FOUND_NEIGHBORS;\r\n\tfloat\tminDistance = MIN_DISTANCE;\r\n\tfloat\tmaxDistance = MAX_DISTANCE;\r\n\tfloat\tminDistances[MAX_FOUND_NEIGHBORS];\r\n\tMinutia* min = finger->minutiae[centerI];\r\n\tunsigned char\t*neighborIds = min->neighborIds;\r\n\t\r\n\tunsigned char\tnNeighbors = 0;\r\n\tunsigned char\tnIterations = 0;\r\n\tunsigned char \tsector;\r\n\tunsigned char \tn = finger->nMinutiae;\r\n\tunsigned char\ti;\r\n\tunsigned char\tminNeighborI[MAX_FOUND_NEIGHBORS];\r\n\t\r\n\tmemset( neighborIds, 255, sizeof(unsigned char) * maxNeighbors );\r\n\t/* if (debug) printf(\"Find neighbors for minutiae %d: distance = %x\\n\",centerI,finger->distances); */\r\n\r\n\twhile\t( (nNeighbors < maxNeighbors) && (nIterations < maxNeighbors) )\r\n\t{\r\n\t\tnIterations++;\r\n\t\tmemset( minDistances, 0, sizeof(float) * maxNeighbors );\r\n\t\tmemset( minNeighborI, 255, sizeof(unsigned char) * maxNeighbors );\r\n\t\t\r\n\t\tfor\t( i = 0; i < n; i++ )\r\n\t\t{\r\n\t\t\tfloat\tdist;\t\t\r\n\t\t\tif\t( i == centerI )\t\r\n\t\t\t\tcontinue;\r\n\t\t\t\r\n\t\t\tdist = \tGetDistance(finger, centerI, i );\r\n\t\t\t/* if (debug) printf(\"Consider minutiae %d:distance=%f\\n\",i,dist); */\r\n\t\t\t/* skip neighbours that are too far or too near the center minutia */\r\n\t\t\tif \t( (dist < minDistance) || (dist > maxDistance) )\t\r\n\t\t\t\tcontinue;\r\n\t\t\t\r\n\t\t\t/* skip neighbors that have been chosen */\r\n\t\t\tunsigned char\tfound = 0;\r\n\t\t\tunsigned char\tj = 0;\r\n\t\t\t\r\n\t\t\twhile ( (j < maxNeighbors) && !found )\r\n\t\t\t{\r\n\t\t\t\tif\t( neighborIds[j++] == i )\tfound = 1;\r\n\t\t\t}\r\n\t\t\tif\t(found)\r\n\t\t\t\tcontinue;\r\n\t\t\t\t\t\r\n\t\t\tfloat\td_phi = GetDra(finger, centerI, i);\r\n sector = floor( maxNeighbors * (d_phi/(2*M_PI)) 
);\r\n\t\t\tif (sector>=MAX_FOUND_NEIGHBORS) {\r\n\t\t\t\tprintf(\"Error sector >= MAX_FOUND_NEIGHBORS\\n\");\r\n\t\t\t\treturn -1;\r\n\t\t\t}\r\n\t\t\tif \t( minDistances[sector] == 0 )\r\n\t\t\t{\r\n\t\t\t\t/* if (debug) printf(\"Add minutiae %d, dist=%f, dra=%f, sector=%d\\n\",i, dist, d_phi, sector); */\r\n\t\t\t\tminDistances[sector] = dist;\r\n\t\t\t\tminNeighborI[sector] = i ;\r\n\t\t\t}\r\n\t\t\telse \r\n\t\t\t{\r\n\t\t\t\tif \t( minDistances[sector] > dist )\r\n\t\t\t\t{\r\n\t\t\t\t\t/* if (debug) printf(\"Replace minutiae %d, dist=%f, old_dist=%f dra=%f, sector=%d\\n\",i, dist, minDistances[sector], d_phi, sector); */\r\n\t\t\t\t\tminDistances[sector] = dist;\r\n\t\t\t\t\tminNeighborI[sector] = i;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\tfor ( sector = 0; sector < maxNeighbors; sector++ )\r\n\t\t{\r\n\t\t\tif\t( nNeighbors == maxNeighbors )\t\r\n\t\t\t\tbreak;\t\t\t\r\n\t\t\tif\t( minNeighborI[sector] != 255 ){\r\n\t\t\t\tneighborIds[nNeighbors++] = minNeighborI[sector];\r\n\t\t\t\t/* if (debug) printf(\"Neighbor %d in sector %d: %d\\n\",nNeighbors, sector, minNeighborI[sector]); */\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tmin->nNeighbors = nNeighbors;\r\n\treturn 0;\r\n}\r\n\r\nISOBYTE SaveFingerprint( char *path, Fingerprint *finger )\r\n{\r\n\tFILE\t*fp;\r\n\tunsigned char\tminI;\r\n\tunsigned char\tneighborI;\r\n\t\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tif ( (fp = fopen(path, \"wb\")) == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tfwrite( finger, FINGERHEADERSIZE, 1, fp );\r\n\tMinutia\t*min;\r\n\t\r\n\tfor ( minI = 0; minI < finger->nMinutiae; minI++ )\r\n\t{\r\n\t\tmin = finger->minutiae[minI];\r\n\t\tif \t(min)\r\n\t\t{\r\n\t\t\tfwrite( min, MINUTIASIZE, 1, fp );\r\n\t\t}\r\n\t}\r\n\t\r\n\tfclose(fp);\r\n\treturn\tISO_SUCCESS;\r\n}\r\n\r\nISOBYTE\tReadFingerprint( char *path, Fingerprint *finger )\r\n{\r\n\tFILE\t*fp;\r\n\tunsigned char\tminI;\r\n\tunsigned char\tneighborI;\r\n\r\n\tif ( 
path == NULL )\r\n\t{\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\r\n\tif ( (fp = fopen(path,\"rb\")) == NULL )\r\n\t{\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tfread( finger, FINGERHEADERSIZE, 1, fp );\r\n\t\r\n\tMinutia\t**minutiae = malloc( sizeof(Minutia*) * finger->nMinutiae );\r\n\tif \t( minutiae == NULL )\r\n\t\treturn\tISO_GENERICERROR;\r\n\t\r\n\tmemset( minutiae, 0x00, sizeof(Minutia*) * finger->nMinutiae );\r\n\t\r\n\tMinutia\t*min;\r\n\tfor ( minI = 0; minI < finger->nMinutiae; minI++ )\r\n\t{\t\r\n\t\tmin = malloc( sizeof(Minutia) );\r\n\t\tif \t( min == NULL ) \r\n\t\t\treturn\tISO_GENERICERROR;\r\n\r\n\t\tfread( min, MINUTIASIZE, 1, fp );\r\n\t\t\r\n\t\tminutiae[minI] = min;\r\n\t}\r\n\t\r\n\tfinger->minutiae = minutiae;\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nfloat GetDistance(Fingerprint* finger, ISOBYTE min1,ISOBYTE min2)\r\n{\r\n\tif (finger->distances==NULL) return -1;\r\n\tISOBYTE n = finger->nMinutiae;\r\n\tISOWORD id = min1*n+min2;\r\n\t/* if (debug) printf(\"Get distance %d, %d: %f\\n\", min1, min2, finger->distances[id]); */\r\n\tfloat tmp = finger->distances[id];\r\n\treturn tmp;\r\n}\r\nfloat GetDra(Fingerprint* finger, ISOBYTE min1,ISOBYTE min2)\r\n{\r\n\tISOBYTE n = finger->nMinutiae;\r\n\tISOWORD id = min1*n+min2;\r\n\t/* if (debug) printf(\"Get dra %d, %d: %f\\n\", min1, min2, finger->dra[id]); */\r\n\treturn finger->dra[id];\r\n}\r\nint GetOda(Fingerprint* finger, ISOBYTE min1,ISOBYTE min2)\r\n{\r\n\tISOBYTE n = finger->nMinutiae;\r\n\tISOWORD id = min1*n+min2;\r\n\treturn finger->oda[id];\r\n}\r\n\r\n/* Tinh goc hop boi 2 vector co goc so voi truc hoanh angle1, angle2 tuong ung\r\n Dau vao la 2 goc co gia tri tu 0-31 [0-2*pi)\r\n Ket qua la goc co gia tri tu 0-16 \r\n*/\r\nint ad_pi_iso(ISOBYTE angle1, ISOBYTE angle2)\r\n{\r\n\t\treturn min(abs(angle1-angle2),MAX_ISO_ANGLE-abs(angle1-angle2));\r\n}\r\n/* Tinh goc quay theo nguoc chieu kim dong ho cua 2 goc bat ky\r\n% Input:\r\n% angle1, angle2 - Hai goc gia 
tri tu 0 - 32\r\n% Output:\r\n% out - Goc quay theo nguoc schieu kim dong ho tu goc angle1 den angle2\r\n*/\r\n\r\n\r\nint ad_2pi_iso(ISOBYTE angle1, ISOBYTE angle2)\r\n{\r\n\treturn (angle2>=angle1)?(angle2-angle1):(MAX_ISO_ANGLE+angle2-angle1);\r\n}\r\n/* \tTinh goc hop boi 2 vector co goc so voi truc hoanh angle1, angle2 tuong ung\r\n\tInput:\r\n\t\tangle1, angle2: 2 goc co gias tri 0-2pi dang fixepoint 3.5\r\n\tOutput:\r\n\t\tGoc hop giua 2 vector co gia tri 0-pi dang fixedpoint 3.5\r\n\t\t\r\n*/\r\nfloat ad_pi(float angle1, float angle2)\r\n{\r\n\treturn min(fabs(angle1-angle2),2*M_PI-fabs(angle1-angle2));\r\n}\r\nfloat ad_2pi(float angle1, float angle2)\r\n{\r\n\treturn (angle2>=angle1)?(angle2-angle1):(2*M_PI-angle1+angle2);\r\n\t/* if angle2>=angle1\r\n out = angle2 - angle1;\r\n else out = angle2 - angle1 + 2*pi; */\r\n}\r\n\r\nISOBYTE CalculateEdDraOda (Fingerprint* finger)\r\n{\r\n\t/* calculate distances and angles between two minutiae */\r\n\tunsigned int n=finger->nMinutiae;\r\n\tunsigned int i,j,id1, id2;\r\n\tint xi,xj, yi, yj;\r\n\tint diri, dirj;\r\n\tfloat a12,a21;\r\n\tfloat *distances = malloc(sizeof(float)*n*n);\r\n\t\t\r\n\tif (distances == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tfloat *dra = malloc(sizeof(float)*n*n);\r\n\tif (dra==NULL){\r\n\t\tfree(distances);\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tint *oda = malloc(sizeof(int)*n*n);\r\n\tif (oda==NULL){\r\n\t\tfree(distances);\r\n\t\tfree(dra);\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tmemset(distances, 0, sizeof(float)*n*n);\r\n\tmemset(dra, 0, sizeof(float)*n*n);\r\n\tmemset(oda, 0, sizeof(int)*n*n);\r\n\tfor ( i = 0; i < n; i++ )\r\n\t\tfor ( j = i+1; j < n; j++ )\t{\r\n\t\t\tid1 = i*n+j;\r\n\t\t\tid2 = j*n+i;\r\n\t\t\txi = finger->minutiae[i]->x;\r\n\t\t\tyi = finger->minutiae[i]->y;\r\n\t\t\txj = finger->minutiae[j]->x;\r\n\t\t\tyj = finger->minutiae[j]->y;\r\n\t\t\tdiri = finger->minutiae[i]->angle;\r\n\t\t\tdirj = finger->minutiae[j]->angle;\r\n\t\t\tfloat d=sqrt( 
(xi-xj)*(xi-xj) + (yi-yj)*(yi-yj) );\r\n\t\t\tdistances[id1] = d;\r\n\t\t\tdistances[id2] = d;\r\n\t\t\tfloat tmp1 = yj-yi;\r\n\t\t\tfloat tmp2 = xj-xi;\r\n\t\t\t\r\n\t\t\ta12= atan2(tmp1,tmp2);\r\n\t\t\tif (a12<0) a12 = a12 + 2*M_PI;\r\n\t\t\ta21 = a12-M_PI;\r\n\t\t\tif (a21<0) a21 = a21 + 2*M_PI;\r\n\t\t\tdra[id1] = ad_2pi(ConvertISOAngle(diri),a12);\r\n\t\t\tdra[id2] = ad_2pi(ConvertISOAngle(dirj),a21);\r\n\t\t\toda[id1] = ad_2pi_iso(diri,dirj);\r\n\t\t\toda[id2] = ad_2pi_iso(dirj,diri);\r\n\t\t\t/* if (debug) printf(\"Calculated Ed, Dra, Oda, between %d,%d:x1=%d,y1=%d,dir1=%d, x2=%d,y2=%d,dir2=%d,%f,%f,%f,a12=%f,%f, dist=%f,dra=%f,oda=%d\\n\",\r\n\t\t\t\t\ti,j, xi,yi,diri,xj,yj,dirj,tmp1,tmp2,tmp1/tmp2,atan2(tmp1,tmp2),a12,distances[id1],dra[id1],oda[id1]); */\r\n\t\t}\r\n\tfinger->distances = distances;\r\n\tfinger->dra = dra;\r\n\tfinger->oda = oda;\r\n/* \tif (debug) printf(\"Calculated Ed, Dra, Oda, ed=%x=%x, dra=%x, oda=%x\",finger->distances,distances, finger->dra,finger->oda); */\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\n/* sort the minutiae according to their distances to the minutiae centerI \r\n return an array order where order[0] is the index of the closest minutia to centerI\r\n*/\r\n/*************************************************************************\r\n**************************************************************************\r\n#cat: bubble_sort_double_inc_2 - Takes a list of double ranks and a\r\n#cat: corresponding list of integer attributes, and sorts the\r\n#cat: ranks into increasing order moving the attributes\r\n#cat: correspondingly.\r\n\r\n Input:\r\n ranks - list of double to be sort on\r\n items - list of corresponding integer attributes\r\n len - number of items in list\r\n Output:\r\n ranks - list of doubles sorted in increasing order\r\n items - list of attributes in corresponding sorted order\r\n**************************************************************************/\r\nvoid sort_double_inc(double *ranks, int *items, const int 
len)\r\n{\r\n int done = 0;\r\n int i, p, n, titem;\r\n double trank;\r\n\tn = len;\r\n\tfor(i = 0; i < n; i++){\r\n\t for(p = i+1; p < n; p++){\r\n\t\t if(ranks[p] < ranks[i]){\r\n\t\t\t /* Swap ranks. */\r\n trank = ranks[i];\r\n ranks[i] = ranks[p];\r\n ranks[p] = trank;\r\n /* Swap items. */\r\n titem = items[i];\r\n items[i] = items[p];\r\n items[p] = titem;\r\n\t\t }\r\n\t }\r\n } \r\n}\r\n\r\nint sort_distance(Fingerprint* finger, ISOBYTE centerI, int* order)\r\n{\r\n\tunsigned int num = finger->nMinutiae;\r\n\tunsigned int i;\r\n\t\t\t\r\n\t/* construct rank array from the minutiae quality. */\r\n\tdouble* ranks = (double *)malloc(num * sizeof(double));\r\n\tif(ranks == NULL){\r\n fprintf(stderr, \"ERROR : sort_minutiae_y_x : malloc : ranks\\n\");\r\n return(-310);\r\n\t}\r\n\t/* if (debug) printf(\"Sort distance for minutia %d...\\n\", centerI); */\r\n\tfor(i = 0; i < num; i++){\r\n ranks[i] = GetDistance(finger,centerI,i);\r\n\t order[i] = i;\r\n\t}\r\n\t\r\n\tsort_double_inc(ranks, order, num); \r\n\t\r\n\tfree(ranks);\r\n\treturn 0;\r\n}\r\n\r\nchar CalculateLDR(Fingerprint* finger)\r\n{\r\n\tISOBYTE\tcenterI;\r\n\tISOBYTE\t*ldr;\r\n\tISOBYTE\ti, j;\r\n\tISOBYTE\tldrN = LDR_N\t ;\r\n\tISOBYTE\tNUM = LDR_NUM;\r\n\tISOBYTE POS = LDR_POS;\r\n\tfloat\tDIR = LDR_DIR;\r\n\tISOBYTE n = finger->nMinutiae;\r\n\tunsigned char\t*queue;\r\n\tunsigned char\t*stack;\r\n\tunsigned char\tqueue_size = 0;\r\n\tunsigned char \tstack_size = 0;\r\n\t\r\n\t\r\n\tldr = malloc( sizeof(ISOBYTE) * n);\r\n\tif\t( ldr == NULL ) return (-1);\r\n\tmemset( ldr, 0, sizeof(ISOBYTE) * n);\r\n\tint* sortedNeighborIds=malloc(sizeof(int)*n);\r\n\tif (sortedNeighborIds==NULL){\r\n\t\tfree(ldr);\r\n\t\treturn -1;\r\n\t}\r\n\t\r\n\tqueue = malloc(sizeof(unsigned char)*n);\r\n\tif (queue==NULL){\t\r\n\t\tfree(ldr);\r\n\t\tfree(sortedNeighborIds);\r\n\t\treturn -1;\r\n\t}\r\n\tstack = malloc(sizeof(unsigned char)*n);\r\n\tif 
(stack==NULL){\r\n\t\tfree(ldr);\r\n\t\tfree(queue);\r\n\t\tfree(sortedNeighborIds);\r\n\t\treturn -1;\r\n\t}\r\n\t\r\n\tfor\t( centerI = 0; centerI < n; centerI++ )\r\n\t{\r\n\t\tMinutia* min = finger->minutiae[centerI];\r\n\t\tif\t(min == NULL) continue;\t\r\n\t\tmemset(sortedNeighborIds,0,sizeof(int)*n);\r\n\t\t/* if (debug) printf(\"Sort distances for minutia %d:\\n\",centerI); */\r\n\t\tif (sort_distance(finger, centerI, sortedNeighborIds)){\r\n\t\t\tfree(ldr);\r\n\t\t\tfree(sortedNeighborIds);\r\n\t\t\tfree(queue);\r\n\t\t\tfree(stack);\r\n\t\t\treturn -1;\r\n\t\t}\r\n\t\tint avg_angle_diff = 0;\r\n\t\tfor (j=1;j<ldrN+1;j++){\r\n\t\t\tavg_angle_diff=avg_angle_diff+ad_pi_iso(finger->minutiae[sortedNeighborIds[j]]->angle,min->angle);\r\n\t\t}\r\n\t\t\r\n\t\tif (avg_angle_diff < LDR_ANGLE_AVG){\r\n\t\t\t/* if (debug) printf(\"Calculating LDR for minutia %d...\\n\",centerI); */\r\n\t\t\tmemset( queue, 255, sizeof(ISOBYTE) * n);\r\n\t\t\tmemset( stack, 255, sizeof(ISOBYTE) * n);\r\n\t\t\tstack_size = 0;\r\n\t\t\tqueue_size = 0;\r\n\t\t\tqueue[ queue_size++ ] = centerI;\r\n\t\t\tstack[ stack_size++ ] = centerI;\r\n\t\t\t \r\n\t\t\twhile\t( stack_size != 0 )\t{\r\n\t\t\t\tISOBYTE\tcenter = stack[ --stack_size ];\r\n\t\t\t\t/* ISOBYTE\ttmp_neighbors[ ldrN ]; */\r\n\t\t\t\tstack[ stack_size ] = 255;\r\n\t\t\t\t/* if (debug) printf(\"Pop minutia %d from %d minutiae in stack...\\n\",center, stack_size); */\r\n\t\t\t\tfor\t( i = 1; i < ldrN+1; i++ ){\r\n\t\t\t\t\t/* consider the neighbor i of the center */\r\n\t\t\t\t\tISOBYTE\tfinish = 0;\r\n\t\t\t\t\tISOBYTE\ttmp_index = sortedNeighborIds[i];\r\n\t\t\t\t\tfloat\ttmp_Ed;\r\n\t\t\t\t\tchar tmp_Angle;\r\n\t\t\t\t\tfor\t( j = 0; j < queue_size; j++)\r\n\t\t\t\t\t\t/* if the neighbor i is already in the queue, then don't add it to the queue again */\r\n\t\t\t\t\t\tif\t( tmp_index == queue[j] ){\r\n\t\t\t\t\t\t\tfinish = 1;\r\n\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\tif\t( finish == 1 ) 
continue;\r\n\t\t\t\t\ttmp_Ed = GetDistance( finger, center, tmp_index );\r\n\t\t\t\t\ttmp_Angle = ad_pi_iso(finger->minutiae[center]->angle,finger->minutiae[tmp_index]->angle);\t\r\n\t\t\t\t\t/* if (debug) printf(\"Minutiae %d<->%d, distance = %f, angle diff=%d\\n\", center,tmp_index,tmp_Ed,tmp_Angle); */\r\n\t\t\t\t\tif \t( tmp_Ed < POS && tmp_Angle <= DIR ){\r\n\t\t\t\t\t\t/* if (debug) printf(\"Push minutia %d int %d minutiae stack...\\n\",tmp_index, stack_size); */\r\n\t\t\t\t\t\tqueue[ queue_size++ ] = tmp_index;\r\n\t\t\t\t\t\tstack[ stack_size++ ] = tmp_index;\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\tif\t( queue_size > NUM )\r\n\t\t\t\tfor\t( j = 0; j < queue_size; j++ )\r\n\t\t\t\t\tldr[ queue[j] ] = ( ldr[ queue[j] ] > queue_size ) ? ldr[ queue[j] ] : queue_size;\r\n\t\t}\r\n\t}\r\n\t/* if (debug) printf(\"Updating ldr to minutia structure \\n\"); */\r\n\tfor\t( centerI = 0; centerI < n; centerI++ )\r\n\t\tfinger->minutiae[centerI]->ldr = ldr[centerI];\r\n\tfree(sortedNeighborIds);\r\n\tfree(queue);\r\n\tfree(stack);\r\n\tfree(ldr); \r\n\treturn 0;\r\n\t\r\n}\r\n\r\n/* FindDirectionalNeighborsV2: Find MAX_FOUND_NEIGHBORS nearest neighbors \r\nevenly distributed in MAX_FOUND_NEIGHBORS circle sectors arround the center\r\nGiven int* sortedNeighborIds where the distances between center minutia and other minutiae are sorted \r\n*/\r\nchar FindDirectionalNeighborsV2(Fingerprint* finger, unsigned char centerI, int* sortedNeighborIds)\r\n{\r\n\tMinutia* min = finger->minutiae[centerI];\r\n\tunsigned char\t*neighborIds = min->neighborIds;\r\n\tunsigned char\tnNeighbors = 0;\r\n\tunsigned char \tsector;\r\n\tunsigned char \tn = finger->nMinutiae;\r\n\tunsigned char\ti;\r\n\tunsigned char neighborId;\r\n\tfloat dist,d_phi;\r\n\tmemset( neighborIds, 255, sizeof(unsigned char) * MAX_FOUND_NEIGHBORS );\r\n\tchar* skip = malloc(n);\r\n\tif (skip == NULL){\r\n\t\treturn -1;\r\n\t}\r\n\tmemset(skip,0,n);\r\n\tskip[centerI]=1;\r\n\tchar found = 1;\r\n\tchar 
sectorHaveNeighbor[N_SECTORS];\r\n\t\r\n\t/* if (debug) printf(\"Find neighbors for minutiae %d: distance = %x\\n\",centerI,finger->distances); */\r\n\twhile ((nNeighbors<MAX_FOUND_NEIGHBORS)&&found){\r\n\t\tmemset(sectorHaveNeighbor,0,N_SECTORS);\r\n\t\tfound = 0;\r\n\t\tfor (i=0;((i<n)&&(nNeighbors<MAX_FOUND_NEIGHBORS));i++){\r\n\t\t\tneighborId=sortedNeighborIds[i];\r\n\t\t\tif (skip[neighborId]) continue;\r\n\t\t\tdist = \tGetDistance(finger, centerI, sortedNeighborIds[i] );\r\n\t\t\tif \t( (dist < MIN_DISTANCE) || (dist > MAX_DISTANCE) )\t{\r\n\t\t\t\tskip[neighborId]=1;\r\n\t\t\t\tcontinue;\r\n\t\t\t}\r\n\t\t\td_phi = GetDra(finger, centerI, sortedNeighborIds[i]);\r\n\t\t\tsector = floor( N_SECTORS * (d_phi/(2*M_PI)) );\r\n\t\t\tif (sector>=N_SECTORS) {\r\n\t\t\t\tprintf(\"Error sector >= N_SECTORS, centerI=%d,neighborId=%d,dra=%f\\n\",centerI,neighborId,d_phi);\r\n\t\t\t\treturn -1;\r\n\t\t\t}\r\n\t\t\tif \t(sectorHaveNeighbor[sector]) continue;\r\n\t\t\tif (debug) printf(\"Add minutiae %d, %d, dist=%f, dra=%f, sector=%d\\n\",i, neighborId, dist, d_phi, sector); \r\n\t\t\t\r\n\t\t\tneighborIds[nNeighbors++]=neighborId;\t\r\n\t\t\tsectorHaveNeighbor[sector]=1;\r\n\t\t\tskip[neighborId]=1;\r\n\t\t\tfound = 1;\r\n\t\t}\r\n\t}\r\n\tfree(skip);\r\n\tmin->nNeighbors=nNeighbors;\r\n\treturn 0;\r\n}\r\n\r\n\r\n/* FindDirectionalNeighborsV2: Find MAX_FOUND_NEIGHBORS nearest neighbors \r\nevenly distributed in MAX_FOUND_NEIGHBORS circle sectors arround the center\r\nGiven int* sortedNeighborIds where the distances between center minutia and other minutiae are sorted \r\n*/\r\nchar FindDirectionalNeighborsV3(Fingerprint* finger, unsigned char centerI, int* sortedNeighborIds){\r\n\tMinutia* min = finger->minutiae[centerI];\r\n\tunsigned char\t*neighborIds = min->neighborIds;\r\n\tunsigned char \tL = finger->nMinutiae;\r\n\t\r\n\tunsigned char count = 0, found = 1, pointer = 0;\r\n\tunsigned char *flag = (unsigned char*)malloc(L*sizeof(unsigned char));\r\n\tif(flag == 
NULL) return - 1;\r\n\tmemset(flag, 0, L*sizeof(unsigned char));\r\n\t\r\n\tunsigned char *id = (unsigned char*)malloc(N_SECTORS*sizeof(unsigned char));\r\n\tif(id == NULL){\r\n\t\tfree(flag);\r\n\t\treturn -1;\r\n\t}\r\n\tunsigned char sector_count = 0, ii, pos;\r\n\tfloat dist, d_phi;\r\n\t\r\n\twhile(count < MAX_FOUND_NEIGHBORS && found){\r\n\t\tmemset(id, 0, N_SECTORS*sizeof(unsigned char));\r\n\t\tsector_count = 0;\r\n\t\tfound = 0;\r\n\t\t\r\n\t\tfor(ii = 1; ii < L; ii++){\r\n\t\t\tif(count && flag[ii]) continue;\r\n\t\t\t\r\n\t\t\tdist = GetDistance(finger, centerI, sortedNeighborIds[ii]);\r\n\t\t\td_phi = GetDra(finger, centerI, sortedNeighborIds[ii]);\r\n\t\t\t\r\n\t\t\tpos = floor( N_SECTORS * (d_phi/(2*M_PI)) );\r\n\t\t\tif (pos >= N_SECTORS) {\r\n\t\t\t\tprintf(\"Error sector >= N_SECTORS\\n\");\r\n\t\t\t\treturn -1;\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tif(!id[pos]){\r\n\t\t\t\tif(dist < MAX_DISTANCE && dist > MIN_DISTANCE && count < MAX_FOUND_NEIGHBORS){\r\n\t\t\t\t\tid[pos] = ii;\r\n\t\t\t\t\tcount++;\r\n\t\t\t\t\tflag[ii] = 1;\r\n\t\t\t\t\tsector_count++;\r\n\t\t\t\t\tfound = 1;\r\n if (debug) \r\n printf(\"Add minutiae %d, %d, dist=%f, dra=%f, sector=%d\\n\",ii, sortedNeighborIds[ii], dist, d_phi, pos); \r\n\r\n\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tif(count >= MAX_FOUND_NEIGHBORS || sector_count >= N_SECTORS){\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\tif(found){\r\n\t\t\tfor(ii = 0; ii < N_SECTORS; ii++){\r\n\t\t\t\tif (id[ii]){\r\n\t\t\t\t\tneighborIds[pointer] = sortedNeighborIds[id[ii]];\r\n\t\t\t\t\tpointer++;\r\n\t\t\t\t\tif(pointer >= MAX_FOUND_NEIGHBORS){\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\t\r\n\tfree(id);\r\n\tfree(flag);\r\n\tmin->nNeighbors = count;\r\n\treturn 0;\r\n}\r\n\r\n\r\n/* Call the FindDirectionalNeighborsV2 where the distances between center minutia and other minutiae are sorted */\r\nchar CalculateLDRNeighbors(Fingerprint* finger)\r\n{\r\n\t/********/\r\n\tint debug = 
0;\r\n\tint cc;\r\n\t/********/\r\n\t\r\n\tISOBYTE\tcenterI;\r\n\tISOBYTE\t*ldr;\r\n\tISOBYTE\ti, j;\r\n\tISOBYTE\tldrN = LDR_N\t ;\r\n\tISOBYTE\tNUM = LDR_NUM;\r\n\tISOBYTE POS = LDR_POS;\r\n\tfloat\tDIR = LDR_DIR;\r\n\tISOBYTE n = finger->nMinutiae;\r\n\tunsigned char\t*queue;\r\n\tunsigned char\t*stack;\r\n\tunsigned char\tqueue_size = 0;\r\n\tunsigned char \tstack_size = 0;\r\n\t\r\n\t/******** Dynamic allocating memory for ldr *******/\r\n\tldr = (ISOBYTE*) malloc( sizeof(ISOBYTE) * n);\r\n\tif\t( ldr == (ISOBYTE*) NULL ) return (-1);\r\n\tmemset( ldr, 0, sizeof(ISOBYTE) * n);\r\n\t\r\n\t/******** Dynamic allocating memory for sortedNeighborIds *******/\r\n\tint* sortedNeighborIds = (int*)malloc(sizeof(int)*n);\r\n\tif (sortedNeighborIds == (int*)NULL){\r\n\t\tfree(ldr);\r\n\t\treturn -1;\r\n\t}\r\n\t\r\n\t/******** Dynamic allocating memory for queue and stack *******/\r\n\tqueue = (unsigned char*)malloc(sizeof(unsigned char)*n);\r\n\tif (queue == (unsigned char*)NULL){\t\r\n\t\tfree(ldr);\r\n\t\tfree(sortedNeighborIds);\r\n\t\treturn -1;\r\n\t}\r\n\tstack = (unsigned char*)malloc(sizeof(unsigned char)*n);\r\n\tif (stack == (unsigned char*)NULL){\r\n\t\tfree(ldr);\r\n\t\tfree(queue);\r\n\t\tfree(sortedNeighborIds);\r\n\t\treturn -1;\r\n\t}\r\n\t\r\n\tfor\t( centerI = 0; centerI < n; centerI++ )\r\n\t{\r\n\t\tMinutia* min = finger->minutiae[centerI];\r\n\t\tif\t(min == NULL) continue;\t\r\n\t\tmemset(sortedNeighborIds,0,sizeof(int)*n);\r\n\t\t\r\n\t\t/******/\r\n\t\tdebug = 0;\r\n\t\tif (debug) printf(\"Sort distances for minutia %d:\\n\",centerI);\r\n\t\t/******/\r\n\t\t\r\n\t\tif (sort_distance(finger, centerI, sortedNeighborIds)){\r\n\t\t\tfree(ldr);\r\n\t\t\tfree(sortedNeighborIds);\r\n\t\t\tfree(queue);\r\n\t\t\tfree(stack);\r\n\t\t\treturn -1;\r\n\t\t}\r\n\t\t/* debug = 1; \r\n\t\tif (debug)\r\n\t\t\tprintf(\"Find neighbors for %d\\n\",centerI); */\r\n 
\r\n\t\tFindDirectionalNeighborsV3(finger,centerI,sortedNeighborIds);\r\n\t\t/*FindDirectionalNeighbors(finger,centerI);*/\r\n\t\tdebug = 0; \r\n\t\tif(debug){\r\n\t\t\tprintf(\". min %d:\", centerI+1);\r\n\t\t\tfor(cc = 0; cc < finger->minutiae[centerI]->nNeighbors; cc++){\r\n\t\t\t\tprintf(\"\\t%d\", finger->minutiae[centerI]->neighborIds[cc]+1);\r\n\t\t\t}\r\n\t\t\tprintf(\"\\n\");\r\n\r\n\t\t}\r\n\t\tfloat avg_angle_diff = 0;\r\n\t\tfor (j=1;j<ldrN+1;j++){\r\n\t\t\tavg_angle_diff=avg_angle_diff+ad_pi_iso(finger->minutiae[sortedNeighborIds[j]]->angle,min->angle);\r\n\t\t}\r\n\t\tavg_angle_diff=avg_angle_diff/ldrN;\r\n\t\tif (avg_angle_diff <= LDR_ANGLE_AVG){\r\n\t\t\t/********/\r\n\r\n\t\t\tif (debug) printf(\"Calculating LDR for minutia %d...\\n\",centerI);\r\n\t\t\t/********/\r\n\t\t\t\r\n\t\t\tmemset( queue, 255, sizeof(ISOBYTE) * n);\r\n\t\t\tmemset( stack, 255, sizeof(ISOBYTE) * n);\r\n\t\t\tstack_size = 0;\r\n\t\t\tqueue_size = 0;\r\n\t\t\tqueue[ queue_size++ ] = centerI;\r\n\t\t\tstack[ stack_size++ ] = centerI;\r\n\t\t\t \r\n\t\t\twhile\t( stack_size != 0 )\t{\r\n\t\t\t\tISOBYTE\tcenter = stack[ --stack_size ];\r\n\t\t\t\t/* ISOBYTE\ttmp_neighbors[ ldrN ]; */\r\n\t\t\t\tstack[ stack_size ] = 255;\r\n\r\n\t\t\t\tif (debug) printf(\"Pop minutia %d from %d minutiae in stack...\\n\",center, stack_size);\r\n\t\t\t\t/*******/\r\n\t\t\t\t\r\n\t\t\t\tfor\t( i = 1; i < ldrN+1; i++ ){\r\n\t\t\t\t\t/* consider the neighbor i of the center */\r\n\t\t\t\t\tISOBYTE\tfinish = 0;\r\n\t\t\t\t\tISOBYTE\ttmp_index = sortedNeighborIds[i];\r\n\t\t\t\t\tfloat\ttmp_Ed;\r\n\t\t\t\t\tint tmp_Angle;\r\n\t\t\t\t\tfor\t( j = 0; j < queue_size; j++)\r\n\t\t\t\t\t\t/* if the neighbor i is already in the queue, then don't add it to the queue again */\r\n\t\t\t\t\t\tif\t( tmp_index == queue[j] ){\r\n\t\t\t\t\t\t\tfinish = 1;\r\n\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\tif\t( finish == 1 ) continue;\r\n\t\t\t\t\ttmp_Ed = GetDistance( finger, center, tmp_index 
);\r\n\t\t\t\t\ttmp_Angle = ad_pi_iso(finger->minutiae[center]->angle,finger->minutiae[tmp_index]->angle);\t\r\n\t\t\t\t\t\r\n\t\t\t\t\t/******/\r\n\r\n\t\t\t\t\tif (debug) printf(\"Minutiae %d<->%d, distance = %f, angle diff=%d\\n\", center,tmp_index,tmp_Ed,tmp_Angle); \r\n\t\t\t\t\t/******/\r\n\t\t\t\t\t\r\n\t\t\t\t\tif \t( tmp_Ed < POS && tmp_Angle <= DIR ){\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t/*******/\r\n\r\n\t\t\t\t\t\tif (debug) printf(\"Push minutia %d int %d minutiae stack...\\n\",tmp_index, stack_size);\r\n\t\t\t\t\t\t/*******/\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tqueue[ queue_size++ ] = tmp_index;\r\n\t\t\t\t\t\tstack[ stack_size++ ] = tmp_index;\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\tif\t( queue_size > NUM )\r\n\t\t\t\tfor\t( j = 0; j < queue_size; j++ )\r\n\t\t\t\t\tldr[ queue[j] ] = ( ldr[ queue[j] ] > queue_size ) ? ldr[ queue[j] ] : queue_size;\r\n\t\t}\r\n\t}\r\n\t/* if (debug) printf(\"Updating ldr to minutia structure \\n\"); */\r\n\tfor\t( centerI = 0; centerI < n; centerI++ )\r\n\t\tfinger->minutiae[centerI]->ldr = ldr[centerI];\r\n\tfree(sortedNeighborIds);\r\n\tfree(queue);\r\n\tfree(stack);\r\n\tfree(ldr); \r\n\treturn 0;\r\n\t\r\n}" }, { "alpha_fraction": 0.5806464552879333, "alphanum_fraction": 0.6120446920394897, "avg_line_length": 50.26050567626953, "blob_id": "0ce77d1246dd47d3c620be10d8179aebc7050dd8", "content_id": "60ab290c096931e530ddb8e25806951f44d813f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 24874, "license_type": "no_license", "max_line_length": 464, "num_lines": 476, "path": "/bkafis/bkafis/src/lib/bkafis/iso_format.cpp", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/**\r\n * tool_ISO.h\r\n * Description: Chua cac ham cong cu cho viec xu li chuan ISO_2011\r\n * Created on: Sep 24, 2014 \r\n *\t\tAuthor: Kien\r\n */\r\n\r\n#include <stdio.h>\r\n#include <conio.h>\r\n#include <stdint.h>\r\n#include <iso_format.h>\r\n\r\n/** Dao thu tu cac byte trong 
mot so nguyen (Vd: abcd -> dcba)\r\n\tInput:\r\n\t\t\tin - So nguyen dau vao\r\n\tOutput:\r\n\t\t\tout - So nguyen sau khi da dao thu tu cac byte\r\n */\r\ntemplate <class T>\r\nT convert(T in){\r\n int n = sizeof(T);\r\n T out=0;\r\n for(int i=0;i<n;i++){\r\n out = out | (((in>>(i*8)) & 0xff)<<((n-i-1)*8));\r\n }\r\n return out;\r\n};\r\n\r\n/** In ra thong tin cua mot du lieu van tay chuan ISO_2011\r\n\tInput:\r\n\t\t\tmy_f - Du lieu van tay dau vao\r\n */\r\nvoid display_iso2011(ISO_2011::FingerPrint2011 my_f){\r\n printf(\"----------------Phan Header chung: -------------------\");\r\n printf(\"\\nFormat ID: %s\\n\",my_f.RecordHeader.FormatId);\r\n printf(\"Version: %s\\n\",my_f.RecordHeader.Version);\r\n printf(\"Record length: %u\\n\",my_f.RecordHeader.RecordLength);\r\n printf(\"Number of Finger Representations: %u\\n\",my_f.RecordHeader.NumberOfFingerRepresentation);\r\n printf(\"Device Certification Block Flag: %u\\n\",my_f.RecordHeader.DeviceBlockFlag);\r\n for(int j=0;j<my_f.RecordHeader.NumberOfFingerRepresentation;j++){\r\n printf(\"\\n\\n----------------Du lieu van tay thu %u: ------------------\\n\",j+1);\r\n printf(\"Representation length: %u\\n\",my_f.FingerData[j].FingerHeader.RepresentationLength);\r\n printf(\"Capture date and time: %u:%u:%u:%u Ngay %u, Thang %u, Nam %u\\n\",my_f.FingerData[j].FingerHeader.CaptureDateTime.Hour,my_f.FingerData[j].FingerHeader.CaptureDateTime.Minute,my_f.FingerData[j].FingerHeader.CaptureDateTime.Second,my_f.FingerData[j].FingerHeader.CaptureDateTime.Milisecond,my_f.FingerData[j].FingerHeader.CaptureDateTime.Date,my_f.FingerData[j].FingerHeader.CaptureDateTime.Mounth,my_f.FingerData[j].FingerHeader.CaptureDateTime.Year);\r\n printf(\"Capture device technology identifier: %u\\n\",my_f.FingerData[j].FingerHeader.CaptureDeviceTech);\r\n printf(\"Capture device vendor identifier: %u\\n\",my_f.FingerData[j].FingerHeader.CaptureDeviceVendorId);\r\n printf(\"Capture device type identifier: 
%u\\n\",my_f.FingerData[j].FingerHeader.CaptureDeviceTypeId);\r\n printf(\"Number of quality blocks: %u\\n\", my_f.FingerData[j].FingerHeader.NumberOfQualityBlock);\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfQualityBlock;k++){\r\n printf(\"\\tBlock %u:\\n\",k+1);\r\n printf(\"\\tQuality Score: %u\\n\",my_f.FingerData[j].FingerHeader.QualityRecord[k].QualityScore);\r\n printf(\"\\tQuality algorithm vendor ID: %u\\n\", my_f.FingerData[j].FingerHeader.QualityRecord[k].QualityAlgorithmVendorId);\r\n printf(\"\\tQuality algorithm ID: %u\\n\",my_f.FingerData[j].FingerHeader.QualityRecord[k].QualityAlgorithmId);\r\n }\r\n if(my_f.RecordHeader.DeviceBlockFlag != 0){\r\n printf(\"Number of certification blocks: %u\\n\",my_f.FingerData[j].FingerHeader.NumberOfCertification);\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfCertification;k++){\r\n printf(\"\\tBlock %u\\n\",k+1);\r\n printf(\"\\tCertification authority identifier: %u\\n\",my_f.FingerData[j].FingerHeader.CertificationRecord[k].AuthorId);\r\n printf(\"\\tCertification scheme identifier: %u\\n\",my_f.FingerData[j].FingerHeader.CertificationRecord[k].SchemeId);\r\n }\r\n }\r\n printf(\"Finger position: %u\\n\",my_f.FingerData[j].FingerHeader.FingerPosition);\r\n printf(\"Representation number: %u\\n\",my_f.FingerData[j].FingerHeader.RepresentationNumber);\r\n printf(\"Image spatial sampling rate (horiz): %u\\n\",my_f.FingerData[j].FingerHeader.X_spatialSamplingRate);\r\n printf(\"Image spatial sampling rate (vert): %u\\n\",my_f.FingerData[j].FingerHeader.Y_spatialSamplingRate);\r\n printf(\"Impression type: %u\\n\",my_f.FingerData[j].FingerHeader.ImpressionType);\r\n printf(\"Size of scanned image in X: %u\\n\",my_f.FingerData[j].FingerHeader.HorizontalImageSize);\r\n printf(\"Size of Scanned image in Y: %u\\n\", my_f.FingerData[j].FingerHeader.VerticalImageSize);\r\n printf(\"Minutiae field length: %u\\n\", my_f.FingerData[j].FingerHeader.MinutiaeFieldLength_RidgeEndingType>>4);\r\n 
printf(\"Ridge ending type: %u\\n\",my_f.FingerData[j].FingerHeader.MinutiaeFieldLength_RidgeEndingType & 0x0f);\r\n printf(\"Number of minutiae: %u\\n\",my_f.FingerData[j].FingerHeader.NumberOfMinutiae);\r\n printf(\"\\tSTT\\tType\\tX\\tY\\tAngle\\tQuality\\n\");\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfMinutiae;k++){\r\n printf(\"\\t%u\\t%u\\t%u\\t%u\\t%u\\t%u\\n\",k+1,my_f.FingerData[j].MinutieData6[k].Type_CoordinateX>>14\r\n ,my_f.FingerData[j].MinutieData6[k].Type_CoordinateX & 0x3fff\r\n ,my_f.FingerData[j].MinutieData6[k].CoordinateY\r\n ,my_f.FingerData[j].MinutieData6[k].Angle\r\n ,my_f.FingerData[j].MinutieData6[k].Quality);\r\n }\r\n printf(\"Extended data block length: %u\\n\",my_f.FingerData[j].ExtendedData.ExtendedDataLength);\r\n if(my_f.FingerData[j].ExtendedData.ExtendedDataLength != 0){\r\n printf(\"Extended data area type code: %u\\n\",my_f.FingerData[j].ExtendedData.ExtendedDataType);\r\n printf(\"Extended data area length: %u\\n\",my_f.FingerData[j].ExtendedData.ExtendedDataAreaLength);\r\n if(my_f.FingerData[j].ExtendedData.ExtendedDataType == 1){\r\n printf(\"Extended data type is Ridge_Count_Data:\\n\" );\r\n printf(\"\\tRidge count extraction method: %u\\n\", my_f.FingerData[j].ExtendedData.Data.RidgeCountData.ExtractionMethod);\r\n printf(\"\\tIndex1\\tIndex2\\tCount\\n\");\r\n for(int k=0;k<(my_f.FingerData[j].ExtendedData.ExtendedDataLength-3)/3;k++){\r\n printf(\"\\t%u\\t%u\\t%u\", my_f.FingerData[j].ExtendedData.Data.RidgeCountData.RidgeCountData[k].Index1,\r\n my_f.FingerData[j].ExtendedData.Data.RidgeCountData.RidgeCountData[k].Index2,\r\n my_f.FingerData[j].ExtendedData.Data.RidgeCountData.RidgeCountData[k].CountOfRidges);\r\n }\r\n }\r\n }\r\n }\r\n}\r\n\r\n/** In ra thong tin cua mot du lieu van tay chuan ISO_2005\r\n\tInput:\r\n\t\t\tmy_f - Du lieu van tay dau vao\r\n */\r\nvoid display_iso2005(ISO_2005::FingerPrint2005 my_f){\r\n printf(\"----------------Phan Header chung: -------------------\");\r\n 
printf(\"\\nFormat ID: %s\\n\",my_f.RecordHeader.FormatId);\r\n printf(\"Version: %s\\n\",my_f.RecordHeader.Version);\r\n printf(\"Record length: %u\\n\",my_f.RecordHeader.RecordLength);\r\n printf(\"Capture Device: %u\\n\",my_f.RecordHeader.CaptureDevice);\r\n printf(\"Image Size X: %u\\n\",my_f.RecordHeader.ImageSizeX);\r\n printf(\"Image Size Y: %u\\n\",my_f.RecordHeader.ImageSizeY);\r\n printf(\"Resolution X: %u\\n\",my_f.RecordHeader.ResolutionX);\r\n printf(\"Resolution Y: %u\\n\",my_f.RecordHeader.ResolutionX);\r\n printf(\"Number Of Finger: %u\\n\",my_f.RecordHeader.NumberOfFinger);\r\n printf(\"Reserved: %u\\n\",my_f.RecordHeader.Reserved);\r\n \r\n for(int j=0;j<my_f.RecordHeader.NumberOfFinger;j++){\r\n printf(\"\\n\\n----------------Du lieu van tay thu %u: ------------------\\n\",j+1);\r\n printf(\"Finger Position: %u\\n\",my_f.FingerData[j].FingerHeader.FingerPosition);\r\n printf(\"Number and Impression Type: %u\\n\",my_f.FingerData[j].FingerHeader.Number_ImpressionType);\r\n printf(\"Finger Quality: %u\\n\",my_f.FingerData[j].FingerHeader.FingerQuality); \r\n printf(\"Number of minutiae: %u\\n\",my_f.FingerData[j].FingerHeader.NumberOfMinutiae);\r\n \r\n printf(\"\\tSTT\\tType\\tX\\tY\\tAngle\\tQuality\\n\");\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfMinutiae;k++){\r\n printf(\"\\t%u\\t%u\\t%u\\t%u\\t%u\\t%u\\n\",k+1,my_f.FingerData[j].MinutieData6[k].Type_CoordinateX>>14\r\n ,my_f.FingerData[j].MinutieData6[k].Type_CoordinateX & 0x3fff\r\n ,my_f.FingerData[j].MinutieData6[k].CoordinateY\r\n ,my_f.FingerData[j].MinutieData6[k].Angle\r\n ,my_f.FingerData[j].MinutieData6[k].Quality);\r\n }\r\n printf(\"Extended data block length: %u\\n\",my_f.FingerData[j].ExtendedData.ExtendedDataLength);\r\n }\r\n}\r\n\r\n/** Doc file ISO_2011 dau vao thanh cau truc du lieu van tay\r\n\tInput:\r\n\t\t\tfilename - Ten file ISO\r\n\tOutput:\r\n\t\t\tmy_f - Cau truc du lieu van tay\r\n */\r\nISO_2011::FingerPrint2011 read_iso2011(char* filename){\r\n 
FILE *p;\r\n p = fopen(filename, \"rb\");\r\n if(p == NULL) throw \"File does not exist!\";\r\n fseek(p, 0, SEEK_END);\r\n int size_file = ftell(p);\r\n fseek(p, 0, SEEK_SET);\r\n \r\n ISO_2011::FingerPrint2011 my_f;\r\n \r\n uint32_t tmp32;\r\n uint16_t tmp16;\r\n uint64_t tmp64;\r\n uint8_t tmp8;\r\n \r\n fread(&my_f.RecordHeader.FormatId, sizeof(char*), 1, p);\r\n if(strcmp(my_f.RecordHeader.FormatId, \"FMR\")) throw \"Not ISO-19794_2 format!\";\r\n \r\n fread(&my_f.RecordHeader.Version, sizeof(char*), 1, p);\r\n if(strcmp(my_f.RecordHeader.Version, \"030\")) throw \"Not ISO-19794_2:2011 version!\";\r\n \r\n fread(&tmp32, sizeof(int), 1, p);\r\n my_f.RecordHeader.RecordLength = convert(tmp32);\r\n \r\n fread(&tmp16, 2, 1, p);\r\n my_f.RecordHeader.NumberOfFingerRepresentation = convert(tmp16);\r\n \r\n fread(&my_f.RecordHeader.DeviceBlockFlag, 1, 1, p);\r\n \r\n for(int j=0;j<my_f.RecordHeader.NumberOfFingerRepresentation;j++){\r\n fread(&tmp32, sizeof(int), 1, p);\r\n my_f.FingerData[j].FingerHeader.RepresentationLength = convert(tmp32);\r\n \r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.CaptureDateTime.Year = convert(tmp16);\r\n fread(&my_f.FingerData[j].FingerHeader.CaptureDateTime.Mounth, 1, 1, p);\r\n fread(&my_f.FingerData[j].FingerHeader.CaptureDateTime.Date, 1, 1, p);\r\n fread(&my_f.FingerData[j].FingerHeader.CaptureDateTime.Hour, 1, 1, p);\r\n fread(&my_f.FingerData[j].FingerHeader.CaptureDateTime.Minute, 1, 1, p);\r\n fread(&my_f.FingerData[j].FingerHeader.CaptureDateTime.Second, 1, 1, p);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.CaptureDateTime.Milisecond = convert(tmp16);\r\n fread(&my_f.FingerData[j].FingerHeader.CaptureDeviceTech, 1, 1, p);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.CaptureDeviceVendorId = convert(tmp16);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.CaptureDeviceTypeId = convert(tmp16);\r\n fread(&my_f.FingerData[j].FingerHeader.NumberOfQualityBlock, 
1, 1, p);\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfQualityBlock;k++){\r\n fread(&my_f.FingerData[j].FingerHeader.QualityRecord[k].QualityScore, 1, 1, p);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.QualityRecord[k].QualityAlgorithmVendorId = convert(tmp16);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.QualityRecord[k].QualityAlgorithmId = convert(tmp16);\r\n }\r\n if(my_f.RecordHeader.DeviceBlockFlag != 0){\r\n fread(&my_f.FingerData[j].FingerHeader.NumberOfCertification, 1, 1, p);\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfCertification;k++){\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.CertificationRecord[k].AuthorId = convert(tmp16);\r\n fread(&my_f.FingerData[j].FingerHeader.CertificationRecord[k].SchemeId, 1, 1, p);\r\n }\r\n }\r\n fread(&my_f.FingerData[j].FingerHeader.FingerPosition, 1, 1, p);\r\n fread(&my_f.FingerData[j].FingerHeader.RepresentationNumber, 1, 1, p);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.X_spatialSamplingRate = convert(tmp16);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.Y_spatialSamplingRate = convert(tmp16);\r\n fread(&my_f.FingerData[j].FingerHeader.ImpressionType, 1, 1, p);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.HorizontalImageSize = convert(tmp16);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].FingerHeader.VerticalImageSize = convert(tmp16);\r\n fread(&my_f.FingerData[j].FingerHeader.MinutiaeFieldLength_RidgeEndingType, 1, 1, p);\r\n fread(&my_f.FingerData[j].FingerHeader.NumberOfMinutiae, 1, 1, p);\r\n \r\n //Du lieu minutie\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfMinutiae;k++){\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].MinutieData6[k].Type_CoordinateX = convert(tmp16);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].MinutieData6[k].CoordinateY = convert(tmp16);\r\n fread(&my_f.FingerData[j].MinutieData6[k].Angle, 1, 1, p);\r\n 
fread(&my_f.FingerData[j].MinutieData6[k].Quality, 1, 1, p);\r\n }\r\n \r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].ExtendedData.ExtendedDataLength = convert(tmp16);\r\n if(my_f.FingerData[j].ExtendedData.ExtendedDataLength != 0){\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].ExtendedData.ExtendedDataType = convert(tmp16);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].ExtendedData.ExtendedDataAreaLength = convert(tmp16);\r\n if(my_f.FingerData[j].ExtendedData.ExtendedDataType == 1){\r\n fread(&my_f.FingerData[j].ExtendedData.Data.RidgeCountData.ExtractionMethod, 1, 1, p);\r\n for(int k=0;k<(my_f.FingerData[j].ExtendedData.ExtendedDataLength-5)/3;k++){\r\n fread(&my_f.FingerData[j].ExtendedData.Data.RidgeCountData.RidgeCountData[k].Index1, 1, 1, p);\r\n fread(&my_f.FingerData[j].ExtendedData.Data.RidgeCountData.RidgeCountData[k].Index2, 1, 1, p);\r\n fread(&my_f.FingerData[j].ExtendedData.Data.RidgeCountData.RidgeCountData[k].CountOfRidges, 1, 1, p);\r\n }\r\n }else{\r\n fread(&my_f.FingerData[j].ExtendedData.Data, 1, my_f.FingerData[j].ExtendedData.ExtendedDataLength-4, p);\r\n }\r\n }\r\n }\r\n fclose(p);\r\n return my_f;\r\n}\r\n\r\n/** Ghi doi tuong du lieu van tay thanh file ISO_2011\r\n\tInput:\r\n\t\t\tmy_f - Doi tuong du lieu van tay\r\n\tOutput:\r\n\t\t\tfilename - Ten file ISO can ghi \r\n */\r\nvoid write_iso2011(ISO_2011::FingerPrint2011 my_f, char* namefile){\r\n FILE *p;\r\n p = fopen(namefile, \"wb\");\r\n uint32_t tmp32;\r\n uint16_t tmp16;\r\n uint64_t tmp64;\r\n uint8_t tmp8;\r\n fwrite(my_f.RecordHeader.FormatId, sizeof(char*), 1, p);\r\n fwrite(my_f.RecordHeader.Version, sizeof(char*), 1, p);\r\n tmp32 = convert(my_f.RecordHeader.RecordLength);\r\n fwrite(&tmp32, 4, 1, p);\r\n tmp16 = convert(my_f.RecordHeader.NumberOfFingerRepresentation);\r\n fwrite(&tmp16, 2, 1, p);\r\n fwrite(&my_f.RecordHeader.DeviceBlockFlag, 1, 1, p);\r\n for(int j=0;j<my_f.RecordHeader.NumberOfFingerRepresentation;j++){\r\n tmp32 = 
convert(my_f.FingerData[j].FingerHeader.RepresentationLength);\r\n fwrite(&tmp32, sizeof(int), 1, p);\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.CaptureDateTime.Year);\r\n fwrite(&tmp16, 2, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.CaptureDateTime.Mounth, 1, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.CaptureDateTime.Date, 1, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.CaptureDateTime.Hour, 1, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.CaptureDateTime.Minute, 1, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.CaptureDateTime.Second, 1, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.CaptureDateTime.Milisecond);\r\n fwrite(&tmp16, 2, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.CaptureDeviceTech, 1, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.CaptureDeviceVendorId);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.CaptureDeviceTypeId);\r\n fwrite(&tmp16, 2, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.NumberOfQualityBlock, 1, 1, p);\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfQualityBlock;k++){\r\n fwrite(&my_f.FingerData[j].FingerHeader.QualityRecord[k].QualityScore, 1, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.QualityRecord[k].QualityAlgorithmVendorId);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.QualityRecord[k].QualityAlgorithmId);\r\n fwrite(&tmp16, 2, 1, p);\r\n }\r\n if(my_f.RecordHeader.DeviceBlockFlag != 0){\r\n fwrite(&my_f.FingerData[j].FingerHeader.NumberOfCertification, 1, 1, p);\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfCertification;k++){\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.CertificationRecord[k].AuthorId);\r\n fwrite(&tmp16, 2, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.CertificationRecord[k].SchemeId, 1, 1, p);\r\n }\r\n }\r\n fwrite(&my_f.FingerData[j].FingerHeader.FingerPosition, 1, 1, p);\r\n 
fwrite(&my_f.FingerData[j].FingerHeader.RepresentationNumber, 1, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.X_spatialSamplingRate);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.Y_spatialSamplingRate);\r\n fwrite(&tmp16, 2, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.ImpressionType, 1, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.HorizontalImageSize);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].FingerHeader.VerticalImageSize);\r\n fwrite(&tmp16, 2, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.MinutiaeFieldLength_RidgeEndingType, 1, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.NumberOfMinutiae, 1, 1, p);\r\n \r\n //Du lieu minutie\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfMinutiae;k++){\r\n tmp16 = convert(my_f.FingerData[j].MinutieData6[k].Type_CoordinateX);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].MinutieData6[k].CoordinateY);\r\n fwrite(&tmp16, 2, 1, p);\r\n fwrite(&my_f.FingerData[j].MinutieData6[k].Angle, 1, 1, p);\r\n fwrite(&my_f.FingerData[j].MinutieData6[k].Quality, 1, 1, p);\r\n }\r\n \r\n tmp16 = convert(my_f.FingerData[j].ExtendedData.ExtendedDataLength);\r\n fwrite(&tmp16, 2, 1, p);\r\n if(my_f.FingerData[j].ExtendedData.ExtendedDataLength != 0){\r\n tmp16 = convert(my_f.FingerData[j].ExtendedData.ExtendedDataType);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].ExtendedData.ExtendedDataAreaLength);\r\n fwrite(&tmp16, 2, 1, p);\r\n if(my_f.FingerData[j].ExtendedData.ExtendedDataType == 1){\r\n fwrite(&my_f.FingerData[j].ExtendedData.Data.RidgeCountData.ExtractionMethod, 1, 1, p);\r\n for(int k=0;k<(my_f.FingerData[j].ExtendedData.ExtendedDataLength-5)/3;k++){\r\n fwrite(&my_f.FingerData[j].ExtendedData.Data.RidgeCountData.RidgeCountData[k].Index1, 1, 1, p);\r\n fwrite(&my_f.FingerData[j].ExtendedData.Data.RidgeCountData.RidgeCountData[k].Index2, 1, 1, p);\r\n 
fwrite(&my_f.FingerData[j].ExtendedData.Data.RidgeCountData.RidgeCountData[k].CountOfRidges, 1, 1, p);\r\n }\r\n }else {\r\n fwrite(&my_f.FingerData[j].ExtendedData.Data, 1, my_f.FingerData[j].ExtendedData.ExtendedDataLength-4, p);\r\n }\r\n }\r\n }\r\n \r\n fclose(p);\r\n}\r\n\r\n/** Doc file ISO_2005 dau vao thanh cau truc du lieu van tay\r\n\tInput:\r\n\t\t\tfilename - Ten file ISO\r\n\tOutput:\r\n\t\t\tmy_f - Cau truc du lieu van tay\r\n */\r\nISO_2005::FingerPrint2005 read_iso2005(char* filename){\r\n FILE *p;\r\n p = fopen(filename, \"rb\");\r\n if(p == NULL) throw \"File does not exist!\";\r\n \r\n fseek(p, 0, SEEK_END);\r\n int size_file = ftell(p);\r\n fseek(p, 0, SEEK_SET);\r\n \r\n ISO_2005::FingerPrint2005 my_f;\r\n \r\n uint32_t tmp32;\r\n uint16_t tmp16;\r\n uint64_t tmp64;\r\n uint8_t tmp8;\r\n \r\n fread(&my_f.RecordHeader.FormatId, sizeof(char*), 1, p);\r\n if(strcmp(my_f.RecordHeader.FormatId, \"FMR\")) throw \"Not ISO-19794_2 format!\";\r\n \r\n fread(&my_f.RecordHeader.Version, sizeof(char*), 1, p);\r\n if(strcmp(my_f.RecordHeader.Version, \" 20\")) throw \"Not ISO-19794_2:2005 version!\";\r\n \r\n fread(&tmp32, sizeof(int), 1, p);\r\n my_f.RecordHeader.RecordLength = convert(tmp32);\r\n \r\n fread(&tmp16, 2, 1, p);\r\n my_f.RecordHeader.CaptureDevice = convert(tmp16); \r\n \r\n fread(&tmp16, 2, 1, p);\r\n my_f.RecordHeader.ImageSizeX = convert(tmp16);\r\n \r\n fread(&tmp16, 2, 1, p);\r\n my_f.RecordHeader.ImageSizeY = convert(tmp16);\r\n \r\n fread(&tmp16, 2, 1, p);\r\n my_f.RecordHeader.ResolutionX = convert(tmp16);\r\n \r\n fread(&tmp16, 2, 1, p);\r\n my_f.RecordHeader.ResolutionY = convert(tmp16);\r\n \r\n fread(&my_f.RecordHeader.NumberOfFinger, 1, 1, p);\r\n\r\n fread(&my_f.RecordHeader.Reserved, 1, 1, p);\r\n \r\n for(int j=0;j<my_f.RecordHeader.NumberOfFinger;j++){\r\n fread(&my_f.FingerData[j].FingerHeader.FingerPosition, 1, 1, p); \r\n fread(&my_f.FingerData[j].FingerHeader.Number_ImpressionType, 1, 1, p); \r\n 
fread(&my_f.FingerData[j].FingerHeader.FingerQuality, 1, 1, p); \r\n fread(&my_f.FingerData[j].FingerHeader.NumberOfMinutiae, 1, 1, p);\r\n \r\n //Du lieu minutie\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfMinutiae;k++){\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].MinutieData6[k].Type_CoordinateX = convert(tmp16);\r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].MinutieData6[k].CoordinateY = convert(tmp16);\r\n fread(&my_f.FingerData[j].MinutieData6[k].Angle, 1, 1, p);\r\n fread(&my_f.FingerData[j].MinutieData6[k].Quality, 1, 1, p);\r\n }\r\n \r\n fread(&tmp16, 2, 1, p);\r\n my_f.FingerData[j].ExtendedData.ExtendedDataLength = convert(tmp16);\r\n }\r\n fclose(p);\r\n return my_f;\r\n}\r\n\r\n/** Ghi doi tuong du lieu van tay thanh file ISO_2005\r\n\tInput:\r\n\t\t\tmy_f - Doi tuong du lieu van tay\r\n\tOutput:\r\n\t\t\tfilename - Ten file ISO can ghi \r\n */\r\nvoid write_iso2005(ISO_2005::FingerPrint2005 my_f, char* namefile){\r\n FILE *p;\r\n p = fopen(namefile, \"wb\");\r\n uint32_t tmp32;\r\n uint16_t tmp16;\r\n uint64_t tmp64;\r\n uint8_t tmp8;\r\n fwrite(my_f.RecordHeader.FormatId, sizeof(char*), 1, p);\r\n fwrite(my_f.RecordHeader.Version, sizeof(char*), 1, p);\r\n tmp32 = convert(my_f.RecordHeader.RecordLength);\r\n fwrite(&tmp32, 4, 1, p);\r\n tmp16 = convert(my_f.RecordHeader.CaptureDevice);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.RecordHeader.ImageSizeX);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.RecordHeader.ImageSizeY);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.RecordHeader.ResolutionX);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.RecordHeader.ResolutionY);\r\n fwrite(&tmp16, 2, 1, p);\r\n fwrite(&my_f.RecordHeader.NumberOfFinger, 1, 1, p);\r\n fwrite(&my_f.RecordHeader.Reserved, 1, 1, p); \r\n \r\n for(int j=0;j<my_f.RecordHeader.NumberOfFinger;j++){\r\n fwrite(&my_f.FingerData[j].FingerHeader.FingerPosition, 1, 1, p);\r\n 
fwrite(&my_f.FingerData[j].FingerHeader.Number_ImpressionType, 1, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.FingerQuality, 1, 1, p);\r\n fwrite(&my_f.FingerData[j].FingerHeader.NumberOfMinutiae, 1, 1, p);\r\n \r\n //Du lieu minutie\r\n for(int k=0;k<my_f.FingerData[j].FingerHeader.NumberOfMinutiae;k++){\r\n tmp16 = convert(my_f.FingerData[j].MinutieData6[k].Type_CoordinateX);\r\n fwrite(&tmp16, 2, 1, p);\r\n tmp16 = convert(my_f.FingerData[j].MinutieData6[k].CoordinateY);\r\n fwrite(&tmp16, 2, 1, p);\r\n fwrite(&my_f.FingerData[j].MinutieData6[k].Angle, 1, 1, p);\r\n fwrite(&my_f.FingerData[j].MinutieData6[k].Quality, 1, 1, p);\r\n }\r\n \r\n tmp16 = convert(my_f.FingerData[j].ExtendedData.ExtendedDataLength);\r\n fwrite(&tmp16, 2, 1, p);\r\n }\r\n \r\n fclose(p);\r\n}\r\n\r\nvoid ConvertFingerprint2ISO2011(Fingerprint* finger, )" }, { "alpha_fraction": 0.7432432174682617, "alphanum_fraction": 0.7567567825317383, "avg_line_length": 17.75, "blob_id": "e3bbe47c8136c6934d6a86f5a20486558ef9ff90", "content_id": "3e999144456f38c88a5d76d4e26995e7367fb18d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 74, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/bkafis/bkafis/include/ScanImage.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#ifndef SCANIMAGE_H\n#define SCANIMAGE_H\nvoid ScanImage(uint8_t []);\n#endif" }, { "alpha_fraction": 0.6024400591850281, "alphanum_fraction": 0.6642826795578003, "avg_line_length": 21.30392074584961, "blob_id": "9d1da90aead99baff57ce4836fe95092a8c28421", "content_id": "ad11f93567aaa40125d7357923053b16e03722aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2377, "license_type": "no_license", "max_line_length": 68, "num_lines": 102, "path": "/bkafis/exports/include/User.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#ifndef USER_H\r\n#define 
USER_H\r\n\r\n\r\n#include <stdint.h>\r\n#include <stdbool.h>\r\n\r\n#include <Pi.h>\r\n\r\n\r\n#ifndef NULL\r\n#define NULL 0\r\n#endif\r\n\r\n#define MCU_BOOT_DELAY\t\t\t\t\t\t180\r\n#define MAX_BUSY_COUNT\t\t\t\t\t\t500\r\n#define NUMBER_OF_COLUMNS\t\t\t\t\t256\r\n#define NUMBER_OF_ROWS\t\t\t\t\t\t180\r\n#define PARTIAL_START_COLUMN\t\t\t\t64\r\n#define PARTIAL_END_COLUMN\t\t\t\t\t192\r\n#define PARTIAL_START_ROW\t\t\t\t\t45\r\n#define PARTIAL_NUMBER_OF_COLUMNS\t\t\t128\r\n#define PARTIAL_NUMBER_OF_ROWS\t\t\t\t90\r\n#define SCAN_OR_PARTIAL_IMAGE_US_DELAY\t\t500\r\n#define SCAN_IMAGE_INITIAL_DELAY\t\t\t15\r\n#define PARTIAL_IMAGE_INITIAL_DELAY\t\t\t5\r\n#define DUMMY_DATA\t\t\t\t\t\t\t0xC0\r\n#define CONNECT_DIAGNOSTIC_STRING_SIZE\t\t8\r\n#define CONNECT_DIAGNOSTIC_RESPONSE_SIZE\t9\r\n\r\n\r\n#define BUSY\t\t\t\t\t\t\t\t0xB0\r\n#define READY\t\t\t\t\t\t\t\t0x01\r\n\r\n\r\n\r\n#define NO_ERROR\t\t\t\t\t0x00\r\n#define ERR_MOD_PARAM_FIELD\t\t\t0x10 \t\r\n#define ERR_MOD_DATA_LENGTH\t\t\t0x11\t\r\n#define ERR_MOD_DATA_FIELD\t\t\t0x12\t\r\n#define ERR_MOD_UNKNOWN_COMMAND\t\t0x30 \t\r\n#define ERR_MOD_OP_MODE\t\t\t\t0x31\t\r\n#define ERR_MOD_COM\t\t\t\t\t0x32\t\r\n#define ERR_MOD_SENSOR_FAIL\t\t\t0x33\t\r\n#define ERR_MOD_DCA\t\t\t\t\t0x34\t\r\n#define ERR_MOD_MCU\t\t\t\t\t0x35\t\r\n\r\n\r\n\r\n\r\n#define ERR_API_SPI_CMD_STAGE\t\t\t\t0xCC\r\n#define ERR_API_SPI_DATA_STAGE\t\t\t\t0XAA\r\n#define ERR_API_SPI_RES_STAGE\t\t\t\t0xBB\r\n#define ERR_API_MODULE_CONNECT\t\t\t\t0x40\r\n#define ERR_API_UNKNOWN_COMMAND\t\t\t\t0x03\r\n#define ERR_API_MODULE_STOPPED\t\t\t\t0x50\r\n\r\n\r\n\r\n\r\n#define INS_GET_LINE\t\t\t\t\t\t0x10\r\n#define INS_GET_PARTIAL\t\t\t\t\t\t0x11\r\n#define P1_FIRST_LINE\t\t\t\t\t\t0x80\r\n#define P1_INTERMEDIATE_LINE\t\t\t\t0x00\r\n#define P1_LAST_LINE\t\t\t\t\t\t0x01\r\n#define INS_FINGER_PRESENT\t\t\t\t\t0x12\r\n#define INS_GET_SERIAL\t\t\t\t\t\t0x20\r\n#define INS_DIAGNOSTIC\t\t\t\t\t\t0x26\r\n#define P1_CONNECT_DAIGNOSTIC\t\t\t\t0x01\r\n#define 
INS_SET_MO_STOP\t\t\t\t\t\t0x83\r\n#define INS_GET_FIMRWARE_VER\t\t\t\t0xF5\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ntypedef struct {\r\n\tuint8_t Column[256]; \r\n}ROW_t;\r\n\r\ntypedef struct {\r\n\tROW_t Row[180];\t\t\r\n}NEXT_SENSOR_IMAGE_t;\r\n\r\ntypedef struct{\r\n\tbool connected;\t\t\t\t\t\r\n\tbool stopped;\t\t\t\t\t\r\n\tuint8_t firmware_ver[3];\t\t\r\n\tuint8_t serial_no[12];\t\t\t\r\n\tNEXT_SENSOR_IMAGE_t image;\t\t\r\n}NEXT_MODULE_t;\r\nuint8_t NEXT_Module_ScanImage(NEXT_MODULE_t *,uint8_t []);\r\nuint8_t NEXT_Module_FingerPresent(NEXT_MODULE_t* , uint8_t* , bool);\r\nuint8_t NEXT_Module_SPI_Command(uint8_t, uint8_t, uint8_t, uint8_t);\r\nuint8_t NEXT_Module_SPI_Data(uint8_t*, uint8_t);\r\nuint8_t NEXT_Module_SPI_Response(void*, uint16_t, uint16_t);\r\n\r\n\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.7103347778320312, "alphanum_fraction": 0.7234352231025696, "avg_line_length": 20.5, "blob_id": "ce0ca30516323268b401aa5d391c3f0a8a136b2c", "content_id": "03d8f6fb3d571b2751a99bf368960c1e22d1ea02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 687, "license_type": "no_license", "max_line_length": 69, "num_lines": 32, "path": "/raspberry_sensor_c/test.sh", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n#\n# rc.local\n#\n# This script is executed at the end of each multiuser runlevel.\n# exit zero\" on success or any other\n# Make sure that the script will \"exit 0\" on success or any other\n# value on error.\n#\n# In order to enable or disable this script just change the execution\n# bits.\n#\n# By default this script does nothing.\n\n# Print the IP address\n\n\nCOUNTER=0\nsudo /home/pi/Desktop/raspberry_sensor_c/main\nsleep 2\nwhile [ $COUNTER -ne 1 ]\ndo\n if [ -f /home/pi/Desktop/raspberry_sensor_c/image.txt ]; then\n\tsudo /home/pi/Desktop/raspberry_sensor_c/chuyendoi.py\n\tsleep 2\n\tsudo /home/pi/Desktop/raspberry_sensor_c/nfiq image.JPG\n\tsleep 2\n\tCOUNTER=1\n 
fi\ndone\nsleep 3\nexit 0" }, { "alpha_fraction": 0.6243478059768677, "alphanum_fraction": 0.6669565439224243, "avg_line_length": 28.263158798217773, "blob_id": "b461c9c2cfaac8bbce68242aaaf1a2d7fffd54e6", "content_id": "b1a9fb94bb7373d131413ef9eabf385cd836b5da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1150, "license_type": "no_license", "max_line_length": 73, "num_lines": 38, "path": "/raspberry_sensor_c/Pi.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#include <stdint.h>\r\n#include <bcm2835.h>\r\n#ifndef PI_H\r\n#define PI_H\r\n\r\n#define LEADING_EDGE 0\r\n#define FALLING_EDGE 1\r\n//Define Pin\r\n#define PIN_MISO RPI_BPLUS_GPIO_J8_21\r\n#define PIN_MOSI RPI_BPLUS_GPIO_J8_19\r\n#define PIN_SCK RPI_BPLUS_GPIO_J8_23\r\n#define PIN_SS RPI_BPLUS_GPIO_J8_24\r\n\r\n\r\n//Macro for setting and clearing the SS pin\r\n#define SS RPI_BPLUS_GPIO_J8_24 // CE0\r\n#define BSP_SET_SS bcm2835_gpio_write(SS, HIGH); //active SS\r\n#define BSP_CLEAR_SS bcm2835_gpio_write(SS, LOW);\r\n\r\n//Macro for setting and clearing the module nRST pin\r\n\r\n#define PIN_RESET RPI_BPLUS_GPIO_J8_15\r\n#define MODULE_RESET_LOW \tbcm2835_gpio_write(PIN_RESET, LOW);\r\n#define MODULE_RESET_HIGH \tbcm2835_gpio_write(PIN_RESET, HIGH);\r\n\r\n\r\n//Function prototypes\r\nvoid BSP_Config_HW (void);\r\nvoid BSP_Module_Reset_Configure (void);\r\nvoid BSP_SPI_Configure (void);\r\nvoid BSP_SPI_ReadWriteBuffer (uint8_t* , uint8_t* , uint16_t);\r\nvoid BSP_Delay_ms(uint16_t);\r\nvoid BSP_Delay_us(uint64_t);\r\nvoid BSP_Module_nRST_High(void);\r\nvoid BSP_Module_nRST_Low(void);\r\nvoid BSP_Module_Wake(void);\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.6021710634231567, "alphanum_fraction": 0.6153199076652527, "avg_line_length": 30.737863540649414, "blob_id": "c42c459d5df31cfe2c0411b3a4a261bf50418545", "content_id": "e8a89f519a311f1523cc3861f9bbeecf56b533c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C", "length_bytes": 13081, "license_type": "no_license", "max_line_length": 147, "num_lines": 412, "path": "/bkafis/bkafis/src/bin/extract/extract.26.8.2015.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*******************************************************************************\n\nLicense: \nThis software and/or related materials was developed at the National Institute\nof Standards and Technology (NIST) by employees of the Federal Government\nin the course of their official duties. Pursuant to title 17 Section 105\nof the United States Code, this software is not subject to copyright\nprotection and is in the public domain. \n\nThis software and/or related materials have been determined to be not subject\nto the EAR (see Part 734.3 of the EAR for exact details) because it is\na publicly available technology and software, and is freely distributed\nto any interested party with no licensing requirements. Therefore, it is \npermissible to distribute this software as a free download from the internet.\n\nDisclaimer: \nThis software and/or related materials was developed to promote biometric\nstandards and biometric technology testing for the Federal Government\nin accordance with the USA PATRIOT Act and the Enhanced Border Security\nand Visa Entry Reform Act. 
Specific hardware and software products identified\nin this software were used in order to perform the software development.\nIn no case does such identification imply recommendation or endorsement\nby the National Institute of Standards and Technology, nor does it imply that\nthe products and equipment identified are necessarily the best available\nfor the pursectore.\n\nThis software and/or related materials are provided \"AS-IS\" without warranty\nof any kind including NO WARRANTY OF PERFORMANCE, MERCHANTABILITY,\nNO WARRANTY OF NON-INFRINGEMENT OF ANY 3RD PARTY INTELLECTUAL PROPERTY\nor FITNESS FOR A PARTICULAR PURsectorE or for any pursectore whatsoever, for the\nlicensed product, however used. In no event shall NIST be liable for any\ndamages and/or costs, including but not limited to incidental or consequential\ndamages of any kind, including economic damage or injury to property and lost\nprofits, regardless of whether NIST shall be advised, have reason to know,\nor in fact shall know of the sectorsibility.\n\nBy using this software, you agree to bear all risk relating to quality,\nuse and performance of the software and/or related materials. You agree\nto hold the Government harmless from any claim arising from your use\nof the software.\n\n*******************************************************************************/\n\n/***********************************************************************\n PACKAGE: NIST Fingerprint Minutiae Detection\n\n FILE: MINDTCT.C\n\n AUTHOR: Michael D. 
Garris\n DATE: 04/18/2002\n UPDATED: 09/14/2004\n UPDATED: 05/09/2005 by MDG\n UPDATED: 01/31/2008 by Kenneth Ko\n UPDATED: 09/04/2008 by Kenneth Ko\n UPDATED: 09/30/2008 by Kenenth Ko - add version option.\n\n#cat: mindtct - Uses Version 2 of the NIST Latent Fingerprint System (LFS)\n#cat: to detect minutiae and count ridges in a grayscale image.\n#cat: This version of the program will process:\n#cat: ANSI/NIST, WSQ, JPEGB, JPEGL, and IHead image formats.\n#cat: Results are written to various output files with\n#cat: predefined extensions appeneded to a specified output\n#cat: root path.\n\n***********************************************************************/\n\n#include <stdio.h>\n#include <sys/param.h>\n#include <an2k.h>\n#include <lfs.h>\n#include <imgdecod.h>\n#include <imgboost.h>\n#include <img_io.h>\n#include <version.h>\n#include <fingerprint.h>\n\nvoid procargs(int, char **, int *, int *, char **, char **);\n\n\n\nint debug = 1;\n\n\n/*************************************************************************\n**************************************************************************/\nint main(int argc, char *argv[])\n{\n\tint boostflag, m1flag;\n\tchar *ifile, *oroot, ofile[MAXPATHLEN];\n\tunsigned char *idata, *bdata;\n\tint img_type;\n\tint ilen, iw, ih, id, ippi, bw, bh, bd;\n\tdouble ippmm;\n\tint img_idc, img_imp;\n\tint *direction_map, *low_contrast_map, *low_flow_map;\n\tint *high_curve_map, *quality_map;\n\tint map_w, map_h;\n\tint ret;\n\tMINUTIAE *minutiae;\n\tANSI_NIST *ansi_nist;\n\tRECORD *imgrecord;\n\tint imgrecord_i;\n\n\t/* Process command line arguments. */\n\tprocargs(argc, argv, &boostflag, &m1flag, &ifile, &oroot);\n\n\t/* 1. READ FINGERPRINT IMAGE FROM FILE INTO MEMORY. */\n\n\t/* Is input file in ANSI/NIST format? */\n\tif((ret = is_ANSI_NIST_file(ifile)) < 0) {\n\t\t/* If error ... */\n\t\texit(ret);\n\t}\n\n/* If file is ANSI/NIST format ... 
*/\n\tif(ret){\n\t\timg_type = ANSI_NIST_IMG;\n\t\t/* Parse ANSI/NIST file into memory structure */\n\t\tif((ret = read_ANSI_NIST_file(ifile, &ansi_nist)))\n\t\t\texit(ret);\n\t\t/* Get first grayscale fingerprint record in ANSI/NIST file. */\n\t\tif((ret = get_first_grayprint(&idata, &iw, &ih, &id,\n\t\t &ippmm, &img_idc, &img_imp,\n\t\t &imgrecord, &imgrecord_i, ansi_nist)) < 0){\n\t\t\t/* If error ... */\n\t\t\tfree_ANSI_NIST(ansi_nist);\n\t\t\texit(ret);\n\t\t}\n\t\t/* If grayscale fingerprint not found ... */\n\t\tif(!ret){\n\t\t\tfree_ANSI_NIST(ansi_nist);\n\t\t\tfprintf(stderr, \"ERROR : main : \");\n\t\t\tfprintf(stderr, \"grayscale image record not found in %s\\n\", ifile);\n\t\t\texit(-2);\n\t\t}\n\t}\n\t/* Otherwise, not an ANSI/NIST file */\n\telse{\n\t\t/* Read the image data from file into memory */\n\t\tif((ret = read_and_decode_grayscale_image(ifile, &img_type,\n\t\t &idata, &ilen, &iw, &ih, &id, &ippi))){\n\t\t\texit(ret);\n\t\t}\n\t\t/* If image ppi not defined, then assume 500 */\n\t\tif(ippi == UNDEFINED)\n\t\t\tippmm = DEFAULT_PPI / (double)MM_PER_INCH;\n\t\telse \n\t\t\tippmm = ippi / (double)MM_PER_INCH;\n\t}\n\n\t/* 2. ENHANCE IMAGE CONTRAST IF REQUESTED */\n\tif(boostflag)\n\t\ttrim_histtails_contrast_boost(idata, iw, ih); \n\n\t/* 3. GET MINUTIAE & BINARIZED IMAGE. */\n\tif((ret = get_minutiae(&minutiae, &quality_map, &direction_map,\n\t &low_contrast_map, &low_flow_map, &high_curve_map,\n\t &map_w, &map_h, &bdata, &bw, &bh, &bd,\n\t idata, iw, ih, id, ippmm, &lfsparms_V2))){\n\t\tif(img_type == ANSI_NIST_IMG)\n\t\t\tfree_ANSI_NIST(ansi_nist);\n\t\tfree(idata);\n\t\texit(ret);\n\t}\n\n\t/* Done with input image data */\n\tfree(idata);\n\t\n \n\n\t/* Done with minutiae detection maps. 
*/\n\tfree(quality_map);\n\tfree(direction_map);\n\tfree(low_contrast_map);\n\tfree(low_flow_map);\n\tfree(high_curve_map);\n\n\t/* Edited by Minh Nguyen, August 3 2015 */\n\t/* Add code to find neighbors according to BKAFIS algorithm */\n\t/* First, convert minutiae structure of lfs algorithm into BKAFIS data structure */\n\t/* here we need to sort the array minutiae according to quality */\n\t/* call function bubble_sort_double_dec_2 as follows */\n\t\n\tunsigned int num = minutiae->num;\n\tunsigned int i,j;\n\t/* Allocate list of sequential indices. */\n \tint* order = (int *)malloc(num * sizeof(int));\n\tif(order == (int *)NULL){\n\t\tfprintf(stderr, \"ERROR : sort_indices_double_inc : malloc : order\\n\");\n\t\treturn(-400);\n\t}\n\t\t\t\n\t/* construct rank array from the minutiae quality. */\n\tdouble* ranks = (double *)malloc(num * sizeof(double));\n\tif(ranks == NULL){\n fprintf(stderr, \"ERROR : sort_minutiae_y_x : malloc : ranks\\n\");\n return(-310);\n\t}\n \n\tfor(i = 0; i < num; i++){\n ranks[i] = minutiae->list[i]->reliability*100;\n\t order[i] = i;\n\t}\n\tif (debug)\n\t\tprintf(\"Sort minutia list according to quality\\n\");\n\t/* Get sorted order of minutiae. */\n\tbubble_sort_double_dec_2(ranks, order, num);\n\n\t/* Construct new minutiae list: Minh Nguyen - 12 August 2015*/\n\t/* Allocate new MINUTIA list to hold sorted minutiae. */\n\tif (debug)\n\t\tprintf(\"Get the sort resulted list\\n\");\n\tMINUTIA** newlist = (MINUTIA **)malloc(minutiae->num * sizeof(MINUTIA *));\n\tif(newlist == (MINUTIA **)NULL){\n\t\tfree(ranks);\n\t\tfree(order);\n\t\tfprintf(stderr, \"ERROR : sort_minutiae_y_x : malloc : newlist\\n\");\n\t\treturn(-311);\n\t}\n\n\t/* Put minutia into sorted order in new list. */\n\tfor(i = 0; i < minutiae->num; i++)\n\t\tnewlist[i] = minutiae->list[order[i]];\n\n\t/* Deallocate non-sorted list of minutia pointers. */\n\tfree(minutiae->list);\n\t/* Assign new sorted list of minutia to minutiae list. 
*/\n\tminutiae->list = newlist;\n\n\t/* Free the working memories supporting the sort. */\n\tfree(order);\n\tfree(ranks); \n \n\t\n\tFingerprint finger;\n\tfinger.width = iw;\n\tfinger.height = ih;\n\t/* finger.quality = add code to calculate quality of fingerprint here \n\trefer to function comp_nfiq_flex() in the NFIQ package */\n\tfinger.nMinutiae = minutiae->num;\n\tMinutia** bkafisMinutiae;\n\tMinutia* min;\n\tif (debug)\n\t\tprintf(\"Image width=%d\\nImage height=%d\\nnMinutiae=%d\\n\",finger.width, finger.height,finger.nMinutiae);\n\t\t\n\tbkafisMinutiae = malloc(sizeof(Minutia*)*minutiae->num);\n\tif (bkafisMinutiae==NULL)\n\t{\n\t\tfree_minutiae(minutiae);\n\t\tfree(bdata);\n\t\texit(-1);\n\t}\n\t\n\tfor(i=0;i<minutiae->num;i++){\n\t\tmin=malloc(sizeof(Minutia));\n\t\tif (min==NULL){\n\t\t\tCleanFingerprint(&finger);\n\t\t\tfree_minutiae(minutiae);\n\t\t\tfree(bdata);\n\t\t\texit(-1);\n\t\t}\n\t\tmemset(min,0,sizeof(Minutia));\n\t\tif (debug)\n\t\t\tprintf(\"Minutia %d:\\t%d\\t%d\\t%d\\t%f\\t%d\\n\",\n\t\t\t\ti,minutiae->list[i]->x,\n\t\t\t\tminutiae->list[i]->y,\n\t\t\t\tminutiae->list[i]->direction,\n\t\t\t\tminutiae->list[i]->reliability,\n\t\t\t\tminutiae->list[i]->type\n\t\t\t\t); \n\t\t\t\t\n\t\tmin->x = minutiae->list[i]->x;\n\t\tmin->y = -minutiae->list[i]->y;\n\t\tfloat tmp = minutiae->list[i]->direction*11.25; /* after extracting, the minutiae direction is in range 0-32 => need to be converted into grad */\n\t\ttmp = (tmp<=90)?90-tmp:450-tmp;\n\t \n\t\tmin->angle = tmp*M_PI/180; /* convert direction into radian */\n\t\tmin->quality = minutiae->list[i]->reliability*100;\n\t\tmin->type = minutiae->list[i]->type;\t \n\t\tbkafisMinutiae[i]=min;\n\t\tmin=NULL;\n\t\t/* if (debug)\n\t\t\tprintf(\"Minutia %d:\\t%f\\t%f\\t%f\\t%d\\t%d\\n\",\n\t\t\t\ti,bkafisMinutiae[i]->x,\n\t\t\t\tbkafisMinutiae[i]->y,\n\t\t\t\tbkafisMinutiae[i]->angle,\n\t\t\t\tbkafisMinutiae[i]->quality,\n\t\t\t\tbkafisMinutiae[i]->type\n\t\t\t\t); \n\t\t\t\t*/\n\t}\n\tfinger.minutiae 
= bkafisMinutiae;\n\t\n\tfloat* distances;\n\tif (CalculateDistances(&finger,&distances)==ISO_GENERICERROR){\n\t\tCleanFingerprint(&finger);\n\t\tfree_minutiae(minutiae);\n\t\tfree(bdata);\n\t\texit(-1);\n\t}\n\tgray2bin(1, 1, 0, bdata, iw, ih);\n\tif (debug)\n\t\tprintf(\"Finished distance calculation.distances=%d\\tfinger=%d\\n\",distances,&finger);\n\tfor (i=0;i<minutiae->num;i++){\n\t\tif (finger.minutiae[i]==NULL) continue;\n\n\t\tFindDirectionalNeighbours(&finger, distances, i, &bkafisParams); /* here, the constants should be parameterized */\n\t\t\n\t\tMinutia* min = finger.minutiae[i];\n\t\tif (debug)\n\t\t\tprintf(\"Neighbors of minutia %d:\\n\",i);\n\t\t\n\t\tfor (j=0;j<min->nNeighbors;j++){\n\t\t\tif (min->neighbors[j]){\n\t\t\t\tint rc = ridge_count(i,min->neighbors[j]->index,minutiae,bdata,iw,ih,&lfsparms_V2);\n\t\t\t\tif (debug)\n\t\t\t\t\tprintf(\"index=%d, ed=%f, dra=%f, oda=%f, ridge count=%d\\n\", \n\t\t\t\t\t\tmin->neighbors[j]->index,\n\t\t\t\t\t\tmin->neighbors[j]->ed,\n\t\t\t\t\t\tmin->neighbors[j]->dra,\n\t\t\t\t\t\tmin->neighbors[j]->oda,\n\t\t\t\t\t\trc\n\t\t\t\t\t\t);\n\t\t\t\tmin->neighbors[j]->ridgeCount = rc;\n\t\t\t\t\n\t\t\t}\n\t\t}\n\t\t\n\t}\n\t\n\tif (debug)\n\t\tprintf(\"\\nFinished neighbor finding\\n\");\n\tSaveFingerprintText(\"test.txt\", &finger);\n\tCleanFingerprint(&finger);\n\n\t/* Done with minutiae and binary image results */\n\tfree(distances);\n\tfree_minutiae(minutiae);\n\tfree(bdata);\n\n\t/* Exit normally. 
*/\n\texit(0);\n}\n\n/*************************************************************************\n**************************************************************************\n PROCARGS - Process command line arguments\n Input:\n argc - system provided number of arguments on the command line\n argv - system provided list of command line argument strings\n Output:\n boostflag - contrast boost flag \"-b\"\n ifile - input image file name to be processed by this program\n ifile - output image file name to be created by this program\n**************************************************************************/\nvoid procargs(int argc, char **argv, int *boostflag, int *m1flag,\n char **ifile, char **oroot)\n{\n int a;\n\n *boostflag = FALSE;\n *m1flag = FALSE;\n\n if ((argc == 2) && (strcmp(argv[1], \"-version\") == 0)) {\n getVersion();\n exit(0);\n }\n\n if(argc == 3){\n *ifile = argv[1];\n *oroot = argv[2];\n return;\n }\n\n if((argc == 4) || (argc == 5)){\n a = 1;\n while(a < argc-2){\n if(strcmp(argv[a], \"-b\") == 0){\n *boostflag = TRUE;\n }\n else if(strcmp(argv[a], \"-m1\") == 0){\n *m1flag = TRUE;\n }\n else{\n fprintf(stderr, \"Unrecognized flag \\\"%s\\\"\\n\", argv[a]);\n fprintf(stderr,\n \"Usage : %s [-b] [-m1] <finger_img_in> <oroot>\\n\",\n argv[0]);\n fprintf(stderr,\n \" -b = contrast boost image\\n\");\n fprintf(stderr,\n \" -m1 = output \\\"*.xyt\\\" according to ANSI INCITS 378-2004\\n\");\n exit(1);\n }\n a++;\n }\n }\n else{\n fprintf(stderr, \"Invalid number of arguments on command line\\n\");\n fprintf(stderr,\n \"Usage : %s [-b] [-m1] <finger_img_in> <oroot>\\n\",\n argv[0]);\n fprintf(stderr,\n \" -b = contrast boost image\\n\");\n fprintf(stderr,\n \" -m1 = output \\\"*.xyt\\\" according to ANSI INCITS 378-2004\\n\");\n exit(2);\n }\n \n *ifile = argv[a++];\n *oroot = argv[a];\n}\n\n\n\n\n\n" }, { "alpha_fraction": 0.5646085739135742, "alphanum_fraction": 0.5808359980583191, "avg_line_length": 26.990137100219727, "blob_id": 
"7fdef3ac3f51d374eaffd9ba2a650aa88c2d2eae", "content_id": "10b5ca872f2eb921d0baf4fe6e2982e8709c16e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 38280, "license_type": "no_license", "max_line_length": 151, "num_lines": 1318, "path": "/bkafis/bkafis/src/lib/bkafis/fingerprint.10.9.2015.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tmatcher.c\r\n\tDescription: Function implementation of BKAFIS matcher\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n\t\r\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\r\n\t- add function DetectLDR\r\n\t- add params for DetectLDR into struct BkafisParams\r\n *********************************************************************/\r\n\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include \"fingerprint.h\"\r\n#include \"ISOTemplate.h\"\r\n#ifndef M_PI\r\n#define M_PI 3.14159\r\n#endif\r\n/* #define DEBUG*/\r\n\r\n/* constants to control extractor */\r\n#define MAX_FOUND_NEIGHBORS\t8\r\n#define MIN_DISTANCE\t8.5\r\n#define MAX_DISTANCE\t130\r\n/* constants to control matcher */\r\n#define ED_THRESHOLD\t\t15\r\n#define DRA_THRESHOLD\t22.5*M_PI/180\r\n#define ODA_THRESHOLD \t\t22.5*M_PI/180\r\n#define DRA_THRESHOLD1\t\t( 2*M_PI - 22.5*M_PI/180 )\r\n#define ODA_THRESHOLD1 \t( 2*M_PI - 22.5*M_PI/180 )\r\n#define ED_WEIGHT 0.8\r\n#define DRA_WEIGHT 0.1\r\n#define ODA_WEIGHT 0.1\r\n#define N_PAIRS\t\t2\r\n#define LOCAL_SCORE_BIAS 1\r\n\r\n/* ldr params ?? 
*/\r\n#define LDR_N \t\t\t\t3\r\n#define LDR_NUM\t\t\t\t3\r\n#define LDR_DIR\t\t\t\t22.5*M_PI/180\r\n#define LDR_POS\t\t\t\t50\r\n/* -- */\r\n\r\n#define NumNeighs2\t3\r\n\r\n#define LDR_WEIGHT \t0.5\r\n#define SIGMA \t2 \r\n#define TG_THRESHOLD \t12\r\n#define TA_THRESHOLD \tM_PI/6\r\n#define RC_THRESHOLD\t1\r\n\r\nBkafisParams bkafisParams = {\r\n\tMAX_FOUND_NEIGHBORS, \r\n\tMIN_DISTANCE,\r\n\tMAX_DISTANCE,\r\n\tED_THRESHOLD,\r\n\tDRA_THRESHOLD,\r\n\tODA_THRESHOLD,\r\n\tDRA_THRESHOLD1,\r\n\tODA_THRESHOLD1,\r\n\tED_WEIGHT,\r\n\tDRA_WEIGHT,\r\n\tODA_WEIGHT,\r\n\t\r\n\tN_PAIRS,\r\n\tLOCAL_SCORE_BIAS,\r\n\t\r\n\tLDR_WEIGHT,\r\n\tLDR_N,\r\n\tLDR_NUM,\r\n\tLDR_DIR,\r\n\tLDR_POS,\r\n\tSIGMA,\r\n\tTG_THRESHOLD,\r\n\tTA_THRESHOLD,\r\n\tRC_THRESHOLD\r\n};\r\n/*\r\ntypedef struct\r\n{\r\n\t\r\n\tunsigned char maxNeighbors;\r\n\tfloat minDistance,maxDistance;\r\n\t\r\n\tfloat edThreshold, draThreshold, odaThreshold, draThreshold1, odaThreshold1;\r\n\tfloat edWeight, draWeight, odaWeight;\r\n\tunsigned char nNeighborPairThreshold;\r\n\tfloat localScoreBias;\r\n\t\r\n\tfloat ldrWeight;\r\n\tfloat sigma;\r\n\tfloat tgThreshold, taThreshold;\r\n\tunsigned char rcThreshold;\r\n\t\r\n} BkafisParams; */\r\n/**********************************************************************\r\n\tConvert from ISOTemplate 2005 format \r\n\tInput:\r\n\t\t\tImplicitly stored in static variable isoTemplate that is declared \r\n\t\t\tin ISOTemplate.c \r\n\tOutput:\r\n\t\t\tpointer to Fingerprint structure declared above \r\n\tUsage:\r\n\t\t\tin order to load the iso template from file call \r\n\t\t\tISORESULT LoadISOTemplate (ISOBYTE *path);\r\n\t\t\tthen in order to convert from the template into Fingerprint structure\r\n\t\t\tcall unsigned char ConvertISO2005Fingerprint(Fingerprint* finger);\r\n *********************************************************************/ \r\n\r\nISOBYTE ConvertISO2005Fingerprint(Fingerprint* finger)\r\n{\r\n\tif (finger==NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tISOWORD 
width, height;\r\n\tGetRecordHeader (&width,&height,NULL,NULL,NULL);\r\n\tfinger->width = width;\r\n\tfinger->height = height;\r\n\t\r\n\t/*\r\n\t#ifdef DEBUG\r\n\tprintf(\"Width=%d\\nHeight=%d\\n\",finger->width,finger->height);\r\n\t#endif\r\n\t*/\r\n\t\r\n\tunsigned char quality, nMinutiae;\r\n\tGetFingerViewHeader (0,NULL,NULL,NULL,&quality,&nMinutiae);\r\n\tfinger->quality = quality;\r\n\tfinger->nMinutiae = nMinutiae;\r\n\tMinutia** minutiae=malloc(sizeof(Minutia*)*finger->nMinutiae);\r\n\tif (minutiae == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tmemset(minutiae,0x00,sizeof(Minutia*)*finger->nMinutiae);\r\n\t\r\n\t/*\r\n\t#ifdef DEBUG\r\n\tprintf(\"Quality=%d\\nnMinutiae=%d\\n\",finger->quality,finger->nMinutiae);\r\n\t#endif\r\n\t*/\r\n\t\r\n\tunsigned char minI;\r\n\tISOBYTE type, angle;\r\n\tISOWORD x,y;\r\n\tMinutia* min;\r\n\tfor (minI=0;minI<finger->nMinutiae;minI++){\r\n\t\t\r\n\t\tmin=malloc(sizeof(Minutia));\r\n\t\tif (min==NULL){\r\n\t\t\tCleanFingerprint(finger);\r\n\t\t\treturn ISO_GENERICERROR;\r\n\t\t}\r\n\t\tGetMinutiaeData(0,minI,&type,&x,&y,&angle,&quality);\r\n\t\tmin->x = x;\r\n\t\tmin->y = y;\r\n\t\tmin->angle = angle*1.40625*M_PI/180;\r\n\t\tmin->type = type;\r\n\t\tmin->quality = quality;\r\n\t\t\r\n\t\t/*\r\n\t\t#ifdef DEBUG\r\n\t\tprintf(\"%d\\t%d\\t%f\\t%d\\t%d\\n\", \r\n\t\t\tmin->x,\r\n\t\t\tmin->y,\r\n\t\t\tmin->angle,\r\n\t\t\tmin->type,\r\n\t\t\tmin->quality\r\n\t\t\t);\r\n\t\t#endif\r\n\t\t*/\r\n\t\t\r\n\t\tminutiae[minI]=min;\r\n\t}\r\n\tfinger->minutiae= minutiae;\r\n\treturn ISO_SUCCESS;\r\n\t\r\n}\r\nISOBYTE SaveFingerprintText(unsigned char* path, Fingerprint* finger)\r\n{\r\n\tFILE *fp;\r\n\tunsigned char minI;\r\n\tunsigned char neighborI;\r\n\t\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tif ( (fp = fopen(path,\"w\")) == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\t/*\r\n\tfprintf(fp,\"Minutiae information:Width=%d\\tHeight=%d\\tQuality=%d\\tnMinutiae=%d\\n\", 
finger->width, finger->height, finger->quality, finger->nMinutiae);\r\n\tfprintf(fp,\"x\\ty\\tAngle\\tType\\tQuality\\tLDR\\t#Neighbors\\tIndex\\tEd\\tDra\\tOda\\tRidgeCount...\\n\");\r\n\t*/\r\n\t\r\n\tfor ( minI = 0; minI < finger->nMinutiae; minI++ )\r\n\t{\r\n\t\tif \t( finger->minutiae[minI] )\r\n\t\t\tfprintf\t(\tfp,\t\"%d\\t%d\\t%f\\t%d\\t%d\\t%d\\t%d\"\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->x\t\t\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->y\t\t\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->angle\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->type\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->quality\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->ldr\t\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->nNeighbors\r\n\t\t\t\t\t);\r\n\t\t\t\t\t\r\n\t\tfor ( neighborI = 0; neighborI < finger->minutiae[minI]->nNeighbors; neighborI++ )\r\n\t\t\tif \t( finger->minutiae[minI]->neighbors[neighborI] )\r\n\t\t\t\tfprintf\t(\tfp, \"\\t%d\\t%f\\t%f\\t%f\\t%d\"\t\t\t\t\t\t\t\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->index\t\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->ed\t\t\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->dra\t\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->oda\t\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->ridgeCount \r\n\t\t\t\t\t\t);\r\n\t\t\t\t\t\t\r\n\t\tfprintf(fp,\"\\n\");\r\n\t}\r\n\t\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nISOBYTE CleanFingerprint(Fingerprint* finger)\r\n{\r\n\tunsigned char i,j;\r\n\tif (finger->minutiae){\r\n\t\tfor (i=0;i<finger->nMinutiae;i++){\r\n\t\t\tif (finger->minutiae[i]) {\r\n\t\t\t\tif (finger->minutiae[i]->neighbors) {\r\n\t\t\t\t\tfor (j=0;j<finger->minutiae[i]->nNeighbors;j++)\r\n\t\t\t\t\t\tif (finger->minutiae[i]->neighbors[j]) 
free(finger->minutiae[i]->neighbors[j]);\r\n\t\t\t\t\tfree(finger->minutiae[i]->neighbors);\r\n\t\t\t\t}\r\n\t\t\t\tfree(finger->minutiae[i]);\r\n\t\t\t}\r\n\t\t}\r\n\t\tfree(finger->minutiae);\r\n\t}\r\n\tmemset(finger,0x00,sizeof(Fingerprint));\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nunsigned char SortMinutiaQuality(Fingerprint* finger)\r\n{\r\n\tunsigned char i, j;\r\n\t\r\n\t/*\t\tSorting minutiae by quality score in descending order\t*/\r\n \r\n\tISOBYTE quality1, quality2;\r\n\tfor (i = 0; i < finger->nMinutiae - 1; i++)\r\n for (j = finger->nMinutiae - 1; j > i; j--){\r\n\t\t\tquality1 = finger->minutiae[j]->quality;\r\n\t\t\tquality2 = finger->minutiae[j-1]->quality;\r\n\t\t\tif (quality1 > quality2){\r\n\t\t\t\tMinutia* \t\t\ttg = finger->minutiae[j];\r\n finger->minutiae[j] = finger->minutiae[j - 1];\r\n finger->minutiae[j-1] = tg;\r\n\t\t\t}\r\n\t\t}\r\n\t\r\n\t\r\n}\r\n\r\n\r\nISOBYTE SaveFingerprint( unsigned char *path, Fingerprint *finger )\r\n{\r\n\tFILE\t*fp;\r\n\tunsigned char\tminI;\r\n\tunsigned char\tneighborI;\r\n\t\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tif ( (fp = fopen(path, \"wb\")) == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tfwrite( finger, FINGERHEADERSIZE, 1, fp );\r\n\tMinutia\t*min;\r\n\t\r\n\tfor ( minI = 0; minI < finger->nMinutiae; minI++ )\r\n\t{\r\n\t\tmin = finger->minutiae[minI];\r\n\t\tif \t(min)\r\n\t\t{\r\n\t\t\tfwrite( min, MINUTIASIZE, 1, fp );\r\n\t\t\tfor ( neighborI = 0; neighborI < finger->minutiae[minI]->nNeighbors; neighborI++ )\r\n\t\t\t\tif \t( min->neighbors[neighborI] )\r\n\t\t\t\t{\r\n\t\t\t\t\tfwrite( min->neighbors[neighborI], NEIGHBORSIZE, 1, fp );\r\n\t\t\t\t}\r\n\t\t}\r\n\t}\r\n\t\r\n\tfclose(fp);\r\n\treturn\tISO_SUCCESS;\r\n}\r\n\r\nISOBYTE SaveFingerprintMoC(unsigned char *path, FingerprintMoC *finger )\r\n{\r\n\tFILE\t*fp;\r\n\tunsigned char\tminI;\r\n\tunsigned char\tneighborI;\r\n\t\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn 
ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tif ( (fp = fopen(path, \"wb\")) == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t/* printf(\"size of FingerprintMoC = %d\\n\", sizeof(FingerprintMoC));*/\r\n\tfwrite(finger, sizeof(FingerprintMoC), 1, fp );\r\n\tfclose(fp);\r\n\treturn\tISO_SUCCESS;\r\n}\r\nISOBYTE\tReadFingerprintMoC( unsigned char *path, FingerprintMoC *finger )\r\n{\r\n\tFILE\t*fp;\r\n\tunsigned char\tminI;\r\n\tunsigned char\tneighborI;\r\n\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\r\n\tif ( (fp = fopen(path,\"rb\")) == NULL )\r\n\t{\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tfread( finger, sizeof(FingerprintMoC), 1, fp );\r\n\tfclose(fp);\r\n\treturn\tISO_SUCCESS;\r\n}\r\n\r\nISOBYTE\tReadFingerprint( unsigned char *path, Fingerprint *finger )\r\n{\r\n\tFILE\t*fp;\r\n\tunsigned char\tminI;\r\n\tunsigned char\tneighborI;\r\n\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\r\n\tif ( (fp = fopen(path,\"rb\")) == NULL )\r\n\t{\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tfread( finger, FINGERHEADERSIZE, 1, fp );\r\n\t\r\n\tMinutia\t**minutiae = malloc( sizeof(Minutia*) * finger->nMinutiae );\r\n\tif \t( minutiae == NULL )\r\n\t\treturn\tISO_GENERICERROR;\r\n\t\r\n\tmemset( minutiae, 0x00, sizeof(Minutia*) * finger->nMinutiae );\r\n\t\r\n\tMinutia\t*min;\r\n\tfor ( minI = 0; minI < finger->nMinutiae; minI++ )\r\n\t{\t\r\n\t\tmin = malloc( sizeof(Minutia) );\r\n\t\tif \t( min == NULL ) \r\n\t\t\treturn\tISO_GENERICERROR;\r\n\r\n\t\tfread( min, MINUTIASIZE, 1, fp );\r\n\t\tif \t( !min->nNeighbors )\t\r\n\t\t\tcontinue;\r\n\t\t\r\n\t\tNeighbor\t**neighborArray = malloc( sizeof(Neighbor*) * min->nNeighbors );\r\n\t\tNeighbor\t*pNeighbor;\r\n\t\tif \t( neighborArray == NULL )\t\r\n\t\t\treturn\tISO_GENERICERROR;\r\n\t\t\t\t\r\n\t\tmemset( neighborArray, 0, sizeof(Neighbor*) * min->nNeighbors );\r\n\t\t\r\n\t\tunsigned char\ti;\r\n\t\tfor\t( i = 0; i < min->nNeighbors; i++ 
)\r\n\t\t{\r\n\t\t\tpNeighbor = malloc( sizeof(Neighbor) );\r\n\t\t\tif \t( pNeighbor == NULL )\t\r\n\t\t\t\treturn\tISO_GENERICERROR;\t\r\n\t\t\t\t\r\n\t\t\tfread( pNeighbor, NEIGHBORSIZE, 1, fp );\r\n\t\t\tneighborArray[i] = pNeighbor;\r\n\t\t\tpNeighbor = NULL;\r\n\t\t}\r\n\t\t\r\n\t\tmin->neighbors = neighborArray;\r\n\t\tminutiae[minI] = min;\r\n\t}\r\n\t\r\n\tfinger->minutiae = minutiae;\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nfloat \tCalculateAngle( int x1, int y1, int x2, int y2)\r\n{\r\n float\tangle;\r\n\tangle = atan2( y2 - y1, x2 - x1 );\r\n\treturn\t(angle < 0) ? angle+2*M_PI : angle;\r\n}\r\n/*\r\nStore distances between minutiae in a triangle matrix which represented in C as an array.\r\nd(0,1),d(0,2),...,d(0,n-1): n-1 elements\r\nd(1,2),d(1,3),...,d(1,n-1): n-2 elements\r\nd(i-1,i),d(i-1,i+1),...,d(i-1,n-1): n-i elements\r\nd(i,i+1),d(i,i+2),...,d(i,n-1): n-1-i elements\r\nd(n-2,n-1): 1 elements\r\nTotal number = (n-1)+(n-2)+...1 = (n-1)*n/2 elements\r\nrow 0 starts at distances[0]\r\nrow i starts at distances[start_i]: start_i = (n-1)+(n-2)+...+(n-i)=i*(2n-1-i)/2\r\nd(i,j) and d(j,i) will be stored at distances[start_i+j-i-1] = distances(i*(2n-1-i)/2+j-i-1))\r\nd(n-2,n-1)=distances((n-2)*(2n-1-n+2)/2+n-1-n+2-1)=distances((n-2)*(n+1)/2)=distance(n(n-1)/2-1)\r\n*/\r\nunsigned char CalculateDistances(Fingerprint* finger, float** pDistances)\r\n{\r\n\tunsigned int n=finger->nMinutiae;\r\n\tunsigned int i,j;\r\n\tfloat *distances = malloc(sizeof(float)*n*(n-1)/2);\r\n\tif (distances == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tmemset(distances, 0, sizeof(float)*n*(n-1)/2);\r\n\tfor ( i = 0; i < n; i++ )\r\n\t\tfor ( j = i+1; j < n; j++ )\t{\r\n\t\t\t\t\r\n\t\t\t\tdistances[i*(2*n-1-i)/2+j-i-1] = sqrt( pow( (float)( finger->minutiae[i]->x - finger->minutiae[j]->x), 2 ) + \r\n\t\t\t\t\tpow( (float)( finger->minutiae[i]->y - finger->minutiae[j]->y), 2 ) );\r\n\t\t\t\t/* #ifdef 
DEBUG\r\n\t\t\t\t\tprintf(\"distances(%d,%d)=distances[%d])=%f\\n\",i,j,i*(2*n-1-i)/2+j-i-1,distances[i*(2*n-1-i)/2+j-i-1]);\r\n\t\t\t\t*/\r\n\t\t}\r\n\t*pDistances = distances;\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nunsigned char\tFindDirectionalNeighbours(\tFingerprint* finger , float* distances, unsigned char centerI, \r\n\t\t\t\t\t\t\t\t\t\t\tBkafisParams* params )\r\n{\r\n\tunsigned char\tmaxNeighbors = params->maxNeighbors;\r\n\tfloat\tminDistance = params->minDistance;\r\n\tfloat\tmaxDistance = params->maxDistance;\r\n\tfloat\tminDistances[maxNeighbors];\r\n\t\r\n\t/* đổi neighborIds và minNeighborI từ int -> unsigned char và khởi tạo là 255 ?? */\r\n\tunsigned char\tminNeighborI[maxNeighbors];\r\n\tunsigned char\tneighborIds[maxNeighbors];\r\n\tfloat* \tphi;\r\n\tunsigned char\tnNeighbors = 0;\r\n\tunsigned char\tnIterations = 0;\r\n\t\r\n\t/* đổi sector từ int -> char ?? */\r\n\tunsigned char \tsector;\r\n\t/* đổi n và i từ int -> char ?? */\r\n\tunsigned char \tn = finger->nMinutiae;\r\n\tunsigned char\ti;\r\n\tfloat\tcenterAngle = finger->minutiae[centerI]->angle;\r\n\t\r\n\tphi = malloc( sizeof(float)*n ); /* angle between minutiae centerI & other minutiae */\r\n\t#ifdef DEBUG\r\n\t{\r\n\t\tprintf(\"start finding neighbors for %d\\tnMinutiae=%d, centerAngle=%f\\n\", centerI,n,centerAngle);\r\n\t\tsystem(\"pause\");\t/* testing ?? */\r\n\t}\r\n\t#endif\r\n\tmemset( phi, 0, sizeof(float)*n );\r\n\t\r\n\t/* khởi tạo là 255 thay vì -1 ?? */\r\n\t/* dùng memset ?? */\r\n\tfor ( i = 0; i < maxNeighbors; i++ )\tneighborIds[i] = 255;\r\n\t\r\n\twhile\t( (nNeighbors < maxNeighbors) && (nIterations < maxNeighbors) )\r\n\t{\r\n\t\t#ifdef DEBUG\r\n\t\t{\r\n\t\t\tprintf(\"Iteration=%d\\t nNeighbors=%d\\n\", nIterations, nNeighbors);\r\n\t\t\tsystem(\"pause\");\t/* testing ?? */\r\n\t\t}\r\n\t\t#endif\r\n\t\t\r\n\t\tnIterations++;\r\n\t\tmemset(minDistances, 0, sizeof(float)*maxNeighbors );\r\n\t\t\r\n\t\t/* dùng memset ?? 
*/\r\n\t\tfor\t( i = 0; i < maxNeighbors; i++ )\tminNeighborI[i] = 255;\r\n\t\t\r\n\t\tfor\t( i = 0; i < n; i++ )\r\n\t\t{\r\n\t\t\tfloat\tdist;\r\n\t\t\t\r\n\t\t\tif\t( i == centerI )\tcontinue;\r\n\t\t\t\r\n\t\t\tdist = ( centerI < i ) ? distances[ centerI*(2*n-1-centerI)/2+i-centerI-1 ] : distances[ i*(2*n-1-i)/2+centerI-i-1 ];\r\n\t\t\t/* #ifdef DEBUG\r\n\t\t\t\tprintf(\"\\tMinutia=%d\\tDistance index=%d\\tdist=%f\\n\", i,centerI*(2*n-1-centerI)/2+i-centerI-1,dist);\r\n\t\t\t*/\r\n\t\t\t\r\n\t\t\t/* skip neighbours that are too far or too near the center minutia */\r\n\t\t\tif ( (dist < minDistance) || (dist > maxDistance) )\tcontinue;\r\n\t\t\t\r\n\t\t\t/* skip neighbors that have been chosen */\r\n\t\t\tunsigned char\tfound = 0;\r\n\t\t\tunsigned char\tj = 0;\r\n\t\t\twhile\t( (j < maxNeighbors) && !found )\r\n\t\t\t{\r\n\t\t\t\tif\t( neighborIds[j++] == i )\tfound = 1;\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tif\t(found)\tcontinue;\r\n\t\t\t\r\n\t\t\t/* calculate the angle of the vector connecting center minutia with minutia i */\r\n\t\t\tif\t( phi[i] == 0 )\r\n\t\t\t\tphi[i] = CalculateAngle(\tfinger->minutiae[centerI]->x, finger->minutiae[centerI]->y,\r\n\t\t\t\t\t\t\t\t\t\t\tfinger->minutiae[i]->x , finger->minutiae[i]->y );\r\n\t\t\t\r\n\t\t\tfloat\td_phi = ( phi[i] >= centerAngle ) ? phi[i]-centerAngle : 2*M_PI+phi[i]-centerAngle;\r\n sector = floor( maxNeighbors * (d_phi/(2*M_PI)) );\r\n\t\t\t\r\n\t\t\t#ifdef DEBUG\r\n\t\t\t{\r\n\t\t\t\tprintf(\"\\tMinutia=%d\\tDistance index=%d\\tdist=%f\\tphi=%f\\tdphi=%f\\tsector=%d\\tminDistance=%f\\n\", \r\n\t\t\t\t\ti,centerI*(2*n-1-centerI)/2+i-centerI-1,dist,phi[i],d_phi,sector, minDistances[sector]);\r\n\t\t\t\tsystem(\"pause\");\t/* testing ?? */\r\n\t\t\t}\r\n\t\t\t#endif\r\n\t\t\t\r\n\t\t\tif \t( minDistances[sector] == 0 )\r\n\t\t\t{\r\n\t\t\t\tminDistances[sector] = dist;\r\n\t\t\t\tminNeighborI[sector] = i ;\r\n\t\t\t\t/* bỏ lệnh count++ : thuật toán mới là chính xác ?? 
*/\r\n\t\t\t}\r\n\t\t\telse \r\n\t\t\t{\r\n\t\t\t\tif \t( minDistances[sector] > dist )\r\n\t\t\t\t{\r\n\t\t\t\t\tminDistances[sector] = dist;\r\n\t\t\t\t\tminNeighborI[sector] = i;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\tfor ( sector = 0; sector < maxNeighbors; sector++ )\r\n\t\t{\r\n\t\t\tif\t( nNeighbors == maxNeighbors )\tbreak;\r\n\t\t\t\r\n\t\t\tif\t( minNeighborI[sector] != 255 )\t/* đổi từ -1 sang 255 ?? */\r\n\t\t\t\tneighborIds[nNeighbors++] = minNeighborI[sector];\r\n\t\t}\r\n\t\t\t\r\n\t}\r\n\r\n\tif \t( nNeighbors == 0)\r\n\t{\r\n\t\tfree(phi);\r\n\t\treturn\tISO_SUCCESS;\r\n\t}\r\n\t\r\n\tNeighbor\t**neighborArray = malloc( sizeof(Neighbor*) * nNeighbors );\r\n\t\r\n\tif \t( neighborArray == NULL )\r\n\t{\r\n\t\tfree(phi);\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tmemset( neighborArray, 0, sizeof(Neighbor*) * nNeighbors );\r\n\tNeighbor\t*pNeighbor;\r\n\t\r\n\t#ifdef DEBUG\r\n\t{\r\n\t\tprintf(\"%d neighbors of minutia %d:\", nNeighbors, centerI);\r\n\t\tsystem(\"pause\");\t/* testing ?? */\r\n\t}\r\n\t#endif\r\n\t\r\n\tfor\t( i = 0; i < nNeighbors; i++ )\r\n\t{\r\n\t\t#ifdef DEBUG\r\n\t\t{\r\n\t\t\tprintf(\"\\tNeighbor %d : %d\\n\", i, neighborIds[i]);\r\n\t\t\tsystem(\"pause\");\t/*\ttesting ?? */\r\n\t\t}\r\n\t\t#endif\r\n\t\t\r\n\t\tpNeighbor = malloc( sizeof(Neighbor) );\r\n\t\t\r\n\t\tif \t( pNeighbor != NULL )\r\n\t\t{\r\n\t\t\tmemset( pNeighbor, 0, sizeof(Neighbor) );\r\n\t\t\tpNeighbor->index = neighborIds[i];\r\n\t\t\tpNeighbor->ed = (centerI<neighborIds[i])?distances[centerI*(2*n-1-centerI)/2+neighborIds[i]-centerI-1]:\r\n\t\t\t\t\t\t\t\t\t\tdistances[neighborIds[i]*(2*n-1-neighborIds[i])/2+centerI-neighborIds[i]-1]; \r\n\t\t\t\r\n\t\t\t/* add code to calculate float/real oda, dra */\t\t\r\n\t\t\t\t\t\t\t\t\t \r\n\t\t\t/*\ttest dra ?? 
*/\r\n\t\t\tfloat\ttmp;\r\n\t\t\ttmp\t= atan2( \tfinger->minutiae[neighborIds[i]]->y - finger->minutiae[centerI]->y,\r\n\t\t\t\t\t\t\tfinger->minutiae[neighborIds[i]]->x - finger->minutiae[centerI]->x\t );\r\n\t\t\t/*\r\n\t\t\tprintf(\"a1 tmp = %f\\n\", tmp);\r\n\t\t\t*/\r\n\t\t\ttmp = ( tmp < 0 ) ? tmp+2*M_PI : tmp;\r\n\t\t\t\r\n\t\t\tpNeighbor->dra\t= (\ttmp >= \tfinger->minutiae[centerI]->angle ) \t\t\t?\r\n\t\t\t\t\t\t\t\ttmp - \tfinger->minutiae[centerI]->angle \t\t:\r\n\t\t\t\t\t\t\t\ttmp - \tfinger->minutiae[centerI]->angle + 2*M_PI\t;\r\n\t\t\t/* testing ?? */\r\n\t\t\t/*\r\n\t\t\tprintf(\"a2 tmp = %f\\n\", tmp);\r\n\t\t\tprintf(\"a2 centerI.angle = %f\\n\", finger->minutiae[centerI]->angle);\r\n\t\t\tprintf(\"a3 dra = %f\\n\", pNeighbor->dra);\r\n\t\t\t*/\r\n\t\t\t/* -- */\r\n\t\t\t\r\n\t\t\t/*\ttest oda ?? */\r\n\t\t\t/* báo lại vs thầy Minh hoán đổi vị trí */\r\n\t\t\ttmp =\tfinger->minutiae[neighborIds[i]]->angle\r\n\t\t\t\t - \tfinger->minutiae[centerI]\t\t ->angle;\r\n\t\t\t\r\n\t\t\tpNeighbor->oda \t = ( finger->minutiae[neighborIds[i]]->angle >= finger->minutiae[centerI]->angle )\r\n\t\t\t\t\t\t\t\t? tmp : 2*M_PI+tmp;\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t/*\ttesting ?? */\r\n\t\t\t/*\r\n\t\t\tprintf(\"b1 tmp = %f\\n\", tmp);\r\n\t\t\tprintf(\"b1 centerI.angle = %f\\n\", finger->minutiae[centerI]->angle);\r\n\t\t\tprintf(\"b1 neighbor.angle = %f\\n\", finger->minutiae[neighborIds[i]]->angle);\r\n\t\t\tprintf(\"b2 oda = %f\\n\", pNeighbor->oda);\r\n\t\t\t*/\r\n\t\t\t/* -- */\r\n\t\t\t\t\t\t\t\t\r\n\t\t\tneighborArray[i]=pNeighbor;\r\n\t\t\t\r\n\t\t\t/* lệnh này để giải phóng bộ nhớ ?? 
*/\r\n\t\t\tpNeighbor = NULL;\r\n\t\t}\r\n\t}\r\n\t\r\n\t#ifdef DEBUG \r\n\t\tprintf(\"\\n\");\r\n\t#endif\r\n\t\r\n\tfinger->minutiae[centerI]->nNeighbors = nNeighbors;\r\n\tfinger->minutiae[centerI]->neighbors = neighborArray;\r\n\t\r\n\tfree(phi);\r\n\t/* #ifdef DEBUG {\r\n\t\tunsigned char neighborI;\r\n\t\tfor (neighborI=0;neighborI<finger->minutiae[centerI]->nNeighbors;neighborI++)\r\n\t\t\tif (finger->minutiae[centerI]->neighbors[neighborI])\r\n\t\t\t\tprintf(\"\\t%d\\t%f\\t%f\\t%f\\t%d\\n\", \r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->index, \r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->ed, \r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->dra,\r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->oda,\r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->ridgeCount \r\n\t\t\t\t);\r\n\t\tprintf(\"\\n\");\r\n\t}*/\r\n\t\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\n/* Detect LDR */\r\nunsigned char\tDetectLDR( Fingerprint *finger, unsigned char centerI, unsigned char *neighbors, float *distances, unsigned char *ldr )\r\n{\r\n\tunsigned char\ti, j;\r\n\tunsigned char\tN = bkafisParams.ldrN\t ;\r\n\tunsigned char\tNUM = bkafisParams.ldrNum;\r\n\tunsigned char \tPOS = bkafisParams.ldrPos;\r\n\tfloat\t\t\tDIR = bkafisParams.ldrDir;\r\n\tfloat\t\t\tavr_angle_diff = 0;\r\n\t\r\n\t\r\n\tfor\t( i = 0; i < N; i++ )\r\n\t{\r\n\t\tfloat\ttmp_diff = fabs( \tfinger->minutiae[centerI]->angle\r\n\t\t\t\t\t\t\t\t - \tfinger->minutiae[neighbors[i]]->angle );\r\n\t\t\r\n\t\tavr_angle_diff \t+=\t( tmp_diff < 2*M_PI-tmp_diff ) ? 
tmp_diff : 2*M_PI-tmp_diff;\t\t\r\n\t}\r\n\t\r\n\tavr_angle_diff = avr_angle_diff / N;\r\n\t\r\n\t/*\r\n\tif \t(1)\r\n\t\tprintf(\"centerI = %d\\tavr_angle_diff = %f\\n\", centerI, avr_angle_diff);\r\n\tsystem(\"pause\");\r\n\t*/\r\n\t\r\n\tif\t( avr_angle_diff < M_PI/4 )\r\n\t{\r\n\t\tunsigned char\t*queue;\r\n\t\tunsigned char\t*stack;\r\n\t\tunsigned char\tqueue_size = 0;\r\n\t\tunsigned char \tstack_size = 0;\r\n\t\t\r\n\t\tqueue = malloc(sizeof(unsigned char)*finger->nMinutiae);\r\n\t\tif (queue==NULL)\r\n\t\t\treturn -1;\r\n\t\t\r\n\t\tstack = malloc(sizeof(unsigned char)*finger->nMinutiae);\r\n\t\tif (stack==NULL){\r\n\t\t\tfree(queue);\r\n\t\t\treturn -1;\r\n\t\t}\r\n\t\t\r\n\t\t\r\n\t\tmemset( queue, 255, sizeof(unsigned char)*finger->nMinutiae );\r\n\t\tmemset( stack, 255, sizeof(unsigned char)*finger->nMinutiae );\r\n\t\t\r\n\t\tqueue[ queue_size++ ] = centerI;\r\n\t\tstack[ stack_size++ ] = centerI;\r\n\t\t\r\n\t\t/* \r\n\t\tprintf(\"in_if_1\");\r\n\t\tprintf(\"queue_size = %d\\n\", queue_size);\r\n\t\tprintf(\"stack_size = %d\\n\", stack_size);\r\n\t\t*/\r\n\t\t\r\n\t\twhile\t( stack_size != 0 )\r\n\t\t{\r\n\t\t\tunsigned char\tcenter = stack[ --stack_size ];\r\n\t\t\tunsigned char tmp_neighbors[ N ];\r\n\t\t\t\r\n\t\t\tstack[ stack_size ] = 255;\r\n\t\t\t\r\n\t\t\t/*\r\n\t\t\tprintf(\"in while\\n\");\r\n\t\t\tprintf(\"center = %d\\n\", center);\r\n\t\t\tprintf(\"queue_size = %d\\n\", queue_size);\r\n\t\t\tprintf(\"stack_size = %d\\n\", stack_size);\r\n\t\t\tsystem(\"pause\");\r\n\t\t\t*/\r\n\t\t\t\r\n\t\t\tfor\t( i = 0; i < N; i++ )\r\n\t\t\t{\r\n\t\t\t\tunsigned char\tfinish = 0;\r\n\t\t\t\tunsigned char \ttmp_index = neighbors[i];\r\n\t\t\t\tfloat\t\t\ttmp_Ed, tmp_Angle;\r\n\t\t\t\t\r\n\t\t\t\t/*\r\n\t\t\t\tprintf(\"i = %d\\n\", i);\r\n\t\t\t\tsystem(\"pause\");\r\n\t\t\t\t*/\r\n\t\t\t\t\r\n\t\t\t\tfor\t( j = 0; j < queue_size; j++)\r\n\t\t\t\t\tif\t( tmp_index == queue[j] )\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tfinish = 
1;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t\t\t\r\n\t\t\t\t/*\r\n\t\t\t\tprintf(\"finish = %d\\n\", finish);\r\n\t\t\t\t*/\r\n\t\t\t\t\r\n\t\t\t\tif\t( finish == 1 )\tcontinue;\r\n\t\t\t\t\r\n\t\t\t\ttmp_Ed =\t( center < tmp_index ) \r\n\t\t\t\t\t\t\t? distances[ center *(2*finger->nMinutiae-1-center )/2+tmp_index-center -1 ]\r\n\t\t\t\t\t\t\t: distances[ tmp_index*(2*finger->nMinutiae-1-tmp_index)/2+center -tmp_index-1 ];\r\n\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\tfloat\ttmp_angle = fabs( \tfinger->minutiae[center] ->angle\r\n\t\t\t\t\t\t\t\t\t\t - \tfinger->minutiae[tmp_index]->angle );\r\n\t\t\t\ttmp_Angle = ( tmp_angle < 2*M_PI - tmp_angle ) \t\r\n\t\t\t\t\t\t\t? tmp_angle\r\n\t\t\t\t\t\t\t: 2*M_PI - tmp_angle;\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t/* \r\n\t\t\t\tprintf(\"tmp_Ed = %f vs %d\\ttmp_Angle = %f vs %f\\n\", tmp_Ed, POS, tmp_Angle, DIR);\r\n\t\t\t\tsystem(\"pause\");\r\n\t\t\t\t*/\r\n\t\t\t\t\r\n\t\t\t\tif \t( tmp_Ed < POS && tmp_Angle < DIR )\r\n\t\t\t\t{\r\n\t\t\t\t\tqueue[ queue_size++ ] = tmp_index;\r\n\t\t\t\t\tstack[ stack_size++ ] = tmp_index;\r\n\t\t\t\t\t/*\r\n\t\t\t\t\tprintf(\"In_if_2\\n\");\r\n\t\t\t\t\tprintf(\"queue_size = %d\\tstack_size = %d\\ttmp_index = %d\\n\", queue_size, stack_size, tmp_index);\r\n\t\t\t\t\t*/\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\t/*\r\n\t\tprintf(\"queue_size = %d\\tNUM = %d\\n\", queue_size, NUM);\r\n\t\t*/\r\n\t\t\r\n\t\tif\t( queue_size > NUM )\r\n\t\t\tfor\t( j = 0; j < queue_size; j++ )\r\n\t\t\t{\r\n\t\t\t\t/*\r\n\t\t\t\tprintf(\"queue[%d] = %d\\n\", queue_size, queue[j]);\r\n\t\t\t\t*/\r\n\t\t\t\tldr[ queue[j] ] = ( ldr[ queue[j] ] > queue_size ) ? 
ldr[ queue[j] ] : queue_size;\r\n\t\t\t}\t\r\n\t\tfree(queue);\r\n\t\tfree(stack);\r\n\t}\r\n\t\r\n\treturn\tldr[centerI];\r\n}\r\n\r\n/* Detect LDR (0 -> 31) */\r\nunsigned char\tDetectLDR_0_31( Fingerprint *finger, unsigned char centerI, unsigned char *neighbors, float *distances, unsigned char *ldr )\r\n{\r\n\tunsigned char\ti, j;\r\n\tunsigned char\tN = bkafisParams.ldrN\t ;\r\n\tunsigned char\tNUM = bkafisParams.ldrNum;\r\n\tunsigned char \tPOS = bkafisParams.ldrPos;\r\n\tfloat\t\t\tDIR = bkafisParams.ldrDir;\r\n\tfloat\t\t\tavr_angle_diff = 0;\r\n\t\r\n\t\r\n\tfor\t( i = 0; i < N; i++ )\r\n\t{\r\n\t\tfloat\ttmp_diff = fabs( \tfinger->minutiae[centerI]\t\t->angle\t* 11.25*M_PI/180\r\n\t\t\t\t\t\t\t\t - \tfinger->minutiae[neighbors[i]]\t->angle\t* 11.25*M_PI/180 );\r\n\t\t\r\n\t\tavr_angle_diff \t+=\t( tmp_diff < 2*M_PI-tmp_diff ) ? tmp_diff : 2*M_PI-tmp_diff;\t\t\r\n\t}\r\n\t\r\n\tavr_angle_diff = avr_angle_diff / N;\r\n\t\r\n\tif\t( avr_angle_diff < M_PI/4 )\r\n\t{\r\n\t\tunsigned char\t*queue;\r\n\t\tunsigned char\t*stack;\r\n\t\tunsigned char\tqueue_size = 0;\r\n\t\tunsigned char \tstack_size = 0;\r\n\t\t\r\n\t\tqueue = malloc(sizeof(unsigned char)*finger->nMinutiae);\r\n\t\tif (queue==NULL)\r\n\t\t\treturn -1;\r\n\t\t\r\n\t\tstack = malloc(sizeof(unsigned char)*finger->nMinutiae);\r\n\t\tif (stack==NULL){\r\n\t\t\tfree(queue);\r\n\t\t\treturn -1;\r\n\t\t}\r\n\t\t\r\n\t\tmemset( queue, 255, sizeof(unsigned char)*finger->nMinutiae );\r\n\t\tmemset( stack, 255, sizeof(unsigned char)*finger->nMinutiae );\r\n\t\t\r\n\t\tqueue[ queue_size++ ] = centerI;\r\n\t\tstack[ stack_size++ ] = centerI;\r\n\t\t\r\n\t\twhile\t( stack_size != 0 )\r\n\t\t{\r\n\t\t\tunsigned char\tcenter = stack[ --stack_size ];\r\n\t\t\tunsigned char tmp_neighbors[ N ];\r\n\t\t\t\r\n\t\t\tstack[ stack_size ] = 255;\r\n\t\t\t\r\n\t\t\tfor\t( i = 0; i < N; i++ )\r\n\t\t\t{\r\n\t\t\t\tunsigned char\tfinish = 0;\r\n\t\t\t\tunsigned char \ttmp_index = neighbors[i];\r\n\t\t\t\tfloat\t\t\ttmp_Ed, 
tmp_Angle;\r\n\t\t\t\t\r\n\t\t\t\tfor\t( j = 0; j < queue_size; j++)\r\n\t\t\t\t\tif\t( tmp_index == queue[j] )\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tfinish = 1;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t\t\t\r\n\t\t\t\tif\t( finish == 1 )\tcontinue;\r\n\t\t\t\t\r\n\t\t\t\ttmp_Ed =\t( center < tmp_index ) \r\n\t\t\t\t\t\t\t? distances[ center *(2*finger->nMinutiae-1-center )/2+tmp_index-center -1 ]\r\n\t\t\t\t\t\t\t: distances[ tmp_index*(2*finger->nMinutiae-1-tmp_index)/2+center -tmp_index-1 ];\r\n\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\tfloat\ttmp_angle = fabs( \tfinger->minutiae[center] ->angle * 11.25*M_PI/180\r\n\t\t\t\t\t\t\t\t\t\t - \tfinger->minutiae[tmp_index]->angle * 11.25*M_PI/180);\r\n\t\t\t\ttmp_Angle = ( tmp_angle < 2*M_PI - tmp_angle ) \t\r\n\t\t\t\t\t\t\t? tmp_angle\r\n\t\t\t\t\t\t\t: 2*M_PI - tmp_angle;\r\n\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\tif \t( tmp_Ed < POS && tmp_Angle < DIR )\r\n\t\t\t\t{\r\n\t\t\t\t\tqueue[ queue_size++ ] = tmp_index;\r\n\t\t\t\t\tstack[ stack_size++ ] = tmp_index;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\tif\t( queue_size > NUM )\r\n\t\t\tfor\t( j = 0; j < queue_size; j++ )\r\n\t\t\t{\r\n\t\t\t\tldr[ queue[j] ] = ( ldr[ queue[j] ] > queue_size ) ? 
ldr[ queue[j] ] : queue_size;\r\n\t\t\t}\t\r\n\t\tfree(queue);\r\n\t\tfree(stack);\r\n\t}\r\n\t\r\n\treturn\tldr[centerI];\r\n}\r\n\r\n/* FindDirectionalNeighbours 0 31 */\r\nunsigned char\tFindDirectionalNeighbours_0_31(\tFingerprint* finger , float* distances, unsigned char centerI, \r\n\t\t\t\t\t\t\t\t\t\t\tBkafisParams* params )\r\n{\r\n\tunsigned char\tmaxNeighbors = params->maxNeighbors;\r\n\tfloat\tminDistance = params->minDistance;\r\n\tfloat\tmaxDistance = params->maxDistance;\r\n\tfloat\t*minDistances;\r\n\tunsigned char\t*minNeighborI;\r\n\tunsigned char\t*neighborIds ;\r\n\tunsigned char\tnNeighbors = 0;\r\n\tunsigned char\tnIterations = 0;\r\n\tunsigned char \tsector;\r\n\tunsigned char \tn = finger->nMinutiae;\r\n\tunsigned char\ti;\r\n\tfloat* \tphi;\r\n\tfloat\tcenterAngle = finger->minutiae[centerI]->angle * 11.25*M_PI/180;\r\n\t\r\n\tphi \t\t\t= malloc( sizeof(float) * n );\r\n\tminDistances\t= malloc( sizeof(float) * maxNeighbors );\r\n\tminNeighborI\t= malloc( sizeof(unsigned char) * maxNeighbors );\r\n\tneighborIds \t= malloc( sizeof(unsigned char) * maxNeighbors );\r\n\t\r\n\tmemset( phi, 0, sizeof(float)*n );\r\n\tmemset( neighborIds, 255, sizeof(unsigned char) * maxNeighbors );\r\n\t\r\n\twhile\t( (nNeighbors < maxNeighbors) && (nIterations < maxNeighbors) )\r\n\t{\r\n\t\tnIterations++;\r\n\t\tmemset( minDistances, 0, sizeof(float) * maxNeighbors );\r\n\t\tmemset( minNeighborI, 255, sizeof(unsigned char) * maxNeighbors );\r\n\t\t\r\n\t\tfor\t( i = 0; i < n; i++ )\r\n\t\t{\r\n\t\t\tfloat\tdist;\r\n\t\t\t\r\n\t\t\tif\t( i == centerI )\t\r\n\t\t\t\tcontinue;\r\n\t\t\t\r\n\t\t\tdist = \t( centerI < i ) ? 
\r\n\t\t\t\t\tdistances[ centerI*(2*n-1-centerI)/2+i -centerI-1 ]:\r\n\t\t\t\t\tdistances[ i *(2*n-1-i )/2+centerI-i -1 ];\r\n\t\t\t\r\n\t\t\t/* skip neighbours that are too far or too near the center minutia */\r\n\t\t\tif \t( (dist < minDistance) || (dist > maxDistance) )\t\r\n\t\t\t\tcontinue;\r\n\t\t\t\r\n\t\t\t/* skip neighbors that have been chosen */\r\n\t\t\tunsigned char\tfound = 0;\r\n\t\t\tunsigned char\tj = 0;\r\n\t\t\t\r\n\t\t\twhile\t( (j < maxNeighbors) && !found )\r\n\t\t\t{\r\n\t\t\t\tif\t( neighborIds[j++] == i )\tfound = 1;\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tif\t(found)\r\n\t\t\t\tcontinue;\r\n\t\t\t\r\n\t\t\t/* calculate the angle of the vector connecting center minutia with minutia i */\r\n\t\t\tif\t( phi[i] == 0 )\r\n\t\t\t\tphi[i] = CalculateAngle( finger->minutiae[centerI]->x, finger->minutiae[centerI]->y,\r\n\t\t\t\t\t\t\t\t\t\t finger->minutiae[i]->x , finger->minutiae[i]->y );\r\n\t\t\t\r\n\t\t\tfloat\td_phi = ( phi[i] >= centerAngle ) ? phi[i]-centerAngle : 2*M_PI+phi[i]-centerAngle;\r\n sector = floor( maxNeighbors * (d_phi/(2*M_PI)) );\r\n\t\t\t\t\t\r\n\t\t\tif \t( minDistances[sector] == 0 )\r\n\t\t\t{\r\n\t\t\t\tminDistances[sector] = dist;\r\n\t\t\t\tminNeighborI[sector] = i ;\r\n\t\t\t}\r\n\t\t\telse \r\n\t\t\t{\r\n\t\t\t\tif \t( minDistances[sector] > dist )\r\n\t\t\t\t{\r\n\t\t\t\t\tminDistances[sector] = dist;\r\n\t\t\t\t\tminNeighborI[sector] = i;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\tfor ( sector = 0; sector < maxNeighbors; sector++ )\r\n\t\t{\r\n\t\t\tif\t( nNeighbors == maxNeighbors )\t\r\n\t\t\t\tbreak;\r\n\t\t\t\r\n\t\t\tif\t( minNeighborI[sector] != 255 )\r\n\t\t\t\tneighborIds[nNeighbors++] = minNeighborI[sector];\r\n\t\t}\r\n\t}\r\n\r\n\tif \t( nNeighbors == 0)\r\n\t{\r\n\t\tfree(phi);\r\n\t\treturn\tISO_SUCCESS;\r\n\t}\r\n\t\r\n\tNeighbor\t**neighborArray = malloc( sizeof(Neighbor*) * nNeighbors );\r\n\t\r\n\tif \t( neighborArray == NULL 
)\r\n\t{\r\n\t\tfree(phi);\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tmemset( neighborArray, 0, sizeof(Neighbor*) * nNeighbors );\r\n\tNeighbor\t*pNeighbor;\r\n\t\r\n\tfor\t( i = 0; i < nNeighbors; i++ )\r\n\t{\t\r\n\t\tpNeighbor = malloc( sizeof(Neighbor) );\r\n\t\t\r\n\t\tif \t( pNeighbor != NULL )\r\n\t\t{\r\n\t\t\tmemset( pNeighbor, 0, sizeof(Neighbor) );\r\n\t\t\tpNeighbor->index = neighborIds[i];\r\n\t\t\tpNeighbor->ed = \t( centerI < neighborIds[i] ) ? \r\n\t\t\t\t\t\t\t\tdistances[centerI *(2*n-1-centerI )/2+neighborIds[i]-centerI -1]:\r\n\t\t\t\t\t\t\t\tdistances[neighborIds[i]*(2*n-1-neighborIds[i])/2+centerI -neighborIds[i]-1]; \r\n\t\t\tfloat\ttmp;\r\n\t\t\ttmp\t= atan2( \tfinger->minutiae[neighborIds[i]]->y - finger->minutiae[centerI]->y,\r\n\t\t\t\t\t\t\tfinger->minutiae[neighborIds[i]]->x - finger->minutiae[centerI]->x\t );\r\n\r\n\t\t\ttmp = ( tmp < 0 ) ? tmp+2*M_PI : tmp;\r\n\t\t\t\r\n\t\t\tpNeighbor->dra\t= (\ttmp >= \tcenterAngle ) \t\t?\r\n\t\t\t\t\t\t\t\ttmp - \tcenterAngle \t:\r\n\t\t\t\t\t\t\t\ttmp - \tcenterAngle + 2*M_PI;\r\n\r\n\t\t\ttmp =\tfinger->minutiae[neighborIds[i]]->angle * 11.25*M_PI/180\r\n\t\t\t\t - \tcenterAngle;\r\n\t\t\t\r\n\t\t\tpNeighbor->oda \t = ( finger->minutiae[neighborIds[i]]->angle * 11.25*M_PI/180 >= centerAngle )\r\n\t\t\t\t\t\t\t\t? 
tmp : 2*M_PI+tmp;\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t\tneighborArray[i]=pNeighbor;\r\n\t\t\tpNeighbor = NULL;\r\n\t\t}\r\n\t}\r\n\t\r\n\tfinger->minutiae[centerI]->nNeighbors = nNeighbors;\r\n\tfinger->minutiae[centerI]->neighbors = neighborArray;\r\n\t\r\n\tfree(phi);\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\n/********************************************************\r\n*********convert from MoC to Fingerprint fromat**********\r\n********************************************************/\r\nISOBYTE MoC2Fingerprint(Fingerprint *finger, FingerprintMoC *fingerMoC)\r\n{\r\n\tISOBYTE i, j;\r\n\t\r\n\t/* Fingerprint header */\r\n\tfinger->height = fingerMoC->height_nMinutiae >> 6;\r\n\t/*printf(\"*<debug> height: %d </debug>\\n\", fingerMoC->height_nMinutiae >> 6);*/\r\n\tfinger->width = fingerMoC->width_quality >> 7;\r\n\tfinger->nMinutiae = fingerMoC->height_nMinutiae & 0x3f;\r\n\tfinger->quality = fingerMoC->width_quality & 0x7f;\r\n\t\r\n\t/* Dynamically allocate memory for minutiae list */\r\n\tMinutia **minutiae = malloc(finger->nMinutiae * sizeof(Minutia*));\r\n\tif (minutiae == NULL) return -1;\r\n\t/* assigned minutiae */\r\n\tMinutia *min;\r\n\tfor(i = 0; i < finger->nMinutiae; i++){\r\n\t\tmin = malloc(sizeof(Minutia));\r\n\t\t/* Minutia header */\r\n\t\tmin->x = fingerMoC->minutiae[i].x_quality >> 7;\r\n\t\tmin->y = fingerMoC->minutiae[i].y_nNeighbors_type >> 5;\r\n\t\t/* min->angle = (float)(fingerMoC->minutiae[i].angle_ldr >> 3) * M_PI / 16;*/\r\n\t\tmin->angle = (float)(fingerMoC->minutiae[i].angle_ldr >> 3);\r\n\t\tmin->type = fingerMoC->minutiae[i].y_nNeighbors_type & 0x01;\r\n\t\tmin->nNeighbors = (fingerMoC->minutiae[i].y_nNeighbors_type >> 1) & 0x0f;\r\n\t\tmin->quality = fingerMoC->minutiae[i].x_quality & 0x7f;\r\n\t\tmin->ldr = fingerMoC->minutiae[i].angle_ldr & 0x07;\r\n\t\t\r\n\t\tif(min->nNeighbors){\r\n\t\t\tNeighbor **neighborArray = malloc(min->nNeighbors * sizeof(Neighbor*));\r\n\t\t\tif (neighborArray == NULL){\r\n\t\t \t\tfree(min);\r\n\t\t 
\t\tfree(minutiae);\r\n\t\t \t\treturn - 1;\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tNeighbor *pNeighbor;\r\n\t\t\tISOWORD tmpEd;\r\n\t\t\tISOBYTE tmpDra;\r\n\t\t\tfor(j = 0; j < min->nNeighbors; j++){\r\n\t\t\t\t/* dynamically alocating */\r\n\t\t\t\tpNeighbor = malloc(sizeof(Neighbor));\r\n\t\t\t\t\r\n\t\t\t\t/* assign parameter */\r\n\t\t\t\tpNeighbor->index = fingerMoC->minutiae[i].neighbors[j].index_rc >> 3;\r\n\t\t\t\t\r\n\t\t\t\ttmpEd = fingerMoC->minutiae[i].neighbors[j].ed;\r\n\t\t\t\tpNeighbor->ed = (float)(tmpEd >> 8) + (float)(tmpEd & 0xff)/ 256.0f;\r\n\t\t\t\t\r\n\t\t\t\ttmpDra = fingerMoC->minutiae[i].neighbors[j].dra;\r\n\t\t\t\tpNeighbor->dra = (float)(tmpDra >> 5) + (float)(tmpDra & 0x1f) / 32.0f;\r\n\t\t\t\t\r\n\t\t\t\tpNeighbor->ridgeCount = fingerMoC->minutiae[i].neighbors[j].index_rc & 0x07;\r\n\t\t\t\r\n\t\t\t\tneighborArray[j] = pNeighbor;\r\n\t\t\t\tpNeighbor == NULL;\r\n\t\t\t}\r\n\t\t\tmin->neighbors = neighborArray;\r\n\t\t\r\n\t\t\tminutiae[i] = min;\r\n\t\t\tmin = NULL;\r\n\t\t}\r\n\t}\r\n\t\r\n\tfinger->minutiae = minutiae;\r\n\t\r\n\t/* return normally */\r\n\treturn 0;\r\n}\r\n\r\n/********************************************************\r\n*******convert from Fingerprint to MoC format************\r\n********************************************************/\r\nchar Fingerprint2MoC (Fingerprint *finger, FingerprintMoC *fingerMoC)\r\n{\r\n\tISOBYTE i, j;\r\n\tISOWORD nMinutiae;\r\n\t\r\n\t/* Fingerprint Header */\r\n\tif(finger->nMinutiae < 30){\r\n\t\tnMinutiae = finger->nMinutiae;\r\n\t}else{\r\n\t\tnMinutiae = 30;\r\n\t}\r\n\tfingerMoC->height_nMinutiae = (finger->height << 6) | nMinutiae;\r\n\t/* printf(\"nMinutiae = %d = %d\", nMinutiae, fingerMoC->height_nMinutiae & 0x3f);*/\r\n\tfingerMoC->width_quality = (finger->width << 7) | (ISOWORD)(finger->quality);\r\n\t\r\n\tfloat tmpEd;\r\n\tfloat tmpDra;\r\n\t/* Assign minutiae list */\r\n\tfor (i = 0; i < nMinutiae; i++){\r\n\t\tfingerMoC->minutiae[i].x_quality = (finger->minutiae[i]->x << 7) | 
finger->minutiae[i]->quality;\r\n\t\tfingerMoC->minutiae[i].y_nNeighbors_type = ((-finger->minutiae[i]->y) << 5)\r\n\t\t\t| (finger->minutiae[i]->nNeighbors << 1) | finger->minutiae[i]->type;\r\n\t\t/* fingerMoC->minutiae[i].angle_ldr = (((ISOWORD)(finger->minutiae[i]->angle * 16 / M_PI + 0.5)) << 3) | finger->minutiae[i]->ldr; */\r\n\t\t/* 9-9-2015: now angle is already in range 0-31 */\r\n\t\t\r\n\t\tfingerMoC->minutiae[i].angle_ldr = (((ISOBYTE)(finger->minutiae[i]->angle)) << 3) | finger->minutiae[i]->ldr; \r\n\t\t/* printf(\"angle = %f = %d\\n\", finger->minutiae[i]->angle, fingerMoC->minutiae[i].angle_ldr);*/\r\n\t\t/* Assign neighbors */\r\n\t\tmemset(fingerMoC->minutiae[i].neighbors, 0, 8 * sizeof(NeighborMoC));\r\n\t\tif(finger->minutiae[i]->nNeighbors){\r\n\t\t\tfor(j = 0; j < finger->minutiae[i]->nNeighbors; j++){\r\n\t\t\t\t/* ed and index */\r\n\t\t\t\tISOBYTE rc=(finger->minutiae[i]->neighbors[j]->ridgeCount>=7)?7:finger->minutiae[i]->neighbors[j]->ridgeCount;\r\n\t\t\t\tfingerMoC->minutiae[i].neighbors[j].index_rc = (finger->minutiae[i]->neighbors[j]->index << 3) | (rc & 0x07);\r\n\t\t\t\t\r\n\t\t\t\ttmpEd = finger->minutiae[i]->neighbors[j]->ed;\r\n\t\t\t\tfingerMoC->minutiae[i].neighbors[j].ed = (((ISOWORD)tmpEd) << 8) | (ISOWORD)((tmpEd - (ISOWORD)tmpEd) * 256.0f);\r\n\t\t\t\t\r\n\t\t\t\t/* dra, oda and ridgecount */\r\n\t\t\t\ttmpDra = finger->minutiae[i]->neighbors[j]->dra;\r\n\t\t\t\tfingerMoC->minutiae[i].neighbors[j].dra = (((ISOWORD)tmpEd) << 5) | (ISOWORD)((tmpEd - (ISOWORD)tmpEd) * 32.0f);\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\t\r\n\treturn 0;\r\n}\r\nvoid SaveFingerprintMoCText(char* path, FingerprintMoC* fingerMoC)\r\n{\r\n\tFILE *fp;\r\n\tunsigned char i,j;\r\n\tunsigned char neighborI;\r\n\t\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tif ( (fp = fopen(path,\"w\")) == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\t/*\r\n\tfprintf(fp,\"Minutiae 
information:Width=%d\\tHeight=%d\\tQuality=%d\\tnMinutiae=%d\\n\", finger->width, finger->height, finger->quality, finger->nMinutiae);\r\n\tfprintf(fp,\"x\\ty\\tAngle\\tType\\tQuality\\tLDR\\t#Neighbors\\tIndex\\tEd\\tDra\\tOda\\tRidgeCount...\\n\");\r\n\t*/\r\n\tunsigned char nMinutiae = GetNMinutiae(*fingerMoC);\r\n\t/* printf(\"nMinutiea=%d\\n\", nMinutiae);*/\r\n\tfor ( i = 0; i < nMinutiae; i++ )\r\n\t{\r\n\t\t\r\n\t\tfprintf\t(\tfp,\t\"%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\"\t, \r\n\t\t\t\t\tGetX(fingerMoC->minutiae[i])\t\t\t\t\t, \r\n\t\t\t\t\tGetY(fingerMoC->minutiae[i])\t\t\t, \r\n\t\t\t\t\tGetAngle(fingerMoC->minutiae[i])\t\t,\r\n\t\t\t\t\tGetType(fingerMoC->minutiae[i])\t\t,\r\n\t\t\t\t\tGetQuality(fingerMoC->minutiae[i])\t\t,\r\n\t\t\t\t\tGetLdr(fingerMoC->minutiae[i]),\r\n\t\t\t\t\tGetNNeighbors(fingerMoC->minutiae[i])\r\n\t\t\t\t);\r\n\t\tISOBYTE nNeighbors = GetNNeighbors(fingerMoC->minutiae[i]);\r\n\t\t\r\n\t\t/* printf(\"nNeighbors = %d\\n\",nNeighbors);*/\r\n\t\t\r\n\t\tif(nNeighbors){\r\n\t\t\t\r\n\t\t\tfor ( j = 0; j<nNeighbors; j++ ){\r\n\t\t\t\tfprintf\t(\tfp, \"\\t%d\\t%d\\t%d\\t%d\"\t\t\t\t\t\t\t\t, \r\n\t\t\t\t\t\tGetNeighborMinIndex(fingerMoC->minutiae[i].neighbors[j]),\r\n\t\t\t\t\t\tfingerMoC->minutiae[i].neighbors[j].ed, \r\n\t\t\t\t\t\tfingerMoC->minutiae[i].neighbors[j].dra,\r\n\t\t\t\t\t\tGetRidgeCount(fingerMoC->minutiae[i].neighbors[j])\r\n\t\t\t\t);\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t}\r\n\t\t\t\t\t\t\r\n\t\tfprintf(fp,\"\\n\");\r\n\t}\r\n\t\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\n/**************************************************\r\n***************** For debug ***********************\r\n**************************************************/\r\nvoid showMoC(FingerprintMoC *fingerMoC)\r\n{\r\n\tint i, j;\r\n\t\r\n\tprintf(\"<Header>\\n\");\r\n\tprintf(\"\\t.Width:\\t%d\\n\", fingerMoC->width_quality >> 7);\r\n\tprintf(\"\\t.Height:\\t%d\\n\", fingerMoC->height_nMinutiae >> 
6);\r\n\tprintf(\"\\t.Quality:\\t%d\\n\", fingerMoC->width_quality & 0x7f);\r\n\tprintf(\"\\t.nMinutiae:\\t%d\\n\", fingerMoC->height_nMinutiae & 0x3f);\r\n\tprintf(\"</Header>\\n\");\r\n\t\r\n\tprintf(\"\\n<Minutia list>\\n\");\r\n\tfor(i = 0; i < 1; i ++){\r\n\t\tprintf(\"\\t<minutia[%d]>\\n\", i);\r\n\t\tprintf(\"\\t\\tx:\\t%d\\n\", fingerMoC->minutiae[i].x_quality >> 7);\r\n\t\tprintf(\"\\t\\ty:\\t%d\\n\", fingerMoC->minutiae[i].y_nNeighbors_type >> 4);\r\n\t\tprintf(\"\\t\\tangle:\\t%d\\n\", fingerMoC->minutiae[i].angle_ldr >> 3);\r\n\t\tprintf(\"\\t\\ttype:\\t%d\\n\", fingerMoC->minutiae[i].y_nNeighbors_type & 0x01);\r\n\t\tprintf(\"\\t\\tnNeighbor:\\t%d\\n\", (fingerMoC->minutiae[i].y_nNeighbors_type >> 1) & 0x07);\r\n\t\tprintf(\"\\t\\tquality:\\t%d\\n\", fingerMoC->minutiae[i].x_quality & 0x7f);\t\r\n\t\tprintf(\"\\t\\tldr:\\t%d\\n\", fingerMoC->minutiae[i].angle_ldr & 0x07);\r\n\t\t\r\n\t\tprintf(\"\\t\\t<Neighbor list>\\n\");\r\n\t\tfor(j = 0; j < 8; j++){\r\n\t\t\tprintf(\"\\t\\t\\t%x\\t%x\\t%x\\n\", fingerMoC->minutiae[i].neighbors[j].index_rc,\r\n\t\t\t\tfingerMoC->minutiae[i].neighbors[j].ed, fingerMoC->minutiae[i].neighbors[j].dra);\r\n\t\t}\r\n\t\tprintf(\"\\t\\t</Neighbor list>\\n\");\r\n\t\tprintf(\"\\t</minutia[%d]>\\n\", i);\r\n\t}\r\n\tprintf(\"</Minutia list>\\n\");\r\n}/**************************************************\r\n***************** For debug ***********************\r\n**************************************************/\r\nvoid showFingerprint(Fingerprint finger)\r\n{\r\n\tint i, j;\r\n\t\r\n\tprintf(\"<Header>\\n\");\r\n\tprintf(\"\\t.Width:\\t%d\\n\", finger.width);\r\n\tprintf(\"\\t.Height:\\t%d\\n\", finger.height);\r\n\tprintf(\"\\t.Quality:\\t%d\\n\", finger.quality);\r\n\tprintf(\"\\t.nMinutiae:\\t%d\\n\", finger.nMinutiae);\r\n\tprintf(\"</Header>\\n\");\r\n\t\r\n\tprintf(\"\\n\\n<Minutia list>\\n\");\r\n\tfor(i = 0; i < 1; i ++){\r\n\t\tprintf(\"\\t<minutia[%d]>\\n\", i);\r\n\t\tprintf(\"\\t\\tx:\\t%d\\n\", 
(finger.minutiae[i])->x);\r\n\t\tprintf(\"\\t\\ty:\\t%d\\n\", finger.minutiae[i]->y);\r\n\t\tprintf(\"\\t\\tangle:\\t%f\\n\", finger.minutiae[i]->angle);\r\n\t\tprintf(\"\\t\\ttype:\\t%d\\n\", finger.minutiae[i]->type);\r\n\t\tprintf(\"\\t\\tnNeighbor:\\t%d\\n\", finger.minutiae[i]->nNeighbors);\r\n\t\tprintf(\"\\t\\tquality:\\t%d\\n\", finger.minutiae[i]->quality);\t\r\n\t\tprintf(\"\\t\\tldr:\\t%d\\n\", finger.minutiae[i]->ldr);\r\n\t\t\r\n\t\tprintf(\"\\t\\t<Neighbor list>\\n\");\r\n\t\tfor(j = 0; j < 8; j++){\r\n\t\t\tprintf(\"\\t\\t\\t%d\\t%f\\t%f\\t%d\\n\", finger.minutiae[i]->neighbors[j]->index,\r\n\t\t\t\t(ISOWORD)finger.minutiae[i]->neighbors[j]->ed, finger.minutiae[i]->neighbors[j]->dra,\r\n\t\t\t\tfinger.minutiae[i]->neighbors[j]->ridgeCount);\r\n\t\t}\r\n\t\tprintf(\"\\t\\t</Neighbor list>\\n\");\r\n\t\tprintf(\"\\t</minutia[%d]>\\n\", i);\r\n\t}\r\n\tprintf(\"</Minutia list>\\n\");\r\n}" }, { "alpha_fraction": 0.7407797574996948, "alphanum_fraction": 0.7502634525299072, "avg_line_length": 28.6875, "blob_id": "65ee1e8796adddbc3fa37923649942d3efaebba7", "content_id": "47c49f10d0ebfdca12226bd2412d93c5f113b964", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 949, "license_type": "no_license", "max_line_length": 187, "num_lines": 32, "path": "/bkafis/bkafis/bin/sosanh.sh", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n#\n# rc.local\n#\n# This script is executed at the end of each multiuser runlevel.\n# exit zero\" on success or any other\n# Make sure that the script will \"exit 0\" on success or any other\n# value on error.\n#\n# In order to enable or disable this script just change the execution\n# bits.\n#\n# By default this script does nothing.\n\n# Print the IP address\n\nsudo /home/pi/Desktop/bkafis/bkafis/bin/xoafile\nCOUNTER=0\nsudo /home/pi/Desktop/bkafis/bkafis/bin/extractMoC /home/pi/Desktop/bkafis/bkafis/bin/User\nsleep 3\nsudo 
/home/pi/Desktop/bkafis/bkafis/bin/extractMoC /home/pi/Desktop/bkafis/bkafis/bin/extract\nsleep 3\nwhile [ $COUNTER -ne 1 ]\ndo\n if [ -f /home/pi/Desktop/bkafis/bkafis/bin/extract.bin ]; then\n\tsudo /home/pi/Desktop/bkafis/bkafis/bin/matchMoC /home/pi/Desktop/bkafis/bkafis/bin/extract.bin /home/pi/Desktop/bkafis/bkafis/bin/User.bin /home/pi/Desktop/bkafis/bkafis/bin/ket_qua.txt\n\tsleep 2\n\tCOUNTER=1\n fi\ndone\nsleep 3\nexit 0" }, { "alpha_fraction": 0.6019900441169739, "alphanum_fraction": 0.6497512459754944, "avg_line_length": 24.447368621826172, "blob_id": "c0c03d656bc2019cca89997db7b261026d86e246", "content_id": "4fbdb93f45341cbe66c8a5c49d505d88bb35f086", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1005, "license_type": "no_license", "max_line_length": 63, "num_lines": 38, "path": "/bkafis/bkafis/include/Pi.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#include <stdint.h>\r\n#include <bcm2835.h>\r\n#ifndef PI_H\r\n#define PI_H\r\n\r\n#define LEADING_EDGE 0\r\n#define FALLING_EDGE 1\r\n\r\n#define PIN_MISO RPI_BPLUS_GPIO_J8_21\r\n#define PIN_MOSI RPI_BPLUS_GPIO_J8_19\r\n#define PIN_SCK RPI_BPLUS_GPIO_J8_23\r\n#define PIN_SS RPI_BPLUS_GPIO_J8_24\r\n\r\n\r\n\r\n#define SS RPI_BPLUS_GPIO_J8_24 \r\n#define BSP_SET_SS bcm2835_gpio_write(SS, HIGH); \r\n#define BSP_CLEAR_SS bcm2835_gpio_write(SS, LOW);\r\n\r\n\r\n\r\n#define PIN_RESET RPI_BPLUS_GPIO_J8_15\r\n#define MODULE_RESET_LOW \tbcm2835_gpio_write(PIN_RESET, LOW);\r\n#define MODULE_RESET_HIGH \tbcm2835_gpio_write(PIN_RESET, HIGH);\r\n\r\n\r\n\r\nvoid BSP_Config_HW (void);\r\nvoid BSP_Module_Reset_Configure (void);\r\nvoid BSP_SPI_Configure (void);\r\nvoid BSP_SPI_ReadWriteBuffer (uint8_t* , uint8_t* , uint16_t);\r\nvoid BSP_Delay_ms(uint16_t);\r\nvoid BSP_Delay_us(uint64_t);\r\nvoid BSP_Module_nRST_High(void);\r\nvoid BSP_Module_nRST_Low(void);\r\nvoid BSP_Module_Wake(void);\r\n\r\n#endif\r\n" }, { "alpha_fraction": 
0.5668684840202332, "alphanum_fraction": 0.6840946674346924, "avg_line_length": 21.725000381469727, "blob_id": "7d461f39ac26fa12b52142df003ce94699f76c55", "content_id": "7b3773e6d1eab8a3890774eca04cbc0d8c44394e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1817, "license_type": "no_license", "max_line_length": 50, "num_lines": 80, "path": "/bkafis/bkafis/src/lib/bkafis/lcd.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#include <lcd.h>\nvoid LCD_Enable()\n{\nbcm2835_gpio_fsel(LCD_EN, BCM2835_GPIO_FSEL_OUTP);\nbcm2835_gpio_write(LCD_EN,HIGH);\nbcm2835_delayMicroseconds(3);\nbcm2835_gpio_write(LCD_EN,LOW);\nbcm2835_delayMicroseconds(50);\n}\nvoid LCD_Send4Bit( uint8_t Data )\n{\nbcm2835_gpio_fsel(LCD_D4, BCM2835_GPIO_FSEL_OUTP);\nbcm2835_gpio_write(LCD_D4,Data&0x01);\nbcm2835_gpio_fsel(LCD_D5, BCM2835_GPIO_FSEL_OUTP);\nbcm2835_gpio_write(LCD_D5,(Data>>1)&1);\nbcm2835_gpio_fsel(LCD_D6, BCM2835_GPIO_FSEL_OUTP);\nbcm2835_gpio_write(LCD_D6,(Data>>2)&1);\nbcm2835_gpio_fsel(LCD_D7, BCM2835_GPIO_FSEL_OUTP);\nbcm2835_gpio_write(LCD_D7,(Data>>3)&1);\n}\nvoid LCD_SendCommand( uint8_t command )\n{\nLCD_Send4Bit( command >>4 );/* Gui 4 bit cao */\nLCD_Enable() ;\nLCD_Send4Bit( command ); /* Gui 4 bit thap*/\nLCD_Enable() ;\n}\nvoid LCD_Init()\n{\nbcm2835_init();\nLCD_Send4Bit(0x00);\nbcm2835_delay(20);\nbcm2835_gpio_fsel(LCD_RS, BCM2835_GPIO_FSEL_OUTP);\nbcm2835_gpio_write(LCD_RS,LOW);\nbcm2835_gpio_fsel(LCD_RW, BCM2835_GPIO_FSEL_OUTP);\nbcm2835_gpio_write(LCD_RW,LOW);\nLCD_Send4Bit(0x03);\nLCD_Enable();\nbcm2835_delay(5);\nLCD_Enable();\nbcm2835_delayMicroseconds(100);\nLCD_Enable();\nLCD_Send4Bit(0x02);\nLCD_Enable();\nLCD_SendCommand( 0x28 ); \nLCD_SendCommand( 0x0c); \nLCD_SendCommand( 0x06 ); \nLCD_SendCommand(0x01); \n}\nvoid LCD_PutChar ( uint8_t Data )\n{\nbcm2835_gpio_fsel(LCD_RS, BCM2835_GPIO_FSEL_OUTP);\nbcm2835_gpio_write(LCD_RS,HIGH);\nLCD_SendCommand( Data 
);\nbcm2835_gpio_write(LCD_RS,LOW);\n}\nvoid LCD_Puts( uint8_t *s)\n{\n while (*s)\n {\n LCD_PutChar(*s);\n s++;\n }\n}\nvoid LCD_Clear()\n{\n LCD_SendCommand(0x01); \n bcm2835_delay(5);\n}\nvoid LCD_Gotoxy( uint8_t x, uint8_t y)\n{\n uint8_t address;\n if(!y)\n address = (0x80 + x);\n else\n address = (0xC0 + x);\n bcm2835_delay(1);\n LCD_SendCommand(address);\n bcm2835_delay(5);\n}" }, { "alpha_fraction": 0.6254768967628479, "alphanum_fraction": 0.6511233448982239, "avg_line_length": 30.55172348022461, "blob_id": "61fb9c87c001c76732bc66f087743c046c4cabf7", "content_id": "41148e61aa61d81158e07d73889e4ec6687d4741", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4730, "license_type": "no_license", "max_line_length": 116, "num_lines": 145, "path": "/bkafis/exports/include/fingerprint.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tfingerprint.h\r\n\tDescription: Data structure to present fingerprint inside BKAFIS package\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n\t\r\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\r\n\t- add function DetectLDR\r\n\t- add params for DetectLDR into struct BkafisParams\r\n *********************************************************************/\r\n\r\n\r\n\r\n#ifndef _FINGERPRINT_H_\r\n#define _FINGERPRINT_H_\r\n\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include <math.h>\r\n\r\n#include <ISOTemplate.h>\r\n\r\n#ifndef max(a,b)\r\n#define max(a, b) ((a) > (b) ? (a) : (b))\r\n#endif\r\n#ifndef min(a,b)\r\n#define min(a, b) ((a) < (b) ? (a) : (b))\r\n#endif\r\n#ifndef sround(x)\r\n#define sround(x) ((int) (((x)<0) ? (x)-0.5 : (x)+0.5))\r\n#endif\r\n#ifndef trunc_dbl_precision\r\n#define trunc_dbl_precision(x, scale) ((double) (((x)<0.0) \\\r\n ? 
((int)(((x)*(scale))-0.5))/(scale) \\\r\n : ((int)(((x)*(scale))+0.5))/(scale)))\r\n#endif\r\n\r\n#ifndef M_PI\r\n#define M_PI\t\t3.14159265358979323846\t/* pi */\r\n#endif\r\n#ifndef TRUE\r\n#define TRUE\t1\r\n#endif\r\n#ifndef FALSE\r\n#define FALSE\t1\r\n#endif\r\n/* #define DEBUG*/\r\n\r\n/* constants to control extractor */\r\n#define MAX_FOUND_NEIGHBORS\t8\r\n#define N_SECTORS\t\t\t8\r\n#define MIN_DISTANCE\t\t8\r\n#define MAX_DISTANCE\t\t150\r\n\r\n\r\n/* ldr params ?? */\r\n#define LDR_N \t\t\t\t3\r\n#define LDR_NUM\t\t\t\t3\r\n/* angle in range 0-255 corresponding to 0-2pi */\r\n#define LDR_DIR\t\t\t\t16 \r\n#define LDR_POS\t\t\t\t50\r\n#define LDR_ANGLE_AVG\t\t32\r\n/* -- */\r\n\r\n#define NumNeighs2\t\t\t3\r\n\r\n#define LDR_WEIGHT \t\t\t0.5\r\n#define SIGMA \t\t\t\t2 \r\n#define TG_THRESHOLD \t\t12\r\n#define TA_THRESHOLD \t\tM_PI/6\r\n#define RC_THRESHOLD\t\t1\r\n\r\n#define MAX_ISO_ANGLE\t\t256\r\n/* convert angle in range according to ISO format into radian */\r\n#define ConvertISOAngle(isoangle)\t((isoangle)*2*M_PI/MAX_ISO_ANGLE)\r\n\r\n\r\n/*\t\t\t\tMinutia information for local pairing\t\t\t\t\t*/\r\ntypedef struct\r\n{\r\n\tint \t\t\t\tx, y; /** position x, y of the minutia */\r\n\tunsigned char\t\tangle; /** minutiae angle in range 0-31 */\r\n\tISOBYTE \t\t\tquality; \r\n\tISOBYTE \t\t\tldr; \r\n\tISOBYTE \t\t\ttype;\r\n\tISOBYTE \t\t\tnNeighbors;\r\n\tISOBYTE \t\tneighborIds[MAX_FOUND_NEIGHBORS];\r\n\tISOBYTE \t\t\tridgeCount[MAX_FOUND_NEIGHBORS];\r\n}\tMinutia;\r\n \r\ntypedef struct\r\n{\r\n\tISOWORD \t\t\twidth, height;\r\n\tISOWORD \t\t\tquality;\r\n\tISOBYTE \t\t\tnMinutiae;\r\n\tMinutia\t\t\t\t**minutiae; /* Array of minutiae */\r\n\tfloat \t\t\t\t*distances;\r\n\tfloat \t\t\t\t*dra;\r\n\tint \t\t\t\t*oda;\r\n}\tFingerprint;\r\n\r\n#define FINGERHEADERSIZE\t(3*sizeof(ISOWORD)+sizeof(ISOBYTE))\r\n#define MINUTIASIZE\t\t\t(sizeof(Minutia))\r\nextern int 
debug;\r\n/**********************************************************************\r\n\tConvert from ISOTemplate 2005 format \r\n\tInput:\r\n\t\t\tPointer to ISOTemplate\r\n\tOutput:\r\n\t\t\tpointer to Fingerprint structure declared above \r\n\tUsage:\r\n\t\t\tin order to load the iso template from file call \r\n\t\t\tISORESULT LoadISOTemplate (ISOBYTE *path);\r\n\t\t\tthen in order to convert from the template into Fingerprint structure\r\n\t\t\tcall unsigned char ConvertISO2005Fingerprint(Fingerprint* finger);\r\n *********************************************************************/ \r\nunsigned char SaveFingerprintText(char* path, Fingerprint* finger);\r\nunsigned char CleanFingerprint(Fingerprint* finger);\r\nunsigned char SortMinutiaQuality(Fingerprint* finger);\r\nISOBYTE ConvertISO2005Fingerprint(Fingerprint* finger);\r\nchar ConvertFingerprintISO2005(Fingerprint* finger);\r\n/* 2 hàm dưới đây đọc ghi file binary cấu trúc Fingerprint */\r\n\r\nunsigned char SaveFingerprint(char* path, Fingerprint* finger);\r\nunsigned char ReadFingerprint(char* path, Fingerprint* finger);\r\n\r\nunsigned char\tDetectLDR( Fingerprint *finger, unsigned char centerI, unsigned char *neighbors, unsigned char *ldr );\r\n/* void ConvertDra0_31(Fingerprint* finger);\t\t\t\t\t\t\t\t\t\t\t*/\r\n\r\nint GetOda(Fingerprint* finger, ISOBYTE min1, ISOBYTE min2);\r\nfloat GetDra(Fingerprint* finger, ISOBYTE min1, ISOBYTE min2);\r\nfloat GetDistance(Fingerprint* finger, ISOBYTE min1, ISOBYTE min2);\r\nint ad_pi_iso(ISOBYTE angle1, ISOBYTE angle2);\r\nint ad_2pi_iso(ISOBYTE angle1, ISOBYTE angle2);\r\nfloat ad_pi(float angle1, float angle2);\r\nfloat ad_2pi(float angle1, float angle2);\r\nISOBYTE CalculateEdDraOda (Fingerprint* finger);\r\nchar FindDirectionalNeighbors(Fingerprint* finger, unsigned char centerI);\r\nchar FindDirectionalNeighborsV2(Fingerprint* finger, unsigned char centerI, int* sortedNeighborIds);\r\nchar FindDirectionalNeighborsV3(Fingerprint* finger, unsigned char 
centerI, int* sortedNeighborIds);\r\nchar CalculateLDR(Fingerprint* finger);\r\nchar CalculateLDRNeighbors(Fingerprint* finger);\r\nvoid sort_double_inc(double *ranks, int *items, const int len);\r\nint sort_distance(Fingerprint* finger, ISOBYTE centerI, int* order);\r\n#endif" }, { "alpha_fraction": 0.6067234873771667, "alphanum_fraction": 0.621119499206543, "avg_line_length": 29.255474090576172, "blob_id": "1626936cf403e4e0702231441a3897939971c264", "content_id": "c33c648e4e2b88851aa7f31b07ea8046ad5ae879", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 12434, "license_type": "no_license", "max_line_length": 158, "num_lines": 411, "path": "/bkafis/bkafis/src/bin/extract/extract_6_8_2015.new.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*******************************************************************************\n\nLicense: \nThis software and/or related materials was developed at the National Institute\nof Standards and Technology (NIST) by employees of the Federal Government\nin the course of their official duties. Pursuant to title 17 Section 105\nof the United States Code, this software is not subject to copyright\nprotection and is in the public domain. \n\nThis software and/or related materials have been determined to be not subject\nto the EAR (see Part 734.3 of the EAR for exact details) because it is\na publicly available technology and software, and is freely distributed\nto any interested party with no licensing requirements. Therefore, it is \npermissible to distribute this software as a free download from the internet.\n\nDisclaimer: \nThis software and/or related materials was developed to promote biometric\nstandards and biometric technology testing for the Federal Government\nin accordance with the USA PATRIOT Act and the Enhanced Border Security\nand Visa Entry Reform Act. 
Specific hardware and software products identified\nin this software were used in order to perform the software development.\nIn no case does such identification imply recommendation or endorsement\nby the National Institute of Standards and Technology, nor does it imply that\nthe products and equipment identified are necessarily the best available\nfor the pursectore.\n\nThis software and/or related materials are provided \"AS-IS\" without warranty\nof any kind including NO WARRANTY OF PERFORMANCE, MERCHANTABILITY,\nNO WARRANTY OF NON-INFRINGEMENT OF ANY 3RD PARTY INTELLECTUAL PROPERTY\nor FITNESS FOR A PARTICULAR PURsectorE or for any pursectore whatsoever, for the\nlicensed product, however used. In no event shall NIST be liable for any\ndamages and/or costs, including but not limited to incidental or consequential\ndamages of any kind, including economic damage or injury to property and lost\nprofits, regardless of whether NIST shall be advised, have reason to know,\nor in fact shall know of the sectorsibility.\n\nBy using this software, you agree to bear all risk relating to quality,\nuse and performance of the software and/or related materials. You agree\nto hold the Government harmless from any claim arising from your use\nof the software.\n\n*******************************************************************************/\n\n/***********************************************************************\nPACKAGE: NIST Fingerprint Minutiae Detection\n\nFILE: MINDTCT.C\n\nAUTHOR: Michael D. 
Garris\nDATE: 04/18/2002\nUPDATED: 09/14/2004\nUPDATED: 05/09/2005 by MDG\nUPDATED: 01/31/2008 by Kenneth Ko\nUPDATED: 09/04/2008 by Kenneth Ko\nUPDATED: 09/30/2008 by Kenenth Ko - add version option.\n\n#cat: mindtct - Uses Version 2 of the NIST Latent Fingerprint System (LFS)\n#cat: to detect minutiae and count ridges in a grayscale image.\n#cat: This version of the program will process:\n#cat: ANSI/NIST, WSQ, JPEGB, JPEGL, and IHead image formats.\n#cat: Results are written to various output files with\n#cat: predefined extensions appeneded to a specified output\n#cat: root path.\n\n***********************************************************************/\n\n#include <stdio.h>\n#include <sys/param.h>\n#include <an2k.h>\n#include <lfs.h>\n#include <imgdecod.h>\n#include <imgboost.h>\n#include <img_io.h>\n#include <version.h>\n#include <fingerprint.h>\n\nvoid procargs(int, char **, int *, int *, char **, char **);\nint ascend_sorting(double* arr, int* sort_ind, int len);\nvoid descend_sorting(double* arr, int* sort_ind, int len);\n\nint debug = 0;\n\n\n/*************************************************************************\n**************************************************************************/\nint main(int argc, char *argv[])\n{\n\tint boostflag, m1flag;\n\tchar *ifile, *oroot, ofile[MAXPATHLEN];\n\tunsigned char *idata, *bdata;\n\tint img_type;\n\tint ilen, iw, ih, id, ippi, bw, bh, bd;\n\tdouble ippmm;\n\tint img_idc, img_imp;\n\tint *direction_map, *low_contrast_map, *low_flow_map;\n\tint *high_curve_map, *quality_map;\n\tint map_w, map_h;\n\tint ret;\n\tMINUTIAE *minutiae;\n\tANSI_NIST *ansi_nist;\n\tRECORD *imgrecord;\n\tint imgrecord_i;\n\n\t/* Process command line arguments. */\n\tprocargs(argc, argv, &boostflag, &m1flag, &ifile, &oroot);\n\n\t/* 1. READ FINGERPRINT IMAGE FROM FILE INTO MEMORY. */\n\n\t/* Is input file in ANSI/NIST format? */\n\tif((ret = is_ANSI_NIST_file(ifile)) < 0) {\n\t\t/* If error ... 
*/\n\t\texit(ret);\n\t}\n\n\t/* If file is ANSI/NIST format ... */\n\tif(ret){\n\t\timg_type = ANSI_NIST_IMG;\n\t\t/* Parse ANSI/NIST file into memory structure */\n\t\tif((ret = read_ANSI_NIST_file(ifile, &ansi_nist)))\n\t\texit(ret);\n\t\t/* Get first grayscale fingerprint record in ANSI/NIST file. */\n\t\tif((ret = get_first_grayprint(&idata, &iw, &ih, &id,\n\t\t &ippmm, &img_idc, &img_imp,\n\t\t &imgrecord, &imgrecord_i, ansi_nist)) < 0){\n\t\t/* If error ... */\n\t\t\tfree_ANSI_NIST(ansi_nist);\n\t\t\texit(ret);\n\t\t}\n\t\t/* If grayscale fingerprint not found ... */\n\t\tif(!ret){\n\t\t\tfree_ANSI_NIST(ansi_nist);\n\t\t\tfprintf(stderr, \"ERROR : main : \");\n\t\t\tfprintf(stderr, \"grayscale image record not found in %s\\n\", ifile);\n\t\t\texit(-2);\n\t\t}\n\t}\n\t/* Otherwise, not an ANSI/NIST file */\n\telse{\n\t\t/* Read the image data from file into memory */\n\t\tif((ret = read_and_decode_grayscale_image(ifile, &img_type,\n\t\t &idata, &ilen, &iw, &ih, &id, &ippi))){\n\t\t\texit(ret);\n\t\t}\n\t\t/* If image ppi not defined, then assume 500 */\n\t\tif(ippi == UNDEFINED)\n\t\t\tippmm = DEFAULT_PPI / (double)MM_PER_INCH;\n\t\telse \n\t\t\tippmm = ippi / (double)MM_PER_INCH;\n\t}\n\n\t/* 2. ENHANCE IMAGE CONTRAST IF REQUESTED */\n\tif(boostflag)\n\t\ttrim_histtails_contrast_boost(idata, iw, ih); \n\n\t/* 3. GET MINUTIAE & BINARIZED IMAGE. */\n\tif((ret = get_minutiae(&minutiae, &quality_map, &direction_map,\n\t &low_contrast_map, &low_flow_map, &high_curve_map,\n\t &map_w, &map_h, &bdata, &bw, &bh, &bd,\n\t idata, iw, ih, id, ippmm, &lfsparms_V2))){\n\t\tif(img_type == ANSI_NIST_IMG)\n\t\tfree_ANSI_NIST(ansi_nist);\n\t\tfree(idata);\n\t\texit(ret);\n\t}\n\n\t\t/* Done with input image data */\n\t\tfree(idata);\n\t\n \n\t\n/* Done with minutiae detection maps. 
*/\nfree(quality_map);\nfree(direction_map);\nfree(low_contrast_map);\nfree(low_flow_map);\nfree(high_curve_map);\n/* Edited by Minh Nguyen, August 3 2015 */\n/* Add code to find neighbors according to BKAFIS algorithm */\ncount_minutiae_ridges_bkafis(minutiae, bdata, iw, ih, lfsparms);\nfree(bdata);\t\n/* Then, convert minutiae structure of lfs algorithm into BKAFIS data structure */\n\n/*sort minutiae list in the descend order of quality*/\nint i;\ndouble sort_quality[minutiae->num];\nint sort_ind[minutiae->num];\nfor(i = 0; i < minutiae->num; i++) sort_quality[i] = minutiae->list[i]->reliability;\ndescend_sorting(sort_quality, sort_ind, minutiae->num);\nFingerprint finger;\nfinger.width = iw;\nfinger.height = ih;\n/* finger.quality = add code to calculate quality of fingerprint here \nrefer to function comp_nfiq_flex() in the NFIQ package */\nfinger.nMinutiae = minutiae->num;\nfinger.minutiae = malloc(sizeof(Minutia)*minutiae->num);\nif (finger.minutiae==NULL)\n{\n\tif(img_type == ANSI_NIST_IMG)\nfree_ANSI_NIST(ansi_nist);\nfree_minutiae(minutiae);\nexit(-1);\n}\n\n\tfor(unsigned int i=0;i<minutiae->num;i++){\n\t\tfinger.minutiae[i].x = minutiae->list[sort_ind[i]]->x;\n\t finger.minutiae[i].y = minutiae->list[sort_ind[i]]->y;\n\t float tmp = minutiae->list[sort_ind[i]]->direction*11.25; /* after extracting, the minutiae direction is in range 0-32 => need to be converted into grad */\n\t if(tmp<=90){\n\t\ttmp = 90 - tmp;\n\t }else{\n\t\ttmp = 450 - tmp;\n\t }\n\t finger.minutiae[i].angle = tmp/1.40625; /* convert direction into radian */\n\t finger.minutiae[i].quality = minutiae->list[sort_ind[i]]->reliability*100;\n\t finger.minutiae[i].type = minutiae->list[sort_ind[i]]->type;\t \n\t /* take the neighbors */\n\t finger.minutiae[i].nNeighbors = minutiae->list[sort_ind[i]]->num_nbrs;\n\t finger.minutiae[i].neighbors = malloc(sizeof(Neighbor)*finger.minutiae[i].nNeighbors);\n\t if (finger.minutiae[i].neighbors==NULL){\n\t\t for (unsigned int 
j=i-1;j>=0;j--)\n\t\t\t free(finger.minutiae[i].neighbors);\n\t\tfree(finger.minutiae);\n\t\tfree_minutiae(minutiae);\nexit(-1);\n\t }\n}\nfree_minutiae(minutiae);\nfree(bdata);\n\nfloat* distances;\nif (CalculateDistances(&finger,distances)==ISO_GENERICERROR){\n\t\tCleanFingerprint(&finger);\t\t\n\t\texit(-1);\n}\nfor (i=0;i<minutiae->num;i++){\n\t\n}\n\t\n/* call the following function for each pair of minutiae \n\t\tint ridge_count(const int first, const int second, MINUTIAE *minutiae,\n unsigned char *bdata, const int iw, const int ih,\n const LFSPARMS *lfsparms)\n\t*/\nSaveFingerprintText(\"test.txt\", &finger);\nCleanFingerprint(&finger);\n\n/* Done with minutiae and binary image results */\nfree_minutiae(minutiae);\nfree(bdata);\n\n/* Exit normally. */\nexit(0);\n}\n\n/*************************************************************************\n**************************************************************************\nPROCARGS - Process command line arguments\nInput:\nargc - system provided number of arguments on the command line\nargv - system provided list of command line argument strings\nOutput:\nboostflag - contrast boost flag \"-b\"\nifile - input image file name to be processed by this program\nifile - output image file name to be created by this program\n**************************************************************************/\nvoid procargs(int argc, char **argv, int *boostflag, int *m1flag,\n char **ifile, char **oroot)\n{\nint a;\n\n*boostflag = FALSE;\n*m1flag = FALSE;\n\nif ((argc == 2) && (strcmp(argv[1], \"-version\") == 0)) {\ngetVersion();\nexit(0);\n}\n\nif(argc == 3){\n*ifile = argv[1];\n*oroot = argv[2];\nreturn;\n}\n\nif((argc == 4) || (argc == 5)){\na = 1;\nwhile(a < argc-2){\nif(strcmp(argv[a], \"-b\") == 0){\n*boostflag = TRUE;\n}\nelse if(strcmp(argv[a], \"-m1\") == 0){\n*m1flag = TRUE;\n}\nelse{\nfprintf(stderr, \"Unrecognized flag \\\"%s\\\"\\n\", argv[a]);\nfprintf(stderr,\n \"Usage : %s [-b] [-m1] <finger_img_in> <oroot>\\n\",\n 
argv[0]);\nfprintf(stderr,\n\" -b = contrast boost image\\n\");\nfprintf(stderr,\n\" -m1 = output \\\"*.xyt\\\" according to ANSI INCITS 378-2004\\n\");\nexit(1);\n}\na++;\n}\n}\nelse{\nfprintf(stderr, \"Invalid number of arguments on command line\\n\");\nfprintf(stderr,\n \"Usage : %s [-b] [-m1] <finger_img_in> <oroot>\\n\",\n argv[0]);\nfprintf(stderr,\n\" -b = contrast boost image\\n\");\nfprintf(stderr,\n\" -m1 = output \\\"*.xyt\\\" according to ANSI INCITS 378-2004\\n\");\nexit(2);\n}\n\n*ifile = argv[a++];\n*oroot = argv[a];\n}\n\n/*************************************************************************\n**************************************************************************\n#cat: descend_sorting - sort an array in the descend order of value \n#cat: \n\nInput:\narr - original array\n\t len - length of array\nOutput:\n\t arr - array after sorting\n\t sort_ind: array store index of elements after sorting\n**************************************************************************/\nvoid descend_sorting(double* arr, int* sort_ind, int len){\n\t/* initialization*/\n\tdouble tmp;\n\tint i, j, tmp_ind;\n\tfor(i = 0; i < len; i++){\n\t\tsort_ind[i] = i;\n\t}\n\t\n\tfor(i = 1; i < len; i++){\n\t\tfor(j = i; j > 0; j--){\n\t\t\tif(arr[j] > arr[j-1]){\n\t\t\t\t/* swap the two values*/\n\t\t\t\ttmp = arr[j];\n\t\t\t\tarr[j] = arr[j-1];\n\t\t\t\tarr[j-1] = tmp;\n\t\t\t\t\n\t\t\t\t/* swap the two indexs*/\n\t\t\t\ttmp_ind = sort_ind[j];\n\t\t\t\tsort_ind[j] = sort_ind[j-1];\n\t\t\t\tsort_ind[j-1] = tmp_ind;\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\n/*************************************************************************\n**************************************************************************\n#cat: ascend_sorting - sort an array in the ascend order of value \n#cat: \n\nInput:\narr - original array\n\t len - length of array\nOutput:\n\t sort_ind: array store index of elements after sorting\nReturn:\nzero: successful\n\t non-zero: malloc 
error\n**************************************************************************/\nint ascend_sorting(double* arr, int* sort_ind, int len){\n\tdouble *arr1;\n\tarr1 = (double*)malloc(len * sizeof(double));\n\tif(arr1 == (double*)NULL){\n\t\tfprintf(stderr, \"ERROR : ascend_sorting : malloc : arr1\\n\");\n return(-1);\n\t}\n\t\n\t/* initialization*/\n\tdouble tmp;\n\tint i, j, tmp_ind;\n\tfor(i = 0; i < len; i++){\n\t\tsort_ind[i] = i;\n\t\tarr1[i] = arr[i];\n\t}\n\t\n\tfor(i = 1; i < len; i++){\n\t\tfor(j = i; j > 0; j--){\n\t\t\tif(arr1[j] < arr1[j-1]){\n\t\t\t\t/* swap the two values*/\n\t\t\t\ttmp = arr1[j];\n\t\t\t\tarr1[j] = arr1[j-1];\n\t\t\t\tarr1[j-1] = tmp;\n\t\t\t\t\n\t\t\t\t/* swap the two indexs*/\n\t\t\t\ttmp_ind = sort_ind[j];\n\t\t\t\tsort_ind[j] = sort_ind[j-1];\n\t\t\t\tsort_ind[j-1] = tmp_ind;\n\t\t\t}\n\t\t}\n\t}\n\t\n\tfree(arr1);\n\treturn 0;\n}" }, { "alpha_fraction": 0.5757119655609131, "alphanum_fraction": 0.5907540917396545, "avg_line_length": 34.859710693359375, "blob_id": "17cd79adf77041989a925ab70bc38ce6e9bdcc26", "content_id": "33d0cd0ce8ec53fa28d2a0f03e04eee20acd6096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9972, "license_type": "no_license", "max_line_length": 135, "num_lines": 278, "path": "/bkafis/bkafis/src/bin/extractMoC/extractMoC.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*******************************************************************************\n\nLicense: \nThis software and/or related materials was developed at the National Institute\nof Standards and Technology (NIST) by employees of the Federal Government\nin the course of their official duties. Pursuant to title 17 Section 105\nof the United States Code, this software is not subject to copyright\nprotection and is in the public domain. 
\n\nThis software and/or related materials have been determined to be not subject\nto the EAR (see Part 734.3 of the EAR for exact details) because it is\na publicly available technology and software, and is freely distributed\nto any interested party with no licensing requirements. Therefore, it is \npermissible to distribute this software as a free download from the internet.\n\nDisclaimer: \nThis software and/or related materials was developed to promote biometric\nstandards and biometric technology testing for the Federal Government\nin accordance with the USA PATRIOT Act and the Enhanced Border Security\nand Visa Entry Reform Act. Specific hardware and software products identified\nin this software were used in order to perform the software development.\nIn no case does such identification imply recommendation or endorsement\nby the National Institute of Standards and Technology, nor does it imply that\nthe products and equipment identified are necessarily the best available\nfor the pursectore.\n\nThis software and/or related materials are provided \"AS-IS\" without warranty\nof any kind including NO WARRANTY OF PERFORMANCE, MERCHANTABILITY,\nNO WARRANTY OF NON-INFRINGEMENT OF ANY 3RD PARTY INTELLECTUAL PROPERTY\nor FITNESS FOR A PARTICULAR PURsectorE or for any pursectore whatsoever, for the\nlicensed product, however used. In no event shall NIST be liable for any\ndamages and/or costs, including but not limited to incidental or consequential\ndamages of any kind, including economic damage or injury to property and lost\nprofits, regardless of whether NIST shall be advised, have reason to know,\nor in fact shall know of the sectorsibility.\n\nBy using this software, you agree to bear all risk relating to quality,\nuse and performance of the software and/or related materials. 
You agree\nto hold the Government harmless from any claim arising from your use\nof the software.\n\n*******************************************************************************/\n\n/***********************************************************************\n PACKAGE: NIST Fingerprint Minutiae Detection\n\n FILE: MINDTCT.C\n\n AUTHOR: Michael D. Garris\n DATE: 04/18/2002\n UPDATED: 09/14/2004\n UPDATED: 05/09/2005 by MDG\n UPDATED: 01/31/2008 by Kenneth Ko\n UPDATED: 09/04/2008 by Kenneth Ko\n UPDATED: 09/30/2008 by Kenenth Ko - add version option.\n\n#cat: mindtct - Uses Version 2 of the NIST Latent Fingerprint System (LFS)\n#cat: to detect minutiae and count ridges in a grayscale image.\n#cat: This version of the program will process:\n#cat: ANSI/NIST, WSQ, JPEGB, JPEGL, and IHead image formats.\n#cat: Results are written to various output files with\n#cat: predefined extensions appeneded to a specified output\n#cat: root path.\n\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\n\t- Detecting LDR (from line 318)\n\n***********************************************************************/\n\n#include <stdio.h>\n#include <sys/param.h>\n#include <an2k.h>\n#include <lfs.h>\n#include <imgdecod.h>\n#include <imgboost.h>\n#include <img_io.h>\n#include <version.h>\n#include <fingerprint.h>\n#include <fingerprintMoC.h>\n#include <extract.h>\n#include <lcd.h>\n\n\n/********************************************************\n*********convert from MoC to Fingerprint format**********\n********************************************************/\nISOBYTE MoC2Fingerprint(Fingerprint *finger, FingerprintMoC *fingerMoC);\n/********************************************************\n*******convert from Fingerprint to MoC format************\n********************************************************/\nchar Fingerprint2MoC (Fingerprint *finger, FingerprintMoC *fingerMoC);\nvoid procargs(int, char **, int *, int *, char 
**);\n\n/*************************************************************************\n**************************************************************************/\nint main(int argc, char *argv[])\n{\n\tint boostflag, m1flag;\n\tchar *oroot, ofile[MAXPATHLEN];\n\tuint8_t str1[30];\n\tLCD_Init();\n\t/* Process command line arguments. */\n\tprocargs(argc, argv, &boostflag, &m1flag, &oroot);\n\tFingerprint finger;\n\tfinger.nMinutiae=MOC_MINUTIAE_NUM;\n\textract(boostflag, m1flag,&finger);\n\tsprintf(ofile, \"%s.txt\", oroot);\n\t/* printf(\"%s\\n\",ofile);*/\n\t/* if (SaveFingerprintText(ofile, &finger)){\n\t\tprintf(\"File not found:%s\\n\",ofile);\n\t}; */\n\tFingerprintMoC fingerMoC;\n\tmemset(&fingerMoC,0,sizeof(FingerprintMoC));\n\tFingerprint2MoC(&finger, &fingerMoC);\n\tsprintf(ofile, \"%s.MoC.txt\", oroot);\n\tSaveFingerprintMoCText(ofile, &fingerMoC); \n\tsprintf(ofile, \"%s.bin\", oroot);\n\tSaveFingerprintMoC(ofile,&fingerMoC);\n\t\n\t/* memset(&fingerMoCtest,0,sizeof(FingerprintMoC));*/\n\t/* printf(\"size of FingerprintMoc=%d\\n size of MinutiaMoC=%d\\n size of NeighborMoC=%d\\n\",\n\t\t\tsizeof(FingerprintMoC),sizeof(MinutiaMoC),sizeof(NeighborMoC));*/\n\t/* Fingerprint2MoC(&finger, &fingerMoC);\n\t/* sprintf(ofile, \"%s.MoC.txt\", oroot);*/\n\t/* SaveFingerprintMoCText(ofile, &fingerMoC); \n\tsprintf(ofile,\"%s.bin\",oroot);\n\tSaveFingerprintMoC(ofile,&fingerMoC);\n\t*/\n\t\n\tCleanFingerprint(&finger);\n\tsprintf(str1,\"extract done\");\n\tLCD_Clear();\n\tLCD_Gotoxy(0,0);\n\tLCD_Puts(str1);\n\texit(0);\n}\n\n/*************************************************************************\n**************************************************************************\n PROCARGS - Process command line arguments\n Input:\n argc - system provided number of arguments on the command line\n argv - system provided list of command line argument strings\n Output:\n boostflag - contrast boost flag \"-b\"\n ifile - input image file name to be processed by this program\n 
ifile - output image file name to be created by this program\n**************************************************************************/\nvoid procargs(int argc, char **argv, int *boostflag, int *m1flag, char **oroot)\n{\n int a;\n\n *boostflag = FALSE;\n *m1flag = FALSE;\n/*\n if ((argc == 1)) {\n getVersion();\n exit(0);\n }\n*/\n if(argc == 2){\n *oroot = argv[1];\n return;\n }\n else\n {\n\tprintf(\"tham so nhap vao sai chi co mot dau ra\");\n }\n \n/*\n if((argc == 4) || (argc == 5)){\n a = 1;\n while(a < argc-2){\n if(strcmp(argv[a], \"-b\") == 0){\n *boostflag = TRUE;\n }\n else if(strcmp(argv[a], \"-m1\") == 0){\n *m1flag = TRUE;\n }\n else{\n fprintf(stderr, \"Unrecognized flag \\\"%s\\\"\\n\", argv[a]);\n fprintf(stderr,\n \"Usage : %s [-b] [-m1] <finger_img_in> <oroot>\\n\",\n argv[0]);\n fprintf(stderr,\n \" -b = contrast boost image\\n\");\n fprintf(stderr,\n \" -m1 = output \\\"*.xyt\\\" according to ANSI INCITS 378-2004\\n\");\n exit(1);\n }\n a++;\n }\n }\n else{\n fprintf(stderr, \"Invalid number of arguments on command line\\n\");\n fprintf(stderr,\n \"Usage : %s [-b] [-m1] <finger_img_in> <oroot>\\n\",\n argv[0]);\n fprintf(stderr,\n \" -b = contrast boost image\\n\");\n fprintf(stderr,\n \" -m1 = output \\\"*.xyt\\\" according to ANSI INCITS 378-2004\\n\");\n exit(2);\n }\n \n *ifile = argv[a++];\n *oroot = argv[a];\n*/\n}\n\n/********************************************************\n*********convert from MoC to Fingerprint format**********\n********************************************************/\nISOBYTE MoC2Fingerprint(Fingerprint *finger, FingerprintMoC *fingerMoC)\n{\n\tISOBYTE i, j;\n\t\n\t/* Fingerprint header */\n\n\t\n\t/* return normally */\n\treturn 0;\n}\n\n/********************************************************\n*******convert from Fingerprint to MoC format************\n********************************************************/\nchar Fingerprint2MoC (Fingerprint *finger, FingerprintMoC *fingerMoC)\n{\n\tISOBYTE i, j;\n\tISOWORD 
nMinutiae;\n\t\n\t/* Fingerprint Header */\n\tif(finger->nMinutiae < 30){\n\t\tnMinutiae = finger->nMinutiae;\n\t}else{\n\t\tnMinutiae = MOC_MINUTIAE_NUM;\n\t}\n\tfingerMoC->height_nMinutiae = SetFingerHeightNMinutiae(finger->height,nMinutiae);\n\t/* printf(\"nMinutiae = %d = %d\", nMinutiae, fingerMoC->height_nMinutiae & 0x3f);*/\n\tfingerMoC->width_quality = SetFingerWidthQuality(finger->width,finger->quality);\n\t\n\tfloat tmpEd;\n\tfloat tmpDra;\n\t/* Assign minutiae list */\n\tfor (i = 0; i < nMinutiae; i++){\n\t\tMinutia* min = finger->minutiae[i];\n\t\tMinutiaMoC* minMoC=&fingerMoC->minutiae[i];\n\t\tif (min==NULL) continue;\n\t\tminMoC->x_quality = SetXQuality(min->x,min->quality);\n\t\tminMoC->y_type_nNeighbors = SetYTypeNNeighbors((-min->y),min->type,min->nNeighbors);\n\t\t/* fingerMoC->minutiae[i].angle_ldr = (((ISOWORD)(finger->minutiae[i]->angle * 16 / M_PI + 0.5)) << 3) | finger->minutiae[i]->ldr; */\n\t\t/* 9-9-2015: now angle is already in range 0-31 */\n\t\t\n\t\tminMoC->angle_ldr = SetAngleLdr(min->angle,min->ldr); \n\t\t/* printf(\"angle = %f = %d\\n\", finger->minutiae[i]->angle, fingerMoC->minutiae[i].angle_ldr);*/\n\t\t/* Assign neighbors */\n\t\tmemset(minMoC->neighbors, 0, MOC_MAX_NEIGHBORS * sizeof(NeighborMoC));\n\t\tif(min->nNeighbors){\n\t\t\t\n\t\t\tfor(j = 0; j < min->nNeighbors; j++){\n\t\t\t\t/* ed and index */\n\t\t\t\ttmpDra = GetDra(finger, i, min->neighborIds[j])*(1<<DraFL);\n\t\t\t\tISOBYTE byteDra = (ISOBYTE)(tmpDra);\n\t\t\t\t\n\t\t\t\tminMoC->neighbors[j].dra_rc_index = SetDraRidgeCountId(byteDra, min->ridgeCount[j], min->neighborIds[j]); \n\t\t\t\t\n\t\t\t\ttmpEd = GetDistance(finger,i, min->neighborIds[j])*(1<<EdFL);\n\t\t\t\t\n\t\t\t\tminMoC->neighbors[j].ed = ((ISOWORD)tmpEd);\n\t\t\t\t\n\t\t\t\t/* dra, oda and ridgecount */\n\t\t\t}\n\t\t}\n\t}\n\t\n\treturn 0;\n}\n\n\n\n" }, { "alpha_fraction": 0.6177022457122803, "alphanum_fraction": 0.6341148614883423, "avg_line_length": 28.801443099975586, "blob_id": 
"52c44d89fc907ccb134be84f85b469a57826db26", "content_id": "c812e7a1cd441e1e3f955d7b9fb330641f484f48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8530, "license_type": "no_license", "max_line_length": 144, "num_lines": 277, "path": "/bkafis/bkafis/include/ISOTemplate.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*\r\n/* Copyright (C) 2009 DEIS - University of Bologna (Italy)\r\n/* All rights reserved.\r\n/*\r\n/* FVC sample source code.\r\n/* http:/*biolab.csr.unibo.it/fvcongoing\r\n/*\r\n/* This source code can be used by FVC participants to create FVC executables. \r\n/* It cannot be distributed and any other use is strictly prohibited.\r\n/*\r\n/* Warranties and Disclaimers:\r\n/* THIS SOFTWARE IS PROVIDED \"AS IS\" WITHOUT WARRANTY OF ANY KIND\r\n/* INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY,\r\n/* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.\r\n/* IN NO EVENT WILL UNIVERSITY OF BOLOGNA BE LIABLE FOR ANY DIRECT,\r\n/* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES,\r\n/* INCLUDING DAMAGES FOR LOSS OF PROFITS, LOSS OR INACCURACY OF DATA,\r\n/* INCURRED BY ANY PERSON FROM SUCH PERSON'S USAGE OF THIS SOFTWARE\r\n/* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\r\n/*\r\n*/\r\n/* --------------------------------------------------------------- \r\n FVC ISOTemplate program\r\n \r\n\t\t\t\t\t\tv 1.2 - March 2009\r\n v 1.1 - June 2006\r\n\t\t\t\t\t\tv 1.0 - March 2006\r\n\t\t\tv 1.3 - July 2015 by Nguyen Duc Minh - \r\n\t\t\t\t\t\tAdd data struct to support 2011 format\r\n\t\t\t\t\t\t\t\t\t\t\t\t\r\n --------------------------------------------------------------- */\r\n\r\n\r\n#ifndef ISOTEMPLATE_H\r\n#define ISOTEMPLATE_H\r\n\r\n#ifdef __cplusplus\r\nextern \"C\"\r\n{\r\n#endif\r\n\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n#define 
CPU_TYPE_LITTLEENDIAN\t\r\n\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n/*ERROR CODE*/\r\n#define ISO_SUCCESS\t\t\t\t\t\t\t\t0\r\n#define ISO_GENERICERROR\t\t\t\t\t1\r\n#define ISO_COORD_OUTSIDE_DIM\t\t\t2\r\n\r\n/*FINGER POSITION CODES*/\r\n#define N_FINGER_POSITION\t\t\t\t\t11\r\n#define UNKNOWN_FINGER\t\t\t\t\t\t0\r\n#define RIGHT_THUMB\t\t\t\t\t\t\t\t1\r\n#define RIGHT_INDEX_FINGER\t\t\t\t2\r\n#define RIGHT_MIDDLE_FINGER\t\t\t\t3\r\n#define RIGHT_RING_FINGER\t\t\t\t\t4\r\n#define RIGHT_LITTLE_FINGER\t\t\t\t5\r\n#define LEFT_THUMB\t\t\t\t\t\t\t\t6\r\n#define LEFT_INDEX_FINGER\t\t\t\t\t7\r\n#define LEFT_MIDDLE_FINGER\t\t\t\t8\r\n#define LEFT_RING_FINGER\t\t\t\t\t9\r\n#define LEFT_LITTLE_FINGER\t\t\t\t10\r\n\r\n/*IMPRESSION TYPE CODES*/\r\n#define N_IMPRESSION_TYPE\t\t\t\t\t9\r\n#define LIVE_SCAN_PLAIN\t\t\t\t\t\t0\r\n#define LIVE_SCAN_ROLLED\t\t\t\t\t1\r\n#define NONLIVE_SCAN_PLAIN\t\t\t\t2\r\n#define NONLIVE_SCAN_ROLLED\t\t\t\t3\r\n#define LATENT_IMPRESSION\t\t\t\t\t4\r\n#define LATENT_TRACING\t\t\t\t\t\t5\r\n#define\tLATENT_PHOTO\t\t\t\t\t\t\t6\r\n#define\tLATENT_LIFT\t\t\t\t\t\t\t\t7\r\n#define SWIPE\t\t\t\t\t\t\t\t\t\t\t8\r\n\r\n/*MINUTIA TYPE CODES*/\r\n#define N_MINUTIA_TYPE\t\t\t\t\t\t3\r\n#define OTHER\t\t\t\t\t\t\t\t\t\t\t0x00\r\n#define RIDGE_ENDING\t\t\t\t\t\t\t0x01\r\n#define RIDGE_BIFURCATION\t\t\t\t\t0x02\r\n\r\n/*EXTENDED DATA AREA TYPE CODE*/\r\n#define RIDGE_COUNT_DATA\t\t\t\t\t0x0001\r\n#define CORE_AND_DELTA_DATA\t\t\t\t0x0002\r\n#define ZONAL_QUALITY_DATA\t\t\t\t0x0003\r\n\r\n/*RIDGE COUNT EXTRACTION METHOD CODES*/\r\n#define N_RIDGE_COUNT_METHOD\t\t\t3\r\n#define\tNON_SPECIFIC\t\t\t\t\t\t\t0x00\r\n#define FOUR_NEIGHBOR\t\t\t\t\t\t\t0x01\r\n#define EIGHT_NEIGHBOR\t\t\t\t\t\t0x02\r\n\r\n/*STRUCT LENGTH*/\r\n#define RECORDHEADER_LEN\t\t\t\t\t24\r\n#define FINGERVIEWHEADER_LEN\t\t\t4\r\n#define MINUTIA_LEN\t\t\t\t\t\t\t\t6\r\n#define EXTENDEDDATAHEADER_LEN\t\t2\r\n#define 
EXTENDEDBLOCKHEADER_LEN\t\t4\r\n\r\n/*CORE AND DELTA INFO TYPE*/\r\n#define N_CORENDELTA_INFO_TYPE\t\t2\r\n#define NO_ANGULAR_INFO\t\t\t\t\t\t0x00\r\n#define ANGULAR_INFO\t\t\t\t\t\t\t0x01\r\n\r\n/*----------------------------------------------------------------------------------*/\r\n\r\ntypedef\tunsigned char\t\t\t\t\tISOBYTE;\r\ntypedef\tunsigned short int\t\tISOWORD;\r\ntypedef\tunsigned long int\t\t\tISODWORD;\r\ntypedef ISOBYTE\t\t\t\t\t\t\t\tISORESULT;\r\n\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n#pragma pack(push)\r\n#pragma pack(1)\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tISOBYTE\tformatID[4],\r\n\t\t\t\t\t\t\tspecVersion[4],\r\n\t\t\t\t\t\t\trecordLength[4],\r\n\t\t\t\t\t\t\tcaptureEquipment[2],\r\n\t\t\t\t\t\t\txImageSize[2],\r\n\t\t\t\t\t\t\tyImageSize[2],\r\n\t\t\t\t\t\t\txResolution[2],\r\n\t\t\t\t\t\t\tyResolution[2],\r\n\t\t\t\t\t\t\tnFingerViews,\r\n\t\t\t\t\t\t\treservedByte;\r\n\t\t\t\t} RecordHeader;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tISOBYTE\tminutiaType_xLocation[2],\r\n\t\t\t\t\t\t\treserved_yLocation[2],\r\n\t\t\t\t\t\t\tminutiaAngle,\r\n\t\t\t\t\t\t\tminutiaQuality;\r\n \t} FingerMinutiaeData;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tISOBYTE\ttypeIDcode[2],\r\n\t\t\t\t\t\t\tlength[2];\r\n\t\t\t\t} ExtendedBlockHeader;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tExtendedBlockHeader extendedBlockHeader;\r\n\t\t\t\t\tISOBYTE\t\t\t\tdata[65536];\r\n \t} ExtendedBlock;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tISOBYTE\t\t\tblockLength[2],\r\n\t\t\t\t\t\t\t\t\tnExtendedBlock;\r\n\t\t\t\t\tExtendedBlock\t*extendedBlock;\r\n \t} ExtendedData;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tISOBYTE\tfingerPosition,\r\n\t\t\t\t\t\t\tnView_imprType,\r\n\t\t\t\t\t\t\tfingerQuality,\r\n\t\t\t\t\t\t\tnMinutiae;\r\n\t\t\t\t} FingerViewHeader;\r\n\r\ntypedef 
struct\r\n\t\t\t\t{\r\n\t\t\t\t\tFingerViewHeader\t\tfingerViewHeader;\r\n\t\t\t\t\tFingerMinutiaeData\t*fingerMinutiaeData;\r\n \t} FingerViewRecord;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tRecordHeader\t\t\trecordHeader;\r\n\t\t\t\t\tFingerViewRecord\t*fingerViewRecord;\r\n\t\t\t\t\tExtendedData\t\t\t*extendedData;\r\n \t} ISOTemplate;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tISOBYTE\tindex1,\r\n\t\t\t\t\t\t\tindex2,\r\n\t\t\t\t\t\t\tcount;\r\n\t\t\t\t} RidgeCountData;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tISOBYTE\t\t\t\t\tmethod;\r\n\t\t\t\t\tISOWORD\t\t\t\t\tnRidgeCountData;\r\n\t\t\t\t\tRidgeCountData\t*ridgeCountData;\r\n\t\t\t\t} RidgeCount;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tISOBYTE\tcoreInfoType_Xcoordinate[2],\r\n\t\t\t\t\t\t\t\t\treserved_Ycoordinate[2],\r\n\t\t\t\t\t\t\t\t\tangle;\r\n\t\t\t\t} CoreData;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tISOBYTE\tdeltaInfoType_Xcoordinate[2],\r\n\t\t\t\t\t\t\t\t\treserved_Ycoordinate[2],\r\n\t\t\t\t\t\t\t\t\tangle1,\r\n\t\t\t\t\t\t\t\t\tangle2,\r\n\t\t\t\t\t\t\t\t\tangle3;\r\n\t\t\t\t} DeltaData;\r\n\r\ntypedef struct\r\n\t\t\t\t{\r\n\t\t\t\t\tISOBYTE\t\treserved_nCores;\r\n\t\t\t\t\tCoreData\t*coreData;\r\n\t\t\t\t\tISOBYTE\t\treserved_nDeltas;\r\n\t\t\t\t\tDeltaData\t*deltaData;\r\n\t\t\t\t} CoreAndDelta;\r\n\r\n#pragma pack(pop)\r\n\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n#ifdef CPU_TYPE_LITTLEENDIAN\r\n\t#define BigEndian\t\t\t\tBigEndianConv\r\n\t#define BigEndianWord\t\tBigEndianConvWord\r\n\t#define BigEndianDWord\tBigEndianConvDWord\r\n#else\r\n\t#define BigEndian\r\n\t#define BigEndianWord \r\n\t#define BigEndianDWord\r\n#endif\r\n\r\n#ifdef CPU_TYPE_LITTLEENDIAN\r\n\tISOBYTE * BigEndianConv (ISOBYTE *pBuff,ISODWORD nbytes);\r\n\tISOWORD * BigEndianConvWord (ISOWORD *pBuff);\r\n\tISODWORD * BigEndianConvDWord (ISODWORD 
*pBuff);\r\n#endif\r\n\r\n/*----------------------------------------------------------------------------------*/\r\n\r\nISORESULT InitRecordHeader (ISOWORD imgWidth,ISOWORD imgHeight,ISOWORD xRes,ISOWORD yRes,ISOBYTE nViews);\r\nISORESULT InitFingerViewHeader (ISOBYTE index,ISOBYTE fingPos,ISOBYTE nView,ISOBYTE imprType,ISOBYTE fingQuality,ISOBYTE nMinutiae);\r\nISORESULT SetMinutiaeData (ISOBYTE index,ISOBYTE minI,ISOBYTE minType,ISOWORD xCoord,ISOWORD yCoord,ISOBYTE minAngle,ISOBYTE minQuality);\r\nISORESULT InitExtendedDataHeader (ISOBYTE index,ISOBYTE nBlocks);\r\nISORESULT SetExtendedDataBlock (ISOBYTE index,ISOBYTE blkI,ISOWORD typeID,ISOWORD dataLength,ISOBYTE *data);\r\nISORESULT SaveISOTemplate (char *path);\r\nISORESULT LoadISOTemplate (char *path);\r\nISORESULT CleanISOTemplate ();\r\n\r\nISORESULT GetRecordHeader (ISOWORD *imgWidth,ISOWORD *imgHeight,ISOWORD *xRes,ISOWORD *yRes,ISOBYTE *nViews);\r\nISORESULT GetFingerViewHeader (ISOBYTE index,ISOBYTE *fingPos,ISOBYTE *nView,ISOBYTE *imprType,ISOBYTE *fingQuality,ISOBYTE *nMinutiae);\r\nISORESULT GetMinutiaeData (ISOBYTE index,ISOBYTE minI,ISOBYTE *minType,ISOWORD *xCoord,ISOWORD *yCoord,ISOBYTE *minAngle,ISOBYTE *minQuality);\r\nISORESULT GetExtendedDataHeader (ISOBYTE index,ISOBYTE *nBlocks);\r\nISORESULT GetExtendedDataBlock (ISOBYTE index,ISOBYTE blkI,ISOWORD *typeID,ISOWORD *dataLength,ISOBYTE *data);\r\n\r\nISORESULT InitRidgeCountBlock (ISOBYTE meth,ISOWORD n_ridge_count_data);\r\nISORESULT SetRidgeCountDataBlock (ISOWORD ridgecountI,ISOBYTE min_id1,ISOBYTE min_id2,ISOBYTE r_count);\r\nISORESULT AddRidgeCountBlock (ISOBYTE index,ISOBYTE blkI);\r\n\r\nISORESULT InitCoreAndDeltaBlock (ISOBYTE n_cores,ISOBYTE n_deltas);\r\nISORESULT SetCoreDataBlock (ISOBYTE coreI,ISOBYTE core_info_type,ISOWORD xCoord,ISOWORD yCoord,ISOBYTE angle);\r\nISORESULT SetDeltaDataBlock (ISOBYTE deltaI,ISOBYTE delta_info_type,ISOWORD xCoord,ISOWORD yCoord,ISOBYTE angle1,ISOBYTE angle2,ISOBYTE angle3);\r\nISORESULT 
AddCoreAndDeltaBlock (ISOBYTE index,ISOBYTE blkI);\r\n\r\nstatic ISORESULT CreateRidgeCountBuff (ISOBYTE *pBuff,ISOWORD *pBuff_len);\r\nstatic ISORESULT CleanRidgeCount ();\r\nstatic ISORESULT CreateCoreAndDeltaBuff (ISOBYTE *pBuff,ISOWORD *pBuff_len);\r\nstatic ISORESULT CleanCoreAndDelta ();\r\n\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n#ifdef __cplusplus\r\n}\r\n#endif\r\n\r\n#endif /*ISOTEMPLATE_H*/" }, { "alpha_fraction": 0.5946808457374573, "alphanum_fraction": 0.6074467897415161, "avg_line_length": 30.482759475708008, "blob_id": "0f571d7da269904ca0ac67dbb0758e475a3b80aa", "content_id": "a6abfec821c3f88f04b0f7218a846e6e9e89dec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 940, "license_type": "no_license", "max_line_length": 83, "num_lines": 29, "path": "/bkafis/bkafis/include/array.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tpairarray.h\r\n\tDescription: Data structure to present array that can grow in size\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n *********************************************************************/\r\n#ifndef _PAIRARRAY_H_\r\n#define _PAIRARRAY_H_\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include <math.h>\r\n/*\t\t\tArray of pairs of indices used to store pairs of mapped minutiae\t\t*/\r\ntypedef struct \r\n{\r\n unsigned char index1;\t/* Index of minutia in fingerprint 1 */\r\n unsigned char index2;\t/* Index of minutia in fingerprint 1 */\r\n\tvoid* data;\r\n}\tPair;\r\n\r\ntypedef struct {\r\n\tPair** list;\r\n\tunsigned int nPairs;\r\n\tunsigned int maxPairs;\r\n} PairArray;\r\n\r\nchar InitializePairArray(PairArray* pairArray,unsigned int maxPairs);\r\nchar AddPairArray(PairArray* pairArray,unsigned char index1, unsigned char index2);\r\nchar CleanPairArray(PairArray* pairArray);\r\n#endif" }, { 
"alpha_fraction": 0.6298342347145081, "alphanum_fraction": 0.6519337296485901, "avg_line_length": 17.200000762939453, "blob_id": "a2ba24e91fd0b8c14cde8ed8c0e30659209c594c", "content_id": "6b389ead426139ab8c1ed18cc9358f0c7be37064", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 181, "license_type": "no_license", "max_line_length": 52, "num_lines": 10, "path": "/Scan Image with Sensor/makefile", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "main: main.o Pi.o User.o\n\tgcc -o main main.o Pi.o User.o -lbcm2835 -lwiringPi\nmain.o: main.c\n\tgcc -c main.c\nPi.o: Pi.c\n\tgcc -c Pi.c\nUser.o: User.c\n\tgcc -c User.c\nclean:\n\trm *.o main" }, { "alpha_fraction": 0.5950066447257996, "alphanum_fraction": 0.6371259689331055, "avg_line_length": 32.300655364990234, "blob_id": "41cf16af89dbee1cfe577f46d2ac89617cb01b99", "content_id": "2730e7322b29269ebee931c2e66ecf584cf6d18c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5247, "license_type": "no_license", "max_line_length": 115, "num_lines": 153, "path": "/Scan Image with Sensor/User.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "\r\n/*\r\n* $HeadURL: svn://192.168.1.115/M3_Module_Host/trunk/App/NEXT_User_Lib.h $\r\n*\r\n* Created on: Nov 18, 2013\r\n* Author: Ryan Higgins\r\n*\r\n* Last Modified: $LastChangedDate: 2014-05-08 15:05:27 -0700 (Thu, 08 May 2014) $\r\n* Last Modified by: $LastChangedBy: Ryan $\r\n* LastChangedRevision : $LastChangedRevision: 170 $\r\n*\r\n* This software is provided \"as is\". 
NEXT Biometrics makes no warranty of any kind, either\r\n* express or implied, including without limitation any implied warranties of condition, uninterrupted\r\n* use, merchantability, or fitness for a particular purpose.\r\n*\r\n* This document as well as the information or material contained is copyrighted.\r\n* Any use not explicitly permitted by copyright law requires prior consent of NEXT Biometrics.\r\n* This applies to any reproduction, revision, translation and storage.\r\n*\r\n*\r\n*\tDescription:\r\n*\r\n*\t\tThis file contains the defines, data structures, and function prototypes for the NEXT Embedded SDK\r\n*\t\tSee NEXT_User_Lib.c for individual function descriptions.\r\n*\r\n*/\r\n\r\n#ifndef USER_H\r\n#define USER_H\r\n\r\n\r\n#include <stdint.h>\r\n#include <stdbool.h>\n\r\n#include \"Pi.h\"\r\n\r\n///////////////////////////////////////////////\r\n//Defines\r\n///////////////////////////////////////////////\r\n\r\n#ifndef NULL\r\n#define NULL 0\r\n#endif\r\n\r\n//#define USE_MST_AS_GPIO\t\t//Uncomment to configure the MST Pin as a GPIO, default configuration is as an interrupt\r\n\r\n#define MCU_BOOT_DELAY\t\t\t\t\t\t180\r\n#define MAX_BUSY_COUNT\t\t\t\t\t\t500\r\n#define NUMBER_OF_COLUMNS\t\t\t\t\t256\r\n#define NUMBER_OF_ROWS\t\t\t\t\t\t180\r\n#define PARTIAL_START_COLUMN\t\t\t\t64\r\n#define PARTIAL_END_COLUMN\t\t\t\t\t192\r\n#define PARTIAL_START_ROW\t\t\t\t\t45\r\n#define PARTIAL_NUMBER_OF_COLUMNS\t\t\t128\r\n#define PARTIAL_NUMBER_OF_ROWS\t\t\t\t90\r\n#define SCAN_OR_PARTIAL_IMAGE_US_DELAY\t\t500\r\n#define SCAN_IMAGE_INITIAL_DELAY\t\t\t15\r\n#define PARTIAL_IMAGE_INITIAL_DELAY\t\t\t5\r\n#define DUMMY_DATA\t\t\t\t\t\t\t0xC0\r\n#define CONNECT_DIAGNOSTIC_STRING_SIZE\t\t8\r\n#define CONNECT_DIAGNOSTIC_RESPONSE_SIZE\t9\r\n\r\n//////////Normal Processing Codes///////////////\r\n#define BUSY\t\t\t\t\t\t\t\t0xB0\r\n#define READY\t\t\t\t\t\t\t\t0x01\r\n\r\n\r\n/////////////Module Error Codes////////////////\r\n#define 
NO_ERROR\t\t\t\t\t0x00\r\n#define ERR_MOD_PARAM_FIELD\t\t\t0x10 \t//Error in parameter fields\r\n#define ERR_MOD_DATA_LENGTH\t\t\t0x11\t//Data length error\r\n#define ERR_MOD_DATA_FIELD\t\t\t0x12\t//Error in the data field\r\n#define ERR_MOD_UNKNOWN_COMMAND\t\t0x30 \t//Unknown command\r\n#define ERR_MOD_OP_MODE\t\t\t\t0x31\t//Conditions of use not satisfied\r\n#define ERR_MOD_COM\t\t\t\t\t0x32\t//Communications error\r\n#define ERR_MOD_SENSOR_FAIL\t\t\t0x33\t//Sensor hardware failure\r\n#define ERR_MOD_DCA\t\t\t\t\t0x34\t//DCA internal error\r\n#define ERR_MOD_MCU\t\t\t\t\t0x35\t//MCU internal error\r\n\r\n\r\n\r\n/////////////API Error Codes////////////////////\r\n#define ERR_API_SPI_CMD_STAGE\t\t\t\t0xCC\r\n#define ERR_API_SPI_DATA_STAGE\t\t\t\t0XAA\r\n#define ERR_API_SPI_RES_STAGE\t\t\t\t0xBB\r\n#define ERR_API_MODULE_CONNECT\t\t\t\t0x40\r\n#define ERR_API_UNKNOWN_COMMAND\t\t\t\t0x03\r\n#define ERR_API_MODULE_STOPPED\t\t\t\t0x50\r\n\r\n\r\n\r\n//////////////Instruction Codes/////////////////\r\n#define INS_GET_LINE\t\t\t\t\t\t0x10\r\n#define INS_GET_PARTIAL\t\t\t\t\t\t0x11\r\n#define P1_FIRST_LINE\t\t\t\t\t\t0x80\r\n#define P1_INTERMEDIATE_LINE\t\t\t\t0x00\r\n#define P1_LAST_LINE\t\t\t\t\t\t0x01\r\n#define INS_FINGER_PRESENT\t\t\t\t\t0x12\r\n#define INS_GET_SERIAL\t\t\t\t\t\t0x20\r\n#define INS_DIAGNOSTIC\t\t\t\t\t\t0x26\r\n#define P1_CONNECT_DAIGNOSTIC\t\t\t\t0x01\r\n#define INS_SET_MO_STOP\t\t\t\t\t\t0x83\r\n#define INS_GET_FIMRWARE_VER\t\t\t\t0xF5\r\n\r\n\r\n\r\n///////////////////////////////////////////////\r\n//Module structures\r\n///////////////////////////////////////////////\r\n\r\ntypedef struct {\r\n\tuint8_t Column[256]; //A row is 256 columns of 8-bit pixels\r\n}ROW_t;\r\n\r\ntypedef struct {\r\n\tROW_t Row[180];\t\t//An image is 180 rows of 256 columns. 
Total 46080 pixels\r\n}NEXT_SENSOR_IMAGE_t;\r\n\r\ntypedef struct{\r\n\tbool connected;\t\t\t\t\t//True if module has been powered up and successful SPI handshake (NEXT_Connect_Diagnostic)\r\n\tbool stopped;\t\t\t\t\t// True if module has been connected and placed in stop mode by the host\r\n\tuint8_t firmware_ver[3];\t\t//Firmware version on module\r\n\tuint8_t serial_no[12];\t\t\t//Serial number of MCU on module\r\n\tNEXT_SENSOR_IMAGE_t image;\t\t//Module image\r\n}NEXT_MODULE_t;\nuint8_t data[180][256];\r\n\r\n////////////////////////////////////////////////\r\n//Function Prototypes\nvoid MSP_interupt (void);\r\n///////////////////////////////////////////////\r\n\r\n////////NEXT API Functions/////////////////////\r\nuint8_t NEXT_Module_Connect(NEXT_MODULE_t*);\r\nvoid NEXT_Module_Disconnect(NEXT_MODULE_t*);\r\nuint8_t NEXT_Module_Stop (NEXT_MODULE_t* , uint8_t);\r\nuint8_t NEXT_Module_ScanImage(NEXT_MODULE_t *);\r\nuint8_t NEXT_Module_PartialImage(NEXT_MODULE_t *);\r\nuint8_t NEXT_Module_FingerPresent(NEXT_MODULE_t* , uint8_t* , bool);\r\nuint8_t NEXT_Module_Serial(NEXT_MODULE_t *);\r\nuint8_t NEXT_Module_FW_Version(NEXT_MODULE_t *);\r\n\r\n\r\n////////NEXT M0 Protocol Communication Functions///////////\r\nuint8_t NEXT_Module_SPI_Command(uint8_t, uint8_t, uint8_t, uint8_t);\r\nuint8_t NEXT_Module_SPI_Data(uint8_t*, uint8_t);\r\nuint8_t NEXT_Module_SPI_Response(void*, uint16_t, uint16_t);\r\nuint8_t NEXT_Connect_Diagnostic(NEXT_MODULE_t*);\r\n\r\n//////////////Other Functions/////////////////\r\nuint8_t NEXT_Flush_Image(NEXT_SENSOR_IMAGE_t*);\r\n\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.5812068581581116, "alphanum_fraction": 0.6298409104347229, "avg_line_length": 29.72381019592285, "blob_id": "bcd4ae67f36218c5e0c2303e4bd7e67be173849e", "content_id": "d9f93cab5a26fa1105d1fccddf7d14c6019d5744", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3331, "license_type": "no_license", "max_line_length": 112, 
"num_lines": 105, "path": "/raspberry_sensor_c/User.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#ifndef USER_H\r\n#define USER_H\r\n\r\n\r\n#include <stdint.h>\r\n#include <stdbool.h>\r\n\r\n#include \"Pi.h\"\r\n\r\n///////////////////////////////////////////////\r\n//Defines\r\n///////////////////////////////////////////////\r\n\r\n#ifndef NULL\r\n#define NULL 0\r\n#endif\r\n\r\n#define MCU_BOOT_DELAY\t\t\t\t\t\t180\r\n#define MAX_BUSY_COUNT\t\t\t\t\t\t500\r\n#define NUMBER_OF_COLUMNS\t\t\t\t\t256\r\n#define NUMBER_OF_ROWS\t\t\t\t\t\t180\r\n#define PARTIAL_START_COLUMN\t\t\t\t64\r\n#define PARTIAL_END_COLUMN\t\t\t\t\t192\r\n#define PARTIAL_START_ROW\t\t\t\t\t45\r\n#define PARTIAL_NUMBER_OF_COLUMNS\t\t\t128\r\n#define PARTIAL_NUMBER_OF_ROWS\t\t\t\t90\r\n#define SCAN_OR_PARTIAL_IMAGE_US_DELAY\t\t500\r\n#define SCAN_IMAGE_INITIAL_DELAY\t\t\t15\r\n#define PARTIAL_IMAGE_INITIAL_DELAY\t\t\t5\r\n#define DUMMY_DATA\t\t\t\t\t\t\t0xC0\r\n#define CONNECT_DIAGNOSTIC_STRING_SIZE\t\t8\r\n#define CONNECT_DIAGNOSTIC_RESPONSE_SIZE\t9\r\n\r\n//////////Normal Processing Codes///////////////\r\n#define BUSY\t\t\t\t\t\t\t\t0xB0\r\n#define READY\t\t\t\t\t\t\t\t0x01\r\n\r\n\r\n/////////////Module Error Codes////////////////\r\n#define NO_ERROR\t\t\t\t\t0x00\r\n#define ERR_MOD_PARAM_FIELD\t\t\t0x10 \t//Error in parameter fields\r\n#define ERR_MOD_DATA_LENGTH\t\t\t0x11\t//Data length error\r\n#define ERR_MOD_DATA_FIELD\t\t\t0x12\t//Error in the data field\r\n#define ERR_MOD_UNKNOWN_COMMAND\t\t0x30 \t//Unknown command\r\n#define ERR_MOD_OP_MODE\t\t\t\t0x31\t//Conditions of use not satisfied\r\n#define ERR_MOD_COM\t\t\t\t\t0x32\t//Communications error\r\n#define ERR_MOD_SENSOR_FAIL\t\t\t0x33\t//Sensor hardware failure\r\n#define ERR_MOD_DCA\t\t\t\t\t0x34\t//DCA internal error\r\n#define ERR_MOD_MCU\t\t\t\t\t0x35\t//MCU internal error\r\n\r\n\r\n\r\n/////////////API Error Codes////////////////////\r\n#define ERR_API_SPI_CMD_STAGE\t\t\t\t0xCC\r\n#define 
ERR_API_SPI_DATA_STAGE\t\t\t\t0XAA\r\n#define ERR_API_SPI_RES_STAGE\t\t\t\t0xBB\r\n#define ERR_API_MODULE_CONNECT\t\t\t\t0x40\r\n#define ERR_API_UNKNOWN_COMMAND\t\t\t\t0x03\r\n#define ERR_API_MODULE_STOPPED\t\t\t\t0x50\r\n\r\n\r\n\r\n//////////////Instruction Codes/////////////////\r\n#define INS_GET_LINE\t\t\t\t\t\t0x10\r\n#define INS_GET_PARTIAL\t\t\t\t\t\t0x11\r\n#define P1_FIRST_LINE\t\t\t\t\t\t0x80\r\n#define P1_INTERMEDIATE_LINE\t\t\t\t0x00\r\n#define P1_LAST_LINE\t\t\t\t\t\t0x01\r\n#define INS_FINGER_PRESENT\t\t\t\t\t0x12\r\n#define INS_GET_SERIAL\t\t\t\t\t\t0x20\r\n#define INS_DIAGNOSTIC\t\t\t\t\t\t0x26\r\n#define P1_CONNECT_DAIGNOSTIC\t\t\t\t0x01\r\n#define INS_SET_MO_STOP\t\t\t\t\t\t0x83\r\n#define INS_GET_FIMRWARE_VER\t\t\t\t0xF5\r\n\r\n\r\n\r\n///////////////////////////////////////////////\r\n//Module structures\r\n///////////////////////////////////////////////\r\n\r\ntypedef struct {\r\n\tuint8_t Column[256]; //A row is 256 columns of 8-bit pixels\r\n}ROW_t;\r\n\r\ntypedef struct {\r\n\tROW_t Row[180];\t\t//An image is 180 rows of 256 columns. 
Total 46080 pixels\r\n}NEXT_SENSOR_IMAGE_t;\r\n\r\ntypedef struct{\r\n\tbool connected;\t\t\t\t\t//True if module has been powered up and successful SPI handshake (NEXT_Connect_Diagnostic)\r\n\tbool stopped;\t\t\t\t\t// True if module has been connected and placed in stop mode by the host\r\n\tuint8_t firmware_ver[3];\t\t//Firmware version on module\r\n\tuint8_t serial_no[12];\t\t\t//Serial number of MCU on module\r\n\tNEXT_SENSOR_IMAGE_t image;\t\t//Module image\r\n}NEXT_MODULE_t;\r\nuint8_t NEXT_Module_ScanImage(NEXT_MODULE_t *,uint8_t []);\r\nuint8_t NEXT_Module_FingerPresent(NEXT_MODULE_t* , uint8_t* , bool);\r\nuint8_t NEXT_Module_SPI_Command(uint8_t, uint8_t, uint8_t, uint8_t);\r\nuint8_t NEXT_Module_SPI_Data(uint8_t*, uint8_t);\r\nuint8_t NEXT_Module_SPI_Response(void*, uint16_t, uint16_t);\r\n\r\n\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.6652719378471375, "alphanum_fraction": 0.6820083856582642, "avg_line_length": 19, "blob_id": "ea833793b6951f98b65afaa765ebd5a3edccb759", "content_id": "53340cc8764a493a37811dd63192741a2d051471", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 239, "license_type": "no_license", "max_line_length": 53, "num_lines": 12, "path": "/raspberry_sensor_c/makefile", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "main: main.o Pi.o User.o ScanImage.o\n\tgcc -o main main.o Pi.o User.o ScanImage.o -lbcm2835\nmain.o: main.c\n\tgcc -c main.c\nPi.o: Pi.c\n\tgcc -c Pi.c\nUser.o: User.c\n\tgcc -c User.c\nScanImage.o: ScanImage.c\n\tgcc -c ScanImage.c\nclean:\n\trm *.o main" }, { "alpha_fraction": 0.5659918785095215, "alphanum_fraction": 0.6469635367393494, "avg_line_length": 21.75, "blob_id": "05568427c4131d7370d9e643690084976877ce20", "content_id": "f2db882d545b13343f369a57bb7916a5258524ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1235, "license_type": "no_license", "max_line_length": 91, "num_lines": 52, 
"path": "/raspberry_sensor_c/Pi.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#include <stdint.h>\r\n#include \"Pi.h\"\r\n#include <bcm2835.h>\r\n\r\n\r\nvoid BSP_Config_HW (void){\r\n\r\n\tbcm2835_init();\r\n\tBSP_Module_Reset_Configure();\r\n\tBSP_SPI_Configure();\r\n}\r\n\r\nvoid BSP_Module_Reset_Configure(void){\r\n\r\n\tbcm2835_gpio_fsel(PIN_RESET, BCM2835_GPIO_FSEL_OUTP);\r\n\tMODULE_RESET_HIGH;\r\n}\r\n\r\n\r\n\r\nvoid BSP_SPI_Configure (void){\r\n\r\n bcm2835_spi_begin();\r\n bcm2835_spi_setBitOrder(BCM2835_SPI_BIT_ORDER_MSBFIRST); // MSB First\r\n bcm2835_spi_setDataMode(BCM2835_SPI_MODE0); // CPOL = 0, CPHA = 0\r\n bcm2835_spi_setClockDivider(BCM2835_SPI_CLOCK_DIVIDER_32); // 32 = 128ns = 7.8125MHz\r\n bcm2835_spi_chipSelect(BCM2835_SPI_CS0); // The default\r\n bcm2835_spi_setChipSelectPolarity(BCM2835_SPI_CS0, LOW); // the default\r\n\r\n}\r\n\r\nvoid BSP_SPI_ReadWriteBuffer (uint8_t* txBuff, uint8_t* rxBuff, uint16_t length){\r\n\r\n\tbcm2835_spi_transfernb( (char*) txBuff, (char*) rxBuff, length);\r\n}\r\n\r\nvoid BSP_Delay_ms(uint16_t millis){\r\n\tbcm2835_delay(millis);\r\n}\r\n\r\nvoid BSP_Delay_us(uint64_t micros){\r\n\tbcm2835_delayMicroseconds(micros);\r\n}\r\n\r\nvoid BSP_Module_nRST_High(void){\r\n\tMODULE_RESET_HIGH;\r\n}\r\n\r\nvoid BSP_Module_nRST_Low(void){\r\n\tMODULE_RESET_LOW;\r\n\tBSP_Delay_us(500); \r\n}\r\n" }, { "alpha_fraction": 0.551068902015686, "alphanum_fraction": 0.5676959753036499, "avg_line_length": 22.823530197143555, "blob_id": "c5155fd0b161d072f24810e2e2860f067acd3ad8", "content_id": "be87250edd786baf0d6c85b75dc5271296d1465f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 842, "license_type": "no_license", "max_line_length": 73, "num_lines": 34, "path": "/bkafis/bkafis/include/extract.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": 
"/*********************************************************************\r\n\tfingerprint.h\r\n\tDescription: Data structure to present fingerprint inside BKAFIS package\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n\t\r\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\r\n\t- add function DetectLDR\r\n\t- add params for DetectLDR into struct BkafisParams\r\n *********************************************************************/\r\n\r\n\r\n\r\n#ifndef _EXTRACT_H_\r\n#define _EXTRACT_H_\r\n\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include <math.h>\r\n\r\n#include <lfs.h>\r\n#include <sys/param.h>\r\n#include <an2k.h>\r\n#include <lfs.h>\r\n#include <imgdecod.h>\r\n#include <imgboost.h>\r\n#include <img_io.h>\r\n#include <version.h>\r\n#include <fingerprint.h>\r\n\t\t\t\t\t\t\t\t\t\r\nint sort_minutiae_quality(MINUTIAE *minutiae);\r\nchar extract(int boostflag, int m1flag, Fingerprint* finger);\r\n\r\n#endif" }, { "alpha_fraction": 0.5759717226028442, "alphanum_fraction": 0.6590105891227722, "avg_line_length": 27.350000381469727, "blob_id": "6199dea66efe9707ada682a421ec1cfa8967615c", "content_id": "ac7548a8d418aaa885f44beace7712ef0e76fa0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 566, "license_type": "no_license", "max_line_length": 53, "num_lines": 20, "path": "/bkafis/exports/include/lcd.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#ifndef LCD_H\n#define LCD_H\n#include <bcm2835.h>\n#define PIN RPI_V2_GPIO_P1_33\n#define LCD_RS RPI_V2_GPIO_P1_38 \n#define LCD_RW RPI_V2_GPIO_P1_37\n#define LCD_EN RPI_V2_GPIO_P1_36\n#define LCD_D4 RPI_V2_GPIO_P1_35 \n#define LCD_D5 RPI_V2_GPIO_P1_33\n#define LCD_D6 RPI_V2_GPIO_P1_31\n#define LCD_D7 RPI_V2_GPIO_P1_29 \nvoid LCD_Enable(); \nvoid LCD_Send4Bit( uint8_t );\nvoid LCD_SendCommand( uint8_t );\nvoid LCD_Init();\nvoid LCD_PutChar( uint8_t );\nvoid LCD_Puts( uint8_t *);\nvoid LCD_Clear();\nvoid LCD_Gotoxy( uint8_t , uint8_t 
);\n#endif" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.625, "avg_line_length": 15, "blob_id": "73f09044cb7e5fb1f29e4cbcc3337bc179ca19ce", "content_id": "b93c32571528631f900fd27ad1ab3ca544a14ce6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 32, "license_type": "no_license", "max_line_length": 18, "num_lines": 2, "path": "/bkafis/arch.mak", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "ARCH_FLAG = \nARCH_FLAG = -fPIC\n" }, { "alpha_fraction": 0.6915887594223022, "alphanum_fraction": 0.6947040557861328, "avg_line_length": 31.200000762939453, "blob_id": "0b9594f5c484a7392a74ec2ab0c7f99307f3fbe5", "content_id": "497bd690c8ab4c627fa533def5077b361c065064", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 321, "license_type": "no_license", "max_line_length": 56, "num_lines": 10, "path": "/bkafis/bkafis/bin/xoafile.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#include <stdio.h>\nint main()\n{\n\tunlink(\"/home/pi/Desktop/bkafis/bkafis/bin/image.txt\");\n\tunlink(\"/home/pi/Desktop/bkafis/bkafis/bin/*.bin\");\n\tunlink(\"/home/pi/Desktop/bkafis/bkafis/bin/*.MoC.txt\");\n\tunlink(\"/home/pi/Desktop/bkafis/bkafis/bin/*.bin\");\n\tunlink(\"/home/pi/Desktop/bkafis/bkafis/bin/*.MoC.txt\");\n\treturn 0;\n}" }, { "alpha_fraction": 0.5838292837142944, "alphanum_fraction": 0.6005225777626038, "avg_line_length": 26.712499618530273, "blob_id": "ea2e970e9c2efd42c371dad683ca8b7140f97dbf", "content_id": "20e62cdac98c7eecc285e7bf87d61aec6b6d7fc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6889, "license_type": "no_license", "max_line_length": 179, "num_lines": 240, "path": "/bkafis/bkafis/src/lib/bkafisextract/extract.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": 
"/*********************************************************************\r\n\tmatcher.c\r\n\tDescription: Function implementation of BKAFIS extractor\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n\t\r\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\r\n\t- add function DetectLDR\r\n\t- add params for DetectLDR into struct BkafisParams\r\n *********************************************************************/\r\n\r\n#include <extract.h>\r\n#include <cuong.h>\r\n#include <Pi.h>\r\n#include <User.h>\r\n#include <ScanImage.h>\r\nint sort_minutiae_quality(MINUTIAE *minutiae)\r\n{\r\n/* Edited by Minh Nguyen, August 3 2015 */\r\n\t/* Add code to find neighbors according to BKAFIS algorithm */\r\n\t/* First, convert minutiae structure of lfs algorithm into BKAFIS data structure */\r\n\t/* here we need to sort the array minutiae according to quality */\r\n\t/* call function bubble_sort_double_dec_2 as follows */\r\n\t\r\n\tunsigned int num = minutiae->num;\r\n\tunsigned int i,j,k;\r\n\t/* Allocate list of sequential indices. */\r\n\tint* order = (int *)malloc(num * sizeof(int));\r\n\tif(order == (int *)NULL){\r\n\t\tfprintf(stderr, \"ERROR : sort_indices_double_inc : malloc : order\\n\");\r\n\t\treturn(-400);\r\n\t}\r\n\t\t\t\r\n\t/* construct rank array from the minutiae quality. */\r\n\tdouble* ranks = (double *)malloc(num * sizeof(double));\r\n\tif(ranks == NULL){\r\n fprintf(stderr, \"ERROR : sort_minutiae_y_x : malloc : ranks\\n\");\r\n return(-310);\r\n\t}\r\n \r\n\tfor(i = 0; i < num; i++){\r\n ranks[i] = minutiae->list[i]->reliability*100;\r\n\t order[i] = i;\r\n\t}\r\n\t\r\n\t/*\r\n\tif (debug)\r\n\t\tprintf(\"Sort minutia list according to quality\\n\");\r\n\t*/\r\n\t\r\n\t/* Get sorted order of minutiae. */\r\n\tbubble_sort_double_dec_2(ranks, order, num);\r\n\r\n\t/* Construct new minutiae list: Minh Nguyen - 12 August 2015*/\r\n\t/* Allocate new MINUTIA list to hold sorted minutiae. 
*/\r\n\t\r\n\t/*\r\n\tif (debug)\r\n\t\tprintf(\"Get the sort resulted list\\n\");\r\n\t*/\r\n\t\r\n\tMINUTIA** newlist = (MINUTIA **)malloc(minutiae->num * sizeof(MINUTIA *));\r\n\tif(newlist == (MINUTIA **)NULL){\r\n\t\tfree(ranks);\r\n\t\tfree(order);\r\n\t\tfprintf(stderr, \"ERROR : sort_minutiae_y_x : malloc : newlist\\n\");\r\n\t\treturn(-311);\r\n\t}\r\n\r\n\t/* Put minutia into sorted order in new list. */\r\n\tfor(i = 0; i < minutiae->num; i++)\r\n\t\tnewlist[i] = minutiae->list[order[i]];\r\n\r\n\t/* Deallocate non-sorted list of minutia pointers. */\r\n\tfree(minutiae->list);\r\n\t/* Assign new sorted list of minutia to minutiae list. */\r\n\tminutiae->list = newlist;\r\n\r\n\t/* Free the working memories supporting the sort. */\r\n\tfree(order);\r\n\tfree(ranks);\r\n}\r\n\r\nchar extract(int boostflag, int m1flag, Fingerprint* finger)\r\n{\r\n\tunsigned char *idata, *bdata;\r\n\tuint8_t dulieu[46080];\r\n\tint img_type;\r\n\tint ilen, iw, ih, id, ippi, bw, bh, bd,k=0;\r\n\tdouble ippmm;\r\n\tint img_idc, img_imp;\r\n\tint *direction_map, *low_contrast_map, *low_flow_map;\r\n\tint *high_curve_map, *quality_map;\r\n\tint map_w, map_h;\r\n\tint ret;\r\n\tMINUTIAE *minutiae;\r\n\tANSI_NIST *ansi_nist;\r\n\tRECORD *imgrecord;\r\n\tint imgrecord_i;\r\n\timg_type=3;\r\n\tih = 180;\r\n\tiw = 256;\r\n\tid = 8;\r\n\tippi = -1;\r\n\tippmm = 19.685039;\r\n\tidata = (unsigned char*) malloc(ih * iw);\r\n\tif (idata == NULL){\r\n\t\tfprintf(stderr, \"ERROR : main : malloc idata\\n\");\r\n\t\texit(-2);\r\n\t}\r\n\tScanImage(idata);\r\n\t/* 2. ENHANCE IMAGE CONTRAST IF REQUESTED */\r\n\tif(boostflag)\r\n\t\ttrim_histtails_contrast_boost(idata, iw, ih); \r\n\t/* 3. GET MINUTIAE & BINARIZED IMAGE. 
*/\r\n\tif((ret = get_minutiae(&minutiae, &quality_map, &direction_map,\r\n\t &low_contrast_map, &low_flow_map, &high_curve_map,\r\n\t &map_w, &map_h, &bdata, &bw, &bh, &bd,\r\n\t idata, iw, ih, id, ippmm, &lfsparms_V2))){\r\n\t\tif(img_type == ANSI_NIST_IMG)\r\n\t\t\tfree_ANSI_NIST(ansi_nist);\r\n\t\tfree(idata);\r\n\t\texit(ret);\r\n\t}\r\n\r\n\t/* Done with input image data */\r\n\tfree(idata);\r\n\t\r\n\tsort_minutiae_quality(minutiae);\r\n\r\n\t/* Done with minutiae detection maps. */\r\n\tfree(quality_map);\r\n\tfree(direction_map);\r\n\tfree(low_contrast_map);\r\n\tfree(low_flow_map);\r\n\tfree(high_curve_map);\r\n\t\r\n\tfinger->width = iw;\r\n\tfinger->height = ih;\r\n\t/* finger.quality = add code to calculate quality of fingerprint here \r\n\trefer to function comp_nfiq_flex() in the NFIQ package */\r\n\tif (finger->nMinutiae==0) finger->nMinutiae = minutiae->num;\r\n\tint nMinutiae = finger->nMinutiae;\r\n\tMinutia** bkafisMinutiae;\r\n\tMinutia* min;\r\n\t\r\n\t/* if (debug)\r\n\t\tprintf(\"Image width=%d\\nImage height=%d\\nnMinutiae=%d\\n\",finger->width, finger->height,finger->nMinutiae); */\r\n\t\r\n\tbkafisMinutiae = malloc( sizeof(Minutia*) * nMinutiae );\r\n\t\r\n\tif \t( bkafisMinutiae == NULL )\r\n\t{\r\n\t\tfree_minutiae(minutiae);\r\n\t\tfree(bdata);\r\n\t\treturn -1;\r\n\t}\r\n\tunsigned int i,j;\r\n\tfor\t( i = 0; i < nMinutiae; i++ )\r\n\t{\r\n\t\tmin\t= malloc( sizeof(Minutia) );\r\n\t\t\r\n\t\tif \t( min == NULL )\r\n\t\t{\t\t\r\n\t\t\tfree_minutiae(minutiae);\r\n\t\t\tfree(bdata);\r\n\t\t\treturn -1;\r\n\t\t}\r\n\t\t\r\n\t\tmemset( min, 0, sizeof(Minutia) );\r\n\t\t\t\t\t\r\n\t\tmin->x = minutiae->list[i]->x;\r\n\t\tmin->y = -minutiae->list[i]->y;\r\n\t\t\r\n\t\tfloat tmp = minutiae->list[i]->direction; /* store range 0-32 directly **11.25; /* after extracting, the minutiae direction is in range 0-32 => need to be converted into grad */\r\n\t\ttmp = ( tmp <= 8 ) ? 
8-tmp : 40-tmp;\t\t\r\n\t\tmin->angle = tmp;/* /11.25; /*(minutiae->list[i]->direction<=8)?8-minutiae->list[i]->direction:24-minutiae->list[i]->direction;*/\r\n\t\tmin->angle = min->angle*(MAX_ISO_ANGLE/32); /* convert into ISO standard */\r\n\t\t/* -- */\t\r\n\t\tmin->quality = minutiae->list[i]->reliability*100;\r\n\t\tmin->type = minutiae->list[i]->type;\t \r\n\t\tbkafisMinutiae[i]=min;\r\n\t\t\t\t\r\n\t\tmin=NULL;\t\t\r\n\t}\r\n\tfinger->minutiae = bkafisMinutiae;\r\n/* \tif (debug) printf(\"calculate Ed Dra Oda, distances=%x\\n\",finger->distances); */\r\n\t/* sua loi cho ridge_count */\r\n\tgray2bin(1, 1, 0, bdata, iw, ih);\r\n\tif (CalculateEdDraOda(finger)){\r\n\t\tfree_minutiae(minutiae);\r\n\t\tfree(bdata);\r\n\t\treturn -1;\r\n\t}\r\n\t\r\n\t/* if (debug){\r\n\t\tprintf(\"Distances:\\n\");\r\n\t\tfor (i=0;i<nMinutiae;i++){\r\n\t\t\tfor (j=0;j<nMinutiae;j++)\r\n\t\t\t\tprintf(\"%f\\t\",GetDistance(finger,i,j));\r\n\t\t\tprintf(\"\\n\");\r\n\t\t}\r\n\t\tprintf(\"Dra:\\n\");\r\n\t\tfor (i=0;i<nMinutiae;i++){\r\n\t\t\tfor (j=0;j<nMinutiae;j++)\r\n\t\t\t\tprintf(\"%f\\t\",GetDra(finger,i,j));\r\n\t\t\tprintf(\"\\n\");\r\n\t\t} */\r\n\t\t/* printf(\"Oda:\\n\");\r\n\t\tfor (i=0;i<nMinutiae;i++){\r\n\t\t\tfor (j=0;j<nMinutiae;j++)\r\n\t\t\t\tprintf(\"%d\\t\",GetOda(finger,i,j));\r\n\t\t\tprintf(\"\\n\");\r\n\t\t}\t\t */\r\n\t/* } */\r\n/* \tif (debug) printf(\"find directional neighbors distances=%x\\n\",finger->distances); */\r\n\tCalculateLDRNeighbors(finger);\r\n\tfor\t( i = 0; i < nMinutiae; i++ )\r\n\t{\r\n\t\tif\t( finger->minutiae[i] == NULL )\tcontinue;\r\n\t\tMinutia\t*min = finger->minutiae[i];\r\n\t\t/* FindDirectionalNeighbors(finger, i);\t */\r\n\t\t\r\n\t\t/* if \t(debug) printf(\"%d Neighbors of minutia %d:\\n\",min->nNeighbors, i); */\r\n\r\n\t\tfor ( j = 0; j < min->nNeighbors; j++ )\r\n\t\t{\r\n\t\t\t\r\n\t\t\tint rc = ridge_count(i,min->neighborIds[j],minutiae,bdata,iw,ih,&lfsparms_V2);\r\n\t\t\tmin->ridgeCount[j] = rc;\r\n\t\t\t/* if (debug) 
printf(\"Neighbor %d, rc=%d\\n\",min->neighborIds[j],rc); */\r\n\t\t}\r\n\t}\r\n\t\r\n\t/* CalculateLDR(finger); */\r\n\t\r\n\tfree_minutiae(minutiae);\r\n\tfree(bdata);\r\n\treturn 0;\r\n}" }, { "alpha_fraction": 0.46967530250549316, "alphanum_fraction": 0.5237900614738464, "avg_line_length": 24.6141300201416, "blob_id": "7f9bc94977c191a8b97cb832c490594bc955d4b0", "content_id": "1d917875bc0373beb7aa450817ecc6583e50b681", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4897, "license_type": "no_license", "max_line_length": 154, "num_lines": 184, "path": "/bkafis/bkafis/src/bin/matchMoC/int32.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "\r\n/*********************************************************************\r\n\tint32.c\r\n\tDescription: Function to operate on 32 bit integers in 16 bit computers\r\n\tCreated on: Sep 13, 2015 \r\n \tAuthor: Nguyen Duc Minh\r\n *********************************************************************/\r\n#include <stdio.h>\r\n#include <int32.h>\r\n\r\nvoid mult16s(const ISOWORD x, const ISOWORD y, int32 *p){\r\n\tISOBYTE highX, lowX, highY, lowY;\r\n\tISOWORD tmp1, tmp2, tmp3, tmp4, tmp5;\r\n\t/* high byte and low byte of the 2 words */\r\n\thighX = x >> 8;\r\n\tlowX = x & 0xff;\r\n\thighY = y >> 8;\r\n\tlowY = y & 0xff;\r\n\t\r\n\t/*___ p = x * y = (highX*highY)*2^16 + (highX*lowY+lowX*highY)*2^8 + lowX*lowY ___*/\r\n\ttmp1 = highX * highY;\r\n\ttmp2 = highX * lowY;\r\n\ttmp3 = lowX * highY;\r\n\ttmp4 = lowX * lowY;\r\n\ttmp5 = (tmp2 & 0xff) + (tmp3 & 0xff) + (tmp4 >> 8);\r\n\t/* printf(\"(%d,%d)*(%d,%d)=(%d)*2^16+(%d+%d)*2^8+%d:tmp5=%d,%d\\n\",highX,lowX,highY,lowY,tmp1,tmp2,tmp3,tmp4,tmp5,(((tmp2&0xff)+(tmp3&0xff))<<8)+tmp4);)*/\r\n\tp->lowWord = (((tmp2&0xff)+(tmp3&0xff))<<8)+tmp4;\r\n\tp->highWord = tmp1 + (tmp2 >> 8) + (tmp3 >> 8) + (tmp5 >> 8);\r\n\t/* 
printf(\"(%d,%d)*(%d,%d)=(%d)*2^16+(%d+%d)*2^8+%d=(%d*2^16+%d)\\n\",highX,lowX,highY,lowY,tmp1,tmp2,tmp3,tmp4,p->lowWord,p->highWord);*/\r\n}\r\n\r\nvoid shift_right(int32* in, ISOBYTE shamt, int32* out)\r\n{\r\n\r\n\t\tif (shamt<16){\r\n\t\t\t\r\n\t\t\tout->highWord = in->highWord >> shamt;\r\n\t\t\t/* shift loWord to the right shamt bits, i.e., (16-shamt) hight bits*/\r\n\t\t\t/* take shamt low bits in hiWord */\r\n\t\t\t/* concat shamt low bits in hiWord with the (16-shamt) high bits */\r\n\t\t\tout->lowWord = (ISOWORD)(in->highWord << (16-shamt)) | (ISOWORD)(in->lowWord >> shamt);\r\n\t\t\t/* shift hiWord to the left shamt bits */\r\n\t\t\tout->highWord = in->highWord>>shamt;\t\r\n\t\t}\r\n\t\telse {\r\n\t\t\tout->highWord = 0;\r\n\t\t\tout->lowWord = in->highWord >> (shamt-16);\r\n\t\t}\t\t\r\n\r\n}\r\nvoid shift_left(int32* in, ISOBYTE shamt, int32* out)\r\n{\r\n\tif (shamt<16){\r\n\t\tout->highWord = (in->highWord << shamt) | (in->lowWord >> (16-shamt));\r\n\t\tout->lowWord = in->lowWord << shamt;\r\n\t}\r\n\telse {\r\n\t\tout->lowWord = 0;\r\n\t\tout->highWord = in->lowWord >> (32-shamt);\r\n\t}\r\n}\r\n\r\n\r\nvoid add32(int32* a, int32* b, int32* c)\r\n{\r\n\tISOBYTE abyte[4],bbyte[4];\r\n\tISOWORD tmp1, tmp2, tmp3, tmp4, tmp5;\r\n\t/* high byte and low byte of the 2 words */\r\n\tabyte[0]=a->lowWord & 0xff;\r\n\tabyte[1]=a->lowWord >> 8;\r\n\tabyte[2]=a->highWord & 0xff;\r\n\tabyte[3]=a->highWord >> 8;\r\n\tbbyte[0]=b->lowWord & 0xff;\r\n\tbbyte[1]=b->lowWord >> 8;\r\n\tbbyte[2]=b->highWord & 0xff;\r\n\tbbyte[3]=b->highWord >> 8;\r\n\ttmp1 = abyte[0]+bbyte[0];\r\n\ttmp2 = abyte[1]+bbyte[1]+tmp1>>8;\r\n\tc->lowWord = (tmp2<<8) | (tmp1 & 0xff);\r\n\tc->highWord = a->highWord+b->highWord+tmp2>>8;\r\n}\r\nshort int atan2_fxp( short int y, short int x )\t/* y, x in Q1.15.0 */\r\n{\r\n\t/* coeff_1, coeff_2 in Q1.4.3 */\r\n\tshort int\tcoeff_1 = 6; \r\n\tshort int coeff_2 = 19;\r\n\tshort int\tangle;\r\n\t\r\n\t/* r in Q1.9.6 */\r\n\tshort int 
\tr;\t\r\n\t\r\n\t/* abs_y, nume, deno in Q1.15.0 */\r\n\tshort int\tabs_y = ( y >= 0 ) ? y : -y;\r\n\tshort int \tnume;\r\n\tshort int\tdeno;\r\n\t\r\n\tif\t( y == 0 )\r\n\t\treturn\t( x >= 0 ) ? 0 : 50;\t/* M_PI in Q.1.4.3 */\r\n\t\r\n\t\r\n\tif\t( x >= 0 )\r\n\t{\r\n\t\tnume = ( x - abs_y ) << 7;\r\n\t\tdeno = x + abs_y;\r\n\t\tr = nume / deno;\r\n\t\tangle = ( (coeff_1 << 7) - coeff_1 * r ) >> 7;\t\r\n\t}\r\n\telse\r\n\t{\r\n\t\tnume = ( x + abs_y ) << 7;\r\n\t\tdeno = abs_y - x;\r\n\t\tr = nume / deno;\r\n\t\tangle = ( (coeff_2 << 7) - coeff_1 * r ) >> 7;\r\n\t}\r\n\r\n\tif\t( y < 0 )\r\n\t\treturn\t-angle;\r\n\telse\r\n\t\treturn \t angle;\r\n\t\r\n\treturn \t0;\r\n}\r\n\r\nISOWORD\tsqrt32(int32* num) \r\n{\r\n int32\tres;\r\n int32 \tbit; /* The second-to-top bit is set: 1 << 30 for 32 bits */\r\n\tint32 \tres_bit;\r\n\tint32 \tbit_sr2;\r\n\tint32 \tres_sr1;\r\n\tISOWORD\ttmp1;\r\n\tISOWORD tmp2;\r\n\t\t\r\n\t/* res = 0 */\r\n\tres.highWord = 0;\r\n\tres.lowWord = 0;\r\n\t\r\n\t/* bit = 2^30 */\r\n\tbit.highWord = 16384;\t\r\n\tbit.lowWord = 0;\r\n\t\r\n while ( (bit.highWord > num->highWord) || (bit.lowWord > num->lowWord) )\r\n\t{\r\n\t\t\tshift_right(&bit, 2, &bit_sr2);\r\n\t\t\tbit = bit_sr2;\r\n\t}\r\n\t\r\n\twhile \t( (bit.highWord != 0) || (bit.lowWord != 0) )\r\n\t{\r\n\t\tres_bit.lowWord = res.lowWord + bit.lowWord ;\r\n\t\tres_bit.highWord = res.highWord + bit.highWord;\r\n\t\t\r\n\t\tif\t(\r\n\t\t\t\t( ( num->highWord == res_bit.highWord ) && ( num->lowWord >= res_bit.lowWord ) )\r\n\t\t\t || ( num->highWord > res_bit.highWord )\t\t\t\r\n\t\t\t)\r\n\t\t{\r\n\t\t\ttmp1 = num->lowWord;\r\n\t\t\ttmp2 = res_bit.lowWord;\r\n\t\t\t\r\n\t\t\tnum->lowWord = \t( tmp1 >= tmp2 ) \r\n\t\t\t\t\t\t ? num->lowWord - res_bit.lowWord\r\n\t\t\t\t\t\t : (65535 - (res_bit.lowWord - num->lowWord))+ 1;\r\n\t\t\t\r\n\t\t\tnum->highWord = ( tmp1 >= tmp2 ) \r\n\t\t\t\t\t\t ? 
num->highWord - res_bit.highWord\r\n\t\t\t\t\t\t : num->highWord - res_bit.highWord - 1;\t\t\t\r\n\r\n\t\t\tshift_right(&res, 1, &res_sr1);\r\n\r\n\t\t\tres.lowWord = res_sr1.lowWord + bit.lowWord ;\r\n\t\t\tres.highWord = res_sr1.highWord + bit.highWord;\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tshift_right(&res, 1, &res_sr1);\r\n\t\t\tres = res_sr1;\r\n\t\t}\r\n\t\t\r\n\t\tshift_right(&bit, 2, &bit_sr2);\r\n\t\tbit = bit_sr2;\r\n\t}\r\n\t\t\r\n return res.lowWord;\r\n}" }, { "alpha_fraction": 0.5626307725906372, "alphanum_fraction": 0.5931240916252136, "avg_line_length": 17.688236236572266, "blob_id": "0388daf2a3b26edc3d475bf4a6a3d6b1e85928ea", "content_id": "13ca0027ea82a55a32bac4891b6c846fa1b83030", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3345, "license_type": "no_license", "max_line_length": 97, "num_lines": 170, "path": "/bkafis/bkafis/src/lib/bkafis/User.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#include <User.h>\r\n#include <stdio.h>\r\nuint8_t NEXT_Module_ScanImage(NEXT_MODULE_t * module,uint8_t data1[46080])\r\n{\r\n\r\n\tuint16_t col = 0;\r\n\tuint8_t row = 0;\r\n\tuint8_t Row_Counter = 0;\r\n\tuint8_t error;\r\n\tFILE *file;\r\n\tfile = fopen(\"image.txt\",\"w\");\r\n\tuint8_t Row_Data[NUMBER_OF_COLUMNS];\r\n\r\n\tfor (Row_Counter = 0; Row_Counter < NUMBER_OF_ROWS; Row_Counter++)\r\n\t{ \r\n\r\n\t\tswitch (Row_Counter)\r\n\t\t{\r\n\t\tcase 0:\r\n\t\t\terror = NEXT_Module_SPI_Command(INS_GET_LINE, P1_FIRST_LINE, Row_Counter,\r\n\t\t\t\t\t0x00); \r\n\t\t\tBSP_Delay_ms(SCAN_IMAGE_INITIAL_DELAY);\r\n\t\t\tbreak;\r\n\t\tcase (NUMBER_OF_ROWS-1):\r\n\t\t\terror = NEXT_Module_SPI_Command(INS_GET_LINE, P1_LAST_LINE, Row_Counter,\r\n\t\t\t\t\t0x00); \r\n\t\t\tbreak;\r\n\t\tdefault:\r\n\t\t\terror = NEXT_Module_SPI_Command(INS_GET_LINE, P1_INTERMEDIATE_LINE, Row_Counter,\r\n\t\t\t\t\t0x00); \r\n\t\t\tbreak;\r\n\t\t}\r\n\r\n\t\tif 
(!error)\r\n\t\t{\r\n\t\t\tBSP_Delay_us(SCAN_OR_PARTIAL_IMAGE_US_DELAY);\r\n\r\n\t\t\terror = NEXT_Module_SPI_Response(Row_Data, NUMBER_OF_COLUMNS, SCAN_OR_PARTIAL_IMAGE_US_DELAY);\r\n\r\n\t\t\tif (!error)\r\n\t\t\t{\r\n\r\n\t\t\t\tfor (col = 0; col < NUMBER_OF_COLUMNS; col++)\r\n\t\t\t\t{\r\n\t\t\t\t\tfprintf(file,\"%d\\t\",Row_Data[col]);\r\n\t\t\t\t\tdata1[col+256*Row_Counter]=Row_Data[col]; \r\n\t\t\t\t}\r\n\t\t\t\tif(row<180)\r\n\t\t\t\t{\r\n\t\t\t\tfprintf(file,\"\\n\");\r\n\t\t\t\t}\r\n\t\t\t\trow++;\r\n\r\n\t\t\t\tBSP_Delay_us(SCAN_OR_PARTIAL_IMAGE_US_DELAY);\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\treturn error;\r\n\t\t\t}\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\treturn error;\r\n\t\t}\r\n\t}\r\n\treturn NO_ERROR;\r\n\r\n}\r\nuint8_t NEXT_Module_FingerPresent(NEXT_MODULE_t* module, uint8_t* data, bool sense_finger) {\r\n\tuint8_t error = NEXT_Module_SPI_Command(INS_FINGER_PRESENT, sense_finger, 0x00, 0x00);\r\n\tBSP_Delay_ms(10);\r\n\r\n\tif (!error)\r\n\t{\r\n\r\n\t\terror = NEXT_Module_SPI_Response(data, 1, 1000);\r\n\t\tBSP_Delay_ms(1);\r\n\r\n\t}\r\n\r\n\treturn error;\r\n\r\n}\r\nuint8_t NEXT_Module_SPI_Command(uint8_t INS, uint8_t P1, uint8_t P2, uint8_t LEN){\r\n\r\n\tuint8_t SPI_Tx[4] = {INS,P1,P2,LEN};\r\n\tuint8_t SPI_Rx[4];\r\n\tuint8_t i;\r\n\r\n\tBSP_SPI_ReadWriteBuffer(SPI_Tx,SPI_Rx,4);\r\n\r\n\tfor(i=0;i<4;i++){\r\n\t\tif(SPI_Rx[i] != 0xCC){\r\n\t\t\treturn ERR_API_SPI_CMD_STAGE; \r\n\t\t}\r\n\t}\r\n\r\n\treturn NO_ERROR;\r\n\r\n}\r\nuint8_t NEXT_Module_SPI_Data(uint8_t* data, uint8_t data_len){\r\n\r\n\tuint8_t SPI_Rx[255];\r\n\tuint8_t i;\r\n\r\n\tBSP_SPI_ReadWriteBuffer(data,SPI_Rx,data_len);\r\n\r\n\tfor(i=0;i<data_len;i++){\r\n\t\tif(SPI_Rx[i] != 0xAA){\r\n\t\t\treturn ERR_API_SPI_DATA_STAGE; \r\n\t\t}\r\n\t}\r\n\treturn NO_ERROR;\r\n}\r\nuint8_t NEXT_Module_SPI_Response(void* RES_data, uint16_t Length, uint16_t us_interval ){\r\n\tuint8_t Status = BUSY;\r\n\tuint8_t Check = DUMMY_DATA;\r\n\tuint8_t Tx[Length];\r\n\tuint16_t 
i;\r\n\tuint32_t Busy_Count = 0;\r\n\r\n\tfor(i=0;i<Length;i++){\r\n\t\tTx[i]=i; \r\n\t}\r\n\r\n\tBSP_SPI_ReadWriteBuffer(&Check,&Status,1);\r\n\r\n\twhile(Status == BUSY && Busy_Count < MAX_BUSY_COUNT){ \r\n\t\tBSP_Delay_us(us_interval);\r\n\t\tBSP_SPI_ReadWriteBuffer(&Check,&Status,1);\r\n\t\tBusy_Count++;\r\n\t}\r\n\r\n\r\n\tif(Status == READY){ \t\r\n\t\tif(Length){\r\n\t\t\tBSP_SPI_ReadWriteBuffer(Tx,(uint8_t*)RES_data,Length);\r\n\t\t}\r\n\t\treturn NO_ERROR;\r\n\t}else{\t\t\t\t\t\r\n\t\tswitch (Status){ \r\n\t\tcase BUSY:\r\n\t\t\t\r\n\t\tcase ERR_MOD_COM:\r\n\t\t\t\r\n\t\tcase ERR_MOD_DATA_FIELD:\r\n\t\t\t\r\n\t\tcase ERR_MOD_DATA_LENGTH:\r\n\t\t\t\r\n\t\tcase ERR_MOD_DCA:\r\n\t\t\t\r\n\t\tcase ERR_MOD_MCU:\r\n\t\t\t\r\n\t\tcase ERR_MOD_OP_MODE:\r\n\t\t\t\r\n\t\tcase ERR_MOD_PARAM_FIELD:\r\n\t\t\t\r\n\t\tcase ERR_MOD_SENSOR_FAIL:\r\n\t\t\t\r\n\t\tcase ERR_MOD_UNKNOWN_COMMAND:\r\n\t\t\treturn Status; \r\n\t\t\tbreak;\r\n\t\tdefault:\r\n\t\t\treturn ERR_API_SPI_RES_STAGE; \r\n\t\t\tbreak;\r\n\t\t}\r\n\r\n\t}\r\n\r\n}" }, { "alpha_fraction": 0.6228408813476562, "alphanum_fraction": 0.6399105787277222, "avg_line_length": 33.417266845703125, "blob_id": "43a5b4509cf1dfc5eaa463bfadfef7749e92f2b6", "content_id": "45613e75ed1dbcdeacf7420cede667239e36566e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4921, "license_type": "no_license", "max_line_length": 120, "num_lines": 139, "path": "/bkafis/exports/include/fingerprintMoC.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tfingerprintMoC.h\r\n\tDescription: Data structure to present fingerprint stored on java card\r\n\tCreated on: Sep 13, 2015 \r\n \tAuthor: Nguyen Duc Minh\r\n\t\r\n *********************************************************************/\r\n\r\n\r\n\r\n#ifndef _FINGERPRINT_MOC_H_\r\n#define _FINGERPRINT_MOC_H_\r\n\r\n#include <stdio.h>\r\n#include 
<stdint.h>\r\n#include <math.h>\r\n#include <int32.h>\r\n\r\n#ifndef max(a,b)\r\n#define max(a, b) ((a) > (b) ? (a) : (b))\r\n#endif\r\n#ifndef min(a,b)\r\n#define min(a, b) ((a) < (b) ? (a) : (b))\r\n#endif\r\n#define MOC_MAX_NEIGHBORS\t\t8\t\t\t\t\t\t\t\t\r\n#define MOC_MINUTIAE_NUM\t\t30\t\t\t\t\t\t\t\t\t\t\t\r\n/* For MoC data */\r\n/**************************************************\r\n***************************************************\r\n**************************************************/\r\n/* #pragma pack(push)\r\n#pragma pack(1)\r\n*/\r\ntypedef struct{\r\n\tISOWORD dra_rc_index; /* index: 5 bit, rc: 5 bit */\r\n\tISOWORD ed; /* ed: fixed point 8.7 */\r\n} NeighborMoC;\r\n\r\n\r\n/********** Minutia struct **********/\r\ntypedef struct{\r\n\tISOBYTE angle_ldr; /* angle: 5 bit, ldr: 3 bit */\r\n\tISOWORD x_quality; /* x: 9 bit, quality: 7 bit */\r\n\tISOWORD y_type_nNeighbors; /* y: 9 bit, nNeighbors: 4 bit, type: 1 bit */\r\n\tNeighborMoC neighbors[MOC_MAX_NEIGHBORS]; \r\n} MinutiaMoC;\r\n\r\n/********** Fingerprint struct **********/\r\ntypedef struct{\r\n\tISOWORD width_quality;\r\n\tISOWORD height_nMinutiae;\r\n\tMinutiaMoC minutiae[MOC_MINUTIAE_NUM];\r\n} FingerprintMoC;\r\n\r\n\r\n/* Macro to access struct members */\r\n#define NeighborMinIdPOS\t0\r\n#define NeighborMinIdWL\t\t5\r\n#define NeighborMinIdMask\t0x1f\r\n#define GetNeighborMinIndex(neighbor)\t\t(ISOBYTE)(((neighbor).dra_rc_index >> NeighborMinIdPOS) & NeighborMinIdMask)\r\n\r\n#define RidgeCountPOS\t\t5\r\n#define RidgeCountWL\t\t5\r\n#define RidgeCountMask\t\t0x1f\r\n#define GetRidgeCount(neighbor)\t\t\t(ISOBYTE)(((neighbor).dra_rc_index >> RidgeCountPOS) & RidgeCountMask)\r\n\r\n#define DraPOS\t\t\t\t10\r\n#define DraWL\t\t\t\t6\t\t\t\t/* word length */\r\n#define\tDraFL\t\t\t\t3 \t\t\t\t/* fraction length */\r\n#define DraMask\t\t\t\t0x3f\t\t\t/* mask to get 6 bit of dra */\r\n#define GetMoCDra(neighbor)\t\t\t\t(ISOBYTE)(((neighbor).dra_rc_index >> DraPOS) & DraMask)\r\n#define 
SetDraRidgeCountId(dra, rc, id)\t\t(ISOWORD)((dra)<<DraPOS | (rc) << RidgeCountPOS | (id)<<NeighborMinIdPOS)\r\n\r\n#define EdPOS\t\t\t\t0\r\n#define EdWL\t\t\t\t15\r\n#define EdFL\t\t\t\t7\r\n#define GetEd(neighbor)\t\t\t\t\t(ISOWORD)((neighbor).ed)\r\n#define LdrPOS\t\t\t\t0\r\n#define LdrWL\t\t\t\t3\r\n#define LdrMask\t\t\t\t0x07\r\n#define GetLdr(min)\t\t\t\t\t\t(ISOBYTE)(((min).angle_ldr >> LdrPOS) & LdrMask)\r\n\r\n#define AnglePOS\t\t\t3\r\n#define AngleWL\t\t\t\t5\r\n#define AngleMask\t\t\t0x1f\r\n#define GetAngle(min)\t\t\t\t\t(ISOBYTE)(((min).angle_ldr >> AnglePOS) & AngleMask)\r\n#define SetAngleLdr(angle, ldr)\t\t\t(ISOBYTE)((angle)<<AnglePOS|(ldr)<<LdrPOS)\r\n\r\n#define MinQualityPOS\t\t\t0\r\n#define MinQualityWL\t\t\t7\r\n#define MinQualityMask\t\t\t0x7f\r\n#define GetMinQuality(min)\t\t\t\t(ISOBYTE)(((min).x_quality >> MinQualityPOS) & MinQualityMask)\r\n\r\n#define XPOS\t\t\t\t7\r\n#define XWL\t\t\t\t\t9\r\n#define XMask\t\t\t\t0x1ff\r\n#define GetX(min)\t\t\t\t\t\t(ISOWORD)(((min).x_quality >> XPOS) & XMask)\r\n#define SetXQuality(x, quality)\t\t\t(ISOWORD)((x)<<XPOS|(quality)<<MinQualityPOS)\r\n\r\n#define nNeighborsPOS\t\t0\r\n#define nNeighborsWL\t\t4\r\n#define nNeighborsMask\t\t0x0f\r\n#define GetNNeighbors(min)\t\t\t\t(ISOBYTE)(((min).y_type_nNeighbors >> nNeighborsPOS) & nNeighborsMask)\t\r\n\r\n#define TypePOS\t\t\t\t4\r\n#define TypeWL\t\t\t\t1 \r\n#define TypeMask\t\t\t0x01\r\n#define GetType(min)\t\t\t\t\t(ISOBYTE)(((min).y_type_nNeighbors >> TypePOS) & TypeMask)\t\r\n\r\n#define YPOS\t\t\t\t5\r\n#define YWL\t\t\t\t\t9\r\n#define YMask\t\t\t\t0x1ff\r\n#define GetY(min)\t\t\t\t\t\t(ISOWORD)(((min).y_type_nNeighbors >> YPOS) & YMask)\t\r\n#define SetYTypeNNeighbors(y, type, nNeighbors)\t\t(ISOWORD)((y)<<YPOS | (type) << TypePOS | (nNeighbors)<<nNeighborsPOS )\r\n\r\n#define FingerQualityPOS\t0\r\n#define FingerQualityWL\t\t7\r\n#define FingerQualityMask\t0x7f\r\n#define GetFingerQuality(finger)\t\t\t(ISOBYTE)(((finger).width_quality >> 
FingerQualityPOS) & FingerQualityMask)\t\r\n\r\n#define FingerWidthPOS\t\t7\r\n#define FingerWidthWL\t\t9\r\n#define GetFingerWidth(finger)\t\t\t(ISOWORD)((finger).width_quality >> FingerWidthPOS)\r\n#define SetFingerWidthQuality(width, quality) \t\t(ISOWORD)((width)<<FingerWidthPOS | (quality)<<FingerQualityPOS)\r\n\r\n#define nMinutiaePOS\t\t0\r\n#define nMinutiaeWL\t\t\t7\r\n#define nMinutiaeMask\t\t0x7f\r\n#define GetNMinutiae(finger)\t\t\t(ISOBYTE)(((finger).height_nMinutiae >> nMinutiaePOS) & nMinutiaeMask)\t\r\n\r\n#define FingerHeightPOS\t\t7\r\n#define FingerHeightWL\t\t9\r\n#define GetFingerHeight(finger)\t\t\t(ISOWORD)((finger).height_nMinutiae >> FingerHeightPOS)\r\n#define SetFingerHeightNMinutiae(height,nMinutiae) \t\t(ISOWORD)((height)<<FingerHeightPOS | (nMinutiae)<<nMinutiaePOS)\r\n\r\nvoid SaveFingerprintMoCText(char *path, FingerprintMoC* fingerMoC);\r\nISOBYTE SaveFingerprintMoC(unsigned char *path, FingerprintMoC *finger );\r\nISOBYTE\tReadFingerprintMoC( unsigned char *path, FingerprintMoC *finger );\r\n#endif" }, { "alpha_fraction": 0.6200509071350098, "alphanum_fraction": 0.6334841847419739, "avg_line_length": 30.75, "blob_id": "575a3a6f10552757f092c0e61c0d2a433141a1b2", "content_id": "dc5a3c5b3d5282fd10ae525decd0e3197327a2fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7072, "license_type": "no_license", "max_line_length": 160, "num_lines": 216, "path": "/bkafis/bkafis/src/bin/extract/matcher.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tmatcher.c\r\n\tDescription: Function implementation of BKAFIS matcher\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n *********************************************************************/\r\n\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include \"matcher.h\"\r\n#include \"ISOTemplate.h\"\r\n#ifndef M_PI\r\n#define M_PI 
3.14159\r\n#endif\r\n#define DEBUG\r\n/**********************************************************************\r\n\tConvert from ISOTemplate 2005 format \r\n\tInput:\r\n\t\t\tImplicitly stored in static variable isoTemplate that is declared \r\n\t\t\tin ISOTemplate.c \r\n\tOutput:\r\n\t\t\tpointer to Fingerprint structure declared above \r\n\tUsage:\r\n\t\t\tin order to load the iso template from file call \r\n\t\t\tISORESULT LoadISOTemplate (ISOBYTE *path);\r\n\t\t\tthen in order to convert from the template into Fingerprint structure\r\n\t\t\tcall unsigned char ConvertISO2005Fingerprint(Fingerprint* finger);\r\n *********************************************************************/ \r\nstatic AlgoParam algoParam;\r\nISOBYTE ConvertISO2005Fingerprint(Fingerprint* finger)\r\n{\r\n\tif (finger==NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tISOWORD width, height;\r\n\tGetRecordHeader (&width,&height,NULL,NULL,NULL);\r\n\tfinger->width = width;\r\n\tfinger->height = height;\r\n\t#ifdef DEBUG\r\n\tprintf(\"Width=%d\\nHeight=%d\\n\",finger->width,finger->height);\r\n\t#endif\r\n\tunsigned char quality, nMinutiae;\r\n\tGetFingerViewHeader (0,NULL,NULL,NULL,&quality,&nMinutiae);\r\n\tfinger->quality = quality;\r\n\tfinger->nMinutiae = nMinutiae;\r\n\tfinger->Minutiae=malloc(sizeof(Minutia)*finger->nMinutiae);\r\n\tif (finger->Minutiae == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tmemset(finger->Minutiae,0x00,sizeof(Minutia)*finger->nMinutiae);\r\n\t#ifdef DEBUG\r\n\tprintf(\"Quality=%d\\nnMinutiae=%d\\n\",finger->quality,finger->nMinutiae);\r\n\t#endif\r\n\tunsigned char minI, i, j;\r\n\tunsigned char* SortingQuality;\r\n\tSortingQuality = malloc(nMinutiae*sizeof(char));\r\n\t\r\n\t/*\t\tSorting minutiae by quality score in descending order\t*/\r\n for\t(minI = 0; minI < finger->nMinutiae; minI++)\r\n\t\tSortingQuality[minI] = minI;\r\n\tISOWORD x, y, x2;\r\n\tISOBYTE angle, type, quality1, quality2;\r\n\t\r\n\tfor (i = 0; i < finger->nMinutiae - 1; i++)\r\n for (j = 
finger->nMinutiae - 1; j > i; j--){\r\n\t\t\t\r\n\t\t\tGetMinutiaeData(0,SortingQuality[j],&type,&x,&y,&angle,&quality1);\r\n\t\t\tGetMinutiaeData(0,SortingQuality[j-1],&type,&x2,&y,&angle,&quality2);\r\n\t\t\tif (quality1 > quality2){\r\n\t\t\t\tunsigned char \t\t\ttg = SortingQuality[j];\r\n SortingQuality[j] = SortingQuality[j - 1];\r\n SortingQuality[j - 1] = tg;\r\n\t\t\t}\r\n\t\t}\r\n\t\t\t\r\n\tfor (minI=0;minI<finger->nMinutiae;minI++){\r\n\t\tGetMinutiaeData(0,SortingQuality[minI],&type,&x,&y,&angle,&quality1);\r\n\t\tfinger->Minutiae[minI].x = x;\r\n\t\tfinger->Minutiae[minI].y = y;\r\n\t\tfinger->Minutiae[minI].angle = angle*1.40625*M_PI/180;\r\n\t\tfinger->Minutiae[minI].type = type;\r\n\t\tfinger->Minutiae[minI].quality = quality1;\r\n\t\t#ifdef DEBUG\r\n\t\tprintf(\"%d\\t%d\\t%f\\t%d\\t%d\\n\", \r\n\t\t\tfinger->Minutiae[minI].x,\r\n\t\t\tfinger->Minutiae[minI].y,\r\n\t\t\tfinger->Minutiae[minI].angle,\r\n\t\t\tfinger->Minutiae[minI].type,\r\n\t\t\tfinger->Minutiae[minI].quality\r\n\t\t\t);\r\n\t\t#endif\r\n\t}\r\n\tfree(SortingQuality);\r\n\treturn ISO_SUCCESS;\r\n\t\r\n}\r\nISOBYTE SaveFingerprintText(unsigned char* path, Fingerprint* finger)\r\n{\r\n\tFILE *fp;\r\n\tunsigned char minI;\r\n\tunsigned char neighbourI;\r\n\r\n\tif ( path==NULL)\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tif ( (fp=fopen(path,\"w\"))==NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tfprintf(fp, \"Minutiae information:Width=%d\\tHeight=%d\\tQuality=%d\\tnMinutiae=%d\\n\", finger->width, finger->height, finger->quality, finger->nMinutiae);\r\n\tfprintf(fp, \"x\\ty\\tAngle\\tType\\tQuality\\tLDR\\t#Neighbours\\tEd\\tDra\\tOda\\tRidgeCount...\\n\");\r\n\tfor (minI=0;minI<finger->nMinutiae;minI++){\r\n\t\tfprintf(fp, \"%d\\t%d\\t%f\\t%d\\t%d\\t%d\\t%d\", \r\n\t\t\t\tfinger->Minutiae[minI].x, \r\n\t\t\t\tfinger->Minutiae[minI].y, 
\r\n\t\t\t\tfinger->Minutiae[minI].angle,\r\n\t\t\t\tfinger->Minutiae[minI].type,\r\n\t\t\t\tfinger->Minutiae[minI].quality,\r\n\t\t\t\tfinger->Minutiae[minI].ldr,\r\n\t\t\t\tfinger->Minutiae[minI].nNeighbours\r\n\t\t\t\t\t\t\t);\r\n\t\tfor (neighbourI=0;neighbourI<finger->Minutiae[minI].nNeighbours;neighbourI++)\r\n\t\t\tfprintf(fp, \"\\t%f\\t%f\\t%f\\t%d\", \r\n\t\t\t\tfinger->Minutiae[minI].neighbours[neighbourI].Ed, \r\n\t\t\t\tfinger->Minutiae[minI].neighbours[neighbourI].Dra,\r\n\t\t\t\tfinger->Minutiae[minI].neighbours[neighbourI].Oda,\r\n\t\t\t\tfinger->Minutiae[minI].neighbours[neighbourI].ridgeCount \r\n\t\t\t\t);\r\n\t\tfprintf(fp,\"\\n\");\r\n\t}\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nISOBYTE CleanFingerprint(Fingerprint* finger)\r\n{\r\n\tfree(finger->Minutiae);\r\n\tmemset(finger,0x00,sizeof(Fingerprint));\r\n\treturn ISO_SUCCESS;\r\n}\r\nfloat \tCalculateAngle( int x1, int y1, int x2, int y2, float distance )\r\n{\r\n float angle;\r\n\tangle = atan2( y2 - y1, x2 - x1 );\r\n\treturn\t(angle<0)?angle+2*M_PI:angle;\r\n}\r\n\r\nISOBYTE CalculateDistances(Fingerprint* finger, float* distances)\r\n{\r\n\tdistances = malloc(sizeof(float)*);\r\n\tif (distances == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tmemset(distances, 0, sizeof(float)*);\r\n\tfor ( i = 0; i < finger.nMinutiae; i++ )\r\n\t\tfor ( j = 0; j < ; j++ )\t\t\r\n\t\t\tif\t( i < j )\r\n\t\t\t\tdistances[i][j] = sqrt( pow( (float)( finger.Minutiae[i].x - finger.Minutiae[j].x), 2 ) + pow( (float)( finger.Minutiae[i].y - finger.Minutiae[j].y), 2 ) );\r\n\t\t\telse\r\n\t\t\t\tdistances[i][j] = distances[j][i];\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nISOBYTE FindDirectionalNeighbours(Fingerprint* finger, float* distances, ISOBYTE centerI)\r\n{\r\n\tfloat minDistance[algoPrama.nNeighborSector];\r\n\tISOBYTE minNeighborI[algoPrama.nNeighborSector];\r\n\tfloat* phi;\r\n\tphi = malloc(sizeof(float)*finger.nMininutae); /* angle between minutiae centerI & other minutiae */\r\n\tmemset(phi, 0, 
sizeof(float)*finger.nMininutae);\r\n\tnNeighbors = 0;\r\n\tnIterations = 0;\r\n\t\r\n\twhile ((nNeighbors<8)&&(nIterations<8)){\r\n\t\tnIterations++;\r\n\t\tmemset(minDistance,0,sizeof(float)*algoParam.nNeighborSector);\r\n\t\tmemset(minNeighborI,0,sizeof(ISOBYTE)**algoParam.nNeighborSector);\r\n\t\tfor (i=0;i<finger.nMinutiae;i++){\r\n\t\t\tif (\r\n\t\t\t\t(distances[centerI*finger.nNeighbors+i...] < algoParam.minNeighborDistance) || \r\n\t\t\t\t(distances[centerI*finger.nNeighbors+i...] > algoParam.maxNeighborDistance) ||\r\n\t\t\t\t(i==centerI)\r\n\t\t\t) continue; /* skip neighbors that are too far or too near the center minutia */\r\n\t\t\t/* skip neighbors that have been chosen */\r\n\t\t\tunsigned char stop = 0;\r\n\t\t\tfor (j = 0;j<maxNeighbor;j++) \r\n\t\t\t\tif (nbr_list[j]==i) {\r\n\t\t\t\t stop = 1;\r\n\t\t\t\t break;\r\n\t \t\t\t}\r\n\t\t\tif (stop) continue;\r\n\t\t\t\t\t\t\r\n\t\t\tif (phi[i]==0)\r\n\t\t\t\tphi[i] = CalculateAngle(finger,centerI,i);\r\n\t\t\tISOBYTE sectorI = floor(phi*4/M_PI);\r\n\t\t\tif \r\n\t\t}\r\n\t\t\t\r\n\t\tslotI = 0;\r\n\t\twhile (slotI<8){\r\n\t\t\t\r\n\t\t\t\tif (phi(i) in slotI) && \r\n\t\t\t\t\t(distances[centerI*finger.nNeighbours+i...] > algoParam.minNeighbourDistance) && \r\n\t\t\t\t\t(distances[centerI*finger.nNeighbours+i...] < algoParam.maxNeighbourDistance) && \r\n\t\t\t\t\t(distances[centerI*finger.nNeighbours+i...] 
< minDistance[phiI]))\r\n\t\t\t\t)\r\n\t\t}\t\r\n\t}\r\n\tfor (ISOBYTE i=0;i<finger.nMininutae;i++)\r\n\t{\r\n\t\tphi = CalculateAngle(finger, centerI, i);\r\n\t\tISOBYTE phiI = \r\n\t\tif (\r\n\t\t\r\n\t\t)\r\n\t\t\r\n\t}\r\n}" }, { "alpha_fraction": 0.5969325304031372, "alphanum_fraction": 0.6184049248695374, "avg_line_length": 31.183673858642578, "blob_id": "6a38d70a1ee93f2eba193c3385e2d582d19f3785", "content_id": "cd2ffd88b1c0da32ed5dbcd381be8222cc2e49e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1630, "license_type": "no_license", "max_line_length": 78, "num_lines": 49, "path": "/bkafis/bkafis/include/FvcHeader.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "\r\n/*\r\n// Copyright (C) 2009 DEIS - University of Bologna (Italy)\r\n// All rights reserved.\r\n//\r\n// FVC-onGoing sample source code.\r\n// http://biolab.csr.unibo.it/fvcongoing\r\n//\r\n// This source code can be used by FVC participants to create FVC executables.\r\n// It cannot be distributed and any other use is strictly prohibited.\r\n//\r\n// Warranties and Disclaimers:\r\n// THIS SOFTWARE IS PROVIDED \"AS IS\" WITHOUT WARRANTY OF ANY KIND\r\n// INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY,\r\n// FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.\r\n// IN NO EVENT WILL UNIVERSITY OF BOLOGNA BE LIABLE FOR ANY DIRECT,\r\n// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES,\r\n// INCLUDING DAMAGES FOR LOSS OF PROFITS, LOSS OR INACCURACY OF DATA,\r\n// INCURRED BY ANY PERSON FROM SUCH PERSON'S USAGE OF THIS SOFTWARE\r\n// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\r\n//\r\n*/\r\n/* ---------------------------------------------------------------\r\n FVC-onGoing Win32 testing header file\r\n\r\n v 1.0 - March 2009\r\n\r\n --------------------------------------------------------------- */\r\n\r\n#ifndef FVC_H\r\n#define FVC_H\r\n\r\n\r\n#define MAXPATH 
\t\t\t\t\t\t\t\t\t\t\t255\r\n#define MAXIMAGESIZE \t\t\t\t\t\t\t640*640\r\n\r\n#define SUCCESS \t\t\t\t\t\t\t\t\t\t\t\t0\r\n#define SYNTAX_ERROR \t\t\t\t\t\t\t\t\t\t1\r\n#define CANNOT_OPEN_OUTPUT_FILE \t\t\t\t2\r\n#define CANNOT_OPEN_IMAGE_FILE \t\t\t\t\t3\r\n#define CANNOT_OPEN_TEMPLATE_FILE \t\t\t4\r\n#define IMAGE_LOAD_ERROR\t\t\t\t\t\t\t\t5\r\n#define CANNOT_WRITE_TEMPLATE \t\t\t\t\t6\r\n#define CANNOT_UPDATE_OUTPUT_FILE \t\t\t7\r\n\r\n#define XXXX_INIT_ERROR \t\t\t\t\t\t\t100\r\n#define XXXX_SETUP_ERROR \t\t\t\t\t\t\t101\r\n\r\n\r\n#endif /* FVC_H */\r\n\r\n" }, { "alpha_fraction": 0.5848061442375183, "alphanum_fraction": 0.6026225686073303, "avg_line_length": 35.13917541503906, "blob_id": "c8cbbd86ec01242a601c53a1d46f6ccf8a9e1d32", "content_id": "ed9648a1df9c1b4ad38bed2c4e4652a74e71aacc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7016, "license_type": "no_license", "max_line_length": 80, "num_lines": 194, "path": "/bkafis/bkafis/src/bin/extract/extract.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*******************************************************************************\n\nLicense: \nThis software and/or related materials was developed at the National Institute\nof Standards and Technology (NIST) by employees of the Federal Government\nin the course of their official duties. Pursuant to title 17 Section 105\nof the United States Code, this software is not subject to copyright\nprotection and is in the public domain. \n\nThis software and/or related materials have been determined to be not subject\nto the EAR (see Part 734.3 of the EAR for exact details) because it is\na publicly available technology and software, and is freely distributed\nto any interested party with no licensing requirements. 
Therefore, it is \npermissible to distribute this software as a free download from the internet.\n\nDisclaimer: \nThis software and/or related materials was developed to promote biometric\nstandards and biometric technology testing for the Federal Government\nin accordance with the USA PATRIOT Act and the Enhanced Border Security\nand Visa Entry Reform Act. Specific hardware and software products identified\nin this software were used in order to perform the software development.\nIn no case does such identification imply recommendation or endorsement\nby the National Institute of Standards and Technology, nor does it imply that\nthe products and equipment identified are necessarily the best available\nfor the pursectore.\n\nThis software and/or related materials are provided \"AS-IS\" without warranty\nof any kind including NO WARRANTY OF PERFORMANCE, MERCHANTABILITY,\nNO WARRANTY OF NON-INFRINGEMENT OF ANY 3RD PARTY INTELLECTUAL PROPERTY\nor FITNESS FOR A PARTICULAR PURsectorE or for any pursectore whatsoever, for the\nlicensed product, however used. In no event shall NIST be liable for any\ndamages and/or costs, including but not limited to incidental or consequential\ndamages of any kind, including economic damage or injury to property and lost\nprofits, regardless of whether NIST shall be advised, have reason to know,\nor in fact shall know of the sectorsibility.\n\nBy using this software, you agree to bear all risk relating to quality,\nuse and performance of the software and/or related materials. You agree\nto hold the Government harmless from any claim arising from your use\nof the software.\n\n*******************************************************************************/\n\n/***********************************************************************\n PACKAGE: NIST Fingerprint Minutiae Detection\n\n FILE: MINDTCT.C\n\n AUTHOR: Michael D. 
Garris\n DATE: 04/18/2002\n UPDATED: 09/14/2004\n UPDATED: 05/09/2005 by MDG\n UPDATED: 01/31/2008 by Kenneth Ko\n UPDATED: 09/04/2008 by Kenneth Ko\n UPDATED: 09/30/2008 by Kenenth Ko - add version option.\n\n#cat: mindtct - Uses Version 2 of the NIST Latent Fingerprint System (LFS)\n#cat: to detect minutiae and count ridges in a grayscale image.\n#cat: This version of the program will process:\n#cat: ANSI/NIST, WSQ, JPEGB, JPEGL, and IHead image formats.\n#cat: Results are written to various output files with\n#cat: predefined extensions appeneded to a specified output\n#cat: root path.\n\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\n\t- Detecting LDR (from line 318)\n\n***********************************************************************/\n\n#include <fingerprint.h>\n#include <extract.h>\n\nvoid procargs(int, char **, int *, int *, char **, char **);\n\n/*************************************************************************\n**************************************************************************/\nint main(int argc, char *argv[])\n{\n\tint boostflag, m1flag;\n\tchar *ifile, *oroot, ofile[MAXPATHLEN];\n\t\n\n\t/* Process command line arguments. 
*/\n\tprocargs(argc, argv, &boostflag, &m1flag, &ifile, &oroot);\n\n\tFingerprint finger;\n\tfinger.nMinutiae=0;\n\tif (debug) printf(\"Extract minutiae from %s\\n\",ifile);\n\textract(boostflag, m1flag,&finger);\n\t\t\n\t/*sprintf(ofile, \"%s.txt\", oroot);*/\n\tsprintf(ofile, \"%s.txt\", oroot);\n\tif (debug) printf(\"Save minutiae to %s\\n\",ofile);\n\tSaveFingerprintText(ofile, &finger); \n\tsprintf(ofile, \"%s.ist\", oroot);\n\tConvertFingerprintISO2005(&finger);\n\tSaveISOTemplate(ofile);\n\tCleanISOTemplate(); \n\t/* printf(\"Load resulted iso file to test\\n\");\n\tLoadISOTemplate(ofile);\n\tFingerprint finger1;\n\tprintf(\"Convert into fingerprint struct to test\\n\");\n\tConvertISO2005Fingerprint(&finger1);\n\tsprintf(ofile, \"%s.ist.txt\", oroot);\n\tif (debug) printf(\"Save minutiae to %s\\n\",ofile);\n\tSaveFingerprintText(ofile, &finger1); */\n\t/* CleanFingerprint(&finger1);*/\n\t\n\t\n\t/*FingerprintMoC fingerMoC;\n\tFingerprint2MoC(&finger, &fingerMoC);\n\tsprintf(ofile, \"%s.MoC.txt\", oroot);\n\tSaveFingerprintMoCText(ofile, &fingerMoC); \n\t*/\n\tCleanFingerprint(&finger);\n\n\t/* Done with minutiae and binary image results */\n\t\n\t\n\t/*free(tmp_dist);\n\tfree(tmp_index);\n\t*/\n\t/* Exit normally. 
*/\n\texit(0);\n}\n\n/*************************************************************************\n**************************************************************************\n PROCARGS - Process command line arguments\n Input:\n argc - system provided number of arguments on the command line\n argv - system provided list of command line argument strings\n Output:\n boostflag - contrast boost flag \"-b\"\n ifile - input image file name to be processed by this program\n ifile - output image file name to be created by this program\n**************************************************************************/\nvoid procargs(int argc, char **argv, int *boostflag, int *m1flag,\n char **ifile, char **oroot)\n{\n int a;\n\n *boostflag = FALSE;\n *m1flag = FALSE;\n\n if ((argc == 2) && (strcmp(argv[1], \"-version\") == 0)) {\n getVersion();\n exit(0);\n }\n\n if(argc == 3){\n *ifile = argv[1];\n *oroot = argv[2];\n return;\n }\n\n if((argc == 4) || (argc == 5)){\n a = 1;\n while(a < argc-2){\n if(strcmp(argv[a], \"-b\") == 0){\n *boostflag = TRUE;\n }\n else if(strcmp(argv[a], \"-m1\") == 0){\n *m1flag = TRUE;\n }\n else{\n fprintf(stderr, \"Unrecognized flag \\\"%s\\\"\\n\", argv[a]);\n fprintf(stderr,\n \"Usage : %s [-b] [-m1] <finger_img_in> <oroot>\\n\",\n argv[0]);\n fprintf(stderr,\n \" -b = contrast boost image\\n\");\n fprintf(stderr,\n \" -m1 = output \\\"*.xyt\\\" according to ANSI INCITS 378-2004\\n\");\n exit(1);\n }\n a++;\n }\n }\n else{\n fprintf(stderr, \"Invalid number of arguments on command line\\n\");\n fprintf(stderr,\n \"Usage : %s [-b] [-m1] <finger_img_in> <oroot>\\n\",\n argv[0]);\n fprintf(stderr,\n \" -b = contrast boost image\\n\");\n fprintf(stderr,\n \" -m1 = output \\\"*.xyt\\\" according to ANSI INCITS 378-2004\\n\");\n exit(2);\n }\n \n *ifile = argv[a++];\n *oroot = argv[a];\n}\n\n\n\n\n\n" }, { "alpha_fraction": 0.5460069179534912, "alphanum_fraction": 0.6137152910232544, "avg_line_length": 23.489360809326172, "blob_id": 
"9a25a6cdd2ba74bb5820baaf623196b2e76a85a4", "content_id": "fe2cab67c9c5af06862eef3991fd907a7a79b75a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1152, "license_type": "no_license", "max_line_length": 79, "num_lines": 47, "path": "/Scan Image with Sensor/main.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "// spi.c\n//\n// Example program for bcm2835 library\n// Shows how to interface with SPI to transfer a byte to and from an SPI device\n//\n// After installing bcm2835, you can build this \n// with something like:\n// gcc -o spi spi.c -l bcm2835\n// sudo ./spi\n//\n// Or you can test it before installing with:\n// gcc -o spi -I ../../src ../../src/bcm2835.c spi.c\n// sudo ./spi\n//\n// Author: Mike McCauley\n// Copyright (C) 2012 Mike McCauley\n// $Id: RF22.h,v 1.21 2012/05/30 01:51:25 mikem Exp $\n\n#include <bcm2835.h>\n#include <stdio.h>\n#include \"User.h\"\n#include <wiringPi.h>\nint main()\n{\n BSP_Config_HW (); \n uint8_t data1[1],i=0;\n uint16_t j=0; \n uint8_t err=1, error=1;\n uint8_t data_seri[12];\n NEXT_MODULE_t* Module;\n unlink(\"image.txt\");\n unlink(\"image.JPG\");\n while(1)\n {\n\terror = NEXT_Module_FingerPresent(Module, data1,0x01);\n printf(\"Dat ngon tay vao cam bien\\n\");\n BSP_Delay_ms(1000);\n\tif (data1[0] < 50) continue;\n error = NEXT_Module_ScanImage(Module);\n printf(\"Da quet dau van tay thanh cong\\n\");\n\tBSP_Module_Reset_Configure();\n\tbreak;\n }\n bcm2835_spi_end();\n bcm2835_close();\n return 0;\n}\n\n" }, { "alpha_fraction": 0.6098592877388, "alphanum_fraction": 0.6285997033119202, "avg_line_length": 23.413408279418945, "blob_id": "9536252702dfb166b684f08779fca0cf21eecb34", "content_id": "d6d1418c119f33c3a26308a7c629b2167a3f47a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 18196, "license_type": "no_license", "max_line_length": 150, "num_lines": 716, "path": "/Scan Image with Sensor/User.c", 
"repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*\r\n* $HeadURL: svn://192.168.1.115/M3_Module_Host/trunk/App/NEXT_User_Lib.c $\r\n*\r\n* Created on: Nov 18, 2013\r\n* Author: Ryan Higgins\r\n*\r\n* Last Modified: $LastChangedDate: 2014-05-08 15:05:27 -0700 (Thu, 08 May 2014) $\r\n* Last Modified by: $LastChangedBy: Ryan $\r\n* LastChangedRevision : $LastChangedRevision: 170 $\r\n*\r\n* This software is provided \"as is\". NEXT Biometrics makes no warranty of any kind, either\r\n* express or implied, including without limitation any implied warranties of condition, uninterrupted\r\n* use, merchantability, or fitness for a particular purpose.\r\n*\r\n* This document as well as the information or material contained is copyrighted.\r\n* Any use not explicitly permitted by copyright law requires prior consent of NEXT Biometrics.\r\n* This applies to any reproduction, revision, translation and storage.\r\n*\r\n*\tDescription:\r\n*\r\n*\t\tFile Contents:\r\n*\t\tThe Embedded SDK API\r\n*\t\tThe M0 Protocol Communication Functions\r\n*\t\tHelper Functions\r\n*/\r\n\r\n#include \"User.h\"\r\n#include <stdio.h>\r\n\r\n#ifndef USE_MST_AS_GPIO\r\nextern NEXT_MODULE_t* p_My_Module; //Default configuration is to use ISR, GPIO usage is optional\r\n#endif\r\n\r\n//////////////////////////////////////////BEGIN Sensor Library Functions/////////////////////////////////////////////////////\r\n\r\n\r\n/*\r\n * uint8_t NEXT_Module_Connect(NEXT_MODULE_t* module)\r\n *\r\n * Precondition:\r\n * + Module host has applied power to Vbat and 3.3Vdd.\r\n * + Power rails have had time to stabilize.\r\n *\r\n * If the module is disconnected (nRST low), the module is connected.\r\n * If the module is in stop mode (nRST high), the module is re-connected.\r\n * The function delays in ms for the MCU boot time, MCU_BOOT_DELAY.\r\n * It then issues an SPI Connection Diagnostic\r\n *\r\n * Postcondition:\r\n * + Module host can issue any command immediately.\r\n *\r\n *\r\n 
*/\r\n\r\nuint8_t NEXT_Module_Connect(NEXT_MODULE_t* module) {\r\n\r\n\tNEXT_Module_Disconnect(module); //If module is in stop mode (nRST high) we re-connect.\r\n\r\n\tBSP_Module_nRST_High();\r\n\r\n\tBSP_Delay_ms(MCU_BOOT_DELAY);\r\n\r\n#ifdef USE_MST_AS_GPIO\r\n\twhile (BSP_Get_Moudle_Status_Pin() == 0);\r\n#else\r\n\twhile (module->stopped);\r\n#endif\r\n\r\n\tuint8_t error = NEXT_Connect_Diagnostic(module);\r\n\r\n\tif(error) {\r\n\t\tNEXT_Module_Disconnect(module);\r\n\t\treturn error;\r\n\t}\r\n\r\n\tmodule->connected = true;\r\n\tmodule->stopped = false;\r\n\r\n\treturn error;\r\n\r\n}\r\n\r\n/*\r\n * void NEXT_Module_Disconnect(NEXT_MODULE_t* module)\r\n *\r\n * This function calls the BSP function to set the nRST pin low, holding the module in reset.\r\n * The value of the connection status is updated to reflect that the module is not connected.\r\n * The stopped flag is cleared as stopped state is only defined when a module is connected.\r\n *\r\n *\r\n */\r\n\r\nvoid NEXT_Module_Disconnect(NEXT_MODULE_t* module)\r\n{\r\n\r\n\tuint8_t i;\r\n\tBSP_Module_nRST_Low();\r\n/*\r\n\tmodule->connected = false;\r\n\tmodule->stopped = false; //Stopped state is defined only when a module is connected.\r\n\r\n\r\n\tfor(i=0;i<12;i++){\r\n\t\tmodule->serial_no[i] = 0;\r\n\t}\r\n\tfor(i=0;i<3;i++){\r\n\t\tmodule->firmware_ver[i] = 0;\r\n\t}\r\n*/\r\n}\r\n\r\n\r\n\r\n/*\r\n * uint8_t NEXT_Module_ScanImage(NEXT_MODULE_t * module)\r\n *\r\n * Checks the stop mode of the module.\r\n * \t-If the module is in stop mode, an API error is returned to indicate that the module was previously placed in stop mode.\r\n *\r\n * Issues a series of INS_GET_LINE M0 Protocol commands to build a full image.\r\n *\r\n * Returns: NO_ERROR, Command Stage Error, or Any of the Response Errors\r\n *\r\n */\r\n\r\nuint8_t NEXT_Module_ScanImage(NEXT_MODULE_t * module)\r\n{\r\n\r\n\tuint16_t col = 0;\r\n\tuint8_t row = 0;\r\n\tuint8_t Row_Counter = 0;\r\n\tuint8_t error;\r\n\tFILE *file;\r\n\tfile 
= fopen(\"image.txt\",\"w\");\r\n\r\n#ifdef USE_MST_AS_GPIO\r\n\tif(BSP_Get_Moudle_Status_Pin() == 0)\r\n\t{\r\n\t\t//return ERR_API_MODULE_STOPPED;\r\n\t}\r\n#else\r\n\tif(module->stopped)\r\n\t{\r\n\t\t//return ERR_API_MODULE_STOPPED;\r\n\t}\r\n#endif\r\n\r\n\tuint8_t Row_Data[NUMBER_OF_COLUMNS];\r\n\r\n\tfor (Row_Counter = 0; Row_Counter < NUMBER_OF_ROWS; Row_Counter++)\r\n\t{ //Intermediate Get_Line\r\n\r\n\t\tswitch (Row_Counter)\r\n\t\t{\r\n\t\tcase 0:\r\n\t\t\terror = NEXT_Module_SPI_Command(INS_GET_LINE, P1_FIRST_LINE, Row_Counter,\r\n\t\t\t\t\t0x00); //First Get_Line\r\n\t\t\tBSP_Delay_ms(SCAN_IMAGE_INITIAL_DELAY);\r\n\t\t\tbreak;\r\n\t\tcase (NUMBER_OF_ROWS-1):\r\n\t\t\terror = NEXT_Module_SPI_Command(INS_GET_LINE, P1_LAST_LINE, Row_Counter,\r\n\t\t\t\t\t0x00); //Last Get_Line\r\n\t\t\tbreak;\r\n\t\tdefault:\r\n\t\t\terror = NEXT_Module_SPI_Command(INS_GET_LINE, P1_INTERMEDIATE_LINE, Row_Counter,\r\n\t\t\t\t\t0x00); //Intermediate Get_Line\r\n\t\t\tbreak;\r\n\t\t}\r\n\r\n\t\tif (!error)\r\n\t\t{\r\n\t\t\tBSP_Delay_us(SCAN_OR_PARTIAL_IMAGE_US_DELAY);\r\n\r\n\t\t\terror = NEXT_Module_SPI_Response(Row_Data, NUMBER_OF_COLUMNS, SCAN_OR_PARTIAL_IMAGE_US_DELAY);\r\n\r\n\t\t\tif (!error)\r\n\t\t\t{\r\n\t\t\t\t//Populate image structure with buffer data\r\n\t\t\t\tfor (col = 0; col < NUMBER_OF_COLUMNS; col++)\r\n\t\t\t\t{\r\n\t\t\t\t\tfprintf(file,\"%d\\t\",Row_Data[col]);\r\n\t\t\t\t\t//data[row][col]=Row_Data[col];\r\n\t\t\t\t\t//module->image->Row[row].Column[col]=Row_Data[col];\r\n\t\t\t\t\t//printf(\"%d\\t\",data[row][col]); \r\n\t\t\t\t}\r\n\t\t\t\tif(row<180)\r\n\t\t\t\t{\r\n\t\t\t\tfprintf(file,\"\\n\");\r\n\t\t\t\t}\r\n\t\t\t\trow++;\r\n\r\n\t\t\t\tBSP_Delay_us(SCAN_OR_PARTIAL_IMAGE_US_DELAY);\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\treturn error;\r\n\t\t\t}\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\treturn error;\r\n\t\t}\r\n\t}\r\n\treturn NO_ERROR;\r\n\r\n}\r\n\r\n\r\n/*\r\n * uint8_t NEXT_Module_PartialImage(NEXT_MODULE_t * module)\r\n *\r\n * 
Checks the stop mode of the module.\r\n * \t-If the module is in stop mode, an API error is returned to indicate that the module was previously placed in stop mode.\r\n *\r\n * Issues a series of INS_GET_PARTIAL M0 Protocol commands to build a partial image.\r\n *\r\n * Returns: NO_ERROR, Command Stage Error, or Any of the Response Errors\r\n *\r\n */\r\n\r\nuint8_t NEXT_Module_PartialImage(NEXT_MODULE_t * module)\r\n{\r\n\r\n\tuint8_t col = PARTIAL_START_COLUMN;\r\n\tuint8_t row = PARTIAL_START_ROW;\r\n\tuint8_t Row_Counter = 0;\r\n\tuint8_t error;\r\n\r\n#ifdef USE_MST_AS_GPIO\r\n\tif(BSP_Get_Moudle_Status_Pin() == 0)\r\n\t{\r\n\t\treturn ERR_API_MODULE_STOPPED;\r\n\t}\r\n#else\r\n\tif(module->stopped)\r\n\t{\r\n\t\treturn ERR_API_MODULE_STOPPED;\r\n\t}\r\n#endif\r\n\r\n\tuint8_t Partial_Data[PARTIAL_NUMBER_OF_COLUMNS];\r\n\r\n\tfor (Row_Counter = 0; Row_Counter < PARTIAL_NUMBER_OF_ROWS; Row_Counter++)\r\n\t{ //Intermediate Get_Line\r\n\r\n\t\tswitch (Row_Counter)\r\n\t\t{\r\n\t\tcase 0:\r\n\t\t\terror = NEXT_Module_SPI_Command(INS_GET_PARTIAL, P1_FIRST_LINE,\r\n\t\t\t\t\tRow_Counter, 0x00); //First Get_Line\r\n\t\t\tBSP_Delay_ms(PARTIAL_IMAGE_INITIAL_DELAY);\r\n\t\t\tbreak;\r\n\t\tcase (PARTIAL_NUMBER_OF_ROWS-1):\r\n\t\t\terror = NEXT_Module_SPI_Command(INS_GET_PARTIAL, P1_LAST_LINE,\r\n\t\t\t\t\tRow_Counter, 0x00); //Last Get_Line\r\n\t\t\tbreak;\r\n\t\tdefault:\r\n\t\t\terror = NEXT_Module_SPI_Command(INS_GET_PARTIAL, P1_INTERMEDIATE_LINE,\r\n\t\t\t\t\tRow_Counter, 0x00); //Intermediate Get_Line\r\n\t\t\tbreak;\r\n\t\t}\r\n\r\n\t\tif (!error)\r\n\t\t{\r\n\t\t\tBSP_Delay_us(SCAN_OR_PARTIAL_IMAGE_US_DELAY);\r\n\r\n\t\t\terror = NEXT_Module_SPI_Response(Partial_Data, PARTIAL_NUMBER_OF_COLUMNS, SCAN_OR_PARTIAL_IMAGE_US_DELAY);\r\n\r\n\t\t\tif (!error)\r\n\t\t\t{\r\n\r\n\t\t\t\t//Populate image structure with buffer data\r\n\t\t\t\tfor (col = PARTIAL_START_COLUMN; col < PARTIAL_END_COLUMN; col++)\r\n\t\t\t\t{\r\n\t\t\t\t\tmodule->image.Row[row].Column[col] = 
Partial_Data[col - PARTIAL_START_COLUMN];\r\n\t\t\t\t}\r\n\t\t\t\trow++;\r\n\r\n\t\t\t\tBSP_Delay_us(SCAN_OR_PARTIAL_IMAGE_US_DELAY);\r\n\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\treturn error;\r\n\t\t\t}\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\treturn error;\r\n\t\t}\r\n\t}\r\n\r\n\treturn error;\r\n\r\n}\r\n\r\n\r\n\r\n\r\n\r\n/*\r\n * uint8_t NEXT_Module_FingerPresent(NEXT_MODULE_t* module, uint8_t* data, bool sense_finger)\r\n *\r\n * Checks the stop mode of the module.\r\n * \t-If the module is in stop mode, an API error is returned to indicate that the module was previously placed in stop mode.\r\n *\r\n * Issues the INS_FINGER_PRESENT Module protocol command.\r\n * The sense_finger parameter is used to either re-set the finger on baseline (sense_finger = 0) or sense finger presence (sense_finger = 1).\r\n * When sense_finger = 1 the module will internally check for a faulty baseline (e.g. baseline set when finger is on sensor).\r\n * In the case that a faulty baseline has been set, the baseline is re-set when sense_finger = 1 and no finger is present.\r\n *\r\n * Returns: NO_ERROR, ERR_API_MODULE_STOPPED, Command Stage Error, or Any of the Response Errors\r\n *\r\n */\r\n\r\nuint8_t NEXT_Module_FingerPresent(NEXT_MODULE_t* module, uint8_t* data, bool sense_finger) // tra ve gia tri khi \r\n{\r\n\r\n#ifdef USE_MST_AS_GPIO\r\n\tif(BSP_Get_Moudle_Status_Pin() == 0)\r\n\t{\r\n\t\t//return ERR_API_MODULE_STOPPED;\r\n\t}\r\n#else\r\n\tif(module->stopped)\r\n\t{\r\n\t\t//return ERR_API_MODULE_STOPPED;\r\n\t}\r\n#endif\r\n\r\n\tuint8_t error = NEXT_Module_SPI_Command(INS_FINGER_PRESENT, sense_finger, 0x00, 0x00);\r\n\tBSP_Delay_ms(10);\r\n\r\n\tif (!error)\r\n\t{\r\n\r\n\t\terror = NEXT_Module_SPI_Response(data, 1, 1000);\r\n\t\tBSP_Delay_ms(1);\r\n\r\n\t}\r\n\r\n\treturn error;\r\n\r\n}\r\n\r\n\r\n\r\n/*\r\n * uint8_t NEXT_Module_Serial(NEXT_MODULE_t * module)\r\n *\r\n * Checks the stop mode of the module.\r\n * \t-If the module is in stop mode, an API error is 
returned to indicate that the module was previously placed in stop mode.\r\n *\r\n * Issues the INS_GET_SERIAL Module protocol command.\r\n * Writes the module serial number to the module structure.\r\n *\r\n * Returns: NO_ERROR, Command Stage Error, or Any of the Response Errors\r\n *\r\n */\r\n\r\nuint8_t NEXT_Module_Serial(NEXT_MODULE_t * module){\r\nuint8_t data_seri[12];\r\n#ifdef USE_MST_AS_GPIO\r\n\tif(BSP_Get_Moudle_Status_Pin() == 0)\r\n\t{\r\n\t\t//return ERR_API_MODULE_STOPPED;\r\n\t}\r\n#else\r\n\tif(module->stopped)\r\n\t{\r\n\t\t//return ERR_API_MODULE_STOPPED;\r\n\t}\r\n#endif\r\n\r\n\tuint8_t error = NEXT_Module_SPI_Command(INS_GET_SERIAL,0x00,0x00,0x00);\r\n\tBSP_Delay_ms(1);\r\n\r\n\tif(!error){\r\n\r\n\t\terror = NEXT_Module_SPI_Response(data_seri,12,1000);\r\n\t\tBSP_Delay_ms(1);\r\n\t}\r\n\r\n\treturn error;\r\n}\r\n\r\n\r\n\r\n/*\r\n * uint8_t NEXT_Module_FW_Version(NEXT_MODULE_t * module)\r\n *\r\n * Checks the stop mode of the module.\r\n * \t-If the module is in stop mode, an API error is returned to indicate that the module was previously placed in stop mode.\r\n *\r\n * Issues the INS_GET_FIRMWARE version Module protocol command.\r\n * Writes the firmware version to the module structure.\r\n *\r\n * Returns: NO_ERROR, Command Stage Error, or Any of the Response Errors\r\n *\r\n */\r\n\r\nuint8_t NEXT_Module_FW_Version(NEXT_MODULE_t * module){\r\nuint8_t data_FW_Version[3];\r\n#ifdef USE_MST_AS_GPIO\r\n\tif(BSP_Get_Moudle_Status_Pin() == 0)\r\n\t{\r\n\t\t//return ERR_API_MODULE_STOPPED;\r\n\t}\r\n#else\r\n\tif(module->stopped)\r\n\t{\r\n\t\t//return ERR_API_MODULE_STOPPED;\r\n\t}\r\n#endif\r\n\r\n\tuint8_t error = NEXT_Module_SPI_Command(INS_GET_FIMRWARE_VER,0x00,0x00,0x00);\r\n\tBSP_Delay_ms(1);\r\n\r\n\tif(!error){\r\n\r\n\t\terror = NEXT_Module_SPI_Response(data_FW_Version,3,1000);\r\n\t\tBSP_Delay_ms(1);\r\n\t}\r\n\r\n\treturn error;\r\n}\r\n\r\n//////////////////////////////////////END Sensor Library 
Functions/////////////////////////////////////////////////////\r\n\r\n\r\n\r\n//////////////////////////////////////////BEGIN Module Stop Mode Functions/////////////////////////////////////////////////////\r\n\r\n/*\r\n * uint8_t NEXT_Module_Stop (NEXT_MODULE_t* module, uint8_t number_of_finger_detects)\r\n *\r\n * * Checks the stop mode of the module.\r\n * \t-If the module is in stop mode, an API error is returned to indicate that the module was previously placed in stop mode.\r\n *\r\n * Issues the INS_SET_STOP_MODE module protocol command to place the module in Stop mode.\r\n * If number_of_finger_detects is 0, the module will continually check for finger presence.\r\n * If number_of_finger_detects is n, the module will check for finger presence n-1 times.\r\n * Consequently if number_of_finger_detects is 1, the module enters stop mode and waits for a reset.\r\n *\r\n * Returns: NO_ERROR, Command Stage Error, or Any of the Response Errors\r\n *\r\n */\r\n\r\n\r\nuint8_t NEXT_Module_Stop (NEXT_MODULE_t* module, uint8_t number_of_finger_detects){\r\n\r\n#ifdef USE_MST_AS_GPIO\r\n\tif(BSP_Get_Moudle_Status_Pin() == 0)\r\n\t{\r\n\t\t//return ERR_API_MODULE_STOPPED;\r\n\t}\r\n#else\r\n\tif(module->stopped)\r\n\t{\r\n\t\t//return ERR_API_MODULE_STOPPED;\r\n\t}\r\n#endif\r\n\r\n\tuint8_t error = NEXT_Module_SPI_Command(INS_SET_MO_STOP,number_of_finger_detects,0x00,0x00);\r\n\tBSP_Delay_ms(1);\r\n\r\n\tif(!error){\r\n\r\n\t\terror = NEXT_Module_SPI_Response(NULL,0,1000);\r\n\t\tBSP_Delay_ms(1);\r\n\r\n\t}\r\n\r\n\treturn error;\r\n}\r\n\r\n\r\n//////////////////////////////////////////END Module Stop Mode Functions/////////////////////////////////////////////////////\r\n\r\n\r\n//////////////////////////////////////////BEGIN Module User Manual (Module Protocol) Functions///////////////////////////////\r\n\r\n\r\n/*\r\n * uint8_t NEXT_Module_SPI_Command(uint8_t INS, uint8_t P1, uint8_t P2, uint8_t LEN)\r\n *\r\n * Perform the command stage of the Module Protocol\r\n * 
Refer to the Module User Manual for detailed description.\r\n *\r\n * Returns: NO_ERROR or command stage error\r\n *\r\n */\r\n\r\nuint8_t NEXT_Module_SPI_Command(uint8_t INS, uint8_t P1, uint8_t P2, uint8_t LEN){\r\n\r\n\tuint8_t SPI_Tx[4] = {INS,P1,P2,LEN};\r\n\tuint8_t SPI_Rx[4];\r\n\tuint8_t i;\r\n\r\n\tBSP_SPI_ReadWriteBuffer(SPI_Tx,SPI_Rx,4);\r\n\r\n\tfor(i=0;i<4;i++){\r\n\t\tif(SPI_Rx[i] != 0xCC){\r\n\t\t\treturn ERR_API_SPI_CMD_STAGE; //Module should respond with 0xCC to indicate it is in the Command Stage\r\n\t\t}\r\n\t}\r\n\r\n\treturn NO_ERROR;\r\n\r\n}\r\n\r\n/*\r\n * uint8_t NEXT_Module_SPI_Data(uint8_t* data, uint8_t data_len)\r\n *\r\n * Perform the data stage of the Module Protocol\r\n * Refer to the Module User Manual for detailed description.\r\n *\r\n * Returns: NO_ERROR or ERR_API_SPI_DATA_STAGE\r\n *\r\n */\r\n\r\n\r\nuint8_t NEXT_Module_SPI_Data(uint8_t* data, uint8_t data_len){\r\n\r\n\tuint8_t SPI_Rx[255];\r\n\tuint8_t i;\r\n\r\n\tBSP_SPI_ReadWriteBuffer(data,SPI_Rx,data_len);\r\n\r\n\tfor(i=0;i<data_len;i++){\r\n\t\tif(SPI_Rx[i] != 0xAA){\r\n\t\t\treturn ERR_API_SPI_DATA_STAGE; //Module should respond with 0xAA to indicate it is in the Data Stage\r\n\t\t}\r\n\t}\r\n\treturn NO_ERROR;\r\n}\r\n\r\n\r\n/*\r\n * uint8_t NEXT_Module_SPI_Response(void* RES_data, uint16_t Length, uint16_t us_interval )\r\n *\r\n * Perform the query and response stage of the Module Protocol\r\n * The us_interval parameter is the delay in microseconds after a busy status has been received from the module.\r\n * Refer to the Module User Manual for detailed description.\r\n *\r\n * Returns: NO_ERROR, Module Errors, ERR_API_SPI_RES_STAGE\r\n *\r\n */\r\n\r\n\r\nuint8_t NEXT_Module_SPI_Response(void* RES_data, uint16_t Length, uint16_t us_interval ){\r\n\tuint8_t Status = BUSY;\r\n\tuint8_t Check = DUMMY_DATA;\r\n\tuint8_t Tx[Length];\r\n\tuint16_t i;\r\n\tuint32_t Busy_Count = 0;\r\n\r\n\tfor(i=0;i<Length;i++){\r\n\t\tTx[i]=i; //Any dummy data will work, this is 
convenient for indexing response data\r\n\t}\r\n\r\n\tBSP_SPI_ReadWriteBuffer(&Check,&Status,1);\r\n\r\n\twhile(Status == BUSY && Busy_Count < MAX_BUSY_COUNT){ //Poll the module. When the module is no longer busy (Status != BUSY) it will exit this loop.\r\n\t\tBSP_Delay_us(us_interval);\r\n\t\tBSP_SPI_ReadWriteBuffer(&Check,&Status,1);\r\n\t\tBusy_Count++;\r\n\t}\r\n\r\n\r\n\tif(Status == READY){ \t//Module has responded with a Ready\r\n\t\tif(Length){\r\n\t\t\tBSP_SPI_ReadWriteBuffer(Tx,(uint8_t*)RES_data,Length);\r\n\t\t}\r\n\t\treturn NO_ERROR;\r\n\t}else{\t\t\t\t\t//Module has responed with an error\r\n\t\tswitch (Status){ //Defined module error codes\r\n\t\tcase BUSY:\r\n\t\t\t//fall through\r\n\t\tcase ERR_MOD_COM:\r\n\t\t\t//fall through\r\n\t\tcase ERR_MOD_DATA_FIELD:\r\n\t\t\t//fall through\r\n\t\tcase ERR_MOD_DATA_LENGTH:\r\n\t\t\t//fall through\r\n\t\tcase ERR_MOD_DCA:\r\n\t\t\t//fall through\r\n\t\tcase ERR_MOD_MCU:\r\n\t\t\t//fall through\r\n\t\tcase ERR_MOD_OP_MODE:\r\n\t\t\t//fall through\r\n\t\tcase ERR_MOD_PARAM_FIELD:\r\n\t\t\t//fall through\r\n\t\tcase ERR_MOD_SENSOR_FAIL:\r\n\t\t\t//fall through\r\n\t\tcase ERR_MOD_UNKNOWN_COMMAND:\r\n\t\t\treturn Status; //Return the module error code\r\n\t\t\tbreak;\r\n\t\tdefault:\r\n\t\t\treturn ERR_API_SPI_RES_STAGE; //Return general error in response stage\r\n\t\t\tbreak;\r\n\t\t}\r\n\r\n\t}\r\n\r\n}\r\n\r\n\r\n\r\n\r\n/*\r\n * uint8_t NEXT_Connect_Diagnostic(NEXT_MODULE_t* module)\r\n *\r\n ** Issues the INS_DIAGNOSTIC module protocol command with the P1_CONNECT_DAIGNOSTIC parameter.\r\n *\r\n * Perform a connection diagnostic to verify that a module has power and is communicating over SPI.\r\n * This function checks that the module accepts and returns 8 bytes of 0x55 data.\r\n * Refer to the Module User Manual for detailed description.\r\n *\r\n * Returns NO_ERROR, Command, Data, or Response stage errors, or the Module Connect error which is based of the returned SPI data.\r\n */\r\n\r\nuint8_t 
NEXT_Connect_Diagnostic(NEXT_MODULE_t* module){\r\n\r\n\tuint8_t i;\r\n\tuint8_t Connect_Diagnostic_String[CONNECT_DIAGNOSTIC_STRING_SIZE] = {0x55,0x55,0x55,0x55,0x55,0x55,0x55,0x55};\r\n\tuint8_t Response[CONNECT_DIAGNOSTIC_RESPONSE_SIZE];\r\n\tuint8_t error;\r\n\r\n#ifdef USE_MST_AS_GPIO\r\n\tif(BSP_Get_Moudle_Status_Pin() == 0)\r\n\t{\r\n\t\treturn ERR_API_MODULE_STOPPED;\r\n\t}\r\n#else\r\n\tif(module->stopped)\r\n\t{\r\n\t\treturn ERR_API_MODULE_STOPPED;\r\n\t}\r\n#endif\r\n\r\n\terror = NEXT_Module_SPI_Command(INS_DIAGNOSTIC,P1_CONNECT_DAIGNOSTIC,0x00,CONNECT_DIAGNOSTIC_STRING_SIZE);\r\n\tBSP_Delay_ms(1);\r\n\r\n\tif(!error){\r\n\r\n\t\terror = NEXT_Module_SPI_Data(Connect_Diagnostic_String,CONNECT_DIAGNOSTIC_STRING_SIZE);\r\n\t\tBSP_Delay_ms(1);\r\n\r\n\t\tif(!error){\r\n\r\n\t\t\terror = NEXT_Module_SPI_Response(Response,CONNECT_DIAGNOSTIC_RESPONSE_SIZE,1000);\r\n\t\t\tBSP_Delay_ms(1);\r\n\r\n\t\t\tif(!error){\r\n\r\n\t\t\t\tfor(i=0;i<CONNECT_DIAGNOSTIC_STRING_SIZE;i++){\r\n\t\t\t\t\tif(Response[i] != 0x55){\r\n\t\t\t\t\t\terror = ERR_API_MODULE_CONNECT;\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\r\n\t\t\t}\r\n\r\n\t\t}\r\n\r\n\t}\r\n\r\n\treturn error;\r\n}\r\n\r\n\r\n//////////////////////////////////////////END Module User Manual (Module Protocol) Functions///////////////////////////////\r\n\r\n//////////////////////////////////////////BEGIN Miscellaneous Functions///////////////////////////////\r\n\r\n\r\n#ifndef USE_MST_AS_GPIO\r\n///////////////////////////////////////////////////////////////////////////////\r\n//\r\n// void EXTI9_5_IRQHandler(void)\r\n//\r\n//\t\t@brief This function handles EXTI9 interrupt request.\r\n//\t\t@param None\r\n//\t\t@retval None\r\n//\r\n///////////////////////////////////////////////////////////////////////////////\r\nvoid MSP_interupt(void)\r\n{\r\n NEXT_MODULE_t* p_My_Module;\r\n p_My_Module->stopped = false;\r\n}\r\n#endif\r\n\r\n/*\r\n * uint8_t NEXT_Flush_Image(NEXT_SENSOR_IMAGE_t* image)\r\n *\r\n * Set an image to 0x00 
(black)\r\n *\r\n */\r\n\r\n\r\nuint8_t NEXT_Flush_Image(NEXT_SENSOR_IMAGE_t* image){\r\n\tuint16_t j;\r\n\tuint8_t i;\r\n\r\n\tfor(j=0;j<256;j++){\r\n\t\tfor(i=0;i<180;i++){\r\n\t\t\timage->Row[i].Column[j] = 0;\r\n\t\t}\r\n\t}\r\n\treturn NO_ERROR;\r\n\r\n}\r\n\r\n//////////////////////////////////////////END Miscellaneous Functions///////////////////////////////\r\n" }, { "alpha_fraction": 0.5817186236381531, "alphanum_fraction": 0.5956192016601562, "avg_line_length": 28.05063247680664, "blob_id": "e8da924b08a17aa93da108a7294ab767be92b0e8", "content_id": "bea224bee45aa6642a11c80108c95e75bc7062ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2374, "license_type": "no_license", "max_line_length": 83, "num_lines": 79, "path": "/bkafis/bkafis/src/lib/bkafis/pairarray.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tpairarray.c\r\n\tDescription: Data structure to present array of indices for sparce matrix\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n *********************************************************************/\r\n#include <pairarray.h>\r\n\r\n/*\t\t\tArray of pairs of indices used to store pairs of mapped minutiae\t\t*/\r\n/* typedef struct \r\n{\r\n unsigned char index1;\t\r\n unsigned char index2;\t\r\n}\tPair;\r\n\r\ntypedef struct {\r\n\tPair** list;\r\n\r\n\tunsigned int nPairs;\r\n\tunsigned int maxPairs;\r\n} PairArray; */\r\n/**********************************************************************\r\n\tInitialize the array of pairs. 
First, the array is allocate with maxPairs elements\r\n\tInput:\r\n\t\t\tpairArray: pointer to the structure PairArray\r\n\t\t\tmaxPairs: maximum number of elements in the array\r\n\t\t\t\r\n\tOutput:\r\n\t\t\tthe PairArray->list will be updated by allocated array\r\n\tUsage:\r\n\t\t\tPairArray pairArray;\r\n\t\t\tInitPairArray(&pairArray,200,1): Init a pair array with maximum number 200, \r\n *********************************************************************/ \r\nchar InitPairArray(PairArray* pairArray, unsigned int maxPairs)\r\n{\r\n\tif (pairArray->list) return 0;\r\n\tPair** newlist = malloc(sizeof(Pair*)*maxPairs);\r\n\tif (newlist==NULL)\r\n\t\treturn -1; /* can not allocate memory */\r\n\tpairArray->list = newlist;\r\n\tpairArray->nPairs = 0;\r\n\tpairArray->maxPairs = maxPairs;\r\n\treturn 0;\r\n}\r\nchar AddPairArray(PairArray* pairArray,unsigned char index1, unsigned char index2)\r\n{\r\n\t\r\n\tif (pairArray->nPairs == pairArray->maxPairs){\r\n\t\t/* the list array reach the maximum number of elements */\r\n\t\t/* reallocate it */\r\n\t\tpairArray->maxPairs = pairArray->maxPairs+10;\r\n\t\tpairArray->list = realloc(pairArray->list,sizeof(Pair*)*(pairArray->maxPairs));\r\n\t\tif (pairArray->list==NULL)\r\n\t\t\treturn -1;\r\n\t}\r\n\tPair* newpair = malloc(sizeof(Pair));\r\n\tif (newpair==NULL)\r\n\t\treturn -1;\r\n\tnewpair->index1 = index1;\r\n\tnewpair->index2 = index2;\r\n\tnewpair->data = NULL;\r\n\tpairArray->list[pairArray->nPairs++]=newpair;\r\n\treturn 0;\r\n}\r\nchar CleanPairArray(PairArray* pairArray)\r\n{\r\n\tunsigned int i;\r\n\tif (pairArray->list==NULL) return 0;\r\n\tfor(i=0;i<pairArray->nPairs;i++){\r\n\t\tif (pairArray->list[i]){\r\n\t\t\tif (pairArray->list[i]->data)\r\n\t\t\t\tfree(pairArray->list[i]->data);\r\n\t\t\tfree(pairArray->list[i]);\r\n\t\t}\r\n\t\t\t\r\n\t\tfree(pairArray->list);\r\n\t}\r\n\treturn 0;\r\n}\r\n" }, { "alpha_fraction": 0.5901639461517334, "alphanum_fraction": 0.6580029726028442, "avg_line_length": 
35.112831115722656, "blob_id": "558b734b91c344d18549a3996ef66e0c2d750a84", "content_id": "81f9055d5782aa11cd7c164dec500ab74b39524f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 16837, "license_type": "no_license", "max_line_length": 162, "num_lines": 452, "path": "/bkafis/bkafis/src/bin/matchMoC/matchMoC.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*\r\n// Copyright (C) 2009 DEIS - University of Bologna (Italy)\r\n// All rights reserved.\r\n\r\n//\r\n// FVC-onGoing sample source code.\r\n// http://biolab.csr.unibo.it/fvcongoing\r\n//\r\n// This source code can be used by FVC participants to create FVC executables.\r\n// It cannot be distributed and any other use is strictly prohibited.\r\n//\r\n// Warranties and Disclaimers:\r\n// THIS SOFTWARE IS PROVIDED \"AS IS\" WITHOUT WARRANTY OF ANY KIND\r\n// INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY,\r\n// FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.\r\n// IN NO EVENT WILL UNIVERSITY OF BOLOGNA BE LIABLE FOR ANY DIRECT,\r\n// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES,\r\n// INCLUDING DAMAGES FOR LOSS OF PROFITS, LOSS OR INACCURACY OF DATA,\r\n// INCURRED BY ANY PERSON FROM SUCH PERSON'S USAGE OF THIS SOFTWARE\r\n// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\r\n//\r\n*/\r\n/* ---------------------------------------------------------------\r\n FVC-onGoing FMISOMatch.Win32 testing program\r\n\r\n v 1.0 - March 2009\r\n\r\n --------------------------------------------------------------- */\r\n\r\n\r\n#include <stdio.h>\r\n#include <FvcHeader.h>\r\n#include <fingerprintMoC.h>\r\n#include <int32.h>\r\n#include <bcm2835.h>\r\n#include <cuong.h>\r\n#ifndef M_PI\r\n#define M_PI\t\t\t\t3.14159\r\n#endif\r\n\r\nchar debug=0;\r\n\r\ntypedef struct {\r\n\tISOBYTE id1;\r\n\tISOBYTE id2;\r\n\tISOWORD diff; /* fixed point 1.14 */\r\n} PairedNeighbor;\r\n\r\nstatic const ISOWORD edThreshold[32]= { 
301U, 620U, 864U, 1051U, 1195U, 1304U,\r\n 1388U, 1453U, 1502U, 1540U, 1569U, 1591U, 1608U, 1621U, 1631U, 1639U, 1644U,\r\n 1649U, 1652U, 1655U, 1657U, 1658U, 1660U, 1661U, 1661U, 1662U, 1662U, 1662U,\r\n 1663U, 1663U, 1663U, 1663U }; /* fixed point word length=15, fraction length=7 */\r\n\r\nISOWORD divEdThreshold[32]={ 55621U, 27037U, 19400U, 15950U, 14037U,\r\n 12857U, 12079U, 11544U, 11165U, 10891U, 10690U, 10541U, 10430U, 10346U,\r\n 10283U, 10235U, 10199U, 10171U, 10150U, 10134U, 10122U, 10113U, 10105U,\r\n 10100U, 10096U, 10092U, 10090U, 10088U, 10087U, 10086U, 10085U, 10084U }; /* fixed point word length=16, fraction length = 17 */\r\n\t\r\n#define divEdFL\t\t17\t\t/* fraction length of divEdThreshold */\r\n#define diffFL\t\t14\t\t/* fraction length of diff variable */\r\n\r\n#define edT \t\t(13>>EdFL)\t\t /* fixed point 8.8 */\r\n#define draT \t3 /* fixed point 3.3 */\r\n#define max_dist \t(130>>EdFL) \r\n#define min_dist \t(8>>EdFL)\r\n#define step \t\t(3+EdFL) /* step between two distances used to index edThreshold */\r\n#define odaT \t\t2 \r\n#define ridgeT \t\t1\r\n#define M_2PI_FIXED 50 /* fixed point 3.3 */\r\n#define M_16thPI\t2 /* pi/16 in fixed point 3.3 */\r\n#define Angle31Rad(angle)\t((angle)*M_16thPI) /* convert angle in range [0-31) into radian */\r\n#define nTHRESHOLD\t32\r\n#define bias\t\t2458 /* 0.15 in fixed point 2.14 */\r\n#define ldrWeight\t143 /* 0.07 in fixed point 0.11 */\r\n#define ldrWeightFL\t11 /* fraction length of ldrWeight */\r\n#define scoreFL\t\t10 /* fraction length of final score */\r\n/* Tinh goc hop boi 2 vector co goc so voi truc hoanh angle1, angle2 tuong ung\r\n Dau vao la 2 goc co gia tri tu 0-31 [0-2*pi)\r\n Ket qua la goc co gia tri tu 0-16 \r\n*/\r\nISOBYTE ad_pi_32(ISOBYTE angle1, ISOBYTE angle2)\r\n{\r\n\t\treturn min(abs(angle1-angle2),32-abs(angle1-angle2));\r\n}\r\n/* Tinh goc quay theo nguoc chieu kim dong ho cua 2 goc bat ky\r\n% Input:\r\n% angle1, angle2 - Hai goc gia tri tu 0 - 32\r\n% Output:\r\n% out 
- Goc quay theo nguoc schieu kim dong ho tu goc angle1 den angle2\r\n*/\r\n\r\n\r\nISOBYTE ad_2pi_32(ISOBYTE angle1, ISOBYTE angle2)\r\n{\r\n\treturn (angle2>=angle1)?(angle2-angle1):(32+angle2-angle1);\r\n}\r\n/* \tTinh goc hop boi 2 vector co goc so voi truc hoanh angle1, angle2 tuong ung\r\n\tInput:\r\n\t\tangle1, angle2: 2 goc co gias tri 0-2pi dang fixepoint 3.5\r\n\tOutput:\r\n\t\tGoc hop giua 2 vector co gia tri 0-pi dang fixedpoint 3.5\r\n\t\t\r\n*/\r\nISOWORD ad_pi(ISOBYTE angle1, ISOBYTE angle2)\r\n{\r\n\treturn min(abs(angle1-angle2),M_2PI_FIXED-abs(angle1-angle2));\r\n}\r\nISOWORD ad_2pi(ISOBYTE angle1, ISOBYTE angle2)\r\n{\r\n\treturn (angle2>=angle1)?(angle2-angle1):(M_2PI_FIXED-angle1+angle2);\r\n}\r\nvoid neigh_par(ISOWORD x1, ISOWORD y1, ISOBYTE angle1, ISOWORD x2, ISOWORD y2, ISOBYTE angle2, ISOBYTE* dra, ISOBYTE* oda)\r\n{\r\n\t/* float a12 = atan2(y2-y1,x2-x1);\r\n\tif (a12<0) a12=a12+2*M_PI;*/\r\n\tshort int a12 =atan2_fxp(y2-y1,x2-x1);\r\n\tif (a12<0) a12 = a12 + M_2PI_FIXED;\r\n\t/* ISOBYTE a13 = (ISOBYTE)(a12*(1<<DraFL)); /* in fixed point 3.3 */\r\n\t/* ISOBYTE a13 = (ISOBYTE)(a12 << 5);*/\r\n\tISOBYTE a14 = Angle31Rad(angle1); /* a14 = angle1*pi/16 - fixedpoint 3.3 */\r\n\t*oda = ad_2pi_32(angle1, angle2);\r\n\t*dra = ad_2pi(a14,a12);\r\n}\r\n/* Calculate local paring score for two minutia i in finger 1 and minutia j in finger 2 */\r\n/* return score is a fixed point WL=16, fraction length = 15, i.e., 1.15 */\r\nstatic PairedNeighbor pairedNeighbors[64]; /* mảng chứa index của neighbors thuộc min1 mà có thể ghép cặp được với neighbors thuộc min2*/\r\n\r\nISOWORD PairingMinutiae(FingerprintMoC* finger1, FingerprintMoC* finger2, unsigned char min1, unsigned char min2)\r\n{\r\n\t\r\n\tunsigned char nNeighbors1 = GetNNeighbors(finger1->minutiae[min1]);\r\n\t\r\n\tunsigned char nNeighbors2 = GetNNeighbors(finger2->minutiae[min2]);\r\n\tif (debug) printf(\"Pairing minutiae 
%d<->%d,nNeighbors=%d,%d\\n\",min1,min2,nNeighbors1,nNeighbors2);\r\n\t\r\n\tNeighborMoC* min1Neighbors = finger1->minutiae[min1].neighbors;\r\n\tNeighborMoC* min2Neighbors = finger2->minutiae[min2].neighbors;\r\n\t\r\n\tunsigned char i, j;\r\n\tunsigned char nNeighborPairs=0;\r\n\tISOWORD localScore=(1<<diffFL); /* 1 */\r\n\t\r\n\tISOWORD ed1, ed2;\r\n\tISOBYTE dra1, dra2, oda1, oda2, rc1, rc2;\r\n\tISOWORD edMean, edT1, divEdT1, edDiff;\r\n\tISOBYTE draDiff, odaDiff, rcDiff;\r\n\tISOBYTE thresholdId;\r\n\tISOBYTE angleDiff;\r\n\tISOBYTE min1_neighid1,min1_neighid2,min2_neighid1,min2_neighid2;\r\n\tISOWORD min1_x1, min1_x2,min1_y1,min1_y2;\r\n\tISOWORD min2_x1, min2_x2,min2_y1,min2_y2;\r\n\tISOBYTE min1_angle1, min1_angle2, min2_angle1, min2_angle2;\r\n\t\r\n\tmemset(pairedNeighbors,0,sizeof(PairedNeighbor)*64);\r\n\t\r\n\tfor (i=0;i<nNeighbors1;i++){\r\n\t\t/* với mỗi neighbor i thuộc min1 */\r\n\t\t/* tìm 1 neighbor minNeighbor thuộc min2 sao cho:\r\n\t\t\t- độ chênh lêch ed, dra, oda thỏa mãn các điều kiện threshold\r\n\t\t\t- điểm tính từ độ chênh lệch ed, dra, oda nhỏ nhất \r\n\t\t*/\r\n\t\tISOBYTE min1_neighborid=GetNeighborMinIndex(min1Neighbors[i]);\r\n\t\t\r\n\t\ted1 = GetEd(min1Neighbors[i]); /* fixed point 7.8 */\r\n\t\tdra1 = GetMoCDra(min1Neighbors[i]); /* fixed point 3.3 */\r\n\t\trc1=GetRidgeCount(min1Neighbors[i]); /* range from 0-20 */\r\n\t\toda1=ad_2pi_32(GetAngle(finger1->minutiae[min1]),GetAngle(finger1->minutiae[min1_neighborid]));\r\n \r\n\t\t\r\n\t\tfor (j=0;j<nNeighbors2;j++){\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tISOBYTE min2_neighborid=GetNeighborMinIndex(min2Neighbors[j]);\r\n\t\t\t\r\n\t\t\ted2 = GetEd(min2Neighbors[j]); /* fixed point 8.8 */\r\n\t\t\tdra2 = GetMoCDra(min2Neighbors[j]); /* fixed point 3.5 */\r\n\t\t\trc2=GetRidgeCount(min2Neighbors[j]); /* range from 0-3 */\r\n\t\t\toda2=ad_2pi_32(GetAngle(finger2->minutiae[min2]),GetAngle(finger2->minutiae[min2_neighborid]));\r\n\t\t\tedMean = (ed1+ed2)>>1; /* need to divided by 2 before added 
to avoid overflow */\r\n\t\t\tthresholdId = (edMean-min_dist)>>step;\r\n\t\t\t\r\n\t\t\t/* if (debug)\r\n\t\t\t\tprintf(\"Comparing neighbors %d<->%d;ed:%d,%d; dra:%d,%d;oda:%d,%d;edMean:%d;thresholdId:%d\\n\",i,j,ed1,ed2,dra1,dra2,oda1,oda2,edMean,thresholdId); */\r\n\t\t\t\r\n\t\t\tif (thresholdId>=nTHRESHOLD) continue;\r\n\t\t\tedT1 = edThreshold[thresholdId];\r\n\t\t\tdivEdT1 = divEdThreshold[thresholdId];\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tedDiff = abs(ed1-ed2); /* fixed point 8.7 */\r\n\t\t\tdraDiff = abs(dra1-dra2);\r\n\t\t\todaDiff = abs(oda1-oda2);\r\n\t\t\trcDiff =abs(rc1-rc2);\r\n\t\t\t\r\n\t\t\tif ((edDiff < edT1) && (draDiff < draT) && (odaDiff < odaT) && (rcDiff < ridgeT)){\r\n\t\t\t\tint32 diff; /* 24 bit fraction length*/\r\n\t\t\t\tmult16s(edDiff,divEdT1,&diff); /* 7.7 * 0.17 = 7.24*/\r\n\t\t\t\tint32 diff_fl14; /* 14 fraction length */\r\n\t\t\t\tshift_right(&diff, (divEdFL+EdFL-diffFL), &diff_fl14);\r\n\t\t\t\tISOWORD diff16bits = diff_fl14.lowWord;\r\n\t\t\t\t/* ISOWORD diff16bits = diff>>(EdFL+divEdFL-diffFL); /* diff16bits 1.15 */\r\n\t\t\t\tpairedNeighbors[nNeighborPairs].id1=i;\r\n\t\t\t\tpairedNeighbors[nNeighborPairs].id2=j;\r\n\t\t\t\tpairedNeighbors[nNeighborPairs].diff = diff16bits;\r\n nNeighborPairs++;\r\n\t\t\t\tif (debug)\r\n\t\t\t\t\tprintf(\"Paired neighbors:%d,%d,%f\\n\",i,j,(float)(diff16bits)/(1<<diffFL)); \r\n\t\t\t}\r\n\t\t\t\t\r\n\t\t}\r\n\t\t\t\r\n\t}\r\n\tif (debug)\r\n\t\tprintf(\"nNeighborPairs:%d\\n\",nNeighborPairs);\r\n\tif (!nNeighborPairs) return localScore;\r\n\t\r\n\tfor (i=0;i<nNeighborPairs-1;i++){\r\n\t\tfor (j=i+1;j<nNeighborPairs;j++){\r\n\t\t\tif ((pairedNeighbors[i].id1==pairedNeighbors[j].id1)||\r\n\t\t\t\t(pairedNeighbors[i].id2==pairedNeighbors[j].id2)) continue;\r\n\t\t\t\r\n\t\t\t/* angleDiff = abs(ad_2pi(GetDra(min1Neighbors[pairedNeighbors[i].id1]),GetDra(min1Neighbors[pairedNeighbors[j].id1]))-\r\n\t\t\t\t\t\t\t\t\t 
ad_2pi(GetDra(min2Neighbors[pairedNeighbors[i].id2]),GetDra(min2Neighbors[pairedNeighbors[j].id2]))); */\r\n\t\t\t\t\t\t\t\r\n min1_neighid1 = GetNeighborMinIndex(min1Neighbors[pairedNeighbors[i].id1]);\r\n\t\t\tmin1_neighid2 = GetNeighborMinIndex(min1Neighbors[pairedNeighbors[j].id1]);\r\n\t\t\tmin2_neighid1 = GetNeighborMinIndex(min2Neighbors[pairedNeighbors[i].id2]);\r\n\t\t\tmin2_neighid2 = GetNeighborMinIndex(min2Neighbors[pairedNeighbors[j].id2]);\r\n\t\t\t\r\n\t\t\tmin1_x1 = GetX(finger1->minutiae[min1_neighid1]);\r\n\t\t\tmin1_y1 = GetY(finger1->minutiae[min1_neighid1]);\r\n\t\t\tmin1_angle1=GetAngle(finger1->minutiae[min1_neighid1]);\r\n\t\t\tmin1_x2 = GetX(finger1->minutiae[min1_neighid2]);\r\n\t\t\tmin1_y2 = GetY(finger1->minutiae[min1_neighid2]);\r\n\t\t\tmin1_angle2=GetAngle(finger1->minutiae[min1_neighid2]);\r\n\t\t\tint32 x_sq, y_sq,ed_sq;\r\n\t\t\tmult16s((min1_x1-min1_x2), (min1_x1-min1_x2), &x_sq);\r\n\t\t\tmult16s((min1_y1-min1_y2),(min1_y1-min1_y2), &y_sq);\r\n\t\t\tadd32(&x_sq,&y_sq,&ed_sq);\r\n\t\t\ted1 = sqrt32(&ed_sq);\r\n\t\t\t/* float tmp_ed1 = sqrt((min1_x1-min1_x2)*(min1_x1-min1_x2)+(min1_y1-min1_y2)*(min1_y1-min1_y2));*/\r\n\t\t\t/* ed1 = tmp_ed1*(1<<EdFL); */\r\n\t\t\t\r\n\t\t\tmin2_x1 = GetX(finger2->minutiae[min2_neighid1]);\r\n\t\t\tmin2_y1 = GetY(finger2->minutiae[min2_neighid1]);\r\n\t\t\tmin2_angle1=GetAngle(finger2->minutiae[min2_neighid1]);\r\n\t\t\tmin2_x2 = GetX(finger2->minutiae[min2_neighid2]);\r\n\t\t\tmin2_y2 = GetY(finger2->minutiae[min2_neighid2]);\r\n\t\t\tmin2_angle2=GetAngle(finger2->minutiae[min2_neighid2]);\r\n\t\t\t\r\n\t\t\t/* \r\n\t\t\tfloat tmp_ed2 = sqrt((min2_x1-min2_x2)*(min2_x1-min2_x2)+(min2_y1-min2_y2)*(min2_y1-min2_y2));\r\n\t\t\ted2 = tmp_ed2*(1<<EdFL);\r\n\t\t\t*/\r\n\t\t\t\r\n\t\t\tmult16s((min2_x1-min2_x2), (min2_x1-min2_x2), &x_sq);\r\n\t\t\tmult16s((min2_y1-min2_y2),(min2_y1-min2_y2), &y_sq);\r\n\t\t\tadd32(&x_sq,&y_sq,&ed_sq);\r\n\t\t\ted2 = sqrt32(&ed_sq);\r\n\t\t\t\r\n if ((ed1 < min_dist) || 
(ed2 < min_dist)) continue;\r\n \r\n\t\t\tneigh_par(min1_x1,min1_y1,min1_angle1,min1_x2,min1_y2, min1_angle2, &dra1, &oda1);\r\n\t\t\tneigh_par(min2_x1,min2_y1,min2_angle1,min2_x2,min2_y2, min2_angle2, &dra2, &oda2);\r\n\t\t\t\r\n \r\n\t\t\tdraDiff = ad_pi(dra1,dra2);\r\n\t\t\todaDiff = ad_pi_32(oda1,oda2);\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tedDiff = abs(ed1-ed2);\r\n\t\t\t\r\n edMean = (ed1+ed2)>>1;\r\n\t\t\tthresholdId = (edMean-min_dist)>>step;\r\n\t\t\tif (thresholdId>=nTHRESHOLD) continue;\r\n\t\t\tedT1 = edThreshold[thresholdId];\r\n\t\t\t\r\n\t\t\t/* if (debug)\r\n\t\t\t\tprintf(\"Comparing paired neighbors:%d(%d,%d,%d),%d(%d,%d,%d)<->%d(%d,%d,%d),%d(%d,%d,%d);\\n\",\r\n\t\t\t\t\t\t\tmin1_neighid1,min1_x1,min1_y1,min1_angle1,\r\n\t\t\t\t\t\t\tmin1_neighid2,min1_x2,min1_y2,min1_angle2,\r\n\t\t\t\t\t\t\tmin2_neighid1,min2_x1,min2_y1,min2_angle1,\r\n\t\t\t\t\t\t\tmin2_neighid2,min2_x2,min2_y2,min2_angle2); */\r\n\t\t\t\t\t\t\t\r\n \r\n /*if ((angleDiff<angleT) && (edDiff < edT1) && (draDiff < draT) && (odaDiff < odaT)){*/\r\n\t\t\tif ((edDiff < edT1) && (draDiff < draT) && (odaDiff < odaT)){\r\n\t\t\t\tISOWORD tmp = (pairedNeighbors[i].diff+pairedNeighbors[j].diff)>>1;\r\n\t\t\t\tif (localScore>tmp) localScore = tmp;\r\n\t\t\t} \r\n\t\t}\r\n\t}\t\r\n \r\n return\tlocalScore;\r\n\t\t\t\r\n}\r\n/* Pair minutiae in two finger1, finger2 by calculating localScore */\r\n/* return the list of pair of minutiae, the number of paired minutiae in finger1 (CNI) and in finger2 (CNT)\r\n return 0 if success \r\n*/\r\n\r\nshort int LocalPairingFingers(FingerprintMoC* finger1, FingerprintMoC* finger2)\r\n{\r\n\tuint8_t str[30];\r\n\tISOBYTE i, j;\r\n\tISOWORD localDiff; /* fixed point 2.14 */\r\n\tshort int localScore, tmpScore; /* signed fixed point 2.14 */\r\n\tshort int score;\r\n\tISOBYTE ldrFactor;\r\n\tISOBYTE nMinutiae1 = GetNMinutiae(*finger1);\r\n\tISOBYTE nMinutiae2 = GetNMinutiae(*finger2);\r\n\tif (debug) printf(\"nMinutiae:%d,%d\\n\",nMinutiae1,nMinutiae2);\r\n\tscore = 
0;\r\n\tfor (i=0;i<nMinutiae1;i++)\r\n\t\tfor (j=0;j<nMinutiae2;j++){\r\n\t\t\tif (ad_pi_32(GetAngle(finger1->minutiae[i]),GetAngle(finger2->minutiae[j]))>=10) continue;\r\n\t\t\t\r\n\t\t\tlocalDiff = PairingMinutiae(finger1, finger2, i, j);\r\n\t\t\t\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tif (localDiff==(1<<diffFL)) continue;\r\n\t\t\t\r\n\t\t\tlocalScore=(1<<diffFL)-localDiff-bias;\r\n\t\t\t\r\n\t\t\tldrFactor = (GetLdr(finger1->minutiae[i])+GetLdr(finger2->minutiae[j]))>>1;\r\n\t\t\tISOWORD ldrFactorTmp = ldrFactor*ldrWeight; /* 0.07*ldrFactor 8.0*0.11 = 8.11 how ever because ldr<4=>ldrFactorTmp<0.28=>can be contained in 16 bits => 5.11 */\r\n\t\t\tldrFactorTmp = ldrFactorTmp<<(diffFL-ldrWeightFL);\r\n\t\t\ttmpScore = (ldrFactor>1)?(localScore-ldrFactorTmp):localScore;\r\n\t\t\tscore = score+(tmpScore>>(diffFL-scoreFL));\t\t\r\n\t\t\tif (debug)\r\n\t\t\t\tprintf(\"Minutiae %d, %d, LocalDiff:%d,LocalScore:%d,TmpScore:%d \",i, j, localDiff,localScore,tmpScore);\r\n\t\t\tif (debug)\r\n\t\t\t\t{\r\n\t\t\t\tprintf(\"Score:%d\\n\",score);\r\n\t\t\t\t}\r\n\t\t\t\t\t\t\r\n\t\t}\r\n\r\n\treturn score/min(nMinutiae1,nMinutiae2);\r\n}\r\n\r\nint main(int argc, char * argv[])\r\n{\r\n\tuint8_t str[30];\r\n\tchar templatefile1[MAXPATH], templatefile2[MAXPATH], configfile[MAXPATH], outputfile[MAXPATH];\r\n\tFILE *te1,*te2,*ou;\r\n\tint err;\r\n\tchar MatchingPerformed;\r\n\tfloat similarity;\r\n\t/* Load parameters */\r\n\tif (argc!=4)\r\n { \r\n\t\tprintf(\"\\nSyntax error.\\nUse: Match <templatefile1> <templatefile2> <outputfile>\\n\");\r\n\t\treturn SYNTAX_ERROR;\r\n\t}\r\n\tstrcpy(templatefile1,argv[1]);\r\n\tstrcpy(templatefile2,argv[2]);\r\n\tstrcpy(outputfile,argv[3]);\r\n\t\r\n\tFingerprintMoC finger1;\r\n\tFingerprintMoC finger2;\r\n\tif (ReadFingerprintMoC(templatefile1, &finger1))\r\n\t\tprintf(\"Can not read file %s\\n\",templatefile1);\r\n\tif (ReadFingerprintMoC(templatefile2, &finger2))\r\n\t\tprintf(\"Can not read file %s\\n\",templatefile2);\r\n\t/*\r\n\tchar 
templatefile3[MAXPATH], templatefile4[MAXPATH];\r\n\tsprintf(templatefile3,\"template1.txt\");\r\n\tSaveFingerprintMoCText(templatefile3,&finger1);\r\n\tsprintf(templatefile4,\"template2.txt\");\r\n\tSaveFingerprintMoCText(templatefile4,&finger2); \r\n\t*/\r\n\tshort int score = LocalPairingFingers(&finger1, &finger2);\r\n\tLCD_Init();\r\n\tLCD_Clear();\r\n\tsprintf(str,\"Score = %d\",score);\r\n\tLCD_Gotoxy(0,0);\r\n\tLCD_Puts(str);\r\n\t/* printf(\"%15s %15s %4s %8.6f\\n\",templatefile1,templatefile2,\"OK\",score);*/\r\n\tou=fopen(outputfile,\"at\");\r\n\tif (ou==NULL) return CANNOT_OPEN_OUTPUT_FILE;\r\n\tif (fprintf(ou,\"%15s %15s %4s %d\\n\",templatefile1,templatefile2,\"OK\",score)<=0)\r\n\t\treturn CANNOT_UPDATE_OUTPUT_FILE;\r\n\tfclose(ou);\r\n\t\r\n /* XXXX Init Library\r\n ....\r\n in case of error\r\n - exit returning XXXX_INIT_ERROR if your library cannot be initialized\r\n */\r\n\r\n\r\n /* XXXX Load fingerprint template file 1 */\r\n/*\tif (LoadISOTemplate(templatefile1)!=ISO_SUCCESS){\r\n\t\treturn CANNOT_OPEN_TEMPLATE_FILE;\r\n\t}\r\n\tFingerprint finger1;\r\n\tConvertISO2005Fingerprint(&finger1);\r\n\tSaveFingerprintText(outputfile,&finger1);\r\n\tCleanFingerprint(&finger1);\r\n\r\n\t/* XXXX Store the information of ISOTemplate1 in an internal structure\r\n\t.....\r\n\tin case of error\r\n - exit returning CANNOT_OPEN_TEMPLATE_FILE if your library cannot be initialized\r\n\t*/\r\n\t\r\n\r\n\t/* XXXX Load fingerprint template file 2 */\r\n/*\tCleanISOTemplate();\r\n if (LoadISOTemplate(templatefile2)!=ISO_SUCCESS)\r\n\t{\r\n\t\treturn CANNOT_OPEN_TEMPLATE_FILE;\r\n\t}\r\n\r\n\r\n\t/* XXXX Store the information of ISOTemplate2 in an internal structure\r\n\t.....\r\n\tin case of error\r\n - exit returning CANNOT_OPEN_TEMPLATE_FILE if your library cannot be initialized\r\n\t*/\r\n\r\n\r\n /* XXXX Matching\r\n .....\r\n - set MatchingPerformed=TRUE if the matching has been performed\r\n or MatchingPerformed=FALSE if your algorithm cannot perform the 
matching (e.g. insufficient quality)\r\n\r\n - copy into \"similarity\" the similarity score produced by your algorithm\r\n [similarity is a floating point value ranging from 0 to 1 which indicates the similarity between\r\n\t the template and the fingerprint: 0 means no similarity, 1 maximum similarity.]\r\n */\r\n\r\n\r\n /* Send the results to outputfile */\r\n /* \r\n ou=fopen(outputfile,\"at\");\r\n if (ou==NULL) return CANNOT_OPEN_OUTPUT_FILE;\r\n if (fprintf(ou,\"%15s %15s %4s %8.6f\\n\",templatefile1,templatefile2,MatchingPerformed?\"OK\":\"FAIL\",MatchingPerformed?similarity:0.0F)<=0)\r\n return CANNOT_UPDATE_OUTPUT_FILE;\r\n fclose(ou);\r\n\t*/\r\n\r\n /* XXXX Close Library\r\n ....\r\n */\r\n\r\n return SUCCESS;\r\n}\r\n" }, { "alpha_fraction": 0.5681062936782837, "alphanum_fraction": 0.6312292218208313, "avg_line_length": 19, "blob_id": "d10528d7db3034048e0a8c180f4cb4bb584b5c31", "content_id": "e0b5b14da4344ba15a5df0a9f9b21de87ba07003", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 301, "license_type": "no_license", "max_line_length": 53, "num_lines": 15, "path": "/raspberry_sensor_c/main.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#include <bcm2835.h>\n#include <stdio.h>\n#include \"ScanImage.h\"\nint main()\n{\n\tuint8_t dulieu[46080];\n\tuint16_t i;\n\t//unsigned char *idata;\n\t//idata = (unsigned char*) malloc(256 * 180);\n\t//if (idata == NULL){\n\t//\tfprintf(stderr, \"ERROR : main : malloc idata\\n\");\n\t//}\n\tScanImage(dulieu);\n\treturn 0;\n}\n\n" }, { "alpha_fraction": 0.5688221454620361, "alphanum_fraction": 0.585451602935791, "avg_line_length": 25.265544891357422, "blob_id": "6e78b1926d702ea10913ef6bfc6a8a6a85225293", "content_id": "9111181bba0fa260aa2f87423c2d770e9b3bb010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 21120, "license_type": "no_license", "max_line_length": 151, "num_lines": 772, "path": 
"/bkafis/bkafis/src/lib/bkafis/fingerprint.31.8.2015.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tmatcher.c\r\n\tDescription: Function implementation of BKAFIS matcher\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n\t\r\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\r\n\t- add function DetectLDR\r\n\t- add params for DetectLDR into struct BkafisParams\r\n *********************************************************************/\r\n\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include \"fingerprint.h\"\r\n#include \"ISOTemplate.h\"\r\n#ifndef M_PI\r\n#define M_PI 3.14159\r\n#endif\r\n/* #define DEBUG*/\r\n\r\n/* constants to control extractor */\r\n#define MAX_FOUND_NEIGHBORS\t8\r\n#define MIN_DISTANCE\t8.5\r\n#define MAX_DISTANCE\t130\r\n/* constants to control matcher */\r\n#define ED_THRESHOLD\t\t15\r\n#define DRA_THRESHOLD\t22.5*M_PI/180\r\n#define ODA_THRESHOLD \t\t22.5*M_PI/180\r\n#define DRA_THRESHOLD1\t\t( 2*M_PI - 22.5*M_PI/180 )\r\n#define ODA_THRESHOLD1 \t( 2*M_PI - 22.5*M_PI/180 )\r\n#define ED_WEIGHT 0.8\r\n#define DRA_WEIGHT 0.1\r\n#define ODA_WEIGHT 0.1\r\n#define N_PAIRS\t\t2\r\n#define LOCAL_SCORE_BIAS 1\r\n\r\n/* ldr params ?? 
*/\r\n#define LDR_N \t\t\t\t3\r\n#define LDR_NUM\t\t\t\t3\r\n#define LDR_DIR\t\t\t\t22.5*M_PI/180\r\n#define LDR_POS\t\t\t\t50\r\n/* -- */\r\n\r\n#define NumNeighs2\t3\r\n\r\n#define LDR_WEIGHT \t0.5\r\n#define SIGMA \t2 \r\n#define TG_THRESHOLD \t12\r\n#define TA_THRESHOLD \tM_PI/6\r\n#define RC_THRESHOLD\t1\r\n\r\nBkafisParams bkafisParams = {\r\n\tMAX_FOUND_NEIGHBORS, \r\n\tMIN_DISTANCE,\r\n\tMAX_DISTANCE,\r\n\tED_THRESHOLD,\r\n\tDRA_THRESHOLD,\r\n\tODA_THRESHOLD,\r\n\tDRA_THRESHOLD1,\r\n\tODA_THRESHOLD1,\r\n\tED_WEIGHT,\r\n\tDRA_WEIGHT,\r\n\tODA_WEIGHT,\r\n\t\r\n\tN_PAIRS,\r\n\tLOCAL_SCORE_BIAS,\r\n\t\r\n\tLDR_WEIGHT,\r\n\tLDR_N,\r\n\tLDR_NUM,\r\n\tLDR_DIR,\r\n\tLDR_POS,\r\n\tSIGMA,\r\n\tTG_THRESHOLD,\r\n\tTA_THRESHOLD,\r\n\tRC_THRESHOLD\r\n};\r\n/*\r\ntypedef struct\r\n{\r\n\t\r\n\tunsigned char maxNeighbors;\r\n\tfloat minDistance,maxDistance;\r\n\t\r\n\tfloat edThreshold, draThreshold, odaThreshold, draThreshold1, odaThreshold1;\r\n\tfloat edWeight, draWeight, odaWeight;\r\n\tunsigned char nNeighborPairThreshold;\r\n\tfloat localScoreBias;\r\n\t\r\n\tfloat ldrWeight;\r\n\tfloat sigma;\r\n\tfloat tgThreshold, taThreshold;\r\n\tunsigned char rcThreshold;\r\n\t\r\n} BkafisParams; */\r\n/**********************************************************************\r\n\tConvert from ISOTemplate 2005 format \r\n\tInput:\r\n\t\t\tImplicitly stored in static variable isoTemplate that is declared \r\n\t\t\tin ISOTemplate.c \r\n\tOutput:\r\n\t\t\tpointer to Fingerprint structure declared above \r\n\tUsage:\r\n\t\t\tin order to load the iso template from file call \r\n\t\t\tISORESULT LoadISOTemplate (ISOBYTE *path);\r\n\t\t\tthen in order to convert from the template into Fingerprint structure\r\n\t\t\tcall unsigned char ConvertISO2005Fingerprint(Fingerprint* finger);\r\n *********************************************************************/ \r\n\r\nISOBYTE ConvertISO2005Fingerprint(Fingerprint* finger)\r\n{\r\n\tif (finger==NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tISOWORD 
width, height;\r\n\tGetRecordHeader (&width,&height,NULL,NULL,NULL);\r\n\tfinger->width = width;\r\n\tfinger->height = height;\r\n\t\r\n\t/*\r\n\t#ifdef DEBUG\r\n\tprintf(\"Width=%d\\nHeight=%d\\n\",finger->width,finger->height);\r\n\t#endif\r\n\t*/\r\n\t\r\n\tunsigned char quality, nMinutiae;\r\n\tGetFingerViewHeader (0,NULL,NULL,NULL,&quality,&nMinutiae);\r\n\tfinger->quality = quality;\r\n\tfinger->nMinutiae = nMinutiae;\r\n\tMinutia** minutiae=malloc(sizeof(Minutia*)*finger->nMinutiae);\r\n\tif (minutiae == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tmemset(minutiae,0x00,sizeof(Minutia*)*finger->nMinutiae);\r\n\t\r\n\t/*\r\n\t#ifdef DEBUG\r\n\tprintf(\"Quality=%d\\nnMinutiae=%d\\n\",finger->quality,finger->nMinutiae);\r\n\t#endif\r\n\t*/\r\n\t\r\n\tunsigned char minI;\r\n\tISOBYTE type, angle;\r\n\tISOWORD x,y;\r\n\tMinutia* min;\r\n\tfor (minI=0;minI<finger->nMinutiae;minI++){\r\n\t\t\r\n\t\tmin=malloc(sizeof(Minutia));\r\n\t\tif (min==NULL){\r\n\t\t\tCleanFingerprint(finger);\r\n\t\t\treturn ISO_GENERICERROR;\r\n\t\t}\r\n\t\tGetMinutiaeData(0,minI,&type,&x,&y,&angle,&quality);\r\n\t\tmin->x = x;\r\n\t\tmin->y = y;\r\n\t\tmin->angle = angle*1.40625*M_PI/180;\r\n\t\tmin->type = type;\r\n\t\tmin->quality = quality;\r\n\t\t\r\n\t\t/*\r\n\t\t#ifdef DEBUG\r\n\t\tprintf(\"%d\\t%d\\t%f\\t%d\\t%d\\n\", \r\n\t\t\tmin->x,\r\n\t\t\tmin->y,\r\n\t\t\tmin->angle,\r\n\t\t\tmin->type,\r\n\t\t\tmin->quality\r\n\t\t\t);\r\n\t\t#endif\r\n\t\t*/\r\n\t\t\r\n\t\tminutiae[minI]=min;\r\n\t}\r\n\tfinger->minutiae= minutiae;\r\n\treturn ISO_SUCCESS;\r\n\t\r\n}\r\nISOBYTE SaveFingerprintText(unsigned char* path, Fingerprint* finger)\r\n{\r\n\tFILE *fp;\r\n\tunsigned char minI;\r\n\tunsigned char neighborI;\r\n\t\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tif ( (fp = fopen(path,\"w\")) == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\t/*\r\n\tfprintf(fp,\"Minutiae information:Width=%d\\tHeight=%d\\tQuality=%d\\tnMinutiae=%d\\n\", 
finger->width, finger->height, finger->quality, finger->nMinutiae);\r\n\tfprintf(fp,\"x\\ty\\tAngle\\tType\\tQuality\\tLDR\\t#Neighbors\\tIndex\\tEd\\tDra\\tOda\\tRidgeCount...\\n\");\r\n\t*/\r\n\t\r\n\tfor ( minI = 0; minI < finger->nMinutiae; minI++ )\r\n\t{\r\n\t\tif \t( finger->minutiae[minI] )\r\n\t\t\tfprintf\t(\tfp,\t\"%d\\t%d\\t%f\\t%d\\t%d\\t%d\\t%d\"\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->x\t\t\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->y\t\t\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->angle\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->type\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->quality\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->ldr\t\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->nNeighbors\r\n\t\t\t\t\t);\r\n\t\t\t\t\t\r\n\t\tfor ( neighborI = 0; neighborI < finger->minutiae[minI]->nNeighbors; neighborI++ )\r\n\t\t\tif \t( finger->minutiae[minI]->neighbors[neighborI] )\r\n\t\t\t\tfprintf\t(\tfp, \"\\t%d\\t%f\\t%f\\t%f\\t%d\"\t\t\t\t\t\t\t\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->index\t\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->ed\t\t\t, \r\n\t\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->dra\t\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->oda\t\t\t,\r\n\t\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->ridgeCount \r\n\t\t\t\t\t\t);\r\n\t\t\t\t\t\t\r\n\t\tfprintf(fp,\"\\n\");\r\n\t}\r\n\t\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nISOBYTE CleanFingerprint(Fingerprint* finger)\r\n{\r\n\tunsigned char i,j;\r\n\tif (finger->minutiae){\r\n\t\tfor (i=0;i<finger->nMinutiae;i++){\r\n\t\t\tif (finger->minutiae[i]) {\r\n\t\t\t\tif (finger->minutiae[i]->neighbors) {\r\n\t\t\t\t\tfor (j=0;j<finger->minutiae[i]->nNeighbors;j++)\r\n\t\t\t\t\t\tif (finger->minutiae[i]->neighbors[j]) 
free(finger->minutiae[i]->neighbors[j]);\r\n\t\t\t\t\tfree(finger->minutiae[i]->neighbors);\r\n\t\t\t\t}\r\n\t\t\t\tfree(finger->minutiae[i]);\r\n\t\t\t}\r\n\t\t}\r\n\t\tfree(finger->minutiae);\r\n\t}\r\n\tmemset(finger,0x00,sizeof(Fingerprint));\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nunsigned char SortMinutiaQuality(Fingerprint* finger)\r\n{\r\n\tunsigned char i, j;\r\n\t\r\n\t/*\t\tSorting minutiae by quality score in descending order\t*/\r\n \r\n\tISOBYTE quality1, quality2;\r\n\tfor (i = 0; i < finger->nMinutiae - 1; i++)\r\n for (j = finger->nMinutiae - 1; j > i; j--){\r\n\t\t\tquality1 = finger->minutiae[j]->quality;\r\n\t\t\tquality2 = finger->minutiae[j-1]->quality;\r\n\t\t\tif (quality1 > quality2){\r\n\t\t\t\tMinutia* \t\t\ttg = finger->minutiae[j];\r\n finger->minutiae[j] = finger->minutiae[j - 1];\r\n finger->minutiae[j-1] = tg;\r\n\t\t\t}\r\n\t\t}\r\n\t\r\n\t\r\n}\r\n\r\n\r\nISOBYTE SaveFingerprint( unsigned char *path, Fingerprint *finger )\r\n{\r\n\tFILE\t*fp;\r\n\tunsigned char\tminI;\r\n\tunsigned char\tneighborI;\r\n\t\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tif ( (fp = fopen(path, \"wb\")) == NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tfwrite( finger, FINGERHEADERSIZE, 1, fp );\r\n\tMinutia\t*min;\r\n\t\r\n\tfor ( minI = 0; minI < finger->nMinutiae; minI++ )\r\n\t{\r\n\t\tmin = finger->minutiae[minI];\r\n\t\tif \t(min)\r\n\t\t{\r\n\t\t\tfwrite( min, MINUTIASIZE, 1, fp );\r\n\t\t\tfor ( neighborI = 0; neighborI < finger->minutiae[minI]->nNeighbors; neighborI++ )\r\n\t\t\t\tif \t( min->neighbors[neighborI] )\r\n\t\t\t\t{\r\n\t\t\t\t\tfwrite( min->neighbors[neighborI], NEIGHBORSIZE, 1, fp );\r\n\t\t\t\t}\r\n\t\t}\r\n\t}\r\n\t\r\n\tfclose(fp);\r\n\treturn\tISO_SUCCESS;\r\n}\r\n\r\nISOBYTE\tReadFingerprint( unsigned char *path, Fingerprint *finger )\r\n{\r\n\tFILE\t*fp;\r\n\tunsigned char\tminI;\r\n\tunsigned char\tneighborI;\r\n\r\n\tif ( path == NULL 
)\r\n\t{\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\r\n\tif ( (fp = fopen(path,\"rb\")) == NULL )\r\n\t{\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tfread( finger, FINGERHEADERSIZE, 1, fp );\r\n\t\r\n\tMinutia\t**minutiae = malloc( sizeof(Minutia*) * finger->nMinutiae );\r\n\tif \t( minutiae == NULL )\r\n\t\treturn\tISO_GENERICERROR;\r\n\t\r\n\tmemset( minutiae, 0x00, sizeof(Minutia*) * finger->nMinutiae );\r\n\t\r\n\tMinutia\t*min;\r\n\tfor ( minI = 0; minI < finger->nMinutiae; minI++ )\r\n\t{\t\r\n\t\tmin = malloc( sizeof(Minutia) );\r\n\t\tif \t( min == NULL ) \r\n\t\t\treturn\tISO_GENERICERROR;\r\n\r\n\t\tfread( min, MINUTIASIZE, 1, fp );\r\n\t\tif \t( !min->nNeighbors )\t\r\n\t\t\tcontinue;\r\n\t\t\r\n\t\tNeighbor\t**neighborArray = malloc( sizeof(Neighbor*) * min->nNeighbors );\r\n\t\tNeighbor\t*pNeighbor;\r\n\t\tif \t( neighborArray == NULL )\t\r\n\t\t\treturn\tISO_GENERICERROR;\r\n\t\t\t\t\r\n\t\tmemset( neighborArray, 0, sizeof(Neighbor*) * min->nNeighbors );\r\n\t\t\r\n\t\tunsigned char\ti;\r\n\t\tfor\t( i = 0; i < min->nNeighbors; i++ )\r\n\t\t{\r\n\t\t\tpNeighbor = malloc( sizeof(Neighbor) );\r\n\t\t\tif \t( pNeighbor == NULL )\t\r\n\t\t\t\treturn\tISO_GENERICERROR;\t\r\n\t\t\t\t\r\n\t\t\tfread( pNeighbor, NEIGHBORSIZE, 1, fp );\r\n\t\t\tneighborArray[i] = pNeighbor;\r\n\t\t\tpNeighbor = NULL;\r\n\t\t}\r\n\t\t\r\n\t\tmin->neighbors = neighborArray;\r\n\t\tminutiae[minI] = min;\r\n\t}\r\n\t\r\n\tfinger->minutiae = minutiae;\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nfloat \tCalculateAngle( int x1, int y1, int x2, int y2)\r\n{\r\n float\tangle;\r\n\tangle = atan2( y2 - y1, x2 - x1 );\r\n\treturn\t(angle < 0) ? 
angle+2*M_PI : angle;\r\n}\r\n/*\r\nStore distances between minutiae in a triangle matrix which represented in C as an array.\r\nd(0,1),d(0,2),...,d(0,n-1): n-1 elements\r\nd(1,2),d(1,3),...,d(1,n-1): n-2 elements\r\nd(i-1,i),d(i-1,i+1),...,d(i-1,n-1): n-i elements\r\nd(i,i+1),d(i,i+2),...,d(i,n-1): n-1-i elements\r\nd(n-2,n-1): 1 elements\r\nTotal number = (n-1)+(n-2)+...1 = (n-1)*n/2 elements\r\nrow 0 starts at distances[0]\r\nrow i starts at distances[start_i]: start_i = (n-1)+(n-2)+...+(n-i)=i*(2n-1-i)/2\r\nd(i,j) and d(j,i) will be stored at distances[start_i+j-i-1] = distances(i*(2n-1-i)/2+j-i-1))\r\nd(n-2,n-1)=distances((n-2)*(2n-1-n+2)/2+n-1-n+2-1)=distances((n-2)*(n+1)/2)=distance(n(n-1)/2-1)\r\n*/\r\nunsigned char CalculateDistances(Fingerprint* finger, float** pDistances)\r\n{\r\n\tunsigned int n=finger->nMinutiae;\r\n\tunsigned int i,j;\r\n\tfloat *distances = malloc(sizeof(float)*n*(n-1)/2);\r\n\tif (distances == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tmemset(distances, 0, sizeof(float)*n*(n-1)/2);\r\n\tfor ( i = 0; i < n; i++ )\r\n\t\tfor ( j = i+1; j < n; j++ )\t{\r\n\t\t\t\t\r\n\t\t\t\tdistances[i*(2*n-1-i)/2+j-i-1] = sqrt( pow( (float)( finger->minutiae[i]->x - finger->minutiae[j]->x), 2 ) + \r\n\t\t\t\t\tpow( (float)( finger->minutiae[i]->y - finger->minutiae[j]->y), 2 ) );\r\n\t\t\t\t/* #ifdef DEBUG\r\n\t\t\t\t\tprintf(\"distances(%d,%d)=distances[%d])=%f\\n\",i,j,i*(2*n-1-i)/2+j-i-1,distances[i*(2*n-1-i)/2+j-i-1]);\r\n\t\t\t\t*/\r\n\t\t}\r\n\t*pDistances = distances;\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nunsigned char\tFindDirectionalNeighbours(\tFingerprint* finger , float* distances, unsigned char centerI, \r\n\t\t\t\t\t\t\t\t\t\t\tBkafisParams* params )\r\n{\r\n\tunsigned char\tmaxNeighbors = params->maxNeighbors;\r\n\tfloat\tminDistance = params->minDistance;\r\n\tfloat\tmaxDistance = params->maxDistance;\r\n\tfloat\tminDistances[maxNeighbors];\r\n\t\r\n\t/* đổi neighborIds và minNeighborI từ int -> unsigned char và khởi tạo là 255 ?? 
*/\r\n\tunsigned char\tminNeighborI[maxNeighbors];\r\n\tunsigned char\tneighborIds[maxNeighbors];\r\n\tfloat* \tphi;\r\n\tunsigned char\tnNeighbors = 0;\r\n\tunsigned char\tnIterations = 0;\r\n\t\r\n\t/* đổi sector từ int -> char ?? */\r\n\tunsigned char \tsector;\r\n\t/* đổi n và i từ int -> char ?? */\r\n\tunsigned char \tn = finger->nMinutiae;\r\n\tunsigned char\ti;\r\n\tfloat\tcenterAngle = finger->minutiae[centerI]->angle;\r\n\t\r\n\tphi = malloc( sizeof(float)*n ); /* angle between minutiae centerI & other minutiae */\r\n\t#ifdef DEBUG\r\n\t{\r\n\t\tprintf(\"start finding neighbors for %d\\tnMinutiae=%d, centerAngle=%f\\n\", centerI,n,centerAngle);\r\n\t\tsystem(\"pause\");\t/* testing ?? */\r\n\t}\r\n\t#endif\r\n\tmemset( phi, 0, sizeof(float)*n );\r\n\t\r\n\t/* khởi tạo là 255 thay vì -1 ?? */\r\n\t/* dùng memset ?? */\r\n\tfor ( i = 0; i < maxNeighbors; i++ )\tneighborIds[i] = 255;\r\n\t\r\n\twhile\t( (nNeighbors < maxNeighbors) && (nIterations < maxNeighbors) )\r\n\t{\r\n\t\t#ifdef DEBUG\r\n\t\t{\r\n\t\t\tprintf(\"Iteration=%d\\t nNeighbors=%d\\n\", nIterations, nNeighbors);\r\n\t\t\tsystem(\"pause\");\t/* testing ?? */\r\n\t\t}\r\n\t\t#endif\r\n\t\t\r\n\t\tnIterations++;\r\n\t\tmemset(minDistances, 0, sizeof(float)*maxNeighbors );\r\n\t\t\r\n\t\t/* dùng memset ?? */\r\n\t\tfor\t( i = 0; i < maxNeighbors; i++ )\tminNeighborI[i] = 255;\r\n\t\t\r\n\t\tfor\t( i = 0; i < n; i++ )\r\n\t\t{\r\n\t\t\tfloat\tdist;\r\n\t\t\t\r\n\t\t\tif\t( i == centerI )\tcontinue;\r\n\t\t\t\r\n\t\t\tdist = ( centerI < i ) ? 
distances[ centerI*(2*n-1-centerI)/2+i-centerI-1 ] : distances[ i*(2*n-1-i)/2+centerI-i-1 ];\r\n\t\t\t/* #ifdef DEBUG\r\n\t\t\t\tprintf(\"\\tMinutia=%d\\tDistance index=%d\\tdist=%f\\n\", i,centerI*(2*n-1-centerI)/2+i-centerI-1,dist);\r\n\t\t\t*/\r\n\t\t\t\r\n\t\t\t/* skip neighbours that are too far or too near the center minutia */\r\n\t\t\tif ( (dist < minDistance) || (dist > maxDistance) )\tcontinue;\r\n\t\t\t\r\n\t\t\t/* skip neighbors that have been chosen */\r\n\t\t\tunsigned char\tfound = 0;\r\n\t\t\tunsigned char\tj = 0;\r\n\t\t\twhile\t( (j < maxNeighbors) && !found )\r\n\t\t\t{\r\n\t\t\t\tif\t( neighborIds[j++] == i )\tfound = 1;\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tif\t(found)\tcontinue;\r\n\t\t\t\r\n\t\t\t/* calculate the angle of the vector connecting center minutia with minutia i */\r\n\t\t\tif\t( phi[i] == 0 )\r\n\t\t\t\tphi[i] = CalculateAngle(\tfinger->minutiae[centerI]->x, finger->minutiae[centerI]->y,\r\n\t\t\t\t\t\t\t\t\t\t\tfinger->minutiae[i]->x , finger->minutiae[i]->y );\r\n\t\t\t\r\n\t\t\tfloat\td_phi = ( phi[i] >= centerAngle ) ? phi[i]-centerAngle : 2*M_PI+phi[i]-centerAngle;\r\n sector = floor( maxNeighbors * (d_phi/(2*M_PI)) );\r\n\t\t\t\r\n\t\t\t#ifdef DEBUG\r\n\t\t\t{\r\n\t\t\t\tprintf(\"\\tMinutia=%d\\tDistance index=%d\\tdist=%f\\tphi=%f\\tdphi=%f\\tsector=%d\\tminDistance=%f\\n\", \r\n\t\t\t\t\ti,centerI*(2*n-1-centerI)/2+i-centerI-1,dist,phi[i],d_phi,sector, minDistances[sector]);\r\n\t\t\t\tsystem(\"pause\");\t/* testing ?? */\r\n\t\t\t}\r\n\t\t\t#endif\r\n\t\t\t\r\n\t\t\tif \t( minDistances[sector] == 0 )\r\n\t\t\t{\r\n\t\t\t\tminDistances[sector] = dist;\r\n\t\t\t\tminNeighborI[sector] = i ;\r\n\t\t\t\t/* bỏ lệnh count++ : thuật toán mới là chính xác ?? 
*/\r\n\t\t\t}\r\n\t\t\telse \r\n\t\t\t{\r\n\t\t\t\tif \t( minDistances[sector] > dist )\r\n\t\t\t\t{\r\n\t\t\t\t\tminDistances[sector] = dist;\r\n\t\t\t\t\tminNeighborI[sector] = i;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\tfor ( sector = 0; sector < maxNeighbors; sector++ )\r\n\t\t{\r\n\t\t\tif\t( nNeighbors == maxNeighbors )\tbreak;\r\n\t\t\t\r\n\t\t\tif\t( minNeighborI[sector] != 255 )\t/* đổi từ -1 sang 255 ?? */\r\n\t\t\t\tneighborIds[nNeighbors++] = minNeighborI[sector];\r\n\t\t}\r\n\t\t\t\r\n\t}\r\n\r\n\tif \t( nNeighbors == 0)\r\n\t{\r\n\t\tfree(phi);\r\n\t\treturn\tISO_SUCCESS;\r\n\t}\r\n\t\r\n\tNeighbor\t**neighborArray = malloc( sizeof(Neighbor*) * nNeighbors );\r\n\t\r\n\tif \t( neighborArray == NULL )\r\n\t{\r\n\t\tfree(phi);\r\n\t\treturn\tISO_GENERICERROR;\r\n\t}\r\n\t\r\n\tmemset( neighborArray, 0, sizeof(Neighbor*) * nNeighbors );\r\n\tNeighbor\t*pNeighbor;\r\n\t\r\n\t#ifdef DEBUG\r\n\t{\r\n\t\tprintf(\"%d neighbors of minutia %d:\", nNeighbors, centerI);\r\n\t\tsystem(\"pause\");\t/* testing ?? */\r\n\t}\r\n\t#endif\r\n\t\r\n\tfor\t( i = 0; i < nNeighbors; i++ )\r\n\t{\r\n\t\t#ifdef DEBUG\r\n\t\t{\r\n\t\t\tprintf(\"\\tNeighbor %d : %d\\n\", i, neighborIds[i]);\r\n\t\t\tsystem(\"pause\");\t/*\ttesting ?? */\r\n\t\t}\r\n\t\t#endif\r\n\t\t\r\n\t\tpNeighbor = malloc( sizeof(Neighbor) );\r\n\t\t\r\n\t\tif \t( pNeighbor != NULL )\r\n\t\t{\r\n\t\t\tmemset( pNeighbor, 0, sizeof(Neighbor) );\r\n\t\t\tpNeighbor->index = neighborIds[i];\r\n\t\t\tpNeighbor->ed = (centerI<neighborIds[i])?distances[centerI*(2*n-1-centerI)/2+neighborIds[i]-centerI-1]:\r\n\t\t\t\t\t\t\t\t\t\tdistances[neighborIds[i]*(2*n-1-neighborIds[i])/2+centerI-neighborIds[i]-1]; \r\n\t\t\t\r\n\t\t\t/* add code to calculate float/real oda, dra */\t\t\r\n\t\t\t\t\t\t\t\t\t \r\n\t\t\t/*\ttest dra ?? 
*/\r\n\t\t\tfloat\ttmp;\r\n\t\t\ttmp\t= atan2( \tfinger->minutiae[neighborIds[i]]->y - finger->minutiae[centerI]->y,\r\n\t\t\t\t\t\t\tfinger->minutiae[neighborIds[i]]->x - finger->minutiae[centerI]->x\t );\r\n\t\t\t/*\r\n\t\t\tprintf(\"a1 tmp = %f\\n\", tmp);\r\n\t\t\t*/\r\n\t\t\ttmp = ( tmp < 0 ) ? tmp+2*M_PI : tmp;\r\n\t\t\t\r\n\t\t\tpNeighbor->dra\t= (\ttmp >= \tfinger->minutiae[centerI]->angle ) \t\t\t?\r\n\t\t\t\t\t\t\t\ttmp - \tfinger->minutiae[centerI]->angle \t\t:\r\n\t\t\t\t\t\t\t\ttmp - \tfinger->minutiae[centerI]->angle + 2*M_PI\t;\r\n\t\t\t/* testing ?? */\r\n\t\t\t/*\r\n\t\t\tprintf(\"a2 tmp = %f\\n\", tmp);\r\n\t\t\tprintf(\"a2 centerI.angle = %f\\n\", finger->minutiae[centerI]->angle);\r\n\t\t\tprintf(\"a3 dra = %f\\n\", pNeighbor->dra);\r\n\t\t\t*/\r\n\t\t\t/* -- */\r\n\t\t\t\r\n\t\t\t/*\ttest oda ?? */\r\n\t\t\t/* báo lại vs thầy Minh hoán đổi vị trí */\r\n\t\t\ttmp =\tfinger->minutiae[neighborIds[i]]->angle\r\n\t\t\t\t - \tfinger->minutiae[centerI]\t\t ->angle;\r\n\t\t\t\r\n\t\t\tpNeighbor->oda \t = ( finger->minutiae[neighborIds[i]]->angle >= finger->minutiae[centerI]->angle )\r\n\t\t\t\t\t\t\t\t? tmp : 2*M_PI+tmp;\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t/*\ttesting ?? */\r\n\t\t\t/*\r\n\t\t\tprintf(\"b1 tmp = %f\\n\", tmp);\r\n\t\t\tprintf(\"b1 centerI.angle = %f\\n\", finger->minutiae[centerI]->angle);\r\n\t\t\tprintf(\"b1 neighbor.angle = %f\\n\", finger->minutiae[neighborIds[i]]->angle);\r\n\t\t\tprintf(\"b2 oda = %f\\n\", pNeighbor->oda);\r\n\t\t\t*/\r\n\t\t\t/* -- */\r\n\t\t\t\t\t\t\t\t\r\n\t\t\tneighborArray[i]=pNeighbor;\r\n\t\t\t\r\n\t\t\t/* lệnh này để giải phóng bộ nhớ ?? 
*/\r\n\t\t\tpNeighbor = NULL;\r\n\t\t}\r\n\t}\r\n\t\r\n\t#ifdef DEBUG \r\n\t\tprintf(\"\\n\");\r\n\t#endif\r\n\t\r\n\tfinger->minutiae[centerI]->nNeighbors = nNeighbors;\r\n\tfinger->minutiae[centerI]->neighbors = neighborArray;\r\n\t\r\n\tfree(phi);\r\n\t/* #ifdef DEBUG {\r\n\t\tunsigned char neighborI;\r\n\t\tfor (neighborI=0;neighborI<finger->minutiae[centerI]->nNeighbors;neighborI++)\r\n\t\t\tif (finger->minutiae[centerI]->neighbors[neighborI])\r\n\t\t\t\tprintf(\"\\t%d\\t%f\\t%f\\t%f\\t%d\\n\", \r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->index, \r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->ed, \r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->dra,\r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->oda,\r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->ridgeCount \r\n\t\t\t\t);\r\n\t\tprintf(\"\\n\");\r\n\t}*/\r\n\t\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\n/* Detect LDR */\r\nunsigned char\tDetectLDR( Fingerprint *finger, unsigned char centerI, unsigned char *neighbors, float *distances, unsigned char *ldr )\r\n{\r\n\tunsigned char\ti, j;\r\n\tunsigned char\tN = bkafisParams.ldrN\t ;\r\n\tunsigned char\tNUM = bkafisParams.ldrNum;\r\n\tunsigned char \tPOS = bkafisParams.ldrPos;\r\n\tfloat\t\t\tDIR = bkafisParams.ldrDir;\r\n\tfloat\t\t\tavr_angle_diff = 0;\r\n\t\r\n\t\r\n\tfor\t( i = 0; i < N; i++ )\r\n\t{\r\n\t\tfloat\ttmp_diff = fabs( \tfinger->minutiae[centerI]->angle\r\n\t\t\t\t\t\t\t\t - \tfinger->minutiae[neighbors[i]]->angle );\r\n\t\t\r\n\t\tavr_angle_diff \t+=\t( tmp_diff < 2*M_PI-tmp_diff ) ? 
tmp_diff : 2*M_PI-tmp_diff;\t\t\r\n\t}\r\n\t\r\n\tavr_angle_diff = avr_angle_diff / N;\r\n\t\r\n\t/* \r\n\tprintf(\"Minutia %d:\\tavr_angle_diff = %f vs %f\\n\", centerI, avr_angle_diff, M_PI/4);\r\n\tsystem(\"pause\");\r\n\t*/\r\n\t\r\n\tif\t( avr_angle_diff < M_PI/4 )\r\n\t{\r\n\t\tunsigned char\t*queue;\r\n\t\tunsigned char\t*stack;\r\n\t\tunsigned char\tqueue_size = 0;\r\n\t\tunsigned char \tstack_size = 0;\r\n\t\t\r\n\t\tqueue = malloc(sizeof(unsigned char)*finger->nMinutiae);\r\n\t\tstack = malloc(sizeof(unsigned char)*finger->nMinutiae);\r\n\t\t\r\n\t\tif\t( queue == NULL && stack == NULL )\r\n\t\t{\r\n\t\t\tprintf(\"Error\\n\");\r\n\t\t\texit(-1);\r\n\t\t}\r\n\t\t\r\n\t\tmemset( queue, 255, sizeof(unsigned char)*finger->nMinutiae );\r\n\t\tmemset( stack, 255, sizeof(unsigned char)*finger->nMinutiae );\r\n\t\t\r\n\t\tqueue[ queue_size++ ] = centerI;\r\n\t\tstack[ stack_size++ ] = centerI;\r\n\t\t\r\n\t\t/* \r\n\t\tprintf(\"in_if_1\");\r\n\t\tprintf(\"queue_size = %d\\n\", queue_size);\r\n\t\tprintf(\"stack_size = %d\\n\", stack_size);\r\n\t\t*/\r\n\t\t\r\n\t\twhile\t( stack_size != 0 )\r\n\t\t{\r\n\t\t\tunsigned char\tcenter = stack[ --stack_size ];\r\n\t\t\tunsigned char tmp_neighbors[ N ];\r\n\t\t\t\r\n\t\t\tstack[ stack_size ] = 255;\r\n\t\t\t\r\n\t\t\t/*\r\n\t\t\tprintf(\"in while\\n\");\r\n\t\t\tprintf(\"center = %d\\n\", center);\r\n\t\t\tprintf(\"queue_size = %d\\n\", queue_size);\r\n\t\t\tprintf(\"stack_size = %d\\n\", stack_size);\r\n\t\t\tsystem(\"pause\");\r\n\t\t\t*/\r\n\t\t\t\r\n\t\t\tfor\t( i = 0; i < N; i++ )\r\n\t\t\t{\r\n\t\t\t\tunsigned char\tfinish = 0;\r\n\t\t\t\tunsigned char \ttmp_index = neighbors[i];\r\n\t\t\t\tfloat\t\t\ttmp_Ed, tmp_Angle;\r\n\t\t\t\t\r\n\t\t\t\t/*\r\n\t\t\t\tprintf(\"i = %d\\n\", i);\r\n\t\t\t\tsystem(\"pause\");\r\n\t\t\t\t*/\r\n\t\t\t\t\r\n\t\t\t\tfor\t( j = 0; j < queue_size; j++)\r\n\t\t\t\t\tif\t( tmp_index == queue[j] )\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\tfinish = 
1;\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t\t\t\r\n\t\t\t\t/*\r\n\t\t\t\tprintf(\"finish = %d\\n\", finish);\r\n\t\t\t\t*/\r\n\t\t\t\t\r\n\t\t\t\tif\t( finish == 1 )\tcontinue;\r\n\t\t\t\t\r\n\t\t\t\ttmp_Ed =\t( center < tmp_index ) \r\n\t\t\t\t\t\t\t? distances[ center *(2*finger->nMinutiae-1-center )/2+tmp_index-center -1 ]\r\n\t\t\t\t\t\t\t: distances[ tmp_index*(2*finger->nMinutiae-1-tmp_index)/2+center -tmp_index-1 ];\r\n\t\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\tfloat\ttmp_angle = fabs( \tfinger->minutiae[center] ->angle\r\n\t\t\t\t\t\t\t\t\t\t - \tfinger->minutiae[tmp_index]->angle );\r\n\t\t\t\ttmp_Angle = ( tmp_angle < 2*M_PI - tmp_angle ) \t\r\n\t\t\t\t\t\t\t? tmp_angle\r\n\t\t\t\t\t\t\t: 2*M_PI - tmp_angle;\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t/* \r\n\t\t\t\tprintf(\"tmp_Ed = %f vs %d\\ttmp_Angle = %f vs %f\\n\", tmp_Ed, POS, tmp_Angle, DIR);\r\n\t\t\t\tsystem(\"pause\");\r\n\t\t\t\t*/\r\n\t\t\t\t\r\n\t\t\t\tif \t( tmp_Ed < POS && tmp_Angle < DIR )\r\n\t\t\t\t{\r\n\t\t\t\t\tqueue[ queue_size++ ] = tmp_index;\r\n\t\t\t\t\tstack[ stack_size++ ] = tmp_index;\r\n\t\t\t\t\t/*\r\n\t\t\t\t\tprintf(\"In_if_2\\n\");\r\n\t\t\t\t\tprintf(\"queue_size = %d\\tstack_size = %d\\ttmp_index = %d\\n\", queue_size, stack_size, tmp_index);\r\n\t\t\t\t\t*/\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\t/*\r\n\t\tprintf(\"queue_size = %d\\tNUM = %d\\n\", queue_size, NUM);\r\n\t\t*/\r\n\t\t\r\n\t\tif\t( queue_size > NUM )\r\n\t\t\tfor\t( j = 0; j < queue_size; j++ )\r\n\t\t\t{\r\n\t\t\t\t/*\r\n\t\t\t\tprintf(\"queue[%d] = %d\\n\", queue_size, queue[j]);\r\n\t\t\t\t*/\r\n\t\t\t\tldr[ queue[j] ] = ( ldr[ queue[j] ] > queue_size ) ? 
ldr[ queue[j] ] : queue_size;\r\n\t\t\t}\t\t\r\n\t}\r\n\t\r\n\treturn\tldr[centerI];\r\n}" }, { "alpha_fraction": 0.6500720977783203, "alphanum_fraction": 0.6572421193122864, "avg_line_length": 38.44169235229492, "blob_id": "785ea36e74848e2b13691ed7f2608ffe0f05e1ac", "content_id": "c85bcf0ca10aca6c51b30f9127eed442867d21ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 27057, "license_type": "no_license", "max_line_length": 80, "num_lines": 686, "path": "/bkafis/pcasys/src/bin/optrws/optrws.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*******************************************************************************\n\nLicense: \nThis software and/or related materials was developed at the National Institute\nof Standards and Technology (NIST) by employees of the Federal Government\nin the course of their official duties. Pursuant to title 17 Section 105\nof the United States Code, this software is not subject to copyright\nprotection and is in the public domain. \n\nThis software and/or related materials have been determined to be not subject\nto the EAR (see Part 734.3 of the EAR for exact details) because it is\na publicly available technology and software, and is freely distributed\nto any interested party with no licensing requirements. Therefore, it is \npermissible to distribute this software as a free download from the internet.\n\nDisclaimer: \nThis software and/or related materials was developed to promote biometric\nstandards and biometric technology testing for the Federal Government\nin accordance with the USA PATRIOT Act and the Enhanced Border Security\nand Visa Entry Reform Act. 
Specific hardware and software products identified\nin this software were used in order to perform the software development.\nIn no case does such identification imply recommendation or endorsement\nby the National Institute of Standards and Technology, nor does it imply that\nthe products and equipment identified are necessarily the best available\nfor the purpose.\n\nThis software and/or related materials are provided \"AS-IS\" without warranty\nof any kind including NO WARRANTY OF PERFORMANCE, MERCHANTABILITY,\nNO WARRANTY OF NON-INFRINGEMENT OF ANY 3RD PARTY INTELLECTUAL PROPERTY\nor FITNESS FOR A PARTICULAR PURPOSE or for any purpose whatsoever, for the\nlicensed product, however used. In no event shall NIST be liable for any\ndamages and/or costs, including but not limited to incidental or consequential\ndamages of any kind, including economic damage or injury to property and lost\nprofits, regardless of whether NIST shall be advised, have reason to know,\nor in fact shall know of the possibility.\n\nBy using this software, you agree to bear all risk relating to quality,\nuse and performance of the software and/or related materials. You agree\nto hold the Government harmless from any claim arising from your use\nof the software.\n\n*******************************************************************************/\n\n/************************************************************************\n\n PACKAGE: PCASYS TOOLS\n\n FILE: OPTRWS.C\n\n AUTHORS: Craig Watson\n [email protected]\n G. T. Candela\n DATE: 08/01/1995\n UPDATED: 05/09/2005 by MDG\n UPDATED: 09/30/2008 by Kenenth Ko - add version option.\n\n#cat: optrws - Optimizes the regional weights that are then applied\n#cat: to the eigen vectors.\n\nOptimizes the regional weights. These weights form a hxw array,\nwith each weight being associated with one 2x2-vector block of\norientation vectors from the (2*w)x(2*h)-vector orientation array. 
The use\nof the weights is that, in effect (up to an approximation), the\nappropriate elements of orientation arrays get multiplied by the\nweights before the computation of Euclidean distances.\n\nOptimization is done in the sense of minimizing the activation error\nrate that results when a set of fingerprints is classified, by a\nProbabilistic Neural Net (PNN) that uses the same set of fingerprints\nas prototypes, with leave-one-out, i.e. the particular fingerprint\nbeing classified is omitted from the prototypes set each time. The\nactivation error rate is a function of the regional weights. Each\ntime a particular set of regional weights is to be tried, the K-L\nfeature vectors first transformed to make temporary new feature\nvectors that incorporate these regional weights and then the PNN is\nrun using the temporary feature vectors. The activation error rate\nused is the average, over the tuning set, of the squared difference\nbetween 1 and the normalized PNN activation of the actual class. The\noptimization method used is a very simple form of gradient descent.\n\nAt the \"basepoints\" along the optimization, the program records\nthe following, as files in outfiles_dir:\n The basepoints, as \"matrix\" file (dimensions h x w) bspt_0.bin,\n bspt_1.bin, etc. or bspt_0.asc, bspt_1.asc, etc. (suffix indicates\n binary or ascii, which is decided by the ascii_outfiles parm).\n The estimated gradients at the basepoints, as \"matrix\" files (also\n dimensions h x w) egrad_0.bin, etc. or egrad_0.asc, etc.\n The activation error rates at the basepoints, as text files\n acerr_0.txt, etc.\n\nWhen this program needs to compute error values at a set of points\nnear a basepoint, in order to compute the estimated gradient, it can\nstart several processes, which run simultaneously and each do part of\nthe work. If several processors are available, using this feature may\nsave a considerable amount of time. 
To use this, cause your parms\nfile to set acerror_stepped_points_nprocs (number of processes to use\nwhen estimating gradient) to a value > 1; the value probably should be\n<= number of processors available. If the operating system on your\ncomputer does not have the fork() system call and the execl() function\n(e.g., DOS), then optrws.c should be compiled with NO_FORK_AND_EXECL\ndefined; this can be done by modifying the optrws Makefile so that the\ncc (C compiler) command uses -DNO_FORK_AND_EXECL as an additional\nargument. If optrws.c is compiled this way, the part of the code that\nuses fork and execl, i.e. the part that usually is run if\nacerror_stepped_points_nprocs > 1, will not be compiled, and a bit of\ncode will be compiled that causes the program to print an error\nmessage and exit if acerror_stepped_points_nprocs > 1.\n\n*************************************************************************/\n\n/* <fixup.h> must be near the top of the list of includes; otherwise,\n this source will not compile. 
*/\n#include <stdio.h>\n#include <stdlib.h>\n#include <fixup.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#ifndef __MSYS__\n#include <sys/wait.h>\n#endif\n#include <unistd.h>\n#include <string.h>\n#include <math.h>\n#include <little.h>\n#include <usagemcs.h>\n#include <table.h>\n#include <optrws_r.h>\n#include <pca.h>\n#include <datafile.h>\n#include <ioutil.h>\n#include <version.h>\n\n\nstatic FILE *fp_messages;\nstatic int verbose_int;\n\nstatic struct {\n char n_feats_use, n_klfvs_use, irw_init, irw_initstep, irw_stepthr,\n grad_est_stepsize, n_linesearches, linesearch_initstep,\n linesearch_stepthr, tablesize, acerror_stepped_points_nprocs,\n verbose, klfvs_file, classes_file, eigvecs_file,\n outfiles_dir, ascii_outfiles;\n} setflags;\n\nvoid optrws_read_parms(char [], int *, int *, float *, float *, float *,\n float *, int *, float *, float *, int *, int *, int *,\n char [], char [], char [], char [], int *);\nvoid optrws_check_parms_allset(void);\nvoid message_prog(char []);\n\nint main(int argc, char *argv[])\n{\n FILE *fp;\n char str[400], *prsfile, *datadir, *desc, klfvs_file[200],\n klfvs_file_tf[200], classes_file[200], classes_file_tf[200],\n eigvecs_file[200], eigvecs_file_tf[200], outfiles_dir[200],\n outfiles_dir_tf[200], optrwsgw_path[200], rws_bspt_file[200],\n rws_bspt_file_full_nbytes_str[6], n_feats_use_str[4],\n n_klfvs_use_str[7], seg_start_str[4], seg_end_str[4],\n grad_est_stepsize_str[40], temp_outfile[200];\n unsigned char *classes;\n int n_feats_use, n_klfvs_use, n_linesearches,\n acerror_stepped_points_nprocs, i, ibspt, ascii_outfiles_int,\n tablesize, optrws_pid = 0, *cproc_pids, pid, base_seg_size,\n n_larger_segs, iproc, seg_size, seg_start, seg_end, nleft, ret,\n fd, rws_bspt_file_full_nbytes, temp_outfile_full_nbytes;\n float *klfvs, *eigvecs, irw_init, irw_initstep, irw_stepthr,\n grad_est_stepsize, linesearch_initstep, linesearch_stepthr,\n egrad_slen, egrad_len, *acerrors_stepped, irw, irw_step,\n irw_prev = 0, acerror, 
acerror_prev, acerror_bspt, *rws,\n *rws_bspt, *egrad, *dh_uvec, dhdist, linesearch_step, dhdist_prev = 0;\n TABLE table;\n int j, n_feats, evt_sz, w, h, n_cls, rwsz;\n char **lcnptr;\n\n if ((argc == 2) && (strcmp(argv[1], \"-version\") == 0)) {\n getVersion();\n exit(0);\n }\n\n Usage(\"<prsfile>\");\n prsfile = *++argv; /* required user parms file */\n /* Read parameters, first from default optrws parms file and then\n from user parms file. Then, check that no parm was left unset. */\n memset(&setflags, 0, sizeof(setflags));\n\n#ifdef __MSYS__\n sprintf(str, \"./optrws.prs\");\n#else\n datadir = get_datadir();\n sprintf(str, \"%s/parms/optrws.prs\", datadir);\n#endif\n \n optrws_read_parms(str, &n_feats_use, &n_klfvs_use, &irw_init,\n &irw_initstep, &irw_stepthr, &grad_est_stepsize, &n_linesearches,\n &linesearch_initstep, &linesearch_stepthr, &tablesize,\n &acerror_stepped_points_nprocs, &verbose_int, klfvs_file,\n classes_file, eigvecs_file, outfiles_dir, &ascii_outfiles_int);\n optrws_read_parms(prsfile, &n_feats_use, &n_klfvs_use, &irw_init,\n &irw_initstep, &irw_stepthr, &grad_est_stepsize, &n_linesearches,\n &linesearch_initstep, &linesearch_stepthr, &tablesize,\n &acerror_stepped_points_nprocs, &verbose_int, klfvs_file,\n classes_file, eigvecs_file, outfiles_dir, &ascii_outfiles_int);\n optrws_check_parms_allset();\n\n w = ((WIDTH/WS)-2)/2;\n h = ((HEIGHT/WS)-2)/2;\n rwsz = w*h;\n if(!(1 <= acerror_stepped_points_nprocs &&\n acerror_stepped_points_nprocs <= rwsz)) {\n sprintf(str, \"acerror_stepped_points_nprocs is %d; must have\\n\\\n1 <= acerror_stepped_points_nprocs <= %d\",\n acerror_stepped_points_nprocs, rwsz);\n fatalerr(\"optrws\", str, NULL);\n }\n\n if(acerror_stepped_points_nprocs > 1) {\n#ifdef NO_FORK_AND_EXECL\n fatalerr(\"optrws\", \"in this no-fork-and-execl version, \\\nacerror_stepped_points_nprocs must be 1\", NULL);\n#endif\n sprintf(optrwsgw_path, \"%s/optrwsgw\", \"\");\n optrws_pid = getpid();\n }\n\n strcpy(outfiles_dir_tf, 
tilde_filename(outfiles_dir, 0));\n strcpy(klfvs_file_tf, tilde_filename(klfvs_file, 0));\n strcpy(classes_file_tf, tilde_filename(classes_file, 0));\n strcpy(eigvecs_file_tf, tilde_filename(eigvecs_file, 0));\n\n#ifdef __MSYS__\n mkdir(outfiles_dir_tf);\n#else\n mkdir(outfiles_dir_tf, 0700);\n#endif\n\n sprintf(str, \"%s/messages.txt\", outfiles_dir_tf);\n fp_messages = fopen_ch(str, \"w\");\n\n /* Read data: K-L feature vectors and their classes, and\n eigenvectors. */\n message_prog(\"read K-L feature vectors, classes, and \\\neigenvectors\\n\");\n matrix_read_submatrix(klfvs_file_tf, 0, n_klfvs_use - 1, 0,\n n_feats_use - 1, &desc, &klfvs);\n free(desc);\n classes_read_subvector_ind(classes_file_tf, 0, n_klfvs_use - 1, &desc,\n &classes, &n_cls, &lcnptr);\n free(desc);\n free_dbl_char(lcnptr, n_cls);\n matrix_read_dims(eigvecs_file_tf, &n_feats, &evt_sz);\n if(8*rwsz != evt_sz)\n fatalerr(\"optrws\",\"8*rwsz != evt_sz\",\"sizes are incompatible\");\n matrix_read_submatrix(eigvecs_file_tf, 0, n_feats_use - 1, 0, evt_sz-1,\n &desc, &eigvecs);\n free(desc);\n\n /* A simple linearly searched table, which will be used, when\n optimizing irw and during line searches in the main optimization,\n to look up previous results and thereby avoid some redoing of\n error computations. */\n table_init(&table, tablesize);\n\n /* Optimize irw (initial regional weight), an initial value to\n which to set all the regional weights at the start (later) of their\n optimization as separate weights. This is done by using a single\n factor (squared) for the pnn and optimizing this factor: that is\n approximately equivalent to using the factor for all weights. 
*/\n message_prog(\"optimize irw (initial value for all \\\nregional weights)\\n\");\n acerror_prev = optrws_pnn_acerror(n_feats_use, n_klfvs_use, klfvs,\n classes, irw_init * irw_init, n_cls);\n sprintf(str, \"irw %f, acerror %f\\n\", irw_init, acerror_prev);\n message_prog(str);\n table_store(&table, irw_init, acerror_prev);\n for(irw = irw_init + (irw_step = irw_initstep); ; irw += irw_step) {\n if(!table_lookup(&table, irw, &acerror)) {\n acerror = optrws_pnn_acerror(n_feats_use, n_klfvs_use, klfvs,\n classes, irw * irw, n_cls);\n table_store(&table, irw, acerror);\n }\n sprintf(str, \"irw %f, acerror %f\\n\", irw, acerror);\n message_prog(str);\n if(acerror >= acerror_prev) {\n if(fabs((double)irw_step) <= irw_stepthr)\n\tbreak;\n irw_step /= -2;\n }\n irw_prev = irw;\n acerror_prev = acerror;\n }\n table_clear(&table);\n\n /* The main part of the optimization of the regional weights. Uses\n a simple form of gradient descent, which appears to be sufficient\n for this task, although it may be compute-intensive. */\n /* Duplicate the best irw, which was just found, into all weights,\n forming the 0'th \"basepoint\" for the subsequent optimization of\n the weights. Write 0'th basepoint. */\n malloc_flt(&rws_bspt, rwsz, \"optrws rws_bspt\");\n for(i = 0; i < rwsz; i++)\n rws_bspt[i] = irw_prev;\n sprintf(rws_bspt_file, \"%s/bspt_0.%s\", outfiles_dir_tf,\n ascii_outfiles_int ? \"asc\" : \"bin\");\n rws_bspt_file_full_nbytes = matrix_write(rws_bspt_file, \"\",\n ascii_outfiles_int, h, w, rws_bspt);\n sprintf(rws_bspt_file_full_nbytes_str, \"%d\",\n rws_bspt_file_full_nbytes);\n\n /* Compute and write activation error rate at 0'th basepoint. 
*/\n acerror_bspt = rws_to_acerror(rws_bspt, w, h, eigvecs, evt_sz, n_feats_use,\n n_klfvs_use, klfvs, classes, n_cls);\n sprintf(str, \"acerror_bspt %f\\n\", acerror_bspt);\n message_prog(str);\n sprintf(str, \"%s/acerr_0.txt\", outfiles_dir_tf);\n fp = fopen_ch(str, \"w\");\n fprintf(fp, \"%f\\n\", acerror_bspt);\n fclose(fp);\n\n /* Do n_linesearches iterations of {estimate gradient; line search\n along resulting downhill-pointing line}. */\n\n malloc_flt(&acerrors_stepped, rwsz, \"optrws acerrors_stepped\");\n malloc_flt(&rws, rwsz, \"optrws rws\");\n malloc_flt(&egrad, rwsz, \"optrws egrad\");\n malloc_flt(&dh_uvec, rwsz, \"optrws dh_uvec\");\n for(ibspt = 0; ibspt < n_linesearches; ibspt++) {\n sprintf(str, \"ibspt %d\\n\", ibspt);\n message_prog(str);\n\n /* Compute error values at points stepped to from the basepoint\n along the coordinate axes. These will be used to estimate the\n gradient. */\n message_prog(\"compute error at stepped-to points\\n\");\n if(acerror_stepped_points_nprocs == 1)\n /* Use one process, namely this process. */\n for(i = 0; i < rwsz; i++) {\n for(j = 0; j < rwsz; j++)\n rws[j] = rws_bspt[j];\n\trws[i] += grad_est_stepsize;\n\tacerrors_stepped[i] = rws_to_acerror(rws, w, h, eigvecs, evt_sz,\n n_feats_use, n_klfvs_use, klfvs, classes, n_cls);\n\tsprintf(str, \"i = %d; acerrors_stepped[i] = %f\\n\", i,\n acerrors_stepped[i]);\n\tmessage_prog(str);\n }\n else {\n#ifndef NO_FORK_AND_EXECL\n /* Use several processes (instances of the optrwsgw program) to\n compute the error values at the stepped-to points. Divide\n the rwsz points into acerror_stepped_points_nprocs approximately\n equal segments and assign one segment to each process. */\n\n /* Start processes (child processes). 
*/\n message_prog(\"start child processes\\n\");\n base_seg_size = rwsz / acerror_stepped_points_nprocs;\n n_larger_segs = rwsz % acerror_stepped_points_nprocs;\n malloc_int(&cproc_pids, rwsz, \"optrws cprod_pids\");\n for(iproc = seg_start = 0; iproc <\n acerror_stepped_points_nprocs; iproc++, seg_start = seg_end) {\n\tseg_size = (iproc < n_larger_segs ? base_seg_size + 1 :\n base_seg_size);\n\tseg_end = seg_start + seg_size;\n\tret = fork();\n\tif(ret) /* Still this process; ret is process id of\n child process */\n\t cproc_pids[iproc] = ret;\n\telse { /* Child process. Run an instance of optrwsgw. */\n\t sprintf(n_feats_use_str, \"%d\", n_feats_use);\n\t sprintf(n_klfvs_use_str, \"%d\", n_klfvs_use);\n\t sprintf(seg_start_str, \"%d\", seg_start);\n\t sprintf(seg_end_str, \"%d\", seg_end);\n\t sprintf(grad_est_stepsize_str, \"%f\", grad_est_stepsize);\n\t sprintf(temp_outfile, \"/tmp/optrwsgw_optrws-pid-%d_%d\",\n optrws_pid, iproc);\n\t execl(optrwsgw_path, \"optrwsgw\", n_feats_use_str,\n n_klfvs_use_str, klfvs_file_tf, classes_file_tf,\n eigvecs_file_tf, rws_bspt_file,\n rws_bspt_file_full_nbytes_str, seg_start_str, seg_end_str,\n grad_est_stepsize_str, temp_outfile, (char *)0);\n\t /* If control gets here, execl has failed: */\n\t perror(\"execl\");\n\t exit(1);\n\t}\n }\n\n /* Wait for all child processes to finish. Check pids returned\n by wait against list of child process pids, since wait returns\n can be caused by events other than child process exits. */\n message_prog(\"wait for all child processes to exit\\n\");\n for(nleft = acerror_stepped_points_nprocs; nleft;) {\n\tpid = wait(NULL);\n\tfor(iproc = 0; iproc < acerror_stepped_points_nprocs; iproc++)\n\t if(pid == cproc_pids[iproc]) {\n\t nleft--;\n\t break;\n\t }\n }\n free(cproc_pids);\n\n /* Read (and remove) the temporary output files of the child\n processes, thereby building the complete vector of error values\n at all rwsz stepped-to points. 
Before reading any file here,\n make sure it has the expected number of bytes. */\n message_prog(\"read output files of child processes\\n\");\n for(iproc = seg_start = 0; iproc <\n acerror_stepped_points_nprocs; iproc++, seg_start = seg_end) {\n\tseg_size = (iproc < n_larger_segs ? base_seg_size + 1 :\n base_seg_size);\n\tseg_end = seg_start + seg_size;\n\tsprintf(temp_outfile, \"/tmp/optrwsgw_optrws-pid-%d_%d\",\n optrws_pid, iproc);\n\ttemp_outfile_full_nbytes = seg_size * sizeof(float);\n\twhile(filesize(temp_outfile) != temp_outfile_full_nbytes)\n\t sleep(1);\n\tfd = open_read_ch(temp_outfile);\n\tread(fd, (float *)acerrors_stepped + seg_start,\n temp_outfile_full_nbytes);\n\tclose(fd);\n\tunlink(temp_outfile);\n }\n#endif\n }\n\n /* From error values at stepped-to points, compute estimated\n gradient (secant method) and its length, whence unit-length\n (estimated-)downhill-pointing vector. */\n message_prog(\"compute estimated gradient, its length, \\\nwhence dh_uvec\\n\");\n for(i = 0, egrad_slen = 0.; i < rwsz; i++) {\n egrad[i] = (acerrors_stepped[i] - acerror_bspt) /\n grad_est_stepsize;\n egrad_slen += egrad[i] * egrad[i];\n }\n egrad_len = sqrt((double)egrad_slen);\n /* Estimated-downhill-pointing unit-length vector: */\n for(i = 0; i < rwsz; i++)\n dh_uvec[i] = -egrad[i] / egrad_len;\n\n /* Write estimated gradient. */\n sprintf(str, \"%s/egrad_%d.%s\", outfiles_dir_tf, ibspt,\n ascii_outfiles_int ? \"asc\" : \"bin\");\n matrix_write(str, \"\", ascii_outfiles_int, h, w, egrad);\n\n /* Use a very simple line search method, which appears to be\n sufficient for this particular task, to approximately find the\n minimum, or at least, a local minimum, along the downhill-pointing\n line. Resulting point will be the next basepoint. 
*/\n message_prog(\"line search:\\n\");\n acerror_prev = acerror_bspt;\n sprintf(str, \" acerror_prev %f\\n\", acerror_prev);\n message_prog(str);\n for(dhdist = linesearch_step = linesearch_initstep; ; dhdist +=\n linesearch_step) {\n if(!table_lookup(&table, dhdist, &acerror)) {\n\tfor(i = 0; i < rwsz; i++)\n\t rws[i] = rws_bspt[i] + dhdist * dh_uvec[i];\n\tacerror = rws_to_acerror(rws, w, h, eigvecs, evt_sz, n_feats_use,\n n_klfvs_use, klfvs, classes, n_cls);\n\ttable_store(&table, dhdist, acerror);\n }\n sprintf(str, \" dhdist %f, acerror %f\\n\", dhdist, acerror);\n message_prog(str);\n if(acerror >= acerror_prev) {\n\tif(fabs((double)linesearch_step) <= linesearch_stepthr)\n\t break;\n\tlinesearch_step /= -2;\n }\n dhdist_prev = dhdist;\n acerror_prev = acerror;\n }\n table_clear(&table);\n for(i = 0; i < rwsz; i++)\n rws_bspt[i] += dhdist_prev * dh_uvec[i];\n\n /* Write next basepoint. */\n sprintf(rws_bspt_file, \"%s/bspt_%d.%s\",\n outfiles_dir_tf, ibspt + 1, ascii_outfiles_int ?\n \"asc\" : \"bin\");\n rws_bspt_file_full_nbytes = matrix_write(rws_bspt_file, \"\",\n ascii_outfiles_int, h, w, rws_bspt);\n sprintf(rws_bspt_file_full_nbytes_str, \"%d\",\n rws_bspt_file_full_nbytes);\n\n /* Compute and write activation error rate at next basepoint. */\n acerror_bspt = rws_to_acerror(rws_bspt, w, h, eigvecs, evt_sz, n_feats_use,\n n_klfvs_use, klfvs, classes, n_cls);\n sprintf(str, \"acerror_bspt for basepoint %d: %f\\n\",\n ibspt + 1, acerror_bspt);\n message_prog(str);\n sprintf(str, \"%s/acerr_%d.txt\", outfiles_dir_tf, ibspt + 1);\n fp = fopen_ch(str, \"w\");\n fprintf(fp, \"%f\\n\", acerror_bspt);\n fclose(fp);\n }\n free(rws_bspt);\n free(rws);\n free(egrad);\n free(dh_uvec);\n free(acerrors_stepped);\n\n exit(0);\n}\n\n/********************************************************************/\n\n/* Reads an optrws parms file. 
*/\n\nvoid optrws_read_parms(char parmsfile[], int *n_feats_use, int *n_klfvs_use,\n float *irw_init, float *irw_initstep, float *irw_stepthr,\n float *grad_est_stepsize, int *n_linesearches,\n float *linesearch_initstep, float *linesearch_stepthr,\n int *tablesize, int *acerror_stepped_points_nprocs,\n int *verbose_int, char klfvs_file[], char classes_file[],\n char eigvecs_file[], char outfiles_dir[], int *ascii_outfiles_int)\n{\n FILE *fp;\n char str[1000], *p, name_str[50], val_str[1000];\n\n fp = fopen_ch(parmsfile, \"r\");\n while(fgets(str, 1000, fp)) {\n if((p = strchr(str, '#')))\n *p = 0;\n if(sscanf(str, \"%s %s\", name_str, val_str) < 2)\n continue;\n\n if(!strcmp(name_str, \"n_feats_use\")) {\n *n_feats_use = atoi(val_str);\n setflags.n_feats_use = 1;\n }\n else if(!strcmp(name_str, \"n_klfvs_use\")) {\n *n_klfvs_use = atoi(val_str);\n setflags.n_klfvs_use = 1;\n }\n else if(!strcmp(name_str, \"irw_init\")) {\n *irw_init = atof(val_str);\n setflags.irw_init = 1;\n }\n else if(!strcmp(name_str, \"irw_initstep\")) {\n *irw_initstep = atof(val_str);\n setflags.irw_initstep = 1;\n }\n else if(!strcmp(name_str, \"irw_stepthr\")) {\n *irw_stepthr = atof(val_str);\n setflags.irw_stepthr = 1;\n }\n else if(!strcmp(name_str, \"grad_est_stepsize\")) {\n *grad_est_stepsize = atof(val_str);\n setflags.grad_est_stepsize = 1;\n }\n else if(!strcmp(name_str, \"n_linesearches\")) {\n *n_linesearches = atoi(val_str);\n setflags.n_linesearches = 1;\n }\n else if(!strcmp(name_str, \"linesearch_initstep\")) {\n *linesearch_initstep = atof(val_str);\n setflags.linesearch_initstep = 1;\n }\n else if(!strcmp(name_str, \"linesearch_stepthr\")) {\n *linesearch_stepthr = atof(val_str);\n setflags.linesearch_stepthr = 1;\n }\n else if(!strcmp(name_str, \"tablesize\")) {\n *tablesize = atoi(val_str);\n setflags.tablesize = 1;\n }\n else if(!strcmp(name_str, \"acerror_stepped_points_nprocs\")) {\n *acerror_stepped_points_nprocs = atoi(val_str);\n 
setflags.acerror_stepped_points_nprocs = 1;\n }\n else if(!strcmp(name_str, \"verbose\")) {\n if(!strcmp(val_str, \"y\"))\n\t*verbose_int = 1;\n else if(!strcmp(val_str, \"n\"))\n\t*verbose_int = 0;\n else\n\tfatalerr(\"optrws_read_parms (file optrws.c)\", \"parm verbose \\\nmust be y or n\", NULL);\n setflags.verbose = 1;\n }\n else if(!strcmp(name_str, \"klfvs_file\")) {\n strcpy(klfvs_file, val_str);\n setflags.klfvs_file = 1;\n }\n else if(!strcmp(name_str, \"classes_file\")) {\n strcpy(classes_file, val_str);\n setflags.classes_file = 1;\n }\n else if(!strcmp(name_str, \"eigvecs_file\")) {\n strcpy(eigvecs_file, val_str);\n setflags.eigvecs_file = 1;\n }\n else if(!strcmp(name_str, \"outfiles_dir\")) {\n strcpy(outfiles_dir, val_str);\n setflags.outfiles_dir = 1;\n }\n else if(!strcmp(name_str, \"ascii_outfiles\")) {\n if(!strcmp(val_str, \"y\"))\n\t*ascii_outfiles_int = 1;\n else if(!strcmp(val_str, \"n\"))\n\t*ascii_outfiles_int = 0;\n else\n\tfatalerr(\"optrws_read_parms (file optrws.c)\", \"parm \\\nascii_outfiles must be y or n\", NULL);\n setflags.ascii_outfiles = 1;\n }\n\n else\n fatalerr(\"optrws_read_parms (file optrws.c)\",\n \"illegal parm name\", name_str);\n }\n}\n\n/********************************************************************/\n\n/* Checks that all parms are set. 
*/\n\nvoid optrws_check_parms_allset()\n{\n if(!setflags.n_feats_use)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nn_feats_use was never set\", NULL);\n if(!setflags.n_klfvs_use)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nn_klfvs_use was never set\", NULL);\n if(!setflags.irw_init)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nirw_init was never set\", NULL);\n if(!setflags.irw_initstep)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nirw_initstep was never set\", NULL);\n if(!setflags.irw_stepthr)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nirw_stepthr was never set\", NULL);\n if(!setflags.grad_est_stepsize)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\ngrad_est_stepsize was never set\", NULL);\n if(!setflags.n_linesearches)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nn_linesearches was never set\", NULL);\n if(!setflags.linesearch_initstep)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nlinesearch_initstep was never set\", NULL);\n if(!setflags.linesearch_stepthr)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nlinesearch_stepthr was never set\", NULL);\n if(!setflags.tablesize)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\ntablesize was never set\", NULL);\n if(!setflags.acerror_stepped_points_nprocs)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nacerror_stepped_points_nprocs was never set\", NULL);\n if(!setflags.verbose)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nverbose was never set\", NULL);\n if(!setflags.klfvs_file)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nklfvs_file was never set\", NULL);\n if(!setflags.classes_file)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nclasses_file was never set\", NULL);\n 
if(!setflags.eigvecs_file)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\neigvecs_file was never set\", NULL);\n if(!setflags.outfiles_dir)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\noutfiles_dir was never set\", NULL);\n if(!setflags.ascii_outfiles)\n fatalerr(\"optrws_check_parms_allset (file optrws.c)\", \"parm \\\nascii_outfiles was never set\", NULL);\n}\n\n/********************************************************************/\n\n/* Writes message to the messages file, and if verbose also writes it\nto the standard output. */\n\nvoid message_prog(char message[])\n{\n fprintf(fp_messages, \"%s\", message);\n fflush(fp_messages);\n if(verbose_int)\n printf(\"%s\", message);\n}\n\n/********************************************************************/\n" }, { "alpha_fraction": 0.5895242691040039, "alphanum_fraction": 0.6004999876022339, "avg_line_length": 23.720603942871094, "blob_id": "f18d780223b761945fa0b340067ad3c593d6cb7b", "content_id": "65682fb0a922ec11dda4b7cebc5da74ac92f8f95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 25602, "license_type": "no_license", "max_line_length": 143, "num_lines": 995, "path": "/bkafis/bkafis/src/lib/bkafis/ISOTemplate.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*\r\n/* Copyright (C) 2009 DEIS - University of Bologna (Italy)\r\n/* All rights reserved.\r\n/*\r\n/* FVC sample source code.\r\n/* http:/*biolab.csr.unibo.it/fvcongoing\r\n/*\r\n/* This source code can be used by FVC participants to create FVC executables. 
\r\n/* It cannot be distributed and any other use is strictly prohibited.\r\n/*\r\n/* Warranties and Disclaimers:\r\n/* THIS SOFTWARE IS PROVIDED \"AS IS\" WITHOUT WARRANTY OF ANY KIND\r\n/* INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY,\r\n/* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.\r\n/* IN NO EVENT WILL UNIVERSITY OF BOLOGNA BE LIABLE FOR ANY DIRECT,\r\n/* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES,\r\n/* INCLUDING DAMAGES FOR LOSS OF PROFITS, LOSS OR INACCURACY OF DATA,\r\n/* INCURRED BY ANY PERSON FROM SUCH PERSON'S USAGE OF THIS SOFTWARE\r\n/* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\r\n/*\r\n*/\r\n/* --------------------------------------------------------------- \r\n FVC ISOTemplate program\r\n \r\n\t\t\t\t\t\tv 1.2 - March 2009\r\n v 1.1 - June 2006\r\n\t\t\t\t\t\tv 1.0 - March 2006\r\n\t\t\t\t\t\t\t\t\t\t\t\t\r\n --------------------------------------------------------------- */\r\n\r\n\r\n\r\n\r\n\r\n#include \"ISOTemplate.h\"\r\n\r\n#include <memory.h>\r\n#include <malloc.h>\r\n#include <stdio.h>\r\n\r\n#ifdef _DEBUG\r\n#define new DEBUG_NEW\r\n#endif\r\n\r\n/*----------------------------------------------------------------------------------*/\r\n\r\nstatic ISOTemplate\tisoTemplate;\r\nstatic RidgeCount\t\tridgeCount;\r\nstatic CoreAndDelta coreAndDelta;\r\n\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\n/*Set Header's fields*/\r\nISORESULT InitRecordHeader (ISOWORD imgWidth,ISOWORD imgHeight,ISOWORD xRes,ISOWORD yRes,ISOBYTE 
nViews)\r\n{\r\n\tmemset(&isoTemplate.recordHeader,0x00,sizeof(RecordHeader));\r\n\r\n\tisoTemplate.fingerViewRecord=malloc(sizeof(FingerViewRecord)*nViews);\r\n\tmemset(isoTemplate.fingerViewRecord,0x00,sizeof(FingerViewRecord)*nViews);\r\n\r\n\tisoTemplate.extendedData=malloc(sizeof(ExtendedData)*nViews);\r\n\tmemset(isoTemplate.extendedData,0x00,sizeof(ExtendedData)*nViews);\r\n\r\n\tmemcpy(isoTemplate.recordHeader.formatID,\"FMR\",4);\t\t\t\t\t\t\t\t\t\t\t\t/*Format ID*/\r\n\tmemcpy(isoTemplate.recordHeader.specVersion,\" 20\",4);\t\t\t\t\t\t\t\t\t\t\t/*Spec Version*/\r\n\tmemcpy(isoTemplate.recordHeader.xImageSize,BigEndianWord(&imgWidth),2);\t\t/*X image size*/\r\n\tmemcpy(isoTemplate.recordHeader.yImageSize,BigEndianWord(&imgHeight),2);\t/*Y image size*/\r\n\tmemcpy(isoTemplate.recordHeader.xResolution,BigEndianWord(&xRes),2);\t\t\t/*X resolution*/\r\n\tmemcpy(isoTemplate.recordHeader.yResolution,BigEndianWord(&yRes),2);\t\t\t/*Y resolution*/\r\n\tisoTemplate.recordHeader.nFingerViews=nViews;\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t/*# of finger views*/\r\n\t\t\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT InitFingerViewHeader (ISOBYTE index,ISOBYTE fingPos,ISOBYTE nView,ISOBYTE imprType,ISOBYTE fingQuality,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tISOBYTE nMinutiae)\r\n{\r\n\tFingerViewRecord\t*fvr;\r\n\t\r\n\r\n if (\t(index>=isoTemplate.recordHeader.nFingerViews) ||\r\n\t\t\t\t(fingPos>=N_FINGER_POSITION) ||\r\n\t\t\t\t(imprType>=N_IMPRESSION_TYPE) ||\r\n\t\t\t\t(fingQuality>100) )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tfvr=&isoTemplate.fingerViewRecord[index];\r\n\r\n\tfvr->fingerMinutiaeData=malloc(sizeof(FingerMinutiaeData)*nMinutiae);\r\n\tmemset(fvr->fingerMinutiaeData,0x00,sizeof(FingerMinutiaeData)*nMinutiae);\r\n\r\n\tfvr->fingerViewHeader.fingerPosition=fingPos;\t\t\t\t\t\t\t\t\t/*Finger position*/\r\n\tfvr->fingerViewHeader.nView_imprType=(nView<<4) | 
imprType;\t\t/*View number and Impression type */\r\n\tfvr->fingerViewHeader.fingerQuality=fingQuality;\t\t\t\t\t\t\t/*Finger quality*/\r\n\tfvr->fingerViewHeader.nMinutiae=nMinutiae;\t\t\t\t\t\t\t\t\t\t/*Number of minutiae*/\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT SetMinutiaeData (ISOBYTE index,ISOBYTE minI,ISOBYTE minType,ISOWORD xCoord,ISOWORD yCoord,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t ISOBYTE minAngle,ISOBYTE minQuality)\r\n{\r\n\tFingerMinutiaeData *fmd;\r\n\tISOWORD app,\r\n\t\t\t\t\tx_size,\r\n\t\t\t\t\ty_size;\r\n\r\n\r\n\tif (\t(index>=isoTemplate.recordHeader.nFingerViews) ||\r\n\t\t\t\t(minI>=isoTemplate.fingerViewRecord[index].fingerViewHeader.nMinutiae) ||\r\n\t\t\t\t(minType>=N_MINUTIA_TYPE) ||\r\n\t\t\t\t(minQuality>100) )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tif (\t((xCoord & 0xC000)!=0) ||\r\n\t\t\t\t((yCoord & 0xC000)!=0) )\r\n\t{\r\n\t\treturn ISO_COORD_OUTSIDE_DIM;\r\n\t}\r\n\r\n\tmemcpy(&x_size,isoTemplate.recordHeader.xImageSize,2);\r\n\tmemcpy(&y_size,isoTemplate.recordHeader.yImageSize,2);\r\n\tBigEndianWord(&x_size);\r\n\tBigEndianWord(&y_size);\r\n\tif (\t(xCoord>x_size) ||\r\n\t\t\t\t(yCoord>y_size) )\r\n\t{\r\n\t\treturn ISO_COORD_OUTSIDE_DIM;\r\n\t}\r\n\r\n\tfmd=&isoTemplate.fingerViewRecord[index].fingerMinutiaeData[minI];\r\n\r\n\tfmd->minutiaType_xLocation[0]=minType<<6;\t\t\t\t\t\t\t\t\t\t\t\t\t/*Minutia Type */\r\n\tapp=*BigEndianWord(&xCoord) | *((ISOWORD *) fmd->minutiaType_xLocation);\r\n\tmemcpy(fmd->minutiaType_xLocation,&app,2);\t\t\t\t\t\t\t\t\t\t\t\t/*X location\t*/\r\n\tmemcpy(fmd->reserved_yLocation,BigEndianWord(&yCoord),2);\t\t\t\t\t/*Reserved and Y location*/\r\n\tfmd->minutiaAngle=minAngle;\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t/*Minutia Angle*/\r\n\tfmd->minutiaQuality=minQuality;\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t/*Minutia Quality*/\r\n\t\r\n\treturn 
ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT InitExtendedDataHeader (ISOBYTE index,ISOBYTE nBlocks)\r\n{\r\n\tExtendedData\t*ed;\r\n\r\n\tif (\tindex>=isoTemplate.recordHeader.nFingerViews )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\ted=&isoTemplate.extendedData[index];\r\n\r\n\ted->extendedBlock=malloc(sizeof(ExtendedBlock)*nBlocks);\r\n\tmemset(ed->extendedBlock,0x00,sizeof(ExtendedBlock)*nBlocks);\r\n\r\n\ted->nExtendedBlock=nBlocks;\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT SetExtendedDataBlock (ISOBYTE index,ISOBYTE blkI,ISOWORD typeID,ISOWORD dataLength,ISOBYTE *data)\r\n{\r\n\tExtendedBlock *eb;\r\n\r\n\r\n\tif (\t(index>=isoTemplate.recordHeader.nFingerViews) ||\r\n\t\t\t\t(blkI>=isoTemplate.extendedData[index].nExtendedBlock) ||\r\n\t\t\t\t(dataLength==0) ||\r\n\t\t\t\t(data==NULL) )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\teb=&isoTemplate.extendedData[index].extendedBlock[blkI];\r\n\r\n\tmemcpy(eb->extendedBlockHeader.typeIDcode,BigEndianWord(&typeID),2);\t/*Extended Data Type Code*/\r\n\tmemcpy(eb->data,data,dataLength);\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t/*Extended Data*/\r\n\tmemcpy(eb->extendedBlockHeader.length,BigEndianWord(&dataLength),2);\t/*Extended Data Length*/\r\n\t\t\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT SaveISOTemplate (char *path)\r\n{\r\n\tISOBYTE i,\r\n\t\t\t\t\tj;\r\n\tISOWORD k,\r\n\t\t\t\t\texdata_length;\r\n\tISODWORD template_length;\r\n\tFILE *fp;\r\n\r\n\r\n\tif ( path==0 )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tif ( (fp=fopen(path,\"wb\"))==NULL )\r\n\t{\r\n\t\treturn 
ISO_GENERICERROR;\r\n\t}\r\n\r\n\t/*---*/\r\n\ttemplate_length=RECORDHEADER_LEN+\r\n\t\t\t\t\t\t\t\t\tisoTemplate.recordHeader.nFingerViews*(FINGERVIEWHEADER_LEN+EXTENDEDDATAHEADER_LEN);\r\n\t\t\t\t\t\t\t\t\t\r\n\tfor (i=0;i<isoTemplate.recordHeader.nFingerViews;i++)\r\n\t{\r\n\t\ttemplate_length+=isoTemplate.fingerViewRecord[i].fingerViewHeader.nMinutiae*MINUTIA_LEN;\r\n\t}\r\n\r\n\tfor (i=0;i<isoTemplate.recordHeader.nFingerViews;i++)\r\n\t{\r\n\t\texdata_length=0;\r\n\r\n\t\tfor (j=0;j<isoTemplate.extendedData[i].nExtendedBlock;j++)\r\n\t\t{\r\n\t\t\tk=*((ISOWORD *) isoTemplate.extendedData[i].extendedBlock[j].extendedBlockHeader.length);\r\n\t\t\texdata_length+=*BigEndianWord(&k);\r\n\t\t\texdata_length+=EXTENDEDBLOCKHEADER_LEN;\r\n\t\t}\r\n\r\n\t\ttemplate_length+=exdata_length;\r\n\r\n\t\tmemcpy(isoTemplate.extendedData[i].blockLength,BigEndianWord(&exdata_length),2);\r\n\t}\r\n\r\n\tmemcpy(isoTemplate.recordHeader.recordLength,BigEndianDWord(&template_length),4);\r\n\t/*---*/\r\n\r\n\t/*---*/\r\n\tfwrite(&isoTemplate.recordHeader,sizeof(RecordHeader),1,fp);\r\n\t\r\n\tfor (i=0;i<isoTemplate.recordHeader.nFingerViews;i++)\r\n\t{\r\n\t\tfwrite(&isoTemplate.fingerViewRecord[i].fingerViewHeader,sizeof(FingerViewHeader),1,fp);\r\n\r\n\t\tfor (j=0;j<isoTemplate.fingerViewRecord[i].fingerViewHeader.nMinutiae;j++)\r\n\t\t{\r\n\t\t\tfwrite(&isoTemplate.fingerViewRecord[i].fingerMinutiaeData[j],sizeof(FingerMinutiaeData),1,fp);\r\n\t\t}\r\n\r\n\t\tfwrite(isoTemplate.extendedData[i].blockLength,2,1,fp);\r\n\r\n\t\tif ( *((ISOWORD *) isoTemplate.extendedData[i].blockLength)!=0 )\r\n\t\t{\r\n\t\t\tfor(j=0;j<isoTemplate.extendedData[i].nExtendedBlock;j++)\r\n\t\t\t{\r\n\t\t\t\texdata_length=*((ISOWORD *) 
isoTemplate.extendedData[i].extendedBlock[j].extendedBlockHeader.length);\r\n\t\t\t\tBigEndianWord(&exdata_length);\r\n\t\t\t\r\n\t\t\t\tfwrite(&isoTemplate.extendedData[i].extendedBlock[j],exdata_length+EXTENDEDBLOCKHEADER_LEN,1,fp);\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\t/*---*/\r\n\r\n\tfclose(fp);\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT LoadISOTemplate (char *path)\r\n{\r\n\tFILE *fp;\r\n\tFingerViewRecord *fvr;\r\n\tExtendedData *ed;\r\n\tISOBYTE i,\r\n\t\t\t\t\tj,\r\n\t\t\t\t\t*pBuff;\r\n\tISOWORD word,\r\n\t\t\t\t\tk1,\r\n\t\t\t\t\tk2;\r\n\r\n\r\n\tif ( path==0 )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tif ( (fp=fopen(path,\"rb\"))==NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\t/*---*/\r\n\tfread(&isoTemplate.recordHeader,sizeof(RecordHeader),1,fp);\r\n\r\n\tisoTemplate.fingerViewRecord=malloc(sizeof(FingerViewRecord)*isoTemplate.recordHeader.nFingerViews);\r\n\tmemset(isoTemplate.fingerViewRecord,0x00,sizeof(FingerViewRecord)*isoTemplate.recordHeader.nFingerViews);\r\n\r\n\tisoTemplate.extendedData=malloc(sizeof(ExtendedData)*isoTemplate.recordHeader.nFingerViews);\r\n\tmemset(isoTemplate.extendedData,0x00,sizeof(ExtendedData)*isoTemplate.recordHeader.nFingerViews);\r\n\r\n\tfor(i=0;i<isoTemplate.recordHeader.nFingerViews;i++)\r\n\t{\r\n\t\tfvr=&isoTemplate.fingerViewRecord[i];\r\n\r\n\t\tfread(&fvr->fingerViewHeader,sizeof(FingerViewHeader),1,fp);\r\n\r\n\t\tfvr->fingerMinutiaeData=malloc(sizeof(FingerMinutiaeData)*fvr->fingerViewHeader.nMinutiae);\r\n\t\tmemset(fvr->fingerMinutiaeData,0x00,sizeof(FingerMinutiaeData)*fvr->fingerViewHeader.nMinutiae);\r\n\r\n\t\tfor (j=0;j<fvr->fingerViewHeader.nMinutiae;j++)\r\n\t\t{\r\n\t\t\tfread(&fvr->fingerMinutiaeData[j],sizeof(FingerMinutiaeData),1,fp);\r\n\t\t}\r\n\r\n\t\tfread(&word,sizeof(ISOWORD),1,fp);\r\n\t\tBigEndianWord(&word);\r\n\r\n\t\tif ( word!=0 
)\r\n\t\t{\r\n\t\t\ted=&isoTemplate.extendedData[i];\r\n\r\n\t\t\tpBuff=malloc(sizeof(ISOBYTE)*word);\r\n\t\t\t\r\n\t\t\tfread(pBuff,word,1,fp);\r\n\r\n\t\t\ted->nExtendedBlock=0;\r\n\t\t\tk1=2;\r\n\r\n\t\t\twhile ( k1<word )\r\n\t\t\t{\r\n\t\t\t\tmemcpy(&k2,&pBuff[k1],2);\r\n\t\t\t\tBigEndianWord(&k2);\r\n\t\t\t\tif ( k2>0 )\r\n\t\t\t\t{\r\n\t\t\t\t\ted->nExtendedBlock++;\r\n\t\t\t\t\tk1+=k2+EXTENDEDBLOCKHEADER_LEN;\r\n\t\t\t\t}\r\n\t\t\t\telse\r\n\t\t\t\t{\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\tmemcpy(&ed->blockLength,BigEndianWord(&word),2);\r\n\r\n\t\t\ted->extendedBlock=malloc(sizeof(ExtendedBlock)*ed->nExtendedBlock);\r\n\t\t\tmemset(ed->extendedBlock,0x00,sizeof(ExtendedBlock)*ed->nExtendedBlock);\r\n\t\t\t\r\n\t\t\tk2=0;\r\n\t\t\tfor (k1=0;k1<ed->nExtendedBlock;k1++)\r\n\t\t\t{\r\n\t\t\t\tmemcpy(&ed->extendedBlock[k1].extendedBlockHeader,&pBuff[k2],EXTENDEDBLOCKHEADER_LEN);\r\n\t\t\t\tk2+=EXTENDEDBLOCKHEADER_LEN;\r\n\t\t\t\t\r\n\t\t\t\tmemcpy(&word,&ed->extendedBlock[k1].extendedBlockHeader.length,2);\r\n\t\t\t\tBigEndianWord(&word);\r\n\r\n\t\t\t\tmemcpy(ed->extendedBlock[k1].data,&pBuff[k2],word);\r\n\t\t\t\tk2+=word;\r\n\t\t\t}\r\n\r\n\t\t\tfree(pBuff);\r\n\t\t}\r\n\t}\r\n\t/*---*/\r\n\r\n\tfclose(fp);\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT CleanISOTemplate ()\r\n{\r\n\tISOBYTE i;\r\n\r\n\r\n\tfor (i=0;i<isoTemplate.recordHeader.nFingerViews;i++)\r\n\t{\r\n\t\tfree(isoTemplate.fingerViewRecord[i].fingerMinutiaeData);\r\n\r\n\t\tfree(isoTemplate.extendedData[i].extendedBlock);\r\n\t}\r\n\r\n\tfree(isoTemplate.fingerViewRecord);\r\n\tfree(isoTemplate.extendedData);\r\n\r\n\tmemset(&isoTemplate.recordHeader,0x00,sizeof(RecordHeader));\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT GetRecordHeader (ISOWORD *imgWidth,ISOWORD 
*imgHeight,ISOWORD *xRes,ISOWORD *yRes,ISOBYTE *nViews)\r\n{\r\n\tif ( imgWidth!=NULL )\r\n\t{\r\n\t\tmemcpy(imgWidth,&isoTemplate.recordHeader.xImageSize,2);\r\n\t\tBigEndianWord(imgWidth);\r\n\t}\r\n\r\n\tif ( imgHeight!=NULL )\r\n\t{\r\n\t\tmemcpy(imgHeight,&isoTemplate.recordHeader.yImageSize,2);\r\n\t\tBigEndianWord(imgHeight);\r\n\t}\r\n\r\n\tif ( xRes!=NULL )\r\n\t{\r\n\t\tmemcpy(xRes,&isoTemplate.recordHeader.xResolution,2);\r\n\t\tBigEndianWord(xRes);\r\n\t}\r\n\r\n\tif ( yRes!=NULL )\r\n\t{\r\n\t\tmemcpy(yRes,&isoTemplate.recordHeader.yResolution,2);\r\n\t\tBigEndianWord(yRes);\r\n\t}\r\n\r\n\tif ( nViews!=NULL )\r\n\t{\r\n\t\t*nViews=isoTemplate.recordHeader.nFingerViews;\r\n\t}\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT GetFingerViewHeader (\tISOBYTE index,ISOBYTE *fingPos,ISOBYTE *nView,ISOBYTE *imprType,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tISOBYTE *fingQuality,ISOBYTE *nMinutiae)\r\n{\r\n\tif (\t(index>=isoTemplate.recordHeader.nFingerViews) )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tif ( fingPos!=NULL )\r\n\t{\r\n\t\t*fingPos=isoTemplate.fingerViewRecord[index].fingerViewHeader.fingerPosition;\r\n\t}\r\n\r\n\tif ( nView!=NULL )\r\n\t{\r\n\t\t*nView=(isoTemplate.fingerViewRecord[index].fingerViewHeader.nView_imprType & 0xF0)>>4;\r\n\t}\r\n\r\n\tif ( imprType!=NULL )\r\n\t{\r\n\t\t*imprType=isoTemplate.fingerViewRecord[index].fingerViewHeader.nView_imprType & 0x0F;\r\n\t}\r\n\r\n\tif ( fingQuality!=NULL )\r\n\t{\r\n\t\t*fingQuality=isoTemplate.fingerViewRecord[index].fingerViewHeader.fingerQuality;\r\n\t}\r\n\r\n\tif ( nMinutiae!=NULL )\r\n\t{\r\n\t\t*nMinutiae=isoTemplate.fingerViewRecord[index].fingerViewHeader.nMinutiae;\r\n\t}\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT GetMinutiaeData (ISOBYTE index,ISOBYTE minI,ISOBYTE 
*minType,ISOWORD *xCoord,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tISOWORD *yCoord,ISOBYTE *minAngle,ISOBYTE *minQuality)\r\n{\r\n\tif (\t(index>=isoTemplate.recordHeader.nFingerViews) ||\r\n\t\t\t\t(minI>=isoTemplate.fingerViewRecord[index].fingerViewHeader.nMinutiae) ||\r\n\t\t\t\t(minType==NULL) ||\r\n\t\t\t\t(xCoord==NULL) ||\r\n\t\t\t\t(yCoord==NULL) ||\r\n\t\t\t\t(minAngle==NULL) ||\r\n\t\t\t\t(minQuality==NULL) )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\t*xCoord=*((ISOWORD *) isoTemplate.fingerViewRecord[index].fingerMinutiaeData[minI].minutiaType_xLocation);\r\n\t*minType=((*xCoord) & 0xC0) >> 6;\r\n\tBigEndianWord(xCoord);\r\n\t*xCoord=(*xCoord) & 0x3FFF;\r\n\t*yCoord=*((ISOWORD *) isoTemplate.fingerViewRecord[index].fingerMinutiaeData[minI].reserved_yLocation);\r\n\tBigEndianWord(yCoord);\r\n\t*yCoord=(*yCoord) & 0x3FFF;\r\n\t*minAngle=isoTemplate.fingerViewRecord[index].fingerMinutiaeData[minI].minutiaAngle;\r\n\t*minQuality=isoTemplate.fingerViewRecord[index].fingerMinutiaeData[minI].minutiaQuality;\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT GetExtendedDataHeader (ISOBYTE index,ISOBYTE *nBlocks)\r\n{\r\n\tif (\t(index>=isoTemplate.recordHeader.nFingerViews) ||\r\n\t\t\t\t(nBlocks==NULL) )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\t(*nBlocks)=isoTemplate.extendedData[index].nExtendedBlock;\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT GetExtendedDataBlock (ISOBYTE index,ISOBYTE blkI,ISOWORD *typeID,ISOWORD *dataLength,ISOBYTE *data)\r\n{\r\n\tExtendedBlock *eb;\r\n\tISOWORD app;\r\n\r\n\r\n\tif (\t(index>=isoTemplate.recordHeader.nFingerViews) ||\r\n\t\t\t\t(blkI>=isoTemplate.extendedData[index].nExtendedBlock) ||\r\n\t\t\t\t(typeID==NULL) ||\r\n\t\t\t\t(dataLength==NULL) )\r\n\t{\r\n\t\treturn 
ISO_GENERICERROR;\r\n\t}\r\n\r\n\teb=&isoTemplate.extendedData[index].extendedBlock[blkI];\r\n\r\n\t(*typeID)=*((ISOWORD *) eb->extendedBlockHeader.typeIDcode);\r\n\tBigEndianWord(typeID);\r\n\r\n\tapp=*((ISOWORD *) eb->extendedBlockHeader.length);\r\n\tBigEndianWord(&app);\r\n\r\n\tif ( data==NULL )\r\n\t{\r\n\t\t(*dataLength)=app;\r\n\t\treturn ISO_SUCCESS;\r\n\t}\r\n\r\n\tif ( (*dataLength)<app )\r\n\t{\r\n\t\t(*dataLength)=app;\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tmemcpy(data,eb->data,app);\r\n\t(*dataLength)=app;\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT InitRidgeCountBlock (ISOBYTE meth,ISOWORD n_ridge_count_data)\r\n{\r\n\tif ( meth>=N_RIDGE_COUNT_METHOD )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tmemset(&ridgeCount,0x00,sizeof(RidgeCount));\r\n\r\n\tridgeCount.ridgeCountData=malloc(sizeof(RidgeCountData)*n_ridge_count_data);\r\n\tmemset(ridgeCount.ridgeCountData,0x00,sizeof(RidgeCountData)*n_ridge_count_data);\r\n\r\n\tridgeCount.method=meth;\t\t\t\t\t\t\t\t\t\t\t\t\t\t/*Extraction type*/\r\n\tridgeCount.nRidgeCountData=n_ridge_count_data;\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT SetRidgeCountDataBlock (ISOWORD ridgecountI,ISOBYTE min_id1,ISOBYTE min_id2,ISOBYTE r_count)\r\n{\r\n\tRidgeCountData *rcd;\r\n\r\n\r\n\tif ( ridgecountI>=ridgeCount.nRidgeCountData )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\trcd=&ridgeCount.ridgeCountData[ridgecountI];\r\n\r\n\trcd->index1=min_id1;\t/*index #1*/\r\n\trcd->index2=min_id2;\t/*index #2*/\r\n\trcd->count=r_count;\t\t/*ridge count*/\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT AddRidgeCountBlock (ISOBYTE index,ISOBYTE blkI)\r\n{\r\n\tISOBYTE\t*data;\t\t\t\r\n\tISOWORD 
data_len;\t\t\r\n\r\n\r\n\tif (\t(index>=isoTemplate.recordHeader.nFingerViews) ||\r\n\t\t\t\t(blkI>=isoTemplate.extendedData[index].nExtendedBlock) )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tCreateRidgeCountBuff(NULL,&data_len);\r\n\r\n\tdata=malloc(sizeof(ISOBYTE)*data_len);\r\n\t\r\n\tCreateRidgeCountBuff(data,&data_len);\r\n\r\n\tCleanRidgeCount();\r\n\r\n\tSetExtendedDataBlock(index,blkI,RIDGE_COUNT_DATA,data_len,data);\r\n\r\n\tfree(data);\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT InitCoreAndDeltaBlock (ISOBYTE n_cores,ISOBYTE n_deltas)\r\n{\r\n\tmemset(&coreAndDelta,0x00,sizeof(CoreAndDelta));\r\n\r\n\tcoreAndDelta.coreData=malloc(sizeof(CoreData)*n_cores);\r\n\tmemset(coreAndDelta.coreData,0x00,sizeof(CoreData)*n_cores);\r\n\r\n\tcoreAndDelta.deltaData=malloc(sizeof(DeltaData)*n_deltas);\r\n\tmemset(coreAndDelta.deltaData,0x00,sizeof(DeltaData)*n_deltas);\r\n\r\n\tcoreAndDelta.reserved_nCores=n_cores;\t/*# of cores*/\r\n\tcoreAndDelta.reserved_nDeltas=n_deltas;\t/*Delta info type and # of deltas;*/\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT SetCoreDataBlock (ISOBYTE coreI,ISOBYTE core_info_type,ISOWORD xCoord,ISOWORD yCoord,ISOBYTE angle)\r\n{\r\n\tCoreData *cd;\r\n\tISOWORD app,\r\n\t\t\t\t\tx_size,\r\n\t\t\t\t\ty_size;\r\n\r\n\r\n\tif (\t(core_info_type>=N_CORENDELTA_INFO_TYPE) ||\r\n\t\t\t\t(coreI>=(coreAndDelta.reserved_nCores & 0x3F)) )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tif (\t((xCoord & 0xC000)!=0) ||\r\n\t\t\t\t((yCoord & 0xC000)!=0) )\r\n\t{\r\n\t\treturn ISO_COORD_OUTSIDE_DIM;\r\n\t}\r\n\r\n\tmemcpy(&x_size,isoTemplate.recordHeader.xImageSize,2);\r\n\tmemcpy(&y_size,isoTemplate.recordHeader.yImageSize,2);\r\n\tBigEndianWord(&x_size);\r\n\tBigEndianWord(&y_size);\r\n\tif (\t(xCoord>x_size) 
||\r\n\t\t\t\t(yCoord>y_size) )\r\n\t{\r\n\t\treturn ISO_COORD_OUTSIDE_DIM;\r\n\t}\r\n\r\n\tcd=&coreAndDelta.coreData[coreI];\r\n\r\n\tcd->coreInfoType_Xcoordinate[0]=core_info_type<<6;\t\t\t\t\t\t/*Core info type*/\r\n\tapp=*BigEndianWord(&xCoord) | *((ISOWORD *) cd->coreInfoType_Xcoordinate);\r\n\tmemcpy(cd->coreInfoType_Xcoordinate,&app,2);\t\t\t\t\t\t\t\t\t/*X location*/\r\n\r\n\tmemcpy(cd->reserved_Ycoordinate,BigEndianWord(&yCoord),2);\t\t/*Reserved and Y location*/\r\n\r\n\tif ( core_info_type==ANGULAR_INFO )\r\n\t{\r\n\t\tcd->angle=angle;\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t/*Core Angle*/\r\n\t}\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT SetDeltaDataBlock (ISOBYTE deltaI,ISOBYTE delta_info_type,ISOWORD xCoord,ISOWORD yCoord,ISOBYTE angle1,ISOBYTE angle2,ISOBYTE angle3)\r\n{\r\n\tDeltaData *dd;\r\n\tISOWORD app,\r\n\t\t\t\t\tx_size,\r\n\t\t\t\t\ty_size;\r\n\r\n\r\n\tif (\t(delta_info_type>=N_CORENDELTA_INFO_TYPE) ||\r\n\t\t\t\t(deltaI>=(coreAndDelta.reserved_nDeltas & 0x3F)) )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tif (\t((xCoord & 0xC000)!=0) ||\r\n\t\t\t\t((yCoord & 0xC000)!=0) )\r\n\t{\r\n\t\treturn ISO_COORD_OUTSIDE_DIM;\r\n\t}\r\n\r\n\tmemcpy(&x_size,isoTemplate.recordHeader.xImageSize,2);\r\n\tmemcpy(&y_size,isoTemplate.recordHeader.yImageSize,2);\r\n\tBigEndianWord(&x_size);\r\n\tBigEndianWord(&y_size);\r\n\tif (\t(xCoord>x_size) ||\r\n\t\t\t\t(yCoord>y_size) )\r\n\t{\r\n\t\treturn ISO_COORD_OUTSIDE_DIM;\r\n\t}\r\n\r\n\tdd=&coreAndDelta.deltaData[deltaI];\r\n\r\n\tdd->deltaInfoType_Xcoordinate[0]=delta_info_type<<6;\t\t\t\t\t/*Delta info type*/\r\n\tapp=*BigEndianWord(&xCoord) | *((ISOWORD *) dd->deltaInfoType_Xcoordinate);\r\n\tmemcpy(dd->deltaInfoType_Xcoordinate,&app,2);\t\t\t\t\t\t\t\t\t/*X location*/\r\n\r\n\tmemcpy(dd->reserved_Ycoordinate,BigEndianWord(&yCoord),2);\t\t/*Reserved and Y location*/\r\n\r\n\tif ( 
delta_info_type==ANGULAR_INFO )\r\n\t{\r\n\t\tdd->angle1=angle1;\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t/*Delta Angle 1*/\r\n\t\tdd->angle2=angle2;\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t/*Delta Angle 2*/\r\n\t\tdd->angle3=angle3;\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t/*Delta Angle 3*/\r\n\t}\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISORESULT AddCoreAndDeltaBlock (ISOBYTE index,ISOBYTE blkI)\r\n{\r\n\tISOBYTE\t*data;\t\t\t\r\n\tISOWORD data_len;\t\t\r\n\r\n\r\n\tif (\t(index>=isoTemplate.recordHeader.nFingerViews) ||\r\n\t\t\t\t(blkI>=isoTemplate.extendedData[index].nExtendedBlock) )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tCreateCoreAndDeltaBuff(NULL,&data_len);\r\n\r\n\tdata=(ISOBYTE *) malloc(sizeof(ISOBYTE)*data_len);\r\n\t\r\n\tCreateCoreAndDeltaBuff(data,&data_len);\r\n\r\n\tCleanCoreAndDelta();\r\n\r\n\tSetExtendedDataBlock(index,blkI,CORE_AND_DELTA_DATA,data_len,data);\r\n\r\n\tfree(data);\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nstatic ISORESULT CreateRidgeCountBuff (ISOBYTE *pBuff,ISOWORD *pBuff_len)\r\n{\r\n\tISOWORD count;\r\n\r\n\r\n\tcount=1+ridgeCount.nRidgeCountData*sizeof(RidgeCountData);\r\n\r\n\tif ( (pBuff==NULL) ||\r\n\t\t\t\t((*pBuff_len)<count) )\r\n\t{\r\n\t\t*pBuff_len=count;\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tpBuff[0]=ridgeCount.method;\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t/*Method*/\r\n\tmemcpy(&pBuff[1],ridgeCount.ridgeCountData,count-1);\t\t/*Ridge Count Data*/\r\n\r\n\t*pBuff_len=count;\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nstatic ISORESULT CleanRidgeCount ()\r\n{\r\n\tfree(ridgeCount.ridgeCountData);\r\n\r\n\tmemset(&ridgeCount,0x00,sizeof(RidgeCount));\r\n\r\n\treturn 
ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nstatic ISORESULT CreateCoreAndDeltaBuff (ISOBYTE *pBuff,ISOWORD *pBuff_len)\r\n{\r\n\tISOWORD\tcount,\r\n\t\t\t\t\tidx;\r\n\tISOBYTE i;\r\n\r\n\r\n\tcount=1;\r\n\t\r\n\tfor(i=0;i<(coreAndDelta.reserved_nCores & 0x3F);i++)\r\n\t{\r\n\t\tif ( ((coreAndDelta.coreData[i].coreInfoType_Xcoordinate[0] & 0xC0)>>6)==ANGULAR_INFO )\r\n\t\t{\r\n\t\t\tcount+=sizeof(CoreData);\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tcount+=(sizeof(CoreData)-1);\r\n\t\t}\r\n\t}\r\n\r\n\tcount++;\r\n\r\n\tfor(i=0;i<(coreAndDelta.reserved_nDeltas & 0x3F);i++)\r\n\t{\r\n\t\tif ( ((coreAndDelta.deltaData[i].deltaInfoType_Xcoordinate[0] & 0xC0)>>6)==ANGULAR_INFO )\r\n\t\t{\r\n\t\t\tcount+=sizeof(DeltaData);\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tcount+=(sizeof(DeltaData)-3);\r\n\t\t}\r\n\t}\r\n\r\n\tif ( (pBuff==NULL) ||\r\n\t\t\t\t((*pBuff_len)<count) )\r\n\t{\r\n\t\t*pBuff_len=count;\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\r\n\tidx=0;\r\n\r\n\tpBuff[idx]=coreAndDelta.reserved_nCores;\r\n\tidx++;\r\n\r\n\tfor(i=0;i<(coreAndDelta.reserved_nCores & 0x3F);i++)\r\n\t{\r\n\t\tif ( ((coreAndDelta.coreData[i].coreInfoType_Xcoordinate[0] & 0xC0)>>6)==ANGULAR_INFO )\r\n\t\t{\r\n\t\t\tmemcpy(&pBuff[idx],&coreAndDelta.coreData[i],sizeof(CoreData));\r\n\t\t\tidx+=sizeof(CoreData);\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tmemcpy(&pBuff[idx],&coreAndDelta.coreData[i],(sizeof(CoreData)-1));\r\n\t\t\tidx+=(sizeof(CoreData)-1);\r\n\t\t}\r\n\t}\r\n\t\r\n\tpBuff[idx]=coreAndDelta.reserved_nDeltas;\r\n\tidx++;\r\n\r\n\tfor(i=0;i<(coreAndDelta.reserved_nDeltas & 0x3F);i++)\r\n\t{\r\n\t\tif ( ((coreAndDelta.deltaData[i].deltaInfoType_Xcoordinate[0] & 0xC0)>>6)==ANGULAR_INFO 
)\r\n\t\t{\r\n\t\t\tmemcpy(&pBuff[idx],&coreAndDelta.deltaData[i],sizeof(DeltaData));\r\n\t\t\tidx+=sizeof(DeltaData);\r\n\t\t}\r\n\t\telse\r\n\t\t{\r\n\t\t\tmemcpy(&pBuff[idx],&coreAndDelta.deltaData[i],(sizeof(DeltaData)-3));\r\n\t\t\tidx+=(sizeof(DeltaData)-3);\r\n\t\t}\r\n\t}\r\n\t\r\n\t(*pBuff_len)=count;\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nstatic ISORESULT CleanCoreAndDelta ()\r\n{\r\n\tfree(coreAndDelta.coreData);\r\n\tfree(coreAndDelta.deltaData);\r\n\r\n\tmemset(&coreAndDelta,0x00,sizeof(CoreAndDelta));\r\n\r\n\treturn ISO_SUCCESS;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#ifdef CPU_TYPE_LITTLEENDIAN\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISOBYTE * BigEndianConv (ISOBYTE *pBuff,ISODWORD nbytes)\r\n{\r\n\tISOBYTE *p;\r\n\tISODWORD i;\r\n\r\n\tp=malloc(nbytes);\r\n\t\r\n\tfor (i=0;i<nbytes;i++)\r\n\t{\r\n\t\tp[i]=pBuff[nbytes-i-1];\r\n\t}\r\n\r\n\tmemcpy(pBuff,p,nbytes);\r\n\r\n\tfree(p);\r\n\r\n\treturn pBuff;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISOWORD * BigEndianConvWord (ISOWORD *pBuff)\r\n{\r\n\tISOWORD\tapp;\r\n\tint i,\r\n\t\t\tsize=sizeof(ISOWORD);\r\n\r\n\tapp=*pBuff;\r\n\r\n\tfor (i=0;i<size;i++)\r\n\t{\r\n\t\t((ISOBYTE *) pBuff)[i]=((ISOBYTE *) &app)[size-i-1];\r\n\t}\r\n\r\n\treturn pBuff;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n\r\n\r\nISODWORD * BigEndianConvDWord (ISODWORD *pBuff)\r\n{\r\n\tISODWORD\tapp;\r\n\tint i,\r\n\t\t\tsize=sizeof(ISODWORD);\r\n\r\n\tapp=*pBuff;\r\n\r\n\tfor (i=0;i<size;i++)\r\n\t{\r\n\t\t((ISOBYTE *) pBuff)[i]=((ISOBYTE *) &app)[size-i-1];\r\n\t}\r\n\r\n\treturn 
pBuff;\r\n}\r\n/*----------------------------------------------------------------------------------*/\r\n#endif\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6128464341163635, "alphanum_fraction": 0.6301929354667664, "avg_line_length": 27.85553550720215, "blob_id": "25e17222a35e91e18e912f6cbc092113429c91e4", "content_id": "163508f1f9f7c7f0618c307fc03a5e98b0b47a16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 15911, "license_type": "no_license", "max_line_length": 153, "num_lines": 533, "path": "/bkafis/bkafis/src/lib/bkafis/fingerprint.26.8.2015.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tmatcher.c\r\n\tDescription: Function implementation of BKAFIS matcher\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n *********************************************************************/\r\n\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include \"fingerprint.h\"\r\n#include \"ISOTemplate.h\"\r\n#ifndef M_PI\r\n#define M_PI 3.14159\r\n#endif\r\n/* #define DEBUG*/\r\n\r\n/* constants to control extractor */\r\n#define MAX_FOUND_NEIGHBORS\t8\r\n#define MIN_DISTANCE\t8.5\r\n#define MAX_DISTANCE\t130\r\n/* constants to control matcher */\r\n#define ED_THRESHOLD\t\t15\r\n#define DRA_THRESHOLD\t\t22.5*M_PI/180\r\n#define ODA_THRESHOLD \t\t22.5*M_PI/180\r\n#define DRA_THRESHOLD1\t\t(2*M_PI - 22.5*M_PI/180 )\r\n#define ODA_THRESHOLD1 \t\t(2*M_PI - 22.5*M_PI/180 )\r\n#define ED_WEIGHT 0.8\r\n#define DRA_WEIGHT 0.1\r\n#define ODA_WEIGHT 0.1\r\n#define N_PAIRS\t\t2\r\n#define LOCAL_SCORE_BIAS 1\r\n\r\n\r\n#define NumNeighs2\t3\r\n\r\n#define LDR_WEIGHT \t0.5\r\n#define SIGMA \t2 \r\n#define TG_THRESHOLD \t12\r\n#define TA_THRESHOLD \tM_PI/6\r\n#define RC_THRESHOLD\t1\r\n\r\nBkafisParams bkafisParams = {\r\n\tMAX_FOUND_NEIGHBORS, 
\r\n\tMIN_DISTANCE,\r\n\tMAX_DISTANCE,\r\n\tED_THRESHOLD,\r\n\tDRA_THRESHOLD,\r\n\tODA_THRESHOLD,\r\n\tDRA_THRESHOLD1,\r\n\tODA_THRESHOLD1,\r\n\tED_WEIGHT,\r\n\tDRA_WEIGHT,\r\n\tODA_WEIGHT,\r\n\t\r\n\tN_PAIRS,\r\n\tLOCAL_SCORE_BIAS,\r\n\t\r\n\tLDR_WEIGHT,\r\n\tSIGMA,\r\n\tTG_THRESHOLD,\r\n\tTA_THRESHOLD,\r\n\tRC_THRESHOLD\r\n};\r\n/*\r\ntypedef struct\r\n{\r\n\t\r\n\tunsigned char maxNeighbors;\r\n\tfloat minDistance,maxDistance;\r\n\t\r\n\tfloat edThreshold, draThreshold, odaThreshold, draThreshold1, odaThreshold1;\r\n\tfloat edWeight, draWeight, odaWeight;\r\n\tunsigned char nNeighborPairThreshold;\r\n\tfloat localScoreBias;\r\n\t\r\n\tfloat ldrWeight;\r\n\tfloat sigma;\r\n\tfloat tgThreshold, taThreshold;\r\n\tunsigned char rcThreshold;\r\n\t\r\n} BkafisParams; */\r\n/**********************************************************************\r\n\tConvert from ISOTemplate 2005 format \r\n\tInput:\r\n\t\t\tImplicitly stored in static variable isoTemplate that is declared \r\n\t\t\tin ISOTemplate.c \r\n\tOutput:\r\n\t\t\tpointer to Fingerprint structure declared above \r\n\tUsage:\r\n\t\t\tin order to load the iso template from file call \r\n\t\t\tISORESULT LoadISOTemplate (ISOBYTE *path);\r\n\t\t\tthen in order to convert from the template into Fingerprint structure\r\n\t\t\tcall unsigned char ConvertISO2005Fingerprint(Fingerprint* finger);\r\n *********************************************************************/ \r\n\r\nchar ConvertISO2005Fingerprint(Fingerprint* finger)\r\n{\r\n\tif (finger==NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tISOWORD width, height;\r\n\tGetRecordHeader (&width,&height,NULL,NULL,NULL);\r\n\tfinger->width = width;\r\n\tfinger->height = height;\r\n\t#ifdef DEBUG\r\n\tprintf(\"Width=%d\\nHeight=%d\\n\",finger->width,finger->height);\r\n\t#endif\r\n\tunsigned char quality, nMinutiae;\r\n\tGetFingerViewHeader (0,NULL,NULL,NULL,&quality,&nMinutiae);\r\n\tfinger->quality = quality;\r\n\tfinger->nMinutiae = nMinutiae;\r\n\tMinutia** 
minutiae=malloc(sizeof(Minutia*)*finger->nMinutiae);\r\n\tif (minutiae == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tmemset(minutiae,0x00,sizeof(Minutia*)*finger->nMinutiae);\r\n\t#ifdef DEBUG\r\n\tprintf(\"Quality=%d\\nnMinutiae=%d\\n\",finger->quality,finger->nMinutiae);\r\n\t#endif\r\n\t\r\n\t\r\n\tunsigned char minI;\r\n\tISOBYTE type, angle;\r\n\tISOWORD x,y;\r\n\tMinutia* min;\r\n\tfor (minI=0;minI<finger->nMinutiae;minI++){\r\n\t\t\r\n\t\tmin=malloc(sizeof(Minutia));\r\n\t\tif (min==NULL){\r\n\t\t\tCleanFingerprint(finger);\r\n\t\t\treturn ISO_GENERICERROR;\r\n\t\t}\r\n\t\tGetMinutiaeData(0,minI,&type,&x,&y,&angle,&quality);\r\n\t\tmin->x = x;\r\n\t\tmin->y = y;\r\n\t\tmin->angle = angle*1.40625*M_PI/180;\r\n\t\tmin->type = type;\r\n\t\tmin->quality = quality;\r\n\t\t#ifdef DEBUG\r\n\t\tprintf(\"%d\\t%d\\t%f\\t%d\\t%d\\n\", \r\n\t\t\tmin->x,\r\n\t\t\tmin->y,\r\n\t\t\tmin->angle,\r\n\t\t\tmin->type,\r\n\t\t\tmin->quality\r\n\t\t\t);\r\n\t\t#endif\r\n\t\tminutiae[minI]=min;\r\n\t}\r\n\tfinger->minutiae= minutiae;\r\n\treturn ISO_SUCCESS;\r\n\t\r\n}\r\nchar SaveFingerprintText(unsigned char* path, Fingerprint* finger)\r\n{\r\n\tFILE *fp;\r\n\tunsigned char minI;\r\n\tunsigned char neighborI;\r\n\t/* printf(\"Start saving fingerprint %d to text file\\n\", finger);*/\r\n\tif ( path==NULL)\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tif ( (fp=fopen(path,\"w\"))==NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\t/* printf(\"Minutiae information:Width=%d\\tHeight=%d\\tQuality=%d\\tnMinutiae=%d\\n\", finger->width, finger->height, finger->quality, finger->nMinutiae); */\r\n\tfprintf(fp,\"Minutiae information:Width=%d\\tHeight=%d\\tQuality=%d\\tnMinutiae=%d\\n\", finger->width, finger->height, finger->quality, finger->nMinutiae);\r\n\tfprintf(fp,\"x\\ty\\tAngle\\tType\\tQuality\\tLDR\\t#Neighbors\\tIndex\\tEd\\tDra\\tOda\\tRidgeCount...\\n\");\r\n\tfor (minI=0;minI<finger->nMinutiae;minI++){\r\n\t\tif 
(finger->minutiae[minI])\r\n\t\t\tfprintf(fp,\"%d\\t%d\\t%f\\t%d\\t%d\\t%d\\t%d\", \r\n\t\t\t\tfinger->minutiae[minI]->x, \r\n\t\t\t\tfinger->minutiae[minI]->y, \r\n\t\t\t\tfinger->minutiae[minI]->angle,\r\n\t\t\t\tfinger->minutiae[minI]->type,\r\n\t\t\t\tfinger->minutiae[minI]->quality,\r\n\t\t\t\tfinger->minutiae[minI]->ldr,\r\n\t\t\t\tfinger->minutiae[minI]->nNeighbors\r\n\t\t\t\t\t\t\t);\r\n\t\tfor (neighborI=0;neighborI<finger->minutiae[minI]->nNeighbors;neighborI++)\r\n\t\t\tif (finger->minutiae[minI]->neighbors[neighborI])\r\n\t\t\t\tfprintf(fp,\"\\t%d\\t%f\\t%f\\t%f\\t%d\", \r\n\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->index, \r\n\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->ed, \r\n\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->dra,\r\n\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->oda,\r\n\t\t\t\t\tfinger->minutiae[minI]->neighbors[neighborI]->ridgeCount \r\n\t\t\t\t);\r\n\t\tfprintf(fp,\"\\n\");\r\n\t}\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nchar CleanFingerprint(Fingerprint* finger)\r\n{\r\n\tunsigned char i,j;\r\n\tif (finger->minutiae){\r\n\t\tfor (i=0;i<finger->nMinutiae;i++){\r\n\t\t\tif (finger->minutiae[i]) {\r\n\t\t\t\tif (finger->minutiae[i]->neighbors) {\r\n\t\t\t\t\tfor (j=0;j<finger->minutiae[i]->nNeighbors;j++)\r\n\t\t\t\t\t\tif (finger->minutiae[i]->neighbors[j]) free(finger->minutiae[i]->neighbors[j]);\r\n\t\t\t\t\tfree(finger->minutiae[i]->neighbors);\r\n\t\t\t\t}\r\n\t\t\t\tfree(finger->minutiae[i]);\r\n\t\t\t}\r\n\t\t}\r\n\t\tfree(finger->minutiae);\r\n\t}\r\n\tmemset(finger,0x00,sizeof(Fingerprint));\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nchar SortMinutiaQuality(Fingerprint* finger)\r\n{\r\n\tunsigned char i, j;\r\n\t\r\n\t/*\t\tSorting minutiae by quality score in descending order\t*/\r\n \r\n\tISOBYTE quality1, quality2;\r\n\tfor (i = 0; i < finger->nMinutiae - 1; i++)\r\n for (j = finger->nMinutiae - 1; j > i; j--){\r\n\t\t\tquality1 = 
finger->minutiae[j]->quality;\r\n\t\t\tquality2 = finger->minutiae[j-1]->quality;\r\n\t\t\tif (quality1 > quality2){\r\n\t\t\t\tMinutia* \t\t\ttg = finger->minutiae[j];\r\n finger->minutiae[j] = finger->minutiae[j - 1];\r\n finger->minutiae[j-1] = tg;\r\n\t\t\t}\r\n\t\t}\r\n\treturn ISO_SUCCESS;\r\n\t\r\n}\r\n\r\n\r\nchar SaveFingerprint(unsigned char* path, Fingerprint* finger)\r\n{\r\n\tFILE *fp;\r\n\tunsigned char minI;\r\n\tunsigned char neighborI;\r\n\t/* printf(\"Start saving fingerprint %d to binary file\\n\", finger);*/\r\n\tif ( path==NULL)\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tif ( (fp=fopen(path,\"wb\"))==NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tfwrite(finger,FINGERHEADERSIZE,1,fp);\r\n\t\r\n\t\r\n\tMinutia* min;\r\n\tfor (minI=0;minI<finger->nMinutiae;minI++){\r\n\t\tmin = finger->minutiae[minI];\r\n\t\tif (min){\r\n\t\t\t\r\n\t\t\tfwrite(min, MINUTIASIZE,1,fp);\r\n\t\t\t\r\n\t\t\r\n\t\t\tfor (neighborI=0;neighborI<finger->minutiae[minI]->nNeighbors;neighborI++)\r\n\t\t\t\tif (min->neighbors[neighborI]){\r\n\t\t\t\t\tfwrite(min->neighbors[neighborI],NEIGHBORSIZE,1,fp);\r\n\t\t\t\t\t\r\n\t\t\t\t}\r\n\t\t}\r\n\t}\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\nchar ReadFingerprint(unsigned char* path, Fingerprint* finger)\r\n{\r\n\tFILE *fp;\r\n\tunsigned char minI;\r\n\tunsigned char neighborI;\r\n\t/* printf(\"Start saving fingerprint %d to binary file\\n\", finger);*/\r\n\tif ( path==NULL)\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tif ( (fp=fopen(path,\"rb\"))==NULL )\r\n\t{\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tfread(finger,FINGERHEADERSIZE,1,fp);\r\n\r\n\tMinutia** minutiae=malloc(sizeof(Minutia*)*finger->nMinutiae);\r\n\tif (minutiae == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tmemset(minutiae,0x00,sizeof(Minutia*)*finger->nMinutiae);\r\n\t\r\n\tMinutia* min;\r\n\tfor (minI=0;minI<finger->nMinutiae;minI++){\r\n\t\t\r\n\t\tmin=malloc(sizeof(Minutia));\r\n\t\tif (min==NULL) \r\n\t\t\treturn 
ISO_GENERICERROR;\r\n\t\t\r\n\t\tfread(min,MINUTIASIZE,1,fp);\r\n\t\t\r\n\t\t\r\n\t\tif (!min->nNeighbors) continue;\r\n\t\t\t\r\n\t\tNeighbor** neighborArray = malloc(sizeof(Neighbor*)*min->nNeighbors);\r\n\t\r\n\t\tNeighbor* pNeighbor;\r\n\t\tif (neighborArray==NULL) return ISO_GENERICERROR;\r\n\t\t\r\n\t\tmemset(neighborArray,0,sizeof(Neighbor*)*min->nNeighbors);\r\n\t\tunsigned char i;\r\n\t\tfor(i=0;i<min->nNeighbors;i++){\r\n\t\t\r\n\t\r\n\t\t\tpNeighbor=malloc(sizeof(Neighbor));\r\n\t\t\t\tif (pNeighbor!=NULL) return ISO_GENERICERROR;\t\t\r\n\t\t\tfread(pNeighbor,NEIGHBORSIZE,1,fp);\r\n\t\t\t\r\n\t\t\tneighborArray[i]=pNeighbor;\r\n\t\t\tpNeighbor=NULL;\r\n\t\t}\r\n\t\tminutiae[minI]=min;\r\n\t}\r\n\tfclose(fp);\r\n\treturn ISO_SUCCESS;\r\n}\r\nfloat \tCalculateAngle( int x1, int y1, int x2, int y2)\r\n{\r\n float angle;\r\n\tangle = atan2( y2 - y1, x2 - x1 );\r\n\treturn\t(angle<0)?angle+2*M_PI:angle;\r\n}\r\n/*\r\nStore distances between minutiae in a triangle matrix which represented in C as an array.\r\nd(0,1),d(0,2),...,d(0,n-1): n-1 elements\r\nd(1,2),d(1,3),...,d(1,n-1): n-2 elements\r\nd(i-1,i),d(i-1,i+1),...,d(i-1,n-1): n-i elements\r\nd(i,i+1),d(i,i+2),...,d(i,n-1): n-1-i elements\r\nd(n-2,n-1): 1 elements\r\nTotal number = (n-1)+(n-2)+...1 = (n-1)*n/2 elements\r\nrow 0 starts at distances[0]\r\nrow i starts at distances[start_i]: start_i = (n-1)+(n-2)+...+(n-i)=i*(2n-1-i)/2\r\nd(i,j) and d(j,i) will be stored at distances[start_i+j-i-1] = distances(i*(2n-1-i)/2+j-i-1))\r\nd(n-2,n-1)=distances((n-2)*(2n-1-n+2)/2+n-1-n+2-1)=distances((n-2)*(n+1)/2)=distance(n(n-1)/2-1)\r\n*/\r\nchar CalculateDistances(Fingerprint* finger, float** pDistances)\r\n{\r\n\tunsigned int n=finger->nMinutiae;\r\n\tunsigned int i,j;\r\n\tfloat *distances = malloc(sizeof(float)*n*(n-1)/2);\r\n\tif (distances == NULL)\r\n\t\treturn ISO_GENERICERROR;\r\n\tmemset(distances, 0, sizeof(float)*n*(n-1)/2);\r\n\tfor ( i = 0; i < n; i++ )\r\n\t\tfor ( j = i+1; j < n; j++ 
)\t{\r\n\t\t\t\t\r\n\t\t\t\tdistances[i*(2*n-1-i)/2+j-i-1] = sqrt( pow( (float)( finger->minutiae[i]->x - finger->minutiae[j]->x), 2 ) + \r\n\t\t\t\t\tpow( (float)( finger->minutiae[i]->y - finger->minutiae[j]->y), 2 ) );\r\n\t\t\t\t/* #ifdef DEBUG\r\n\t\t\t\t\tprintf(\"distances(%d,%d)=distances[%d])=%f\\n\",i,j,i*(2*n-1-i)/2+j-i-1,distances[i*(2*n-1-i)/2+j-i-1]);\r\n\t\t\t\t*/\r\n\t\t}\r\n\t*pDistances = distances;\r\n\treturn ISO_SUCCESS;\r\n}\r\n\r\nchar FindDirectionalNeighbours(Fingerprint* finger, float* distances, unsigned char centerI, \r\n\t\t\t\tBkafisParams* params)\r\n{\r\n\tunsigned char maxNeighbors = params->maxNeighbors;\r\n\tfloat minDistance = params->minDistance;\r\n\tfloat maxDistance = params->maxDistance;\r\n\tfloat minDistances[maxNeighbors];\r\n\tint minNeighborI[maxNeighbors];\r\n\tint neighborIds[maxNeighbors];\r\n\tfloat* phi;\r\n\t/* float* dist;\r\n\tif (CalculateDistances(finger,dist)==ISO_GENERICERROR)\r\n\t\treturn ISO_GENERICERROR;\r\n\t*/\r\n\tunsigned char nNeighbors = 0;\r\n\tunsigned char nIterations = 0;\r\n\tunsigned int sector;\r\n\tunsigned int n = finger->nMinutiae;\r\n\tunsigned int i;\r\n\tfloat centerAngle = finger->minutiae[centerI]->angle;\r\n\tphi = malloc(sizeof(float)*n); /* angle between minutiae centerI & other minutiae */\r\n\t#ifdef DEBUG\r\n\t\tprintf(\"start finding neighbors for %d\\tnMinutiae=%d, centerAngle=%f\\n\", centerI,n,centerAngle);\r\n\t#endif\r\n\tmemset(phi, 0, sizeof(float)*n);\r\n\t\r\n\tfor(i=0;i<maxNeighbors;i++) neighborIds[i]=-1;\r\n\t\r\n\twhile ((nNeighbors<maxNeighbors)&&(nIterations<maxNeighbors)){\r\n\t\t#ifdef DEBUG\r\n\t\t\tprintf(\"Iteration=%d\\t nNeighbors=%d\\n\", nIterations, nNeighbors);\r\n\t\t#endif\r\n\t\tnIterations++;\r\n\t\tmemset(minDistances,0,sizeof(float)*maxNeighbors);\r\n\t\tfor(i=0;i<maxNeighbors;i++) minNeighborI[i]=-1;\r\n\t\tfor (i=0;i<n;i++){\r\n\t\t\tfloat dist;\r\n\t\t\tif (i==centerI) continue;\r\n\t\t\t\r\n\t\t\tdist = 
(centerI<i)?distances[centerI*(2*n-1-centerI)/2+i-centerI-1]:distances[i*(2*n-1-i)/2+centerI-i-1];\r\n\t\t\t/* #ifdef DEBUG\r\n\t\t\t\tprintf(\"\\tMinutia=%d\\tDistance index=%d\\tdist=%f\\n\", i,centerI*(2*n-1-centerI)/2+i-centerI-1,dist);\r\n\t\t\t*/\r\n\t\t\tif (\r\n\t\t\t\t(dist < minDistance) || \r\n\t\t\t\t(dist > maxDistance)\r\n\t\t\t) continue; /* skip neighbours that are too far or too near the center minutia */\r\n\t\t\t/* skip neighbors that have been chosen */\r\n\t\t\tunsigned char found = 0;\r\n\t\t\tunsigned char j = 0;\r\n\t\t\twhile ((j<maxNeighbors) && !found){\r\n\t\t\t\tif (neighborIds[j++]==i) found = 1;\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tif (found) continue;\r\n\t\t\t/* calculate the angle of the vector connecting center minutia with minutia i */\r\n\t\t\t\r\n\t\t\tif (phi[i]==0)\r\n\t\t\t\tphi[i] = CalculateAngle(finger->minutiae[centerI]->x,finger->minutiae[centerI]->y,finger->minutiae[i]->x,finger->minutiae[i]->y);\r\n\t\t\t\r\n\t\t\tfloat d_phi = (phi[i] >= centerAngle)?phi[i]-centerAngle:2*M_PI+phi[i]-centerAngle;\r\n sector = floor( maxNeighbors*( d_phi/(2*M_PI) ) );\r\n\t\t\t#ifdef DEBUG\r\n\t\t\t\tprintf(\"\\tMinutia=%d\\tDistance index=%d\\tdist=%f\\tphi=%f\\tdphi=%f\\tsector=%d\\tminDistance=%f\\n\", \r\n\t\t\t\t\ti,centerI*(2*n-1-centerI)/2+i-centerI-1,dist,phi[i],d_phi,sector, minDistances[sector]);\r\n\t\t\t#endif\r\n\t\t\tif (minDistances[sector]==0){\r\n\t\t\t\tminDistances[sector] = dist;\r\n\t\t\t\tminNeighborI[sector] = i;\r\n\t\t\t}\r\n\t\t\telse {\r\n\t\t\t\tif (minDistances[sector]>dist){\r\n\t\t\t\t\tminDistances[sector] = dist;\r\n\t\t\t\t\tminNeighborI[sector] = i;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\tfor (sector=0;sector<maxNeighbors;sector++){\r\n\t\t\tif (nNeighbors==maxNeighbors) break;\r\n\t\t\tif (minNeighborI[sector]!=-1)\r\n\t\t\t\tneighborIds[nNeighbors++]=minNeighborI[sector];\r\n\t\t}\r\n\t\t\t\r\n\t}\r\n\r\n\tif (nNeighbors==0){\r\n\t\tfree(phi);\r\n\t\treturn ISO_SUCCESS;\r\n\t}\r\n\t\t\t\r\n\tNeighbor** 
neighborArray = malloc(sizeof(Neighbor*)*nNeighbors);\r\n\t\r\n\tNeighbor* pNeighbor;\r\n\tif (neighborArray==NULL){\r\n\t\tfree(phi);\r\n\t\treturn ISO_GENERICERROR;\r\n\t}\r\n\tmemset(neighborArray,0,sizeof(Neighbor*)*nNeighbors);\r\n\t#ifdef DEBUG\r\n\t\tprintf(\"%d neighbors of %d:\",nNeighbors,centerI);\r\n\t#endif\r\n\tfor(i=0;i<nNeighbors;i++){\r\n\t\t#ifdef DEBUG\r\n\t\t\tprintf(\"\\tNeighbor %d:%d\",i,neighborIds[i]);\r\n\t\t#endif\r\n\t\tpNeighbor=malloc(sizeof(Neighbor));\r\n\t\tif (pNeighbor!=NULL){\r\n\t\t\tmemset(pNeighbor,0,sizeof(Neighbor));\r\n\t\t\tpNeighbor->index=neighborIds[i];\r\n\t\t\tpNeighbor->ed = (centerI<neighborIds[i])?distances[centerI*(2*n-1-centerI)/2+neighborIds[i]-centerI-1]:\r\n\t\t\t\t\t\t\t\t\t\tdistances[neighborIds[i]*(2*n-1-neighborIds[i])/2+centerI-neighborIds[i]-1]; \r\n\t\t\t\r\n\t\t\t/* add code to calculate float/real oda, dra */\r\n\t\t\t\t\t\t\t\t\t \r\n\t\t\t/*\ttest dra ?? */\r\n\t\t\tfloat\ttmp;\r\n\t\t\ttmp\t= atan2( \tfinger->minutiae[neighborIds[i]]->y - finger->minutiae[centerI]->y,\r\n\t\t\t\t\t\t\tfinger->minutiae[neighborIds[i]]->x - finger->minutiae[centerI]->x\t );\r\n\t\t\t/*\r\n\t\t\tprintf(\"a1 tmp = %f\\n\", tmp);\r\n\t\t\t*/\r\n\t\t\ttmp = ( tmp < 0 ) ? tmp+2*M_PI : tmp;\r\n\t\t\t\r\n\t\t\tpNeighbor->dra\t= (\ttmp >= \tfinger->minutiae[centerI]->angle ) \t\t\t?\r\n\t\t\t\t\t\t\t\ttmp - \tfinger->minutiae[centerI]->angle \t\t:\r\n\t\t\t\t\t\t\t\ttmp - \tfinger->minutiae[centerI]->angle + 2*M_PI\t;\r\n\t\t\t/* testing ?? */\r\n\t\t\t/*\r\n\t\t\tprintf(\"a2 tmp = %f\\n\", tmp);\r\n\t\t\tprintf(\"a2 centerI.angle = %f\\n\", finger->minutiae[centerI]->angle);\r\n\t\t\tprintf(\"a3 dra = %f\\n\", pNeighbor->dra);\r\n\t\t\t*/\r\n\t\t\t/* -- */\r\n\t\t\t\r\n\t\t\t/*\ttest oda ?? 
*/\r\n\t\t\ttmp =\tfinger->minutiae[neighborIds[i]]->angle\r\n\t\t\t\t - \tfinger->minutiae[centerI]\t\t ->angle;\r\n\t\t\t\r\n\t\t\tpNeighbor->oda \t = ( finger->minutiae[neighborIds[i]]->angle >= finger->minutiae[centerI]->angle )\r\n\t\t\t\t\t\t\t\t? tmp : 2*M_PI+tmp;\r\n\t\t\t\t\t\t\t\t\r\n\t\t\tneighborArray[i]=pNeighbor;\r\n\t\t\tpNeighbor=NULL;\r\n\t\t}\r\n\t}\r\n\t#ifdef DEBUG \r\n\t\tprintf(\"\\n\");\r\n\t#endif\r\n\tfinger->minutiae[centerI]->nNeighbors =nNeighbors;\r\n\tfinger->minutiae[centerI]->neighbors = neighborArray;\r\n\t\r\n\tfree(phi);\r\n\t/* #ifdef DEBUG {\r\n\t\tunsigned char neighborI;\r\n\t\tfor (neighborI=0;neighborI<finger->minutiae[centerI]->nNeighbors;neighborI++)\r\n\t\t\tif (finger->minutiae[centerI]->neighbors[neighborI])\r\n\t\t\t\tprintf(\"\\t%d\\t%f\\t%f\\t%f\\t%d\\n\", \r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->index, \r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->ed, \r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->dra,\r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->oda,\r\n\t\t\t\t\tfinger->minutiae[centerI]->neighbors[neighborI]->ridgeCount \r\n\t\t\t\t);\r\n\t\tprintf(\"\\n\");\r\n\t}*/\r\n\treturn ISO_SUCCESS;\r\n}" }, { "alpha_fraction": 0.7135134935379028, "alphanum_fraction": 0.7297297120094299, "avg_line_length": 17.399999618530273, "blob_id": "7fa68e2fb81652c8a9968b5772f884c173fd69f9", "content_id": "38128d60440ebdf01c8382d97142bdeb50fcf7c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 34, "num_lines": 10, "path": "/bkafis/bkafis/bin/chuyendoi v2.py", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport numpy as np\nimport cv2\nimport sys\nimport time\ntime.sleep(2)\n\nimg = np.loadtxt('image.txt')\ncv2.imwrite('image.JPG',img)\nprint(\"chuyen doi anh thanh cong\")\n\n" }, { "alpha_fraction": 
0.43249428272247314, "alphanum_fraction": 0.45537757873535156, "avg_line_length": 27.266666412353516, "blob_id": "f56602fc5c8b043f5fe5456a53bf8b8be69558d8", "content_id": "5688ae26d801e669d35ec9cabd9fbeb9a0a22b95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 437, "license_type": "no_license", "max_line_length": 71, "num_lines": 15, "path": "/bkafis/exports/include/cuong.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tpairarray.h\r\n\tDescription: Data structure to present array that can grow in size\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n *********************************************************************/\r\n#ifndef CUONG_H_\r\n#define CUONG_H_\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include <math.h>\r\n#include <bcm2835.h>\r\n#include <lcd.h>\r\nvoid test(void);\r\n#endif" }, { "alpha_fraction": 0.5361163020133972, "alphanum_fraction": 0.5534709095954895, "avg_line_length": 36.1860466003418, "blob_id": "f7a21f6cfffd585b3d88cfba0a570922695a2144", "content_id": "2a664a451f33b0fc9ff5a8a83982246df4900a16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 6396, "license_type": "no_license", "max_line_length": 124, "num_lines": 172, "path": "/bkafis/rules.mak", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#*******************************************************************************\n#\n# License: \n# This software and/or related materials was developed at the National Institute\n# of Standards and Technology (NIST) by employees of the Federal Government\n# in the course of their official duties. Pursuant to title 17 Section 105\n# of the United States Code, this software is not subject to copyright\n# protection and is in the public domain. 
\n#\n# This software and/or related materials have been determined to be not subject\n# to the EAR (see Part 734.3 of the EAR for exact details) because it is\n# a publicly available technology and software, and is freely distributed\n# to any interested party with no licensing requirements. Therefore, it is \n# permissible to distribute this software as a free download from the internet.\n#\n# Disclaimer: \n# This software and/or related materials was developed to promote biometric\n# standards and biometric technology testing for the Federal Government\n# in accordance with the USA PATRIOT Act and the Enhanced Border Security\n# and Visa Entry Reform Act. Specific hardware and software products identified\n# in this software were used in order to perform the software development.\n# In no case does such identification imply recommendation or endorsement\n# by the National Institute of Standards and Technology, nor does it imply that\n# the products and equipment identified are necessarily the best available\n# for the purpose.\n#\n# This software and/or related materials are provided \"AS-IS\" without warranty\n# of any kind including NO WARRANTY OF PERFORMANCE, MERCHANTABILITY,\n# NO WARRANTY OF NON-INFRINGEMENT OF ANY 3RD PARTY INTELLECTUAL PROPERTY\n# or FITNESS FOR A PARTICULAR PURPOSE or for any purpose whatsoever, for the\n# licensed product, however used. In no event shall NIST be liable for any\n# damages and/or costs, including but not limited to incidental or consequential\n# damages of any kind, including economic damage or injury to property and lost\n# profits, regardless of whether NIST shall be advised, have reason to know,\n# or in fact shall know of the possibility.\n#\n# By using this software, you agree to bear all risk relating to quality,\n# use and performance of the software and/or related materials. 
You agree\n# to hold the Government harmless from any claim arising from your use\n# of the software.\n#\n#*******************************************************************************\n# Project: NIST Fingerprint Software\n# SubTree: /NBIS/Main\n# Filename: rules.mak.src\n# Integrators: Kenneth Ko\n# Organization: NIST/ITL\n# Host System: GNU GCC/GMAKE GENERIC (UNIX)\n# Date Created: 08/20/2006\n# Date Updated:\t\t03/27/2007\n# 10/23/2007\n# 04/02/2008\n#\t\t\t09/09/2008 by Joseph C. Konczal\n# 12/10/2008 by Kenneth Ko - Fix to support 64-bit\n# 12/16/2008 by Kenneth Ko - Add command line option for\n# HPUX\n#\t\t\t05/04/2011 by Kenneht Ko\n#\t\t\t08/05/2014 by John Grantham - Added CYGWIN_FLAG\n#\n# ******************************************************************************\n#\n# This rules file contains all the necessary variables to build \"NBIS\".\n#\n# ******************************************************************************\nSHELL\t:= /bin/sh\n#\n# ------------------------------------------------------------------------------\n#\nPROJ_NAME\t:= nbis\n#\n# ---------------------- Variables set by setup.sh------------------------------\n#\nPACKAGES\t\t\t:= ijg png commonnbis an2k bozorth3 bkafis imgtools mindtct nfseg nfiq pcasys\n#\nDIR_ROOT\t\t\t:= /home/pi/Desktop/bkafis\nFINAL_INSTALLATION_DIR \t\t:= ../install/\nX11_FLAG\t\t\t:= 1\nX11_INC\t\t\t\t:= /usr\nX11_LIB\t\t\t\t:= /usr\n\nENDIAN_FLAG\t\t\t:= -D__NBISLE__\nNBIS_JASPER_FLAG\t\t:= \nNBIS_OPENJP2_FLAG\t\t:= \nNBIS_PNG_FLAG\t\t\t:= -D__NBIS_PNG__\n\nARCH_FLAG\t\t\t:= -fPIC\n\nMSYS_FLAG\t\t\t:= \nCYGWIN_FLAG\t\t\t:= \n\nOS_FLAG\t\t\t\t:= \n#\n# ------------------------------------------------------------------------------\n#\nINSTALL_ROOT_INC_DIR\t\t:= $(FINAL_INSTALLATION_DIR)/include\nINSTALL_ROOT_BIN_DIR\t\t:= $(FINAL_INSTALLATION_DIR)/bin\nINSTALL_ROOT_LIB_DIR\t\t:= $(FINAL_INSTALLATION_DIR)/lib\nINSTALL_RUNTIME_DATA_DIR\t:= $(FINAL_INSTALLATION_DIR)/$(PROJ_NAME)\n#\n# 
------------------------------------------------------------------------------\n#\nEXPORTS_DIR\t:= $(DIR_ROOT)/exports\nEXPORTS_INC_DIR\t:= $(EXPORTS_DIR)/include\nEXPORTS_LIB_DIR\t:= $(EXPORTS_DIR)/lib\nEXPORTS_DIRS\t:= $(EXPORTS_DIR) \\\n\t\t$(EXPORTS_INC_DIR) \\\n\t\t$(EXPORTS_LIB_DIR) \n# \n# ------------------------------------------------------------------------------\n#\nRUNTIME_DATA_PACKAGES\t\t:= an2k nfiq pcasys\nRUNTIME_DATA_DIR\t\t:= runtimedata\n#\n# ------------------------------------------------------------------------------\n#\nDOC_DIR\t\t:= $(DIR_ROOT)/doc\nDOC_CATS_DIR\t:= $(DOC_DIR)/catalogs\nDOC_INSTALL_DIR\t:= $(DOC_DIR)/install\nDOC_REFS_DIR\t:= $(DOC_DIR)/refs\nDOC_DIRS\t:= $(DOC_REFS_DIR)\n#\n# ------------------------------------------------------------------------------\n#\nPCASYS_X11_DIR\t:= $(DIR_ROOT)/pcasys/obj/src/lib/pca/x11\n#\n# ------------------------------------------------------------------------------\n#\nMAN_DIR\t\t:= $(DIR_ROOT)/man\n#\n# ------------------------------------------------------------------------------\n#\nEXTRA_DIR\t:= $(MAN_DIR) \\\n\t\t$(DOC_DIR)\n#\n# ------------------------------------------------------------------------------\n#\nDIR_ROOT_BUILDUTIL:= $(DIR_ROOT)/buildutil\n#\n# ------------------------------------------------------------------------------\n#\nCC\t\t:= $(shell which gcc)\nCFLAGS\t:= -O2 -w -ansi -D_POSIX_SOURCE $(ENDIAN_FLAG) $(NBIS_JASPER_FLAG) $(NBIS_OPENJP2_FLAG) $(NBIS_PNG_FLAG) $(ARCH_FLAG)\n#CFLAGS\t:= -g $(ENDIAN_FLAG) $(NBIS_JASPER_FLAG) $(NBIS_PNG_FLAG) $(ARCH_FLAG)\nCDEFS\t:=\nCCC\t\t:= $(CC) $(CFLAGS) $(CDEFS)\nLDFLAGS\t:= $(ARCH_FLAG)\nM\t\t:= -M\n#M\t\t:= -MM\n#\nUNAME\t:= $(shell uname)\n#\nAWK\t\t:= $(shell which awk)\n#\nOWNER\t:= $(shell whoami)\nGROUP\t:= $(shell id -gn)\n#\nPERMS1\t:= 755\nPERMS2\t:= 644\n#\nINSTALL\t\t:= $(shell which install)\nINSTALL_BIN\t:= $(INSTALL) -p -m $(PERMS1)\nINSTALL_LIB\t:= $(INSTALL) -p -m $(PERMS1)\nRM\t\t:= $(shell which rm) 
-f\nMV\t\t:= $(shell which mv) -f\nCP\t\t:= $(shell which cp)\nCAT\t\t:= $(shell which cat)\nSED\t\t:= $(shell which sed)\nCHMOD\t:= $(shell which chmod)\nMKDIR\t:= $(shell which mkdir)\nTOUCH\t:= $(shell which touch)\nAR\t\t:= $(shell which ar)\n#\n" }, { "alpha_fraction": 0.6160232424736023, "alphanum_fraction": 0.6567040085792542, "avg_line_length": 31.929576873779297, "blob_id": "553ae87a90b0c6bcc8617eb4aeacc0a08ab4bc49", "content_id": "16ae5fcc0118982620fd66b5dc9c69d5b8e4ad48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2409, "license_type": "no_license", "max_line_length": 106, "num_lines": 71, "path": "/Scan Image with Sensor/Pi.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*\r\n* $HeadURL: svn://192.168.1.115/M3_Module_Host/trunk/App/STM_BSP.h $\r\n*\r\n* Created on: Nov 19, 2013\r\n* Author: Ryan Higgins\r\n*\r\n* Last Modified: $LastChangedDate: 2014-05-06 18:10:58 -0700 (Tue, 06 May 2014) $\r\n* Last Modified by: $LastChangedBy: Ryan $\r\n* LastChangedRevision : $LastChangedRevision: 168 $\r\n*\r\n* This software is provided \"as is\". 
NEXT Biometrics makes no warranty of any kind, either\r\n* express or implied, including without limitation any implied warranties of condition, uninterrupted\r\n* use, merchantability, or fitness for a particular purpose.\r\n*\r\n* This document as well as the information or material contained is copyrighted.\r\n* Any use not explicitly permitted by copyright law requires prior consent of NEXT Biometrics.\r\n* This applies to any reproduction, revision, translation and storage.\r\n*\r\n*/\r\n#include <stdint.h>\r\n#include <bcm2835.h>\r\n#include <wiringPi.h>\r\n\r\n#ifndef PI_H\r\n#define PI_H\r\n\r\n//////////////////////////////////////////////\r\n//Board Defines\r\n//////////////////////////////////////////////\r\n\r\n#define USE_SPIx SPI2\r\n\r\n#define LEADING_EDGE 0\r\n#define FALLING_EDGE 1\r\n//Define Pin\r\n#define PIN_MISO RPI_BPLUS_GPIO_J8_21\r\n#define PIN_MOSI RPI_BPLUS_GPIO_J8_19\r\n#define PIN_SCK RPI_BPLUS_GPIO_J8_23\r\n#define PIN_SS RPI_BPLUS_GPIO_J8_24\r\n\r\n\r\n#define USE_MST_AS_GPIO\r\n#define PIN_MSP RPI_BPLUS_GPIO_J8_31\r\n\r\n//Macro for setting and clearing the SS pin\r\n#define SS RPI_BPLUS_GPIO_J8_24 // CE0\r\n#define BSP_SET_SS bcm2835_gpio_write(SS, HIGH); //active SS\r\n#define BSP_CLEAR_SS bcm2835_gpio_write(SS, LOW);\r\n\r\n//Macro for setting and clearing the module nRST pin\r\n\r\n#define PIN_RESET RPI_BPLUS_GPIO_J8_15\r\n#define MODULE_RESET_LOW \tbcm2835_gpio_write(PIN_RESET, LOW);\r\n#define MODULE_RESET_HIGH \tbcm2835_gpio_write(PIN_RESET, HIGH);\r\n\r\n\r\n//Function prototypes\r\nvoid BSP_Config_HW (void);\r\nvoid BSP_Module_Reset_Configure (void);\r\nvoid BSP_SPI_Configure (void);\r\nvoid BSP_Module_Status_Pin_Configure_Input(void);\r\nvoid BSP_Module_Status_Pin_Configure_Interrupt(void);\r\nuint8_t BSP_Get_Moudle_Status_Pin(void);\r\nvoid BSP_SPI_ReadWriteBuffer (uint8_t* , uint8_t* , uint16_t);\r\nvoid BSP_Delay_ms(uint16_t);\r\nvoid BSP_Delay_us(uint64_t);\r\nvoid BSP_Module_nRST_High(void);\r\nvoid 
BSP_Module_nRST_Low(void);\r\nvoid BSP_Module_Wake(void);\r\n\r\n#endif\r\n" }, { "alpha_fraction": 0.6954314708709717, "alphanum_fraction": 0.710659921169281, "avg_line_length": 16.81818199157715, "blob_id": "09dac5cb03a420b5904306bca3bb0b83b706cf44", "content_id": "e7cefac724977ffd5fa06c58299629a605f6dde7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 40, "num_lines": 11, "path": "/bkafis/bkafis/bin/chuyendoi.py", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport numpy as np\nimport cv2\nimport sys\nimport time\ntime.sleep(2)\n\nimg = np.loadtxt(open(\"/image.txt\",'r'))\ncv2.imwrite('image.JPG',img)\n\nprint(\"chuyen doi anh thanh cong\")\n\n" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.5625, "avg_line_length": 9.666666984558105, "blob_id": "889a2410193c5c62f131c10a23c06845453bc762", "content_id": "30fc7161c2884835ead1b20c870eba48d91f53ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 39, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/README.md", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "# PI\n\n### Lưu trữ dự án trên Pi\n" }, { "alpha_fraction": 0.6236128211021423, "alphanum_fraction": 0.6689272522926331, "avg_line_length": 23.147287368774414, "blob_id": "f01900b461cfa69d85f92ec1bc8f7023256547ae", "content_id": "8fd0562a0c76758894739d8a1107c74fa9ead2e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3244, "license_type": "no_license", "max_line_length": 129, "num_lines": 129, "path": "/Scan Image with Sensor/Pi.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*\r\n* $HeadURL: svn://192.168.1.115/M3_Module_Host/trunk/App/STM_BSP.c $\r\n*\r\n* Created on: Nov 19, 2013\r\n* Author: Ryan Higgins\r\n*\r\n* Last 
Modified: $LastChangedDate: 2014-05-08 15:05:27 -0700 (Thu, 08 May 2014) $\r\n* Last Modified by: $LastChangedBy: Ryan $\r\n* LastChangedRevision : $LastChangedRevision: 170 $\r\n*\r\n* This software is provided \"as is\". NEXT Biometrics makes no warranty of any kind, either\r\n* express or implied, including without limitation any implied warranties of condition, uninterrupted\r\n* use, merchantability, or fitness for a particular purpose.\r\n*\r\n* This document as well as the information or material contained is copyrighted.\r\n* Any use not explicitly permitted by copyright law requires prior consent of NEXT Biometrics.\r\n* This applies to any reproduction, revision, translation and storage.\r\n*\r\n*/\r\n\r\n#include <stdint.h>\r\n#include \"Pi.h\"\r\n\r\n\r\n\r\n/*\r\n * Configure all the hardware components\r\n */\r\n\r\nvoid BSP_Config_HW (void){\r\n\r\n\tbcm2835_init();\r\n\twiringPiSetupPhys();\r\n\r\n\tBSP_Module_Reset_Configure();\r\n\tBSP_SPI_Configure();\r\n \r\n#ifdef USE_MST_AS_GPIO\r\n\tBSP_Module_Status_Pin_Configure_Input();\r\n\r\n#else\r\n\tBSP_Module_Status_Pin_Configure_Interrupt();\r\n#endif\r\n}\r\n\r\n\r\n\r\n/*\r\n * void BSP_Module_Reset_Configure(void)\r\n *\r\n * This function configures the GPIO that controls the nRST pin on the module\r\n */\r\nvoid BSP_Module_Reset_Configure(void){\r\n\r\n\tbcm2835_gpio_fsel(PIN_RESET, BCM2835_GPIO_FSEL_OUTP);\r\n\tMODULE_RESET_HIGH;\r\n\r\n}\r\n\r\n\r\n/*\r\n * Configure the SPI\r\n */\r\n\r\nvoid BSP_SPI_Configure (void){\r\n\r\n bcm2835_spi_begin();\r\n bcm2835_spi_setBitOrder(BCM2835_SPI_BIT_ORDER_MSBFIRST); // MSB First\r\n bcm2835_spi_setDataMode(BCM2835_SPI_MODE0); // CPOL = 0, CPHA = 0\r\n bcm2835_spi_setClockDivider(BCM2835_SPI_CLOCK_DIVIDER_32); // 32 = 128ns = 7.8125MHz\r\n bcm2835_spi_chipSelect(BCM2835_SPI_CS0); // The default\r\n bcm2835_spi_setChipSelectPolarity(BCM2835_SPI_CS0, LOW); // the default\r\n\r\n}\r\n\r\n\r\n#ifdef USE_MST_AS_GPIO\r\n/*\r\n * This function configures the 
Module Status Pin as a general purpose input.\r\n */\r\nvoid BSP_Module_Status_Pin_Configure_Input(void){\r\n\r\n\tpinMode (PIN_MSP, INPUT);\r\n\r\n}\r\n\r\n/*\r\n * Poll module status pin\r\n */\r\nuint8_t BSP_Get_Moudle_Status_Pin(void){\r\n\treturn (uint8_t) digitalRead(PIN_MSP);\r\n}\r\n\r\n#else\r\n\r\nvoid BSP_Module_Status_Pin_Configure_Interrupt(void)\r\n{\r\n\tpullUpDnControl(PIN_MSP, 1);\r\n\twiringPiISR(PIN_MSP, INT_EDGE_FALLING, &MSP_interupt);\r\n}\r\n#endif\r\n\r\n/*\r\n * void BSP_SPI_ReadWriteBuffer (uint8_t* txBuff, uint8_t* rxBuff, uint16_t length)\r\n *\r\n * This function performs a bidirectional SPI read/write\r\n */\r\n\r\nvoid BSP_SPI_ReadWriteBuffer (uint8_t* txBuff, uint8_t* rxBuff, uint16_t length){\r\n\r\n\tbcm2835_spi_transfernb( (char*) txBuff, (char*) rxBuff, length);\r\n}\r\n\r\nvoid BSP_Delay_ms(uint16_t millis){\r\n\tbcm2835_delay(millis);\r\n}\r\n\r\nvoid BSP_Delay_us(uint64_t micros){\r\n\tbcm2835_delayMicroseconds(micros);\r\n}\r\n\r\nvoid BSP_Module_nRST_High(void){\r\n\tMODULE_RESET_HIGH;\r\n}\r\n\r\nvoid BSP_Module_nRST_Low(void){\r\n\tMODULE_RESET_LOW;\r\n\tBSP_Delay_us(500); //This timing value is dependent on host hardware. 
Can be smaller when nRST is configured as a a push pull.\r\n}\r\n" }, { "alpha_fraction": 0.554476797580719, "alphanum_fraction": 0.6062567234039307, "avg_line_length": 22.174999237060547, "blob_id": "33c3917096aa6baa6fd302de46f41a74f18f0f82", "content_id": "0965d3d736454d61c9366d7500461fd78fac6f10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 927, "license_type": "no_license", "max_line_length": 55, "num_lines": 40, "path": "/bkafis/bkafis/src/lib/bkafis/ScanImage.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "#include <bcm2835.h>\n#include <stdio.h>\n#include <Pi.h>\n#include <User.h>\n#include <ScanImage.h>\n#include <lcd.h>\nvoid ScanImage(uint8_t data[46080])\n{\n BSP_Config_HW (); \n uint8_t score[1];\n uint16_t j=0,i=0; \n uint8_t err=1, error=1;\n uint8_t data_seri[12];\n uint8_t str[30];\n NEXT_MODULE_t* Module;\n unlink(\"image.txt\");\n unlink(\"image.JPG\");\n LCD_Init();\n while(1)\n {\n\terror = NEXT_Module_FingerPresent(Module, score,0x01);\n \tLCD_Clear();\n\tsprintf(str,\"DAT NGON TAY\");\n\tLCD_Gotoxy(0,0);\n\tLCD_Puts(str);\n printf(\"Dat ngon tay vao cam bien\\n\");\n BSP_Delay_ms(1000);\n\tif (score[0] < 70) continue;\n error = NEXT_Module_ScanImage(Module,data);\n\tLCD_Clear();\n\tsprintf(str,\"QUET THANH CONG\");\n\tLCD_Gotoxy(0,0);\n\tLCD_Puts(str);\n printf(\"Da quet dau van tay thanh cong\\n\");\n\tBSP_Module_Reset_Configure();\n\tbreak;\n }\n bcm2835_spi_end();\n bcm2835_close();\n}\n" }, { "alpha_fraction": 0.6076623201370239, "alphanum_fraction": 0.6438453197479248, "avg_line_length": 20.38888931274414, "blob_id": "5dae05ca6a6690180f0a6771e21a01743aafec55", "content_id": "b8a386a993e6e14e1557c710c228f2cb9cee2005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2819, "license_type": "no_license", "max_line_length": 162, "num_lines": 126, "path": "/bkafis/exports/include/match.h", "repo_name": 
"lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tmatch.h\r\n\tDescription: Data structure to perform full package algorithm to match two fingerprints\r\n\tCreated on: Oct 12, 2015 \r\n \tAuthor: Nguyen Duc Minh\r\n\t\r\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\r\n\t- add function DetectLDR\r\n\t- add params for DetectLDR into struct BkafisParams\r\n *********************************************************************/\r\n\r\n\r\n\r\n#ifndef _MATCH_H_\r\n#define _MATCH_H_\r\n\r\n\r\n\r\n\r\n\r\n#include <cstring>\r\n#include <vector>\r\n\r\n#ifdef __cplusplus\r\nextern \"C\" {\r\n#endif\r\n// all of your legacy C code here\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include <math.h>\r\n#include <lfs.h>\r\n#include <sys/param.h>\r\n#include <an2k.h>\r\n#include <lfs.h>\r\n#include <imgdecod.h>\r\n#include <imgboost.h>\r\n#include <img_io.h>\r\n#include <version.h>\r\n#include <stdio.h>\r\n#include <FvcHeader.h>\r\n#include <ISOTemplate.h>\r\n#include <fingerprint.h>\r\n#ifdef __cplusplus\r\n}\r\n#endif\r\n\r\n\r\nusing namespace std;\r\n\r\n\r\n\r\n\r\n#define max_dist \t150 \r\n#define min_dist \t8\r\n#define bias\t\t0.15 \r\n#define skipAngleDiff\t\t32 /* 80 */\r\n#define ldrWeight\t0.14 \r\n\r\n/* dT = 15;\r\n draT = pi/10;\r\n odaT = 16;\r\n angleT = pi/10; */\r\n/* \t sigma = 10;\r\n step = 10; */\r\n\t\r\n#define step \t\t10\t\r\n#define sigma\t\t10\r\n#define edT\t\t\t15\r\n#define draT \tM_PI/10\r\n#define odaT \t\t16 \r\n#define angleT\t\tM_PI/10\r\n\r\n#define nTHRESHOLD 64\r\n#define dirT\t\t16\r\n\r\n/* angle in range 0-255 corresponding to 0-2pi */\r\n \r\n/* angle in range 0-255 corresponding to 0-2pi */\r\n\r\n#define ridgeT \t\t1\r\n\r\n\r\n\r\n\r\n\r\n#ifndef \tDT\r\n#define \tDT\t\t15\r\n#endif\r\n#ifndef \tAT\r\n#define \tAT\t\tM_PI/8\r\n#endif\r\n#ifndef \tSIGMA\r\n#define \tSIGMA\t10\r\n#endif\r\n#ifndef \tDMIN\r\n#define \tDMIN\t8\r\n#endif\r\n\r\n/* 
constants to control matcher */\r\n\r\n#define N_PAIRS\t\t\t\t2\r\n#define LOCAL_SCORE_BIAS \t1\r\n\r\n\r\ntypedef struct {\r\n\tunsigned char id1, id2;\r\n\tfloat score, draBias;\r\n} PairedMin;\r\nextern float edThreshold[64];\r\nextern vector<PairedMin> pairedNeighbors;\r\n\r\n/* Calculate local paring score for two minutia i in finger 1 and minutia j in finger 2 \r\n\tReturn local difference score and the dra_bias between two minutiae \r\n*/\r\nvoid PairingMinutiae(Fingerprint* finger1, Fingerprint* finger2, unsigned char min1, unsigned char min2, float* localDiff, float* draBias);\r\n/* Pairing minutiae in two fingerprints \r\n Return a vector of paired minutiae\r\n */\r\nvoid PairingFingers(Fingerprint* finger1, Fingerprint* finger2, vector<PairedMin>& vecPairedMin);\r\nfloat Alignment(Fingerprint* finger1, Fingerprint*finger2, vector<PairedMin>& vecPairedMin, unsigned int pairId, unsigned char *pairFlag, int* flag1, int* flag2);\r\nfloat match_one(char templatefile1[MAXPATH], char templatefile2[MAXPATH]);\r\nint ad_pi2_iso(int angle1, int angle2);\r\nfloat ad_pi2(float angle1,float angle2);\r\nchar dir_diff(int dir, int dir1, int dir2);\r\n\r\n#endif" }, { "alpha_fraction": 0.46989721059799194, "alphanum_fraction": 0.5242290496826172, "avg_line_length": 24.269229888916016, "blob_id": "9b8ac2d6110ae409a185078307004a32ac6df845", "content_id": "b4839b5b4b6ad51bbc9f320fba61f408f3620d0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 681, "license_type": "no_license", "max_line_length": 71, "num_lines": 26, "path": "/bkafis/bkafis/src/lib/bkafis/cuong.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tmatcher.c\r\n\tDescription: Function implementation of BKAFIS extractor\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n\t\r\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\r\n\t- add function 
DetectLDR\r\n\t- add params for DetectLDR into struct BkafisParams\r\n *********************************************************************/\r\n\r\n#include <stdio.h>\r\n#include <cuong.h>\r\n#include <lcd.h>\r\nvoid test()\r\n{\r\n\tuint8_t str[30];\r\n\tLCD_Init();\r\n\tsprintf(str,\"test\");\r\n\tLCD_Gotoxy(0,0);\r\n\tLCD_Puts(str);\r\n\tbcm2835_delay(2000);\r\n\tprintf(\"test thu them thu vien\\n\");\r\n\tbcm2835_delay(1000);\r\n\tprintf(\"test them thu vien bcm2835\\n\");\r\n}" }, { "alpha_fraction": 0.5457779765129089, "alphanum_fraction": 0.5550715327262878, "avg_line_length": 35.2135124206543, "blob_id": "7196a20bb0c5f9d88e3ff780ae3cf46fb5132ae0", "content_id": "7b1ecc7a7783d8922e674ef50527ad93b9817349", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 13773, "license_type": "no_license", "max_line_length": 121, "num_lines": 370, "path": "/bkafis/bkafis/src/bin/extract/bkafis_ridges.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*******************************************************************************\r\n\r\nLicense: \r\nThis software and/or related materials was developed at the National Institute\r\nof Standards and Technology (NIST) by employees of the Federal Government\r\nin the course of their official duties. Pursuant to title 17 Section 105\r\nof the United States Code, this software is not subject to copyright\r\nprotection and is in the public domain. \r\n\r\nThis software and/or related materials have been determined to be not subject\r\nto the EAR (see Part 734.3 of the EAR for exact details) because it is\r\na publicly available technology and software, and is freely distributed\r\nto any interested party with no licensing requirements. 
Therefore, it is \r\npermissible to distribute this software as a free download from the internet.\r\n\r\nDisclaimer: \r\nThis software and/or related materials was developed to promote biometric\r\nstandards and biometric technology testing for the Federal Government\r\nin accordance with the USA PATRIOT Act and the Enhanced Border Security\r\nand Visa Entry Reform Act. Specific hardware and software products identified\r\nin this software were used in order to perform the software development.\r\nIn no case does such identification imply recommendation or endorsement\r\nby the National Institute of Standards and Technology, nor does it imply that\r\nthe products and equipment identified are necessarily the best available\r\nfor the purpose.\r\n\r\nThis software and/or related materials are provided \"AS-IS\" without warranty\r\nof any kind including NO WARRANTY OF PERFORMANCE, MERCHANTABILITY,\r\nNO WARRANTY OF NON-INFRINGEMENT OF ANY 3RD PARTY INTELLECTUAL PROPERTY\r\nor FITNESS FOR A PARTICULAR PURPOSE or for any purpose whatsoever, for the\r\nlicensed product, however used. In no event shall NIST be liable for any\r\ndamages and/or costs, including but not limited to incidental or consequential\r\ndamages of any kind, including economic damage or injury to property and lost\r\nprofits, regardless of whether NIST shall be advised, have reason to know,\r\nor in fact shall know of the possibility.\r\n\r\nBy using this software, you agree to bear all risk relating to quality,\r\nuse and performance of the software and/or related materials. 
You agree\r\nto hold the Government harmless from any claim arising from your use\r\nof the software.\r\n\r\n*******************************************************************************/\r\n\r\n\r\n/***********************************************************************\r\n LIBRARY: LFS - NIST Latent Fingerprint System\r\n\r\n FILE: bkafis_RIDGES.C\r\n AUTHOR: Minh Nguyen\r\n DATE: 06/08/2015\r\n UPDATED: \r\n\r\n Contains routines responsible for locating nearest minutia\r\n neighbors and counting intervening ridges as part of the\r\n NIST Latent Fingerprint System (LFS).\r\n\r\n***********************************************************************\r\n ROUTINES:\r\n count_minutiae_ridges_bkafis()\r\n count_minutia_ridges_bafis()\r\n find_neighbors_bkafis()\r\n***********************************************************************/\r\n\r\n#include <stdio.h>\r\n#include <lfs.h>\r\n#include <log.h>\r\n/*************************************************************************\r\n**************************************************************************\r\n#cat: calculate_angle - \tCalculate angle of a vector\r\n#cat: \r\n\r\n Input:\r\n x1, y1: coordinates of A\r\n\t x2, y2: coordinates of B\r\n Output:\r\n\t angle of vector AB\r\n**************************************************************************/\r\nfloat \tcalculate_angle( int x1, int y1, int x2, int y2)\r\n{\r\n float angle;\r\n\tangle = atan2( y2 - y1, x2 - x1 );\r\n\treturn\t(angle<0)?angle+2*M_PI:angle;\r\n}\r\n/*************************************************************************\r\n**************************************************************************\r\n#cat: count_minutiae_ridges - Takes a list of minutiae, and for each one,\r\n#cat: determines its closest neighbors and counts the number\r\n#cat: of interveining ridges between the minutia point and\r\n#cat: each of its neighbors.\r\n\r\n Input:\r\n minutiae - list of minutiae\r\n bdata - binary image data (0==while & 1==black)\r\n iw - 
width (in pixels) of image\r\n ih - height (in pixels) of image\r\n lfsparms - parameters and thresholds for controlling LFS\r\n Output:\r\n minutiae - list of minutiae augmented with neighbors and ridge counts\r\n Return Code:\r\n Zero - successful completion\r\n Negative - system error\r\n**************************************************************************/\r\nint count_minutiae_ridges_bkafis(MINUTIAE *minutiae,\r\n unsigned char *bdata, const int iw, const int ih,\r\n const LFSPARMS *lfsparms)\r\n{\r\n int ret;\r\n int i;\r\n\r\n print2log(\"\\nFINDING NBRS AND COUNTING RIDGES:\\n\");\r\n\r\n /* Sort minutia points on x then y (column-oriented). */\r\n if((ret = sort_minutiae_x_y(minutiae, iw, ih))){\r\n return(ret);\r\n }\r\n\r\n /* Remove any duplicate minutia points from the list. */\r\n if((ret = rm_dup_minutiae(minutiae))){\r\n return(ret);\r\n }\r\n\r\n /* Foreach remaining sorted minutia in list ... */\r\n /*for(i = 0; i < minutiae->num-1; i++){*/\r\n /****************************************\r\n\t\t\tUdate by Dan\r\n ****************************************/\r\n for(i = 0; i < minutiae->num; i++){\r\n /* Located neighbors and count number of ridges in between. */\r\n /* NOTE: neighbor and ridge count results are stored in */\r\n /* minutiae->list[i]. */\r\n if((ret = count_minutia_ridges_bkafis(i, minutiae, bdata, iw, ih, lfsparms))){\r\n return(ret);\r\n }\r\n }\r\n\r\n /* Return normally. 
*/\r\n return(0);\r\n}\r\n\r\n/*************************************************************************\r\n**************************************************************************\r\n#cat: count_minutia_ridges - Takes a minutia, and determines its closest\r\n#cat: neighbors and counts the number of interveining ridges\r\n#cat: between the minutia point and each of its neighbors.\r\n\r\n Input:\r\n minutia - input minutia\r\n bdata - binary image data (0==while & 1==black)\r\n iw - width (in pixels) of image\r\n ih - height (in pixels) of image\r\n lfsparms - parameters and thresholds for controlling LFS\r\n Output:\r\n minutiae - minutia augmented with neighbors and ridge counts\r\n Return Code:\r\n Zero - successful completion\r\n Negative - system error\r\n**************************************************************************/\r\nint count_minutia_ridges_bkafis(const int first, MINUTIAE *minutiae,\r\n unsigned char *bdata, const int iw, const int ih,\r\n const LFSPARMS *lfsparms)\r\n{\r\n int i, ret, *nbr_list, *nbr_nridges, nnbrs;\r\n\r\n /* Find up to the maximum number of qualifying neighbors. */\r\n if((ret = find_neighbors_bkafis(&nbr_list, &nnbrs, lfsparms->max_nbrs,\r\n first, minutiae))){\r\n free(nbr_list);\r\n return(ret);\r\n }\r\n\r\n print2log(\"NBRS FOUND: %d,%d = %d\\n\", minutiae->list[first]->x,\r\n minutiae->list[first]->y, nnbrs);\r\n\r\n /* If no neighors found ... */\r\n if(nnbrs == 0){\r\n /* Then no list returned and no ridges to count. */\r\n return(0);\r\n }\r\n\r\n /* Sort neighbors on delta dirs. */\r\n if((ret = sort_neighbors(nbr_list, nnbrs, first, minutiae))){\r\n free(nbr_list);\r\n return(ret);\r\n }\r\n\r\n /* Count ridges between first and neighbors. */\r\n /* List of ridge counts, one for each neighbor stored. 
*/\r\n nbr_nridges = (int *)malloc(nnbrs * sizeof(int));\r\n if(nbr_nridges == (int *)NULL){\r\n free(nbr_list);\r\n fprintf(stderr, \"ERROR : count_minutia_ridges : malloc : nbr_nridges\\n\");\r\n return(-450);\r\n }\r\n\r\n /* Foreach neighbor found and sorted in list ... */\r\n for(i = 0; i < nnbrs; i++){\r\n /* Count the ridges between the primary minutia and the neighbor. */\r\n ret = ridge_count(first, nbr_list[i], minutiae, bdata, iw, ih, lfsparms);\r\n /* If system error ... */\r\n if(ret < 0){\r\n /* Deallocate working memories. */\r\n free(nbr_list);\r\n free(nbr_nridges);\r\n /* Return error code. */\r\n return(ret);\r\n }\r\n\r\n /* Otherwise, ridge count successful, so store ridge count to list. */\r\n nbr_nridges[i] = ret;\r\n }\r\n\r\n /* Assign neighbor indices and ridge counts to primary minutia. */\r\n minutiae->list[first]->nbrs = nbr_list;\r\n minutiae->list[first]->ridge_counts = nbr_nridges;\r\n minutiae->list[first]->num_nbrs = nnbrs;\r\n\r\n /* Return normally. */\r\n return(0);\r\n}\r\n\r\n/*************************************************************************\r\n**************************************************************************\r\n#cat: find_neighbors - Takes a primary minutia and a list of all minutiae\r\n#cat: and locates a specified maximum number of closest neighbors\r\n#cat: to the primary point. 
Neighbors are searched, starting\r\n#cat: in the same pixel column, below, the primary point and then\r\n#cat: along consecutive and complete pixel columns in the image\r\n#cat: to the right of the primary point.\r\n\r\n Input:\r\n max_nbrs - maximum number of closest neighbors to be returned\r\n first - index of the primary minutia point\r\n minutiae - list of minutiae\r\n Output:\r\n onbr_list - points to list of detected closest neighbors\r\n onnbrs - points to number of neighbors returned\r\n Return Code:\r\n Zero - successful completion\r\n Negative - system error\r\n**************************************************************************/\r\nint find_neighbors_bkafis(int **onbr_list, int *onnbrs, const int max_nbrs,\r\n const int first, MINUTIAE *minutiae)\r\n{\r\n MINUTIA *minutia1, *minutia2;\r\n\tint \ti = 0, j = 0, k = 0;\r\n int\tcount = 0, iter = 0;\r\n int \tcenterX\t = minutiae->list[first]->x;\r\n int \tcenterY\t = minutiae->list[first]->y;\r\n float\tcenterAngle\t\t = (float)minutiae->list[first]->direction * 11.25; /* convert into grad */\r\n\tif (centerAngle <= 90)\r\n\t\tcenterAngle = 90 - centerAngle;\r\n\telse\r\n\t\tcenterAngle = 450 - centerAngle;\r\n\tcenterAngle *= M_PI/180;\r\n\t\r\n\tint min_num = minutiae->num;\r\n \r\n float\tmin_dist = 8.5, max_dist = 130; /* should be parameters of the function */\r\n int\tslot = 0;\r\n float d_phi = 0, tmp_dist = 0;\r\n \r\n\t/* Initialization */\r\n\tint* nbr_list;\r\n\tnbr_list = (int *)malloc(max_nbrs * sizeof(int));\r\n if(nbr_list == (int *)NULL){\t \r\n fprintf(stderr, \"ERROR : find_neighbors : malloc : nbr_list\\n\");\r\n return(-460);\r\n }\r\n for\t( i = 0; i < max_nbrs; i++)\r\n \tnbr_list[i] = -1;\r\n \r\n\tfloat *distances;\r\n\tdistances = (float *)malloc(min_num * sizeof(float));\r\n if(distances == (float *)NULL){\r\n\t free(nbr_list);\r\n fprintf(stderr, \"ERROR : find_neighbors : malloc : distances\\n\");\r\n return(-461);\r\n }\r\n\tfor(i = 0; i < min_num; 
i++){\r\n\t\tdistances[i] = sqrt( pow(centerX - minutiae->list[i]->x, 2) + pow(centerY - minutiae->list[i]->y, 2));\r\n\t}\r\n\t\r\n\tfloat *phi;\r\n\tphi = (float *)malloc(min_num * sizeof(float));\r\n\tmemset(phi,0,min_num * sizeof(float));\r\n if(phi == (float *)NULL){\r\n\t\tfree(nbr_list);\r\n\t\tfree(distances);\r\n\t\tfprintf(stderr, \"ERROR : find_neighbors : malloc : phi\\n\");\r\n\t\treturn(-462);\r\n }\r\n\t\r\n\t/* Changes: Minh Nguyen 2/8/2015 */\r\n\tfloat *minDistances = (float*)malloc(max_nbrs*sizeof(float));\r\n\tif (minDistances==NULL){\r\n\t\tfree(nbr_list);\r\n\t\tfree(distances);\r\n\t\tfree(phi);\r\n\t\tfprintf(stderr, \"ERROR : find_neighbors : malloc : phi\\n\");\r\n\t\treturn(-462);\t\r\n\t}\r\n\t\t\r\n\tint *minIds = (int*)malloc(max_nbrs*sizeof(int));\r\n\tif (minIds==NULL){\r\n\t\tfree(nbr_list);\r\n\t\tfree(distances);\r\n\t\tfree(phi);\r\n\t\tfree(minDistances);\r\n\t\tfprintf(stderr, \"ERROR : find_neighbors : malloc : phi\\n\");\r\n\t\treturn(-462);\t\r\n\r\n\t}\r\n\twhile\t( count < max_nbrs && iter < max_nbrs ){\r\n\t\tmemset(minDistances,0, max_nbrs*sizeof(float));\r\n\t\tmemset(minIds, 255, max_nbrs*sizeof(int));\r\n\t\tfor\t( i = 0; i < min_num; i++ ){\r\n\t\t\tif (i == first) continue; /* skip the center minutia itself */\r\n\t\t\tif ((distances[i] >= max_dist) || (distances[i] <= min_dist)) continue; /* skip minutia that is too far or too near */\r\n\t\t\t/* if the minutia i is already choosen as neighbour skip it */\r\n\t\t\tunsigned char found = 0;\r\n\t\t\tunsigned char j = 0;\r\n\t\t\twhile ((j<max_nbrs) && !found){\r\n\t\t\t\tif (nbr_list[j++]==i) found = 1;\r\n\t\t\t}\r\n\t\t\tif (found) continue;\r\n\t\t\t\t\t\t\r\n\t\t\ttmp_dist = distances[i];\r\n\t\t\tif (phi[i]==0)\r\n\t\t\t\tphi[i] = calculate_angle(centerX, centerY, minutiae->list[i]->x, minutiae->list[i]->y);\r\n\t\t\tfloat d_phi = (phi[i] >= centerAngle)?phi[i]-centerAngle:2*M_PI+phi[i]-centerAngle;\r\n int sector = floor( maxNeighbors*( d_phi/(2*M_PI) ) 
);\r\n\t\t\tif (minDistances[sector]==0){\r\n\t\t\t\tminDistances[sector] = tmp_dist;\r\n\t\t\t\tminIds[sector] = i;\r\n\t\t\t}\r\n\t\t\telse {\r\n\t\t\t\tif (minDistances[sector]>tmp_dist){\r\n\t\t\t\t\tminDistances[sector] = tmp_dist;\r\n\t\t\t\t\tminIds[sector] = i;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\tfor\t( i = 0; i < max_nbrs; i++ )\r\n\t\t\tif\t( minIds[i] != -1 )\r\n\t\t\t\tnbr_list[count++] = minIds[i];\r\n\t\titer++;\r\n\t}\r\n\tfree(distances);\r\n\tfree(minDistances);\t\r\n\tfree(minIds);\r\n\tfree(phi);\r\n\t\r\n\t/* return the result*/\r\n\tint nnbrs = 0;\r\n\tfor(i = 0; i < max_nbrs; i++)\r\n\t\tif(nbr_list[i] != -1)\r\n\t\t\tnnbrs++;\r\n\t\t\t\r\n\tif(nnbrs == 0){\r\n\t\tfree(nbr_list);\r\n\t\t*onbr_list = (int*)NULL;\r\n\t\t*onnbrs = 0;\r\n\t\treturn 0;\r\n\t}\r\n\t*onbr_list = nbr_list;\r\n\t*onnbrs = nnbrs;\r\n\t/* Return normally. */\r\n\treturn(0);\r\n}\r\n\r\n\r\n" }, { "alpha_fraction": 0.5670554041862488, "alphanum_fraction": 0.5761661529541016, "avg_line_length": 22.21238899230957, "blob_id": "873f58d61eee1e584b233a04c6a6fbfb65e31516", "content_id": "200afa55a1acdc9fb051c05ea09a7df7c0237d57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2744, "license_type": "no_license", "max_line_length": 151, "num_lines": 113, "path": "/bkafis/bkafis/src/lib/bkafisMoC/fingerprintMoC.c", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tmatcher.c\r\n\tDescription: Function implementation of BKAFIS matcher\r\n\tCreated on: Sep 24, 2014 \r\n \tAuthor: Nguyen Duc Minh\r\n\t\r\n\tUpdated on Aug 25, 2015 by Duong Tan Nghia:\r\n\t- add function DetectLDR\r\n\t- add params for DetectLDR into struct BkafisParams\r\n *********************************************************************/\r\n\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include \"fingerprintMoC.h\"\r\n\r\nvoid SaveFingerprintMoCText(char* 
path, FingerprintMoC* fingerMoC)\r\n{\r\n\tFILE *fp;\r\n\tunsigned char i,j;\r\n\tunsigned char neighborI;\r\n\t\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn 1;\r\n\t}\r\n\tif ( (fp = fopen(path,\"w\")) == NULL )\r\n\t{\r\n\t\treturn 1;\r\n\t}\r\n\t\r\n\t/*\r\n\tfprintf(fp,\"Minutiae information:Width=%d\\tHeight=%d\\tQuality=%d\\tnMinutiae=%d\\n\", finger->width, finger->height, finger->quality, finger->nMinutiae);\r\n\tfprintf(fp,\"x\\ty\\tAngle\\tType\\tQuality\\tLDR\\t#Neighbors\\tIndex\\tEd\\tDra\\tOda\\tRidgeCount...\\n\");\r\n\t*/\r\n\tunsigned char nMinutiae = GetNMinutiae(*fingerMoC);\r\n\t/* printf(\"nMinutiea=%d\\n\", nMinutiae);*/\r\n\tfor ( i = 0; i < nMinutiae; i++ )\r\n\t{\r\n\t\t\r\n\t\tfprintf\t(\tfp,\t\"%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\"\t, \r\n\t\t\t\t\tGetX(fingerMoC->minutiae[i])\t\t\t\t\t, \r\n\t\t\t\t\tGetY(fingerMoC->minutiae[i])\t\t\t, \r\n\t\t\t\t\tGetAngle(fingerMoC->minutiae[i])\t\t,\r\n\t\t\t\t\tGetType(fingerMoC->minutiae[i])\t\t,\r\n\t\t\t\t\tGetMinQuality(fingerMoC->minutiae[i])\t\t,\r\n\t\t\t\t\tGetLdr(fingerMoC->minutiae[i]),\r\n\t\t\t\t\tGetNNeighbors(fingerMoC->minutiae[i])\r\n\t\t\t\t);\r\n\t\tISOBYTE nNeighbors = GetNNeighbors(fingerMoC->minutiae[i]);\r\n\t\t\r\n\t\t/* printf(\"nNeighbors = %d\\n\",nNeighbors);*/\r\n\t\t\r\n\t\tif(nNeighbors){\r\n\t\t\t\r\n\t\t\tfor ( j = 0; j<nNeighbors; j++ ){\r\n\t\t\t\tfprintf\t(\tfp, \"\\t%d\\t%d\\t%d\\t%d\"\t\t\t\t\t\t\t\t, \r\n\t\t\t\t\t\tGetNeighborMinIndex(fingerMoC->minutiae[i].neighbors[j]),\r\n\t\t\t\t\t\tGetEd(fingerMoC->minutiae[i].neighbors[j]),\r\n\t\t\t\t\t\tGetMoCDra(fingerMoC->minutiae[i].neighbors[j]),\r\n\t\t\t\t\t\tGetRidgeCount(fingerMoC->minutiae[i].neighbors[j])\r\n\t\t\t\t);\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t}\r\n\t\t\t\t\t\t\r\n\t\tfprintf(fp,\"\\n\");\r\n\t}\r\n\t\r\n\tfclose(fp);\r\n\treturn 0;\r\n}\r\n\r\nISOBYTE SaveFingerprintMoC(unsigned char *path, FingerprintMoC *finger)\r\n{\r\n\tFILE\t*fp;\r\n\tunsigned char\tminI;\r\n\tunsigned char\tneighborI;\r\n\t\r\n\tif ( 
path == NULL )\r\n\t{\r\n\t\treturn 1;\r\n\t}\r\n\t\r\n\tif ( (fp = fopen(path, \"wb\")) == NULL )\r\n\t{\r\n\t\treturn 1;\r\n\t}\r\n\t/* printf(\"size of FingerprintMoC = %d\\n\", sizeof(FingerprintMoC));*/\r\n\tfwrite(finger, sizeof(FingerprintMoC), 1, fp );\r\n\tfclose(fp);\r\n\treturn\t0;\r\n}\r\n\r\nISOBYTE\tReadFingerprintMoC( unsigned char *path, FingerprintMoC *finger )\r\n{\r\n\tFILE\t*fp;\r\n\tunsigned char\tminI;\r\n\tunsigned char\tneighborI;\r\n\r\n\tif ( path == NULL )\r\n\t{\r\n\t\treturn\t1;\r\n\t}\r\n\r\n\tif ( (fp = fopen(path,\"rb\")) == NULL )\r\n\t{\r\n\t\treturn\t1;\r\n\t}\r\n\t\r\n\tfread( finger, sizeof(FingerprintMoC), 1, fp );\r\n\tfclose(fp);\r\n\treturn\t0;\r\n}\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5339901447296143, "alphanum_fraction": 0.5871921181678772, "avg_line_length": 24.076923370361328, "blob_id": "c35e106b6e826d8013bde895e8f5abae6f998ba4", "content_id": "2909bc2cc089226c252e34b0cf0e6b4ee3d497d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 75, "num_lines": 39, "path": "/bkafis/exports/include/int32.h", "repo_name": "lammobile/PI", "src_encoding": "UTF-8", "text": "/*********************************************************************\r\n\tint32.h\r\n\tDescription: Data structure to present 32 bit integers in 16 bit computers\r\n\tCreated on: Sep 13, 2015 \r\n \tAuthor: Nguyen Duc Minh\r\n *********************************************************************/\r\n\r\n\r\n\r\n#ifndef _INT32_H_\r\n#define _INT32_H_\r\n\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n#include <math.h>\r\n#include <ISOTemplate.h>\r\n/*\r\n#ifndef ISOBYTE\r\ntypedef\tunsigned char\t\t\t\t\tISOBYTE;\r\n#endif\r\n#ifndef ISOWORD\r\ntypedef\tunsigned short int\t\tISOWORD;\r\n#endif\r\n*/\r\ntypedef struct{\r\n\tISOWORD\t\thighWord;\r\n\tISOWORD\t\tlowWord;\r\n}int32;\r\n\r\n#define maxISOWORD\t(1<<16)\r\n#define toUint(i32)\t(unsigned 
int)(((i32).highWord<<16)+(i32).lowWord)\r\n\r\nvoid mult16s(const ISOWORD x, const ISOWORD y, int32 *p);\r\nvoid shift_right(int32* in, ISOBYTE shamt, int32* out);\r\nvoid shift_left(int32* in, ISOBYTE shamt, int32* out);\r\nISOWORD\tsqrt32(int32* num);\r\nvoid add32(int32* a, int32* b, int32* c);\r\nshort int\tatan2_fxp( short int y, short int x );\r\n#endif" } ]
54
KAcee77/django_sputnik_map
https://github.com/KAcee77/django_sputnik_map
a9ff8308873c7944481e415a144154c455e0917f
f8ec32b3c38cb5e7988e80b540cffcaff077c23f
5611655920aa536ec3c4e2e26520dda88bc79dee
refs/heads/main
"2023-03-18T19:13:38.007137"
"2021-03-05T15:07:17"
"2021-03-05T15:07:17"
344,240,887
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7837837934494019, "alphanum_fraction": 0.7837837934494019, "avg_line_length": 21.200000762939453, "blob_id": "1f1e12d1f5fccdaa21072f7a140bfafd34cf7297", "content_id": "0fc53156ce0bf7b734f1acc754f91a315b9d5114", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 41, "num_lines": 5, "path": "/django_sputnik_maps/apps.py", "repo_name": "KAcee77/django_sputnik_map", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass DjangoSputnikMapsConfig(AppConfig):\n name = 'django_sputnik_maps'\n" }, { "alpha_fraction": 0.5091357827186584, "alphanum_fraction": 0.5106172561645508, "avg_line_length": 32.766666412353516, "blob_id": "e5ded273f8fddca164d9da8172c8487cc7ffd91d", "content_id": "09ec3dabb0de3fdc57bc53279f4eeb4f8ef71de5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2030, "license_type": "no_license", "max_line_length": 87, "num_lines": 60, "path": "/django_sputnik_maps/static/django_sputnik_maps/js/base.js", "repo_name": "KAcee77/django_sputnik_map", "src_encoding": "UTF-8", "text": "const GEOCODER_URL = 'http://search.maps.sputnik.ru/search/addr?q=';\n\nasync function getData(address) {\n if (address !== '') {\n let query = GEOCODER_URL + address,\n response = await fetch(query);\n response = await response.json();\n if ('address' in response['result']) {\n return response;\n }\n }\n}\n\nasync function getListAddress(request) {\n let response = await getData(request);\n if (response) {\n let dataAddress = response['result']['address'],\n addressList = [];\n for (let address of dataAddress) {\n if (address['features']) {\n for (let features of address['features']) {\n addressList.push(features['properties']['display_name'])\n }\n }\n }\n return addressList\n }\n}\n\nasync function mappingAddress(request) {\n let response = await getData(request);\n if 
(response) {\n\n let firstAddress = response['result']['address'][0]['features'][0],\n region = $('#id_region').val(''),\n place = $('#id_place').val(''),\n street = $('#id_street').val(''),\n house = $('#id_house').val('');\n \n for (let addressComponent of firstAddress['properties']['address_components']){\n switch(addressComponent['type']){\n case 'region':\n region.val(addressComponent['value']);\n break;\n case 'place':\n place.val(addressComponent['value']);\n break;\n case 'street':\n let value = addressComponent['value'].replace('улица ', ''); \n street.val(value);\n break;\n }\n }\n\n let displayName = firstAddress['properties']['display_name'].split(',');\n let houseNum = +displayName.pop();\n house.val(houseNum ? houseNum: '');\n return firstAddress['geometry']['geometries'][0]['coordinates'];\n }\n}" }, { "alpha_fraction": 0.59779953956604, "alphanum_fraction": 0.6088019609451294, "avg_line_length": 37.904762268066406, "blob_id": "dd246228de1e86c8e8bac4149194f171263153f9", "content_id": "04edcd823eccf0cff1cb37b2b64a454a0b32c17e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 818, "license_type": "no_license", "max_line_length": 86, "num_lines": 21, "path": "/django_sputnik_maps/widgets.py", "repo_name": "KAcee77/django_sputnik_map", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.forms import widgets\n\n\nclass AddressWidget(widgets.TextInput):\n '''a map will be drawn after the address field'''\n template_name = 'django_sputnik_maps/widgets/mapwidget.html'\n\n class Media:\n css = {\n 'all': ('https://unpkg.com/[email protected]/dist/leaflet.css',\n settings.STATIC_URL + 'django_sputnik_maps/css/jquery-ui.min.css',\n settings.STATIC_URL + 'django_sputnik_maps/css/base.css',)\n\n }\n js=(\n \"https://unpkg.com/[email protected]/dist/leaflet.js\",\n settings.STATIC_URL + 'django_sputnik_maps/js/base.js',\n settings.STATIC_URL + 
'django_sputnik_maps/js/jquery-3.5.1.js',\n settings.STATIC_URL + 'django_sputnik_maps/js/jquery-ui.min.js',\n )\n\n" }, { "alpha_fraction": 0.7018348574638367, "alphanum_fraction": 0.7293577790260315, "avg_line_length": 34.91666793823242, "blob_id": "d7845b767efe2851b99d396007e56530fe6a5dbe", "content_id": "03a6c28ef94ffef0699a63752e0ff5cd44d0328e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 51, "num_lines": 12, "path": "/sample/models.py", "repo_name": "KAcee77/django_sputnik_map", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django_sputnik_maps.fields import AddressField\n\n# all fields must be present in the model\nclass SampleModel(models.Model):\n region = models.CharField(max_length=100)\n place = models.CharField(max_length=100)\n street = models.CharField(max_length=100)\n house = models.IntegerField()\n lat = models.FloatField()\n lon = models.FloatField()\n address = AddressField(max_length=200)\n \n" }, { "alpha_fraction": 0.711693525314331, "alphanum_fraction": 0.7237903475761414, "avg_line_length": 25.7297306060791, "blob_id": "eb6a88506c20b6911e0019176bd17f1bae5e43c9", "content_id": "1c83a3d036e599d54920fe1b65232ed95525a3e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 992, "license_type": "no_license", "max_line_length": 67, "num_lines": 37, "path": "/README.md", "repo_name": "KAcee77/django_sputnik_map", "src_encoding": "UTF-8", "text": "# DJANGO-SPUTNIK-MAPS\n## USAGE:\n* include the ``django_sputnik_maps`` app in your settings.py\n* create a model where the field names match the example \n\n```python\nfrom django.db import models\nfrom django_sputnik_maps.fields import AddressField\n\n\nclass SampleModel(models.Model):\n region = models.CharField(max_length=100)\n place = models.CharField(max_length=100)\n street = 
models.CharField(max_length=100)\n house = models.IntegerField()\n lat = models.FloatField()\n lon = models.FloatField()\n address = AddressField(max_length=200)\n```\n* in the ``admin.py`` include the following as a formfield_override\n\n```python\nfrom django.contrib import admin\nfrom django_sputnik_maps.fields import AddressField\nfrom django_sputnik_maps.widgets import AddressWidget\n\nfrom .models import SampleModel\n\n\[email protected](SampleModel)\nclass SampleModelAdmin(admin.ModelAdmin):\n formfield_overrides = {\n AddressField: {\n 'widget': AddressWidget\n }\n }\n``` \n\n\n" }, { "alpha_fraction": 0.7792207598686218, "alphanum_fraction": 0.7792207598686218, "avg_line_length": 14.600000381469727, "blob_id": "82685aa171b89973bdfcfe5fdf2337bc5183afb6", "content_id": "540f9a66b6102e942005600d03375aa7f3e04d74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/django_sputnik_maps/fields.py", "repo_name": "KAcee77/django_sputnik_map", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass AddressField(models.CharField):\n pass" }, { "alpha_fraction": 0.8823529481887817, "alphanum_fraction": 0.8823529481887817, "avg_line_length": 34, "blob_id": "872c1f33fedd1e11d6a29ebbb52cc3c5cef2ed17", "content_id": "2faa9e5e28c4e6990bdaf4b555521f964b5ae28b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 34, "num_lines": 1, "path": "/django_sputnik_maps/__init__.py", "repo_name": "KAcee77/django_sputnik_map", "src_encoding": "UTF-8", "text": "from .widgets import AddressWidget" }, { "alpha_fraction": 0.7349081635475159, "alphanum_fraction": 0.7349081635475159, "avg_line_length": 24.33333396911621, "blob_id": "a6cb2f6e8b2b7f22f1c6d3126f800b69586157e6", "content_id": "ac1b3320e1caf5509fb465e712329a6212fbb0b8", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 53, "num_lines": 15, "path": "/sample/admin.py", "repo_name": "KAcee77/django_sputnik_map", "src_encoding": "UTF-8", "text": "# from django.db import models\nfrom django.contrib import admin\nfrom django_sputnik_maps.fields import AddressField\nfrom django_sputnik_maps.widgets import AddressWidget\n\nfrom .models import SampleModel\n\n\[email protected](SampleModel)\nclass SampleModelAdmin(admin.ModelAdmin):\n formfield_overrides = {\n AddressField: {\n 'widget': AddressWidget\n }\n }\n\n" } ]
8
Kilian/gedit-jslint
https://github.com/Kilian/gedit-jslint
59da3d5655e466bd1b0ba9fa7393210022fb1025
41d1753b2639c8bf71f3ba43e07276e11199fdcb
d444c4753798e5166bfac24be8e74fb6a1095cf8
refs/heads/master
"2021-01-20T11:26:26.911876"
"2011-01-27T09:14:28"
"2011-01-27T09:14:28"
382,584
5
0
null
null
null
null
null
[ { "alpha_fraction": 0.5588914752006531, "alphanum_fraction": 0.5630155205726624, "avg_line_length": 34.238372802734375, "blob_id": "ce424a079560c1348e6b0af68884ceb5637fadf9", "content_id": "25f253ff95e9ea636249427fbc7422a9aabdb51a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6062, "license_type": "no_license", "max_line_length": 195, "num_lines": 172, "path": "/gedit-jslint/__init__.py", "repo_name": "Kilian/gedit-jslint", "src_encoding": "UTF-8", "text": "from gettext import gettext as _\n\nimport gtk\nimport gedit\nimport os\nimport simplejson\n\n# Menu item example, insert a new item in the Tools menu\nui_str = \"\"\"<ui>\n <menubar name=\"MenuBar\">\n <menu name=\"ToolsMenu\" action=\"Tools\">\n <placeholder name=\"ToolsOps_2\">\n <menuitem name=\"JSLint\" action=\"JSLint\"/>\n </placeholder>\n </menu>\n </menubar>\n</ui>\n\"\"\"\nclass JSLintWindowHelper:\n def __init__(self, plugin, window):\n self._window = window\n self._plugin = plugin\n self.tab = None\n self.pane = None\n\n # Insert menu items\n self._insert_menu()\n\n def deactivate(self):\n # Remove any installed menu items\n self._remove_menu()\n\n self._window = None\n self._plugin = None\n self._action_group = None\n\n def _insert_menu(self):\n # Get the GtkUIManager\n manager = self._window.get_ui_manager()\n\n # Create a new action group\n self._action_group = gtk.ActionGroup(\"JSLintPluginActions\")\n self._action_group.add_actions([(\"JSLint\", None, _(\"JSLint Check\"),\n \"<Ctrl>J\", _(\"JSLint Check\"),\n self.on_jslint_activate)])\n\n # Insert the action group\n manager.insert_action_group(self._action_group, -1)\n\n # Merge the UI\n self._ui_id = manager.add_ui_from_string(ui_str)\n\n def _remove_menu(self):\n # Get the GtkUIManager\n manager = self._window.get_ui_manager()\n\n # Remove the ui\n manager.remove_ui(self._ui_id)\n\n # Remove the action group\n manager.remove_action_group(self._action_group)\n\n # Make sure the 
manager updates\n manager.ensure_update()\n\n def update_ui(self):\n self._action_group.set_sensitive(self._window.get_active_document() != None)\n if self.pane:\n if self.tab != self._window.get_active_tab():\n self.lines = []\n self.errorlines.clear()\n self._window.get_bottom_panel().remove_item(self.pane)\n self.pane = None\n\n\n def row_clicked(self, treeview, path, view_column, doc):\n lineno, charno = self.lines[path[0]]\n view = self._window.get_active_view()\n bf = view.get_buffer()\n try:\n lineiter = bf.get_iter_at_line_offset(lineno, charno)\n except:\n lineiter = view.get_line_at_y(lineno)\n bf.place_cursor(lineiter)\n view.scroll_to_iter(lineiter, within_margin=0.25)\n view.grab_focus()\n\n # Menu activate handlers\n def on_jslint_activate(self, action):\n doc = self._window.get_active_document()\n self.tab = self._window.get_active_tab()\n if not doc:\n return\n\n tmpfile_path = os.path.join(os.path.split(__file__)[0], \"jslint.tmp\")\n jslint_path = os.path.join(os.path.split(__file__)[0], \"fulljslint.js\")\n\n jsondata = simplejson.dumps(doc.get_text(doc.get_iter_at_line(0), doc.get_end_iter()))\n\n tmpfile = open(tmpfile_path,\"w\")\n tmpfile.writelines(\"load('\" + jslint_path + \"');\")\n tmpfile.writelines(\"var body = \" + jsondata + \";\")\n tmpfile.write('''\n var result = JSLINT(body, {onevar: true, browser: true, undef: true, nomen: true, eqeqeq: true, plusplus: true, bitwise: true, regexp: true, strict: true, newcap: true, immed: true});\n var errors = [];\n if(JSLINT.errors){\n for(var i=0; i<JSLINT.errors.length; i++){\n if(JSLINT.errors[i]){\n errors.push('{\"reason\":\"' + JSLINT.errors[i].reason + '\", \"line\":' + JSLINT.errors[i].line + ', \"character\":' + JSLINT.errors[i].character + '}');\n }\n }\n }\n var output = '{\"errors\":[' + errors.join(\",\") + '], \"result\":\"' + result + '\"}';\n print(output);\n ''')\n tmpfile.close()\n\n command = 'js -f ' + tmpfile_path\n fin,fout = os.popen4(command)\n result = fout.read()\n 
jslint_results = simplejson.loads(result)\n\n if not self.pane:\n self.errorlines = gtk.ListStore(int,int,str)\n self.pane = gtk.ScrolledWindow()\n treeview = gtk.TreeView(model=self.errorlines)\n lineno = gtk.TreeViewColumn('Line')\n charno = gtk.TreeViewColumn('Char')\n message = gtk.TreeViewColumn('Message')\n treeview.append_column(lineno)\n treeview.append_column(charno)\n treeview.append_column(message)\n cell1 = gtk.CellRendererText()\n cell2 = gtk.CellRendererText()\n cell3 = gtk.CellRendererText()\n lineno.pack_start(cell1,True)\n charno.pack_start(cell2, True)\n message.pack_start(cell3, True)\n lineno.set_attributes(cell1, text=0)\n charno.set_attributes(cell2, text=1)\n message.set_attributes(cell3, text=2)\n bottom = self._window.get_bottom_panel()\n image = gtk.Image()\n image.set_from_icon_name('stock_mark', gtk.ICON_SIZE_MENU)\n self.pane.add(treeview)\n bottom.add_item(self.pane, 'JSLint', image)\n treeview.connect(\"row-activated\", self.row_clicked, doc)\n self.pane.show_all()\n\n self.errorlines.clear()\n self.lines = []\n for e in jslint_results['errors']:\n self.errorlines.append([e['line']+1, e['character']+1, e['reason']])\n self.lines.append([int(e['line']), int(e['character'])])\n\n self._window.get_bottom_panel().set_property(\"visible\", True)\n\n\nclass JSLintPlugin(gedit.Plugin):\n def __init__(self):\n gedit.Plugin.__init__(self)\n self._instances = {}\n\n def activate(self, window):\n self._instances[window] = JSLintWindowHelper(self, window)\n\n def deactivate(self, window):\n self._instances[window].deactivate()\n del self._instances[window]\n\n def update_ui(self, window):\n self._instances[window].update_ui()\n\n" } ]
1
Code-Institute-Submissions/ultimate-irish-quiz
https://github.com/Code-Institute-Submissions/ultimate-irish-quiz
5ec5b7b39f23447ac70adc876664d254d400418b
350935ca04b0ac5f22f1477fc5825ae4bb1e8977
f4502b3212463b8aee55263233dbf89bf6175320
refs/heads/master
"2022-11-24T09:08:55.397793"
"2020-08-01T20:02:53"
"2020-08-01T20:02:53"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7226425409317017, "alphanum_fraction": 0.7332459688186646, "avg_line_length": 51.13664627075195, "blob_id": "aa026f8ded308404fba703fa246d815c766a68b8", "content_id": "da025411bdef110d4fb8c3acf78ec5dbc5b01974", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 16791, "license_type": "no_license", "max_line_length": 238, "num_lines": 322, "path": "/README.md", "repo_name": "Code-Institute-Submissions/ultimate-irish-quiz", "src_encoding": "UTF-8", "text": "# Ultimate Irish Quiz\n<img src=\"static/images/logo.png\" alt=\"Ultimate Irish Quiz Logo\" width=\"350px\">\n\n# Contents\n1. <a href=\"#Demo\">Demo</a>\n1. <a href=\"#UX\">UX</a>\n * <a href=\"#UserStories\">User Stories</a>\n * <a href=\"#Strategy\">Strategy</a>\n * <a href=\"#Wireframes\">Wireframes</a>\n1. <a href=\"#Features\">Features</a>\n * <a href=\"#FeaturesLeft\">Possible Future Features</a>\n1. <a href=\"#Technologies\">Technologies</a>\n1. <a href=\"#Testing\">Testing</a>\n1. <a href=\"#Deployment\">Deployment</a>\n * <a href=\"#GitHubPages\">GitHub Pages</a>\n * <a href=\"#ProjectLocally\">To run this project locally</a>\n1. <a href=\"#Credits\">Credits</a>\n * <a href=\"#Content\">Content</a>\n * <a href=\"#Media\">Media</a>\n * <a href=\"#Acknowledgements\">Acknowledgements</a>\n1. <a href=\"#Disclaimer\">Disclaimer</a>\n\n<h2>Third Milestone Project: Data Centric Development - Code Institute</h2>\n<p>This website is based on using MongoDB as a database to store questions \nand answers relating to Ireland which are categorised. I use CRUD operations - \ncreate questions, read questions, update questions and delete questions.\n</p>\n\n<h1 id=\"Demo\">1. Demo</h1>\n<p>A live demo can be found <a target=\"_blank\" href=\"https://ultimate-irish-quiz.herokuapp.com/\">here.</a></p>\n\n<h1 id=\"UX\">2. 
UX</h1>\n<h2 id=\"UserStories\">User Stories</h1>\n<p>I myself have a keen interest in table quizes and during the lockdown, I held zoom quizzes with my friends \nin England and Ireland. After a few quizzes, I was running out of questions and found it a nuisance searching\nloads of diffent quiz websites for questions. The one thing I noticed was that there wasn't a quiz website where\nyou could add and store questions and that's why I developed this website.</p>\n\n<p>I also contacted 3 of my friends that have a keen interest in quizzes and all things Irish.</p>\n\n<h3>Damien and Julie from Kingsbury</h3>\n<p>As a user, we want to test our knowledge on all things Irish and don't want to see the answer \nbefore we have a guess and then be able to click to reveal the answer. We would like to be able \nto add questions that are not there already, delete questions that are incorrect and update \nquestions that need updating.\n</p>\n\n<h3>John from Ballina</h3>\n<p>As a user, I want to get easy access to loads of Irish based questions and be able to view \nthem by category.</p>\n\n<h3>Darren from Melbourne</h3>\n<p>As a user and quizmaster in local Irish Club, I want to be able to purchase specially \ndesigned quizzes based on Irish questions so that I don't have to waste time researching \ndozens of questions.</p>\n\n<h2 id=\"Strategy\">Strategy</h2>\n<p>The overall strategy for visitors to this website is for them to read Irish questions and answers by category \nor all questions together, let them be able to update them, delete them and create them. My goal in the design of \nthis website is to make it easy to navigate and use and easy on the eye.\nAs soon as you open the website you will see an image of Quiz Time in the Navbar which I find is eye catching.... \nI designed the logo on <a target=\"_blank\" href=\"https://www.freelogodesign.org/\">www.freelogodesign.org</a> and \nchose a subtle green, white and orange as these are the colours of Irish flag. 
I chose the image of the head with emphasise \nof the brain showing brainpower to answer questions. \nI used <a target=\"_blank\" href=\"https://materializecss.com/color.html/\"> materialize colours</a>\nand selected mainly different shades of green and orange for the basis of the website pages as these are the colours of the Irish flag.\nI selected these colours consistently throughout the website so as not to overpower the website with too many colours.\nI have used <a target=\"_blank\" href=\"https://fonts.google.com/\">Google Fonts</a> to select the 2 different fonts used \nin the website. I chose Montserrat font for the \nmain headings as it compliments the Logo font. I also chose Roboto font for the main scripting as it is a good pairing\nwith Montserrat font as per the google font website.</p>\n\n<h2 id=\"Wireframes\">Wireframes</h2>\n\n<h3>Wireframes for mobile devices</h3> \n<img src=\"wireframes/wireframe5.jpg\" alt=\"WireframesLS1\" width=\"350px\">\n<img src=\"wireframes/wireframe4.jpg\" alt=\"WireframesLS2\" width=\"350px\">\n\n<h3>Wireframes for tablet devices</h3> \n<img src=\"wireframes/wireframe3.jpg\" alt=\"WireframesMS1\" width=\"350px\">\n<img src=\"wireframes/wireframe3.jpg\" alt=\"WireframesLM2\" width=\"350px\">\n\n<h3>Wireframe for desktop devices</h3> \n<img src=\"wireframes/wireframe5.jpg\" alt=\"WireframesMobile\" width=\"350px\">\n\n<h1 id=\"Features\">3. Features</h1>\n<h2>Navbar</h2>\nI have used a responsive navigation bar using materialize. \nFor mobile devices a hamburger menu on the top-left expands a menu with links to \nthe different sections and pages on the website with the logo in the centre. On larger \ndevices the expanded menu is shown instead. I feel this will be easy for users to \nquickly navigate the website. 
This navbar will be fixed for easy navigation.\n\n<h2>Card</h2>\nI used the <a target=\"_blank\" href=\"https://materializecss.com/cards.html\">Materialize Cards</a> \nfor the Chose Category section as I could show an image , a wee write up and a link all in the one card.\nI also used it for the purchase quizzes section in the shop page. Again, I could show an image, a more \ndetailed section for each quiz when you click <i class=\"material-icons\">more_vert</i> and a link to purchase\nthe quiz. When you hover over the cards, a subtle shadow affect will appear.\n\n<h2>Modal</h2>\nFor the Contact Us Form section in the Footer, I chose to use a modal as it would \ntake up too much space in the footer. \n\n<h2>Contact Form and EmailJS</h2>\nI included a contact form with 3 required fields (name, email and query) \nthat when completed correctly will send an email to myself and an auto reply email\nto the sender using <a target=\"_blank\" href=\"https://www.emailjs.com/\">EmailJS</a>. \nIf one or more of the fields are not completed correctly, website visitor will be advised and\nwill not be able to send until all fields completed.\n\n<h2>Buttons</h2>\nWhen you hover over all buttons, it will change to a subtly darker shade of the exiting colour \nwith a shadow behind the button.\n\n<h2 id=\"FeaturesLeft\">Possible Future Features</h2>\n* Possibly add a Log In/Sign Up option. Allow members only allowed to Add, Edit or Delete questions. \n* Add a chat function for the members to communicate to each other.\n* When Delete is pressed, add a modal to let the user confirm they want to delete the question.\n* Add a flash message when submit button is pressed in the contact us form to advise that it was\ncompleted (as well as receiving a confirmation email - already in place).\n* Complete the add to cart link (have not learned yet - under construction in this website)\n\n<h1 id=\"Technologies\">4. 
Technologies</h1>\n<h2>Language Used</h2>\n\n* <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/HTML\">HTML</a>\n* <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Cascading_Style_Sheets\">CSS</a>\n* <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/JavaScript\">Javascript</a>\n* <a target=\"_blank\" href=\"https://www.python.org/\">Python</a>\n\n<h2>Frameworks, Libraries & Programs Used</h2>\n\n* <a target=\"_blank\" href=\"https://materializecss.com/\">Materialize</a>\n * **Materialize** was used to assist with the responsiveness and styling of the navbar, \n the sidebar, the cards, icons on forms, the colours, the shadows, the buttons, the modal \n and the footer.\n* <a target=\"_blank\" href=\"https://fonts.google.com/\">Google Fonts</a>\n * **Google fonts** were used to import the Montserrat and Roboto font into the style.css \n file which are the 2 fonts used in the project.\n* <a target=\"_blank\" href=\"https://fontawesome.com/\">Font Awesome</a>\n * **Font Awesome** was used to add icons for UX purposes. 
Icons specifically used for social \n icons in footer.\n* <a target=\"_blank\" href=\"https://jquery.com/\">jQuery</a>\n * The project uses **JQuery** to simplify DOM manipulation.\n* <a target=\"_blank\" href=\"https://www.emailjs.com/\">EmailJS</a>\n * When the Contact Us form is completed correctly, **EmailJS** will email myself details of \n the completed contact form and also auto reply to the user.\n* <a target=\"_blank\" href=\"https://git-scm.com/\">Git</a>\n * **Git** was used for version control by utilizing the Gitpod terminal to commit to Git and Push to GitHub and Heroku.\n* <a target=\"_blank\" href=\"https://www.gitpod.io/\">Gitpod</a>\n * **Gitpod** was the primary IDE used throughout the entirety of the project.\n* <a target=\"_blank\" href=\"https://github.com/\">GitHub</a>\n * **GitHub** is used to store the project code after being pushed from Git.\n* <a target=\"_blank\" href=\"https://htmlformatter.com/\">HTML Formatter</a>\n * **HTML Formatter** was used to beautify code to keep the code neat and \n easy to read. It was utilised as Beautify Cmd (Shift + Alt + F) in GitPod distorted the code in GitHub.\n* <a target=\"_blank\" href=\"https://dashboard.heroku.com/\">Heroku</a>\n * **Heroku** is a platform as a service (PaaS) that enables \n developers to build, run, and operate applications entirely in the cloud. \n This website is deployed using Heroku.\n* <a target=\"_blank\" href=\"https://flask.palletsprojects.com/en/1.1.x/\">Flask</a>\n * **Flask** is used to dynamically generate pages and content within the application. 
\n* <a target=\"_blank\" href=\"https://pypi.org/project/pymongo//\">PyMongo</a>\n * **PyMongo** is used connect and interact with my data to and from my MongoDB database.\n* <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Jinja_(template_engine)\">Jinja</a>\n * **Jinja** is used as the template engine for python.\n* <a target=\"_blank\" href=\"https://www.mongodb.com/cloud/atlas\">MongoDB</a>\n * **MongoDB** is used as my storage database for category name, questions and answers, eg\n * _id 5ede8f190d65a7821001c3a0<br>\ncategory_name: \"General Knowledge\"<br>\nquestion: \"Who opened its first store in Ireland first - Aldi or Lidl?\"<br>\nanswer: \"Aldi\"\n\n<h1 id=\"Testing\">5. Testing</h1>\n\n### UX Stories\n**Damien and Julie from Kingsbury** wanted the following:<br>\nCollapsible question and answer - included<br>\nCRUD - included\n \n**John from Ballina** wanted the following:<br>\nView questions by category - included\n\n**Darren from melbourne** wanted the following:<br>\nOnline shop to purchase ready made quizzes (wip) - included\n\n### Validation\nI used validator websites to test the following:\n\n* **HTML** - <a target=\"_blank\" href=\"https://validator.w3.org/\">W3C Html Checker</a> - all errors in all html pages relate to { is not allowed in certain lines which relates to jinja.\n* **JavaScript** - <a target=\"_blank\" href=\"https://jshint.com/\">JSHint</a> - zero warnings\n* **Python** - <a target=\"_blank\" href=\"http://pep8online.com/\">PEP8 Online Check</a> - a number of whitespace and lines too long warnings. Line 26 line too long (83 > 79 characters). 
Could not indent it any less or move it to next line.\n\n* **CSS** - <a target=\"_blank\" href=\"https://jigsaw.w3.org/css-validator/#validate_by_input\">W3C CSS Checker</a> - No errors found\n\n \n\n<p>\n <a href=\"http://jigsaw.w3.org/css-validator/check/referer\">\n <img style=\"border:0;width:88px;height:31px\"\n src=\"http://jigsaw.w3.org/css-validator/images/vcss\"\n alt=\"Valid CSS!\" />\n </a>\n</p>\n\nWhen I right clicked inspect, zero error or warnings appeared.\n\n### Responsiveness\n<p>I tested the responsiveness of the website on google chrome by using <a target=\"_blank\" href=\"http://www.responsinator.com/\">www.responsinator.com</a>. I also used the \nthe inspect by right clicking over the website and then going into the toggle device toolbar. I selected the most used devices to also test for \nresponsiveness i.e. iPhone 6/7/8, iPhone 6/7/8 plus, iPhone X, iPad and iPad Pro.</p>\n\n### Navbar\nAll links in navabar in all pages are tested and working correctly. \n\n### Sidebar\nAll links in sidebar in all pages are tested and work correctly, however when in home page, the links to Categories and All Questions\ngo to the correct section however the sidebar remains. 
You have to click outside sidebar to make it disappear.\nI've googled problem, asked help from tutor support but no joy to fix the problem.\n\n### Footer\nAll links in footer in all pages are tested and working correctly\n\n### Links in main section of Pages\n**Home Page** \n\n* Shop links tested and working correctly\n* All links in category cards tested and working correctly bringing you to the list of questions \n* All links for Edit, Delete and reveal answer are tested and working correctly \n \n**Add a Question Page**\n\n* Dropdown for category is tested and working correctly\n* Question and answer fields are tested and working correctly\n* Link to add question is tested and working correctly\n* If question, answer or both fields are left blank, question will not be added is tested and working correctly \n\n**Edit a Question Page**\n\n* Dropdown for category is tested and working correctly\n* Question and answer fields are tested and working correctly\n* Link to edit question is tested and working correctly\n* If question, answer or both fields are left blank, question will not be added is tested and working correctly\n\n**Categories Page**\n\n* All links for Edit, Delete and reveal answer are tested and working correctly\n\n**Shop Page**\n\n* All links in Quiz purchase cards are tested and working correctly \n\n**Shop Page**\n\n* Link back to Home page and shop page are tested and working correctly\n\n\n### Contact Us\nContact Us working correctly. I tried to submit an empty form and verified that an error message about the \nrequired fields appears. Tried to submit the form with an invalid email address without an @ and verified \nthat a relevant error message appears. 
Tried to submit the form with only 1 field correctly entered, then \n2 fields correctly and verified that a relevant error message appears\nfor the remaining field(s) left blank.\n\n### EmailJS\nWhen form fully completed correctly, I have checked that an email will be sent to my email address with all information\ngiven and also an automatic email will be sent the the email address entered in the form to say \"Hi 'fname' Thank you\nfor completing the contact form in the Ultimate Irish Quiz website. We will reply to you within 24 hours. \nKinds Regards, Ultimate Irish Quiz.\"\n\n### Browser Compatibility\n. | Appearance | Responsiveness | Comments |\n------------ | ------------- | ------------ | ------------\nChrome |Good | Good | n/a\nFirefox |Good | Good | n/a \nSafari |Good | Good | n/a \nMiscosoft Edge |Good | Good | n/a \n\n \n\n\n\n\n<h1 id=\"Deployment\">6. Deployment</h1>\n<h2 id=\"GitHubPages\">GitHub Pages</h2>\n<p>Ultimate Irish Quiz was developed using the Gitpod IDE. A repository was created on GitHub and regular commited \nto Git and pushed to GitHub and Heroku and was deployed using Heroku. </p>\n In order to deploy to Heroku, I did the following:\n\n1. I logged into Heroku, selected \"create new app\", named it \"ultimate-irish-quiz\" and set region to Europe \n1. In settings, I selected configuration vars and created PORT (5000) and IP(0.0.0.0) and pressed add.\n1. I added “MONGO_URI” with the secret key.\n1. I then created a GitHub repository ultimate-irish-quiz and opened it in Gitpod. \n1. I created a requirements.txt file using $ pip3 freeze --local > requirements.txt\n1. I created a Procfile, opened it and add: web: python app.py\n1. I then did git add requirements.txt, git add Procfile and commited them.\n1. I then logged into heroku using heroku login and clicked login\n1. I added a remote repo to git repo using heroku git:remote -a ultimate-irish-quiz\n1. I pushed git repo to heroku using git push heroku master\n1. 
And the deployed website can be found at https://ultimate-irish-quiz.herokuapp.com/ \n\n<h1 id=\"Credits\">7. Credits</h1>\n\n<h2 id=\"Content\">Content</h2>\n<p>All content in this website was written by myself.</p>\n\n<h2 id=\"Media\">Media</h2>\n<p>The images were taken from the internet. (website are for educational purposes only.) </p>\n\n<h2 id=\"Acknowledgements\">Acknowledgements</h2>\nThe main websites I used to help me with the coding in the various sections are:\n\n1. <a target=\"_blank\" href=\"https://www.w3schools.com\">W3School Website</a>\n2. <a target=\"_blank\" href=\"https://stackoverflow.com/\">stackoverflow Website</a>\n\nI would like to ackknowledge the Code Institute Learning Management System on helping \nme learn all about HTML, CSS, Javascript, Python and MongoDB. The mini projects were a great help. \nI would also like to thank the team in Tutor Support and my mentor for some problems I encountered along the way.\n\n<h1 id=\"Disclaimer\">8. Disclaimer</h1>\nThe content and images on this website are for educational purposes only." 
}, { "alpha_fraction": 0.6499484181404114, "alphanum_fraction": 0.6499484181404114, "avg_line_length": 27.839284896850586, "blob_id": "88e99a6a1466ccc8061f9e2fc79dddb3f49d2f94", "content_id": "3c581bcf5d2500e44161d71858dbb4a162d38d52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4845, "license_type": "no_license", "max_line_length": 83, "num_lines": 168, "path": "/app.py", "repo_name": "Code-Institute-Submissions/ultimate-irish-quiz", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask, render_template, redirect, request, url_for\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId\n\nfrom os import path\nif path.exists(\"env.py\"):\n import env\n\nMONGO_URI = os.environ.get(\"MONGO_URI\")\n\napp = Flask(__name__)\napp.config[\"MONGO_DBNAME\"] = 'quiz_questions'\napp.config[\"MONGO_URI\"] = MONGO_URI\n\n\nmongo = PyMongo(app)\n\n# Route for Home Page\n\n\[email protected]('/')\[email protected]('/get_questions')\ndef get_questions():\n return render_template(\"question_and_answer.html\",\n question_and_answer=mongo.db.question_and_answer.find())\n\n# Route to Add a Question\n\n\[email protected]('/add_question')\ndef add_question():\n return render_template('addquestion.html',\n categories=mongo.db.categories.find())\n\n# Route to Insert Question\n\n\[email protected]('/insert_question', methods=['POST'])\ndef insert_question():\n question_and_answer = mongo.db.question_and_answer\n question_and_answer.insert_one(request.form.to_dict())\n return redirect(url_for('get_questions'))\n\n# Route to Edit Question\n\n\[email protected]('/edit_question/<question_and_answer_id>')\ndef edit_question(question_and_answer_id):\n the_question = mongo.db.question_and_answer.find_one(\n {\"_id\": ObjectId(question_and_answer_id)})\n all_categories = mongo.db.categories.find()\n return render_template('editquestion.html',\n question_and_answer=the_question,\n 
categories=all_categories)\n\n# Route to Update Question\n\n\[email protected]('/update_question/<question_and_answer_id>', methods=['POST'])\ndef update_question(question_and_answer_id):\n question_and_answer = mongo.db.question_and_answer\n question_and_answer.update({'_id': ObjectId(question_and_answer_id)},\n {\n 'category_name': request.form.get('category_name'),\n 'question': request.form.get('question'),\n 'answer': request.form.get('answer')\n })\n return redirect(url_for('get_questions'))\n\n# Route to Delete Question\n\n\[email protected]('/delete_question/<question_and_answer_id>')\ndef delete_question(question_and_answer_id):\n mongo.db.question_and_answer.remove(\n {'_id': ObjectId(question_and_answer_id)})\n return redirect(url_for('get_questions'))\n\n# Route for Shop Link\n\n\[email protected]('/shop')\ndef get_shop():\n return render_template(\"shop.html\")\n\n# Route for Under Construction Link\n\n\[email protected]('/under_construction')\ndef get_under_construction():\n return render_template(\"under_construction.html\")\n\n# Route for General Knowledge category\n\n\[email protected]('/get_general_knowledge')\ndef get_general_knowledge():\n question_and_answer = list(mongo.db.question_and_answer.find(\n {'category_name': 'General Knowledge'}))\n return render_template(\"categories.html\",\n question_and_answer=question_and_answer)\n\n# Route for Geography category\n\n\[email protected]('/get_geography')\ndef get_geography():\n question_and_answer = list(\n mongo.db.question_and_answer.find({'category_name': 'Geography'}))\n return render_template(\"categories.html\",\n question_and_answer=question_and_answer)\n\n# Route for History category\n\n\[email protected]('/get_history')\ndef get_history():\n question_and_answer = list(\n mongo.db.question_and_answer.find({'category_name': 'History'}))\n return render_template(\"categories.html\",\n question_and_answer=question_and_answer)\n\n# Route for Music category\n\n\[email 
protected]('/get_music')\ndef get_music():\n question_and_answer = list(\n mongo.db.question_and_answer.find({'category_name': 'Music'}))\n return render_template(\"categories.html\",\n question_and_answer=question_and_answer)\n\n# Route for Politics category\n\n\[email protected]('/get_politics')\ndef get_politics():\n question_and_answer = list(\n mongo.db.question_and_answer.find({'category_name': 'Politics'}))\n return render_template(\"categories.html\",\n question_and_answer=question_and_answer)\n\n# Route for Sports category\n\n\[email protected]('/get_sport')\ndef get_sport():\n question_and_answer = list(\n mongo.db.question_and_answer.find({'category_name': 'Sport'}))\n return render_template(\"categories.html\",\n question_and_answer=question_and_answer)\n\n# Route for TV and Film category\n\n\[email protected]('/get_tv_and_film')\ndef get_tv_and_film():\n question_and_answer = list(mongo.db.question_and_answer.find({\n 'category_name': 'TV and Film'}))\n return render_template(\"categories.html\",\n question_and_answer=question_and_answer)\n\n\nif __name__ == '__main__':\n app.run(host=os.environ.get('IP'),\n port=int(os.environ.get('PORT')),\n debug=True)\n" } ]
2
MMaazT/TSP-using-a-Genetic-Algorithm
https://github.com/MMaazT/TSP-using-a-Genetic-Algorithm
ebf4591ce7b63527f223272caab96caa006b0fd8
61ae4dd42aa4fdc365992203a5ef5584814313e4
dac059e95f027e5720cf8e9413c8b40c6a1562fc
refs/heads/master
"2020-05-17T20:24:11.150565"
"2019-04-29T09:39:27"
"2019-04-29T09:39:27"
183,944,202
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5104002952575684, "alphanum_fraction": 0.5712288022041321, "avg_line_length": 26.814634323120117, "blob_id": "b291fd847c90e1de1d71f0d9deba928e02bc5e2b", "content_id": "8f8919f605e5e3af2542948532182872ea2f36d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5721, "license_type": "no_license", "max_line_length": 96, "num_lines": 205, "path": "/TSP.py.py", "repo_name": "MMaazT/TSP-using-a-Genetic-Algorithm", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 28 13:31:51 2019\n\n@author: mmaaz\n\"\"\"\n\nfrom itertools import permutations\nimport random as rand\nimport matplotlib.pyplot as plt\n\ncityDict ={'A': [('B', 8), ('C',10), ('D', 3), ('E', 4), ('F',6)],\n 'B': [('A', 8), ('C',9), ('D', 5), ('E', 5), ('F',12)],\n 'C': [('A', 10), ('B',9), ('D', 7), ('E', 6), ('F',2)],\n 'D': [('A', 3), ('B',5), ('C', 7), ('E', 8), ('F',11)],\n 'E': [('A', 4), ('B',8), ('C', 6), ('D', 8), ('F',8)],\n 'F': [('A', 6), ('B',12), ('C', 2), ('D', 11), ('E',8)]}\n\ndef main():\n permut=rand.sample(list(permutations('ABDEF')), 10)\n best=[]\n average=[]\n for i in range(100):\n initialTourCost= fitnessFunction(permut)\n parents=parentSelection(initialTourCost)\n crossed= crossOver(parents)\n mut=insertMutations(crossed)\n tenBest= survivalSelection(mut, initialTourCost)\n bo=(bestSoFar(tenBest))\n a=(averageBest(tenBest))\n \n permut=removeC(tenBest)\n \n best.append(bo)\n average.append(round(a,5))\n \n print(best)\n print(average)\n #plt.figure(figsize=(20,10))\n #plt.show( plotBest(best))\n plotBest(best)\n #plotAverage(average)\n \ndef fitnessFunction(candidates):\n tourCost= []\n for individual in candidates:\n sumd=0;\n for i in range(len(individual)-1):\n for j in range(len(cityDict[individual[i]])):\n if(cityDict[individual[i]][j][0]==individual[i+1]):\n sumd+=cityDict[individual[i]][j][1] \n tourCost.append((list(individual),sumd))\n return tourCost\n\ndef 
parentSelection(tourCost):\n aux= tourCost[:]\n parents= []\n p1= rand.sample(aux, 2)\n if(p1[0][1]> p1[1][1]):\n parents.append(p1[0])\n aux.remove(p1[0])\n else:\n parents.append(p1[1])\n aux.remove(p1[1])\n \n p2=rand.sample(aux,2)\n if(p2[0][1]> p2[1][1]):\n parents.append(p2[0])\n aux.remove(p2[0])\n else:\n parents.append(p2[1])\n aux.remove(p2[1])\n \n p3=rand.sample(aux,2)\n if(p3[0][1]> p3[1][1]):\n parents.append(p3[0])\n aux.remove(p3[0])\n else:\n parents.append(p3[1])\n aux.remove(p3[1])\n \n p4=rand.sample(aux,2)\n if(p4[0][1]> p4[1][1]):\n parents.append(p4[0])\n aux.remove(p4[0])\n else:\n parents.append(p4[1])\n aux.remove(p4[1]) \n return parents\n\ndef crossOver(parents):\n sind12= rand.randint(0,3)\n eind12=rand.randint(sind12,4)\n if(sind12==eind12):\n eind12+=1\n sind34= rand.randint(0,3)\n eind34=rand.randint(sind34,4)\n if(sind34==eind34):\n eind34+=1\n\n offs1=[0,0,0,0,0]\n offs2=[0,0,0,0,0]\n offs3=[0,0,0,0,0]\n offs4=[0,0,0,0,0]\n \n offs1[sind12:eind12+1]=parents[0][0][sind12:eind12+1]\n offs2[sind12:eind12+1]=parents[1][0][sind12:eind12+1]\n offs3[sind34:eind34+1]=parents[2][0][sind34:eind34+1]\n offs4[sind34:eind34+1]=parents[3][0][sind34:eind34+1]\n \n auxparent2=parents[1][0][eind12:]\n auxparent2= auxparent2 + parents[1][0][:eind12]\n auxind=eind12\n for j in range(len(auxparent2)):\n if(auxparent2[j] not in offs1):\n auxind+=1\n offs1[auxind%5]=auxparent2[j]\n \n auxparent1=parents[0][0][eind12:]\n auxparent1= auxparent1 + parents[0][0][:eind12]\n auxind=eind12\n for j in range(len(auxparent1)):\n if(auxparent1[j] not in offs2):\n auxind+=1\n offs2[auxind%5]=auxparent1[j]\n \n auxparent4=parents[3][0][eind34:]\n auxparent4= auxparent4 + parents[3][0][:eind34]\n auxind=eind34\n for j in range(len(auxparent4)):\n if(auxparent4[j] not in offs3):\n auxind+=1\n offs3[auxind%5]=auxparent4[j]\n \n auxparent3=parents[2][0][eind34:]\n auxparent3= auxparent3 + parents[2][0][:eind34]\n auxind=eind34\n for j in range(len(auxparent3)):\n 
if(auxparent3[j] not in offs4):\n auxind+=1\n offs4[auxind%5]=auxparent3[j]\n \n crossOffsprings= [offs1,offs2, offs3, offs4]\n return crossOffsprings\n\ndef insertMutations(crossOffsprings):\n probability= round(rand.random(),2)\n if(probability<=0.20):\n for mut in crossOffsprings:\n m1=rand.randint(0,2)\n m2=rand.randint(3,4)\n mut.insert(m1+1,mut[m2])\n mut.remove(mut[m2+1]) \n else:\n return crossOffsprings\n return crossOffsprings\n\ndef survivalSelection(mutCross, parents):\n costOffspring=fitnessFunction(mutCross)\n parents=parents+costOffspring\n finalParents= addC(parents)\n finalParents=sorted(finalParents, key= lambda x: x[1])\n finalParents=finalParents[:10]\n return finalParents\n\ndef addC(parents):\n c=[]\n for i in parents:\n i[0].insert(0, 'C')\n for i in parents:\n c.append(i[0])\n finalParents= fitnessFunction(c)\n return finalParents\n\ndef removeC(parents):\n c=[]\n for i in parents:\n i[0].remove('C') \n for i in parents:\n c.append(i[0]) \n return c\n\ndef bestSoFar(finalParents):\n bestFitness=finalParents[0][1]\n return bestFitness\n\ndef averageBest(finalParents):\n sumd=0\n for i in finalParents:\n sumd+= i[1]\n return sumd/6\n\ndef plotAverage(ave):\n plt.plot(ave)\n plt.xlabel('Generation Number')\n plt.ylabel('Fitness')\n plt.title('Travelling Salesman Problem using Genetic Algorithm: Average At Each Generation')\ndef plotBest(best):\n plt.plot(best)\n plt.xlabel('Generation Number')\n plt.ylabel('Fitness')\n plt.title('Travelling Salesman Problem using Genetic Algorithm: Best At Each Generation')\n \nif __name__=='main':\n main()\n \n \n \n " } ]
1
joanap/FooterPagination
https://github.com/joanap/FooterPagination
d06aa9596471c3ae2115d39679dd7c87dd9f978f
7c008b86a314cde768bb4b995f03f8ed3daca21a
639c8b6e7a8284c2c6c0fdb8bf14d75bf46bccb9
refs/heads/master
"2021-03-16T09:50:49.008945"
"2018-01-29T07:24:38"
"2018-01-29T07:24:38"
119,273,348
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5944380164146423, "alphanum_fraction": 0.6172267198562622, "avg_line_length": 29.11627960205078, "blob_id": "ff50505503b4a3a01e89141d7da52c4028a34fe3", "content_id": "0d54b4b1fe279bb0ebeceb18c6b57a731b145dcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2589, "license_type": "no_license", "max_line_length": 98, "num_lines": 86, "path": "/tests/simple_tests.py", "repo_name": "joanap/FooterPagination", "src_encoding": "UTF-8", "text": "import unittest\nfrom src import footer_pagination\n\n\nclass SimpleTests(unittest.TestCase):\n\n def test_beginning_pages(self):\n \"\"\"Test the initial status of the set of pages in the beginning\n\n \"\"\"\n self.assertSequenceEqual((1, 1), footer_pagination.init_beginning_pages(5, 1))\n\n def test_end_pages(self):\n \"\"\"Test the initial status of the set of pages in the end pages\n\n \"\"\"\n self.assertSequenceEqual((5, 5), footer_pagination.init_end_pages(5, 1))\n\n def test_around_pages(self):\n \"\"\"Test the initial status of the set of around pages\n\n \"\"\"\n self.assertSequenceEqual((4, 4), footer_pagination.init_around_pages(4, 0, 5))\n\n def test_overlapping_pages(self):\n \"\"\"Test overlapping sets of pages\n\n \"\"\"\n self.assertTrue(footer_pagination.are_overlapping_pages((1, 3), (2, 4)))\n\n def test_not_overlapping_pages(self):\n \"\"\"Test not overlapping sets of pages\n\n \"\"\"\n self.assertFalse(footer_pagination.are_overlapping_pages((1, 3), (6, 7)))\n\n def test_merge_pages(self):\n \"\"\"Tests merging of two overlapping sets of pages\n\n \"\"\"\n self.assertSequenceEqual((1, 4), footer_pagination.merge_pages((1, 3), (2, 4)))\n\n def test_update_overlap_pages(self):\n \"\"\"Test the update of two sets of pages that overlap\n\n \"\"\"\n self.assertSequenceEqual(((1, 4), None), footer_pagination.update_pages((1, 3), (2, 4)))\n\n def test_update_not_overlap_pages(self):\n \"\"\"Test the update of two sets of pages that do 
not overlap\n\n \"\"\"\n self.assertSequenceEqual(((1, 3), (6, 7)), footer_pagination.update_pages((1, 3), (6, 7)))\n\n def test_find_first_page(self):\n \"\"\"Test if the first page is contained in the sets of pages\n\n \"\"\"\n self.assertTrue(footer_pagination.find_page([(1, 2), (3, 5), None], 1))\n\n def test_not_find_first_page(self):\n \"\"\"Test if the first page is contained in the sets of pages\n\n \"\"\"\n self.assertFalse(footer_pagination.find_page([(2, 3), (4, 5), None], 1))\n\n def test_exist_remaining_pages(self):\n \"\"\"Test when two sets of pages have remaining pages between them\n\n \"\"\"\n self.assertTrue(footer_pagination.exist_remaining_pages((1, 3), (6, 7)))\n\n def test_not_exist_remaining_pages(self):\n \"\"\"Test when two sets of pages do not have remaining pages between them\n\n \"\"\"\n self.assertFalse(footer_pagination.exist_remaining_pages((1, 7), (8, 9)))\n\n\n\ndef main():\n unittest.main()\n\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6571990251541138, "alphanum_fraction": 0.6663753390312195, "avg_line_length": 36.21544647216797, "blob_id": "a5f7112d350cc352ecafba9a240abc9c77ff2117", "content_id": "30c470f68ca00656758bb83f58101ffd3135794e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9154, "license_type": "no_license", "max_line_length": 121, "num_lines": 246, "path": "/src/footer_pagination.py", "repo_name": "joanap/FooterPagination", "src_encoding": "UTF-8", "text": "import sys\n\nINPUT_LEN = 5\nFIRST_PAGE = 1\nFIRST_PAGE_INDEX = 0\nLAST_PAGE_INDEX = 1\nREMAINING_PAGES = \"...\"\n\n\ndef init_beginning_pages(total_pages, boundaries):\n \"\"\"Define the initial status for the set of pages in the beginning: return first and last page\n\n :param total_pages: total number of pages\n :param boundaries: how many pages we want to link in the beginning, or end\n :return: (first beginning page, last beginning page)\n \"\"\"\n if boundaries != 0:\n if 
boundaries < total_pages:\n return FIRST_PAGE, boundaries\n else:\n FIRST_PAGE, total_pages\n else:\n None\n\n\ndef init_end_pages(total_pages, boundaries):\n \"\"\"Define the initial status for the set of pages in the end: return first and last page\n\n :param total_pages: total number of pages\n :param boundaries: how many pages we want to link in the beginning, or end\n :return: (first end page, last end page)\n \"\"\"\n if boundaries != 0:\n if total_pages - boundaries > 0:\n return total_pages - boundaries+1, total_pages\n else:\n return FIRST_PAGE, total_pages\n else:\n return None\n\n\ndef init_around_pages(current_page, around, total_pages):\n \"\"\"Define the initial status for the set of pages in the around: return first and last page\n\n :param current_page: current page\n :param around: how many pages we want to link before and after the current page\n :param total_pages: total number of pages\n :return: (first around page, last around page)\n \"\"\"\n around_first_page = around_last_page = current_page\n if around != 0:\n around_first_page = current_page - around\n around_last_page = current_page + around\n if around_first_page < 1:\n around_first_page = FIRST_PAGE\n if around_last_page > total_pages:\n around_last_page = total_pages\n return around_first_page, around_last_page\n\n\ndef initial_pages_status(current_page, total_pages, boundaries, around):\n \"\"\"Define the initial status for the sets of pages: return a list with beginning, around and end set of pages\n\n :param current_page: current page\n :param total_pages: total number of pages\n :param boundaries: how many pages we want to link in the beginning, or end\n :param around: how many pages we want to link before and after the current page\n :return: list with beginning, around and end set of pages\n \"\"\"\n beginning_pages = init_beginning_pages(total_pages, boundaries)\n around_pages = init_around_pages(current_page, around, total_pages)\n end_pages = init_end_pages(total_pages, 
boundaries)\n return [beginning_pages, around_pages, end_pages]\n\n\ndef are_overlapping_pages(pages1, pages2):\n \"\"\"Check if the sets pages1 and pages2 overlap: return True if pages1 and pages2 overlap\n\n :param pages1: set of pages\n :param pages2: set of pages\n :return: True if pages1 and pages2 overlap\n \"\"\"\n if pages1 is None or pages2 is None:\n return False\n else:\n return not (pages1[LAST_PAGE_INDEX] < pages2[FIRST_PAGE_INDEX] or\n pages2[LAST_PAGE_INDEX] < pages1[FIRST_PAGE_INDEX])\n\n\ndef merge_pages(pages1, pages2):\n \"\"\"Merge overlapping sets of pages1 and pages2: return the merged set of pages\n\n :param pages1: set of pages\n :param pages2: set of pages\n :return: merged set of pages\n \"\"\"\n return min(pages1[FIRST_PAGE_INDEX], pages2[FIRST_PAGE_INDEX]), max(pages1[LAST_PAGE_INDEX], pages2[LAST_PAGE_INDEX])\n\n\ndef update_pages(pages1, pages2):\n \"\"\"Merge two sets of pages if they overlap, otherwise return the initial status of the sets\n\n :param pages1: set of pages\n :param pages2: set of pages\n :return: (merged set of pages, None) if pages1 and pages2 overlap, otherwise return (pages1, pages2)\n \"\"\"\n if are_overlapping_pages(pages2, pages1):\n return merge_pages(pages2, pages1), None\n else:\n return pages1, pages2\n\n\ndef update_all_pages(initial_pages_status):\n \"\"\"Iterate the sets of pages and check if the current set of pages overlap the next sets of pages; unify sets\n that overlap.\n\n :param initial_pages_status: initial pages status\n :return: final pages status with no overlapping.\n \"\"\"\n\n for pages_index, item in enumerate(initial_pages_status):\n for i in range(pages_index, len(initial_pages_status) - 1):\n new_pages_status = update_pages(initial_pages_status[pages_index], initial_pages_status[i+1])\n if new_pages_status is not None:\n if initial_pages_status[pages_index] is not None:\n initial_pages_status[pages_index] = new_pages_status[0]\n if initial_pages_status[i+1] is not None:\n 
initial_pages_status[i+1] = new_pages_status[1]\n return initial_pages_status\n\n\ndef exist_remaining_pages(pages1, pages2):\n \"\"\"Check if there are remaining pages between the sets of pages pages1 and pages2\n\n :param pages1: set of pages\n :param pages2: set of pages\n :return: True if exist remaining pages between pages1 and pages2\n \"\"\"\n if pages1 is not None and pages2 is not None:\n return pages2[FIRST_PAGE_INDEX] - pages1[LAST_PAGE_INDEX] > 1\n else:\n return False\n\n\ndef print_range(pages):\n \"\"\"Print the range of pages in the set pages\n\n :param pages: set of pages to print\n \"\"\"\n if pages is not None:\n print(*range(pages[FIRST_PAGE_INDEX], pages[LAST_PAGE_INDEX]+1), sep=' ', end='')\n\n\ndef find_page(pages_list, page_to_found):\n \"\"\"Check if page_to_found is in pages_list: return True if exists\n\n :param pages_list: list with sets of pages\n :param page_to_found: page to found in the list\n :return: True if the page is in the list\n \"\"\"\n for current_pages in pages_list:\n if current_pages is not None:\n if page_to_found == current_pages[FIRST_PAGE_INDEX] or page_to_found == current_pages[LAST_PAGE_INDEX]:\n return True\n return False\n\n\ndef remove_none(pages_list):\n \"\"\"Remove None elements from a list\n\n :param pages_list: list of sets of pages\n :return: list without None elements\n \"\"\"\n return [pages for pages in pages_list if pages is not None]\n\n\ndef print_output(pages_list, last_page, boundaries):\n \"\"\"Concatenate and print footer pagination\n\n :param pages_list: sets of pages\n :param last_page: total pages\n :param boundaries: how many pages we want to link in the beginning, or end\n \"\"\"\n pages_list_without_none = remove_none(pages_list)\n if boundaries == 0 and not find_page(pages_list_without_none, FIRST_PAGE):\n print(REMAINING_PAGES + \" \", end='')\n for pages_index, current_pages in enumerate(pages_list_without_none):\n print_range(current_pages)\n if pages_index + 1 < 
len(pages_list_without_none):\n if exist_remaining_pages(current_pages, pages_list_without_none[pages_index + 1]):\n print(\" \" + REMAINING_PAGES + \" \", end='')\n else:\n print(\" \", end='')\n if boundaries == 0 and not find_page(pages_list_without_none, last_page):\n print(\" \" + REMAINING_PAGES, end='')\n\n\ndef validate_input(current_page, total_pages, boundaries, around):\n \"\"\"\n Raises an exception if input is invalid\n :param current_page: current page\n :param total_pages: total number of pages\n :param boundaries: how many pages we want to link in the beginning, or end\n :param around: how many pages we want to link before and after the current page\n \"\"\"\n if current_page <= 0 or total_pages <= 0:\n raise ValueError(\"Current page and total pages must be greater than 0\")\n if boundaries < 0 or around < 0:\n raise ValueError(\"Boundaries and around must be greater or equal to 0\")\n if current_page > total_pages:\n raise ValueError(\"Current page must be lower than total pages\")\n\n\ndef get_footer_pagination(current_page, total_pages, boundaries, around):\n \"\"\"Build and print footer pagination according page, total_pages, boundaries and around\n\n :param current_page: current page\n :param total_pages: total number of pages\n :param boundaries: how many pages we want to link in the beginning, or end\n :param around: how many pages we want to link before and after the current page\n \"\"\"\n initial_pages_stat = initial_pages_status(current_page, total_pages, boundaries, around)\n final_pages_stat = update_all_pages(initial_pages_stat)\n print_output(final_pages_stat, total_pages, boundaries)\n\n\ndef main():\n \"\"\"Read arguments current_page, total_page, boundaries and around and build the corresponding footer pagination\n\n \"\"\"\n if len(sys.argv) == INPUT_LEN:\n current_page = int(sys.argv[1])\n total_pages = int(sys.argv[2])\n boundaries = int(sys.argv[3])\n around = int(sys.argv[4])\n try:\n validate_input(current_page, total_pages, 
boundaries, around)\n get_footer_pagination(current_page, total_pages, boundaries, around)\n except ValueError as err:\n print(err)\n else:\n print(\"Missing arguments\")\n\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.741304337978363, "alphanum_fraction": 0.7630434632301331, "avg_line_length": 34.38461685180664, "blob_id": "dab447c38d76215ee49c381791fec5de7fb384e4", "content_id": "f666fafd614af5073e46b16069c57a63810b1803", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 460, "license_type": "no_license", "max_line_length": 163, "num_lines": 13, "path": "/README.md", "repo_name": "joanap/FooterPagination", "src_encoding": "UTF-8", "text": "# FooterPagination\nThis project aims to create a footer with pagination to browse through the several pages of a given website.\n\n### Instructions\n- Dependencies: Python 3.6\n- Test program giving an input sequence as the example below: 1st argument: current_page; 2nd argument: total_pages; 3rd argument: boundaries; 4th argument: around\n```sh\npython src/footer_pagination.py 4 5 1 0\n```\n- Test program with Unit tests\n```sh\npython tests/simple_tests.py -v\n```\n" } ]
3
peter-cai/Learning-PyTorch-for-CODEC
https://github.com/peter-cai/Learning-PyTorch-for-CODEC
50a557312b02a93f36ad2b2d7002133bcfa353f4
9332d40c21cf9fe545efd12c55dff65614e0733b
cafe25f3e21db3eef5609bc0a14b90d06845f577
refs/heads/main
"2023-01-03T15:13:35.321162"
"2020-10-23T09:17:17"
"2020-10-23T09:21:12"
306,242,340
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6844124794006348, "alphanum_fraction": 0.6988009810447693, "avg_line_length": 41.57143020629883, "blob_id": "94ebc262a0cbbad8b505acdef16cf8d7a783391e", "content_id": "b92ec34936802572f554ba96e25d3016ca6b963d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2101, "license_type": "no_license", "max_line_length": 105, "num_lines": 49, "path": "/autograd.py", "repo_name": "peter-cai/Learning-PyTorch-for-CODEC", "src_encoding": "UTF-8", "text": "# -*- coding=utf-8 -*-\nimport torch\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(“cuda:0”)# uncomment to use gpu\n\n# N is the numbers of input and D_in is the input size\n# H is the size of hidden layer; D_out is the output size\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# create the random input and output following the normal distribution\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\n\n# create the random weight and bias following the normal distribution\n# set requires_grad = True, so the gradient can be obtained in related parameters\nw1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)\nw2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)\n\n# set the learning rate, which can control the scale of parameter change\nlearning_rate = 1e-6\nfor t in range(500):\n # forward propogation and get the prediction values of y \n # the computing graph is formed becasue the requires_grad is set as True\n # the gradient is calculated automately, which is one of the important tools in PyTorch\n # .mm() is to calculate the matrix multiplication\n # torch.clamp(input, min, max, out=None) is to control the limit of tensor\n # torch.clamp is similar to ReLU(Rectified Linear Unit) function\n y_pred = x.mm(w1).clamp(min=0).mm(w2)\n\n # calculate the loss by MSE (mean square error)\n # loss is also a tensor and it can be 
changed into Scale by the function of .item()\n loss = (y_pred - y).pow(2).sum()\n print(t, loss.item())\n\n # use autograd to calculate the backward propogation \n # all the gradients of tensor with requires_grad=True can be got\n loss.backward()\n\n # torch.no_grad(): update the weight and not change the computing graph\n # .grad() to get the gradient\n with torch.no_grad():\n w1 -= learning_rate * w1.grad\n w2 -= learning_rate * w2.grad\n\n # clear the gradients each step when it has been used, otherwise the gradients will be accumlated\n w1.grad.zero_()\n w2.grad.zero_()" }, { "alpha_fraction": 0.6651398539543152, "alphanum_fraction": 0.6779170632362366, "avg_line_length": 31.4140625, "blob_id": "0f43e2deccc1cf0d30bc3ef586b945be8d7d7371", "content_id": "281712af5663d60046e966427f6fb119370437ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4148, "license_type": "no_license", "max_line_length": 93, "num_lines": 128, "path": "/torchForMLP.py", "repo_name": "peter-cai/Learning-PyTorch-for-CODEC", "src_encoding": "UTF-8", "text": "'''\nkey points in this file:\n-- use the Pytorch to achieve a MLP(multi-layer perception) neural network.\n-- the train and test data is the classical benchmark of MNIST.\n-- it can be run by CPU or GPU .\n'''\n\nimport torch.nn as nn\nimport torch\nimport torchvision\nfrom torch.utils.data import DataLoader\nimport time\n \nprint(\"start\")\n# number of training\nEPOCH=50\n# batch size\nBATCH_SIZE=20 \n# learn rate that should not be too large when using cross-entropy loss function \nLR=0.03 \n# use MNIST for training and testing, and if it had been download, DOWNLOAD_MNIST is False\nDOWNLOAD_MNIST= True\n\n# GPU can be used for computation to speed up the running process\ncuda_available=torch.cuda.is_available() \n# if there is no GPU, the CPU can also be used \ncuda_available=False \n\n# set a set to display data, and transform it into tensor size\n# normilize the data with 
normal distribution with the parameter of 0.5 and 0.5 -- N(0.5,0.5)\ntrans=torchvision.transforms.Compose(\n [\n # ToTensor method change [0,255] into [0,1]\n torchvision.transforms.ToTensor(), \n # represent the mean and standard deviation, respectively\n torchvision.transforms.Normalize([0.5],[0.5]) \n ]\n)\n \nprint(\"load data\")\ntrain_data=torchvision.datasets.MNIST(\n root=\"./data/mnist\", # the site of the data\n train=True, # for training \n transform=trans, # for transforming\n download=DOWNLOAD_MNIST \n)\n\n# the second parameter is the batch size, and the third parameter is to shuffle the data\ntrain_loader=DataLoader(train_data,batch_size=BATCH_SIZE,shuffle=True)\n \ntest_data=torchvision.datasets.MNIST(\n root=\"./data/mnist\",\n train=False, # for testing, so here is False\n transform=trans,\n download=DOWNLOAD_MNIST\n)\ntest_loader=DataLoader(test_data,batch_size=len(test_data),shuffle=False)\nprint(\"net creating\")\n \n# form a neural network with three layer 784 nodes/30 nodes/10 nodes\nnet=torch.nn.Sequential(\n nn.Linear(28*28,30),\n nn.Tanh(), # activation function\n nn.Linear(30,10)# 10 categories for numbers\n)\n \nif cuda_available:\n net.cuda() # select GPU or CPU\n \n# define the cross entropy as loss function \nloss_function=nn.CrossEntropyLoss()\n# SGD: stochastic gradient decent\noptimizer=torch.optim.SGD(net.parameters(),lr=LR) \n \nprint(\"start training\")\nfor ep in range(EPOCH):\n # record the start time to check the time consumption of each epoch\n startTick = time.time()\n # select each patch in training data\n for data in train_loader:\n img, label=data\n # change the size of image into a column vector\n img = img.view(img.size(0), -1)\n \n if cuda_available:\n img=img.cuda()\n label=label.cuda()\n \n # get the output from the net\n out=net(img)\n\n # get the loss\n loss=loss_function(out,label)\n # clean the previous gradients\n optimizer.zero_grad()\n # backward the loss to update the gradients of parameter, which is called 
Autograd\n loss.backward()\n # update gradients\n optimizer.step()\n \n # calculate the number of successful classification samples\n num_correct = 0\n # as the size of test samples is equal to batch size, so the loop runs only once\n for data in test_loader:\n img, label=data\n img = img.view(img.size(0), -1)\n \n if cuda_available:\n img=img.cuda()\n label=label.cuda()\n\n # get the output from the net\n out=net(img)\n \n # torch.max() return two results\n # the first is the maximum, and the second is the corresponding index\n # 0 represents the index of maximum in column\n # 1 represents the index of maximum in row\n _,prediction=torch.max(out,1)\n print(prediction)\n\n # count the number of correct classification samples\n num_correct+=(prediction==label).sum()\n \n # get the accuracy, if the GPU is used, then the num_correct should be changed by .cpu()\n accuracy=num_correct.cpu().numpy()/len(test_data)\n timeSpan = time.time()-startTick\n print(\"Iteration time: %d, Accuracy: %f, running time: %ds\"%(ep+1,accuracy,timeSpan))" }, { "alpha_fraction": 0.8285943269729614, "alphanum_fraction": 0.8422106504440308, "avg_line_length": 38.24193572998047, "blob_id": "0548e36c1eb4600f9739ed7fb0af5d0a9fa2d957", "content_id": "7215cb291f1653eb2cd04898cd28c8adae343b46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5330, "license_type": "no_license", "max_line_length": 202, "num_lines": 62, "path": "/README.md", "repo_name": "peter-cai/Learning-PyTorch-for-CODEC", "src_encoding": "UTF-8", "text": "\r\nCODEC是**CO**ding and 
**DEC**oding的缩写,是面向视频等多媒体的编解码技术。随着视频采集设备不断发展,为人们的生活、娱乐、工作提供了大量高动态范围、宽色域的视频,据估计,互联网上有80%的数据信息都来源于视频。然而,尽管信息通信技术也在不断发展,但由于Jevons悖论的作用,视频存储和传输的压力仍然巨大,因此CODEC技术仍需不断更新和发展\r\n\r\n研究和制定CODEC的标准组织包括ISO/IEC的MPEG和ITU的VCEG(联合专家组JVET),同时也包括中国的AVS,还有以Google等企业发起的AOM。CODEC的标准研究已有几十年,最近的标准版本是H.266/VVC,以及AVS所推出的AVS3。传统的基于块的CODECC方法流程包括块划分、帧内/间预测、变换、量化、熵编码等技术过程,但传统方法在保证计算效率情况下的编码性能提升已十分有限。\r\n\r\n基于神经网络的智能算法是CODEC当下的焦点研究方向,以实现模块替换或整体替换传统方法。该Repository是采用PyTorch作为工具来进行CODEC算法实现,适合Pytorch和CODEC编程的入门。\r\n\r\n*Note: Jevons悖论:“The efficiency with which a resource is used tends to increase (rather than decrease) the rate of consumption of that resource.\"*\r\n\r\n以下是Pytorch的基本介绍和安装流程\r\n\r\n===\r\n### 1 基本情况\r\nPyTorch是一个开源的Python机器学习库,基于Torch,用于自然语言处理等应用程序。\r\n\r\n2017年1月,由Facebook人工智能研究院(FAIR)基于Torch推出了PyTorch。\r\n\r\n它是一个基于Python的可续计算包,提供两个高级功能:\r\n\r\n1. 具有强大的GPU加速的张量计算(如NumPy)。\r\n\r\n2. 包含自动求导系统的的深度神经网络。\r\n\r\n官方教程中文版地址:http://www.pytorch123.com/\r\n\r\n### 2 基于Torch开发的\r\n\r\n要介绍PyTorch之前,不得不说一下Torch。Torch是一个有大量机器学习算法支持的科学计算框架,是一个与Numpy类似的张量(Tensor) 操作库,其特点是特别灵活,但因其采用了小众的编程语言是Lua,所以流行度不高,这也就有了PyTorch的出现。所以其实Torch是 PyTorch的前身,它们的底层语言相同,只是使用了不同的上层包装语言。\r\n\r\n除了Facebook之外,Twitter、GMU和Salesforce等机构都采用了PyTorch。\r\n\r\nPyTorch是一个基于Torch的Python开源机器学习库,用于自然语言处理等应用程序。它主要由Facebookd的人工智能小组开发,不仅能够 实现强大的GPU加速,同时还支持动态神经网络,这一点是现在很多主流框架如TensorFlow都不支持的。 PyTorch提供了两个高级功能: * 具有强大的GPU加速的张量计算(如Numpy) * 包含自动求导系统的深度神经网络\r\n\r\n除了Facebook之外,Twitter、GMU和Salesforce等机构都采用了PyTorch。\r\n\r\n### 3 与主流其他框架的区别\r\n\r\nTensorFlow和Caffe都是命令式的编程语言,而且是静态的,首先必须构建一个神经网络,然后一次又一次使用相同的结构,如果想要改变网络的结构,就必须从头开始。但是对于PyTorch,通过反向求导技术,可以让你零延迟地任意改变神经网络的行为,而且其实现速度快。正是这一灵活性是PyTorch对比TensorFlow的最大优势。\r\n\r\n另外,PyTorch的代码对比TensorFlow而言,更加简洁直观,底层代码也更容易看懂,这对于使用它的人来说理解底层肯定是一件令人激 动的事。\r\n\r\n所以,总结一下PyTorch的优点: * 支持GPU * 灵活,支持动态神经网络 * 底层代码易于理解 * 命令式体验 * 自定义扩展\r\n\r\n当然,现今任何一个深度学习框架都有其缺点,PyTorch也不例外,对比TensorFlow,其全面性处于劣势,目前PyTorch还不支持快速傅里 
叶、沿维翻转张量和检查无穷与非数值张量;针对移动端、嵌入式部署以及高性能服务器端的部署其性能表现有待提升;其次因为这个框架较新,使得他的社区没有那么强大,在文档方面其C库大多数没有文档。\r\n\r\n### 4 PyTorch安装流程\r\n1 安装Anaconda 3.5\r\nAnaconda是一个用于科学计算的Python发行版,支持Linux、Mac和Window系统,提供了包管理与环境管理的功能,可以很方便地解决Python并存、切换,以及各种第三方包安装的问题。\r\n\r\n1.1 下载:¶\r\n可以直接从Anaconda官网下载,但因为Anaconda的服务器在国外,所以下载速度会很慢,这里推荐使用清华的镜像来下载。注意选择合适的版本。\r\n\r\n1.2 安装及配置环境变量\r\n下载之后,点击安装即可。\r\n\r\n安装完成后,进行Anaconda的环境变量配置,打开控制面板->高级系统设置->环境变量->系统变量找到Path,点击编辑,加入三个文件夹的存储路径(注意三个路径之间需用分号隔开)。\r\n\r\n至此,Anaconda 3.5 windows版就安装设置好了。打开程序找到Anaconda Navigator,启动后还可以看到:Jubyter,单击打开即可。\r\n\r\n2 Win10+Anaconda环境下安装Pytorch\r\n安装Pytorch,在命令提示符cmd中输出指令,进行在线安装\r\n\r\nconda install pytorch torchvision cudatoolkit=9.0 -c pytorch\r\n" } ]
3
Saumya-singh-02/Quiz-app
https://github.com/Saumya-singh-02/Quiz-app
68f832ca4aed696bc507b8453f1b17b1013c3fd4
8c71003f180c822f3dbead50f301b0976ba5f26e
d107f26b55031432db6b30423c79e60582f580a6
refs/heads/master
"2023-06-08T17:11:03.810376"
"2021-06-27T14:40:40"
"2021-06-27T14:40:40"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6950317025184631, "alphanum_fraction": 0.7325581312179565, "avg_line_length": 26.02857208251953, "blob_id": "4fe2cf3b8a6d69516a0246c5bbd9b9e43af3336c", "content_id": "411b76f59a9b49f6d28f05af1485af815b915a3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1892, "license_type": "no_license", "max_line_length": 90, "num_lines": 70, "path": "/README.md", "repo_name": "Saumya-singh-02/Quiz-app", "src_encoding": "UTF-8", "text": "# Quiz-app\nThis app is made using Django. It can be used to take quizes.\n\n## Specifications of the app.\n\n- SQLite used as the database for backend\n\n- HTML CSS and Javascript Django template are used in frontend\n\n- Python-Django is used for backend\n\n\n**Part of the course project IT workshop**\n\n*Group Members*\n\n- [Sakshi Gupta](http://github.com/sakshi-codes)\n\n- [Saumya Singh](https://github.com/Saumya-singh-02)\n\n- [Konica Ranjan](https://github.com/konica1234)\n\n## Some screenshots of the app.\n\n\n![s1](https://raw.githubusercontent.com/konica1234/Quiz-app/master/Screenshot%20(214).png)\n![s1](https://raw.githubusercontent.com/konica1234/Quiz-app/master/Screenshot%20(215).png)\n![s1](https://raw.githubusercontent.com/konica1234/Quiz-app/master/Screenshot%20(216).png)\n![s1](https://raw.githubusercontent.com/konica1234/Quiz-app/master/Screenshot%20(217).png)\n![s1](https://raw.githubusercontent.com/konica1234/Quiz-app/master/Screenshot%20(218).png)\n\n\n## Functions:\n\n## Admin:\n- Create Admin account using command\n```\npy manage.py createsuperuser\n```\n- Can Add, View, Delete Quiz.\n- Can Add Questions To Respective quiz With options and correct answer.\n- Can also see result\n\n\n## Student:\n\n<ul>\n <li>Can Give Exam Any Time, There Is No Limit On Number Of Attempt.</li>\n <li>Can View Marks Of Each Attempt Of Each Exam.</li>\n <li>Question Pattern Is MCQ With 4 Options And 1 Correct Answer.</li>\n</ul>\n\n## HOW TO RUN THIS 
PROJECT\n\n- Install Python(3.7.6) (Dont Forget to Tick Add to Path while installing Python)\n- Open Terminal and Execute Following Commands :\n```\npython -m pip install -r requirements. txt\n```\n- Download This Project Zip Folder and Extract it\n- Move to project folder in Terminal. Then run following Commands :\n```\npy manage.py makemigrations\npy manage.py migrate\npy manage.py runserver\n```\n- Now enter following URL in Your Browser Installed On Your Pc\n```\nhttp://127.0.0.1:8000/\n```\n" }, { "alpha_fraction": 0.6128205060958862, "alphanum_fraction": 0.6128205060958862, "avg_line_length": 23.4375, "blob_id": "b109b4a9cda611da5cb756b643ebcfb0a30ffa88", "content_id": "e64acab917334f8a37ecca1afb4e1d5527fdedd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 60, "num_lines": 16, "path": "/quizes/urls.py", "repo_name": "Saumya-singh-02/Quiz-app", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom .views import(\n QuizListView,\n quiz_view,\n quiz_data_view,\n save_quiz_view\n)\n\napp_name = 'quizes'\n\nurlpatterns = [\n path('',QuizListView.as_view(), name = 'main-view'),\n path('<pk>/',quiz_view,name = 'quiz-view'),\n path('<pk>/save/',save_quiz_view,name = 'save-view'),\n path('<pk>/data/',quiz_data_view,name='quiz-data-view'),\n]" }, { "alpha_fraction": 0.811965823173523, "alphanum_fraction": 0.811965823173523, "avg_line_length": 28.25, "blob_id": "be1ffec7a6104fd6bfd8767cfb0cf6e7eb31c8cd", "content_id": "f72a1e8266ac70b1abfcef652d863a92b71c6503", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 32, "num_lines": 4, "path": "/results/admin.py", "repo_name": "Saumya-singh-02/Quiz-app", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Result\nadmin.site.register(Result)\n# Register 
your models here.\n" } ]
3
aymane081/python_algo
https://github.com/aymane081/python_algo
0e841d8c57d9f2826c2813525669faa1da183e1d
15ea27ed9194b3f536e31bd558f296b7ecdec8a3
151ff15aac9f94358b07f4d9d5c5fd0f851fd8ee
refs/heads/master
"2018-09-06T18:04:44.547172"
"2018-08-25T17:09:59"
"2018-08-25T17:09:59"
116,173,807
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4884105920791626, "alphanum_fraction": 0.4884105920791626, "avg_line_length": 34.588233947753906, "blob_id": "1404f128b02443ebf2bb57d4f2bd2a4133412895", "content_id": "3b72f08f60cf7ff8eb18268d2832d297d6c3ea5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 604, "license_type": "no_license", "max_line_length": 60, "num_lines": 17, "path": "/arrays/increasing_triplet_subsequence.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def has_increasing_subsequence(self, nums):\n smallest, next_smallest = float('inf'), float('inf')\n for num in nums:\n # if num <= smallest:\n # smallest = num\n # elif num <= next_smallest:\n # next_smallest = num\n # else:\n # return True\n # A second way of doing the same\n smallest = min(smallest, num)\n if smallest < num:\n next_smallest = min(next_smallest, min)\n if next_smallest < num:\n return True\n return False" }, { "alpha_fraction": 0.45371219515800476, "alphanum_fraction": 0.47296059131622314, "avg_line_length": 24.395349502563477, "blob_id": "c96ff917fc160ae564d2712ed1068ff069f23089", "content_id": "8110d602c6ab76ff299e7a1ab21a468114c3d948", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1091, "license_type": "no_license", "max_line_length": 45, "num_lines": 43, "path": "/arrays/dissapeared_numbers.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def dissapeared_numbers(self, numbers):\n if not numbers:\n return []\n \n n = len(numbers)\n result = [i for i in range(1, n + 1)]\n\n for num in numbers:\n result[num - 1] = 0\n \n self.delete_zeros(result)\n return result\n\n def delete_zeros(self, arr):\n insert_pos = 0\n for num in arr:\n if num != 0:\n arr[insert_pos] = num\n insert_pos += 1\n \n for _ in range(insert_pos, len(arr)):\n arr.pop()\n \n def dissapeared_numbers2(self, numbers):\n 
if not numbers:\n return []\n\n for i, num in enumerate(numbers):\n val = abs(num) - 1\n if (numbers[val] > 0):\n numbers[val] = - numbers[val]\n \n result = []\n for i, num in enumerate(numbers):\n if num >= 0:\n result.append(i + 1)\n \n return result\n\nsolution = Solution()\nnumbers = [4, 3, 2, 7, 8, 2, 3, 1]\nprint(solution.dissapeared_numbers2(numbers))" }, { "alpha_fraction": 0.5221154093742371, "alphanum_fraction": 0.5413461327552795, "avg_line_length": 23.785715103149414, "blob_id": "3f626ffc65b422968ef6be530e935a3643f6a56d", "content_id": "e7bfb43a3af70a9aa334819fa1536b1a886f9666", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1040, "license_type": "no_license", "max_line_length": 70, "num_lines": 42, "path": "/arrays/teemo_attacking.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 495\n\n# time: O(n)\n# space: O(1)\nclass Solution:\n def find_poisoned_duration(self, timeSeries, duration):\n result = 0\n if not timeSeries:\n return result\n \n timeSeries.append(float('inf'))\n \n for i in range(1, len(timeSeries)):\n result += min(timeSeries[i] - timeSeries[i - 1], duration)\n \n return result\n\n# time: O(with of window * number of attacks)\n# space: O(1)\nclass Solution2:\n def find_poisoned_duration(self, timeSeries, duration):\n result = 0\n if not timeSeries:\n return result\n \n temp_poison = 0\n\n for i in range(timeSeries[0], timeSeries[-1] + 1):\n if i in timeSeries:\n temp_poison = duration\n \n if temp_poison:\n result += 1\n temp_poison -= 1\n \n result += temp_poison\n\n return result\n\nsolution = Solution()\nprint(solution.find_poisoned_duration([1], 2))\nprint(solution.find_poisoned_duration([1], 2))" }, { "alpha_fraction": 0.5814977884292603, "alphanum_fraction": 0.5917767882347107, "avg_line_length": 17.94444465637207, "blob_id": "ad6b56eb60b9ef758350f53a0d47356bb5218271", "content_id": "7756a57dbd1f0b00aed21c4cd3f764758699660e", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 681, "license_type": "no_license", "max_line_length": 46, "num_lines": 36, "path": "/linkedList/linked_list_cycle.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.listNode import ListNode\n\nclass Solution:\n def has_cycle(self, head):\n if not head or not head.next:\n return False\n\n slow, fast = head.next, head.next.next\n\n while fast and fast.next:\n if fast == slow:\n return True\n \n slow = slow.next\n fast = fast.next.next\n \n return False\n\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\nsix = ListNode(6)\nseven = ListNode(7)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\nfive.next = six\nsix.next = three\n\nsolution = Solution()\nprint(solution.has_cycle(one))" }, { "alpha_fraction": 0.4913494884967804, "alphanum_fraction": 0.5063437223434448, "avg_line_length": 26.677419662475586, "blob_id": "cc6f322b900c89576423e4229b93c1ce4742b9bb", "content_id": "cd78fc88956ac829c90e997c22c0bdef8c72ae16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 867, "license_type": "no_license", "max_line_length": 92, "num_lines": 31, "path": "/dynamicProgramming/target_sum.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nclass Solution:\n def target_sum(self, nums, target):\n if not nums:\n return 0\n \n sums = defaultdict(int)\n sums[0] = 1\n\n running = nums[:]\n\n for i in range(len(running) - 2, -1, -1):\n running[i] += running[i + 1]\n\n for i, num in enumerate(nums):\n new_sums = defaultdict(int)\n for old_sum in sums:\n if target <= old_sum + running[i]: # if I can reach the target from this sum\n new_sums[num + old_sum] += sums[old_sum]\n if target >= old_sum - running[i]:\n new_sums[old_sum - num] += sums[old_sum]\n\n 
sums = new_sums\n \n return sums[target]\n\nnums = [1, 1, 1, 1, 1]\ntarget = 3\nsolution = Solution()\nprint(solution.target_sum(nums, target))\n \n" }, { "alpha_fraction": 0.4655493497848511, "alphanum_fraction": 0.4767225384712219, "avg_line_length": 31.545454025268555, "blob_id": "1315e2a08b9be999bef1a4196345848b5ec7ec1f", "content_id": "9130f33db2da1f2cc06ca5214e44d60ef04c9e11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1074, "license_type": "no_license", "max_line_length": 72, "num_lines": 33, "path": "/graphs/surronded_regions.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def surrond(self, matrix):\n if not matrix or not matrix.rows_count or not matrix.cols_count:\n return matrix\n \n for row in range(matrix.rows_count):\n self.dfs(row, 0, matrix)\n self.dfs(row, matrix.cols_count - 1, matrix)\n \n for col in range(matrix.cols_count):\n self.dfs(0, col, matrix)\n self.dfs(matrix.rows_count - 1, col, matrix)\n \n for row in range(matrix.rows_count):\n for col in range(matrix.cols_count):\n if matrix[row][col] == 'O':\n matrix[row][col] = 'X'\n elif matrix[row][col] == '*':\n matrix[row][col] = 'O'\n \n def dfs(self, row, col, matrix):\n if not matrix.is_valid_cell(row, col):\n return\n \n if matrix[row][col] != 'O':\n return\n \n matrix[row][col] = '*'\n\n directions = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n\n for dr, dc in directions:\n self.dfs(row + dr, col + dc, matrix)\n" }, { "alpha_fraction": 0.5357624888420105, "alphanum_fraction": 0.5479082465171814, "avg_line_length": 14.765957832336426, "blob_id": "7ef87b65a0fc974609658550e1b120c9051deaa6", "content_id": "623c3c29fd13eb0d5a57e664d9602091853086fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 741, "license_type": "no_license", "max_line_length": 54, "num_lines": 47, "path": "/linkedList/reverse_linked_list.py", "repo_name": 
"aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.listNode import ListNode\n\nclass Solution:\n def reverse(self, head):\n rev = None\n\n while head:\n rev, rev.next, head = head, rev, head.next\n \n return rev\n\n def reverse2(self, head):\n rev = None\n\n while head:\n next = head.next\n head.next = rev\n rev = head\n head = next\n\n return rev\n \n\n \n \n\n \n\n \none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\nsix = ListNode(6)\nseven = ListNode(7)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\nfive.next = six\nsix.next = seven\n\nprint(one)\nsolution = Solution()\nprint(solution.reverse2(one))\n" }, { "alpha_fraction": 0.563805103302002, "alphanum_fraction": 0.5777262449264526, "avg_line_length": 34.91666793823242, "blob_id": "bf7b551aabbc7966a647f59b957420c4a0cc8345", "content_id": "33dad82bed0c34222a6f2a5d6154eb0e14ac8800", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 115, "num_lines": 12, "path": "/arrays/maximum_product_subarray.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_maximum_product(self, nums):\n max_so_far = float('-inf')\n max_here, min_here = 1, 1\n for num in nums:\n max_here, min_here = max(max_here * num, min_here * num, num), min(min_here * num, max_here * num, num)\n max_so_far = max(max_so_far, max_here)\n return max_so_far\n\nsolution = Solution()\nnums = [2, 3, -2, -4]\nprint(solution.get_maximum_product(nums))\n" }, { "alpha_fraction": 0.4756637215614319, "alphanum_fraction": 0.5, "avg_line_length": 31.35714340209961, "blob_id": "21f48cf6d8eee048e376c085c915ac25a4883630", "content_id": "e826916d4675062cda2762a7f5c4ec2e6b276868", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "no_license", 
"max_line_length": 59, "num_lines": 14, "path": "/arrays/summary_ranges.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_summary_ranges(self, nums):\n result = []\n for i, num in enumerate(nums):\n if not result or nums[i] > nums[i - 1] + 1:\n result.append(str(num))\n else:\n start = result[-1].split(' -> ')[0]\n result[-1] = ' -> '.join([start, str(num)])\n return result\n\nsolution = Solution()\nnums = [0, 1, 2, 4, 5, 7]\nprint(solution.get_summary_ranges(nums))" }, { "alpha_fraction": 0.3475247621536255, "alphanum_fraction": 0.38712871074676514, "avg_line_length": 31.612903594970703, "blob_id": "cd7fd37d4c4a376b9e1f5664a935a6250066eb91", "content_id": "1934720677492a3018bf4de57d3047387475f212", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 64, "num_lines": 31, "path": "/strings/multiply_strings.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def multiply(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n\n result = [0] * (len(num1) + len(num2))\n num1, num2 = num1[::-1], num2[::-1]\n for i in range(len(num1)):\n for j in range(len(num2)):\n product = int(num1[i]) * int(num2[j])\n units = product % 10\n tens = product // 10\n\n result[i + j] += units\n if result[i + j] > 9:\n tens += result[i + j] // 10\n result[i + j] %= 10\n \n result[i + j + 1] += tens\n if result[i + j + 1] > 9:\n result[i + j + 2] += result[i + j + 1] // 10\n result[i + j + 1] %= 10\n \n # remove the trailing zeros from result\n while len(result) > 0 and result[-1] == 0:\n result.pop()\n\n return ''.join(map(str, result[::-1]))" }, { "alpha_fraction": 0.4870550036430359, "alphanum_fraction": 0.5048543810844421, "avg_line_length": 24.79166603088379, "blob_id": "d934d28909d7067919cd9ac79170075cc9532922", "content_id": 
"5da7494e7545ea7974858cb1d01f99d1e6cb4b56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "no_license", "max_line_length": 60, "num_lines": 24, "path": "/dynamicProgramming/house_robber2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 213\n\nclass Solution:\n def rob(self, houses):\n if not houses:\n return 0\n \n # last house is not robbed\n rob_first = self.helper(houses, 0, len(houses) - 2)\n # first house is not robbed\n skip_first = self.helper(houses, 1, len(houses) - 1)\n\n return max(rob_first, skip_first)\n \n def helper(self, houses, start, end):\n if end == start:\n return houses[start]\n \n curr, prev = 0, 0\n \n for i in range(start, end + 1):\n curr, prev = max(curr, houses[i] + prev), curr\n \n return curr" }, { "alpha_fraction": 0.467048704624176, "alphanum_fraction": 0.5128939747810364, "avg_line_length": 26.578947067260742, "blob_id": "179d70b0d49f070bc692ea5189d7e65542e6c393", "content_id": "8f8637e573c2eb7a080fa6c99bc629b93738852d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1047, "license_type": "no_license", "max_line_length": 75, "num_lines": 38, "path": "/strings/compare_versions.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\nclass Solution(object):\n def compare_versions(self, version1, version2):\n \"\"\"\n :type version1: str\n :type version2: str\n :rtype: int\n \"\"\"\n\n if not version1 or not version2:\n return None\n\n digits1 = list(map(int, version1.split('.')))\n digits2 = list(map(int, version2.split('.')))\n\n for i in range(max(len(digits1), len(digits2))):\n v1 = digits1[i] if i < len(digits1) else 0\n v2 = digits2[i] if i < len(digits2) else 0\n\n if v1 < v2:\n return -1\n if v1 > v2:\n return 1\n \n return 0\n\nclass Test(unittest.TestCase):\n data = [('1.2.3', '1.2.1', 1), ('1.2', '1.2.4', -1), ('1', 
'1.0.0', 0)]\n\n def test_compare_versions(self):\n solution = Solution()\n for test_data in self.data:\n actual = solution.compare_versions(test_data[0], test_data[1])\n self.assertEqual(actual, test_data[2])\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.4762979745864868, "alphanum_fraction": 0.5033860206604004, "avg_line_length": 23.66666603088379, "blob_id": "17d34074a7d23982c70c06aa885427c668112a0f", "content_id": "4c0be8da1aa542e053865bb475424963b06e92b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 443, "license_type": "no_license", "max_line_length": 45, "num_lines": 18, "path": "/arrays/max_consecutive_ones.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def max_consecutive_ones(self, numbers):\n if not numbers:\n return 0\n\n longest, count = 0, 0\n for num in numbers:\n if num > 0:\n count += 1\n longest = max(longest, count)\n else:\n count = 0\n \n return longest\n\nsolution = Solution()\nnumbers = [1, 1, 0, 1, 1, 1]\nprint(solution.max_consecutive_ones(numbers))" }, { "alpha_fraction": 0.45221444964408875, "alphanum_fraction": 0.503496527671814, "avg_line_length": 35.771427154541016, "blob_id": "24aed2c76c95d7587581974c2eefa75442b8ed07", "content_id": "56ad000ae825ff7c00948b7034965c2163266a3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1287, "license_type": "no_license", "max_line_length": 102, "num_lines": 35, "path": "/dynamicProgramming/range_sum_query2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.matrix import Matrix\n\nclass RangeSumQuery:\n def __init__(self, matrix):\n \"\"\"\n :type matrix: Matrix\n :rtype: None\n \"\"\"\n matrix_sum = [[0 for _ in range(matrix.col_count + 1)] for _ in range(matrix.row_count + 1)]\n for row in range(1, matrix.row_count + 1):\n for col in range(1, matrix.col_count 
+ 1):\n matrix_sum[row][col] = (\n matrix[row - 1][col - 1]\n + matrix_sum[row][col - 1]\n + matrix_sum[row - 1][col]\n - matrix_sum[row - 1][col - 1]\n )\n \n self.matrix_sum = Matrix(matrix_sum)\n print(self.matrix_sum)\n \n def get_range_sum(self, row1, col1, row2, col2):\n return (\n self.matrix_sum[row2 + 1][col2 + 1]\n - self.matrix_sum[row1][col2 + 1]\n + self.matrix_sum[row1][col1]\n - self.matrix_sum[row2 + 1][col1]\n )\n\nmatrix = Matrix([[3, 0, 1, 4, 2], [5, 6, 3, 2, 1], [1, 2, 0, 1, 5], [4, 1, 0, 1, 7], [1, 0, 3, 0, 5]])\nprint(\"Matrix = {}\".format(matrix))\nrangeSumQuery = RangeSumQuery(matrix)\nprint(rangeSumQuery.get_range_sum(2, 1, 4, 3))\nprint(rangeSumQuery.get_range_sum(1, 1, 2, 2))\nprint(rangeSumQuery.get_range_sum(1, 2, 2, 4))\n" }, { "alpha_fraction": 0.5681415796279907, "alphanum_fraction": 0.5787610411643982, "avg_line_length": 16.5625, "blob_id": "01c62a3fae4e67c88626888ba3ecfb9b2b97d207", "content_id": "3ae2e38d3a1d16a3dddfb92904a6cac2e55f0716", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "no_license", "max_line_length": 45, "num_lines": 32, "path": "/linkedList/remove_duplicates.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#83\n\nfrom utils.listNode import ListNode\n\nclass Solution:\n def remove_duplicates(self, head):\n if not head:\n return\n \n curr = head\n\n while curr and curr.next:\n if curr.value == curr.next.value:\n curr.next = curr.next.next\n else:\n curr = curr.next\n\n return head\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(1)\n\none.next = four\nfour.next = two\ntwo.next = three\n\nprint(one)\n\nsolution = Solution()\nprint(solution.remove_duplicates(one))\n\n\n\n" }, { "alpha_fraction": 0.44991791248321533, "alphanum_fraction": 0.48275861144065857, "avg_line_length": 28, "blob_id": "21a9be9cd75a0d310d679aba345b33b33821e981", "content_id": 
"fa73960004cc9474129f25ea0d983d769c298441", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "no_license", "max_line_length": 78, "num_lines": 21, "path": "/binarySearch/min_circular_sorted_array.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_min(self, nums):\n if not nums:\n return None\n\n left, right = 0, len(nums) - 1\n while left < right:\n if nums[left] <= nums[right]: # not rotated\n break\n mid = (left + right) // 2\n if nums[mid] < nums[left]: # min must be on the left of mid or mid\n right = mid\n else: # min must be on the right of mid\n left = mid + 1\n \n return nums[left]\n\nsolution = Solution()\nnums = [7,0,1,2,3,4,5,6]\n# nums = [4,5,6, 7, 8, 1, 2, 3]\nprint(solution.get_min(nums))\n" }, { "alpha_fraction": 0.54347825050354, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 20.190475463867188, "blob_id": "68b4f04690a4ce4996d2014ebf77bea41cf11542", "content_id": "fba379defde074da4a48d2c9eafb886f29fd00dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1334, "license_type": "no_license", "max_line_length": 97, "num_lines": 63, "path": "/linkedList/delete_nth_node_from_end.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#19\n\nfrom utils.listNode import ListNode\n\nclass Solution:\n def delete_from_end(self, head, n):\n if not head:\n return\n \n front, back = head, head\n\n dummy = prev = ListNode(None)\n\n while n > 0:\n back = back.next\n n -= 1\n \n while back:\n back = back.next\n prev.next = front\n prev = front\n front = front.next\n \n # front is the node I need to delete, and prev is right behind it\n\n prev.next = prev.next.next\n\n return dummy.next\n\nclass Solution2:\n def delete_from_end(self, head, n):\n first, second = head, head\n\n for _ in range(n):\n first = first.next\n \n if not first: # n is the 
length of the linked list. the first element needs to be removed\n return head.next\n \n while first.next:\n first = first.next\n second = second.next\n \n # second is right before the nth element from the end\n second.next = second.next.next\n\n return head\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\n\nprint(one)\n\nsolution = Solution()\nprint(solution.delete_from_end(one, 2))" }, { "alpha_fraction": 0.5065034627914429, "alphanum_fraction": 0.5156847834587097, "avg_line_length": 19.123077392578125, "blob_id": "b3f6292b4744fe30a8ca45adcf86c6a525971279", "content_id": "3f1b32d1f73a6f2171138384b944c6f7652b5f76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1307, "license_type": "no_license", "max_line_length": 46, "num_lines": 65, "path": "/linkedList/odd_even_linked_list.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 328\n\nfrom utils.listNode import ListNode\n\nclass Solution:\n def odd_even_list(self, head):\n if not head:\n return None\n \n odd = head\n even_head, even = head.next, head.next\n\n while even and even.next:\n odd.next = even.next\n odd = odd.next\n even.next = odd.next\n even = even.next\n \n odd.next = even_head\n\n return head\n\n\nclass Solution2:\n def odd_even_list(self, head):\n if not head:\n return None\n \n odd_head = odd_tail = ListNode(None)\n even_head = even_tail = ListNode(None)\n\n node = head\n count = 1\n\n while node:\n if count == 1:\n odd_tail.next = node\n odd_tail = odd_tail.next\n else:\n even_tail.next = node\n even_tail = even_tail.next\n \n count = 1 - count\n node = node.next\n \n even_tail.next = None\n odd_tail.next = even_head.next\n\n return odd_head.next\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\n\none.next = two\ntwo.next = 
three\nthree.next = four\nfour.next = five\n\nprint(one)\n\nsolution = Solution()\nprint(solution.odd_even_list(one))" }, { "alpha_fraction": 0.3968871533870697, "alphanum_fraction": 0.4182879328727722, "avg_line_length": 19.600000381469727, "blob_id": "18702aa254fa3fd2c90c761f57c8385f603ae205", "content_id": "d1e6de95946260f72604c728e252fc13614ad470", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 37, "num_lines": 25, "path": "/arrays/remove_duplicate_sorted_array.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def remove_duplicate(self, arr):\n \"\"\"\n type arr: list\n rtype: int\n \"\"\"\n if not arr:\n return 0\n\n j = 0\n for i in range(1, len(arr)):\n if arr[i] != arr[j]:\n j += 1\n arr[j] = arr[i]\n j += 1\n \n for i in range(len(arr) - j):\n arr.pop()\n \n return j\n\nsolution = Solution()\narr = [1, 1, 2, 3, 4, 4]\nprint(solution.remove_duplicate(arr))\nprint(arr)" }, { "alpha_fraction": 0.45215311646461487, "alphanum_fraction": 0.48564592003822327, "avg_line_length": 22.27777862548828, "blob_id": "c43efd83deabd546efb29ddb99fc6f1b894967ab", "content_id": "af7adb7e0e89b3db62bea6980d73dd5dfba1177e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 39, "num_lines": 18, "path": "/arrays/find_all_duplicates.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#442\n\nclass Solution:\n def get_duplicates(self, nums):\n if not nums:\n return None\n result = []\n for num in nums:\n index = abs(num) - 1\n if nums[index] < 0:\n result.append(abs(num))\n else:\n nums[index] *= -1\n return result\n\nsolution = Solution()\nnums = [4, 3, 2, 7, 8, 2, 4, 1]\nprint(solution.get_duplicates(nums))" }, { "alpha_fraction": 0.5227586030960083, "alphanum_fraction": 
0.524597704410553, "avg_line_length": 26.531644821166992, "blob_id": "4501e6e240c0e3aceea8fefb2d92e06f03967e86", "content_id": "a2466b418bb01fc6facd8d3459ad71cd803bc50c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2175, "license_type": "no_license", "max_line_length": 155, "num_lines": 79, "path": "/graphs/reconstruct_itenirary.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import defaultdict\nimport unittest\nclass Solution:\n def reconstruct_itinerary(self, flights):\n graph = self.build_graph(flights)\n\n path = []\n\n self.dfs('JFK', graph, path, len(flights))\n\n return path\n\n def dfs(self, node, graph, path, remaining):\n if node == 'X':\n return\n\n print('current node: {} - current path: {}'.format(node, path))\n path.append(node)\n\n if remaining == 0:\n return True\n \n for i, nbr in enumerate(graph[node]):\n # remove nbr from the graph\n graph[node][i] = 'X'\n if self.dfs(nbr, graph, path, remaining - 1):\n return True\n graph[node][i] = nbr\n\n path.pop()\n return False\n \n def build_graph(self, flights):\n graph = defaultdict(list)\n\n for source, dest in flights:\n graph[source].append(dest)\n \n for nbrs in graph.values():\n nbrs.sort()\n \n return graph\n\nclass Solution3:\n def reconstruct_itinerary(self, flights):\n graph = self.build_graph(flights)\n path = []\n\n def dfs(airport):\n path.append(airport)\n while graph[airport]:\n nbr = graph[airport].pop()\n dfs(nbr)\n \n dfs('JFK')\n return path\n \n def build_graph(self, flights):\n graph = defaultdict(list)\n for start, end in flights:\n graph[start].append(end)\n \n\n for node in graph:\n graph[node].sort(reverse=True)\n \n return graph\n\nclass Test(unittest.TestCase):\n test_data = [[[\"MUC\", \"LHR\"], [\"JFK\", \"MUC\"], [\"SFO\", \"SJC\"], [\"LHR\", \"SFO\"]], [[\"JFK\",\"SFO\"],[\"JFK\",\"ATL\"],[\"SFO\",\"ATL\"],[\"ATL\",\"JFK\"],[\"ATL\",\"SFO\"]]]\n expected_results = 
[[\"JFK\", \"MUC\", \"LHR\", \"SFO\", \"SJC\"], [\"JFK\",\"ATL\",\"JFK\",\"SFO\",\"ATL\",\"SFO\"]]\n\n def test_reconstruct_itinerary(self):\n solution = Solution3()\n for i, data in enumerate(self.test_data):\n self.assertEqual(solution.reconstruct_itinerary(data), self.expected_results[i])\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5953947305679321, "alphanum_fraction": 0.6085526347160339, "avg_line_length": 23.58333396911621, "blob_id": "496af71d26414d12916ab10318dd33810d7ef570", "content_id": "9960dffa260ba7d405062b57ef2c5eeae499ffd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "no_license", "max_line_length": 77, "num_lines": 12, "path": "/trees/binary_tree_pruning.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#814\n\nfrom utils.treeNode import TreeNode\n\nclass Solution:\n def prune(self, root):\n if not root:\n return root\n \n root.left, root.right = self.prune(root.left), self.prune(root.right)\n\n return root if root.value == 1 or root.left or root.right else None\n\n " }, { "alpha_fraction": 0.5443786978721619, "alphanum_fraction": 0.5621301531791687, "avg_line_length": 17.065217971801758, "blob_id": "7520b9abad6146a03ca74f72109866122b961394", "content_id": "917630a25ac2dcd44d0848346cdd5fe83fea451f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 845, "license_type": "no_license", "max_line_length": 78, "num_lines": 46, "path": "/linkedList/linked_list_components.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 817\n\nfrom utils.listNode import ListNode\n\nclass Solution:\n # time: O(len of linked list)\n # space: O(len of G)\n def get_connected_count(self, head, G):\n count = 0\n\n if not head:\n return count\n \n values_set = set(G)\n\n prev = ListNode(None)\n prev.next = head\n\n while prev.next:\n if prev.value not in 
values_set and prev.next.value in values_set:\n count += 1\n \n prev = prev.next\n \n return count\n\nzero = ListNode(0)\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\n\nzero.next = one\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\n\nprint(zero)\nG = [0, 3, 1, 4]\nprint(G)\n\n\nsolution = Solution()\nprint(solution.get_connected_count(zero, G))\n \n\n" }, { "alpha_fraction": 0.5885714292526245, "alphanum_fraction": 0.5885714292526245, "avg_line_length": 26, "blob_id": "a9d26d3b54bd0611c98016c915f2b3f3aeef2e67", "content_id": "e97b697e0a34818517d361473a353dd031ba5e11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 86, "num_lines": 13, "path": "/trees/path_sum.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n def has_path_sum(self, node, sum):\n if not node:\n return False\n \n sum -= node.value\n\n if not sum and not node.left and not node.right:\n return True\n \n return self.has_path_sum(node.left, sum) or self.has_path_sum(node.right, sum)" }, { "alpha_fraction": 0.5042333006858826, "alphanum_fraction": 0.5174036026000977, "avg_line_length": 33.32258224487305, "blob_id": "9aa432f1a261b9ec1a96fa6c64387a2b37c69682", "content_id": "1880c4a833a026d677a2a1fe3d28ef51c3bf6deb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1063, "license_type": "no_license", "max_line_length": 95, "num_lines": 31, "path": "/arrays/matrix_spiral2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def generate_spiral(self, n):\n if n <= 0:\n raise Exception('n should be bigger than 0')\n\n # matrix = [[0] * n] * n # for some reason this does not work\n matrix = [[0 for _ in range(n)] for _ in range(n)]\n row, col = 
0, 0\n d_row, d_col = 0, 1\n for i in range(n ** 2):\n matrix[row][col] = i + 1\n next_row, next_col = row + d_row, col + d_col\n if self.is_out_of_border(next_row, next_col, n) or matrix[next_row][next_col] != 0:\n # the next cell is either out of border, or already processed. Change direction\n d_row, d_col = d_col, - d_row\n # move to the next cell\n row += d_row\n col += d_col\n \n return matrix\n \n def is_out_of_border(self, row, col, n):\n return row < 0 or row == n or col < 0 or col == n\n \n def print_matrix(self, matrix):\n for row in matrix:\n print(row)\n\n\nsolution = Solution()\nsolution.print_matrix(solution.generate_spiral(3))" }, { "alpha_fraction": 0.47865167260169983, "alphanum_fraction": 0.5258427262306213, "avg_line_length": 28.700000762939453, "blob_id": "49b54494ae0df45f1d2112dd5890149e5dab4142", "content_id": "7db2e77343040f7087dc4773b9516d876396194b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 890, "license_type": "no_license", "max_line_length": 72, "num_lines": 30, "path": "/strings/time_conversion.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\nclass Solution(object):\n def time_conversion(self, time_str):\n if not time_str: \n return None\n \n time_list = list(time_str)\n is_pm = time_list[-2].lower() == 'p'\n\n # handle the 12 AM case. 
It should be converted to 00\n if not is_pm and time_str[:2] == '12':\n time_list[:2] = ['0', '0']\n elif is_pm:\n hour = str(int(time_str[:2]) + 12)\n time_list[:2] = list(hour)\n \n return ''.join(map(str, time_list[:-2]))\n\nclass Test(unittest.TestCase):\n test_data = [('03:22:22PM', '15:22:22'), ('12:22:22AM', '00:22:22')]\n def test_time_conversion(self):\n solution = Solution()\n\n for data in self.test_data:\n actual = solution.time_conversion(data[0])\n self.assertEqual(actual, data[1])\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.5765422582626343, "alphanum_fraction": 0.5818735957145691, "avg_line_length": 28.200000762939453, "blob_id": "055b8bb40a3e3f84d0f2e7197aca77cbfb618bb9", "content_id": "182c9aae6d924189b7647aed3cb35fba01ff6690", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1313, "license_type": "no_license", "max_line_length": 108, "num_lines": 45, "path": "/strings/ransom_note.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import collections\nimport unittest\n\n# time: O(M + N) - space: O(N)\n# def can_construct(ransom, magazine):\n# if not magazine: \n# return False\n\n# ransom_dict = dict()\n\n# for s in ransom:\n# if s not in ransom_dict:\n# ransom_dict[s] = 1\n# else:\n# ransom_dict[s] += 1\n\n# for char in magazine:\n# if char in ransom_dict:\n# if ransom_dict[char] > 1:\n# ransom_dict[char] -= 1\n# else:\n# del ransom_dict[char]\n \n# return not ransom_dict\n\n# def can_construct(ransom, magazine):\n# return all(ransom.count(x) <= magazine.count(x) for x in set(ransom))\n\n#time: O(M+N) - space: O(M+N)\n# each time a Counter is produced through an operation, any items with zero or negative counts are discarded\nclass Solution(object):\n def can_construct(self, ransom, magazine):\n return not collections.Counter(ransom) - collections.Counter(magazine)\n\nclass Test(unittest.TestCase):\n test_data = [('ab', 'aab', True), 
('abb', 'aab', False)]\n\n def test_can_construct(self):\n solution = Solution()\n for data in self.test_data:\n actual = solution.can_construct(data[0], data[1])\n self.assertIs(actual, data[2])\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.588770866394043, "alphanum_fraction": 0.6031866669654846, "avg_line_length": 40.21875, "blob_id": "7a3ee481c967b34ba2a8fff0c549d5fb39ec3f52", "content_id": "40b1acfad50b119c29e63ca5a71c6d1e8524eeae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1318, "license_type": "no_license", "max_line_length": 127, "num_lines": 32, "path": "/binarySearch/find_right_interval.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.interval import Interval\n\nclass Solution:\n def get_right_intervals(self, intervals):\n intervals = [(intervals[i], i) for i in range(len(intervals))]\n # In order to do binary search, the array needs to be sorted\n # We need to sort by the start because the intervals with a bigger start represent the pool of possibilities\n intervals.sort(key= lambda x: x[0].start)\n result = [-1 for _ in range(len(intervals))]\n\n for index, (interval, i) in enumerate(intervals):\n left, right = index + 1, len(intervals) # right = len(intervals) means that it is possible to get no right interval\n while left < right:\n mid = (left + right) // 2\n midInterval = intervals[mid][0]\n if midInterval.start < interval.end:\n left = mid + 1\n else:\n right = mid\n # left is the index of the right interval\n if left < len(intervals):\n result[i] = intervals[left][1]\n \n return result\n\nsolution = Solution()\ninterval1 = Interval(1, 4)\ninterval2 = Interval(2, 3)\ninterval3 = Interval(3, 4)\nintervals = [interval1, interval3, interval2]\nprint(\"intervals = {}\".format(intervals))\nprint(solution.get_right_intervals(intervals))" }, { "alpha_fraction": 0.5174262523651123, "alphanum_fraction": 0.5375335216522217, 
"avg_line_length": 24.55172348022461, "blob_id": "8cf3185eb78cc68cd15043c29d6253b7d4319398", "content_id": "3764b9dc6dc4bbcf4820c90f0ce03967052043f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "no_license", "max_line_length": 55, "num_lines": 29, "path": "/trees/maximum_binary_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#654\n\nfrom utils.treeNode import TreeNode\n\nclass Solution:\n def build_maximum_tree(self, nums):\n if not nums:\n return None\n \n return self.helper(nums, 0, len(nums) - 1)\n \n def helper(self, nums, start, end):\n if start > end:\n return None\n \n max_num, index = float('-inf'), -1\n for i, num in enumerate(nums[start: end + 1]):\n if num > max_num:\n max_num, index = num, i + start\n \n root = TreeNode(max_num)\n root.left = self.helper(nums, start, index - 1)\n root.right = self.helper(nums, index + 1, end)\n\n return root\n\nnums = [3,2,1,6,0,5]\nsolution = Solution()\nprint(solution.build_maximum_tree(nums)) " }, { "alpha_fraction": 0.4837837815284729, "alphanum_fraction": 0.4837837815284729, "avg_line_length": 25.5, "blob_id": "6246cefe9d518c506aa5201da399086f3d523eb9", "content_id": "e9173d20dda351f62e056d1cb4b75de43036f1f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 71, "num_lines": 14, "path": "/utils/listNode.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class ListNode:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n # def __repr__(self):\n # return '(value = {}, next: {})'.format(self.value, self.next)\n \n def __repr__(self):\n nodes = []\n while self:\n nodes.append(str(self.value))\n self = self.next\n return ' -> '.join(nodes)" }, { "alpha_fraction": 0.514650285243988, "alphanum_fraction": 0.525519847869873, "avg_line_length": 
21.273683547973633, "blob_id": "d5ac0c77d0c4fed2b63dab1618db45a7aa5c56b9", "content_id": "04576715b4513f049a38811780a54840380d9800", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2116, "license_type": "no_license", "max_line_length": 73, "num_lines": 95, "path": "/trees/diameter_binary_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 543\n\nfrom utils.treeNode import TreeNode\n\n# time: O(N)\n# space: O(N)\nclass Solution:\n def dimater(self, root):\n self.result = 0\n if not root:\n return self.result\n \n def depth(root):\n left_depth = 1 + depth(root.left) if root.left else 0\n right_depth = 1 + depth(root.right) if root.right else 0\n self.result = max(self.result, left_depth + right_depth)\n\n return max(left_depth, right_depth)\n \n depth(root)\n return self.result\n\n\n# time = O(N)\n# space = O(N)\nclass Solution2:\n heights_map = dict()\n\n def dimater(self, root):\n if not root:\n return 0\n \n return max(\n self.height(root.left) + self.height(root.right),\n self.dimater(root.left),\n self.dimater(root.right)\n )\n \n def height(self, root):\n if not root:\n return 0\n \n if root in self.heights_map:\n return self.heights_map[root]\n \n height = 1 + max(self.height(root.left), self.height(root.right))\n self.heights_map[root] = height\n\n return height\n\none = TreeNode(1)\ntwo = TreeNode(2)\nthree = TreeNode(3)\nfour = TreeNode(4)\nfive = TreeNode(5)\nsix = TreeNode(6)\nseven = TreeNode(7)\n\none.left = two\ntwo.left = three\nthree.left = four\ntwo.right = five\nfive.right = six\nsix.right = seven\n\nprint(one)\nprint('===============')\n\nsolution = Solution()\nprint(solution.dimater(one))\n\nclass SOlution:\n def get_diameter(self, root):\n depth_map = dict()\n\n if not root:\n return 0\n \n def depth(root):\n if not root:\n return 0\n \n if root in depth_map:\n return depth_map[root]\n \n result = 1 + max(depth(root.left), depth(root.right))\n 
depth_map[root] = result\n \n return result\n \n return max(\n 1 + depth(root.left) + depth(root.right),\n self.get_diameter(root.left),\n self.get_diameter(root.right)\n )\n" }, { "alpha_fraction": 0.4042338728904724, "alphanum_fraction": 0.41532257199287415, "avg_line_length": 22.069766998291016, "blob_id": "749497313a309e1c07ca508b7f19078e1c80b35e", "content_id": "592caff9634765ae7d059c564693b8b5ddd3f5dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 992, "license_type": "no_license", "max_line_length": 59, "num_lines": 43, "path": "/arrays/number_of_subarrays_bounded_maximum.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 795\n\nclass Solution:\n def subarray_count(self, nums, L, R):\n # dp is the number of subarrays ending with nums[i]\n result, dp = 0, 0\n prev_invalid_index = -1\n\n if not nums:\n return result\n \n for i, num in enumerate(nums):\n if num < L:\n result += dp\n \n elif num > R:\n dp = 0\n prev_invalid_index = i\n \n else:\n dp = i - prev_invalid_index\n result += dp\n \n return result\n\nclass Solution2:\n def subarray_count(self, nums, L, R):\n prev_invalid_index = -1\n res = count = 0\n\n for i, num in enumerate(nums):\n if num < L:\n res += count\n\n elif num > R:\n count = 0\n prev_invalid_index = i\n\n else:\n count = i - prev_invalid_index\n res += count\n \n return res\n" }, { "alpha_fraction": 0.5189003348350525, "alphanum_fraction": 0.5189003348350525, "avg_line_length": 35.33333206176758, "blob_id": "f0fbfbc17b1dfdfb7f5f1428a10a91080a1cb7a0", "content_id": "858286d0bd2dd56edc6e7cf75af8ab40da1e7422", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 873, "license_type": "no_license", "max_line_length": 69, "num_lines": 24, "path": "/trees/delete_note_bst.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n # time: O(N) worst case, O(height) 
average\n # space: O(N) worst case, O(height) average\n def delete_node(self, root, value):\n if not root:\n return None\n \n if root.value > value:\n root.left = self.delete_node(root.left, value)\n elif root.value < value:\n root.right = self.delete_node(root.right, value)\n else:\n if not root.left or not root.right:\n # one or no children\n root = root.left or root.right\n else:\n # has both children\n next_biggest = root.right\n while next_biggest.left:\n next_biggest = next_biggest.left\n root.value = next_biggest.value\n root.right = self.delete_node(root.right, root.value)\n \n return root\n\n" }, { "alpha_fraction": 0.5049669146537781, "alphanum_fraction": 0.5132450461387634, "avg_line_length": 19.827587127685547, "blob_id": "c40afcd8ebb9b6fadbaf3269752c29279f9acef2", "content_id": "1e7b3cc5411c9c2a6603f5075cdcfe1c3948713e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1812, "license_type": "no_license", "max_line_length": 54, "num_lines": 87, "path": "/linkedList/reorder_list.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#143\n\nfrom utils.listNode import ListNode\n\nclass Solution:\n def reorder(self, head):\n if not head:\n return head\n \n fast, slow = head, head\n\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n \n rev, node = None, slow\n while node:\n rev, rev.next, node = node, rev, node.next\n \n first, second = head, rev\n while second.next:\n first.next, first = second, first.next\n second.next, second = first, second.next\n \n return head\n\n\nclass Solution2:\n def reorder(self, head):\n list_map = dict()\n curr, i = head, 1\n while curr:\n list_map[i] = curr\n curr = curr.next\n i += 1\n \n left, right = 1, i - 1\n\n node = head\n while left <= right:\n node.next = list_map[right]\n left += 1\n\n if left <= right:\n node = node.next\n node.next = list_map[left]\n right -= 1\n node = node.next\n \n node.next = None\n\n return 
head\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\n\nprint(one)\n\nsolution = Solution()\nprint(solution.reorder(one))\n\ndef reorder(head):\n if not head:\n return None\n \n slow = fast = head\n\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n \n rev = ListNode(None)\n while slow:\n rev, rev.next, slow = slow, rev, slow.next\n \n first, second = head, rev\n while second.next:\n first.next, first = second, first.next\n second.next, second = first, second.next\n" }, { "alpha_fraction": 0.5369774699211121, "alphanum_fraction": 0.5594855546951294, "avg_line_length": 24.95833396911621, "blob_id": "1808ec8910f906a9f2c0d7998a3825a71b86111f", "content_id": "ecbf65eb7c46dea2da86254d4aa4662ed5da1780", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "no_license", "max_line_length": 51, "num_lines": 24, "path": "/trees/binary_tree_from_sorted_array.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n # time: O(N) - space: O(N)\n def build_binary_tree(self, nums):\n if not nums:\n return None\n \n return self.convert(nums, 0, len(nums) - 1)\n\n def convert(self, nums, left, right):\n if left > right:\n return None\n \n mid = (left + right) // 2\n left = self.convert(nums, left, mid - 1)\n right = self.convert(nums, mid + 1, right)\n root = TreeNode(nums[mid], left, right)\n\n return root\n\nnums = [1,2,3,4,5,6,7,8,9]\nsolution = Solution()\nprint(solution.build_binary_tree(nums))" }, { "alpha_fraction": 0.4873524308204651, "alphanum_fraction": 0.49241146445274353, "avg_line_length": 24.826086044311523, "blob_id": "d4d9d481e6b9659bb0dfcf08aa92aa9ab8482819", "content_id": "e1563cca0b6ca728cb1f000bb65446b4251dc790", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 593, "license_type": "no_license", "max_line_length": 49, "num_lines": 23, "path": "/utils/matrix.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Matrix:\n def __init__(self, rows):\n self.rows = rows\n self.row_count = len(rows)\n self.col_count = len(rows[0])\n \n def is_valid_cell(self, row, col):\n return (\n row >= 0 and row < self.row_count and\n col >= 0 and col < self.col_count\n )\n \n def __repr__(self):\n result = ''\n for row in self.rows:\n result += str(row) + '\\n'\n return result\n\n def __getitem__(self, index):\n return self.rows[index]\n \n def __setitem__(self, index, value):\n self.rows[index] = value" }, { "alpha_fraction": 0.4675159156322479, "alphanum_fraction": 0.4968152940273285, "avg_line_length": 31.70833396911621, "blob_id": "ec038db5331858d5f0352da1bb675839979d4916", "content_id": "cc210479ea54055a461804445fc97232acdf0ab1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 785, "license_type": "no_license", "max_line_length": 83, "num_lines": 24, "path": "/arrays/target_sum_amazon.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def sum_target(self, collection1, collection2, target):\n result = []\n sum_dict = dict() \n\n for nums in [collection1, collection2]:\n for num in nums:\n remaining = target - num\n if remaining in sum_dict:\n result.append((remaining, num))\n if sum_dict[remaining] == 1:\n del sum_dict[remaining]\n else:\n sum_dict[remaining] -= 1\n else:\n sum_dict[num] = 1 if num not in sum_dict else sum_dict[num] + 1\n \n return result\n\ncollection1 = [4, 5, 2, 1, 1, 8]\ncollection2 = [7, 1, 8, 8]\n\nsolution = Solution()\nprint(solution.sum_target(collection1, collection2, 9)) " }, { "alpha_fraction": 0.5934426188468933, "alphanum_fraction": 0.6409835815429688, "avg_line_length": 15.486486434936523, "blob_id": 
"7f98717725e7a1a90451c0a8bcb9a8769390d54c", "content_id": "7d2737bf394bf038602d8ea5ee37b0cb1a0633ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 53, "num_lines": 37, "path": "/trees/invert_binary_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n def invert(self, node):\n if not node:\n return\n \n self.invert(node.left)\n self.invert(node.right)\n\n node.left, node.right = node.right, node.left\n\n\nnode1 = TreeNode(1)\nnode2 = TreeNode(2)\nnode3 = TreeNode(3)\nnode4 = TreeNode(4)\nnode5 = TreeNode(5)\nnode6 = TreeNode(6)\nnode7 = TreeNode(7)\n\nnode1.left = node2\nnode1.right = node3\n\nnode2.left = node4\nnode2.right = node5\n\nnode3.left = node6\nnode6.left = node7\n\nprint(node1)\n\nprint('=================')\n\nsolution = Solution()\nsolution.invert(node1)\nprint(node1)\n" }, { "alpha_fraction": 0.5038520693778992, "alphanum_fraction": 0.5053929090499878, "avg_line_length": 28.545454025268555, "blob_id": "8265bbaf22142bb805ae56de92e6d0a2fc717aea", "content_id": "fbd130ba5c03b271a74b044283f7287b9c3d0c30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 649, "license_type": "no_license", "max_line_length": 73, "num_lines": 22, "path": "/strings/simplify_path.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n\n def simplify_path(self, path):\n \"\"\"\n :type path: str\n :rtype: str\n \"\"\"\n\n directories = path.split('/') # get the directories from the path\n result = [] # stack to hold the result\n\n for dir in directories:\n if dir == '..' 
and result: # go up one level if possible\n result.pop()\n elif dir and dir != '.': # add the dir to the stack\n result.append(dir)\n # else ignore '' and '.'\n \n return '/' + result[-1] if result else '/'\n\nsolution = Solution()\nprint(solution.simplify_path('/a/b/c/./../'))" }, { "alpha_fraction": 0.5313553810119629, "alphanum_fraction": 0.5380984544754028, "avg_line_length": 25.9818172454834, "blob_id": "e01babb44e5205b3ffd01a50b458f98e640c19fd", "content_id": "fc18ced244aa92d08d00a89ba069dc2ca3aeddd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1483, "license_type": "no_license", "max_line_length": 54, "num_lines": 55, "path": "/arrays/insert_delete_get_random.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import random\n\nclass RandomizedSet:\n def __init(self):\n self.mapping = {}\n self.items = []\n \n def insert(self, value):\n if value not in self.mapping:\n self.items.append(value)\n self.mapping[value] = len(self.items) - 1\n return True\n return False\n \n def remove(self, value):\n if value not in self.mapping:\n return False\n index = self.mapping[value]\n self.items[index] = self.items[-1]\n self.mapping[self.items[index]] = index\n self.items.pop()\n del self.mapping[value]\n return True\n \n def get_random(self):\n index = random.randint(0, len(self.items) - 1)\n return self.items[index]\n\nclass RandomizedSet2:\n def __init__(self):\n self.mapping = {}\n self.items = []\n \n def insert(self, value):\n if value not in self.mapping:\n self.items.append(value)\n self.mapping[value] = len(self.items) - 1\n return True\n \n return False\n \n def remove(self, value):\n if value in self.mapping:\n index = self.mapping[value]\n self.items[index] = self.items[-1]\n self.mapping[self.items[-1]] = index\n self.items.pop()\n del self.mapping[value]\n return True\n\n return False\n \n def get_random(self):\n index = random.randint(0, len(self.items) - 1)\n return 
self.items[index]" }, { "alpha_fraction": 0.5470925569534302, "alphanum_fraction": 0.5675675868988037, "avg_line_length": 20.821428298950195, "blob_id": "04e55beda5537c2c40f2f434464a80912cf81159", "content_id": "3e31d67a71b55baec1a55b518b5c66e2e86b3054", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1221, "license_type": "no_license", "max_line_length": 80, "num_lines": 56, "path": "/trees/print_binary_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 655\n\n#time: O(H * 2**H - 1) need to fill the result array\n# space: O(H * 2**H - 1) the number of elements in the result array\n\nfrom utils.treeNode import TreeNode\n\nclass Solution:\n def print(self, root):\n if not root:\n return []\n \n height = self.get_height(root)\n\n result = [[\"\" for _ in range((2 ** height - 1))] for _ in range(height)]\n\n self.traverse(root, 0, (2 ** height) - 2, 0, result)\n\n return result\n \n # DFS traverse\n def traverse(self, root, start, end, level, result):\n if not root:\n return\n \n mid = (start + end) // 2\n\n result[level][mid] = root.value\n\n self.traverse(root.left, start, mid - 1, level + 1, result)\n self.traverse(root.right, mid + 1, end, level + 1, result)\n\n\n def get_height(self, root):\n if not root:\n return 0\n \n return 1 + max(self.get_height(root.left), self.get_height(root.right))\n\none = TreeNode(1)\ntwo = TreeNode(2)\nthree = TreeNode(3)\nfour = TreeNode(4)\nfive = TreeNode(5)\n\none.left = two\ntwo.left = three\nthree.left = four\none.right = five\n\nprint(one)\n\nprint('==============')\n\nsolution = Solution()\nprint(solution.print(one))" }, { "alpha_fraction": 0.5603741407394409, "alphanum_fraction": 0.581632673740387, "avg_line_length": 35.78125, "blob_id": "fd1895ab353e8a3d8aae3a80906da73cf391c71d", "content_id": "daa5d5c3ed716ec86a5a8ba895e4e8e14224c7b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1176, "license_type": "no_license", "max_line_length": 107, "num_lines": 32, "path": "/arrays/majority_element.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n # this algorithm is called the Boyer-Moore majority voting algorithm\n # https://stackoverflow.com/questions/4325200/find-the-majority-element-in-array\n\n # the majority element appears more than all the other elements combined. Therefore, if we keep a \n # counter and change the major every time the counter is 0, eventually, major will be the major element\n def majority_element(self, numbers):\n counter, major = 0, None\n\n for num in numbers:\n if counter == 0:\n counter = 1\n major = num\n elif num == major:\n counter += 1\n else:\n counter -= 1\n # major is guaranteed to be the major element if it exists.\n # otherwise, iterate over the array, and count the occurrences of major\n #return major\n counter = 0\n for num in numbers:\n if num == major:\n counter += 1\n if counter > len(numbers) / 2:\n return major\n \n return None\n\nnumbers = [2, 3, 5, 3, 5, 6, 5, 5, 5]\nsolution = Solution()\nprint(solution.majority_element(numbers))" }, { "alpha_fraction": 0.4123076796531677, "alphanum_fraction": 0.48923078179359436, "avg_line_length": 22.285715103149414, "blob_id": "d29530212cfa4dc723aa078ac2f9f36e15b02801", "content_id": "891e87b89f0276fd1ba952dd226cafcff234d0b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 325, "license_type": "no_license", "max_line_length": 63, "num_lines": 14, "path": "/arrays/flipping_image.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 832\n\nclass Solution:\n def flip(self, matrix):\n\n for row in matrix:\n for i in range((len(row) + 1) // 2):\n row[i], row[-1 -i] = 1 - row[-1 -i], 1 - row[i]\n \n return matrix\n\nsolution = Solution()\nmatrix = [[1,1,0,0],[1,0,0,1],[0,1,1,1],[1,0,1,0]]\nprint(solution.flip(matrix))" 
}, { "alpha_fraction": 0.6401869058609009, "alphanum_fraction": 0.6542056202888489, "avg_line_length": 22.88888931274414, "blob_id": "29ad46752a90b896e0fd3bb997e28f7c2fe95be8", "content_id": "942a2e62d8dea256c8ad7767174a4b72239f6c06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 214, "license_type": "no_license", "max_line_length": 50, "num_lines": 9, "path": "/linkedList/delete_node_linked_list.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.listNode import ListNode\n\n#237\n\nclass Solution:\n def delete_node(self, node):\n # node is not the tail => node.next exists\n node.value = node.next.value\n node.next = node.next.next" }, { "alpha_fraction": 0.4547477662563324, "alphanum_fraction": 0.4773738980293274, "avg_line_length": 33.13924026489258, "blob_id": "38f4e76808597eef28e215964184bc9cd731df26", "content_id": "c8cc043c567490bca713de34018ffd24861c881f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2696, "license_type": "no_license", "max_line_length": 116, "num_lines": 79, "path": "/dynamicProgramming/maximum_square.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.matrix import Matrix\n\nclass Solution:\n # O(m * n * min(m, n)) time and O(1) space\n def get_max_square(self, matrix):\n \"\"\"\n :type matrix: Matrix\n :rtype: int\n \"\"\"\n max_area = 0\n for row in range(1, matrix.row_count):\n for col in range(1, matrix.col_count):\n if matrix[row][col] == 0:\n continue\n \n max_length = matrix[row - 1][col - 1]\n length = 1\n while length <= max_length:\n if matrix[row - length][col] == 0 or matrix[row][col - 1] == 0 or matrix[row - 1][col - 1] == 0:\n break\n length += 1\n matrix[row][col] = length ** 2\n max_area = max(max_area, matrix[row][col])\n \n return max_area\n \n # dynamic programming: changing the matrix itself\n def get_max_square2(self, matrix):\n 
\"\"\"\n :type matrix: Matrix\n :rtype: int\n \"\"\"\n max_side = 0\n if not matrix or not matrix.row_count or not matrix.col_count:\n return max_side\n \n for row in range(1, matrix.row_count):\n for col in range(1, matrix.col_count):\n if matrix[row][col] == 0:\n continue\n \n matrix[row][col] = 1 + min(matrix[row - 1][col], matrix[row][col - 1], matrix[row - 1][col - 1])\n max_side = max(max_side, matrix[row][col])\n \n return max_side ** 2\n\n # dynamic programming: keeping an array of longest square sides\n def get_max_square3(self, matrix):\n \"\"\"\n :type matrix: Matrix\n :rtype: int\n \"\"\"\n max_side = 0\n if not matrix or not matrix.row_count or not matrix.col_count:\n return max_side\n \n max_sides = [0 for _ in range(matrix.col_count)]\n \n for row in range(matrix.row_count):\n new_max_sides = [matrix[row][0]] + [0 for _ in range(1, matrix.col_count)]\n max_side = max(max_side, matrix[row][0])\n \n for col in range(1, matrix.col_count):\n if matrix[row][col] == 0:\n continue\n \n new_max_sides[col] = 1 + min(new_max_sides[col - 1], max_sides[col], max_sides[col - 1])\n max_side = max(max_side, new_max_sides[col])\n \n max_sides = new_max_sides\n\n return max_side ** 2\n\n\nmatrix = Matrix([[1, 0, 1, 0, 0], [1, 0, 1, 1, 1], [1, 1, 1, 1, 1], [1, 0, 0, 1, 0]])\nprint(matrix)\nprint('===============')\nsolution = Solution()\nprint(solution.get_max_square3(matrix))" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.5907257795333862, "avg_line_length": 21.159090042114258, "blob_id": "9ec633f7162c2c16671d61152f63ed07c93ec864", "content_id": "ae954e3558810b7594eaf07f3e902eec91dd2120", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 992, "license_type": "no_license", "max_line_length": 71, "num_lines": 44, "path": "/trees/binary_tree_paths.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n def get_paths(self, node):\n 
result = []\n if not node:\n return result\n \n self.helper([], node, result)\n return [\" -> \".join(path) for path in result]\n \n def helper(self, prefix, node, result):\n if not node.left and not node.right:\n #reached a leaf\n result.append(prefix + [str(node.value)])\n return\n \n if node.left:\n self.helper(prefix + [str(node.value)], node.left, result)\n\n if node.right:\n self.helper(prefix + [str(node.value)], node.right, result)\n\n\nnode1 = TreeNode(1)\nnode2 = TreeNode(2)\nnode3 = TreeNode(3)\nnode4 = TreeNode(4)\nnode5 = TreeNode(5)\nnode6 = TreeNode(6)\nnode7 = TreeNode(7)\n\nnode1.left = node2\nnode1.right = node3\n\nnode2.left = node4\nnode2.right = node5\n\nnode3.left = node6\nnode6.left = node7\n\nprint(node1)\nsolution = Solution()\nprint(solution.get_paths(node1))\n \n " }, { "alpha_fraction": 0.4653846025466919, "alphanum_fraction": 0.5038461685180664, "avg_line_length": 26.421052932739258, "blob_id": "39594d5253459a106761bbd39c8dfad5ea36a87d", "content_id": "4f7b6b56ef3fc8caa065288cda3c99a243e09d2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "no_license", "max_line_length": 93, "num_lines": 19, "path": "/dynamicProgramming/triangle.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_min_path(self, triangle):\n \"\"\"\n type triangle: List[List[int]]\n rtype: int\n \"\"\"\n\n if not triangle:\n return 0\n\n for row in range(len(triangle) - 2, -1, -1):\n for col in range(row + 1):\n triangle[row][col] += min(triangle[row + 1][col], triangle[row + 1][col + 1])\n \n return triangle[0][0]\n\ntriangle = [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]\nsolution = Solution()\nprint(solution.get_min_path(triangle))" }, { "alpha_fraction": 0.4151565134525299, "alphanum_fraction": 0.4206480085849762, "avg_line_length": 32.25, "blob_id": "da4f60949658807111fa398b3cf6d6dc98c6cf0e", "content_id": 
"846e26f33837d61d6c50f9545f2bdedc83bb628a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1821, "license_type": "no_license", "max_line_length": 107, "num_lines": 44, "path": "/strings/substring_without_repeating_characters.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "\n\n # Time: O(N) - Space: O(1): the length of the set/map is bounded by the number of the alphabet\n # set.clear() is O(1)\nclass Solution(object):\n def longest_unique_substring(self, str):\n if not str:\n return 0\n str_set = set()\n result = 0\n\n for char in str:\n if char not in str_set:\n # Non repeated character\n str_set.add(char)\n result = max(result, len(str_set))\n else:\n # Repeated character\n str_set.clear()\n str_set.add(char)\n\n # result = max(result, len(str_set))\n return result\n\n # Sliding window technique\n # Maitain a sliding window, updating the start whenever we see a character repeated\n def longest_unique_substring2(self, s):\n \"\"\"\n :type str: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n start = 0 # start index of the current window(substring)\n longest = 0 \n last_seen = {} # mapping from character to its last seen index\n\n for i, char in enumerate(s):\n if char in last_seen and last_seen[char] >= start: # start a new substring after the previous c\n start = last_seen[char] + 1\n else:\n longest = max(longest, i - start + 1)\n \n last_seen[char] = i # update the last sightning index\n \n return longest\n\n " }, { "alpha_fraction": 0.48223039507865906, "alphanum_fraction": 0.48774510622024536, "avg_line_length": 27.64912223815918, "blob_id": "14dacebcc2cefae0bf5467765c895b0994ec35eb", "content_id": "a135ee50487f6f42fdd302f74ffd5187d8af86a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1632, "license_type": "no_license", "max_line_length": 83, "num_lines": 57, "path": "/arrays/word_search.py", "repo_name": 
"aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.matrix import Matrix\n\nclass Solution:\n def exist(self, matrix, word):\n \"\"\"\n :type matrix: Matrix\n :type word: Str\n :rtype: boolean\n \"\"\"\n if not matrix.row_count or not matrix.col_count:\n return False\n \n for row in range(matrix.row_count):\n for col in range(matrix.col_count):\n if self.can_find(matrix, row, col, 0, word):\n return True\n \n return False\n \n def can_find(self, matrix, row, col, index, word):\n \"\"\"\n :type matrix: Matrix\n :type row: int\n :type col: int\n :type index: int\n :type word: str\n :rtype: boolean\n \"\"\"\n if index == len(word):\n return True\n\n if not matrix.is_valid_cell(row, col):\n return False\n \n if matrix[row][col] != word[index]:\n return False\n \n # in order to avoid using the same cell twice, we should mark it\n matrix[row][col]= '*'\n \n found = (\n self.can_find(matrix, row + 1, col, index + 1, word) or\n self.can_find(matrix, row - 1, col, index + 1, word) or \n self.can_find(matrix, row, col + 1, index + 1, word) or\n self.can_find(matrix, row, col - 1, index + 1, word)\n )\n \n if found:\n return True\n \n matrix[row][col] = word[index]\n return False\n \nmatrix = Matrix([['A', 'B', 'C', 'E'], ['S', 'F', 'C', 'S'], ['A', 'D', 'E', 'E']])\nsolution = Solution()\nprint(matrix)\nprint(solution.exist(matrix, 'ABCCED'))" }, { "alpha_fraction": 0.4932432472705841, "alphanum_fraction": 0.5054054260253906, "avg_line_length": 26.44444465637207, "blob_id": "0a9bfabdb002a1045206677af195de25d481b7a5", "content_id": "f079ee9c828bbafbf101908b0ab1e67b453649f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 740, "license_type": "no_license", "max_line_length": 107, "num_lines": 27, "path": "/dynamicProgramming/longest_increasing_sequence.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from bisect import bisect\n\nclass Solution:\n # Time: O(n log n) - Space: 
O(n)\n def get_longest_increasing_sequence(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n \n dp = []\n for num in nums:\n index = bisect(dp, num)\n if index == len(dp):\n dp.append(num)\n else:\n # num is smaller than the current value at dp[index], therefore, num can be used to build a\n # longer increasing sequence\n dp[index] = num\n \n return len(dp)\n\nsolution = Solution()\nnums = [5, 2, 3, 8, 1, 19, 7]\nprint(solution.get_longest_increasing_sequence(nums))" }, { "alpha_fraction": 0.42071881890296936, "alphanum_fraction": 0.4460887908935547, "avg_line_length": 21.571428298950195, "blob_id": "0cedf309685fd2519a2b549ef7f93faacdd8cdd2", "content_id": "2c80a7dd61ed4728d52ac48d7b16c21805e2bf19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 473, "license_type": "no_license", "max_line_length": 39, "num_lines": 21, "path": "/arrays/remove_duplicate_sorted_array2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def remove_duplicates(self, nums):\n if len(nums) <= 2:\n return len(nums)\n \n j = 1\n for i in range(2, len(nums)):\n if nums[i] > nums[j - 1]:\n j += 1\n nums[j] = nums[i]\n \n j += 1\n for _ in range(j, len(nums)):\n nums.pop()\n \n return j\n\nsolution = Solution()\nnums = [1,1,1,2,2,3]\nprint(solution.remove_duplicates(nums))\nprint(nums)" }, { "alpha_fraction": 0.5136778354644775, "alphanum_fraction": 0.5136778354644775, "avg_line_length": 23.407407760620117, "blob_id": "bb6064772a57754d0ae2ecfe6fcfece906a63a2f", "content_id": "71384dad27ad91a9d0eca4b1e9714f249ce025dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 658, "license_type": "no_license", "max_line_length": 55, "num_lines": 27, "path": "/graphs/topological_sort.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def 
topological_sort(self, graph):\n result = []\n discovered = set()\n path = []\n\n for node in graph:\n self.helper(node, result, discovered, path)\n \n return result.reverse()\n \n def helper(self, node, result, discovered, path):\n if node in discovered:\n return\n \n path.append(node)\n\n discovered.add(node)\n\n for nbr in node.neighbors:\n if nbr in path:\n raise Exception('Cyclic graph')\n \n self.helper(nbr, result, discovered, path)\n \n path.pop()\n result.append(node.key)" }, { "alpha_fraction": 0.5261043906211853, "alphanum_fraction": 0.5502008199691772, "avg_line_length": 29.5, "blob_id": "629f4945cc0437213b04fd58fa9d7f3ef10a9779", "content_id": "27c43b4986d481c6e6140217b86a53d947d2b0d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/binarySearch/intersection_of_two_arrays.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_interesection(self, nums1, nums2):\n set1 = set(nums1)\n intersection = set()\n for num in nums2:\n if num in set1:\n intersection.add(num)\n return list(intersection)\n\n " }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.5355392098426819, "avg_line_length": 33, "blob_id": "184e63ab0ed65582af2622bf75b3e00f2a4a2c47", "content_id": "b5a8f7bb4b6bd5f4bf81e37f5abf8c297b5555e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 816, "license_type": "no_license", "max_line_length": 108, "num_lines": 24, "path": "/arrays/container_with_most_water.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def max_area(self, heights):\n \"\"\"\n :type heights: List(int)\n :rtype: int\n \"\"\"\n if not heights:\n return 0\n\n left = 0\n right = len(heights) - 1\n # calculate the area of the outer container\n max_area = (right - left) * 
min(heights[left], heights[right])\n\n # start moving in-ward. \n # In order to get a bigger area, the min of both left and right borders need to be higher\n while left < right:\n if heights[left] < heights[right]: # increment left for the possibility of finding a larger area\n left += 1\n else:\n right -= 1\n max_area = max(max_area, (right - left) * min(heights[left], heights[right]))\n \n return max_area\n" }, { "alpha_fraction": 0.5628803372383118, "alphanum_fraction": 0.5801216959953308, "avg_line_length": 30.80645179748535, "blob_id": "8bbcac71e39f33be01174c697c6dee7e0c7b96b7", "content_id": "d4dbdd8b48729d952177c11c4616ef0d2d0ff206", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 986, "license_type": "no_license", "max_line_length": 98, "num_lines": 31, "path": "/arrays/random_pick_index.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import random\nfrom collections import defaultdict\n\nclass Solution:\n # O(1) space and time in initialization. O(n) time and O(1) space when getting the rand index \n def __init__(self, nums):\n self.nums = nums\n\n def get_random_index(self, target):\n result, count = None, 0\n for i, num in enumerate(self.nums):\n if num == target:\n if random.randint(0, count) == 0:\n result = i\n count += 1\n return result\n\n #Solution 2:\n # O(N) space and time in initilization. 
O(1) space and time when picking random index\n # def __init__(self, nums):\n # self.nums = nums\n # self.mapping = defaultdict(list)\n # for i, num in enumerate(nums):\n # self.mapping[num].append(i)\n \n # def get_random_index(self, target):\n # return random.choice(self.mapping[target])\n\nnums = [1, 3, 2, 2, 3, 3, 3, 4]\nsolution = Solution(nums)\nprint(solution.get_random_index(2))\n" }, { "alpha_fraction": 0.4944238066673279, "alphanum_fraction": 0.5055761933326721, "avg_line_length": 25.933332443237305, "blob_id": "7a7db52db33a028d6c42dd4a7c74a0edeb3c212f", "content_id": "71c733750df596b5703a6ed7c98b0dc177704ca3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 807, "license_type": "no_license", "max_line_length": 55, "num_lines": 30, "path": "/strings/longestCommonPrefix.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_longest_common_prefix(self, words):\n if not words:\n return ''\n \n min_word_length = min(words, key=len)\n\n start, end = 0, len(min_word_length) - 1\n\n while start <= end:\n mid = (start + end) // 2\n if not self.is_common_prefix(mid, words):\n end = mid - 1\n else:\n start = mid + 1\n \n return words[0][:end + 1]\n\n def is_common_prefix(self, length, words):\n prefix = words[0][:length + 1]\n \n for word in words:\n if not word.startswith(prefix):\n return False\n \n return True\n\nlist_strings = ['abu', 'abcd', 'abce', 'abcee']\nsolution = Solution()\nprint(solution.get_longest_common_prefix(list_strings))" }, { "alpha_fraction": 0.5080214142799377, "alphanum_fraction": 0.51871657371521, "avg_line_length": 25.85714340209961, "blob_id": "340e1a1501f886d5ea3944919fec4a41df0fc489", "content_id": "b9238d968d0cc27560b706e7adfd7dee9024d595", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 187, "license_type": "no_license", "max_line_length": 56, "num_lines": 7, "path": 
"/utils/interval.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Interval:\n def __init__(self, start, end):\n self.start = start\n self.end = end\n \n def __repr__(self):\n return \"[{0}, {1}]\".format(self.start, self.end)" }, { "alpha_fraction": 0.4125269949436188, "alphanum_fraction": 0.4254859685897827, "avg_line_length": 26.294116973876953, "blob_id": "e23060ca8a75f3ee8366afe45ada60d466f9ab0a", "content_id": "d6021209a6d92674d15f0e3c40ca2ee6da65ebc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 463, "license_type": "no_license", "max_line_length": 58, "num_lines": 17, "path": "/dynamicProgramming/word_break.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def word_break(self, s, wordDict):\n if not wordDict:\n return False\n \n if not s:\n return True\n \n can_make = [True] + [False for _ in range(len(s))]\n\n for i in range(1, len(s) + 1):\n for j in range(i - 1, -1, -1):\n if can_make[j] and s[j:i] in wordDict:\n can_make[i] = True\n break\n \n return can_make[-1]" }, { "alpha_fraction": 0.4442176818847656, "alphanum_fraction": 0.47551020979881287, "avg_line_length": 19.43055534362793, "blob_id": "5779584594bbbe88621730a841b9c3f87ecfff67", "content_id": "b5c50c45de87e31e994d1a3ebce6f98ad917077f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1470, "license_type": "no_license", "max_line_length": 53, "num_lines": 72, "path": "/linkedList/merge_two_sorted_lists.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.listNode import ListNode\n\nclass Solution:\n def merge(self, l1, l2):\n if not l1 or not l2:\n return l1 or l2\n \n node1, node2 = l1, l2\n head = None\n curr = None\n\n while node1 and node2:\n min_value = min(node1.value, node2.value)\n\n if min_value == node1.value:\n node1 = node1.next\n else:\n node2 = 
node2.next\n \n if not head:\n head = ListNode(min_value)\n curr = head\n else:\n curr.next = ListNode(min_value)\n curr = curr.next\n \n if node1:\n curr.next = node1\n elif node2:\n curr.next = node2\n \n return head\n\n# time: O(m + n)\n# space: O(1)\nclass Solution2:\n def merge(self, l1, l2):\n prev = dummy = ListNode(None)\n\n while l1 and l2:\n if l1.value < l2.value:\n prev.next = l1\n l1 = l1.next\n else:\n prev.next = l2\n l2 = l2.next\n \n prev = prev.next\n \n prev.next = l1 or l2\n\n return dummy.next\n\none = ListNode(1)\ntwo = ListNode(2)\nfour = ListNode(4)\n\none.next = two\ntwo.next = four\n\none_ = ListNode(1)\nthree_ = ListNode(3)\nfour_ = ListNode(4)\n\none_.next = three_\nthree_.next = four_\n\nprint(one)\nprint(one_)\n\nsolution = Solution2()\nprint(solution.merge(one, one_))" }, { "alpha_fraction": 0.5107398629188538, "alphanum_fraction": 0.5115354061126709, "avg_line_length": 25.76595687866211, "blob_id": "ad9844686d0385ce8dc8a5dba246d36b657579e7", "content_id": "762f2906470c2cfdac0accd16b41b62cabc9254a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1257, "license_type": "no_license", "max_line_length": 56, "num_lines": 47, "path": "/graphs/clone_graph.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Node:\n def __init__(self, label):\n self.label = label\n self.neighbors = []\n\nclass Solution:\n def clone_graph(self, node):\n if not node:\n return None\n \n cloned_start = Node(node.key)\n node_mapping = { node: cloned_start}\n queue = [node]\n\n while queue:\n node = queue.pop()\n cloned_node = node_mapping[node]\n\n for nbr in node.neighbors:\n if nbr not in node_mapping:\n cloned_nbr = Node(nbr.key)\n node_mapping[nbr] = cloned_nbr\n queue.append(nbr)\n else: \n cloned_nbr = node_mapping[nbr]\n\n cloned_node.neighbors.append(cloned_nbr)\n\n return cloned_start\n \n def clone_graph2(self, node):\n mapping = dict()\n return self.helper(node, 
mapping)\n \n def helper(self, node, mapping):\n if node in mapping:\n return mapping[node]\n \n cloned_node = Node(node.key)\n\n for nbr in node.neighbors:\n cloned_nbr = self.helper(nbr, mapping)\n cloned_node.neighbors.append(cloned_nbr)\n \n mapping[node] = cloned_node\n\n return cloned_node" }, { "alpha_fraction": 0.46014294028282166, "alphanum_fraction": 0.4892798364162445, "avg_line_length": 29.830509185791016, "blob_id": "7158e8326528ad2e49444a30d8d7c03a21ac2809", "content_id": "3efeb742df18450e068e723b1cca5de98b74f84e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1819, "license_type": "no_license", "max_line_length": 76, "num_lines": 59, "path": "/arrays/search_matrix.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.matrix import Matrix\nimport unittest\n\nclass Solution:\n def search(self, matrix, value):\n if value < matrix[0][0] or value > matrix[-1][-1]:\n return False\n \n row = self.get_row(matrix, value)\n return self.binary_search_row(matrix[row], value)\n\n def get_row(self, matrix, value):\n left, right = 0, matrix.row_count - 1\n while left <= right:\n mid = (left + right) // 2\n if value < matrix[mid][0]:\n right = mid - 1\n elif value > matrix[mid][-1]:\n left = mid + 1\n else:\n return mid\n return left\n \n def binary_search_row(self, nums, value):\n left, right = 0, len(nums) - 1\n while left <= right:\n mid = (right + left) // 2\n if value == nums[mid]:\n return True\n if value > nums[mid]:\n left = mid + 1\n else:\n right = mid - 1\n return False\n \n #time: O(col_count + row_count) - space: O(1)\n def search2(self, matrix, value):\n row, col = matrix.row_count - 1, 0\n while row >= 0 and col < matrix.col_count:\n if matrix[row][col] == value:\n return True\n elif value < matrix[row][col]:\n row -= 1\n else:\n col += 1\n return False\n\nclass Test(unittest.TestCase):\n matrix = Matrix([[1,3,5,7],[10,11,16,20],[23,30,34,50]])\n test_data = 
[(3, True), (9, False), (0, False), (56, False), (30, True)]\n\n def test_search(self):\n solution = Solution()\n for data in self.test_data:\n actual = solution.search2(self.matrix, data[0])\n self.assertEqual(actual, data[1])\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.46800732612609863, "alphanum_fraction": 0.4899451434612274, "avg_line_length": 17.86206817626953, "blob_id": "3a20d7ba3f47910110b7866b8c8f2166ff9c715c", "content_id": "854b7fc028ae3587edaf14de4f425e4701407f9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 43, "num_lines": 29, "path": "/arrays/subarray_sum_equals_k.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 560\n\nfrom collections import defaultdict\n\n# time: O(N)\n# space: O(N)\nclass Solution:\n def subarray_sum(self, nums, k):\n result = 0\n if not nums:\n return result\n \n sum_map = defaultdict(int)\n sum_map[0] = 1\n\n curr_sum = 0\n\n for num in nums:\n curr_sum += num\n \n result += sum_map[curr_sum - k]\n \n sum_map[curr_sum] += 1\n \n return result\n\nnums = [1, 1, 1]\nsolution = Solution()\nprint(solution.subarray_sum(nums, 2))\n" }, { "alpha_fraction": 0.5229430198669434, "alphanum_fraction": 0.5348101258277893, "avg_line_length": 27.11111068725586, "blob_id": "131aab339e534092763144e61a0387fabfa28663", "content_id": "0241b03fc7e06c9598ed7903a35be696f840e69f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1264, "license_type": "no_license", "max_line_length": 82, "num_lines": 45, "path": "/graphs/evaluate_division.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nclass Solution:\n def solve_queries(self, equations, values, queries):\n graph = self.build_graph(equations, values)\n\n result = []\n\n for query in queries:\n 
result.append(self.dfs(query[0], query[1], 1, graph, set()))\n \n return result\n\n def dfs(self, start, target, temp_result, graph, visited):\n if not start in graph or start in visited:\n return -1\n \n if start == target:\n return temp_result\n \n visited.add(start)\n\n for nbr, division in graph[start].items():\n result = self.dfs(nbr, target, temp_result * division, graph, visited)\n\n if result != -1: # found the target\n return result\n \n return -1\n \n def build_graph(self, equations, values):\n graph = defaultdict(dict)\n\n for i, eq in enumerate(equations):\n graph[eq[0]][eq[1]] = values[i]\n graph[eq[1]][eq[0]] = 1 / values[i]\n \n return graph\n\nequations = [ [\"a\", \"b\"], [\"b\", \"c\"] ]\nvalues = [2.0, 3.0]\nqueries = [ [\"a\", \"c\"], [\"b\", \"a\"], [\"a\", \"e\"], [\"a\", \"a\"], [\"x\", \"x\"] ]\n\nsolution = Solution()\nprint(solution.solve_queries(equations, values, queries))" }, { "alpha_fraction": 0.5442560911178589, "alphanum_fraction": 0.5574387907981873, "avg_line_length": 24.285715103149414, "blob_id": "653545660ac6e8f2707ac4d1c913812c19626edd", "content_id": "be74b52ba6ab51ea788fdc811ffbcdf9f6fabce3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 531, "license_type": "no_license", "max_line_length": 46, "num_lines": 21, "path": "/arrays/contains_duplicates.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def contains_duplicates(self, numbers):\n number_set = set(numbers)\n return len(numbers) != len(number_set)\n\n def contains_duplicates2(self, numbers):\n \"\"\"\n :type numbers: list\n :rtype : Boolean\n \"\"\"\n\n numbers.sort()\n for i in range(1, len(numbers)):\n if numbers[i] == numbers[i - 1]:\n return True\n \n return False\n\nnumbers = [1, 2, 1, 4]\nsolution = Solution()\nprint(solution.contains_duplicates(numbers))\n" }, { "alpha_fraction": 0.41013267636299133, "alphanum_fraction": 0.4306393265724182, 
"avg_line_length": 29.740739822387695, "blob_id": "4841829ccad9509d8f48a98c73f0a6367fc98b3b", "content_id": "f13b9d3be20a58c0691ba42cd38347e803db5bd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 829, "license_type": "no_license", "max_line_length": 192, "num_lines": 27, "path": "/binarySearch/sqrt.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def sqrt(self, n):\n if n == 0:\n return 0\n \n left, right = 1, n\n # while True:\n # mid = (left + right) // 2\n # if mid * mid > n:\n # right = mid - 1\n # else:\n # if (mid + 1) * (mid + 1) > n:\n # return mid\n # left = mid + 1\n\n # use the while left <= right and return left - 1 when looking for the max value that satisfies a condition. because we do left = mid + 1, the last value of left will be our result + 1\n while left <= right:\n mid = (left + right) // 2\n if mid * mid > n:\n right = mid - 1\n else:\n left = mid + 1\n \n return left - 1\n\nsolution = Solution()\nprint(solution.sqrt(24))" }, { "alpha_fraction": 0.5489902496337891, "alphanum_fraction": 0.5527299642562866, "avg_line_length": 30.0930233001709, "blob_id": "1a28c41b7cf5a093310ac4b7e357c1f94599cd11", "content_id": "e0d86b3e0dc931af3228d72ea959457a7cf836ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2674, "license_type": "no_license", "max_line_length": 110, "num_lines": 86, "path": "/graphs/course_schedule.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nclass Solution:\n def course_schedule(self, classes):\n if not classes:\n return[]\n \n graph = self.build_graph(classes)\n\n result = []\n path = []\n discovered = set()\n\n for node in graph:\n self.topological_sort(node, graph, path, discovered, result)\n \n return result.reverse()\n\n def build_graph(self, classes):\n graph = defaultdict(set)\n for dep in 
classes:\n graph[dep[1]].add(dep[0])\n \n return graph\n\n def topological_sort(self, node, graph, path, discovered, result):\n if node in discovered:\n return\n \n discovered.add(node)\n path.append(node)\n\n for nbr in graph[node]:\n if nbr in path:\n raise Exception('Cyclic dependency. Cannot finish all the courses')\n \n self.topological_sort(nbr, graph, path, discovered, result)\n \n result.append(result)\n path.pop()\n \n\n def can_finish(self, courses_count, prerequisites):\n \" Returns True if can finish all the courses \"\n nb_prerequisites = defaultdict(int) # mapping between each course and the number of its pre-requisites\n preq_map = defaultdict(list) # mapping between each course and the courses depending on it\n\n for after, before in prerequisites:\n nb_prerequisites[after] += 1\n preq_map[before].append(after)\n \n # get the list of courses with no dependencies\n can_take = set(range(courses_count)) - set(nb_prerequisites.keys())\n\n while can_take:\n course = can_take.pop()\n courses_count -= 1\n for dep in preq_map[course]:\n nb_prerequisites[dep] -= 1\n if nb_prerequisites[dep] == 0:\n can_take.add(dep)\n \n return courses_count == 0\n \n def get_order(self, num_courses, prerequisites):\n nb_prerequisites = defaultdict(int)\n preq_list = defaultdict(list)\n\n result = []\n\n for (after, before) in prerequisites:\n nb_prerequisites[after] += 1\n preq_list[before].append(after)\n \n can_take = set(i for i in range(num_courses)) - set(nb_prerequisites.keys())\n\n while can_take:\n course = can_take.pop()\n result.append(course)\n\n for dep in preq_list[course]:\n nb_prerequisites[dep] -= 1\n if nb_prerequisites[dep] == 0:\n can_take.append(dep)\n \n return result if len(result) == num_courses else []\n" }, { "alpha_fraction": 0.40326741337776184, "alphanum_fraction": 0.422184020280838, "avg_line_length": 21.384614944458008, "blob_id": "770b7bf30c815c4002ec0f01e22200a865d0a329", "content_id": "08819356573e45ed7af97488c156168220af11f7", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1163, "license_type": "no_license", "max_line_length": 57, "num_lines": 52, "path": "/arrays/reshape_matrix.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 566\n\n# time: O(N * M)\n# space: O(N * M)\nclass Solution:\n def reshape(self, nums, r, c):\n if not nums or len(nums) * len(nums[0]) != r * c:\n return nums\n\n rows, cols = len(nums), len(nums[0])\n \n queue = []\n for row in range(rows):\n for col in range(cols):\n queue.append(nums[row][col])\n \n res, count =[], 0\n\n for row in range(r):\n res.append([])\n for col in range(c):\n res[-1].append(queue[count])\n count += 1\n \n return res\n\n# time: O(N * M)\n# space: O(N * M)\nclass Solution2:\n def reshape(self, nums, r, c):\n if not nums or len(nums) * len(nums[0]) != r * c:\n return nums\n \n res = [[] * r]\n\n rows = cols = 0\n\n for i in range(len(nums)):\n for j in range(len(nums[0])):\n res[rows].append(nums[i][j])\n\n cols += 1\n if cols == c:\n rows += 1\n cols = 0\n \n return res\n\n\nnums = [[1,2], [3,4]]\nsolution = Solution2()\nprint(solution.reshape(nums, 1, 4))" }, { "alpha_fraction": 0.46124279499053955, "alphanum_fraction": 0.4817424714565277, "avg_line_length": 21.955883026123047, "blob_id": "20491d52aaba69d50e8fe3802ab15471963f89e7", "content_id": "d703e3e954c8d47004a9304b425615039dec4a2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1561, "license_type": "no_license", "max_line_length": 68, "num_lines": 68, "path": "/trees/maximum_width_binary_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#662\n\nfrom utils.treeNode import TreeNode\n\nclass Solution2:\n def max_width(self, root):\n queue = [(root, 0, 0)]\n curr_level, left, result = 0, 0, 0\n\n for node, pos, level in queue:\n if node:\n queue.append((node.left, 2 * pos, level + 1))\n 
queue.append((node.right, (2 * pos) + 1, level + 1))\n\n if curr_level != level:\n curr_level = level\n left = pos\n \n result = max(result, pos - left + 1)\n \n return result\n\n\n\nclass Solution:\n def max_width(self, root):\n if not root:\n return 0\n \n level = [(root, 0)]\n left, right, result = float('inf'), 0, 1\n\n while level:\n new_level = []\n for node, pos in level:\n if node:\n new_level.append((node.left, 2 * pos))\n new_level.append((node.right, (2 * pos) + 1))\n\n left = min(left, pos)\n right = max(right, pos)\n \n result = max(result, right - left + 1)\n left, right = float('inf'), 0\n level = new_level\n \n return result\n\none = TreeNode(1)\ntwo = TreeNode(1)\nthree = TreeNode(1)\nfour = TreeNode(1)\nfive = TreeNode(1)\nsix = TreeNode(1)\nseven = TreeNode(1)\n\none.left = two\ntwo.left = three\nthree.left = four\none.right = five\nfive.right = six\nsix.right = seven\n\nprint(one)\n\nprint('============')\nsolution = Solution2()\nprint(solution.max_width(one))\n" }, { "alpha_fraction": 0.483699768781662, "alphanum_fraction": 0.49507203698158264, "avg_line_length": 29, "blob_id": "da1811555f7e59e814b7ac6d0427005a0a440405", "content_id": "650adc592ed13ef95d13f0d95190fc43926bfa5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1319, "license_type": "no_license", "max_line_length": 83, "num_lines": 44, "path": "/strings/reverse_words_in_string.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def reverse_words(self, string):\n if not string:\n return ''\n \n # words = string.split()\n # return ' '.join(words[::-1])\n word_lists = [[]]\n for i, c in enumerate(string):\n if c != ' ':\n word_lists[-1].append(c)\n elif string[i] == ' ' and i < len(string) - 1 and string[i + 1] != ' ':\n word_lists.append([])\n \n words = [''.join(word_list) for word_list in word_lists]\n\n return ' '.join(words[::-1])\n\nclass Solution2(object):\n # time: 
O(n)\n # space: O(n) because we transform the str to list, otherwise it is O(1)\n def reverse_words(self, s):\n string_list = list(s)\n\n self.reverse(string_list, 0, len(string_list) - 1)\n\n string_list.append(' ')\n start = 0\n for i, c in enumerate(string_list):\n if string_list[i] == ' ':\n self.reverse(string_list, start, i - 1)\n start = i + 1\n \n string_list.pop()\n return ''.join(string_list)\n\n def reverse(self, s, left, right):\n while left < right:\n s[left], s[right] = s[right], s[left]\n left += 1\n right -= 1\n\nsolution = Solution2()\nprint(solution.reverse_words('The sky is blue'))" }, { "alpha_fraction": 0.6183816194534302, "alphanum_fraction": 0.620379626750946, "avg_line_length": 33.55172348022461, "blob_id": "c3b1885dc30649a06351b5be656267b47cf0fe62", "content_id": "04c2045c2387d3650475256256ba3855d9b8dd00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1001, "license_type": "no_license", "max_line_length": 100, "num_lines": 29, "path": "/strings/anagram_groups.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\n# Gotcha 1: there is no sort for strings.\n# You have to convert the word to a list, then sort it using list.sort(),\n# then reconstruct the string using ''.join(sorted_list)\n\n# Gotcha 2: defaultdict is part of the collections library\n\nclass Solution(object):\n # Time: O(n * k * log k) where n is the number of words, and k is the length of the longest word\n # Space: O(n * k) to hold the result - k * n is the total number of characters\n def group_anagrams(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: List[List[str]]\n \"\"\"\n\n sorted_dict = defaultdict(list)\n\n for word in words:\n letter_list = list(word) # or [c for c in word]\n letter_list.sort()\n sorted_word = ''.join(letter_list)\n sorted_dict[sorted_word].append(word)\n \n return list(sorted_dict.values())\n\nsolution = 
Solution()\nprint(solution.group_anagrams(['bat', 'car', 'atb', 'rca', 'aaa']))" }, { "alpha_fraction": 0.5025413036346436, "alphanum_fraction": 0.5362134575843811, "avg_line_length": 20.79166603088379, "blob_id": "b658d619d080596e7d1fe9fd02eb9b6ce85fe2e0", "content_id": "32e8f53e7c04b541814789dc01a5fe2c40e575aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1574, "license_type": "no_license", "max_line_length": 87, "num_lines": 72, "path": "/linkedList/intersection_of_two_linked_list.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.listNode import ListNode\n\nclass Solution:\n def get_intersection(self, head1, head2):\n if not head1 or not head2:\n return None\n \n l1, l2 = self.get_length(head1), self.get_length(head2)\n node1, node2 = self.move_ahead(head1, l1 - l2), self.move_ahead(head2, l2 - l1)\n\n while node1 and node2:\n if node1 == node2:\n return node1\n \n node1 = node1.next\n node2 = node2.next\n \n return None\n \n def get_length(self, head):\n count = 0\n node = head\n while node:\n count += 1\n node = node.next\n \n return count\n \n def move_ahead(self, head, l):\n if l <= 0:\n return head\n \n curr = head\n while l > 0:\n curr = curr.next\n l -= 1\n \n return curr\n\nclass Solution2:\n def get_intersection(self, head1, head2):\n if not head1 or not head2:\n return None\n \n savedA, savedB = head1, head2\n\n while head1 != head2:\n head1 = savedB if not head1 else head1.next\n head2 = savedA if not head2 else head2.next\n \n return head1\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\nsix = ListNode(6)\nseven = ListNode(7)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\nsix.next = seven\nseven.next = two\n\nprint(one)\nprint(six)\n\nsolution = Solution()\nprint(solution.get_intersection(one, six))\n \n" }, { "alpha_fraction": 0.45498085021972656, "alphanum_fraction": 
0.4722222089767456, "avg_line_length": 28.02777862548828, "blob_id": "db13e38ab8816120fd4a20c56a7f7daca074f3be", "content_id": "2312c423ff54c8db3a96c8258f14533d2ee95b3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1044, "license_type": "no_license", "max_line_length": 95, "num_lines": 36, "path": "/dynamicProgramming/perfect_squares.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from math import floor, sqrt\n\nclass Solution:\n def get_min_square_count(self, n):\n if n == 0:\n return 0\n memo = [-1 for _ in range(n + 1)]\n memo[0] = 0\n\n return self.get_min_square_rec(memo, n)\n \n def get_min_square_rec(self, memo, n):\n if memo[n] < 0:\n biggest_square = floor(sqrt(n)) ** 2\n memo[n] = (n // biggest_square) + self.get_min_square_rec(memo, n % biggest_square)\n \n return memo[n]\n\n def get_min_squares(self, n):\n memo = [0] + [float('inf') for _ in range(n)]\n \n for i in range(1, n + 1):\n min_count = float('inf')\n j = 1\n while i - j*j >= 0:\n min_count = min(min_count, 1 + memo[i - j * j])\n j += 1\n \n memo[i] = min_count\n \n return memo[-1]\n\nsolution = Solution()\nfor i in range(10):\n print(\"n = {} : {}\".format(i, solution.get_min_square_count(i)))\n print(\"n = {} : {}\".format(i, solution.get_min_squares(i)))" }, { "alpha_fraction": 0.5128694176673889, "alphanum_fraction": 0.5166825652122498, "avg_line_length": 23.418603897094727, "blob_id": "06f7e9276a6ebcec53826c0b7b1c16d2b1308364", "content_id": "11e277b96119898f10d563cf6e7c7f9280b39fba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1049, "license_type": "no_license", "max_line_length": 96, "num_lines": 43, "path": "/trees/substree_of_another_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 572\n\nfrom utils.treeNode import TreeNode\n\nclass Solution:\n def is_substree(self, s, t):\n return self.traverse(s, 
t)\n \n def traverse(self, s, t):\n return s and (self.equal(s, t) or self.traverse(s.left, t) or self.traverse(s.right, t))\n \n def equal(self, s, t):\n if not s and not t:\n return True\n \n if not s or not t or s.value != t.value:\n return False\n \n return self.equal(s.left, t.left) and self.equal(s.right, t.right)\n\n# time: O(m + n)\n# space: O(m + n)\n\nclass Solution2:\n def is_substree(self, s, t):\n def serialize(root):\n if not root:\n serial.append('#')\n return\n \n serial.append(str(root.value))\n serialize(root.left)\n serialize(root.right)\n \n serial = []\n serialize(s)\n s_serialized = ','.join(serial)\n\n serial = []\n serialize(t)\n t_serialized = ','.join(serial)\n\n return t_serialized in s_serialized" }, { "alpha_fraction": 0.520370364189148, "alphanum_fraction": 0.5351851582527161, "avg_line_length": 32.8125, "blob_id": "3e1c10dd89a66c9c3f29cedc713c00bdd49f20e9", "content_id": "a2259cd4f49315322688ad601d0aae789a5a5dce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 540, "license_type": "no_license", "max_line_length": 88, "num_lines": 16, "path": "/strings/segments_in_a_string.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def countSegments(self, string):\n count = 0\n for i, char in enumerate(string):\n if char != ' ' and (i == 0 or string[i - 1] == ' '):\n count += 1\n \n return count\n \n # time: O(N)\n # space: O(N) because we have to build the array of results first\n def connt_segments2(self, s):\n return sum([s[i] != ' ' and (i == 0 or s[i - 1] == ' ') for i in range(len(s))])\n\nsolution = Solution()\nprint(solution.connt_segments2('Hello, my name is Aymane'))" }, { "alpha_fraction": 0.5008389353752136, "alphanum_fraction": 0.5151006579399109, "avg_line_length": 18.883333206176758, "blob_id": "cb476aaf1516ece8a052217f28b7adfb860d6f7a", "content_id": "dc9b8d628ad2c646236251a27e9aff930cbb02f2", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1192, "license_type": "no_license", "max_line_length": 76, "num_lines": 60, "path": "/linkedList/split_linked_list_in_parts.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 725\n\nfrom utils.listNode import ListNode\n\nclass Solution:\n def split(self, head, k):\n if not head:\n return []\n \n nodes_count = self.get_count(head)\n part_length, odd_parts = divmod(nodes_count, k)\n\n result = []\n prev, node = None, head\n\n for _ in range(k):\n required = part_length\n if odd_parts:\n required += 1\n odd_parts -= 1\n \n result.append(node)\n\n for _ in range(required):\n # we'll only get here if required > 0, i.e. node is not null\n prev, node = node, node.next\n\n if prev:\n prev.next = None\n \n return result\n \n def get_count(self, head):\n count = 0\n\n while head:\n head = head.next\n count += 1\n \n return count\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\nsix = ListNode(6)\nseven = ListNode(7)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\nfive.next = six\nsix.next = seven\n\nprint(one)\n\nsolution = Solution()\nprint(solution.split(one, 10))" }, { "alpha_fraction": 0.536796510219574, "alphanum_fraction": 0.560606062412262, "avg_line_length": 28.838708877563477, "blob_id": "b5111ee6a236026d18bffa955f7b79e95e4839a5", "content_id": "9a3c3e658af31abf0e4bba24d3c1685fa4202c18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 924, "license_type": "no_license", "max_line_length": 71, "num_lines": 31, "path": "/trees/lowest_common_ancestor.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n #binary search tree\n def get_lca_bst(self, root, node1, node2):\n if not node1 or not node2 or not root:\n return None\n \n if not root or root == 
node1 or root == node2:\n return root\n \n if (root.value - node1.value) * (root.value - node2.value) < 0:\n return root\n \n if root.value > node1.value:\n return self.get_lca(root.left, node1, node2)\n \n return self.get_lca(root.right, node1, node2)\n\n #O(N) time and space\n def get_lca(self, root, node1, node2):\n if not root or root == node1 or root == node2:\n return root\n \n left_lca = self.get_lca(root.left, node1, node2)\n right_lca = self.get_lca(root.right, node1, node2)\n\n if left_lca and right_lca:\n return root\n\n return left_lca or right_lca" }, { "alpha_fraction": 0.45028409361839294, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 26.799999237060547, "blob_id": "7894360e805b65d57a802e9e0f5e6fbc57fd8a7b", "content_id": "014f45fd6633db5a5af581297bf5e3f861ac59a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 704, "license_type": "no_license", "max_line_length": 52, "num_lines": 25, "path": "/graphs/is_bypartite.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#time: O(N**2) - space: O(N)\n\nclass Solution:\n def is_bypartite(self, graph):\n colors = dict()\n for node in range(len(graph)):\n if node not in colors[node]:\n colors[node] = 0\n if not self.dfs(node, graph, colors):\n return False\n \n return True\n \n def dfs(self, node, graph, colors):\n for nbr in graph[node]:\n if nbr in colors:\n if colors[nbr] == colors[node]:\n return False\n else:\n colors[nbr] = 1 - colors[node]\n\n if not self.dfs(nbr, graph, colors):\n return False\n \n return True\n \n " }, { "alpha_fraction": 0.44018059968948364, "alphanum_fraction": 0.45711061358451843, "avg_line_length": 25.058822631835938, "blob_id": "e26035f27c54150bd2406be747429161502083bd", "content_id": "8e75a48642c9623a57786bbbc2d75fc464b260fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 886, "license_type": "no_license", 
"max_line_length": 75, "num_lines": 34, "path": "/strings/countAndSay.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "def countAndSay(n):\n result = '1'\n for _ in range(n - 1):\n count, last = 0, None\n newString = ''\n\n for digit in result:\n if last is None or digit == last:\n count += 1\n last = digit\n else:\n newString += str(count) + last\n last = digit\n count = 1\n \n newString += str(count) + last\n result = newString\n \n return result\n\n# Time: O(2 ^ n) because the sequence at worst double during each iteration\n# Space: O(2 ^ n)\ndef countAndSay2 (n):\n sequence = [1]\n for _ in range(n - 1):\n next = []\n for digit in sequence:\n if not next or next[-1] != digit:\n next += [1, digit]\n else:\n next[-2] = next[-2] + 1\n sequence = next\n \n return ''.join(map(str, sequence))\n" }, { "alpha_fraction": 0.7666666507720947, "alphanum_fraction": 0.7666666507720947, "avg_line_length": 19, "blob_id": "894b5b74091c171e23d93416e005afa610283db5", "content_id": "4ce99688d1e2fb38e1fd4fa4ae15c636896331e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 60, "license_type": "no_license", "max_line_length": 44, "num_lines": 3, "path": "/README.md", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# python_algo\n\nAlgorithms, mostly from Leetcode, in Python.\n" }, { "alpha_fraction": 0.40763968229293823, "alphanum_fraction": 0.42816418409347534, "avg_line_length": 23.375, "blob_id": "70787ab96e05368ebe1728b27b395d33d1cf050e", "content_id": "7dabaf6731bd676d222d2edc656efaf1f49b8064", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1754, "license_type": "no_license", "max_line_length": 74, "num_lines": 72, "path": "/arrays/k_diif_pairs.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 532\n\nfrom collections import Counter\n\n# time: O(N)\n# space: O(N)\nclass Solution:\n 
def k_diff_pairs(self, nums, k):\n if k < 0:\n return 0\n \n freq = Counter(nums)\n pairs = 0\n\n for num in freq:\n if k == 0:\n if freq[num] > 1:\n pairs += 1\n \n else:\n if k + num in freq: # this will ensure we get unique pairs\n pairs += 1\n \n return pairs\n\n# time: O(N log N + N)\n# space: O(1)\nclass Solution2:\n def k_diff_pairs(self, nums, k):\n result = []\n if not nums or len(nums) < 2:\n return result\n \n nums.sort()\n left, right = 0, 1\n \n while left < len(nums) and right < len(nums):\n diff = nums[right] - nums[left]\n if diff == k:\n result.append((nums[left], nums[right]))\n # move both left and right\n right = self.move(right, nums)\n \n left = self.move(left, nums)\n \n elif diff < k:\n # move right\n right = self.move(right, nums)\n\n else:\n # move left\n left = self.move(left, nums)\n \n if left == right:\n # move right\n right = self.move(right, nums)\n \n return result\n\n def move(self, index, nums):\n index += 1\n while index < len(nums) and nums[index] == nums[index - 1]:\n index += 1\n \n return index\n\nnums, k = [3, 1, 4, 1, 5], 2\n# nums, k = [1, 2, 3, 4, 5], 1\n# nums, k = [3, 1, 4, 1, 5], 0\n\nsolution = Solution()\nprint(solution.k_diff_pairs(nums, k))" }, { "alpha_fraction": 0.43679457902908325, "alphanum_fraction": 0.44920992851257324, "avg_line_length": 23.63888931274414, "blob_id": "a88d87c0f9066dd4f3460fdfc85580631e7035fc", "content_id": "0c4c88237de9dcb4e4a7515fe545fae38b9973f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 886, "license_type": "no_license", "max_line_length": 57, "num_lines": 36, "path": "/graphs/number_of_islands.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.matrix import Matrix\n\nclass Solution:\n def get_islands_count(self, grid):\n if not grid or not len(grid) or not len(grid[0]):\n return 0\n \n count = 0\n\n for row in range(len(grid)):\n for col in range(len(grid[0])):\n if grid[row][col] == 
'1':\n self.dfs(row, col, grid)\n count += 1\n \n return count\n\n def dfs(self, row, col, grid):\n \"\"\"\n type row: int\n type col: int\n type grid: Matrix\n rtype: None\n \"\"\"\n if not grid.is_valid_cell(row, col):\n return\n \n if grid[row][col] != '1':\n return\n \n grid[row][col] = 'X'\n\n self.dfs(row - 1, col, grid)\n self.dfs(row + 1, col, grid)\n self.dfs(row, col + 1, grid)\n self.dfs(row, col - 1, grid)" }, { "alpha_fraction": 0.5084666013717651, "alphanum_fraction": 0.5235183238983154, "avg_line_length": 24.45783042907715, "blob_id": "9d1198187fc0c88e522e185e3f39eb730bb3110c", "content_id": "8a90de9e20419cbb822370e2185197a48baaf0ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2126, "license_type": "no_license", "max_line_length": 106, "num_lines": 83, "path": "/arrays/my_calendar_1.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 729\n\nfrom bisect import bisect\n\nclass Node:\n def __init__(self, start, end):\n self.start = start\n self.end = end\n self.left = self.right = None\n\n def insert(self, node):\n if node.start >= self.end:\n if not self.right:\n self.right = node\n return True\n \n return self.right.insert(node)\n \n if node.end <= self.start:\n if not self.left:\n self.left = node\n return True\n \n return self.left.insert(node)\n \n return False\n\n# time: O(N * log N) in average cases. time needed to insert N events in the tree. 
Worst case: O(N**2) \n# space: O(N) for the tree structure\nclass MyCalendar:\n def __init__(self):\n self.root = None\n \n def book(self, start, end):\n node = Node(start, end)\n\n if not self.root:\n self.root = node\n return True\n \n return self.root.insert(node)\n\nclass MyCalendar2:\n def __init__(self):\n self.events = []\n \n def book(self, start, end):\n if start >= end:\n raise ValueError('Start should be smaller than End')\n\n if not self.events:\n self.events.append((start, end))\n return True\n \n start_list = list(map(lambda event: event[0], self.events))\n index = bisect(start_list, start)\n\n if index == len(self.events) and self.events[-1][1] > start:\n return False\n \n if index == 0 and self.events[0][0] < end:\n return False\n \n if 0 < index < len(self.events) - 1:\n prev, after = self.events[index - 1], self.events[index]\n\n if prev[1] > start or after[0] < end:\n return False\n \n self.events.insert(index, (start, end))\n return True\n \n def print_events(self):\n print(self.events)\n\ncalendar = MyCalendar()\n\nprint(calendar.book(10, 20))\nprint(calendar.book(15, 25))\nprint(calendar.book(20, 30))\nprint(calendar.book(30, 40))\n\n# calendar.print_events()\n\n\n\n \n\n" }, { "alpha_fraction": 0.4878634214401245, "alphanum_fraction": 0.49639904499053955, "avg_line_length": 28.992000579833984, "blob_id": "6960f61bcc72cf4d6ee5038c8f2484740ab14456", "content_id": "dcc0668c3f176b0c2fdfb1af0dfa1280e0603f0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3749, "license_type": "no_license", "max_line_length": 109, "num_lines": 125, "path": "/strings/substring_with_at_least_k_repeating_characters.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import Counter\nimport unittest\n\nclass Solution(object):\n ### time: O(N**3), space: O(N)\n def length_substring(self, string, k):\n \"\"\"\n :type string: str\n :type k: int\n :rtype: int\n \"\"\" \n if 
not string:\n return 0\n\n if k == 1:\n return len(string) # return the length of the entire string\n\n longest = 0\n\n for i in range(1, len(string)): # iterate over the string\n for j in range(0, i): # get all the possible susbstrings ending at i\n substring = string[j: i + 1] \n if self.is_valid_substring(substring, k): # check if the current substring meets the criteria\n longest = max(longest, len(substring))\n \n return longest\n \n def is_valid_substring(self, substring, k):\n \"\"\"\n :type substring: str\n :type k: int\n :rtype : boolean\n \"\"\"\n frequencies = Counter(substring)\n return all(frequencies[c] >= k for c in set(substring))\n \n # time: O(N**3), space: O(N)\n def length_substring2(self, string, k):\n \"\"\"\n :type string: str\n :type k: int\n :rtype: int\n \"\"\"\n if not string:\n return 0\n\n if k == 1:\n return len(string) # return the length of the entire string\n\n to_split = [string]\n longest = 0\n\n while to_split:\n t = to_split.pop()\n splitted = [t]\n freq = Counter(t)\n for c in freq:\n if freq[c] < k:\n new_splitted = []\n for spl in splitted:\n new_splitted += spl.split(c)\n splitted = new_splitted\n \n if len(splitted) == 1:\n longest = max(longest, len(splitted[0]))\n else:\n to_split += [sub for sub in splitted if len(sub) > longest]\n \n return longest\n\n def length_substring4(self, s, k):\n if not s or len(s) < k:\n return 0\n\n if k == 1:\n return len(s)\n \n longest = 0\n to_split = [s]\n\n while to_split:\n t = to_split.pop()\n frequencies = Counter(t)\n new_splitted = []\n\n for c in frequencies:\n if frequencies[c] < k: # t is splittable\n new_splitted += t.split(c)\n \n if not new_splitted: # t is not splittable:\n longest = max(longest, len(t))\n else: # t was splittable, add the new splitted elements to to_split\n to_split += [sp for sp in new_splitted if len(sp) > longest]\n \n return longest\n\n # recursive method \n def length_substring3(self, string, k):\n return self.helper(string, 0, len(string), k)\n\n def 
helper(self, string, start, end, k):\n if end - start < k:\n return 0\n \n substring = string[start : end]\n freq = Counter(substring)\n for i, c in enumerate(substring):\n if freq[c] < k: # found an infrequent char, split by it\n left = self.helper(string, start, i, k)\n right = self.helper(string, i + 1, end, k)\n return max(left, right)\n \n return end - start # all chars are frequent\n\n\nclass Test(unittest.TestCase):\n test_data = [('aaabb', 3, 3), ('ababbc', 2, 5), ('aabbccbcdeee', 3, 6)]\n def test_length_substring(self):\n solution = Solution()\n for data in self.test_data:\n actual = solution.length_substring4(data[0], data[1])\n self.assertEqual(actual, data[2])\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.49291783571243286, "alphanum_fraction": 0.5269121527671814, "avg_line_length": 22.600000381469727, "blob_id": "38f54ab1ca559555c0fecfc1913b66f073bd4bf5", "content_id": "14f68d75d90553e7bb2500bf2eefb800715e4560", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 353, "license_type": "no_license", "max_line_length": 46, "num_lines": 15, "path": "/dynamicProgramming/count_numbers_with_unique_digits.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_unique_count(self, n):\n if n == 0:\n return 1\n res = 10\n available_numbers = 9\n\n for digit in range(2, min(n, 10) + 1):\n available_numbers *= 11 - digit\n res += available_numbers\n \n return res\n\nsolution = Solution()\nprint(solution.get_unique_count(3))" }, { "alpha_fraction": 0.5950704216957092, "alphanum_fraction": 0.6044601202011108, "avg_line_length": 21.70270347595215, "blob_id": "77245307270630145b9fd8b2b89ace6b73efdc9d", "content_id": "7843e16ab4db1b74804f510d36a35c3c551bda14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 852, "license_type": "no_license", "max_line_length": 107, "num_lines": 37, 
"path": "/trees/most_frequent_substree_sum.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 508\n\nfrom utils.treeNode import TreeNode\nfrom collections import defaultdict\n\n# time: O(N)\n# space: O(N)\nclass Solution:\n def most_frequent_substree_sum(self, root):\n sum_mapping = defaultdict(int)\n\n def helper(node):\n if not node:\n return 0\n \n substree_sum = node.value + helper(node.left) + helper(node.right)\n sum_mapping[substree_sum] += 1\n\n return substree_sum\n \n helper(root)\n\n max_frequency = max(sum_mapping.values())\n return [substree_sum for substree_sum in sum_mapping if sum_mapping[substree_sum] == max_frequency]\n\none = TreeNode(6)\ntwo = TreeNode(2)\nthree = TreeNode(-5)\n\none.left = two\none.right = three\n\nprint(one)\nprint('===========')\n\nsolution = Solution()\nprint(solution.most_frequent_substree_sum(one))\n\n\n\n \n" }, { "alpha_fraction": 0.5488371849060059, "alphanum_fraction": 0.5643410682678223, "avg_line_length": 28.31818199157715, "blob_id": "65271119b9d50f4fd1cb9815dca639b085374a9e", "content_id": "fb545874836fe1829842ecb0ccc6009bcc875e74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 139, "num_lines": 22, "path": "/strings/repeated_string_pattern.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def repeated_string_pattern(self, string):\n if not string:\n return False\n\n length = len(string)\n\n for i in range(2, (length // 2) + 1):\n pattern_length = length // i\n if length % i == 0 and all(string[j * pattern_length : (j + 1) * pattern_length] == string[:pattern_length] for j in range(i)):\n return True\n \n return False\n\n def repeated_string_pattern2(self, string):\n if not string:\n return False\n\n return string in (string * 2)[1 : -1]\n\nsolution = Solution()\nprint(solution.repeated_string_pattern2('ababab'))\n" }, { 
"alpha_fraction": 0.4673076868057251, "alphanum_fraction": 0.4980769157409668, "avg_line_length": 22.68181800842285, "blob_id": "c3bb6ef4c818dfe42a2ca07e23d9a8b347fb0e1a", "content_id": "b3a69196b15ee68720da4aa5729344fb2bbe29c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "no_license", "max_line_length": 55, "num_lines": 22, "path": "/arrays/shortest_unsorted_continuous_subarray.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 581\n\n#time: O(N log N)\n# space: O(N)\nclass Solution:\n def shortest_subarray(self, nums):\n if not nums:\n return []\n \n s_nums = sorted(nums)\n left, right = len(nums) - 1, 0\n\n for i in range(len(nums)):\n if nums[i] != s_nums[i]:\n left = min(left, i)\n right = max(right, i)\n \n return right - left + 1 if right >= left else 0\n\nnums = [2, 6, 4, 8, 10, 9, 15]\nsolution = Solution()\nprint(solution.shortest_subarray(nums))" }, { "alpha_fraction": 0.5179738402366638, "alphanum_fraction": 0.540032684803009, "avg_line_length": 26.840909957885742, "blob_id": "ffb05fe281b73db6b4ae34c8188aa042cedcbf22", "content_id": "20939de14830b0f08d794282598995955f22bb51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1224, "license_type": "no_license", "max_line_length": 86, "num_lines": 44, "path": "/arrays/stock_market.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n # At most one transaction\n def get_max_profit(self, prices):\n max_profit = 0\n if not prices:\n return max_profit\n buy = prices[0]\n\n for i in range(1, len(prices)):\n price = prices[i]\n buy = min(price, buy)\n max_profit = max(max_profit, price - buy)\n \n return max_profit\n\n # At most one transaction\n def get_max_profit3(self, prices):\n buy = float('inf')\n profit = 0\n for price in prices:\n buy = min(price, buy)\n profit = max(profit, price - buy)\n 
\n return profit\n \n # At most one transaction\n def get_max_profit2(self, prices):\n currMax = 0\n maxSoFar = 0\n\n for i in range(1, len(prices)):\n currMax = max(0, currMax + prices[i] - prices[i - 1])\n maxSoFar = max(currMax, maxSoFar)\n \n return maxSoFar\n \n # unlimited transactions\n def get_max_profit4(self, prices):\n return sum([max(prices[i] - prices[i - 1], 0) for i in range(1, len(prices))])\n\n\nsolution = Solution()\nprint(solution.get_max_profit([7, 1, 5, 3, 6, 4]))\nprint(solution.get_max_profit([7, 6, 5, 4, 3, 1]))" }, { "alpha_fraction": 0.4892384111881256, "alphanum_fraction": 0.5091059803962708, "avg_line_length": 27.069766998291016, "blob_id": "ce785d2e807cfaee972c1e19b99c8d3dea197e1e", "content_id": "9641516ac63127bec4728fcb34c4ec962ca2909a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1208, "license_type": "no_license", "max_line_length": 79, "num_lines": 43, "path": "/binarySearch/divide_two_integers.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def divide(self, dividend, divisor):\n if divisor == 0:\n raise ValueError('divisor should not be 0')\n \n result, right = 0, abs(divisor)\n while right <= abs(dividend):\n result += 1\n right += abs(divisor)\n \n is_result_negative = (\n (dividend > 0 and divisor < 0) or\n (dividend < 0 and divisor > 0)\n )\n\n return int('-{}'.format(str(result))) if is_result_negative else result\n \n def divide2(self, dividend, divisor):\n if divisor == 0:\n raise ValueError('divisor is 0')\n \n is_result_negative = (\n (dividend > 0 and divisor < 0) or\n (dividend < 0 and divisor > 0)\n )\n\n dividend, divisor = abs(dividend), abs(divisor)\n \n result = 1\n right = divisor\n while right <= dividend:\n result += result\n right += right\n \n while right > dividend:\n result -= 1\n right -= divisor\n \n return -result if is_result_negative else result\n\nsolution = Solution()\n# 
print(solution.divide(-10, 2))\nprint(solution.divide2(-10, 3))\n\n" }, { "alpha_fraction": 0.5263158082962036, "alphanum_fraction": 0.5385556817054749, "avg_line_length": 18.4761905670166, "blob_id": "9a6bb810a54f8c3fbdf2b9bdce423a1d49175ab0", "content_id": "3d91c9c59dde9e6af417797c807477aac8959d82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 817, "license_type": "no_license", "max_line_length": 51, "num_lines": 42, "path": "/linkedList/rotate_list.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.listNode import ListNode\n\nclass Solution:\n def rotate(self, head, k):\n if not head:\n return None\n \n count, node = 1, head\n while node.next:\n node = node.next\n count += 1\n \n # link the end and the start of the list\n node.next = head\n\n to_move = count - (k % count)\n\n while to_move > 0:\n node = node.next\n to_move -= 1\n \n # next is the new last element of the list \n head = node.next\n node.next = None\n\n return head\n \n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\n\nprint(one)\nsolution = Solution()\nprint(solution.rotate(one, 6))" }, { "alpha_fraction": 0.4576771557331085, "alphanum_fraction": 0.46653541922569275, "avg_line_length": 20.319149017333984, "blob_id": "8957b9d496b677fb3688101552153c2b948dec65", "content_id": "247351817460f23f8fa681d9313bdaa4500ba61c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1016, "license_type": "no_license", "max_line_length": 58, "num_lines": 47, "path": "/linkedList/reverse_linked_list2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.listNode import ListNode\n\nclass Solution:\n def reverse(self, head, m, n):\n if not head:\n return head\n \n dummy = prev = ListNode(None)\n node = head\n 
rev, rev_tail = None, None\n\n count = 1\n while node:\n if count > n:\n rev_tail.next = node\n break\n \n if count >= m:\n if count == m: # set the rev tail\n rev_tail = node\n rev, rev.next, node = node, rev, node.next\n prev.next = rev\n \n else:\n prev.next = node\n prev = prev.next\n node = node.next\n \n count += 1\n\n return dummy.next\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\n\nprint(one)\n\nsolution = Solution()\nprint(solution.reverse(one, 2, 4))\n \n\n" }, { "alpha_fraction": 0.5289255976676941, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 27, "blob_id": "edbffa51f2e75219f17ee1cc828846b8a8d4486a", "content_id": "7a811e6a314cd6c6ca463df81e27cfc9bd32caf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 363, "license_type": "no_license", "max_line_length": 52, "num_lines": 13, "path": "/arrays/two_sums.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def two_sum(self, arr, target):\n if not arr:\n return None\n\n remainders = dict()\n for i, num in enumerate(arr):\n if target - num in remainders:\n return (remainders[target - num], i)\n remainders[num] = i\n\nsolution = Solution()\nprint(solution.two_sum([1, 3, 5, 9], 10))" }, { "alpha_fraction": 0.4327731132507324, "alphanum_fraction": 0.45798319578170776, "avg_line_length": 18.75, "blob_id": "bb8c8c55f83a8618e2cf28dd48f817e2e9ba7337", "content_id": "e20cee5e62459cd3d802785639e50cfb3aef2b4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 53, "num_lines": 12, "path": "/dynamicProgramming/house_robber.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 198\n\nclass Solution:\n def rob(self, homes):\n if not 
homes:\n return 0\n curr, prev = 0, 0\n \n for home in homes:\n curr, prev = max(curr, prev + home), curr\n \n return curr\n\n" }, { "alpha_fraction": 0.43066516518592834, "alphanum_fraction": 0.4520856738090515, "avg_line_length": 25.909090042114258, "blob_id": "5f50365a1158313a700e222aa3254bea5a5b9c2b", "content_id": "cba2d3a3ba7b0744eeb3d9a1225264b4298926ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 887, "license_type": "no_license", "max_line_length": 70, "num_lines": 33, "path": "/arrays/pascal_triangle.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def pascal_triangle(self, numRows):\n pascal = []\n for k in range(numRows):\n pascal.append([1] * (k + 1))\n for i in range(1, k):\n pascal[k][i] = pascal[k - 1][i - 1] + pascal[k - 1][i]\n \n return pascal\n\n def pascal_triangle2(self, numRows):\n all_rows = []\n row = []\n\n for k in range(numRows):\n row.append(1)\n for i in range(k - 1, 0, -1):\n row[i] = row[i] + row[i - 1]\n all_rows.append(list(row))\n \n return all_rows\n \n def pascal_triangle_row(self, k):\n row = []\n for j in range(k + 1):\n row.append(1)\n for i in range(j - 1, 0, -1):\n row[i] = row[i] + row[i - 1]\n \n return row\n\nsolution = Solution()\nprint(solution.pascal_triangle_row(2))" }, { "alpha_fraction": 0.45717132091522217, "alphanum_fraction": 0.48007968068122864, "avg_line_length": 23.487804412841797, "blob_id": "f466514b2df2e3edb4a8e900b46fcd73fbaf07e6", "content_id": "c928d2f4e17bf917838285b37beff9c5785adf8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 90, "num_lines": 41, "path": "/arrays/search_insert_position.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\nclass Solution(object):\n def search_insert_position(self, arr, val):\n if not arr:\n 
return 0\n\n for i, d in enumerate(arr):\n if d >= val:\n return i\n \n return len(arr)\n\n def search_insert_position2(self, arr, val):\n if not arr:\n return 0\n\n left, right = 0, len(arr) - 1\n while left <= right:\n mid = (left + right) // 2\n if arr[mid] == val:\n return mid\n elif arr[mid] < val:\n left = mid + 1\n else:\n right = mid - 1\n \n return left\n\nclass Test(unittest.TestCase):\n arr = [1, 3, 5, 6]\n test_data = [(5, 2), (2, 1), (7, 4), (0, 0)]\n\n def test_search_insert_positiion(self):\n solution = Solution()\n\n for data in self.test_data:\n self.assertEqual(solution.search_insert_position2(self.arr, data[0]), data[1])\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.47668394446372986, "alphanum_fraction": 0.4853195250034332, "avg_line_length": 25.363636016845703, "blob_id": "0ff42bdae90ddd4d7c1824d4ed8db8c4a8f41949", "content_id": "fbfd71cb99bc875cef623ddf712f8672b222fc70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "no_license", "max_line_length": 63, "num_lines": 22, "path": "/arrays/my_calendar2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 731\n\n# time: o(N**2)\n# space: O(N)\nclass MyCalendar2:\n def __inint__(self):\n self.calendar = []\n self.overlap = []\n \n def book(self, start, end):\n for s, e in self.overlap:\n # conflict\n if s < end and start < e:\n return False\n \n for s, e in self.calendar:\n if s < end and start > e:\n #conflict. 
The intersection becomes an overlap\n self.overlap.append(max(start, s), min(end, e))\n \n self.calendar.append((start, end))\n return True" }, { "alpha_fraction": 0.4784482717514038, "alphanum_fraction": 0.4978448152542114, "avg_line_length": 22.25, "blob_id": "5757b0281aa680c380da6067b570fead3706b229", "content_id": "cb8540922f5cbced99240b7c367d4e15d4e25ad0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 464, "license_type": "no_license", "max_line_length": 63, "num_lines": 20, "path": "/arrays/position_of_large_groups.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 830\n\nclass Solution:\n def position_of_large_groups(self, chars):\n result, start = [], 0\n\n for i in range(len(chars)):\n if i == len(chars) - 1 or chars[i] != chars[i + 1]:\n if i - start + 1 >= 3:\n result.append([start, i])\n \n start = i + 1\n \n return result\n\n\n\nchars = \"abcdddeeeeaabbbcd\"\nsolution = Solution()\nprint(solution.position_of_large_groups(chars))" }, { "alpha_fraction": 0.4656616449356079, "alphanum_fraction": 0.4757118821144104, "avg_line_length": 23.70833396911621, "blob_id": "34e0c4102bb39d078ce628245767c518c15d9ae0", "content_id": "74966caae06d2241aa4c0fec2d124c4ad17ce5ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1194, "license_type": "no_license", "max_line_length": 91, "num_lines": 48, "path": "/strings/reverse_vowels.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\ndef reverse_vowels(str):\n if not str: return str\n\n vowels = ['a', 'e', 'i', 'o', 'u']\n\n list_str = list(str)\n\n head, tail = 0, len(list_str) - 1\n\n while head < tail:\n if list_str[head].lower() not in vowels:\n head += 1\n \n elif list_str[tail].lower() not in vowels:\n tail -= 1\n \n else:\n list_str[head], list_str[tail] = list_str[tail], list_str[head]\n head += 1\n tail -= 1\n \n return 
''.join(list_str)\n\nprint(reverse_vowels('leEtcode'))\n\nclass Solution:\n vowels = ['a', 'e', 'i', 'o', 'u']\n\n def reverse_vowels(self, string):\n if not string:\n return string\n \n string_list = list(string)\n head, tail = 0, len(string) - 1\n\n while head < tail:\n if string_list[head] not in self.vowels:\n head += 1\n elif string_list[tail] not in self.vowels:\n tail -= 1\n else:\n string_list[head], string_list[tail] = string_list[tail], string_list[head]\n head += 1\n tail -= 1\n \n return ''.join(string_list)\n " }, { "alpha_fraction": 0.5236842036247253, "alphanum_fraction": 0.5368421077728271, "avg_line_length": 26.214284896850586, "blob_id": "60469ef48a27549bb3f559fc8d0181c587addacf", "content_id": "c764802ea732794f7f358d955ddc95a6918405b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 380, "license_type": "no_license", "max_line_length": 72, "num_lines": 14, "path": "/arrays/max_chunks_to_make_sorted.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 769\n\nclass Solution:\n # a new chunk is form only if the current element is the max so far,\n # and it is where it is supposed to be \n def max_chunks(self, nums):\n result = max_so_far = 0\n\n for i, num in enumerate(nums):\n max_so_far = max(max_so_far, num)\n if max_so_far == i:\n result += 1\n \n return result" }, { "alpha_fraction": 0.5350553393363953, "alphanum_fraction": 0.5670356750488281, "avg_line_length": 31.559999465942383, "blob_id": "f09bebb1015665bf28c92af65d94ac65535afe99", "content_id": "81cb7320f812a2fa1f989248b2891a93823efb31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 813, "license_type": "no_license", "max_line_length": 67, "num_lines": 25, "path": "/dynamicProgramming/most_profit_assigning_work.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n\n # M = len(difficulties), N = 
len(workers)\n # time = O(M log M + N log N + M + N), but can omit the M and N\n # space = O(M)\n def max_profit_assignment(self, difficulty, profits, workers):\n jobs = list(zip(difficulty, profits))\n jobs.sort() # will sort by the first tuple element\n index, best_so_far, result = 0, 0, 0\n \n for worker in sorted(workers):\n while index < len(jobs) and worker >= jobs[index][0]:\n best_so_far = max(best_so_far, jobs[index][1])\n index += 1\n \n result += best_so_far\n \n return result\n\ndifficulty = [2,4,6,8,10]\nprofits = [10,20,30,40,50]\nworkers = [4,5,6,7]\n\nsolution = Solution()\nprint(solution.max_profit_assignment(difficulty, profits, workers))" }, { "alpha_fraction": 0.49048152565956116, "alphanum_fraction": 0.49440088868141174, "avg_line_length": 27.822580337524414, "blob_id": "3a6eb4be58d0a2baa5f0fc432daa478f04363098", "content_id": "5d77e51fd387ab3ca068529197e60693b0857612", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1786, "license_type": "no_license", "max_line_length": 147, "num_lines": 62, "path": "/graphs/word_ladder.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import defaultdict\nfrom string import ascii_lowercase\n\nclass Solution(object):\n # Time: O(n * k * k) to build the graph. n = # of words. 
k = max number of character in a word\n # O(b ^ (d/2)) to perform a bidrectional BFS search, where b is the branching factor ( the average number of children(neighbors) at each node),\n # and d is the depth\n # Space: O(n * k)\n\n def word_ladder(self, start_word, end_word, word_list):\n \"\"\"\n :type start_word: str\n :type end_word: str:\n :type word_list: Set[str]\n :rtye: int\n \"\"\"\n\n if not start_word or not end_word or not word_list:\n return 0\n\n word_list.add(end_word)\n\n graph = self.build_graph(word_list)\n\n visited = set()\n length = 0\n front, back = {start_word}, {end_word}\n\n while front:\n if front & back:\n return length\n\n new_front = set()\n for word in front:\n visited.add(word)\n for i in range(len(word)):\n wildcard = word[:i] + '-' + word[i + 1 :]\n new_words = graph[wildcard]\n new_words -= visited\n new_front |= new_words\n \n front = new_front\n length += 1\n\n if len(back) < len(front):\n front, back = back, front\n \n return 0\n\n def build_graph(self, word_list):\n \"\"\"\n :type word_list: Set[str]\n :rtype: defaultdict \n \"\"\"\n\n graph = defaultdict(set)\n for word in word_list:\n for i in range(len(word)):\n wildcard = word[:i] + '-' + word[i + 1 :]\n graph[wildcard].add(word)\n \n return graph" }, { "alpha_fraction": 0.5418006181716919, "alphanum_fraction": 0.5771704316139221, "avg_line_length": 27.272727966308594, "blob_id": "ba1afb9fbbf8e3af8c97e5b279c761293afaf41f", "content_id": "4bc52ff43c78a0bc1316fd076a2ce95b8198e8af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "no_license", "max_line_length": 85, "num_lines": 22, "path": "/arrays/product_of_array_except_self.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# This bidirectional technique is useful when we want to get cumulutative information\n# from the left and right of each index\nclass Solution:\n def get_product_array(self, nums):\n if not nums:\n 
return None\n result = [1]\n for i in range(1, len(nums)):\n result.append(result[-1] * nums[i - 1])\n \n product_right = 1\n for i in range(len(nums) - 1, -1, -1):\n result[i] *= product_right\n product_right *= nums[i]\n\n return result\n\nsolution = Solution()\nnums = [2, 3, 4, 5]\n[60, 40, 30, 24]\n60\nprint(solution.get_product_array(nums))\n" }, { "alpha_fraction": 0.4402061998844147, "alphanum_fraction": 0.4670103192329407, "avg_line_length": 28.42424201965332, "blob_id": "f46ab4b4eade4b282f38a02ac3098a59cd7d3520", "content_id": "3715a3c5a084acf62b879fcd11f4b82bdddcb83d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 970, "license_type": "no_license", "max_line_length": 74, "num_lines": 33, "path": "/binarySearch/arranging_coins.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_stack_count(self, n):\n stack_count = 0\n if n == 0:\n return stack_count\n \n remain = n\n while remain >= stack_count + 1:\n stack_count += 1\n remain -= stack_count\n \n return stack_count\n \n def get_stack_count2(self, n):\n if n == 0:\n return 0\n \n left, right = 1, n\n while left <= right:\n mid = (left + right) // 2\n s = mid * (mid + 1) // 2\n if s > n:\n right = mid - 1\n else:\n left = mid + 1\n \n return left - 1\n\nsolution = Solution()\nprint(\"n = {} : stack_count = {}\".format(5, solution.get_stack_count2(5)))\nprint(\"n = {} : stack_count = {}\".format(6, solution.get_stack_count2(6)))\nprint(\"n = {} : stack_count = {}\".format(8, solution.get_stack_count2(8)))\nprint(\"n = {} : stack_count = {}\".format(9, solution.get_stack_count2(9)))" }, { "alpha_fraction": 0.5294771790504456, "alphanum_fraction": 0.538375973701477, "avg_line_length": 19, "blob_id": "410dcb6ef05fe4015d653501bb89ed9f562f158f", "content_id": "136953d0941c62ded4f58e95f5e0663eea8bc763", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 899, "license_type": "no_license", "max_line_length": 41, "num_lines": 45, "path": "/linkedList/partition_list.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.listNode import ListNode\n\nclass Solution:\n # time: O(N)\n # space: O(1)\n def partition(self, head, pivot):\n if not head:\n return head\n \n s_head = smaller = ListNode(None)\n g_head = greater = ListNode(None)\n node = head\n\n while node:\n if node.value < pivot:\n smaller.next = node\n smaller = node\n else:\n greater.next = node\n greater = node\n \n node = node.next\n \n greater.next = None\n smaller.next = g_head.next\n\n return s_head.next\n\none = ListNode(1)\ntwo = ListNode(4)\nthree = ListNode(3)\nfour = ListNode(2)\nfive = ListNode(5)\nsix = ListNode(2)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\nfive.next = six\n\nprint(one)\n\nsolution = Solution()\nprint(solution.partition(one, 3))" }, { "alpha_fraction": 0.5313653349876404, "alphanum_fraction": 0.5608856081962585, "avg_line_length": 26.049999237060547, "blob_id": "8b5c84d1d8b3fadf1caf167f8cc08ce79ccc2598", "content_id": "75dfa4cbf1e46fe927630e47e541bde10b158916", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "no_license", "max_line_length": 104, "num_lines": 20, "path": "/trees/same_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n #O(min(N1, N2)) time and space \n def is_same_tree(self, node1, node2):\n \"\"\"\n type node1: TreeNode\n type node2: TreeNode\n rtype: bool\n \"\"\"\n if not node1 and not node2:\n return True\n\n if not node1 or not node2:\n return False\n \n if node1.value != node2.value:\n return False\n \n return self.is_same_tree(node1.left, node2.left) and self.is_same_tree(node1.right, node2.right)\n\n" }, { "alpha_fraction": 0.41703295707702637, "alphanum_fraction": 
0.4384615421295166, "avg_line_length": 29.86440658569336, "blob_id": "f6ac1b7f26f80fa7063bd22f20c56c39bd6d304f", "content_id": "a6201e886320d9acf25dc6cf195b617a4b2df49f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1820, "license_type": "no_license", "max_line_length": 77, "num_lines": 59, "path": "/dynamicProgramming/longest_common_sequence.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\nclass Solution:\n def get_longest_common_sequence(self, str1, str2):\n if not str1 or not str2:\n return ''\n \n max_length = float('-inf')\n \n num_rows, num_cols = len(str2) + 1, len(str1) + 1\n T = [[0 for _ in range(num_cols)] for _ in range(num_rows)]\n\n for i in range(1, num_rows):\n for j in range(1, num_cols):\n if str2[i - 1] != str1[j - 1]:\n # if the sequences do not need to be contigious\n T[i][j] = max(T[i - 1][j], T[i][j - 1])\n # if the sequence need to be contiguous\n T[i][j] = 0\n else:\n T[i][j] = T[i - 1][j - 1] + 1\n # to get the max length of the contiguous common sequence\n max_length = max(max_length, T[i][j])\n \n result = ''\n i = num_rows - 1\n j = num_cols - 1\n\n while T[i][j]:\n if T[i][j] == T[i - 1][j]:\n i -= 1\n elif T[i][j] == T[i][j - 1]:\n j -= 1\n elif T[i][j] == T[i - 1][j - 1] + 1:\n result += str2[i - 1]\n i -= 1\n j -= 1\n else:\n raise Exception('Error constructing table')\n \n return result[::-1]\n \n \nclass Test(unittest.TestCase):\n def test_longest_common_subsequence(self):\n solution = Solution()\n str1 = 'ABCDEFGHIJ'\n str2 = 'FOOBCDBCDEG'\n expected = 'BCDEG'\n actual = solution.get_longest_common_sequence(str1, str2)\n self.assertEqual(expected, actual)\n print('Success!')\n \ndef main():\n test = Test()\n test.test_longest_common_subsequence()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5235235095024109, "alphanum_fraction": 0.5255255103111267, "avg_line_length": 23.975000381469727, "blob_id": 
"5e98ba49ada3983b96e86ddc7bd6b841e8659344", "content_id": "b8a50ce544d17997a6a321b8f94dd52ceb53c1db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 999, "license_type": "no_license", "max_line_length": 67, "num_lines": 40, "path": "/trees/next_right_pointer.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass TreeLinkNode(TreeNode):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.next = None\n\n\nclass Solution:\n #O(N) time and space\n def set_next_right_pointers(self, node):\n if not node or not node.left:\n return\n\n # node has children\n node.left.next = node.right\n \n node.right.next = None if not node.next else node.next.left\n \n self.set_next_right_pointers(node.right)\n self.set_next_right_pointers(node.left)\n \n #O(N) time and space\n def set_next_right_pointers2(self, node):\n level = [node]\n\n while level and level[0]:\n prev = None\n next_level = []\n\n for node in level:\n if prev:\n prev.next = node\n \n prev = node\n\n next_level.append(node.left)\n next_level.append(node.right)\n\n level = next_level\n" }, { "alpha_fraction": 0.458156019449234, "alphanum_fraction": 0.4936170279979706, "avg_line_length": 36.157894134521484, "blob_id": "94ee590fdd8750d2bb8abf57851a89c06157ff21", "content_id": "ea1c7fdb11b3bbfe27c0c9c18376a7d5c01e916e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 705, "license_type": "no_license", "max_line_length": 84, "num_lines": 19, "path": "/dynamicProgramming/ones_and_zeroes.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_max_words(self, words, zeroes_count, ones_count):\n memo = [[0 for _ in range(ones_count + 1)] for _ in range(zeroes_count + 1)]\n\n for word in words:\n zeroes = sum([True for c in word if c == '0'])\n ones = len(word) - zeroes\n\n for i in 
range(zeroes_count + 1):\n for j in range(ones_count + 1):\n can_build = i >= zeroes and j >= ones\n if can_build:\n memo[i][j] = max(memo[i][j], 1 + memo[i - zeroes][j - ones])\n \n return memo[-1][-1]\n\nwords = [\"10\", \"0001\", \"111001\", \"1\", \"0\"]\nsolution = Solution()\nprint(solution.get_max_words(words, 5, 3))" }, { "alpha_fraction": 0.5932343006134033, "alphanum_fraction": 0.6361386179924011, "avg_line_length": 21.054546356201172, "blob_id": "90a9941fc5f0b38eef5bc61012e8bc7119017b05", "content_id": "f201f286abe1f17dc71aedb1f318535c66b6fced", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1212, "license_type": "no_license", "max_line_length": 72, "num_lines": 55, "path": "/trees/path_sum3.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\nfrom collections import defaultdict\n\nclass Solution:\n def get_paths_count(self, node, target):\n if not node:\n return 0\n sum_mapping = defaultdict(int)\n sum_mapping[0] = 1\n return self.helper(node, 0, target, sum_mapping)\n \n def helper(self, node, curr_sum, target, sum_mapping):\n if not node:\n return 0\n \n curr_sum += node.value\n\n result = sum_mapping[curr_sum - target]\n sum_mapping[curr_sum] += 1\n \n result += self.helper(node.left, curr_sum, target, sum_mapping)\n result += self.helper(node.right, curr_sum, target, sum_mapping)\n\n sum_mapping[curr_sum] -= 1\n return result\n \n\nnode1 = TreeNode(10)\nnode2 = TreeNode(5)\nnode3 = TreeNode(-3)\nnode4 = TreeNode(3)\nnode5 = TreeNode(2)\nnode6 = TreeNode(6)\nnode7 = TreeNode(11)\nnode8 = TreeNode(3)\nnode9 = TreeNode(-2)\nnode10 = TreeNode(1)\n\nnode1.left = node2\nnode1.right = node3\n\nnode2.left = node4\nnode2.right = node5\n\nnode4.left = node8\nnode4.right = node9\n\nnode5.left = node10\n\nnode3.right = node7\n# node6.left = node7\n\nprint(node1)\nsolution = Solution()\nprint(solution.get_paths_count(node1, 8))" }, { 
"alpha_fraction": 0.46930423378944397, "alphanum_fraction": 0.4802182912826538, "avg_line_length": 26.730770111083984, "blob_id": "8d180f74e8123912448319c69fef880c819f804e", "content_id": "fc0c4396e7f3e8d4d8a9e443754ec62e125e0b06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, "license_type": "no_license", "max_line_length": 67, "num_lines": 26, "path": "/arrays/combimation_sum_3.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_combinations(self, k, target):\n result = []\n self.backtrack([], 1, target, k, result)\n return result\n \n\n def backtrack(self, prefix, current_num, remaining, k, result):\n if len(prefix) > k or remaining < 0:\n return\n \n if remaining == 0 and len(prefix) == k:\n result.append(prefix)\n return\n \n for i in range(10 - current_num):\n self.backtrack(\n prefix + [current_num + i],\n current_num + i + 1,\n remaining - current_num - i,\n k,\n result\n )\n\nsolution = Solution()\nprint(solution.get_combinations(3, 9))\n " }, { "alpha_fraction": 0.4178082048892975, "alphanum_fraction": 0.47123289108276367, "avg_line_length": 28.239999771118164, "blob_id": "c611f688d4a03e13308a0c85f2e1650a1baac64a", "content_id": "ebe2231fe08063c72140d8175fd2a133486023ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 730, "license_type": "no_license", "max_line_length": 110, "num_lines": 25, "path": "/arrays/majority_element_2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_major(self, nums):\n candidate1, candidate2 = None, None\n count1, count2 = 0, 0\n\n for num in nums:\n if num == candidate1:\n count1 += 1\n elif num == candidate2:\n count2 += 1\n elif count1 == 0:\n candidate1 = num\n count1 = 1\n elif count2 == 0:\n candidate2 = num\n count2 = 1\n else:\n count1 -= 1\n count2 -= 1\n \n return [candidate for candidate in 
[candidate1, candidate2] if nums.count(candidate) > len(nums) // 3]\n\nsolution = Solution()\nnums = [1,2,3,2,2,1,3,1,2,1]\nprint(solution.get_major(nums))" }, { "alpha_fraction": 0.5058274865150452, "alphanum_fraction": 0.5268065333366394, "avg_line_length": 27.66666603088379, "blob_id": "d195d5e90f846c92dc990e8b11c5cea53d7032b2", "content_id": "472d7c767f73225da5045e7b4a000778f30afbc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 50, "num_lines": 15, "path": "/arrays/contains_duplicates2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def contains_duplicates(self, values, k):\n hash_set = set()\n for i, val in enumerate(values):\n if i > k:\n hash_set.remove(values[i - k - 1])\n if val in hash_set:\n return True\n hash_set.add(val)\n \n return False\n\nsolution = Solution()\nvalues = [1, 2, 3, 4, 1, 7, 5]\nprint(solution.contains_duplicates(values, 3))" }, { "alpha_fraction": 0.38486841320991516, "alphanum_fraction": 0.40625, "avg_line_length": 20.35087776184082, "blob_id": "1eaa9ab77f51c92bcc5051bad185881ac44b9023", "content_id": "e8a830f5c94b684ea753c4110efcccf9c1d592d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1216, "license_type": "no_license", "max_line_length": 71, "num_lines": 57, "path": "/dynamicProgramming/climbing_stairs.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n # O(N) time - O(1) space\n def climb_stairs(self, n):\n if n < 0:\n return 0\n \n if n <= 2:\n return n\n \n curr, prev = 2, 1\n for _ in range(2, n):\n curr, prev = curr + prev, curr\n \n return curr\n \n def climbStairs3(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n < 0:\n return 0\n \n if n <= 2:\n return n\n \n curr, prev = 1, 1\n \n for i in range(2, n + 1):\n curr, prev = curr + prev, 
curr\n \n return curr\n \n # O(N) time - O(N) space\n def climb_stairs2(self, n):\n memo = [0] + [-1 for _ in range(n)]\n return self.stairs(n, memo)\n \n def stairs(self, n, memo):\n if n < 0:\n return 0\n \n if n <= 2:\n return n\n \n if memo[n] != -1:\n return memo[n]\n \n result = self.stairs(n - 1, memo) + self.stairs(n - 2, memo)\n memo[n] = result\n return result\n\n \n\nsolution = Solution()\nfor i in range(7):\n print(\"climb_stairs({}) = {}\".format(i, solution.climb_stairs2(i)))" }, { "alpha_fraction": 0.5210084319114685, "alphanum_fraction": 0.5220588445663452, "avg_line_length": 27, "blob_id": "e465b1855524da278d4139ae5185d4e536b16a98", "content_id": "44264275b2bf5a81fd098839bcabcfbbdd512d9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "no_license", "max_line_length": 62, "num_lines": 34, "path": "/trees/validate_bst.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def is_valid_bst(self, node):\n return self.helper(node, float('-inf'), float('inf'))\n \n def helper(self, node, min_value, max_value):\n if not node:\n return True\n \n if node.value < min_value or node.value > max_value:\n return False\n \n return (\n self.helper(node.left, min_value, node.value)\n and self.helper(node.right, node.value, max_value)\n )\n \n def is_valid_bst2(self, node): \n self.is_valid = True\n self.prev_value = float('-inf')\n self.in_order(node)\n return self.is_valid\n \n def in_order(self, node):\n if not node or not self.is_valid:\n return\n \n self.in_order(node.left)\n\n if node.value <= self.prev_value:\n self.is_valid = False\n return\n self.prev_value = node.value\n \n self.in_order(node.right)\n" }, { "alpha_fraction": 0.5527831315994263, "alphanum_fraction": 0.5662187933921814, "avg_line_length": 27.38888931274414, "blob_id": "a6b74871606cc3a6b7c6a76c685e9462274918e2", "content_id": "71df8ed163174ed35787f6cb49f9c44237b2123c", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 90, "num_lines": 18, "path": "/trees/is_balanced.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n #O(N) time and space\n def is_balanced(self, root):\n return self.absHeight(root) != -1\n \n def absHeight(self, root):\n if not root:\n return 0\n \n left_height = self.absHeight(root.left)\n right_height = self.absHeight(root.right)\n\n if left_height == -1 or right_height == -1 or abs(left_height - right_height) > 1:\n return -1\n \n return 1 + max(left_height, right_height)\n \n\n" }, { "alpha_fraction": 0.524848461151123, "alphanum_fraction": 0.5321212410926819, "avg_line_length": 27.482759475708008, "blob_id": "9bee9d33da37088472bbb5c1a389505a9245750b", "content_id": "3125d5f1c0121d85b9ce6e2d709ecddf015d1d71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 825, "license_type": "no_license", "max_line_length": 99, "num_lines": 29, "path": "/trees/unique_bst2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n def get_unique_bst(self, n):\n result = []\n if n <= 0:\n return result\n\n return self.get_unique_bst_helper(1, n)\n \n def get_unique_bst_helper(self, start, end):\n result = []\n \n if start > end:\n return [None] # return [None] so that I can get in the for lefts and rights loops below\n \n for i in range(start, end + 1):\n lefts = self.get_unique_bst_helper(start, i - 1)\n rights = self.get_unique_bst_helper(i + 1, end)\n\n for left in lefts:\n for right in rights:\n root = TreeNode(i, left, right)\n result.append(root)\n \n return result\n\nsolution = Solution()\nprint(solution.get_unique_bst(3))" }, { "alpha_fraction": 0.5094339847564697, "alphanum_fraction": 
0.5249326229095459, "avg_line_length": 24.586206436157227, "blob_id": "622eaaaa4c1d052d24363bf2955245fd0cd8cddf", "content_id": "e28a5bca7b855c9f14d5ddf6fa234a3458f91edb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1484, "license_type": "no_license", "max_line_length": 54, "num_lines": 58, "path": "/trees/binary_tree_level_order_traversal.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n def get_level_order_traversal(self, root):\n \"\"\"\n type root: TreeNode\n rtype: List[List[Int]]\n \"\"\"\n result = []\n if not root:\n return result\n\n level_nodes = [root]\n while level_nodes:\n result.append([])\n new_level_nodes = []\n\n for node in level_nodes:\n result[-1].append(node.value)\n\n if node.left:\n new_level_nodes.append(node.left)\n if node.right:\n new_level_nodes.append(node.right)\n \n level_nodes = new_level_nodes\n \n return result[::-1]\n \n def get_level_order_traversal2(self, root):\n result = []\n if not root:\n return result\n \n self.helper(root, 0, result)\n return result[::-1]\n \n def helper(self, root, level, result):\n if not root:\n return\n \n if len(result) == level:\n # create a new array for this level\n result.append([])\n \n result[level].append(root.value)\n\n self.helper(root.left, level + 1, result)\n self.helper(root.right, level + 1, result)\n\nnode5 = TreeNode(5)\nnode4 = TreeNode(4)\nnode3 = TreeNode(3, node4, node5)\nnode2 = TreeNode(2)\nnode1 = TreeNode(1, node2, node3)\nprint(node1)\nsolution = Solution()\nprint(solution.get_level_order_traversal(node1))\n" }, { "alpha_fraction": 0.5124555230140686, "alphanum_fraction": 0.5302491188049316, "avg_line_length": 24.545454025268555, "blob_id": "cefa82d153ff34be62e0fcbf6cb6a49e0adbdd4a", "content_id": "366504ba66babe1260cfbbeff0ee90f63f1bff57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 281, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/dynamicProgramming/buy_sell_stock_cooldown.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 309\n\nclass Solution:\n def get_max_profit(self, prices):\n buy, sell, prev_sell = float('-inf'), 0, 0\n\n for price in prices:\n sell, prev_sell = max(prev_sell, buy + price), sell\n buy = max(buy, prev_sell - price)\n \n return sell\n" }, { "alpha_fraction": 0.4439716339111328, "alphanum_fraction": 0.47801417112350464, "avg_line_length": 28.41666603088379, "blob_id": "88c2ef01023f8b5bd2bca2a89db2c0eb8722eb8a", "content_id": "8b91c0dfe3f813e22f36d02191c4920c3a8104e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 705, "license_type": "no_license", "max_line_length": 98, "num_lines": 24, "path": "/dynamicProgramming/integer_break.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_max_break(self, n):\n memo = [0, 1]\n\n for i in range(2, n + 1):\n max_product = 0\n for j in range(1, (i // 2) + 1):\n max_product = max(max_product, max(j, memo[j]) * max(i - j, memo[i - j]))\n memo.append(max_product)\n\n return memo[-1]\n \n def get_max_break2(self, n):\n memo = [0 for _ in range(n + 1)]\n memo[1] = 1\n\n for i in range(2, n+1):\n memo[i] = max(max(j, memo[j]) * max(i - j, memo[i - j]) for j in range(1, (i//2) + 1))\n\n return memo[-1]\n\nsolution = Solution()\nfor i in range(2, 10):\n print('get_max_break({}) = {}'.format(i, solution.get_max_break2(i)))" }, { "alpha_fraction": 0.5474860072135925, "alphanum_fraction": 0.575419008731842, "avg_line_length": 34.79999923706055, "blob_id": "3616b5c01ea67ae304d8efacf960dabf2fb35430", "content_id": "fe9f5c112d5b024093fdf7985a0dee316001a619", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 716, "license_type": "no_license", 
"max_line_length": 102, "num_lines": 20, "path": "/arrays/sort_colors.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# iterate over the list, and keep track of where the next 0 and 1 should be placed\n# because 2 will always be last, replace the current element with 2\n# TAKEAWAY: when sorting in place, have a counter to keep track of where an element should placed next\nclass Solution:\n def sort_colors(self, colors):\n next_red, next_white = 0, 0\n for i, c in enumerate(colors):\n if c < 2:\n colors[i] = 2\n colors[next_white] = 1\n next_white += 1\n if c == 0:\n colors[next_red] = 0\n next_red += 1\n\nsolution = Solution()\ncolors = [2, 1, 2, 2, 1, 0, 0]\nprint(colors)\nsolution.sort_colors(colors)\nprint(colors)\n" }, { "alpha_fraction": 0.48117154836654663, "alphanum_fraction": 0.5125523209571838, "avg_line_length": 24.157894134521484, "blob_id": "f7a0bfb109df869dec43d4a098926ec4268b9051", "content_id": "63f26fc0f30a05ed2fcd5aedb0d0049fc5586264", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 478, "license_type": "no_license", "max_line_length": 49, "num_lines": 19, "path": "/arrays/move_zeroes.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def move_zeroes(self, numbers):\n if not numbers:\n return None\n \n insert_pos = 0\n for i, num in enumerate(numbers):\n if num != 0:\n numbers[insert_pos] = num\n insert_pos += 1\n \n for i in range(insert_pos, len(numbers)):\n numbers[i] = 0\n\n\nsolution = Solution()\nnumbers = [0, 0, 3, 0, 12, 5, 6, 0, 0, 0]\nsolution.move_zeroes(numbers)\nprint(numbers) " }, { "alpha_fraction": 0.4513981342315674, "alphanum_fraction": 0.4660452604293823, "avg_line_length": 25.85714340209961, "blob_id": "b402b6ccb1781f89643956790007c8bfa70cc05e", "content_id": "7171108d94c9263edd6bf28510bcc66205f08f5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 751, "license_type": "no_license", "max_line_length": 94, "num_lines": 28, "path": "/binarySearch/first_bad_version.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_first_bad_version(self, n):\n left, right = 1, n\n while left < right:\n mid = (left + right) // 2\n if not self.is_bad(mid):\n left = mid + 1 # this will avoid an infinite loop, because left <= mid < right\n else:\n right = mid\n \n return left\n\n def get_first_bad_version2(self, n):\n left, right = 1, n\n while left <= right:\n mid = (left + right) // 2\n if self.is_bad(mid):\n right = mid - 1\n else:\n left = mid + 1\n \n return left\n \n def is_bad(self, n):\n return n >= 4\n\nsolution = Solution()\nprint(solution.get_first_bad_version2(8))" }, { "alpha_fraction": 0.42052313685417175, "alphanum_fraction": 0.4386318027973175, "avg_line_length": 33.877193450927734, "blob_id": "009a546617bf8257ae375b221550db271a4bcb8d", "content_id": "db5f4936bf498048680e6fde9b867f3031af09ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1988, "license_type": "no_license", "max_line_length": 80, "num_lines": 57, "path": "/arrays/three_sum.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\nclass Solution(object):\n def three_sum(self, numbers):\n \"\"\"\n :type numbers: List[int]\n :rtype : List[List[int]]\n \"\"\"\n result = []\n if not numbers:\n return result\n numbers.sort()\n i = 0\n while i < len(numbers) - 2:\n left = i + 1\n right = len(numbers) - 1\n while left < right:\n triple_sum = numbers[i] + numbers[left] + numbers[right]\n\n if triple_sum == 0:\n result.append([numbers[i], numbers[left], numbers[right]])\n # move left to the next possible value\n left += 1\n while left < right and numbers[left] == numbers[left - 1]:\n left += 1\n # move right to the next possible value\n right -= 1\n while left < right and numbers[right] == 
numbers[right + 1]:\n right -= 1\n elif triple_sum < 0:\n # move left to the next possible value\n left += 1\n while left < right and numbers[left] == numbers[left - 1]:\n left += 1\n else:\n # move right to the next possible value\n right -= 1\n while left < right and numbers[right - 1] == numbers[right]:\n right -= 1\n \n # move i to the next possible value\n i += 1\n while i < len(numbers) - 2 and numbers[i] == numbers[i - 1]:\n i += 1\n \n return result\n\nclass Test(unittest.TestCase):\n test_data = [([-1, 0, 1, 2, -1, 4], [[-1, -1, 2], [-1, 0, 1]])]\n\n def test_three_way(self):\n solution = Solution()\n for data in self.test_data:\n self.assertEqual(solution.three_sum(data[0]), data[1])\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5153751373291016, "alphanum_fraction": 0.5387454032897949, "avg_line_length": 25.25806427001953, "blob_id": "844366823fdaeb56dec4b72a1ec34c9c0876c6f3", "content_id": "b41a3b145347f17d93b25fe0dc34dd7f084c4f27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 813, "license_type": "no_license", "max_line_length": 82, "num_lines": 31, "path": "/arrays/find_redundant_connection.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#684\n\nfrom collections import defaultdict\n\nclass Solution:\n def find_redundant_connection(self, edges):\n graph = defaultdict(set)\n\n for u, v in edges:\n visited = set()\n if u in graph and v in graph and self.dfs(u, v, graph, visited):\n return [u, v]\n \n graph[u].add(v)\n graph[v].add(u)\n \n def dfs(self, source, target, graph, visited):\n if source in visited:\n return False\n\n visited.add(source)\n \n if source == target:\n return True\n \n return any(self.dfs(nbr, target, graph, visited) for nbr in graph[source])\n\nsolution = Solution()\nedges = [[1,2], [1,3], [2,3]]\n# edges = [[1,2], [2,3], [3,4], [1,4], [1,5]]\nprint(solution.find_redundant_connection(edges))" }, { 
"alpha_fraction": 0.5085106492042542, "alphanum_fraction": 0.5085106492042542, "avg_line_length": 35.230770111083984, "blob_id": "72f2900258aa59d79bf204d9271b56ead8d39c84", "content_id": "a1806dbfd5d71fa7ba2d0016dc75559c35b0c6cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 470, "license_type": "no_license", "max_line_length": 79, "num_lines": 13, "path": "/graphs/reconstruct_sequence.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def can_construct(self, org, seqs):\n extended = [None] + seqs\n pairs = set((u, v) for u, v in zip(extended, org))\n num_to_index = { num: i for i, num in enumerate(extended)}\n\n for seq in seqs:\n for u, v in zip([None]+seq, seq):\n if v not in num_to_index or num_to_index[v] <= num_to_index[u]:\n return False\n pairs.discard((u,v))\n \n return not pairs" }, { "alpha_fraction": 0.5249344110488892, "alphanum_fraction": 0.5590550899505615, "avg_line_length": 30.83333396911621, "blob_id": "54bd7e14c7baae81291bb5e1228fee4f013a66f8", "content_id": "946afb45766cb95eee96213fe2d94c1a6ba68844", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 53, "num_lines": 12, "path": "/dynamicProgramming/range_sum_query_immutable.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class RangeSum:\n def __init__(self, nums):\n self.sums = [0 for _ in range(len(nums) + 1)]\n for i, num in enumerate(nums):\n self.sums[i + 1] = num + self.sums[i]\n \n def get_range_sum(self, start, end):\n return self.sums[end + 1] - self.sums[start]\n\nnums = [1, 2, 3, 4, 5, 6, 7]\nrange_sum = RangeSum(nums)\nprint(range_sum.get_range_sum(1, 3))" }, { "alpha_fraction": 0.41870129108428955, "alphanum_fraction": 0.44727271795272827, "avg_line_length": 31.100000381469727, "blob_id": "47daed23d817c1538a7475feb239e458e9bed553", 
"content_id": "47bf988614fd652107b481b3704bd7a9b49b30b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1925, "license_type": "no_license", "max_line_length": 96, "num_lines": 60, "path": "/binarySearch/maximum_length_repeated_subarray.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n # time: O(log(min(m, n) * (m + n) * min(m, n)))\n # space: O(m ** 2)\n def get_max_length(self, A, B):\n \n def mutual_subarray(length):\n # generate all subarrays of A with length length\n subarrays = set(tuple(A[i: i + length]) for i in range(len(A) - length + 1))\n\n return any(tuple(B[j: j + length]) in subarrays for j in range(len(B) - length + 1))\n \n left, right = 0, min(len(A), len(B)) - 1\n\n while left <= right: # search for smallest length with no mutual subarray\n mid = (right + left) // 2\n\n if mutual_subarray(mid):\n left = mid + 1\n else:\n right = mid - 1\n \n return left - 1\n \n\n #time: O(M * N)\n #space: O(M * N)\n def get_max_length2(self, A, B):\n # rows = A, cols = B\n memo = [[0 for _ in range(len(B) + 1)] for _ in range(len(A) + 1)]\n\n result = 0\n\n for row in range(1, len(A) + 1):\n for col in range(1, len(B) + 1):\n if A[row - 1] == B[col - 1]:\n memo[row][col] = memo[row - 1][col - 1] + 1\n result = max(result, memo[row][col])\n \n return result\n \n\n ## this is the solution for the problem if the subarrays do not need to be continuous\n ## in this case, dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n def get_max_length3(self, A, B):\n dp = [[0 for _ in range(len(B) + 1)] for _ in range(len(A) + 1)]\n \n for i in range(1, len(A) + 1):\n for j in range(1, len(B) + 1):\n if A[i - 1] == B[j - 1]:\n dp[i][j] = 1 + dp[i - 1][j - 1]\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n \n return dp[-1][-1]\n\n\nsolution = Solution()\nA = [3, 2, 1, 8, 6, 9]\nB = [7, 6, 3, 2, 1, 9]\nprint(solution.get_max_length3(A, B))" }, { "alpha_fraction": 
0.5004135370254517, "alphanum_fraction": 0.5078577399253845, "avg_line_length": 26.720930099487305, "blob_id": "25b1de91acedc32166ae54eace7ea5aaa613aef8", "content_id": "af1d67b05ad56a76cb1998db546e07d5d99d84a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1209, "license_type": "no_license", "max_line_length": 107, "num_lines": 43, "path": "/trees/minimum_depth.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n # O(N) time and space\n def get_minimum_depth(self, root):\n if not root:\n return 0\n\n depth = 0\n level_nodes = [root]\n\n while level_nodes:\n depth += 1\n new_level_nodes = []\n for node in level_nodes:\n if not node.left and not node.right:\n #leaf node. return depth\n return depth\n \n if node.left:\n new_level_nodes.append(node.left)\n \n if node.right:\n new_level_nodes.append(node.right)\n \n level_nodes = new_level_nodes\n \n return depth\n \n # O(N) time and space\n def get_minimum_depth2(self, root):\n if not root:\n return 0\n \n left_height, right_height = self.get_minimum_depth2(root.left), self.get_minimum_depth2(root.right)\n\n if not left_height or not right_height:\n return 1 + left_height + right_height\n\n return 1 + min(left_height, right_height)\n\nsolution = Solution()\nprint(solution.get_minimum_depth(None))\n \n" }, { "alpha_fraction": 0.52601158618927, "alphanum_fraction": 0.5364161729812622, "avg_line_length": 23.742856979370117, "blob_id": "528432e75c38af350371c9d209cc8e83a88372df", "content_id": "fc45ab721713ae212e3f1507988f16836cbfa264", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "no_license", "max_line_length": 70, "num_lines": 35, "path": "/strings/palindrome.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\n# Time: O(N), Space: O(1)\n\ndef 
is_palyndrome(str):\n if not str: return True\n head = 0\n tail = len(str) - 1\n\n while head < tail:\n # only compare digits and alphabetical values\n if not str[head].isdigit() and not str[head].isalpha():\n head += 1\n elif not str[tail].isdigit() and not str[tail].isalpha():\n tail -= 1\n else:\n if str[head].lower() != str[tail].lower():\n return False\n\n head += 1\n tail -= 1\n\n return True \n\nclass Test(unittest.TestCase):\n\n data = [('A man, A plan, a canal: Panama', True), ('abab', False)]\n\n def test_is_palindrome(self):\n for test_data in self.data:\n actual = is_palyndrome(test_data[0])\n self.assertIs(actual, test_data[1])\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.5750224590301514, "alphanum_fraction": 0.5822102427482605, "avg_line_length": 26.170732498168945, "blob_id": "db26e596f58cf302625e7c9daafffccaa896c9b6", "content_id": "8057d4f7f7ac9dcd3b026cf847d7f627d536ba06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1113, "license_type": "no_license", "max_line_length": 81, "num_lines": 41, "path": "/trees/sum_root_to_leaf_numbers.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\nfrom utils.treeUtils import generate_tree\n\nclass Solution:\n def get_root_to_leaves_sum(self, node):\n leaves = [0]\n self.get_sum_helper(\"\", node, leaves)\n return sum([int(num) for num in leaves])\n \n def get_sum_helper(self, path, node, leaves):\n if not node:\n return\n \n path += str(node.value)\n\n if not node.left and not node.right:\n leaves.append(path)\n \n self.get_sum_helper(path, node.left, leaves)\n self.get_sum_helper(path, node.right, leaves)\n\n path = path[:-1]\n \n def get_root_to_leaves_sum2(self, node):\n return self.helper(0, node)\n \n def helper(self, partial, node):\n if not node:\n return 0\n\n partial = partial * 10 + node.value\n if not node.left and not node.right:\n return partial\n 
\n return self.helper(partial, node.left) + self.helper(partial, node.right)\n \n \nroot = generate_tree()\nprint(root)\nsolution = Solution()\nprint(solution.get_root_to_leaves_sum2(root))" }, { "alpha_fraction": 0.5494398474693298, "alphanum_fraction": 0.5582075119018555, "avg_line_length": 32.36065673828125, "blob_id": "fe34ee7b938e1dc5b96d4cb89cb373e362bb80db", "content_id": "9775f698b94e60b5168711bfb1f66227348b0fe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2053, "license_type": "no_license", "max_line_length": 135, "num_lines": 61, "path": "/arrays/combination_sum.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def combination_sum_unlimited(self, nums, target):\n '''\n In this method, each number can be used an unlimited time\n type nums: List[int]\n type target: int\n rtype: List[List[int]\n '''\n nums.sort()\n results = []\n # for i, num in enumerate(nums):\n # self.helper(nums, num, [nums[i]], i, target, results)\n self.backtrack_unlimited(nums, target, [], 0, results)\n return results\n \n def helper(self, nums, sum, partial, index, target, results):\n if sum > target: \n return\n\n if sum == target: # found a solution\n results.append(partial)\n return\n \n self.helper(nums, sum + nums[index], partial + [nums[index]], index, target, results) ## add the number to current sum\n\n if index < len(nums) - 1:\n self.helper(nums, sum + nums[index + 1], partial + [nums[index + 1]], index + 1, target, results) # move to the next number\n\n def backtrack_unlimited(self, nums, remainder, partial, start, results):\n if remainder < 0:\n return\n\n if remainder == 0:\n results.append(partial)\n return\n \n for i in range(start, len(nums)):\n self.backtrack_unlimited(nums, remainder - nums[i], partial + [nums[i]], i, results)\n \n def combination_sum_once(self, nums, target):\n nums.sort()\n results = []\n self.backtrack_once(nums, target, [], 0, results)\n 
return results\n \n def backtrack_once(self, nums, remainder, partial, start, results):\n if remainder < 0:\n return\n \n if remainder == 0:\n results.append(partial)\n return\n\n for i in range(start, len(nums)):\n self.backtrack_once(nums, remainder - nums[i], partial + [nums[i]], i + 1, results)\n\n\nsolution = Solution()\nnums = [2, 6, 5, 3, 7]\n# print(solution.combination_sum_unlimited(nums, 7))\nprint(solution.combination_sum_once(nums, 7))\n\n \n" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.48783785104751587, "avg_line_length": 23.406593322753906, "blob_id": "ad48db5407be785f2492b0b285d86fcfe4c60b80", "content_id": "c29c8dd95fcf10c8a71125dadabe9254a612f212", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2220, "license_type": "no_license", "max_line_length": 73, "num_lines": 91, "path": "/strings/lengthOfLastWord.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\n# O(N) time and O(1) space\n\n\n\n# def lengthOfLastWord(string):\n# list = string.split(' ')\n# return len(list[-1]) if list else 0\n\n# def length_of_last_word(string):\n# current_length, prev_length = 0, 0\n# for char in string:\n# if char == ' ':\n# if current_length != 0:\n# prev_length = current_length\n# current_length = 0\n# else:\n# current_length += 1\n \n# return current_length if current_length != 0 else prev_length\n\n# def length_of_last_word(string):\n# result = 0\n# i = len(string) - 1\n\n# while string[i] == ' ' and i >= 0:\n# i -= 1\n \n# if i == -1:\n# # Reached the beginning of the word, and did not find any words\n# return 0\n# # result = 1\n\n# while string[i] != ' ' and i >= 0:\n# result += 1\n# i -= 1\n \n# return result\n\n\ndef length_of_last_word(string):\n i = len(string) - 1\n end = -1\n\n while i >= 0:\n if string[i] == ' ' and end != -1:\n # already found a word, and encoutered a space\n return end - i\n if string[i] != ' ' and end == -1:\n # 
found a letter for the first time\n end = i\n i -= 1\n \n return end + 1 if end != -1 else 0\n\nclass Test(unittest.TestCase):\n dataTrue = [('Hello World', 5), ('qwerte', 6), (' ', 0)]\n dataFalse = [('Hello World', 7), ('qwerte', 2), (' ', 3)]\n\n def test_length_of_last_word(self):\n # true check\n for test_data in self.dataTrue:\n actual = length_of_last_word(test_data[0])\n self.assertEqual(actual, test_data[1])\n \n # false check\n for test_data in self.dataFalse:\n actual = length_of_last_word(test_data[0])\n self.assertNotEqual(actual, test_data[1])\n\nif __name__ == '__main__':\n unittest.main()\n\ndef length_of_last_word(word):\n if not word:\n return None\n \n i = len(word) - 1\n end = -1\n\n while i >= 0:\n if word[i] == ' ' and end != -1:\n return end - i\n \n if word[i] != ' ' and end == -1:\n end = i\n \n i -= 1\n \n return end + 1 if end != -1 else 0" }, { "alpha_fraction": 0.5866666436195374, "alphanum_fraction": 0.5955555438995361, "avg_line_length": 31.285715103149414, "blob_id": "67839d765299df9b8fb3c202686e3f14c626dca2", "content_id": "b594873664e20ee28d1100332914bf924d5d144f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 93, "num_lines": 7, "path": "/trees/maximum_depth_binary_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n # O(N) time and space\n def get_maximum_depth(self, root):\n if not root:\n return 0\n \n return 1 + max(self.get_maximum_depth(root.left), self.get_maximum_depth(root.right))" }, { "alpha_fraction": 0.5068681240081787, "alphanum_fraction": 0.5151098966598511, "avg_line_length": 26, "blob_id": "cafcad31b1cc0e94e5cbb006d2d5d92ba9da2019", "content_id": "2c6540b832c78d5ba31354d1d88a1868c8f0021c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 728, "license_type": "no_license", "max_line_length": 64, 
"num_lines": 27, "path": "/strings/parenthesis_generator.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def parenthesis_generator(self, n):\n \"\"\"\n :type n: int\n :rtype: List[str]\n \"\"\"\n result = []\n self.generator_helper([], n, n, result)\n return result\n\n def generator_helper(self, curr, left, right, result):\n if left == 0 and right == 0:\n result.append(''.join(curr))\n return\n\n if left != 0:\n curr.append('(')\n self.generator_helper(curr, left - 1, right, result)\n curr.pop()\n\n if right > left:\n curr.append(')')\n self.generator_helper(curr, left, right - 1, result)\n curr.pop()\n\nsolution = Solution()\nprint(solution.parenthesis_generator(3))" }, { "alpha_fraction": 0.4488392174243927, "alphanum_fraction": 0.4737747311592102, "avg_line_length": 30.45945930480957, "blob_id": "7348d5af532f70e689a61ac93be18e68ad479c57", "content_id": "fbb76689cceada8c9cd72f2bfc5205f55c662250", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1163, "license_type": "no_license", "max_line_length": 130, "num_lines": 37, "path": "/arrays/rotate_image.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def rotate_image(self, image):\n if not self.isValidImage(image):\n raise Exception('Invalid image')\n \n n = len(image)\n\n for row in range(n//2):\n start = row\n end = n - 1 - row\n for col in range(start, end): # end should not be included, because it is already taken care of in the first iteration\n x = row\n y = col\n prev = image[x][y]\n for _ in range(4):\n new_row = y\n new_col = n - x - 1\n new_cell = image[new_row][new_col]\n image[new_row][new_col] = prev\n x = new_row\n y = new_col\n prev = new_cell\n \n def isValidImage(self, image):\n n = len(image)\n return n > 0 and len(image[0]) == n\n\n def print_image(self, image):\n for row in image:\n print(row)\n\nimage = 
[[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]\nsolution = Solution()\nsolution.print_image(image)\nsolution.rotate_image(image)\nprint('===============')\nsolution.print_image(image)" }, { "alpha_fraction": 0.5240913033485413, "alphanum_fraction": 0.529163122177124, "avg_line_length": 24.148935317993164, "blob_id": "e2ce53f0c3c503051d73be4da3b335453a57cba4", "content_id": "dae8219037682381e59adb2efd4e774623290842", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1183, "license_type": "no_license", "max_line_length": 54, "num_lines": 47, "path": "/trees/binary_tree_side_view.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\nfrom utils.treeUtils import generate_tree\n\nclass Solution:\n def get_side_view(self, node):\n result = []\n if not node:\n return result\n \n level_nodes = [node]\n\n while level_nodes:\n result.append(level_nodes[-1].value)\n new_level_nodes = []\n\n for node in level_nodes:\n if node.left:\n new_level_nodes.append(node.left)\n \n if node.right:\n new_level_nodes.append(node.right)\n\n level_nodes = new_level_nodes\n \n return result\n \n def get_side_view2(self, node):\n result = []\n self.helper(node, 0, result)\n return result\n \n def helper(self, node, level, result):\n if not node:\n return\n\n if len(result) == level:\n result.append(node.value)\n else:\n result[level] = node.value\n \n self.helper(node.left, level + 1, result)\n self.helper(node.right, level + 1, result)\n\nroot = generate_tree()\nprint(root)\nsolution = Solution()\nprint(solution.get_side_view2(root))\n\n" }, { "alpha_fraction": 0.4547206163406372, "alphanum_fraction": 0.4773603081703186, "avg_line_length": 32.467742919921875, "blob_id": "efb0a697efb6fb8e39a5fe753454efde5265d84c", "content_id": "507e5688509e0d595042c72987fee7445ec4dd3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
2076, "license_type": "no_license", "max_line_length": 72, "num_lines": 62, "path": "/arrays/fil_zeroes.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n # Time: O(rows * cols) - space: O(1)\n def fill_zeroes(self, matrix):\n rows_count, col_count = len(matrix), len(matrix[0])\n for row in range(rows_count):\n for col in range(col_count):\n if not matrix[row][col]:\n matrix[row][col] = 'X'\n self.fill_row(row, 'X', matrix)\n self.fill_col(col, 'X', matrix)\n \n for row in range(rows_count):\n for col in range(col_count):\n if matrix[row][col] == 'X':\n matrix[row][col] = 0\n \n def fill_row(self, row, val, matrix):\n col_count = len(matrix[0])\n for col in range(col_count):\n matrix[row][col] = val\n \n def fill_col(self, col, val, matrix):\n row_count = len(matrix)\n for row in range(row_count):\n matrix[row][col] = val\n \n def print_matrix(self, matrix):\n for row in matrix:\n print(row)\n \n def fill_zeroes2(self, matrix):\n should_fill_row, should_fill_col = False, False\n rows_count, col_count = len(matrix), len(matrix[0])\n \n for row in range(rows_count):\n for col in range(col_count):\n if not matrix[row][col]:\n if row == 0:\n should_fill_row = True\n if col == 0:\n should_fill_col = True\n matrix[0][col] = 0\n matrix[row][0] = 0\n \n for row in range(1, rows_count):\n for col in range(1, rows_count):\n if not matrix[row][0] or not matrix[0][col]:\n matrix[row][col] = 0\n \n if should_fill_row:\n self.fill_row(0, 0, matrix)\n \n if should_fill_col:\n self.fill_col(0, 0, matrix)\n\n\nmatrix = [[1,0,1,1,1],[1,1,1,1,1],[1,1,1,1,1], [1,1,1,0,1], [1,1,1,1,1]]\nsolution = Solution()\nsolution.print_matrix(matrix)\nsolution.fill_zeroes2(matrix)\nprint('===============')\nsolution.print_matrix(matrix)\n\n" }, { "alpha_fraction": 0.5969945192337036, "alphanum_fraction": 0.618852436542511, "avg_line_length": 27.19230842590332, "blob_id": "c6f51f10e194f075f9d80d29730659cb37df322a", "content_id": 
"8c022f0d09a34c44cb33ccaa439b929b658d855e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 732, "license_type": "no_license", "max_line_length": 66, "num_lines": 26, "path": "/arrays/merge_intervals.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.interval import Interval\n\nclass Solution:\n def merge_intervals(self, intervals):\n \"\"\"\n type: intervals: List[Interval]\n rtype: List[Interval]\n \"\"\"\n result = []\n intervals.sort(key=lambda interval:interval.start)\n for interval in intervals:\n if not result or interval.start > result[-1].end:\n result.append(interval)\n else:\n result[-1].end = max(result[-1].end, interval.end)\n \n return result\n\n\ninterval1 = Interval(2, 5)\ninterval2 = Interval(7, 10)\ninterval3 = Interval(4, 6)\nintervals = [interval1, interval3, interval2]\nprint(intervals)\nsolution = Solution()\nprint(solution.merge_intervals(intervals))" }, { "alpha_fraction": 0.5189542770385742, "alphanum_fraction": 0.5477124452590942, "avg_line_length": 30.83333396911621, "blob_id": "4d4d5e6b9cc705ecf5fb8458234a148efcaf88cb", "content_id": "8c15317aabcb6bfc426cec901016f6d79313e7a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 765, "license_type": "no_license", "max_line_length": 101, "num_lines": 24, "path": "/arrays/min_path_sum.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.matrix import Matrix\n\nclass Solution:\n def min_path_sum(self, matrix):\n row_count, col_count = matrix.row_count, matrix.col_count\n\n if not row_count or not col_count:\n return 0\n \n sum_row = [float('inf') for _ in range(col_count + 1)]\n sum_row[1] = 0\n\n for row in range(1, row_count + 1):\n new_sum_row = [float('inf') for _ in range(col_count + 1)]\n for col in range(1, col_count + 1):\n new_sum_row[col] = matrix[row - 1][col - 1] + min(new_sum_row[col - 1], 
sum_row[col])\n sum_row = new_sum_row\n \n return sum_row[-1]\n\nmatrix = Matrix([[5,2,8],[6,1,0],[3,3,7]])\nprint(matrix)\nsolution = Solution()\nprint(solution.min_path_sum(matrix))\n\n" }, { "alpha_fraction": 0.4996461570262909, "alphanum_fraction": 0.5017693042755127, "avg_line_length": 22.180328369140625, "blob_id": "dd10c5b08101e71f573909cdea896a0901887300", "content_id": "86867becb673c40677ecdd66ec8383947ca612fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1413, "license_type": "no_license", "max_line_length": 45, "num_lines": 61, "path": "/trees/binary_tree_inorder_traversal.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\nfrom utils.treeUtils import generate_tree\n\nclass Solution:\n def in_order_traversal(self, node):\n result = []\n self.in_order_rec(node, result)\n return result\n \n def in_order_rec(self, node, result):\n if not node:\n return\n \n self.in_order_rec(node.left, result)\n\n result.append(node.value)\n\n self.in_order_rec(node.right, result)\n\n def in_order_traversal2(self, node):\n result = []\n if not node:\n return result\n \n stack = []\n while node or stack:\n if node:\n stack.append(node)\n node = node.left\n continue\n \n node = stack.pop()\n result.append(node.value)\n node = node.right\n \n return result\n\n def in_order_traversal3(self, node):\n stack, result = [], []\n while node:\n stack.append(node)\n node = node.left\n \n while stack:\n node = stack.pop()\n result.append(node.value)\n\n if node.right:\n node = node.right\n while node:\n stack.append(node)\n node = node.left\n \n return result\n\n\n\nroot = generate_tree()\nprint(root)\nsolution = Solution()\nprint(solution.in_order_traversal3(root))" }, { "alpha_fraction": 0.43109986186027527, "alphanum_fraction": 0.4677623212337494, "avg_line_length": 19.30769157409668, "blob_id": "6fd43d3b51a3b4f8d23fd8bcdab8214705e0a298", "content_id": 
"d99429340ac832e42aa36f5b7a5963c14d6c3420", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 791, "license_type": "no_license", "max_line_length": 59, "num_lines": 39, "path": "/strings/addBinary.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\ndef add_binary(str1, str2):\n \"\"\"\n :type str1: str\n :type str2: str\n :rtype: str\n \"\"\"\n carry = 0\n result = []\n i = len(str1) - 1\n j = len(str2) - 1\n\n while carry or i >= 0 or j >= 0:\n total = carry\n\n if i >= 0:\n total += int(str1[i])\n i -= 1\n\n if j >= 0:\n total += int(str2[j])\n j -= 1\n\n result.append(str(total % 2))\n carry = total // 2\n\n return ''.join(result[::-1])\n\nclass Test(unittest.TestCase):\n data = [('11', '1', '100')]\n \n def test_add_binary(self):\n for test_data in self.data:\n actual = add_binary(test_data[0], test_data[1])\n self.assertEqual(actual, test_data[2])\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.5458680987358093, "alphanum_fraction": 0.5481425523757935, "avg_line_length": 23.44444465637207, "blob_id": "5e1b509ac902a53dad1870b41eeb751427943a8f", "content_id": "7d136ae37d7216d21bd0ac21e746f725d91e0ad6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1319, "license_type": "no_license", "max_line_length": 55, "num_lines": 54, "path": "/trees/preorder_traversal.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\nfrom utils.treeUtils import generate_tree\n\nclass Solution:\n def preorder_traversal(self, node):\n result, rights = [], []\n\n while node or rights:\n if not node:\n node = rights.pop()\n \n result.append(node.value)\n\n if node.right:\n rights.append(node.right)\n node = node.left\n \n return result\n \n def preorder_traversal2(self, root):\n result = []\n if not root:\n return result\n \n stack = [root]\n\n 
while stack:\n node = stack.pop()\n result.append(node.value)\n\n if node.right:\n stack.append(node.right)\n if node.left:\n stack.append(node.left)\n\n return result \n\n def preorder_traversal3(self, node):\n result = []\n self.preorder_traversal_rec(node, result)\n return result\n \n def preorder_traversal_rec(self, node, result):\n if not node:\n return\n \n result.append(node.value)\n self.preorder_traversal_rec(node.left, result)\n self.preorder_traversal_rec(node.right, result)\n\nroot = generate_tree()\nprint(root)\nsolution = Solution()\nprint(solution.preorder_traversal2(root))" }, { "alpha_fraction": 0.5287539958953857, "alphanum_fraction": 0.5287539958953857, "avg_line_length": 30.299999237060547, "blob_id": "f7209571c9a3176e4fd9e63037960fe465254b19", "content_id": "42a2d31728262d99aa51c216616725399b38c594", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 626, "license_type": "no_license", "max_line_length": 83, "num_lines": 20, "path": "/utils/treeNode.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class TreeNode:\n def __init__(self, value = None, left = None, right = None):\n self.value = value\n self.left = left\n self.right = right\n \n def __repr__(self):\n return self.trace(padding=\"\")\n \n def trace(self, padding=\"\"):\n string_representation = ''\n if self.right:\n string_representation += self.right.trace(padding + \" \") + \"\\n\" \n \n string_representation += padding + str(self.value) + \"\\n\"\n\n if self.left:\n string_representation += self.left.trace(padding + \" \")\n \n return string_representation\n" }, { "alpha_fraction": 0.5777778029441833, "alphanum_fraction": 0.5904762148857117, "avg_line_length": 32.21052551269531, "blob_id": "b97790fd9f03db92697662a5eafc95121eecf4e8", "content_id": "999298a55ab9d0a20eaf4f0a209a73855968db5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, 
"license_type": "no_license", "max_line_length": 74, "num_lines": 19, "path": "/binarySearch/heaters.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from bisect import bisect\n\nclass Solution:\n # n = len(houses), m = len(heathers)\n # O(m log m + n log m) = O(max(mlogm, nlogm)) time - O(1) space\n def get_heathers_radius(self, houses, heathers):\n heathers = [float('-inf')] + sorted(heathers) + [float('inf')]\n result = 0\n for house in houses:\n i = bisect(heathers, house)\n min_distance = min(house - heathers[i-1], heathers[i] - house)\n result = max(result, min_distance)\n \n return result\n\nhouses = [1, 2, 3, 4]\nheathers = [2]\nsolution = Solution()\nprint(solution.get_heathers_radius(houses, heathers))" }, { "alpha_fraction": 0.5110389590263367, "alphanum_fraction": 0.5207791924476624, "avg_line_length": 19.824323654174805, "blob_id": "28698214037519c1ab933d9694907ce756b9494b", "content_id": "7971de4b4ada7a4ae41c03bd85623a7bc43bf144", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1540, "license_type": "no_license", "max_line_length": 64, "num_lines": 74, "path": "/trees/find_largest_value_in_each_tree_row.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 515\n\nfrom utils.treeNode import TreeNode\n\n# time: O(N)\n# space: O(N)\nclass Solution:\n def max_rows(self, root):\n result = []\n if not root:\n return result\n \n self.traverse(root, 0, result)\n\n return result\n \n def traverse(self, root, level, result):\n if not root:\n return\n \n if len(result) == level:\n result.append(root.value)\n else:\n result[level] = max(result[level], root.value)\n \n self.traverse(root.left, level + 1, result)\n self.traverse(root.right, level + 1, result)\n\n# time: O(N)\n# space: O(N)\nclass Solution2:\n def max_rows(self, root):\n result = []\n if not root:\n return result\n \n level = [root]\n while level:\n new_level = []\n max_value = 
max(map(lambda node: node.value, level))\n result.append(max_value)\n\n for node in level:\n if node.left:\n new_level.append(node.left)\n \n if node.right:\n new_level.append(node.right)\n \n level = new_level\n \n return result\n\n\n\none = TreeNode(1)\ntwo = TreeNode(2)\nthree = TreeNode(3)\nfour = TreeNode(4)\nfive = TreeNode(5)\nsix = TreeNode(6)\nseven = TreeNode(7)\n\none.left = five\nfive.left = three\nfive.right = four\none.right = two\ntwo.right = six\nsix.right = seven\n\nprint(one)\nprint('===========')\nsolution = Solution2()\nprint(solution.max_rows(one))" }, { "alpha_fraction": 0.4579439163208008, "alphanum_fraction": 0.4658878445625305, "avg_line_length": 22.20652198791504, "blob_id": "a7f95f7c69cdb89c46931a894fc0ca42b1214d21", "content_id": "336085cf397c7aa0c67d3512c433efd47e9ed796", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2140, "license_type": "no_license", "max_line_length": 67, "num_lines": 92, "path": "/trees/kth_smallest_element_in_bst.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_kth_smallest(self, node, k):\n count = self.count(node.left)\n\n if k <= count:\n return self.get_kth_smallest(node.left, k)\n \n elif k > count + 1:\n return self.get_kth_smallest(node.right, k - count - 1)\n \n return node.value\n \n def count(self, node):\n if not node:\n return 0\n \n return 1 + self.count(node.left) + self.count(node.right)\n\n def get_kth_smallest2(self, node, k):\n stack = []\n \n while node:\n stack.append(node.value)\n node = node.left\n \n while stack:\n node = stack.pop()\n k -= 1\n if k == 0:\n return node.value\n \n if node.right:\n node = node.right\n while node:\n stack.append(node)\n node = node.left\n \n def get_kth_smallest3(self, node, k):\n self.k = k\n self.result = None\n self.helper(node)\n return self.result\n \n def helper(self, node):\n if not node:\n return\n \n self.helper(node.left)\n \n self.k -= 1\n 
\n if self.k == 0:\n self.result = node.value\n return\n \n self.helper(node.right)\n\ndef get_kth_smallest(root, k):\n left_count = get_count(root.left)\n\n if left_count <= k:\n return get_kth_smallest(root.left, k)\n \n if left_count == k + 1:\n return root.value\n \n return get_kth_smallest(root.right, k - left_count - 1)\n\ndef get_count(root):\n if not root:\n return 0\n \n return 1 + get_count(root.left) + get_count(root.right)\n\ndef get_kth_smallest4(root, k):\n stack = []\n\n while root:\n stack.append(root)\n root = root.left\n \n while stack:\n node = stack.pop()\n k -= 1\n if k == 0:\n return node.value\n \n if node.right:\n node = node.right\n while node:\n stack.append(node)\n node = node.left\n \n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5314009785652161, "avg_line_length": 17.04347801208496, "blob_id": "0f57baa901f3b3035dfb7fd154f4b814d6b514db", "content_id": "40a4668db8d1e50c3ce6784151170d4e69f3367d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 40, "num_lines": 23, "path": "/arrays/array_partition_1.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 561\n\nclass Solution:\n def partition(self, nums):\n result = 0\n\n if not nums:\n return result\n \n nums.sort()\n\n for i in range(0, len(nums), 2):\n result += nums[i]\n \n return result\n\nclass Solution2:\n def partition(self, nums):\n return sum(sorted(nums)[::2])\n\nsolution = Solution2()\nnums = [1,4, 3, 2]\nprint(solution.partition(nums))" }, { "alpha_fraction": 0.4172413647174835, "alphanum_fraction": 0.4482758641242981, "avg_line_length": 18.399999618530273, "blob_id": "6d848cdaa1ade16d90950f002ccc59b9d4f2e856", "content_id": "b3e340264e28f37884ab37699a26ef11b9ea4815", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "no_license", "max_line_length": 42, 
"num_lines": 15, "path": "/dynamicProgramming/couting_bits.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_ones(self, n):\n if n < 0:\n return 0\n \n ones = [0 for _ in range(n + 1)]\n\n for i in range(1, n + 1):\n ones[i] = ones[i // 2] + i % 2\n \n return ones\n\n\nsolution = Solution()\nprint(solution.get_ones(5))" }, { "alpha_fraction": 0.4331914782524109, "alphanum_fraction": 0.45957446098327637, "avg_line_length": 29.447368621826172, "blob_id": "dc833e2116a7ec505a83139713521c1e66c62bcd", "content_id": "31c421535a2fbc62de1e0f50fd8fc75e79fb5dfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1175, "license_type": "no_license", "max_line_length": 94, "num_lines": 38, "path": "/arrays/spiral_matrix.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def spiral(self, matrix):\n rows_count = len(matrix[0])\n cols_count = len(matrix)\n row, col = 0, -1\n d_row, d_col = 0, 1\n row_leg, col_leg = rows_count, cols_count - 1\n leg_count = 0\n spiral = []\n\n for _ in range(rows_count * cols_count):\n row += d_row\n col += d_col\n spiral.append(matrix[row][col])\n leg_count += 1\n\n if (d_row == 0 and leg_count == row_leg) or (d_col == 0 and leg_count == col_leg):\n # change direction\n # decrease the right leg (possible number of moves through the axis)\n if d_row == 0:\n row_leg -= 1\n else:\n col_leg -= 1\n \n # get the new direction\n d_row, d_col = d_col, - d_row\n leg_count = 0\n \n return spiral\n \n def print_matrix(self, matrix):\n for row in matrix:\n print(row)\n\nsolution = Solution()\nmatrix = [[1,2,3],[5,6,7],[9,10,11],[13,14,15]]\nsolution.print_matrix(matrix)\nprint(solution.spiral(matrix))\n\n \n" }, { "alpha_fraction": 0.41022443771362305, "alphanum_fraction": 0.43765586614608765, "avg_line_length": 26.689655303955078, "blob_id": "5d61ceec150da828c4fa53e28dabded96c589ce8", "content_id": 
"eba8c1a0671e4d1adc197e327a338dca4070e8ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 73, "num_lines": 29, "path": "/arrays/jump_game.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def can_jump(self, nums):\n last = len(nums) - 1\n for i in range(last - 1, -1, -1):\n if i + nums[i] >= last:\n last = i\n \n return last == 0\n\n def can_jump2(self, nums):\n max_reached = 0\n stack = [0]\n while stack:\n index = stack.pop()\n if index + nums[index] > max_reached:\n if index + nums[index] >= len(nums) - 1:\n return True\n \n for i in range(max_reached + 1, index + nums[index] + 1):\n stack.append(i)\n \n max_reached = index + nums[index]\n \n return False\n\nnums = [2,3,1,0,4]\n# nums = [3, 2, 1, 0, 4]\nsolution = Solution()\nprint(solution.can_jump2(nums))" }, { "alpha_fraction": 0.5148063898086548, "alphanum_fraction": 0.5364464521408081, "avg_line_length": 31.55555534362793, "blob_id": "1e5b68090cbca24fb6d8913ddb9b41deb30e416f", "content_id": "f0e7a71bd96c1e0c68cbbfb62ab301d3fe0fb1fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 878, "license_type": "no_license", "max_line_length": 72, "num_lines": 27, "path": "/arrays/unique_paths.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n # time: O(rows_count * cols_count). 
space: O(cols_count)\n def get_unique_paths_count(self, rows_count, cols_count):\n row_paths = [1 for _ in range(cols_count)]\n for _ in range(1, rows_count):\n new_row_paths = [1]\n for col in range(1, cols_count):\n new_row_paths.append(new_row_paths[-1] + row_paths[col])\n row_paths = new_row_paths\n \n return row_paths[-1]\n\n def get_unique_paths_count2(self, rows_count, cols_count):\n if rows_count == 0 or cols_count == 0:\n return 0\n\n if rows_count == 1 or cols_count == 1:\n return 1\n \n res = 1\n for i in range(1, cols_count):\n res *= (cols_count + rows_count - 1 - i) / i\n \n return int(res)\n\nsolution = Solution()\nprint(solution.get_unique_paths_count2(6, 3))" }, { "alpha_fraction": 0.3922652006149292, "alphanum_fraction": 0.4033149182796478, "avg_line_length": 23.68181800842285, "blob_id": "4712f2a7c1cc103ca9614f48ba08ded172696118", "content_id": "0d78844f22d38cab4c238f5b008a425319bbf43c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 543, "license_type": "no_license", "max_line_length": 46, "num_lines": 22, "path": "/arrays/array_nesting.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 565\n\nclass Solution:\n def array_nesting(self, nums):\n result = 0\n if not nums:\n return result\n \n for i in range(len(nums)):\n if nums[i] == float('inf'):\n continue\n \n start, count = i, 0\n while nums[start] != float('inf'):\n temp = start\n start = nums[start]\n nums[temp] = float('inf')\n count += 1\n \n result = max(result, count)\n \n return result\n" }, { "alpha_fraction": 0.45527157187461853, "alphanum_fraction": 0.47337594628334045, "avg_line_length": 26.617647171020508, "blob_id": "8e2b8881b9e37158a8a097374bad7a59b3a13998", "content_id": "4d5f36d9fd35edde1290664b363503c54154cdd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1878, "license_type": "no_license", "max_line_length": 57, "num_lines": 68, 
"path": "/graphs/minumum_height_trees.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nclass Solution:\n def get_min_height_trees(self, n, edges):\n graph = defaultdict(set)\n\n for edge in edges:\n graph[edge[0]].add(edge[1])\n graph[edge[1]].add(edge[0])\n \n height_to_nodes = defaultdict(set)\n\n for node in graph:\n self.bfs(node, graph, set(), height_to_nodes)\n \n min_height = min(height_to_nodes.keys())\n return list(height_to_nodes[min_height])\n \n def bfs(self, node, graph, visited, height_to_nodes):\n height = -1\n queue = [node]\n visited.add(node)\n \n while queue:\n height += 1\n new_queue = []\n for curr in queue:\n for nbr in graph[curr]:\n if nbr in visited:\n continue\n \n visited.add(nbr)\n new_queue.append(nbr)\n \n queue = new_queue\n \n height_to_nodes[height].add(node)\n \n def get_min_height_trees2(self, n, edges):\n graph = defaultdict(set)\n\n for edge in edges:\n graph[edge[0]].add(edge[1])\n graph[edge[1]].add(edge[0])\n \n leaves = [i for i in graph if len(graph[i]) == 1]\n\n while len(graph) > 2:\n new_leaves = []\n for leaf in leaves:\n nbr = graph[leaf].pop()\n del graph[leaf]\n\n graph[nbr].remove(leaf)\n if len(graph[nbr]) == 1:\n new_leaves.append(nbr)\n \n leaves = new_leaves\n \n return list(graph.keys())\n\n\n\nsolution = Solution()\n# edges = [[1, 0], [1, 2], [1, 3]]\n# print(solution.get_min_height_trees2(4, edges))\nedges = [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]]\nprint(solution.get_min_height_trees2(6, edges))\n" }, { "alpha_fraction": 0.400956928730011, "alphanum_fraction": 0.400956928730011, "avg_line_length": 22.697673797607422, "blob_id": "9f5ba26256df79b2cad6cd3df6e79194910c4d63", "content_id": "a8d0d23d75911d0ebee5607656f72cd9f5ad6785", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1045, "license_type": "no_license", "max_line_length": 48, "num_lines": 43, "path": "/graphs/dfs.py", 
"repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.graph import Vertex, Graph\n\nclass Solution:\n # DFS\n def search(self, graph, start, end):\n if not end:\n return\n \n if start == end:\n return True\n \n visited = set()\n\n set.add(start)\n\n for nbr in graph[start]:\n if nbr not in visited:\n if self.search(graph, nbr, end):\n return True\n \n return False\n \n def search_BFS(self, graph, start, end):\n if start == end:\n return True\n \n start.visited = True\n queue = [start]\n\n while queue:\n new_queue = []\n for node in queue:\n for nbr in graph[node]:\n if nbr == end:\n return True\n \n if not nbr.visited:\n nbr.visited = True\n new_queue.append(nbr)\n \n queue = new_queue\n \n return False\n\n\n\n \n\n \n" }, { "alpha_fraction": 0.4642857015132904, "alphanum_fraction": 0.47727271914482117, "avg_line_length": 19.10869598388672, "blob_id": "c2d4d2474cbd4102130c451fc05f11273ae0133a", "content_id": "96652d46aabb06d523cbf68db07a64d7651abbaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 924, "license_type": "no_license", "max_line_length": 76, "num_lines": 46, "path": "/arrays/nextPermutation.js", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "// time: O(n)\n// space: O(1)\n\nclass Solution {\n nextPermutation(nums) {\n if (!nums || !nums.length) return nums;\n \n let i = nums.length - 2;\n \n while (i >= 0 && nums[i] >= nums[i + 1]) {\n i--;\n }\n \n \n \n if (i >= 0) {\n let nextBiggestIndex = nums.length - 1;\n \n while (nextBiggestIndex >= 0 && nums[nextBiggestIndex] <= nums[i]) {\n nextBiggestIndex--;\n }\n this.swap(i, nextBiggestIndex, nums);\n }\n this.reverse(i + 1, nums);\n console.log(nums)\n }\n \n swap(i, j, nums) {\n let temp = nums[i];\n nums[i] = nums[j];\n nums[j] = temp;\n }\n \n reverse(left, nums) {\n let right = nums.length - 1;\n \n while (left < right) {\n this.swap(left, right, nums);\n left++;\n right--;\n }\n }\n 
}\n \n const solution = new Solution();\n console.log(solution.nextPermutation([1, 2, 2]))" }, { "alpha_fraction": 0.4754098355770111, "alphanum_fraction": 0.4901639223098755, "avg_line_length": 26.727272033691406, "blob_id": "0844941c17e22bd399ce508e64cff3844861ee6e", "content_id": "9717116cec03c02a59caaeb9c6b9e9d26d50c38b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1220, "license_type": "no_license", "max_line_length": 78, "num_lines": 44, "path": "/dynamicProgramming/largest_divisible_subset.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 368\n\nclass Solution:\n #O(N ** 2) time, O(N) space\n def get_largest_divisible_subset(self, nums):\n if not nums:\n return []\n \n max_to_set = dict()\n nums.sort()\n\n for num in nums:\n new_set = set()\n for max_in_s, s in max_to_set.items():\n if num % max_in_s == 0 and len(s) > len(new_set):\n new_set = s\n max_to_set[num] = new_set | { num }\n \n return list(max(max_to_set.values(), key=len))\n\n\n def get_max_divisible_subset_length(self, nums):\n \"\"\"\n returns the length of the longest divisible subset\n \"\"\"\n if not nums:\n return 0\n \n max_lengths = [1]\n max_length = 1\n\n for i in range(1, len(nums)):\n max_length_here = 1\n for j in range(i - 1, -1, -1):\n if nums[i] % nums[j] == 0:\n max_length_here = max(max_length_here, 1 + max_lengths[j])\n max_lengths.append(max_length_here)\n max_length = max(max_length, max_length_here)\n \n return max_length\n\nsolution = Solution()\nnums = [1,2,3]\nprint(solution.get_largest_divisible_subset(nums))\n" }, { "alpha_fraction": 0.3731911778450012, "alphanum_fraction": 0.4082254469394684, "avg_line_length": 23.314815521240234, "blob_id": "ef6112bb83fbd1ee7862d5fcc1dae02e43099f55", "content_id": "69b3c86fe8c88fd5cd2774f2546471dd13a49f22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1313, "license_type": "no_license", 
"max_line_length": 62, "num_lines": 54, "path": "/arrays/maximize_distance_to_closest_person.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 849\n\nclass Solution:\n def max_distance_to_closest(self, seats):\n if not seats:\n return 0\n \n n = len(seats)\n\n lefts, rights = [n] * n, [n] * n\n\n for i in range(len(seats)):\n if seats[i] == 1:\n lefts[i] = 0\n elif i > 0:\n lefts[i] = 1 + lefts[i - 1]\n \n for i in range(n - 1, -1, -1):\n if seats[i] == 1:\n rights[i] = 0\n elif i < n - 1:\n rights[i] = 1 + rights[i + 1]\n \n return max(min(lefts[i], rights[i]) for i in range(n))\n\nclass Solution2:\n def max_distance_to_closest(self, seats):\n if not seats:\n return 0\n \n n = len(seats)\n lefts, rights = [n] * n, [n] * n\n\n for i in range(n):\n if seats[i] == 1:\n lefts[i] = 0\n elif i > 0:\n lefts[i] = lefts[i - 1] + 1\n \n for i in range(n - 1, -1, -1):\n if seats[i] == 1:\n rights[i] = 0\n elif i < n - 1:\n rights[i] = rights[i + 1] + 1\n \n return max(min(lefts[i], rights[i]) for i in range(n))\n\n\n\n\nseats = [1,0,0,0,1,0,1]\nsolution = Solution()\nprint(solution.max_distance_to_closest(seats))\n[0, 1, 2, 1, 0, 1, 0]\n" }, { "alpha_fraction": 0.5431654453277588, "alphanum_fraction": 0.555155873298645, "avg_line_length": 25.935483932495117, "blob_id": "bb3ef136f074affa831d87d62d3e95c6551dccbf", "content_id": "46fa09e8ef506375354253c2169389a41653b769", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 834, "license_type": "no_license", "max_line_length": 69, "num_lines": 31, "path": "/arrays/shuffle_array.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import random\n\nclass Solution:\n def __init__(self, nums):\n self.original = nums\n \n # def shuffle(self):\n # shuffled = []\n # copy = list(self.original)\n # for i in range(len(copy) - 1, -1, -1):\n # index = random.randint(0, i)\n # shuffled.append(copy[index])\n # copy[index] = copy[-1]\n # 
copy.pop()\n # return shuffled\n\n def shuffle(self):\n shuffled = list(self.original)\n for i in range(len(shuffled)):\n swap = random.randint(i, len(shuffled) - 1)\n shuffled[i], shuffled[swap] = shuffled[swap], shuffled[i]\n return shuffled\n \n def restore(self):\n return self.original\n\nnums = [1,2,3,4]\nsolution = Solution(nums)\nprint(solution.shuffle())\nprint(solution.shuffle())\nprint(solution.restore())" }, { "alpha_fraction": 0.3802647292613983, "alphanum_fraction": 0.40673887729644775, "avg_line_length": 20.894737243652344, "blob_id": "33d06cc4532679683cfd8ca4f4d96e18e5db092e", "content_id": "4f5e4f30a001eb53c3fd1608e3102151edbfb7a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 831, "license_type": "no_license", "max_line_length": 61, "num_lines": 38, "path": "/arrays/add_1.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def add_one(self, arr):\n \"\"\"\n :type arr: list[int]\n :rtype: list[int]\n \"\"\"\n sum = 0\n carry = 1\n arr.reverse()\n\n for i, c in enumerate(arr):\n sum = c + carry\n arr[i] = sum % 10\n carry = sum // 10\n \n if carry == 1:\n arr.append(1)\n \n arr.reverse()\n return arr\n\n def add_one2(self, digits):\n \"\"\"\n :type digits: list[int]\n :rtype: list[int]\n \"\"\"\n i = len(digits) - 1\n while i >= 0 and digits[i] == 9:\n digits[i] = 0\n i -= 1\n \n if i == -1:\n return [1] + digits\n\n return digits[:i] + [digits[i] + 1] + digits[i + 1 :]\n\nsolution = Solution()\nprint(solution.add_one2([9, 8, 9]))" }, { "alpha_fraction": 0.37162160873413086, "alphanum_fraction": 0.4121621549129486, "avg_line_length": 23.75, "blob_id": "a5299e2022f263991e3eb51233b9c062dd41d24c", "content_id": "c14d0956ebaf9abaf5f259207984254f34dd655d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 73, "num_lines": 12, "path": 
"/binarySearch/pow.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def pow(self, x, n):\n if n == 0:\n return 1\n if n < 0:\n n *= -1\n x = 1/x\n # return x * pow(x, n - 1)\n return pow(x*x, n // 2) if n % 2 == 0 else x * pow(x * x, n // 2)\n\nsolution = Solution()\nprint(solution.pow(2, 3))" }, { "alpha_fraction": 0.49268579483032227, "alphanum_fraction": 0.5196021199226379, "avg_line_length": 28.465517044067383, "blob_id": "d7a3e38aaf0c0a02849facf7d8f0fe09a3523239", "content_id": "791db37d6f4f1228a00352fb133e55cdc354e2cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1709, "license_type": "no_license", "max_line_length": 120, "num_lines": 58, "path": "/strings/decode_ways.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#Gotcha: because int are immutable in python, changing it in a function does not update its value outside of this method\n# therefore, we have to return its value from the function that changes it\n\nclass Solution(object):\n def decode_ways(self, nums):\n \"\"\"\n :type nums: str\n :rtype: int\n \"\"\"\n\n if not nums:\n return 0\n\n result = 0\n\n result = self.decode_ways_count(nums, 0, result)\n\n return result\n\n def decode_ways_count(self, nums, index, result):\n # went through all the digits in the string, increment the result count\n if index == len(nums):\n result += 1\n elif nums[index] != '0':\n # The digit at index is not 0, I can use it to get a character\n result = self.decode_ways_count(nums, index + 1, result)\n\n # Check if we can build a character using 2 digits\n if index < len(nums) - 1 and 10 <= int(nums[index: index + 2]) <= 26:\n result = self.decode_ways_count(nums, index + 2, result)\n \n return result\n \n def get_decode_ways(self, code):\n \"\"\"\n type code: str\n rtype: int\n \"\"\"\n if not code:\n return 0\n\n ways = [0 for _ in range(len(code) + 1)]\n ways[0] = 1\n if code[0] != 
'0':\n ways[1] = 1\n \n for i in range(1, len(code)):\n if code[i] != '0':\n ways[i + 1] += ways[i]\n \n if 10 <= int(code[i-1: i+1]) <= 26:\n ways[i + 1] += ways[i - 1]\n \n return ways[-1]\n\nsolution = Solution()\nprint(solution.decode_ways('12123'))\nprint(solution.get_decode_ways('12123'))\n" }, { "alpha_fraction": 0.5361930131912231, "alphanum_fraction": 0.548704206943512, "avg_line_length": 19, "blob_id": "33dd780692b9c4ecf68e318ba1c10c0046399319", "content_id": "27e246bf7c73c8aecec75dec24dfc0181de8e316", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1119, "license_type": "no_license", "max_line_length": 87, "num_lines": 56, "path": "/trees/trim_binary_search_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#669\n\nfrom utils.treeNode import TreeNode\n\nclass Solution:\n def trim(self, root, l, r):\n if not root:\n return root\n \n if root.value < l:\n return self.trim(root.right, l, r)\n \n if root.value > r:\n return self.trim(root.left, l, r)\n \n root.left, root.right = self.trim(root.left, l, r), self.trim(root.right, l, r)\n return root\n\n\nclass Solution2:\n def trim(self, root, l, r):\n if not root:\n return root\n \n root.left, root.right = self.trim(root.left, l, r), self.trim(root.right, l, r)\n\n if root.value < l:\n return root.right\n \n if root.value > r:\n return root.left\n \n return root\n\n# one = TreeNode(1)\n# zero = TreeNode(0)\n# two = TreeNode(2)\n\n# one.left = zero\n# one.right = two\n\none = TreeNode(1)\nzero = TreeNode(0)\ntwo = TreeNode(2)\nthree = TreeNode(3)\nfour = TreeNode(4)\n\nthree.left = zero\nzero.right = two\ntwo.left = one\nthree.right = four\n\nprint(three)\nprint('=============')\nsolution = Solution()\nprint(solution.trim(three, 1, 3))" }, { "alpha_fraction": 0.5139859914779663, "alphanum_fraction": 0.5489510297775269, "avg_line_length": 32.70588302612305, "blob_id": "833ac9b06c4755ed3578bb004d122db575eddfeb", "content_id": 
"c760325e5caadf54e0fe9f308b1e451582d15bb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 572, "license_type": "no_license", "max_line_length": 79, "num_lines": 17, "path": "/arrays/find_peak_element.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#162\n\nclass Solution:\n def get_peak(self, nums):\n left, right = 0, len(nums) - 1\n while left < right:\n mid = (left + right) // 2\n mid2 = mid + 1 # will not exceed len(nums) - 1 because left < right\n if nums[mid] < nums[mid2]: #mid2 is potential peak\n left = mid2 #notice how it is mid2 and not mid2+1\n else: #mid is potential peak\n right = mid #notice how it is mid and not mid - 1\n return nums[left]\n\nnums = [1, 2, 3, 1]\nsolution = Solution()\nprint(solution.get_peak(nums))" }, { "alpha_fraction": 0.5687074661254883, "alphanum_fraction": 0.5809524059295654, "avg_line_length": 17.399999618530273, "blob_id": "454047fb2baa4f45ce3053e1e123ac84ec4be821", "content_id": "75b424e3f116e995af9b6a1002c9c3dcec814941", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": "no_license", "max_line_length": 42, "num_lines": 40, "path": "/linkedList/remove_duplicates2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#82\n\nfrom utils.listNode import ListNode\n\nclass Solution:\n def remove_duplicates(self, head):\n if not head:\n return head\n \n curr, runner = head, head.next\n\n while runner:\n if curr.value != runner.value:\n curr.next = runner\n curr = curr.next\n \n runner = runner.next\n \n curr.next = None\n return head\n\none = ListNode(1)\ntwo = ListNode(1)\nthree = ListNode(3)\nfour = ListNode(3)\nfive = ListNode(4)\nsix = ListNode(4)\nseven = ListNode(5)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\nfive.next = six\nsix.next = seven\n\nprint(one)\n\nsolution = 
Solution()\nprint(solution.remove_duplicates(one))" }, { "alpha_fraction": 0.37290501594543457, "alphanum_fraction": 0.43435755372047424, "avg_line_length": 23.724138259887695, "blob_id": "05cf93a3fd7d14c27aa69d4d2d49e26485bd41a9", "content_id": "8785ca2891060fb31f46d675fdf53c54a5e4e036", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 716, "license_type": "no_license", "max_line_length": 96, "num_lines": 29, "path": "/arrays/merge_sorted_arrays.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def merge_sorted_arrays(self, nums1, nums2):\n m = len(nums1)\n n = len(nums2)\n\n nums1 += [0] * n\n \n i = m - 1\n j = n - 1\n k = i + j + 1\n\n while i >= 0 and j >= 0:\n nums1[k] = max(nums1[i], nums2[j])\n k -= 1\n if nums1[i] < nums2[j]:\n j -= 1\n else:\n i -= 1\n\n # nothing to move if only nums1 digits are left, move the rest of digits2 if any \n if j >= 0:\n nums1[:k+ 1] = nums2[:j + 1]\n \n return nums1\n\narr1 = [5, 7, 12, 20]\narr2 = [2, 6, 9, 40, 80]\nsolution = Solution()\nprint(solution.merge_sorted_arrays(arr1, arr2))" }, { "alpha_fraction": 0.4242914915084839, "alphanum_fraction": 0.447773277759552, "avg_line_length": 27.744186401367188, "blob_id": "44859365b904b5af39cf6311f79a1c67e3c3a82b", "content_id": "23465e54298547b910dacd26878ebf8a4cee4d62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1235, "license_type": "no_license", "max_line_length": 60, "num_lines": 43, "path": "/arrays/diagonal_traverse.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.matrix import Matrix\n\nclass Solution:\n def traverse(self, matrix):\n \"\"\"\n :type matrix: Matrix\n :rtype: List[int]\n \"\"\"\n if not matrix.row_count or not matrix.col_count:\n return\n\n row, col = 0, 0\n d_row, d_col = -1, 1\n result = []\n for _ in range(matrix.row_count * matrix.col_count):\n 
result.append(matrix[row][col])\n row += d_row\n col += d_col\n if not self.is_valid_cell(row, col, matrix):\n if col == matrix.col_count:\n col -= 1\n row += 2\n elif row < 0:\n row = 0\n elif row == matrix.row_count:\n row -= 1\n col += 2\n elif col < 0:\n col = 0\n d_row, d_col = d_col, d_row\n return result\n\n def is_valid_cell(self, row, col, matrix):\n return (\n row >= 0 and row < matrix.row_count and\n col >= 0 and col < matrix.col_count\n )\n\n# matrix = Matrix([[1,2,3],[4,5,6]])\nmatrix = Matrix([[1,2,3],[4,5,6], [7,8,9]])\nprint(matrix)\nsolution = Solution()\nprint(solution.traverse(matrix))" }, { "alpha_fraction": 0.5166931748390198, "alphanum_fraction": 0.521462619304657, "avg_line_length": 23.230770111083984, "blob_id": "11fbd1e3e67fd6c6fe44e3fd262da2ae73c2bb2d", "content_id": "fdb69605d5b1abbc5e7502530f528f3d72d101e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "no_license", "max_line_length": 59, "num_lines": 26, "path": "/strings/super_reduced_string.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\n# Time: O(N) - Space: O(N)\nclass Solution(object):\n def super_reduced_string(self, str):\n res = []\n for c in str:\n if res and res[-1] == c:\n res.pop()\n else:\n res.append(c)\n \n return ''.join(res)\n\n\nclass Test(unittest.TestCase):\n test_data = [('aaabccbddd', 'ad')]\n\n def test_super_reduced_string(self):\n solution = Solution()\n for data in self.test_data:\n actual = solution.super_reduced_string(data[0])\n self.assertEqual(actual, data[1])\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.5232919454574585, "alphanum_fraction": 0.5419254899024963, "avg_line_length": 24.799999237060547, "blob_id": "128fdb9018545c621ae67a9f77f622eab87a725d", "content_id": "0cfa4dcfb4e644cf7d90ab38d6339cba8e899e40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 644, "license_type": "no_license", "max_line_length": 98, "num_lines": 25, "path": "/trees/symetric_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n # O(N) time and space\n def is_symetric(self, root):\n \"\"\"\n type root: TreeNode\n rtype: bool\n \"\"\"\n if not root:\n return True\n \n return self.is_mirror(root.left, root.right)\n \n def is_mirror(self, node1, node2):\n if not node1 and not node2:\n return True\n \n if not node1 or not node2:\n return False\n \n if node1.value != node2.value:\n return False\n \n return self.is_mirror(node1.left, node2.right) and self.is_mirror(node1.right, node2.left)" }, { "alpha_fraction": 0.48707401752471924, "alphanum_fraction": 0.4964747428894043, "avg_line_length": 25.200000762939453, "blob_id": "45d1b258d998bfff4a2fddd73cc9f27e0909227e", "content_id": "0dc0b4bfe5ecb2c880430032a8e389d1308dcfc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1702, "license_type": "no_license", "max_line_length": 88, "num_lines": 65, "path": "/graphs/eventually_safe.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nclass Solution:\n WHITE, GRAY, BLACK = 0, 1, 2\n \n def eventually_safe_nodes(self, graph):\n \"\"\"\n :type graph: List[List[int]]\n :rtype: List[int]\n \"\"\"\n colors = defaultdict(int)\n\n result_set = set()\n\n for node in range(len(graph)):\n self.dfs(node, graph, colors, result_set)\n \n return sorted(list(result_set))\n\n def dfs(self, node, graph, colors, result_set):\n if colors[node] != self.WHITE:\n return colors[node] == self.BLACK\n \n colors[node] = self.GRAY\n\n for nbr in graph[node]:\n if colors[nbr] == self.BLACK:\n continue\n \n if colors[nbr] == self.GRAY or not self.dfs(nbr, graph, colors, result_set):\n return False\n \n colors[node] = self.BLACK\n result_set.add(node)\n return 
True\n \n def eventually_safe_nodes2(self, graph):\n n = len(graph)\n out_degree = [0] * n\n in_nodes = defaultdict(list)\n terminales = []\n\n for i in range(n):\n out_degree[i] = len(graph[i])\n if out_degree[i] == 0:\n terminales.append(i)\n \n for j in graph[i]:\n in_nodes[j].append(i)\n \n for term in terminales:\n for in_node in in_nodes[term]:\n out_degree[in_node] -= 1\n if out_degree[in_node] == 0:\n terminales.append(in_node)\n\n return sorted(terminales) \n\n\n \n \nsolution = Solution()\ngraph = [[1,2],[2,3],[5],[0],[5],[],[]]\n\nprint(solution.eventually_safe_nodes2(graph))" }, { "alpha_fraction": 0.5544041395187378, "alphanum_fraction": 0.6243523359298706, "avg_line_length": 17.428571701049805, "blob_id": "fc8ee3fa20da80f4df0b3012b8b5a10c2ffb1fe8", "content_id": "9f9ab40a66f88b8fa8926098a321a43d8706ac16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 35, "num_lines": 21, "path": "/utils/treeUtils.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\ndef generate_tree():\n node1 = TreeNode(1)\n node2 = TreeNode(2)\n node3 = TreeNode(3)\n node4 = TreeNode(4)\n node5 = TreeNode(5)\n node6 = TreeNode(6)\n node7 = TreeNode(7)\n\n node1.left = node2\n node1.right = node3\n\n node2.left = node4\n node2.right = node5\n\n node3.left = node6\n node6.left = node7\n\n return node1" }, { "alpha_fraction": 0.5619295835494995, "alphanum_fraction": 0.5762711763381958, "avg_line_length": 17.190475463867188, "blob_id": "8851c85913423b6235fd950c0863fe16305082a9", "content_id": "a8dab50fc2087f3c6be38425e3fda0b416675d31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 767, "license_type": "no_license", "max_line_length": 52, "num_lines": 42, "path": "/linkedList/remove_linked_list_elements.py", "repo_name": "aymane081/python_algo", 
"src_encoding": "UTF-8", "text": "# 203\n\nfrom utils.listNode import ListNode\n\nclass Solution:\n def remove_element(self, head, target):\n if not head:\n return\n \n dummy, prev = ListNode(None), ListNode(None)\n\n dummy.next = head\n prev = dummy\n\n while head:\n if head.value == target:\n prev.next = head.next\n else:\n prev = head\n\n head = head.next\n\n return dummy.next\n\none = ListNode(6)\ntwo = ListNode(2)\nthree = ListNode(6)\nfour = ListNode(3)\nfive = ListNode(4)\nsix = ListNode(5)\nseven = ListNode(6)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\nfive.next = six\nsix.next = seven\n\nprint(one)\n# solution = Solution()\n# print(solution.remove_element(one, 6)) " }, { "alpha_fraction": 0.5777778029441833, "alphanum_fraction": 0.5873016119003296, "avg_line_length": 36.5, "blob_id": "843944d4177f6c56293148a7752428c196da72c4", "content_id": "830416c26b28aba5efcc1ef37a8f687325726303", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, "license_type": "no_license", "max_line_length": 121, "num_lines": 16, "path": "/trees/longest_univalue_path.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#687\n\nclass Solution:\n def longest_univalue_path(self, root):\n if not root:\n return 0\n \n return max(self.longest_univalue_path(root.left),\n self.longest_univalue_path(root.right),\n self.straight_univalue_path(root.left, root.value) + self.straight_univalue_path(root.right, root.value))\n \n def straight_univalue_path(self, root, value):\n if not root or root.value != value:\n return 0\n \n return max(self.straight_univalue_path(root.left, value), self.straight_univalue_path(root.right, value)) + 1\n \n\n \n \n \n\n" }, { "alpha_fraction": 0.4193083643913269, "alphanum_fraction": 0.4639769494533539, "avg_line_length": 26.719999313354492, "blob_id": "6047add82af444a0f7eb51bf982f13bce9799db6", "content_id": 
"7d50199bc11d0553492c787499763043941caa68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 694, "license_type": "no_license", "max_line_length": 43, "num_lines": 25, "path": "/arrays/third_max.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def get_third_max(self, numbers):\n if not numbers:\n return None\n \n max1, max2, max3 = None, None, None\n\n for num in numbers:\n if num in (max1, max2, max3):\n continue\n elif not max1 or max1 < num:\n max3 = max2\n max2 = max1 \n max1 = num\n elif not max2 or max2 < num:\n max3 = max2\n max2 = num\n elif not max3 or max3 < num:\n max3 = num\n \n return max3 if max3 else max1\n \nsolution = Solution()\nnumbers = [3, 5, 8, 5, 5, 2, 5]\nprint(solution.get_third_max(numbers))\n\n" }, { "alpha_fraction": 0.4778972566127777, "alphanum_fraction": 0.5053763389587402, "avg_line_length": 24.33333396911621, "blob_id": "b0f258ae59d204b78841c4c65db1f191061059dd", "content_id": "50491ee9d4f9de60f27894fe97d23e3b5221a7a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 837, "license_type": "no_license", "max_line_length": 82, "num_lines": 33, "path": "/arrays/maximum_subarray.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n # time: O(N), space: O(N)\n def maximum_subarray(self, arr):\n if not arr:\n return 0\n\n result = arr[0]\n dp = [arr[0]]\n\n for i in range(1, len(arr)):\n dp.append(arr[i] + max(dp[i - 1], 0)) # longest subarray until index i\n result = max(result, dp[i])\n \n return result\n \n # time: O(N), space: O(1)\n def maximum_subarray2(self, arr):\n if not arr:\n return 0\n\n result = arr[0]\n max_ending_here = arr[0]\n\n for i in range(1, len(arr)):\n max_ending_here = arr[i] + max(max_ending_here, 0)\n result = max(result, max_ending_here)\n \n return result\n\n\ntest_arr = [-2, 1, -3, 4, -1, 2, 1, 
-5, 4]\nsolution = Solution()\nprint(solution.maximum_subarray2(test_arr))\n\n" }, { "alpha_fraction": 0.5072202086448669, "alphanum_fraction": 0.5234656929969788, "avg_line_length": 24.227272033691406, "blob_id": "972abaa4f4b56ecf6db9de48f1bc9898aa7d261f", "content_id": "12e4e1347cafa4ae7637e3bce169862fe97a50e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 554, "license_type": "no_license", "max_line_length": 67, "num_lines": 22, "path": "/dynamicProgramming/buy_sell_stock1.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_max_profit(self, prices):\n if not prices:\n return 0\n \n max_profit, max_here = 0, 0\n for i in range(1, len(prices)):\n max_here = max(max_here + prices[i] - prices[i - 1], 0)\n max_profit = max(max_profit, max_here)\n return max_profit\n\ndef get_max_profit2(self, prices):\n if not prices:\n return 0\n \n buy, sell = float('-inf'), 0\n\n for price in prices:\n buy = max(buy, - price)\n sell = max(sell, price + buy)\n \n return sell" }, { "alpha_fraction": 0.5166015625, "alphanum_fraction": 0.529296875, "avg_line_length": 18.843137741088867, "blob_id": "45cf6d6e69c4d9f21efa3e5b7bba6de309fa25cc", "content_id": "509ee27ba8488a2e9716e44bc5d4ef24e27c2568", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1024, "license_type": "no_license", "max_line_length": 85, "num_lines": 51, "path": "/trees/second_minimum_binary_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#671\n\nfrom utils.treeNode import TreeNode\n\nclass Solution:\n def second_minimum(self, root):\n if not root:\n return -1\n \n level = [root]\n curr_min = root.value\n\n while level:\n next_level = []\n for node in level:\n if node.left:\n next_level.append(node.left)\n next_level.append(node.right)\n \n candidates = [node.value for node in next_level if node.value > curr_min]\n if 
candidates:\n return min(candidates)\n \n level = next_level\n \n return -1\n\n\n# one = TreeNode(2)\n# two = TreeNode(2)\n# three = TreeNode(5)\n# four = TreeNode(5)\n# five = TreeNode(7)\n\n# one.left = two\n# one.right = three\n# three.left = four\n# three.right = five\n\none = TreeNode(2)\ntwo = TreeNode(2)\nthree = TreeNode(2)\n\none.left = two\none.right = three\n\nprint(one)\n\nprint('=========')\nsolution = Solution()\nprint(solution.second_minimum(one))\n " }, { "alpha_fraction": 0.49310871958732605, "alphanum_fraction": 0.5007656812667847, "avg_line_length": 35.27777862548828, "blob_id": "b6ee2c61d6442abf347438831ebad8b11c3a4652", "content_id": "9a59bdf5d2d7de403d92ac27fe932d7e60b676e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 653, "license_type": "no_license", "max_line_length": 107, "num_lines": 18, "path": "/dynamicProgramming/partition_equal_subset_sum.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def can_partition(self, nums):\n sum_nums = sum(nums)\n if sum_nums % 2:\n return False\n \n target = sum_nums // 2\n nums.sort(reverse = True) #Try the largest numbers first, since less operations to reach the target\n subset_sum = [True] + [False for _ in range(target)]\n\n for num in nums:\n for i in range(target - 1, -1, -1): # Try the largest sums first\n if num + i <= target and subset_sum[i]:\n if i + num == target:\n return True\n subset_sum[num + i] = True\n \n return False\n" }, { "alpha_fraction": 0.32905659079551697, "alphanum_fraction": 0.34867924451828003, "avg_line_length": 29.837209701538086, "blob_id": "224fd447a89167d834595cea5f13847591cbf8c2", "content_id": "86e700fe9b6dff2bbcc3fb16b68fac68b81591c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1325, "license_type": "no_license", "max_line_length": 60, "num_lines": 43, "path": 
"/arrays/longestLineOfconsecutiveOnes.js", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution {\n longestLine(matrix) {\n let result = 0,\n rows = matrix.length,\n cols = matrix[0].length;\n \n for (let row = 0; row < matrix.length; row++) {\n let count = 0;\n for (let col = 0; col < cols; col++) {\n count = matrix[row][col] * (1 + count);\n result = Math.max(result, count);\n }\n }\n \n for (let col = 0; col < cols; col++) {\n let count = 0;\n for (let row = 0; row < rows; row++) {\n count = matrix[row][col] * (count + 1);\n result = Math.max(result, count);\n }\n }\n \n for (let i = 0; i < rows + cols - 1; i++) {\n let count1 = 0,\n count2 = 0;\n \n for (let j = i; j >= 0; j--) {\n if (i - j < rows && j < cols) {\n count1 = matrix[i - j][j] * (count + 1);\n result = Math.max(result, count1);\n }\n \n let t = rows + j - i - 1;\n if (t >= 0 && t < rows && t < cols) {\n count2 = matrix[t][t] * (count2 + 1);\n result = Math.max(result, count2);\n }\n }\n }\n \n return result;\n }\n}" }, { "alpha_fraction": 0.5040916800498962, "alphanum_fraction": 0.512274980545044, "avg_line_length": 27.809524536132812, "blob_id": "b0c25890853cb67a205ad53491744845d6d49b41", "content_id": "32d9e097852b3b08312b853f6c6a155660599fd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 611, "license_type": "no_license", "max_line_length": 87, "num_lines": 21, "path": "/trees/complete_tree_nodes_count.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def count_nodes(self, node):\n if not node:\n return 0\n \n left_depth, right_depth = self.get_depth(node.left), self.get_depth(node.right)\n\n if left_depth == right_depth:\n #left side is complete\n return 2 ** (left_depth) + self.count_nodes(node.right)\n else:\n # right side is complete\n return 2 ** (right_depth) + self.count_nodes(node.left)\n \n def get_depth(self, node):\n depth = 0\n while node:\n 
depth += 1\n node = node.left\n \n return depth\n \n\n" }, { "alpha_fraction": 0.5727091431617737, "alphanum_fraction": 0.5856573581695557, "avg_line_length": 25.289474487304688, "blob_id": "1b14a4afbefdbade72da342a63db1a0d1c632b78", "content_id": "2e6e301cbe1e58c3d902eca90a0d0b91d7bb5e80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 86, "num_lines": 38, "path": "/utils/graph.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nclass Vertex:\n def __init__(self, key):\n self.id =key\n self.connectedTo = {}\n \n def add_neighbor(self, nbr, weight = 0):\n self.connectedTo[nbr] = weight\n \n def __str__(self):\n return str(self.id) + ' connectedTo: ' + str([x.id for x in self.connectedTo])\n \n def get_connections(self):\n return self.connectedTo.keys()\n \n def get_id(self):\n return self.id\n \n def get_weight(self, nbr):\n return self.connectedTo[nbr]\n\nclass Graph:\n def __init__(self):\n self.vert_list = defaultdict(set)\n \n def add(self, node1, node2):\n self.vert_list[node1].add(node2)\n self.vert_list[node2].add(node1)\n \n def get_vertices(self):\n return self.vert_list.keys()\n \n def __iter__(self):\n return iter(self.vert_list.keys())\n\n def is_connected(self, node1, node2):\n return node2 in self.vert_list[node1] and node1 in self.vert_list[node2]\n \n" }, { "alpha_fraction": 0.4742893576622009, "alphanum_fraction": 0.48706483840942383, "avg_line_length": 37.654319763183594, "blob_id": "a692b8b14628170891acc7e9c29b5fca53cda749", "content_id": "ad8f591faf88c752db3708bff8c48fc21a7b5d89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3131, "license_type": "no_license", "max_line_length": 146, "num_lines": 81, "path": "/graphs/pacific_atlantic.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": 
"from utils.matrix import Matrix\n\nclass Solution:\n def pacific_atlantic(self, matrix):\n if not matrix or not matrix[0]:\n return []\n \n rows, cols = len(matrix), len(matrix[0])\n pacific, atlantic = set(), set()\n\n for row in range(rows):\n atlantic.add((row, cols - 1))\n pacific.add((row, 0))\n \n for col in range(cols):\n atlantic.add((rows - 1, col))\n pacific.add((0, col))\n \n for ocean in [atlantic, pacific]:\n frontier = set(ocean)\n \n while frontier:\n new_frontier = set()\n for row, col in frontier:\n for dir_r, dir_c in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n new_row, new_col = row + dir_r, col + dir_c\n \n if new_row < 0 or new_row >= rows or new_col < 0 or new_col >= cols or (new_row, new_col) in ocean:\n continue\n \n if matrix[new_row][new_col] >= matrix[row][col]:\n new_frontier.add((new_row, new_col))\n \n frontier = new_frontier\n ocean |= frontier\n \n return list(atlantic & pacific)\n\n\n def pacific_atlantic2(self, matrix):\n \"\"\"\n type matrix: Matrix\n rtype: List([int, int])\n \"\"\"\n\n if not matrix or not matrix.rows_count or matrix.cols_count:\n raise Exception('Invalid Matrix')\n \n reached_cells = [[0 for _ in range(matrix.rows_count)] for _ in range(matrix.cols_count)]\n pacific_queue = []\n atlantic_queue = []\n\n for row in range(matrix.rows_count):\n pacific_queue.append((row, 0))\n atlantic_queue.append((row, matrix.rows_count - 1))\n reached_cells[row][0] += 1\n reached_cells[row][matrix.rows_count - 1] += 1\n \n for col in range(matrix.cols_count):\n pacific_queue.append((0, col))\n atlantic_queue.append((col, matrix.rows_cols - 1))\n reached_cells[0][col] += 1\n reached_cells[row][matrix.rows_cols - 1] += 1\n \n self.bfs(pacific_queue, matrix, pacific_queue[:], reached_cells)\n self.bfs(atlantic_queue, matrix, atlantic_queue[:], reached_cells)\n\n return [[row, col] for row in range(matrix.rows_count) for col in range(matrix.cols_count) if reached_cells[row][col] == 2]\n \n def bfs(self, queue, matrix, visited, 
reached_cells):\n while queue:\n new_queue = []\n for (row, col) in queue:\n for dir_r, dir_c in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n new_row, new_col = row + dir_r, col + dir_c\n if not matrix.is_valid_cell(new_row, new_col) or (new_row, new_col) in visited or matrix[row][col] > matrix[new_row][new_col]:\n continue\n new_queue.append((new_row, new_col))\n matrix[new_row][new_col] += 1\n \n queue = new_queue\n" }, { "alpha_fraction": 0.3675675690174103, "alphanum_fraction": 0.3675675690174103, "avg_line_length": 22.782608032226562, "blob_id": "7b6eb9ce851e5807635bf2bf8f2af155fa6179d0", "content_id": "89372fecf04121f82c98a12861c0b6f4c9cce1ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 555, "license_type": "no_license", "max_line_length": 49, "num_lines": 23, "path": "/next_right_pointer2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def connect(self, node):\n if not node:\n return\n \n level = [node]\n\n while level:\n prev = None\n next_level = []\n \n for node in level:\n if prev:\n prev.next = node\n \n prev = node\n\n if node.left:\n next_level.append(node.left)\n if node.right:\n next_level.append(node.right)\n \n level = next_level\n " }, { "alpha_fraction": 0.4536912739276886, "alphanum_fraction": 0.47248321771621704, "avg_line_length": 27.69230842590332, "blob_id": "5c687dd046df48a37090135300cff5c5d49fff4e", "content_id": "6ed473b6776804f65ac857da7e9951503811656d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 745, "license_type": "no_license", "max_line_length": 77, "num_lines": 26, "path": "/dynamicProgramming/coin_change.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def coin_change(self, coins, amount):\n if amount < 1:\n return -1\n \n memo = [0 for _ in range(amount)]\n return self.helper(coins, amount, memo)\n \n def helper(self, 
coins, amount, memo):\n if amount < 0:\n return -1\n \n if amount == 0:\n return 0\n \n if memo[amount - 1]:\n return memo[amount - 1]\n \n min_count = float('inf')\n for coin in coins:\n res = self.helper(coins, amount - coin, memo)\n if res >= 0 and res < min_count:\n min_count = res\n \n memo[amount - 1] = -1 if min_count == float('inf') else 1 + min_count\n return memo[amount - 1]" }, { "alpha_fraction": 0.5146138072013855, "alphanum_fraction": 0.5187891721725464, "avg_line_length": 26.399999618530273, "blob_id": "c8d4b84269959350ec4363088b6c840c6a021b6a", "content_id": "824ef7051578dbb96ab380d01b42c5fbe023c3e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 958, "license_type": "no_license", "max_line_length": 54, "num_lines": 35, "path": "/trees/binary_tree_zigzag_level_order.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\nfrom utils.treeUtils import generate_tree\n\nclass Solution:\n def get_zigzag_level_order(self, node):\n result = []\n if not node:\n return result\n should_reverse = False\n level_nodes = [node]\n \n while level_nodes:\n result.append([])\n new_level_nodes = []\n \n for node in level_nodes:\n result[-1].append(node.value)\n\n if node.left:\n new_level_nodes.append(node.left)\n if node.right:\n new_level_nodes.append(node.right)\n \n level_nodes = new_level_nodes\n \n if should_reverse:\n result[-1] = result[-1][::-1]\n should_reverse = not should_reverse\n \n return result\n\nroot = generate_tree()\nprint(root)\nsolution = Solution()\nprint(solution.get_zigzag_level_order(root))" }, { "alpha_fraction": 0.571890115737915, "alphanum_fraction": 0.5815832018852234, "avg_line_length": 16.714284896850586, "blob_id": "e3c9b51eb415269b295bd50f46f959ab039af5f6", "content_id": "e5ab024b36bfa801eb74b587d52298114d0620c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
619, "license_type": "no_license", "max_line_length": 38, "num_lines": 35, "path": "/linkedList/swap_nodes_in_pair.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "#24\n\nfrom utils.listNode import ListNode\n\nclass Solution:\n def swap_pairs(self, head):\n if not head:\n return None\n\n dummy = prev = ListNode(None)\n\n while head and head.next:\n next_head = head.next.next\n prev.next = head.next\n head.next.next = head\n prev = head\n head = next_head\n\n prev.next = head\n \n return dummy.next\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\n\none.next = two\ntwo.next = three\nthree.next = four\n\nprint(one)\n\nsolution = Solution()\nprint(solution.swap_pairs(one))" }, { "alpha_fraction": 0.5149147510528564, "alphanum_fraction": 0.5298295617103577, "avg_line_length": 20.33333396911621, "blob_id": "4cde02547b3b5101c1c08716111c3590e53e606b", "content_id": "2bac8b6c518ea7f0fd61f91771c9b5a7c729be51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1408, "license_type": "no_license", "max_line_length": 66, "num_lines": 66, "path": "/trees/add_one_row_to_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 623\n\nfrom utils.treeNode import TreeNode\n\n# time: O(N)\n# space: O(N) or the max number of node at each level\nclass Solution:\n def add_row(self, root, v, d):\n if d == 1:\n new_root = TreeNode(v)\n new_root.left = root\n return new_root\n \n current_level = [root]\n while d > 2:\n d -= 1\n new_level = []\n for node in current_level:\n if node.left:\n new_level.append(node.left)\n \n if node.right:\n new_level.append(node.right)\n \n current_level = new_level\n \n # current_level is at d - 1\n for node in current_level:\n node.left, node.left.left = TreeNode(v), node.left\n node.right, node.right.right = TreeNode(v), node.right\n \n\n return root\n\n# one = TreeNode(1)\n# two = TreeNode(2)\n# three = 
TreeNode(3)\n# four = TreeNode(4)\n# five = TreeNode(5)\n# six = TreeNode(6)\n\n# four.left = two\n# four.right = six\n# two.left = three\n# two.right = one\n# six.left = five\n\none = TreeNode(1)\ntwo = TreeNode(2)\nthree = TreeNode(3)\nfour = TreeNode(4)\nfive = TreeNode(5)\nsix = TreeNode(6)\n\nfour.left = two\n# four.right = six\ntwo.left = three\ntwo.right = one\n# six.left = five\n\nprint(four)\n\nprint('==============')\n\nsolution = Solution()\nprint(solution.add_row(four, 1, 3))\n" }, { "alpha_fraction": 0.5689085125923157, "alphanum_fraction": 0.5931642651557922, "avg_line_length": 32.55555725097656, "blob_id": "f9e2c66afff595ee3609bfd2cad7f138c5737dc4", "content_id": "9e3fccb8157a4481f9bd9c0de6058824ec38bd13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 907, "license_type": "no_license", "max_line_length": 124, "num_lines": 27, "path": "/trees/tree_from_preorder_inorder.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeUtils import TreeNode\n\nclass Solution:\n def build_tree(self, pre_order, in_order):\n if not pre_order:\n return None\n \n return self.helper(0, 0, len(in_order) - 1, pre_order, in_order)\n \n def helper(self, pre_start, in_start, in_end, pre_order, in_order):\n if pre_start > len(pre_order) - 1 or in_start > in_end:\n return None\n \n value = pre_order[pre_start]\n in_order_index = in_order.index(value)\n\n root = TreeNode(value)\n root.left = self.helper(pre_start + 1, in_start, in_order_index - 1, pre_order, in_order)\n root.right = self.helper(pre_start + 1 + in_order_index - in_start, in_order_index + 1, in_end, pre_order, in_order)\n\n return root\n\nin_order = [4, 2, 5, 1, 3, 6, 7]\npre_order = [1, 2, 4, 5, 3, 6, 7]\n\nsolution = Solution()\nprint(solution.build_tree(pre_order, in_order))\n\n" }, { "alpha_fraction": 0.43291139602661133, "alphanum_fraction": 0.4438818693161011, "avg_line_length": 33.882354736328125, "blob_id": 
"29163860dc9ca972fbf7245effca31136b24f3ec", "content_id": "99ed38f7c7b01821275c8c4349cd6fb74ce9e10c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1185, "license_type": "no_license", "max_line_length": 111, "num_lines": 34, "path": "/strings/basic_calculator.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n # O(n) time and space\n def basic_calculator(self, expression):\n if not expression:\n return\n\n stack = []\n num = 0\n operation = '+'\n\n for i, c in enumerate(expression):\n if c.isdigit():\n num = num * 10 + int(c)\n \n if i == len(expression) - 1 or (not c.isdigit() and c != ' '): # c is operator or end of string\n if operation == '+':\n stack.append(num)\n elif operation == '-':\n stack.append(-num)\n elif operation == '*':\n stack.append(stack.pop() * num)\n elif operation == '/':\n left = stack.pop()\n stack.append(left // num)\n if left // num < 0 and left % num != 0:\n stack[-1] += 1 # negative integer division result with remainder rounds down by default\n \n num = 0 # num has been used, so reset\n operation = c\n \n return sum(stack)\n\nsolution = Solution()\nprint(solution.basic_calculator('1 + 2 * 3 - 4'))" }, { "alpha_fraction": 0.49274584650993347, "alphanum_fraction": 0.516389012336731, "avg_line_length": 19.439559936523438, "blob_id": "4e94d99730286160a046ad7932949063a307460f", "content_id": "2dab8e50a4f31e3ed37c280efeefe7803bf0a4ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1861, "license_type": "no_license", "max_line_length": 61, "num_lines": 91, "path": "/linkedList/add_two_numbers.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 445\nfrom utils.listNode import ListNode\n\nclass Solution:\n def add(self, head1, head2):\n num1 = self.listToInt(head1)\n num2 = self.listToInt(head2)\n\n return self.intToList(num1 + num2)\n \n def 
listToInt(self, head):\n result = 0\n if not head:\n return result\n \n while head:\n result = (result * 10) + head.value\n head = head.next\n \n return result\n \n def intToList(self, num):\n dummy = prev = ListNode(None)\n\n for c in str(num):\n prev.next = ListNode(int(c))\n prev = prev.next\n \n return dummy.next\n\nclass Solution2:\n def add(self, head1, head2):\n rev1, rev2 = self.reverse(head1), self.reverse(head2)\n\n carry = 0\n\n total_head = total_tail = ListNode(None)\n\n while rev1 or rev2:\n total = carry\n\n if rev1:\n total += rev1.value\n rev1 = rev1.next\n \n if rev2:\n total += rev2.value\n rev2 = rev2.next\n \n total_tail.next = ListNode(total % 10)\n carry = total // 10\n\n total_tail = total_tail.next\n \n if carry:\n total_tail.next = ListNode(carry)\n \n return self.reverse(total_head.next)\n\n \n def reverse(self, head):\n if not head:\n return head\n \n rev = None\n\n while head:\n rev, rev.next, head = head, rev, head.next\n \n return rev\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\nsix = ListNode(6)\nseven = ListNode(7)\n\none.next = two\ntwo.next = three\nthree.next = four\n\nfive.next = six\nsix.next = seven\n\nprint(one)\nprint(five)\n\nsolution = Solution2()\nprint(solution.add(one, five))\n\n" }, { "alpha_fraction": 0.4980340898036957, "alphanum_fraction": 0.5019659399986267, "avg_line_length": 23.354839324951172, "blob_id": "3a9e06052299e506914bb3f412a7165fa3fe3f65", "content_id": "0e45e0f99f679c3e386758d31a03cb37ae98317c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 763, "license_type": "no_license", "max_line_length": 50, "num_lines": 31, "path": "/trees/bst_iterator.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass BSTIterator:\n # O(n) worst case for time and space\n def __init__(self, root):\n self.stack = []\n while root:\n 
self.stack.append(root)\n root = root.left\n \n # O(1) time and space\n def has_next(self):\n return len(self.stack) > 0\n \n #time: O(n) worst case, O(1) average\n # space: O(n) worst case, O(log n) if balanced\n def next(self):\n if not self.has_next():\n return None\n \n node = self.stack.pop()\n result = node.value\n\n if node.right:\n node = node.right\n \n while node:\n self.stack.append(node)\n node = node.left\n\n return result\n \n" }, { "alpha_fraction": 0.44356659054756165, "alphanum_fraction": 0.4650112986564636, "avg_line_length": 28.53333282470703, "blob_id": "727a0f9fcba1071683e52103f0c22d773635a3e2", "content_id": "f776a3e0c6675029fb8f22a26491b405cca47570", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 886, "license_type": "no_license", "max_line_length": 108, "num_lines": 30, "path": "/binarySearch/search_insert_position.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_insert_position(self, nums, value):\n left, right = 0, len(nums) - 1\n # left <= right because we want the first occurrence of left > right, aka, left is bigger than value\n while left <= right:\n mid = (left + right) // 2\n if mid == value:\n return mid\n if mid < value:\n left = mid + 1\n else: \n right = mid - 1\n return left\n \n def get_insert_position2(self, nums, value):\n left, right = 0, len(nums) - 1\n \n while left <= right:\n mid = (left + right) // 2\n \n if nums[mid] > value:\n right = mid - 1\n else:\n left = mid + 1\n \n return left\n\nnums = [1, 2, 3, 3, 5, 8]\nsolution = Solution()\nprint(solution.get_insert_position2(nums, 8))\n" }, { "alpha_fraction": 0.5664893388748169, "alphanum_fraction": 0.5678191781044006, "avg_line_length": 27.461538314819336, "blob_id": "71980ca30bfaf780688a29f9f27752fe31e3aa80", "content_id": "3232777b09c2bdaef9feec60a77138628377a0c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 752, "license_type": "no_license", "max_line_length": 95, "num_lines": 26, "path": "/trees/path_sum2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n def get_paths_sum(self, node, target):\n result = []\n if not node:\n return result\n \n self.get_paths_helper([], target, node, result)\n\n return result\n \n def get_paths_helper(self, path, target, node, result):\n if not node:\n return\n \n target -= node.value\n path.append(node.value)\n\n if target == 0 and not node.left and not node.right:\n result.append(path[:]) # import to add a new copy of path, because path will change\n \n self.get_paths_helper(path, target, node.left, result)\n self.get_paths_helper(path, target, node.right, result)\n\n path.pop()\n " }, { "alpha_fraction": 0.54347825050354, "alphanum_fraction": 0.5597826242446899, "avg_line_length": 29.70833396911621, "blob_id": "cee0477b2e5b91779639a81d9a2a12bfe1b9e0fd", "content_id": "8f6f0e45c2a17183f47a1d6c0a2d4326c1274916", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 736, "license_type": "no_license", "max_line_length": 67, "num_lines": 24, "path": "/arrays/subsets.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n # Time: O(2 ** n), space(n * 2**n)\n def generate_subsets_from_unique(self, nums):\n result = []\n self.backtrack(nums, [], 0, result)\n return result\n \n def generate_subsets_from_duplicates(self, nums):\n result = []\n nums.sort()\n self.backtrack(nums, [], 0, result)\n return result\n \n def backtrack(self, nums, prefix, start, result):\n result.append(prefix)\n for i in range(start, len(nums)):\n if i > start and nums[i] == nums[i-1]:\n continue\n self.backtrack(nums, prefix + [nums[i]], i + 1, result)\n\nsolution = Solution()\nnums = [1,2,3]\n# nums = [1,2,2]\nprint(solution.generate_subsets_from_unique(nums))" }, { 
"alpha_fraction": 0.4308152496814728, "alphanum_fraction": 0.445774108171463, "avg_line_length": 38.35293960571289, "blob_id": "77593684a10599fec72b8816cf0910bf3a818ac7", "content_id": "4c9ef24d43750af5b70a4e9d3fc499dfec94b38e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1337, "license_type": "no_license", "max_line_length": 100, "num_lines": 34, "path": "/arrays/four_way.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# time: O(n ** 3)\n\nclass Solution:\n ELEMENTS_COUNT = 4\n\n def four_way(self, nums, target):\n results = []\n self.n_way(sorted(nums), target, [], self.ELEMENTS_COUNT, results)\n return results\n \n def n_way(self, nums, target, partial, n, results):\n if len(nums) < n or nums[0] * n > target or nums[-1] * n < target:\n return\n \n if n == 2:\n left, right = 0, len(nums)\n while left < right:\n if nums[left] + nums[right] == target:\n results.append(partial + [nums[left], nums[right]])\n left += 1\n right -= 1\n while nums[right] == nums[right + 1] and right > left:\n right -= 1\n while nums[left] == nums[left - 1] and left < right:\n left += 1\n elif nums[left] + nums[right] < target:\n left += 1\n else:\n right -= 1\n else:\n # add the next element in the list to partial, in order to get to 2-sum\n for i in range(len(nums) - n + 1):\n if i == 0 or nums[i] != nums[i - 1]: # avoid dups if possible\n self.n_way(nums[i + 1 :], target - nums[i], partial + [nums[i]], n - 1, results)" }, { "alpha_fraction": 0.5556792616844177, "alphanum_fraction": 0.5657015442848206, "avg_line_length": 20.404762268066406, "blob_id": "6e2fc90f35349a1bb173347076bb35776a79e8f5", "content_id": "acc232215e3f22cb542d08097832f23a10173844", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 898, "license_type": "no_license", "max_line_length": 53, "num_lines": 42, "path": "/linkedList/insertion_sort_list.py", "repo_name": 
"aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 147\n\nfrom utils.listNode import ListNode\n\n# time: O(N ** 2)\n# space: O(1)\nclass Solution:\n def insert_sort_list(self, head):\n sorted_tail = dummy = ListNode(float('-inf'))\n dummy.next = head\n \n while sorted_tail.next:\n node = sorted_tail.next\n\n if node.value >= sorted_tail.value:\n sorted_tail = sorted_tail.next\n continue\n \n sorted_tail.next = node.next\n\n insertion = dummy\n while insertion.next.value <= node.value:\n insertion = insertion.next\n \n node.next = insertion.next\n insertion.next = node\n\n return dummy.next\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\n\none.next = two\ntwo.next = three\nthree.next = four\n\nprint(one)\n\nsolution = Solution()\nprint(solution.insert_sort_list(one))" }, { "alpha_fraction": 0.461382120847702, "alphanum_fraction": 0.4776422679424286, "avg_line_length": 22.4761905670166, "blob_id": "b89738d1a5c938f4db145dd062c8e4bb9586c6e6", "content_id": "b29e950bcca4fc792583ba76455d964f198c37ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "no_license", "max_line_length": 50, "num_lines": 21, "path": "/arrays/find_duplicate.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_duplicate(self, nums):\n if not nums:\n return\n \n slow = nums[0]\n fast = nums[slow] # which is nums[nums[0]]\n while slow != fast:\n slow = nums[slow]\n fast = nums[nums[fast]]\n \n fast = 0\n while slow != fast:\n slow = nums[slow]\n fast = nums[fast]\n \n return fast\n\nnums = [1, 3, 2, 4, 3]\nsolution = Solution()\nprint(solution.get_duplicate(nums))" }, { "alpha_fraction": 0.5158924460411072, "alphanum_fraction": 0.5843520760536194, "avg_line_length": 24.625, "blob_id": "bc924b59a42ede47e09cc932570ecf10613464f5", "content_id": "c743ba9361e35fafcd73c96f93a6875a9edf64b3", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 87, "num_lines": 16, "path": "/arrays/min_cost_climbing_stairs.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 746\n\nclass Solution:\n def min_cost(self, costs):\n\n one_before, two_before = costs[1], costs[0]\n\n for i in range(2, len(costs)):\n one_before, two_before = min(one_before, two_before) + costs[i], one_before\n \n return min(one_before, two_before)\n\nsolution = Solution()\ncosts = [10, 15, 20]\n# costs = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]\nprint(solution.min_cost(costs))" }, { "alpha_fraction": 0.4554973840713501, "alphanum_fraction": 0.481675386428833, "avg_line_length": 26.33333396911621, "blob_id": "c58bf46ac2257b735eaa793eac53772a84b6e899", "content_id": "3cc7965ea0dc0a3d3e2e52b0114d1bdfddb34c89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 573, "license_type": "no_license", "max_line_length": 53, "num_lines": 21, "path": "/arrays/two_sum_sorted_array.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def two_sum(self, numbers, target):\n \"\"\"\n :type numbers: List[int]\n :type target: int\n :rtype: Tuple(int)\n \"\"\"\n\n left, right = 0, len(numbers) - 1\n while left < right:\n pair_sum = numbers[left] + numbers[right]\n if pair_sum == target:\n return (left + 1, right + 1)\n elif pair_sum < target:\n left += 1\n else:\n right -= 1\n\nsolution = Solution()\nnumbers = [1, 4, 8, 10, 18]\nprint(solution.two_sum(numbers, 12))" }, { "alpha_fraction": 0.6007326245307922, "alphanum_fraction": 0.6007326245307922, "avg_line_length": 19.259260177612305, "blob_id": "a44b57d2968ae309ca528d4503c4fd7db7bdcb8a", "content_id": "4f0604f6eead39a183a552873962983dc22c36b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 546, "license_type": 
"no_license", "max_line_length": 41, "num_lines": 27, "path": "/trees/flatten_binary_tree.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\nfrom utils.treeUtils import generate_tree\n\nclass Solution:\n def __init__(self):\n self.prev = None\n\n def flatten(self, node):\n if not node:\n return None\n\n self.prev = node\n self.flatten(node.left)\n\n temp = node.right\n node.right = node.left\n node.left = None\n\n self.prev.right = temp\n self.flatten(self.prev.right)\n\nroot = generate_tree()\nprint(root)\nprint('==============')\nsolution = Solution()\nsolution.flatten(root)\nprint(root)" }, { "alpha_fraction": 0.5298759937286377, "alphanum_fraction": 0.5422773361206055, "avg_line_length": 18.733333587646484, "blob_id": "fe70f24885a9ebaf64bf28c77d825c1315d893f4", "content_id": "4da3a1ecb6b6c9d12a47e77de2c1cd6feb218316", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 887, "license_type": "no_license", "max_line_length": 48, "num_lines": 45, "path": "/trees/find_bottom_left_value.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 513\n\nfrom utils.treeNode import TreeNode\n\n# time: O(N)\n# space: O(N)\nclass Solution:\n def find_bottom_left_value(self, root):\n level = [root]\n most_left = None\n\n while level:\n most_left = level[0].value\n new_level = []\n\n for node in level:\n if node.left:\n new_level.append(node.left)\n \n if node.right:\n new_level.append(node.right)\n \n level = new_level\n \n return most_left\n\none = TreeNode(1)\ntwo = TreeNode(2)\nthree = TreeNode(3)\nfour = TreeNode(4)\nfive = TreeNode(5)\nsix = TreeNode(6)\nseven = TreeNode(7)\n\none.left = five\nfive.left = three\nfive.right = four\none.right = two\ntwo.right = six\n# six.right = seven\n\nprint(one)\nprint('===========')\nsolution = Solution()\nprint(solution.find_bottom_left_value(one))" }, { "alpha_fraction": 0.42129629850387573, 
"alphanum_fraction": 0.4444444477558136, "avg_line_length": 20.600000381469727, "blob_id": "baaa7f822b67c93a03c7b7d2e2f3c96b0fc8f270", "content_id": "3e890113b855a905d5996767475f12aa049ad5bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 39, "num_lines": 20, "path": "/arrays/remove_element.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def remove_element(self, arr, val):\n if not arr:\n return 0\n\n j = 0\n for i in range(len(arr)):\n if arr[i] != val:\n arr[j] = arr[i]\n j += 1\n \n for i in range(len(arr) - j):\n arr.pop()\n\n return j\n \nsolution = Solution()\narr = [1, 2, 3, 2, 4, 5]\nprint(solution.remove_element(arr, 2))\nprint(arr)\n" }, { "alpha_fraction": 0.4897400736808777, "alphanum_fraction": 0.5075239539146423, "avg_line_length": 26, "blob_id": "5d718c96257993ed530b92eb242d375332e2d1f2", "content_id": "a6f9ed57b266bd821cf72ad4c6f892f4e2e34012", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 731, "license_type": "no_license", "max_line_length": 91, "num_lines": 27, "path": "/dynamicProgramming/ugly_number.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_ugly_number(self, n, primes):\n if n <= 0:\n return 0\n if n == 1:\n return 1\n\n uglys = [1]\n indices = [0 for _ in range(len(primes))]\n candidates = primes[:]\n\n while len(uglys) < n:\n ugly = min(candidates)\n\n uglys.append(ugly)\n\n # increment the correct indexes to avoid duplicates, and set the new candidates\n for i in range(len(indices)):\n if candidates[i] == ugly:\n indices[i] += 1\n candidates[i] = uglys[indices[i]] * primes[i]\n\n return uglys[-1]\n\nsolution = Solution()\nprimes = [2, 3, 5]\nprint(solution.get_ugly_number(11, primes))\n\n\n" }, { "alpha_fraction": 0.49034038186073303, "alphanum_fraction": 
0.5096595883369446, "avg_line_length": 26.897436141967773, "blob_id": "378fecb104575992f608ae7a5fffa5a3e69b4a5d", "content_id": "43ff47e55a1942d2e491556a5d0f840ff1abb408", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1087, "license_type": "no_license", "max_line_length": 79, "num_lines": 39, "path": "/trees/unique_bst.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_unique_bst_count(self, n):\n if n <= 0:\n return 0\n \n return self.get_unique_bst_count_rec(1, n)\n \n def get_unique_bst_count_rec(self, start, end):\n if start >= end:\n return 1\n \n result = 0\n for i in range(start, end + 1):\n left_count = self.get_unique_bst_count_rec(start, i - 1)\n right_count = self.get_unique_bst_count_rec(i + 1, end)\n result += left_count * right_count\n \n return result\n \n def get_unique_bst_count2(self, n):\n memo = [-1 for _ in range(n + 1)]\n memo[0], memo[1] = 1, 1\n\n return self.helper(n, memo)\n\n def helper(self, n, memo):\n if memo[n] != -1:\n return memo[n]\n \n count = 0\n for i in range(n):\n # how many ways can i distribute n - 1 nodes between left and right\n count += self.helper(i, memo) * self.helper(n - 1 - i, memo)\n \n return count\n\n \nsolution = Solution()\nprint(solution.get_unique_bst_count2(3))" }, { "alpha_fraction": 0.5425400733947754, "alphanum_fraction": 0.5450061559677124, "avg_line_length": 30.230770111083984, "blob_id": "fb123263a65b5a87e6f1b2f587f4e5c1a8e5a19e", "content_id": "2e1c62b8ccc8575fdf741abceb116130b4b5d673", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 159, "num_lines": 26, "path": "/graphs/network_delay_time.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nclass Solution:\n def networkDelayTime(self, times, K):\n graph = 
self.build_graph(times)\n dist = { node: int('float') for node in graph }\n\n def dfs(node, elapsed):\n if elapsed >= dist[node]:\n return\n \n dist[node] = elapsed\n for time, nbr in sorted(graph[node]): # start with the node with the smallest travel time, as it has more chances of reaching all the nodes quicker\n dfs(nbr, elapsed + time)\n \n dfs(K, 0)\n\n ans = max(dist.values())\n return ans if ans != float('inf') else -1\n \n def build_graph(self, times):\n graph = defaultdict(list)\n for u, v, w in times:\n graph[u].append((w, v))\n \n return graph" }, { "alpha_fraction": 0.5597269535064697, "alphanum_fraction": 0.5847554206848145, "avg_line_length": 30.39285659790039, "blob_id": "8c4cf063b114ad179055ad0a93c818d16a285e8a", "content_id": "cfb700ae3c181d67c035f0f0a708a1e0c3988111", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 879, "license_type": "no_license", "max_line_length": 110, "num_lines": 28, "path": "/trees/tree_from_postorder_inorder.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n def build_tree(self, in_order, post_order):\n if not in_order:\n return None\n \n return self.build(len(post_order) - 1, 0, len(in_order) - 1, in_order, post_order)\n \n def build(self, post_end, in_start, in_end, in_order, post_order):\n if post_end < 0 or in_start > in_end:\n return None\n \n value = post_order[post_end]\n in_index = in_order.index(value)\n\n root = TreeNode(value)\n\n root.right = self.build(post_end - 1, in_index + 1, in_end, in_order, post_order)\n root.left = self.build(post_end - 1 - in_end + in_index, in_start, in_index - 1, in_order, post_order)\n\n return root\n\nin_order = [4, 2, 5, 1, 3, 6, 7]\npost_order = [4, 5, 2, 7, 6, 3, 1]\n\nsolution = Solution()\nprint(solution.build_tree(in_order, post_order))\n" }, { "alpha_fraction": 0.4182373881340027, "alphanum_fraction": 0.44982171058654785, 
"avg_line_length": 28.75757598876953, "blob_id": "daa1bd02c96bdabb605bf74ea9b7aea39a6588fa", "content_id": "2d444656619c5b0dac85d0187f30a62e763ffb59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1963, "license_type": "no_license", "max_line_length": 84, "num_lines": 66, "path": "/arrays/unique_paths2.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "import unittest\n\nclass Solution:\n def get_unique_paths_count(self, matrix):\n row_count, col_count = len(matrix), len(matrix[0])\n if row_count == 0 or col_count == 0:\n raise Exception('Unvalid Matrix')\n \n if matrix[0][0] or matrix[-1][-1]:\n return 0\n \n row_paths = [0 for _ in range(col_count)]\n path_count = 0\n\n for row in range(row_count):\n new_row_paths = []\n for col in range(col_count):\n if row == 0 and col == 0:\n path_count = 1\n elif matrix[row][col] == 1:\n path_count = 0\n else:\n path_count = row_paths[col]\n path_count += new_row_paths[-1] if col > 0 else 0\n new_row_paths.append(path_count)\n row_paths = new_row_paths \n\n return row_paths[-1]\n \n def uniquePathsWithObstacles(self, grid):\n \"\"\"\n :type obstacleGrid: List[List[int]]\n :rtype: int\n \"\"\"\n \n if grid[0][0] or grid[-1][-1]:\n return 0\n \n m, n = len(grid), len(grid[0])\n \n ways = [0] * (n + 1)\n ways[1] = 1\n \n for row in range(1, m + 1):\n new_ways = [0]\n for col in range(1, n + 1):\n if grid[row - 1][col - 1] == 1:\n new_ways.append(0)\n else:\n new_ways.append(new_ways[-1] + ways[col])\n \n ways = new_ways\n \n return ways[-1]\n\nclass Test(unittest.TestCase):\n test_data = [([[0,0,0], [0,1,0], [0,0,0]], 2), ([[0,0,0], [1,0,1], [0,0,0]], 1)]\n\n def test_get_unique_paths_count(self):\n solution = Solution()\n for data in self.test_data:\n actual = solution.uniquePathsWithObstacles(data[0])\n self.assertEqual(actual, data[1])\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.4586583375930786, 
"alphanum_fraction": 0.4976598918437958, "avg_line_length": 34.66666793823242, "blob_id": "b0d5ca491217efc8a9889bf8e45cfa5d1f3c8508", "content_id": "7ee671fef723b6cb72fb548925aaf6e2935630df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "no_license", "max_line_length": 79, "num_lines": 18, "path": "/arrays/find_minimum_rotated_sorted_arary.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_minimum(self, nums):\n left, right = 0, len(nums) - 1\n while left < right:\n if nums[left] <= nums[right]: # sorted array, return nums[left]\n break\n mid = (left + right) // 2\n if nums[mid] < nums[left]: # min is either mid the left side of mid\n right = mid\n else: # nums[mid] >= nums[left] > num[right] => mid is not min\n left = mid + 1\n return nums[left]\n\nsolution = Solution()\n# nums = [3, 4, 5, 6, 7, 1, 2]\n# nums = [1, 2, 3, 4, 5, 6]\nnums = [7, 8, 1, 2, 3, 4, 5, 6]\nprint(solution.get_minimum(nums))" }, { "alpha_fraction": 0.5562273263931274, "alphanum_fraction": 0.5683192014694214, "avg_line_length": 18.20930290222168, "blob_id": "ae1522142a4010e1f46e8a9589c755cb77881b71", "content_id": "6bf7ed47f6f1f207479a68385efa703c3b36a46e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 827, "license_type": "no_license", "max_line_length": 53, "num_lines": 43, "path": "/linkedList/palindrome_linked_list.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "# 234\n\nfrom utils.listNode import ListNode\n\nclass Solution:\n def is_palindrome(self, head):\n if not head:\n return False\n \n rev, slow, fast = None, head, head\n\n while fast and fast.next:\n fast = fast.next.next\n rev, rev.next, slow = slow, rev, slow.next\n \n if fast:\n slow = slow.next\n \n while rev and rev.value == slow.value:\n rev = rev.next\n slow = slow.next\n \n return not rev\n \n\none = 
ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(3)\nsix = ListNode(2)\nseven = ListNode(1)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\nfive.next = six\nsix.next = seven\n\nprint(one)\nsolution = Solution()\nprint(solution.is_palindrome(one))\n\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.523423433303833, "avg_line_length": 27.461538314819336, "blob_id": "f41a760f739899f3e9f7192b41fd4d7491eaa1ce", "content_id": "26139aac0815e6fd6010426c03e64d930de6256d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1110, "license_type": "no_license", "max_line_length": 74, "num_lines": 39, "path": "/arrays/rotate_array.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n # O(N) time and space\n def rotate_array(self, numbers, k):\n \"\"\"\n :type numbers: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n n = len(numbers)\n if k > n:\n raise ValueError('The array is not long enough to be rotated')\n return numbers[n - k:] + numbers[:n - k]\n \n # O(N) time, O(1) time\n def rotate_array2(self, numbers, k):\n \"\"\"\n :type numbers: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n n = len(numbers)\n if k > n:\n raise ValueError('The array is not long enough')\n self.reverse(numbers, 0, n - 1)\n self.reverse(numbers, 0, k - 1)\n self.reverse(numbers, k, n - 1)\n \n def reverse(self, numbers, left, right):\n while left < right:\n numbers[left], numbers[right] = numbers[right], numbers[left]\n left += 1\n right -= 1\n\n\nsolution = Solution()\n# print(solution.rotate_array([1,2,3,4,5,6,7], 1))\nnumbers = [1,2,3,4,5,6,7]\nsolution.rotate_array2(numbers, 3)\nprint(numbers)\n" }, { "alpha_fraction": 0.6057416200637817, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 19.490196228027344, "blob_id": "b86cf0dd824a07104eca735c9be298a8360740b6", "content_id": 
"cb7497fa7b74bdb6187b801e4f7c07631413ce1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1045, "license_type": "no_license", "max_line_length": 70, "num_lines": 51, "path": "/trees/left_leaves_sum.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "from utils.treeNode import TreeNode\n\nclass Solution:\n #O(N) time, O(1) space\n def get_left_leaves_sum(self, node):\n result = 0\n\n if not node:\n return result\n\n if node.left and not node.left.left and not node.left.right:\n result += node.left.value\n else:\n result += self.get_left_leaves_sum(node.left)\n\n result += self.get_left_leaves_sum(node.right)\n\n return result\n\n\nnode1 = TreeNode(1)\nnode2 = TreeNode(2)\nnode3 = TreeNode(3)\nnode4 = TreeNode(4)\nnode5 = TreeNode(5)\nnode6 = TreeNode(6)\nnode7 = TreeNode(7)\n\nnode1.left = node2\nnode1.right = node3\n\nnode2.left = node4\nnode2.right = node5\n\nnode3.left = node6\nnode6.left = node7\n\nprint(node1)\nsolution = Solution()\nprint(solution.get_left_leaves_sum(node1))\n\ndef left_leaves_sum(root):\n if not root:\n return 0\n \n result = 0\n \n if root.left and not root.left.left and not root.left.right:\n result += root.left.value\n\n result += left_leaves_sum(root.left) + left_leaves_sum(root.right) " }, { "alpha_fraction": 0.48635634779930115, "alphanum_fraction": 0.5056179761886597, "avg_line_length": 30.200000762939453, "blob_id": "3f14dd0a759993eb5995921882c9bc8f63df5f4e", "content_id": "6f75287b6c04ad9ef49bba145a8d4fe1a21d9995", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 623, "license_type": "no_license", "max_line_length": 59, "num_lines": 20, "path": "/arrays/minimum_size_subarray_sum.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution:\n def get_min_length(self, nums, target):\n \"\"\"\n type nums: List[int]\n type target: int\n :rtype : int\n \"\"\"\n 
min_length, sum_so_far, start = len(nums), 0, 0\n for i, num in enumerate(nums):\n sum_so_far += num\n while sum_so_far - nums[start] >= target:\n sum_so_far -= nums[start]\n start += 1\n min_length = min(min_length, i - start + 1)\n \n return min_length if min_length < len(nums) else 0\n\nsolution = Solution()\nnums = [2, 3, 1, 2, 4, 3]\nprint(solution.get_min_length(nums, 7))" }, { "alpha_fraction": 0.5230262875556946, "alphanum_fraction": 0.5460526347160339, "avg_line_length": 24.41666603088379, "blob_id": "48e27f6a8b9dea600f0cb7b7ee2f25399b2b11ff", "content_id": "2d61c8b049e6370f3428154e61fc1b9dc34631d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 304, "license_type": "no_license", "max_line_length": 48, "num_lines": 12, "path": "/arrays/missing_element.py", "repo_name": "aymane081/python_algo", "src_encoding": "UTF-8", "text": "class Solution(object):\n def missing_element(self, numbers):\n \"\"\"\n :type numbers: List[int]\n :rtype: int\n \"\"\"\n n = len(numbers)\n return (n * (n + 1) // 2) - sum(numbers)\n\nsolution = Solution()\nnumbers = [0, 2, 5, 3, 1]\nprint(solution.missing_element(numbers))" } ]
212
alan-valenzuela93/port-scanner
https://github.com/alan-valenzuela93/port-scanner
0414fe8f97278830ac866cfc672a0508f72db6a0
fabf8b02b34b633d07b412d38f6cbb3ed478ec8e
e3237ca56168c9b49e0645e621de86831030c1f1
refs/heads/main
"2023-03-25T18:17:31.791591"
"2021-03-23T12:47:00"
"2021-03-23T12:47:00"
330,258,154
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5797579884529114, "alphanum_fraction": 0.619361937046051, "avg_line_length": 24.735294342041016, "blob_id": "a7f2b738993fd43c03e0ea3411cecb9de4414572", "content_id": "603ba3e8c4ef5d8318899fa7da0fd247cb2e121a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 909, "license_type": "no_license", "max_line_length": 124, "num_lines": 34, "path": "/port-scanner.py", "repo_name": "alan-valenzuela93/port-scanner", "src_encoding": "UTF-8", "text": "import socket\r\nimport argparse\r\nfrom grabber import banner_grabbing\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('-t', '--target', help='Enter your target address', required=True)\r\nparser = parser.parse_args()\r\nports = [21, 22, 25, 53, 66, 80, 88, 110, 139, 443, 445, 8080, 9050] # These are some of the most interesting ports to scan\r\n\r\n\r\ndef get_ip(target):\r\n return str(socket.gethostbyname(target))\r\n\r\n\r\ndef scan(host, port):\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n try:\r\n s.connect((host, port))\r\n s.settimeout(0.2) # Increase scanning speed\r\n except:\r\n return False\r\n else:\r\n return True\r\n\r\n\r\ndef main():\r\n for p in ports:\r\n if scan(parser.target, p):\r\n print(banner_grabbing(parser.target, p))\r\n\r\n\r\nif __name__ == '__main__':\r\n print('TCP/IP scan started at IP ' + get_ip(parser.target))\r\n main()\r\n" }, { "alpha_fraction": 0.6802574992179871, "alphanum_fraction": 0.6845493316650391, "avg_line_length": 15.923076629638672, "blob_id": "486df6a96d6152d3775dd889101e7133f89d8efb", "content_id": "5c48855e881365b871cebce41ba3ee56ed4ad265", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 466, "license_type": "no_license", "max_line_length": 80, "num_lines": 26, "path": "/README.md", "repo_name": "alan-valenzuela93/port-scanner", "src_encoding": "UTF-8", "text": "# TCP port scanner\r\n\r\nScan hosts for open 
ports and services.\r\n\r\n## Usage\r\n\r\nRun port-scanner.py on terminal. Use [-t] parameter to set a target to scan.\r\n\r\n### Example\r\n\r\n> python3 port-scanner.py -t scanme.nmap.org\r\n\r\n### Requirements\r\n\r\nPython 3.x\r\n\r\n### Upcoming\r\n\r\n~~Add a banner grabbing function~~ Done\r\n\r\n- Optimize scanning speed\r\n- Add UDP scan option\r\n\r\n#### Disclaimer\r\n\r\nUse this script on a controlled environment, don't use it on unauthorized hosts.\r\n" }, { "alpha_fraction": 0.527748703956604, "alphanum_fraction": 0.5403141379356384, "avg_line_length": 30.931034088134766, "blob_id": "eb553e7d7795b69473c4de092e8093883787606f", "content_id": "6aeabd99cc79f377bbdbb74eceae8093c8adac36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 955, "license_type": "no_license", "max_line_length": 77, "num_lines": 29, "path": "/grabber.py", "repo_name": "alan-valenzuela93/port-scanner", "src_encoding": "UTF-8", "text": "import socket\r\n\r\n\r\ndef banner_grabbing(addr, port):\r\n print(\"Getting service information for open TCP/IP port: \", port + \"...\")\r\n socket.setdefaulttimeout(10)\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((addr, port))\r\n data = ''\r\n headers = \\\r\n \"GET / HTTP/1.1\\r\\n\" \\\r\n f\"Host: {addr}\\r\\n\" \\\r\n \"User-Agent: python-custom-script/2.22.0\\r\\n\" \\\r\n \"Accept-Encoding: gzip, deflate\\r\\nAccept: */*\\r\\n\" \\\r\n \"Connection: keep-alive\\r\\n\\r\\n\"\r\n print(\"\\n\\n\" + headers)\r\n cycle = True\r\n\r\n try: # If banner can't be reach, print a message\r\n while cycle: # Keep looping until the banner is found\r\n data = str(s.recv(4096))\r\n if data != '':\r\n s.send(headers.encode()) # Send request\r\n cycle = False\r\n s.close()\r\n except:\r\n print(\"Connection refused... banner unreachable\")\r\n\r\n return data + '\\n'\r\n" } ]
3
thevaccinetracker/data_engine
https://github.com/thevaccinetracker/data_engine
d2081ee5cf05a03f284ce9c79ca414b9768a6705
5a0c43cf0653a0b0f1e3da6c0f2d605aef143a07
bf60f89166d905fd18bd1c4de55abfcd5f0fcfb3
refs/heads/master
"2022-11-25T21:03:03.301425"
"2020-07-28T18:07:58"
"2020-07-28T18:07:58"
277,340,154
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 12, "blob_id": "1e07aebf58b99d81a19775173c7a452547c49cba", "content_id": "d9fbe26875433c01305900488a87545ba99378f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 26, "license_type": "permissive", "max_line_length": 13, "num_lines": 2, "path": "/README.md", "repo_name": "thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "# data_engine\ndata engine\n" }, { "alpha_fraction": 0.5908304452896118, "alphanum_fraction": 0.6038062572479248, "avg_line_length": 28.64102554321289, "blob_id": "dc715421aa799668fb5afa67714056548ae8f36f", "content_id": "2c18b71e61eb936f7a06bad4253308a5bc82b9b5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1156, "license_type": "permissive", "max_line_length": 102, "num_lines": 39, "path": "/web_scrap/raps_org.py", "repo_name": "thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "from settings import GOOGLE_DRIVER, DATA_PATH\nimport time\n\ndef WebScrap():\n print(\"Raps webscrap: Started...\")\n\n driver = GOOGLE_DRIVER\n\n driver.get('https://www.raps.org/news-and-articles/news-articles/2020/3/covid-19-vaccine-tracker')\n\n table = driver.find_element_by_id(\"vax_wrapper\")\n table.find_element_by_name(\"vax_length\").send_keys(\"100\")\n\n rows = table.find_element_by_class_name(\"dataTable\").find_elements_by_tag_name(\"tr\")\n\n tableData = []\n isColumn = True\n for row in rows:\n rowData = []\n colTag = \"td\"\n if isColumn:\n isColumn = False\n colTag = \"th\"\n colFirst = True\n for col in row.find_elements_by_tag_name(colTag):\n if colFirst:\n colFirst = False\n continue\n rowData.append(col.text.encode('utf-8').decode('utf-8'))\n tableData.append(rowData)\n\n import csv\n with open(DATA_PATH + r'/raps.org.tabledata.csv', 'w') as file:\n writer = csv.writer(file, 
delimiter='|', lineterminator='\\n')\n writer.writerows(tableData)\n\n time.sleep(60 * 1)\n\n print(\"Raps webscrap: Completed...\")\n" }, { "alpha_fraction": 0.7134778499603271, "alphanum_fraction": 0.7426955699920654, "avg_line_length": 39.80769348144531, "blob_id": "e5f1574040786ab2c67a0734c277e83da9e5b6b9", "content_id": "6096893bc203c13e38f64576b0cf92b1ec6af916", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1061, "license_type": "permissive", "max_line_length": 121, "num_lines": 26, "path": "/web_scrap/airtable_com.py", "repo_name": "thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "import time\n\nfrom settings import GOOGLE_DRIVER\n\n\ndef WebScrap():\n print(\"Airtable webscrap: Started...\")\n driver = GOOGLE_DRIVER\n\n driver.get('https://airtable.com/shrSAi6t5WFwqo3GM/tblEzPQS5fnc0FHYR/viweyymxOAtNvo7yH?blocks=bipZFzhJ7wHPv7x9z')\n\n table = driver.find_element_by_id(\"table\")\n table.find_element_by_class_name('viewConfigContainer').find_element_by_class_name('link-quiet').click()\n time.sleep(5)\n table.find_element_by_class_name('viewSwitcherContainer').find_elements_by_tag_name('li')[2].click()\n time.sleep(5)\n viewMenuPopover = table.find_elements_by_class_name(\"viewMenuPopover\")[0]\n viewMenuPopover.click()\n time.sleep(3)\n viewMenuPopover.find_element_by_class_name(\"menu\").find_element_by_tag_name(\"li\").click()\n time.sleep(60 * 1)\n print(\"Airtable webscrap: Completed...\")\n\n# References\n# https://medium.com/@moungpeter/how-to-automate-downloading-files-using-python-selenium-and-headless-chrome-9014f0cdd196\n# https://www.programcreek.com/python/example/100025/selenium.webdriver.ChromeOptions\n" }, { "alpha_fraction": 0.682758629322052, "alphanum_fraction": 0.6942528486251831, "avg_line_length": 24.58823585510254, "blob_id": "d10e9aa5b668d0af96b93b599e2273df6881e8e6", "content_id": "e7a4e67fe96671928756bfb19193830da8db7c03", "detected_licenses": [ 
"MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 435, "license_type": "permissive", "max_line_length": 104, "num_lines": 17, "path": "/web_scrap/who_int.py", "repo_name": "thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "from settings import GOOGLE_DRIVER, DATA_PATH\nimport time\n\n\ndef WebScrap():\n print(\"WHO webscrap: Started...\")\n\n driver = GOOGLE_DRIVER\n\n driver.get('https://www.who.int/publications/m/item/draft-landscape-of-covid-19-candidate-vaccines')\n\n body = driver.find_element_by_tag_name(\"body\")\n body.find_element_by_class_name('button-blue-background').click()\n\n time.sleep(60 * 1)\n\n print(\"WHO webscrap: Completed...\")\n" }, { "alpha_fraction": 0.7474226951599121, "alphanum_fraction": 0.7680412530899048, "avg_line_length": 17.4761905670166, "blob_id": "a50d3524e05f8a6ec4a191ca6f259b3cc5163443", "content_id": "9770b849177481b5312c7f6c81b2318c46a7f96d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "permissive", "max_line_length": 53, "num_lines": 21, "path": "/main_exce.py", "repo_name": "thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "from web_scrap import airtable_com, raps_org, who_int\nimport time\n\nairtable_com.WebScrap()\nraps_org.WebScrap()\nwho_int.WebScrap()\n\nprint(\"Sleep for 1 min\")\ntime.sleep(60 * 1)\n\nfrom preprocess_data import pdf_read_table,airtable\n\npdf_read_table.TransformPDFData()\nairtable.PreProcessAirtableData()\n\nprint(\"Sleep for 1 min\")\ntime.sleep(60 * 1)\n\nimport googleDb\n\ngoogleDb.MainGSheetUpdate()\n" }, { "alpha_fraction": 0.6426703929901123, "alphanum_fraction": 0.6524316072463989, "avg_line_length": 28.57216453552246, "blob_id": "0caca1bdc9056f04bf07cde58695bd99a629dbc5", "content_id": "6353cf64c0c1dcfa18bc3bc7cb1570db7e3f98b2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 5737, "license_type": "permissive", "max_line_length": 95, "num_lines": 194, "path": "/googleDb.py", "repo_name": "thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "import gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport time\n\nfrom settings import GSHEET_CRED_FILE, GSHEET_SCOPE, GSHEET_FILE, GSHEET_WORKSHEET\nfrom settings import WHO_INPUT_DATA, RAPS_INPUT_DATA, AIRTABLE_INPUT_DATA\nfrom settings import VT_CORPS\n\nimport get_cosine.get_cosine\n\n# use creds to create a client to interact with the Google Drive API\ncreds = ServiceAccountCredentials.from_json_keyfile_name(GSHEET_CRED_FILE, GSHEET_SCOPE)\nclient = gspread.authorize(creds)\n\n# Find a workbook by name and open the first sheet\n# Make sure you use the right name here.\nsheet = client.open(GSHEET_FILE).get_worksheet(GSHEET_WORKSHEET)\n\n# Extract and print all of the values\nlist_of_hashes = sheet.get_all_records()\n\n\ndef GetDataFromFile(file, separator):\n with open(file) as who_file:\n file_data = who_file.readlines()\n for index in range(len(file_data)):\n file_data[index] = file_data[index].split(separator)\n\n return file_data\n\n\ndef GetRow(data, matchString, col):\n perfactMatch = None\n perfactMatchPer = 0\n for row in data:\n # try:\n # print(row[col] , matchString)\n cosineSim = get_cosine.get_cosine.GetCosineSim([row[col], matchString])\n if cosineSim > 0.70:\n if perfactMatchPer < cosineSim:\n perfactMatch = row\n perfactMatchPer = cosineSim\n # if row[col] == matchString:\n # return row\n # except:\n # print(\"Error:\", row)\n\n # print(perfactMatch, perfactMatchPer, cosineSim)\n return perfactMatch, perfactMatchPer\n\n\ndef UpdateGoogleSheet(settings, data, gSheet):\n sheetCol = settings[\"sheetCol\"]\n dataCol = settings[\"dataCol\"]\n currentSheetRow = settings[\"currentSheetRow\"]\n updateSheetCol = settings[\"updateSheetCol\"]\n dataColForUpdate = settings[\"dataColForUpdate\"]\n currentIndex = 0\n for sheetRow in 
gSheet.get_all_values():\n try:\n foundRow, foundRowMatchPer = GetRow(data, sheetRow[sheetCol], dataCol)\n # print(foundRowMatchPer, sheetRow[sheetCol], foundRow)\n if foundRow:\n gSheet.update_cell(currentSheetRow, updateSheetCol, foundRow[dataColForUpdate])\n gSheet.update_cell(currentSheetRow, updateSheetCol + 1, foundRowMatchPer)\n time.sleep(3)\n\n except:\n print(currentSheetRow, updateSheetCol, dataColForUpdate, foundRow)\n currentSheetRow += 1\n currentIndex += 1\n\nprint(\"WHO data loading start...\")\nwhoData = GetDataFromFile(WHO_INPUT_DATA, \"|\")\nprint(\"WHO data loading complete...\")\n\nprint(\"RAPS data loading start...\")\nrapsData = GetDataFromFile(RAPS_INPUT_DATA, \"|\")\nprint(\"RAPS data loading complete...\")\n\nprint(\"AirTable data loading start...\")\nairTableData = GetDataFromFile(AIRTABLE_INPUT_DATA, \"|\")\nprint(\"AirTable data loading complete...\")\n\ntime.sleep(10)\n\nwhoSettings = {\n 'sheetCol': 2,\n 'dataCol': 2,\n 'currentSheetRow': 1,\n 'updateSheetCol': 8,\n 'dataColForUpdate': 4\n}\nrapsSettings = {\n 'sheetCol': 3,\n 'dataCol': 1,\n 'currentSheetRow': 1,\n 'updateSheetCol': 10,\n 'dataColForUpdate': 2\n}\nairTableSettings = {\n 'sheetCol': 1,\n 'dataCol': 0,\n 'currentSheetRow': 1,\n 'updateSheetCol': 6,\n 'dataColForUpdate': 3\n}\n\nprint(\"Updating GSheet for WHO...\")\nUpdateGoogleSheet(whoSettings, whoData, sheet)\nprint(\"Updating GSheet for WHO Completed...\")\n\ntime.sleep(10)\n\nprint(\"Updating GSheet for RAPS...\")\nUpdateGoogleSheet(rapsSettings, rapsData, sheet)\nprint(\"Updating GSheet for RAPS Completed...\")\n\ntime.sleep(10)\n\nprint(\"Updating GSheet for AirTable...\")\nUpdateGoogleSheet(airTableSettings, airTableData, sheet)\nprint(\"Updating GSheet for AirTable Completed...\")\n\ntime.sleep(10)\n\ndef GetPhaseCorp():\n with open(VT_CORPS, 'r') as file:\n data = file.readlines()\n phase = {}\n for row in data:\n col = row.split(':')\n phase[col[0]] = col[1].split(',')\n return phase\n\n\ndef 
GetStagePhase(stage):\n stage = stage.lower().replace(' ', '')\n findStageIn = []\n for key in phase:\n for p in phase[key]:\n if p.lower().replace(' ', '') in stage:\n findStageIn.append(key)\n findStageIn = sorted(list(set(findStageIn)), reverse=True)\n if len(findStageIn) > 0:\n return findStageIn[0]\n return '0'\n\n\ndef GetFinalPhase(all_stage):\n initLen = len(all_stage)\n final_stage = dict()\n\n final_stage_result = \"Not Sure\"\n for d in all_stage:\n if d not in final_stage:\n final_stage[d] = 1\n else:\n final_stage[d] += 1\n if len(final_stage) == initLen:\n final_stage_result = \"Not Sure\"\n\n final_stage = sorted(final_stage.items(), key=lambda x: x[1], reverse=True)\n if len(final_stage):\n final_stage_result = final_stage[0][0]\n\n if final_stage_result == '0':\n final_stage_result = \"Not Sure\"\n\n return final_stage_result\n\n\ndef UpdateGoogleSheetFinalStage(gSheet):\n currentSheetRow = 2\n updateSheetCol = 15\n index = 0\n for sheetRow in gSheet.get_all_values():\n if index == 0:\n index = 1\n continue\n WHOStage = GetStagePhase(sheetRow[7])\n RAPSStage = GetStagePhase(sheetRow[9])\n AIRTableStage = GetStagePhase(sheetRow[5])\n finalStage = GetFinalPhase([WHOStage, RAPSStage, AIRTableStage])\n gSheet.update_cell(currentSheetRow, updateSheetCol, finalStage)\n currentSheetRow += 1\n time.sleep(3)\n\n\nphase = dict(GetPhaseCorp())\ndef MainGSheetUpdate():\n print(\"Updating GSheet for Final Stage...\")\n UpdateGoogleSheetFinalStage(sheet)\n print(\"Updating GSheet for Final Stage Completed...\")\n" }, { "alpha_fraction": 0.7516233921051025, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 36.693878173828125, "blob_id": "bb43172c3c86f393f86e9e5294ba468e69c616f9", "content_id": "94fea4010a4baa9159398f894764201d35ccffa0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1848, "license_type": "permissive", "max_line_length": 106, "num_lines": 49, "path": "/settings.py", 
"repo_name": "thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "import sys\n\nsys.path.append(r'C:\\Users\\v-shvi\\Desktop\\Personal\\VT\\data_engine')\nsys.path.append(r'C:\\Users\\v-shvi\\Desktop\\Personal\\VT\\data_engine\\web_scrap_data')\nsys.path.append(r'C:\\Users\\v-shvi\\Desktop\\Personal\\VT\\data_engine\\get_cosine')\nsys.path.append(r'C:\\Users\\v-shvi\\Desktop\\Personal\\VT\\data_engine\\preprocess_data')\n\n\nROOT_PATH = \"../\"\nDATA_PATH = \"data\"\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--window-size=1920x1080\")\nchrome_options.add_argument(\"--disable-notifications\")\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument('--verbose')\nchrome_options.add_experimental_option(\"prefs\", {\n \"download.default_directory\": r\"C:\\Users\\v-shvi\\Desktop\\Personal\\VT\\data_engine\\data\",\n \"download.prompt_for_download\": False,\n \"download.directory_upgrade\": True,\n \"safebrowsing_for_trusted_sources_enabled\": False,\n \"safebrowsing.enabled\": False\n})\nchrome_options.add_argument('--disable-gpu')\nchrome_options.add_argument('--disable-software-rasterizer')\n\n# chrome_options = chrome_options\n# GOOGLE_DRIVER = webdriver.Chrome(executable_path='driver/chromedriver.exe')\nGOOGLE_DRIVER = webdriver.Chrome(executable_path='driver/chromedriver.exe', chrome_options=chrome_options)\n\n# SETTINGS DATA\nGSHEET_CRED_FILE = \"credentials.json\"\nGSHEET_SCOPE = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\nGSHEET_FILE = \"Data Engine Database\"\nGSHEET_WORKSHEET = 6\n\nWHO_INPUT_DATA = \"data/who.int.transformed_data.csv\"\nRAPS_INPUT_DATA = \"data/raps.org.tabledata.csv\"\n# AIRTABLE_INPUT_DATA = \"data/COVID-19 Tracker-Vaccines.csv\"\nAIRTABLE_INPUT_DATA = \"data/airtable.transformed_data.csv\"\n\n\nVT_CORPS = 
'vt_corp/phase.txt'\nSTOPWORDS = 'english'\n\n" }, { "alpha_fraction": 0.5624508857727051, "alphanum_fraction": 0.5655930638313293, "avg_line_length": 28.604650497436523, "blob_id": "6ac3b79ee2202b6610723f8aaced5ed5ff204101", "content_id": "b81341f1f776e6eb33a7139c105a5ffeed033e10", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1273, "license_type": "permissive", "max_line_length": 74, "num_lines": 43, "path": "/preprocess_data/airtable.py", "repo_name": "thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "from settings import DATA_PATH\nimport csv\n\n\ndef parseRowToCell(row):\n isSingleWord = False;\n word = \"\"\n rowArray = []\n for letter in row:\n if letter == \"\\\"\" and not isSingleWord:\n isSingleWord = True\n elif letter == \"\\\"\" and isSingleWord:\n isSingleWord = False\n elif letter == \",\" and not isSingleWord:\n rowArray.append(word)\n word = \"\"\n else:\n word += letter\n return rowArray\n\n\ndef PreProcessAirtableData():\n print(\"Airtable csv pre-processing: Started...\")\n # with open(r\"../data/COVID-19 Tracker-Vaccines.csv\") as file:\n with open(DATA_PATH + r\"/COVID-19 Tracker-Vaccines.csv\") as file:\n data = file.readlines()\n dataMatrix = []\n for row in data:\n if (\"\\n\" in row):\n row = row.replace('\\n', '')\n if (\"\\\"\" in row):\n dataMatrix.append(parseRowToCell(row))\n else:\n dataMatrix.append(row.split(\",\"))\n\n with open(DATA_PATH + r'/airtable.transformed_data.csv', 'w') as file:\n writer = csv.writer(file, delimiter='|', lineterminator='\\n')\n writer.writerows(dataMatrix)\n\n print(\"Airtable csv pre-processing: Completed...\")\n\n\n# PreProcessAirtableData()\n" }, { "alpha_fraction": 0.5917767882347107, "alphanum_fraction": 0.5983847379684448, "avg_line_length": 27.375, "blob_id": "3de73a2384d74895d77d913196a8d40db56f3f4e", "content_id": "e1f806e1fc6f35350ab6295fc7cf188899be4f6f", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1362, "license_type": "permissive", "max_line_length": 94, "num_lines": 48, "path": "/preprocess_data/pdf_read_table.py", "repo_name": "thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "import tabula\nfrom settings import DATA_PATH\n\nfile = DATA_PATH + \"/novel-coronavirus-landscape-covid-19-(1).pdf\"\ntabula.convert_into(file, DATA_PATH + \"/who_covid_data.csv\", output_format=\"csv\", pages='all')\n\nimport csv\n\nfile_CSV = open(DATA_PATH + '/who_covid_data.csv')\ndata_CSV = csv.reader(file_CSV)\nlist_CSV = list(data_CSV)\n\n\ndef transformData(data):\n if len(data) <= 0:\n return []\n tempData = data[0]\n data.remove(tempData)\n for r in data:\n index = 0\n for c in range(len(tempData)):\n col = tempData[c] + \" \" + r[c].lstrip('\\r\\n').rstrip('\\r\\n').strip()\n tempData[c] = col.strip()\n\n cleanCol = []\n for col in tempData:\n cleanCol.append(col.replace(\"\\n\", \" \"))\n return cleanCol\n\n\ndef TransformPDFData():\n print(\"WHO pdf pre-processing: Started...\")\n\n indexStartFrom = 3\n row = []\n transformedData = []\n for data in range(indexStartFrom, len(list_CSV)):\n if list_CSV[data][3] != '':\n if len(row) > 0:\n transformedData.append(transformData(row))\n row = []\n row.append(list_CSV[data])\n\n with open(DATA_PATH + r'/who.int.transformed_data.csv', 'w') as file:\n writer = csv.writer(file, delimiter='|', lineterminator='\\n')\n writer.writerows(transformedData)\n\n print(\"WHO pdf pre-processing: Completed...\")\n" }, { "alpha_fraction": 0.6068601608276367, "alphanum_fraction": 0.6437994837760925, "avg_line_length": 35.14285659790039, "blob_id": "e5c892bde9e9a0060cf4b3d2299a675e8cc3786e", "content_id": "136dd0b8714a32a8c10e304ef9ffafb08f9b2474", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 758, "license_type": "permissive", "max_line_length": 278, "num_lines": 21, "path": "/test.py", "repo_name": 
"thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "statement = \"\"\"\"Institute of Medical Biology, Chinese Academy of Medical Sciences\",Vaccine,Inactivated virus,Phase II,Phase II began June 2020,Inactivated,NCT04412538,Unknown,,,N/A,https://docs.google.com/document/d/1Y4nCJJ4njzD1wiHbufCY6gqfRmj49Qn_qNgOJD62Wik/edit,6/23/2020\"\"\"\n\n\ndef parseRowToCell(row):\n isSingleWord = False;\n word = \"\"\n rowArray = []\n for letter in row:\n if letter == \"\\\"\" and not isSingleWord:\n isSingleWord = True\n elif letter == \"\\\"\" and isSingleWord:\n isSingleWord = False\n elif letter == \",\" and not isSingleWord:\n rowArray.append(word)\n word = \"\"\n else:\n word += letter\n print(rowArray)\n return rowArray\n\nparseRowToCell(statement)" }, { "alpha_fraction": 0.6773504018783569, "alphanum_fraction": 0.6955128312110901, "avg_line_length": 27.363636016845703, "blob_id": "3fbc3fa4ce94b5574730b5845806cf4bdf6d8f9b", "content_id": "0b178d3aa88cccac9cf3f4dd31f2ee63a87cc4f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 936, "license_type": "permissive", "max_line_length": 77, "num_lines": 33, "path": "/get_cosine/get_cosine.py", "repo_name": "thevaccinetracker/data_engine", "src_encoding": "UTF-8", "text": "import string\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom nltk.corpus import stopwords\n\nfrom settings import STOPWORDS\n\nstopwords = stopwords.words(STOPWORDS)\n\n\ndef cosine_sim_vectors(vec1, vec2):\n vec1 = vec1.reshape(1, -1)\n vec2 = vec2.reshape(1, -1)\n return cosine_similarity(vec1, vec2)[0][0]\n\n\ndef clean_string(text):\n text = ''.join([word for word in text if word not in string.punctuation])\n text = text.lower()\n text = ' '.join([word for word in text.split() if word not in stopwords])\n return text\n\n\ndef GetCosineSim(sentanceList):\n try:\n cleaned = list(map(clean_string, 
sentanceList))\n vectorizer = CountVectorizer().fit_transform(cleaned)\n vectors = vectorizer.toarray()\n # csim = cosine_similarity(vectors)\n\n return cosine_sim_vectors(vectors[0], vectors[1])\n except:\n return 0\n" } ]
11
marioxe301/ParserDR
https://github.com/marioxe301/ParserDR
be95c10da25aa0b5edc446967b4028076ce9d5cc
ddaeb78ce3f91b1c201b21fdd2154aa764176d89
6b6e98920fb23e7cabff8a15cec5cce14ba3e898
refs/heads/master
"2020-07-25T16:55:37.573570"
"2019-09-21T21:45:50"
"2019-09-21T21:45:50"
208,362,168
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.557953417301178, "alphanum_fraction": 0.5589233636856079, "avg_line_length": 43.83695602416992, "blob_id": "547712c16010a884207dd41304336770b7c07712", "content_id": "6879c82afb2b4977a8e31f1370b90e320e44b897", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4124, "license_type": "no_license", "max_line_length": 299, "num_lines": 92, "path": "/Lexer.py", "repo_name": "marioxe301/ParserDR", "src_encoding": "UTF-8", "text": "from pyparsing import (Regex, White, Literal ,\nZeroOrMore, OneOrMore, Group , Combine , \nWord , alphanums, Suppress)\nimport sys\n\n\nclass Tokens(object):\n def __init__(self,tag,token):\n self.tag = tag\n self.token = token\n#esta clase permitira guardar en la lista\n\nclass Lexer(object):\n def __init__(self,path):\n #TERMINALES\n WHITESPACE = White(ws=\" \\t\\r\")\n LETTER = Regex('[a-zA-Z]')\n DIGIT = Regex('[0-9]')\n DATE_TYPE = Literal('date') \n STRING_TYPE = Literal('String') \n REAL_TYPE = Literal('real')\n VOID_TYPE = Literal('void')\n BOOLEAN_TYPE = Literal('boolean') \n ANYTYPE_TYPE = Literal('anytype') \n INT_TYPE = Literal('int')\n STATIC_TKN = Literal('static')\n RETURN = Literal('EXIT')\n IF = Literal('if')\n WHILE = Literal('while')\n \n \n \n #TERMINALES LITERALES\n DATE_LITERAL = DIGIT + DIGIT + Literal('/') + DIGIT + DIGIT + Literal('/') + DIGIT + DIGIT + DIGIT + DIGIT\n STRING_LITERAL = Combine(Literal('\"')+ ZeroOrMore(LETTER | DIGIT | Literal(' ') | Literal('%')|Literal('@')| Literal(',')| Literal('-')|Literal('=')|Literal('(')|Literal(')')|Literal('_')) +Literal('\"'))\n REAL_LITERAL = Combine(OneOrMore(DIGIT) + Literal('.') + OneOrMore(DIGIT))\n INT_LITERAL = Combine(OneOrMore(DIGIT))\n TRUE_LITERAL = Literal('true')\n FALSE_LITERAL = Literal('false')\n BOOLEAN_LITERAL = TRUE_LITERAL | FALSE_LITERAL\n INCR = Literal('++')\n DDPERIOD = Literal('::')\n PAR_OP = Literal('(')\n PAR_CL = Literal(')')\n SEMICOLON = Literal(';')\n COMA = 
Literal(',')\n BRACK_OP = Literal('{')\n BRACK_CL = Literal('}')\n PERIOD = Literal('.')\n ASIG = Literal(':=')\n REL_OP = Literal('>') | Literal('<') | Literal('==') | Literal('<=') | Literal('>=')\n LOG_OP = Literal('||') | Literal('&&')\n MULT_OP = Literal('/') | Literal('*')\n ADD_OP = Literal('+') | Literal('-')\n ID = Combine((LETTER | Literal('_')) + ZeroOrMore( LETTER | DIGIT ) )\n TEXT = ZeroOrMore(Word(alphanums)| WHITESPACE )\n COMMENT = Combine((Literal('//')+ TEXT + Literal('\\n') ) | (Literal('//')+ TEXT) ) \n\n \n program = ZeroOrMore( Suppress(COMMENT) | Group(DATE_TYPE)(\"DATE-TY\") | Group(STRING_TYPE)(\"STRING-TY\") | Group(REAL_TYPE)(\"REAL-TY\") | Group( VOID_TYPE)(\"VOID-TY\") | Group(BOOLEAN_TYPE)(\"BOOLEAN-TY\") | Group(ANYTYPE_TYPE)(\"ANY-TY\")\n | Group(INT_TYPE)(\"INT-TY\") | Group(STATIC_TKN)(\"STATIC-TY\")| Group(RETURN)(\"RETURN-TOK\") | Group(IF)(\"IF-TOK\") | Group(WHILE)(\"WHILE-TOK\") | Group(DATE_LITERAL)(\"DATE-TOK\") | Group(STRING_LITERAL)(\"STRING-TOK\")\n | Group(COMA)(\"COMA-TOK\") | Group(REAL_LITERAL)(\"REAL-TOK\") | Group(INT_LITERAL)(\"INT-TOK\") | Group(BOOLEAN_LITERAL)(\"BOOLEAN-TOK\") | Group(INCR)(\"INCR-TOK\") |Group( DDPERIOD)(\"DDPERIOD-TOK\") | Group(PAR_OP)(\"PAR-OP-TOK\") | Group(PAR_CL)(\"PAR-CL-TOK\") | Group(SEMICOLON)(\"SEMICOLON-TOK\")\n | Group(BRACK_OP)(\"BRACK-OP-TOK\") | Group(BRACK_CL)(\"BRACK-CL-TOK\") | Group(PERIOD)(\"PERIOD-TOK\") | Group(ASIG)(\"ASIG-TOK\") | Group( REL_OP)(\"REL-OP-TOK\") | Group(LOG_OP)(\"LOG-OP-TOK\") | Group(MULT_OP)(\"MULT-OP-TOK\") | Group( ADD_OP)(\"ADD-OP-TOK\") | Group(ID)(\"ID-TOK\")\n )\n \n #manda las palabras reservadas que acepta la gramatica\n self.lexer = program\n self.tokenList = []\n self.path = path\n \n #Divide en Tokens el archivo que se le manda\n def tokenFile(self):\n try:\n return self.lexer.parseFile(self.path)\n except Exception:\n print(\"Invalid token found\")\n sys.exit()\n \n \n def tokenize(self):\n tokenItems = self.tokenFile()\n for items in 
tokenItems:\n tok = Tokens(items.getName(),items[0])\n self.tokenList.append(tok)\n\n def printAllTokens(self):\n for tok in self.tokenList:\n print(\"TAG:\",tok.tag,\" \",\"TOKEN:\",tok.token,\"\\n\")\n\n#lex = Lexer('Program.g4')\n#lex.tokenize()\n#lex.printAllTokens()" }, { "alpha_fraction": 0.4542105197906494, "alphanum_fraction": 0.46052631735801697, "avg_line_length": 23.688312530517578, "blob_id": "a818b3f4cebf053f3d85b08469daddca6efce206", "content_id": "9da8861478f040b02ab67f07ee4a4892d41b5c12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1900, "license_type": "no_license", "max_line_length": 66, "num_lines": 77, "path": "/ParserSimpleExample.py", "repo_name": "marioxe301/ParserDR", "src_encoding": "UTF-8", "text": "from Lexer import Lexer\nfrom treelib import Node, Tree\n\n#verifica unicamente la declaracion de un int int <ID> := <NUMERO>\nclass ParserExample(object):\n def __init__(self,path):\n lex = Lexer(path)\n lex.tokenize()\n self.TOKENS = lex.tokenList\n self.INDEX = 0\n tree = Tree()\n self.TREE = tree\n\n def nextToken(self):\n if self.INDEX < len(self.TOKENS):\n x = self.INDEX\n self.INDEX+=1\n return [True ,self.TOKENS[x]]\n else:\n #self.INDEX = 0\n return [False]\n def parseCheck(self):\n if self.variable():\n print(\"Gramatica Correcta\")\n else:\n print(\"Gramatica Incorrecta\")\n\n def variable(self):\n if self.TYPE():\n if self.ID():\n if self.ASSIG():\n if self.NUMBER():\n return True\n def TYPE(self):\n x = self.nextToken()\n if x[0]:\n if x[1].tag == \"INT-TY\":\n return True\n else:\n return False\n else:\n return False\n\n def ID(self):\n x = self.nextToken()\n if x[0]:\n if x[1].tag == \"ID-TOK\":\n return True\n else:\n return False\n else:\n return False\n def ASSIG(self):\n x = self.nextToken()\n if x[0]:\n if x[1].tag == \"ASIG-TOK\":\n return True\n else:\n return False\n else:\n return False\n def NUMBER(self):\n x = self.nextToken()\n if x[0]:\n if x[1].tag == 
\"INT-TOK\":\n return True\n else:\n return False\n else:\n return False\n\n def ImprimirArbol(self):\n self.TREE.show()\n\nPar = ParserExample('Program.g4')\nPar.parseCheck()\n#Par.ImprimirArbol()" }, { "alpha_fraction": 0.4424584209918976, "alphanum_fraction": 0.45044317841529846, "avg_line_length": 34.005126953125, "blob_id": "1b5088169eff0544d8f64064e814554b40a79cb4", "content_id": "a34ac18857555d409a10799d317dc1b038749aea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13651, "license_type": "no_license", "max_line_length": 114, "num_lines": 390, "path": "/Parser.py", "repo_name": "marioxe301/ParserDR", "src_encoding": "UTF-8", "text": "from Lexer import Lexer\nfrom treelib import Node, Tree\nimport re\nfrom termcolor import colored,cprint\n\nclass Parser(object):\n def __init__(self,path):\n lex = Lexer(path)\n lex.tokenize()\n self.TOKENS = lex.tokenList\n self.TYPES= re.compile(\".*-TY\")\n \n\n def nextToken(self):\n if len(self.TOKENS) != 0:\n x = self.TOKENS[0]\n self.TOKENS.pop(0)\n return [True,x]\n else:\n return [False]\n\n def seekNextToken(self):\n if len(self.TOKENS) > 2:\n return [True,self.TOKENS[1]]\n else:\n return [False]\n\n def seekActualToken(self):\n if len(self.TOKENS)!= 0:\n return [True,self.TOKENS[0]]\n else:\n return [False]\n\n def Parse(self):\n if self.Program():\n cprint(\"Grammar Correct\\n\",\"green\",attrs=['bold'])\n else:\n cprint(\"Grammar Incorrect\\n\",\"red\",attrs=['bold'])\n \n def Program(self):\n print(\"Program\\n\")\n if self.Opt_funct_decl():\n return True\n else:\n return False\n\n \n def Opt_funct_decl(self):\n print(\"Opt_funct_decl\\n\")\n if self.Funct_head():\n if self.Body():\n return True\n else:\n return False\n else:\n return False\n \n def Funct_head(self):\n print(\"Funct_head\\n\")\n if self.Funct_name():\n token = self.nextToken()\n if token[0] and token[1].tag == 'PAR-OP-TOK':\n print(\"PAREN_OP_TOKEN\")\n print(\"Token: 
\",token[1].token,\"\\n\")\n if self.Param_list_opt():\n return True\n else:\n cprint(\"Expected a ( TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n else:\n return False\n \n def Funct_name(self):\n print(\"Funct_name\\n\")\n if self.Funct_type():\n token = self.nextToken()\n if token[0] and token[1].tag == 'ID-TOK':\n print(\"ID_TOKEN\")\n print(\"Token: \",token[1].token,\"\\n\")\n return True\n else:\n cprint(\"Expected a ID TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n else:\n return False\n \n def Funct_type(self):\n print(\"Funct_type\\n\")\n token = self.nextToken()\n if token[0] and token[1].tag == 'STATIC-TY':\n print(\"STATIC_TOKEN\")\n print(\"Token: \",token[1].token,\"\\n\")\n if self.Decl_type():\n return True\n else:\n cprint(\"Expected a STATIC TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n else:\n return False\n\n def Decl_type(self):\n print(\"Decl_type\\n\")\n token = self.nextToken()\n if token[0] and self.TYPES.match(token[1].tag) is not None:\n print(\"TYPE_TOKEN\")\n print(\"Token: \",token[1].token,\"\\n\")\n return True\n else:\n cprint(\"Expected a TYPE TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n \n def Param_list_opt(self):\n print(\"Param_list_opt\\n\")\n Token = self.seekActualToken()\n if Token[0] and Token[1].tag == 'PAR-CL-TOK':\n print(\"PAREN_CL_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n self.nextToken() # para quitar el parentesis\n return True\n elif Token[0] and self.TYPES.match(Token[1].tag) is not None:\n while True:\n if self.Decl_param():\n Token = self.seekActualToken()\n if Token[0] and Token[1].tag == 'COMA-TOK':\n print(\"COMA_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n self.nextToken() #solo para descartar la coma\n continue\n elif Token[0] and Token[1].tag == 'PAR-CL-TOK':\n print(\"PAREN_CL_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n self.nextToken() # para descartar el parentesis\n return True\n else:\n cprint(\"Expected a COMA or ) 
TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n else:\n cprint(\"Expected a ) TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n\n def Decl_param(self):\n print(\"Decl_param\\n\")\n if self.Decl_type():\n token = self.nextToken()\n if token[0] and token[1].tag == 'ID-TOK':\n print(\"ID_TOKEN\")\n print(\"Token: \",token[1].token,\"\\n\")\n return True\n else:\n cprint(\"Expected a ID TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n else:\n return False\n \n def Body(self):\n print(\"Body\\n\")\n token = self.nextToken()\n if token[0] and token[1].tag == 'BRACK-OP-TOK':\n print(\"BRACK_OP_TOKEN\")\n print(\"Token: \",token[1].token,\"\\n\")\n if self.Stmt_list():\n return True\n else:\n \n return False\n else:\n cprint(\"Expected a { TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n\n def Stmt_list(self):\n print(\"Stmt_list\\n\")\n if self.Stmts():\n return True\n else:\n return False\n\n def Stmts(self):\n print(\"Stmts\\n\")\n BrackToken = self.seekActualToken()\n if BrackToken[0] and BrackToken[1].tag == 'BRACK-CL-TOK':\n print(\"BRACK_CL_TOKEN\")\n print(\"Token: \",BrackToken[1].token,\"\\n\")\n self.nextToken() # para quitar el braket\n return True\n\n else:\n while True:\n if self.Stmt():\n BrackToken = self.seekActualToken()\n if BrackToken[0] and BrackToken[1].tag == 'BRACK-CL-TOK':\n print(\"BRACK_CL_TOKEN\")\n print(\"Token: \",BrackToken[1].token,\"\\n\")\n self.nextToken() # descarta el bracket\n return True\n else:\n continue\n else:\n cprint(\"Unexpected TOKEN found\\n\",\"red\",attrs=['bold'])\n return False\n \n def Stmt(self):\n print(\"Stmt\\n\")\n Token = self.seekActualToken()\n if Token[0] and Token[1].tag == 'IF-TOK':\n if self.If_stmt():\n return True\n else:\n return False\n elif Token[0] and Token[1].tag == 'WHILE-TOK' :\n if self.While_stmt():\n return True\n else:\n return False\n elif Token[0] and Token[1].tag == 'RETURN-TOK':\n if self.Return_stmt():\n return True\n else:\n return False\n elif Token[0] and 
self.TYPES.match(Token[1].tag) is not None:\n if self.Assign_stmt():\n return True\n else:\n return False\n else:\n return False\n \n def If_stmt(self):\n print(\"If_stmt\\n\")\n IfToken = self.nextToken()\n ParToken = self.nextToken()\n if IfToken[0] and IfToken[1].tag == 'IF-TOK' and ParToken[0] and ParToken[1].tag == 'PAR-OP-TOK':\n print(\"IF_TOKEN\")\n print(\"Token: \",IfToken[1].token,\"\\n\")\n print(\"PAR_OP_TOKEN\")\n print(\"Token: \",ParToken[1].token,\"\\n\")\n if self.Bool_expr():\n ParToken = self.nextToken()\n if ParToken[0] and ParToken[1].tag == 'PAR-CL-TOK':\n print(\"PAR_CL_TOKEN\")\n print(\"Token: \",ParToken[1].token,\"\\n\")\n if self.Body():\n return True\n else:\n return False\n else:\n cprint(\"Expected a ) TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n else:\n return False\n else:\n cprint(\"Expected a IF or ( or TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n\n def Bool_expr(self):\n print(\"Bool_expr\\n\")\n Token = self.seekActualToken()\n if Token[0] and Token[1].tag == 'BOOLEAN-TOK':\n print(\"BOOLEAN_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n self.nextToken() #Descartar el token \n return True\n else:\n if self.Constant():\n Token = self.nextToken()\n if Token[0] and (Token[1].tag == 'REL-OP-TOK' or Token[1].tag == 'LOG-OP-TOK'): \n print(\"LOGICAL_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n if self.Constant():\n return True\n else:\n return False\n else:\n cprint(\"Expected a RELATIONAL or LOGICAL TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n else:\n return False\n \n def Constant(self):\n print(\"Constant\\n\")\n Token = self.nextToken()\n if Token[0] and Token[1].tag == 'INT-TOK':\n print(\"INT_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n return True\n elif Token[0] and Token[1].tag == 'STRING-TOK':\n print(\"STRING_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n return True\n elif Token[0] and Token[1].tag == 'REAL-TOK':\n print(\"REAL_TOKEN\")\n print(\"Token: 
\",Token[1].token,\"\\n\")\n return True\n elif Token[0] and Token[1].tag == 'DATE-TOK':\n print(\"DATE_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n return True\n elif Token[0] and Token[1].tag == 'BOOLEAN-TOK':\n print(\"BOOLEAN_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n return True\n else:\n cprint(\"Expected a CONSTANT TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n\n def While_stmt(self):\n print(\"While_stmt\\n\")\n WhileToken = self.nextToken()\n ParToken = self.nextToken()\n if WhileToken[0] and ParToken[0] and WhileToken[1].tag == 'WHILE-TOK' and ParToken[1].tag == 'PAR-OP-TOK':\n print(\"WHILE_TOKEN\")\n print(\"Token: \",WhileToken[1].token,\"\\n\")\n print(\"PAR_OP_TOKEN\")\n print(\"Token: \",ParToken[1].token,\"\\n\")\n if self.Bool_expr():\n ParToken = self.nextToken()\n if ParToken[0] and ParToken[1].tag == 'PAR-CL-TOK':\n print(\"PAR_CL_TOKEN\")\n print(\"Token: \",ParToken[1].token,\"\\n\")\n if self.Body():\n return True\n else:\n return False\n else:\n \n return False\n else:\n return False\n else:\n cprint(\"Expected a WHILE or ( TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n\n def Return_stmt(self):\n print(\"Return_stmt\\n\")\n Token = self.nextToken()\n if Token[0] and Token[1].tag == 'RETURN-TOK':\n print(\"RETURN_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n Semicolon = self.seekActualToken()\n if Semicolon[0] and Semicolon[1].tag == 'SEMICOLON-TOK':\n print(\"SEMICOLON_TOKEN\")\n print(\"Token: \",Semicolon[1].token,\"\\n\")\n self.nextToken()\n return True\n else:\n if self.Constant():\n Semicolon = self.seekActualToken()\n if Semicolon[0] and Semicolon[1].tag == 'SEMICOLON-TOK':\n print(\"SEMICOLON_TOKEN\")\n print(\"Token: \",Semicolon[1].token,\"\\n\")\n self.nextToken()\n return True\n else:\n return False\n else:\n return False\n else:\n cprint(\"Expected a RETURN TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n \n def Assign_stmt(self):\n print(\"Assign_stmt\\n\")\n if self.Decl_type():\n 
Token = self.nextToken()\n if Token[0] and Token[1].tag == 'ID-TOK':\n print(\"ID_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n Token = self.nextToken()\n if Token[0] and Token[1].tag == 'ASIG-TOK':\n print(\"ASSIGN_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n if self.Constant():\n Token = self.nextToken()\n if Token[0] and Token[1].tag == 'SEMICOLON-TOK':\n print(\"SEMICOLON_TOKEN\")\n print(\"Token: \",Token[1].token,\"\\n\")\n return True\n else:\n cprint(\"Expected a SEMICOLON TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n else:\n return False\n else:\n cprint(\"Expected a ASSIGN TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n else:\n cprint(\"Expected a ID TOKEN\\n\",\"red\",attrs=['bold'])\n return False\n\nPars = Parser('Program.g4')\nPars.Parse()" } ]
3
p4squ4lle/PI-Controller-Communication
https://github.com/p4squ4lle/PI-Controller-Communication
699bfcd464c969e5032c23b1542a6db256750bf7
fb18a7837ab1135ef5ae51b84d3b7a54bc24b42d
02227d8caac04175a71c1b038e1193376897ddbb
refs/heads/main
"2023-05-29T09:38:27.785962"
"2021-06-11T09:16:28"
"2021-06-11T09:16:28"
372,495,906
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5508854389190674, "alphanum_fraction": 0.5656070113182068, "avg_line_length": 33.79389190673828, "blob_id": "d9d88b5407d3e2b775c1b0b722d86fc5399f42bb", "content_id": "262eb0ebed605de809723261775d59c8bb8801c5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4687, "license_type": "permissive", "max_line_length": 93, "num_lines": 131, "path": "/PIController.py", "repo_name": "p4squ4lle/PI-Controller-Communication", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nimport serial\r\nimport subprocess\r\nimport logging\r\nfrom datetime import datetime\r\nfrom pipython import GCSDevice, pitools\r\nfrom time import sleep\r\n\r\n# Set-Up logging\r\ndt = datetime.now()\r\ndt_string = dt.strftime(\"%H-%M_%d%m%Y\")\r\nlogging.basicConfig(level=logging.INFO,\r\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\r\n handlers=[logging.FileHandler(f\"log/{dt_string}.log\"),\r\n logging.StreamHandler()\r\n ]\r\n )\r\nposition_file = open(f'log/motor_positions_{dt_string}.csv', 'a')\r\nposition_file.write(\"#pos_m1, pos_m2, pos_m3 [mm]\\n\")\r\n\r\n# Laser Desk COM port\r\nLASER_DESK_COM = 'COM6'\r\n\r\n# Start Laser Desk\r\n# laser_desk_path = r'C:\\Program Files\\SCANLAB\\laserDesk\\SLLaserDesk.exe'\r\n# print(f'Starting Laser Desk application at {laser_desk_path}')\r\n# subprocess.Popen([laser_desk_path])\r\n# print('Succesfully started Laser Desk application')\r\n\r\n# Initialize PI Motor Controller\r\nSN = '120060504'\r\nSTAGES = ['M-521.DG1', 'M-405.DG(FW000.000)', 'M-405.DG(FW000.000)',]\r\nREFMODE = 'FRF'\r\n\r\nPI = GCSDevice('C-844')\r\nPI.ConnectUSB(serialnum=SN)\r\nlogging.info('connected: {}'.format(PI.qIDN().strip()))\r\n \r\nprint('-----------------------------------------------')\r\n \r\nif PI.HasqVER():\r\n logging.info('version info: {}'.format(PI.qVER().strip()))\r\n \r\nprint('-----------------------------------------------')\r\n 
\r\nlogging.info(f'initialize connected stages: {STAGES}')\r\npitools.startup(PI, stages=STAGES, refmodes=REFMODE)\r\nlogging.info(f'Connected Stages: {PI.qCST()}')\r\n\r\nprint('-----------------------------------------------')\r\n\r\nservo_dict = PI.qSVO()\r\nreference_dict = PI.qFRF()\r\n\r\nif all(v for v in servo_dict.values()):\r\n logging.info('Servo-control is set ON for all axes')\r\nelse:\r\n logging.warning('Servo-control is not set ON for axes',\r\n f'{[k for k in servo_dict.keys() if servo_dict[k]==False]}')\r\n\r\nif all(v for v in reference_dict.values()):\r\n logging.info('All axes have been succesfully referenced.')\r\n position_file.write(f\"{PI.qPOS()['1']}, {PI.qPOS()['2']}, {PI.qPOS()['3']}\\n\")\r\nelse:\r\n logging.warning('The following axes have not been referenced properly',\r\n f'{[k for k in reference_dict.keys() if reference_dict[k]==False]}')\r\n \r\nrangemin = list(PI.qTMN().values())\r\nrangemax = list(PI.qTMX().values())\r\nranges = zip(rangemin, rangemax)\r\n\r\n# error_dict = {i: PI.TranslateError(i) for i in range(10000) \r\n# if PI.TranslateError(i) != str(i)}\r\n\r\npi_error = PI.qERR()\r\nif pi_error > 0:\r\n logging.warning(f'WARNING: an error occurred (error code: {pi_error})',\r\n PI.TranslateError(pi_error))\r\n\r\nLaserDesk = serial.Serial(LASER_DESK_COM)\r\nif LaserDesk.is_open:\r\n logging.info('Serial connection was successfully established.')\r\nelse:\r\n logging.warning('Serial port could not be opened.')\r\n\r\nprint('===============================================')\r\n\r\nlisten = True\r\nwhile listen:\r\n bytes_waiting = LaserDesk.in_waiting\r\n if bytes_waiting==0:\r\n continue\r\n input_bytes = LaserDesk.read_until(b'\\x03')\r\n input_string = input_bytes.decode()[1:-1]\r\n \r\n if input_string=='End':\r\n logging.info(\"Recieved 'End' command. 
Stop listening\")\r\n listen = False\r\n continue\r\n \r\n controller_ready_flag = PI.IsControllerReady()\r\n #while any(v for v in PI.IsMoving().values()):\r\n # sleep(0.5)\r\n \r\n try:\r\n PI.send(input_string)\r\n logging.info(f'string sent to pi controller: {input_string}')\r\n if any(v for v in PI.IsMoving().values()):\r\n print('axes are moving', end='')\r\n while any(v for v in PI.IsMoving().values()):\r\n print('.', end='')\r\n sleep(1)\r\n if all(v for v in PI.qONT().values()):\r\n logging.info('axes stopped moving and are on target')\r\n logging.info('absolute motor positions now are:')\r\n logging.info(f'{PI.qPOS()}')\r\n position_file.write(f\"{PI.qPOS()['1']}, {PI.qPOS()['2']}, {PI.qPOS()['3']}\\n\")\r\n print('===============================================')\r\n else:\r\n logging.warning(f'some axes are not on target: {PI.qONT()}')\r\n print('===============================================')\r\n LaserDesk.write(b'\\x02 1 \\x03')\r\n except Exception as e:\r\n logging.error('An exception occured while sending the command to the PI controller:')\r\n logging.error(e)\r\n LaserDesk.write(b'\\x02 0 \\x03')\r\n\r\nposition_file.close()\r\nLaserDesk.close()\r\nlogging.info(\"Serial connection was closed. 
End of script.\")" }, { "alpha_fraction": 0.8320000171661377, "alphanum_fraction": 0.8339999914169312, "avg_line_length": 70.57142639160156, "blob_id": "298bad09dbf38ff5df7809858153570d8aecb422", "content_id": "50cd6d6517b18b17a6f0ce86b8619f3eb543f46e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 502, "license_type": "permissive", "max_line_length": 140, "num_lines": 7, "path": "/README.md", "repo_name": "p4squ4lle/PI-Controller-Communication", "src_encoding": "UTF-8", "text": "# PI-Controller-Communication\n\n## Bewerkstelligung der Kommunikation zwischen Laser Desk und PI controller mittels virtuellem Null-Modem Kabel\n\nDie serielle Verbindung zwischen der Scansoftware (Laserdesk) und dem Python-Programm wird durch einen Null-Modem Emulator bewerkstelligt. \nDie Software, die diese virtuellen COM ports zur Verfügung stellt, heißt com0com. Damit diese auf dem Steuerrechner einwandfrei funktioniert\nmusste der \"secure boot\" modus im UEFI (vormals BIOS) ausgeschaltet werden." 
}, { "alpha_fraction": 0.4533333480358124, "alphanum_fraction": 0.5, "avg_line_length": 25.55555534362793, "blob_id": "aa55e0c146a7a51c1159784be3fe0e1466925a16", "content_id": "cfa057a94efe9043f34901dd8a37aaec3c17dc7e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 750, "license_type": "permissive", "max_line_length": 69, "num_lines": 27, "path": "/PIControllerConnection.py", "repo_name": "p4squ4lle/PI-Controller-Communication", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nimport serial\r\nimport subprocess\r\nimport logging\r\nfrom pipython import GCSDevice, pitools\r\n\r\n\r\n# Initialize PI Motor Controller\r\nSN = '120060504'\r\nSTAGES = ['M-521.DG1', 'M-405.DG(FW000.000)', 'M-405.DG(FW000.000)',]\r\nREFMODE = 'FRF'\r\n\r\nPI = GCSDevice('C-844')\r\nPI.ConnectUSB(serialnum=SN)\r\nprint('connected: {}'.format(PI.qIDN().strip()))\r\n \r\nprint('===============================================')\r\n \r\nif PI.HasqVER():\r\n print('version info: {}'.format(PI.qVER().strip()))\r\n \r\nprint('===============================================')\r\n \r\nprint('initialize connected stages...')\r\npitools.startup(PI, stages=STAGES, refmodes=REFMODE)\r\nprint('===============================================')\r\n\r\n " } ]
3
strawberryblackhole/hippopotamus
https://github.com/strawberryblackhole/hippopotamus
40c478f7ee5e12e709beb5311b656250fcbb4c0b
6b28d8702b8ce88764b73f2979f5cc396eb5f37b
7d1f859e8914c38bbd9bcff05debd70b8b767b56
refs/heads/master
"2022-12-01T14:44:08.980252"
"2020-08-18T16:21:29"
"2020-08-18T16:21:29"
273,690,711
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7797427773475647, "alphanum_fraction": 0.7974276542663574, "avg_line_length": 102.66666412353516, "blob_id": "adf47f683023faadf646d0bc18e0b5c3c2e8dea8", "content_id": "10138e3c20c80ce6a88726634fed7a3de4600943", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1244, "license_type": "no_license", "max_line_length": 367, "num_lines": 12, "path": "/README.md", "repo_name": "strawberryblackhole/hippopotamus", "src_encoding": "UTF-8", "text": "This project aims to provide a tool that generates a structure in a minecraft world that contains every single wikipedia article.\n\n\n## Installation\nInstall https://github.com/Amulet-Team/Amulet-Map-Editor from source.\nLatest known working commit is a0b729f0ee3767cf0b583a94eddf9d412d2cd0a3\n\n## Usage\nFirst you need to get a ZIM file of the wiki that you want to integrate into minecraft. You can find some here:\nThen you need to find out how many chunks this will occupy. For this run the program without specifying a world. It will stop after it has loaded the wiki file (which can take a few minutes if its big) and provide you with the corner chunks of the square area that will be occupied.\nThese chunks have to be present in your world file. If you havent visited them, they are not. To make sure that they are present, fly over them or use a tool like https://github.com/strawberryblackhole/ChunkGenerator (If you use a world file from the spigot server and run into problems opening it with Amulet, try opening an saving it once with the Minecraft client.\nAfterwards run the command again and it will start filling the chunks. It can take a few minutes per chunk. 
It prints you the status, so that you can resume after a break or crash.\n" }, { "alpha_fraction": 0.5995025038719177, "alphanum_fraction": 0.6158271431922913, "avg_line_length": 40.22435760498047, "blob_id": "1012955a9fa17b952e3f40212f78324d6a4c12bb", "content_id": "2eff6962bd8d3f9c17413763c5368c5161ce46b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6432, "license_type": "no_license", "max_line_length": 213, "num_lines": 156, "path": "/fillWithWiki.py", "repo_name": "strawberryblackhole/hippopotamus", "src_encoding": "UTF-8", "text": "from htmlParser import getFormatedArticle\nfrom chunkGenerator import *\nfrom ZIMply.zimply import ZIMFile\nfrom os import path\nimport math\nimport time\nimport argparse\n\nfrom amulet.world_interface import load_world\n\ndef generateChunkList(totalArticleCount, chunkBookCapacity, target_pos, outputForceload = False):\n #generate a square, that could fit (more than) all articles\n sideLength = math.ceil(math.sqrt(totalArticleCount/chunkBookCapacity))\n if outputForceload:\n command = \"/chunkgenerator:generatechunks %d %d %d %d\"%(target_pos[0] - 1, target_pos[1] - 1, target_pos[0] + sideLength + 1, target_pos[1] + sideLength + 1)#+- 1 to include the outer border of the library\n print(command)\n return\n\n chunkList = []\n for x in range(sideLength):\n for z in range(sideLength):\n if len(chunkList) >= math.ceil(totalArticleCount/chunkBookCapacity): #stop if we have enough chunks\n break\n chunkList.append([x + target_pos[0] // 16, z + target_pos[1] // 16])\n return chunkList\n\ndef generateWallList(chunkList):\n wallChunkWithSlice = []\n for chunk in chunkList:\n #create chunk slices for the 4 chunks that would have walls to the center chunk\n potentialWalls = []\n potentialWalls.append([[1,0], [0, slice(0,16)]])\n potentialWalls.append([[0,1], [slice(0,16), 0]])\n potentialWalls.append([[-1,0], [15, slice(0,16)]])\n potentialWalls.append([[0,-1], [slice(0,16), 
15]])\n\n #turn its local coordinates into world coordinates\n for potWall in potentialWalls:\n potWall[0][0] += chunk[0]\n potWall[0][1] += chunk[1]\n\n #only keep the wallchunk if its not in use\n for potWall in potentialWalls:\n if potWall[0] in chunkList:\n continue\n wallChunkWithSlice.append(potWall)\n\n return wallChunkWithSlice\n\n\ndef getLastArticleId(zimfile):\n article = None\n for article in zimfile:\n pass\n return article[2]\n\ndef fill( booksPerBarrel, \n position, \n world = False, \n dimension = \"overworld\", \n skipChunk = 0, \n skipArticles = 0, \n filePath = \"\",\n totalArticleCount = -1):\n zimfile = ZIMFile(filePath,\"utf-8\")\n\n if totalArticleCount == -1:\n totalArticleCount = getLastArticleId(zimfile)\n print(\"Article count: \", totalArticleCount)\n\n\n barrelPositionList = generateBarrelPositionList()\n barrelsPerChunk = len(barrelPositionList)\n chunkBookCapacity = barrelsPerChunk * booksPerBarrel\n \n chunkList = generateChunkList(totalArticleCount, chunkBookCapacity, position, world == False)\n if world:\n\n wallChunkList = generateWallList(chunkList)\n\n totalChunkCount = len(chunkList) + len(wallChunkList)\n completedChunks = 0\n currentArticle = skipArticles\n for chunkCoords in chunkList:\n if skipChunk > 0:\n skipChunk -= 1\n completedChunks += 1\n currentArticle += booksPerBarrel * barrelsPerChunk\n continue\n \n start = time.perf_counter()\n\n worldObj = load_world(path.expandvars(world))\n chunk = worldObj.get_chunk(chunkCoords[0], chunkCoords[1], dimension)\n fillChunk(chunk, barrelPositionList, worldObj, dimension, currentArticle, booksPerBarrel, filePath, chunkList, position)\n currentArticle += booksPerBarrel * barrelsPerChunk\n\n worldObj.create_undo_point()#workaround suggested by amulet team so that saving works (can possibly be removed in the future)\n worldObj.save()\n worldObj.close()\n\n completedChunks += 1\n print(\"chunk time (m): \", (time.perf_counter() - start)/60)\n print(\"completed chunk: \", 
completedChunks)\n yield 100 * completedChunks / totalChunkCount\n\n for wallChunkCoords, orientation in wallChunkList:\n chunk = worldObj.get_chunk(wallChunkCoords[0], wallChunkCoords[1], dimension)\n placeWall(chunk, orientation, worldObj)\n\n completedChunks += 1\n yield 100 * completedChunks / totalChunkCount\n \n worldObj.create_undo_point()#workaround suggested by amulet team so that saving works (can possibly be removed in the future)\n worldObj.save()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Puts a wiki into a Minecraft world')\n parser.add_argument('-wiki', type=str, help='Location of the wiki file')\n parser.add_argument('-world', type=str, help='Location of the world file. You may use %%APPDATA%%')\n parser.add_argument('-booksPerBarrel', type=int, help='Number of books to put in a barrel', default=27)\n parser.add_argument('-chunkSkip', type=int, help='Number of chunks to skip', default=0)\n parser.add_argument('-articleCount', type=int, help='If the number of articles was counted before, specifying this can save startup time', default=-1)\n parser.add_argument('-pos', metavar=(\"X\",\"Z\"),type=int, help='X Z coordinates of the starting chunk (block coordinates)', default=[0,0], nargs=2)\n \n args = parser.parse_args()\n\n #debug vars\n bookSkip = 0\n args.world = '%APPDATA%\\\\.minecraft\\\\saves\\\\loadedWorld\\\\'\n args.chunkSkip = 5\n args.booksPerBarrel = 50\n args.pos = [0,0]\n #args.wiki = path.dirname(path.realpath(__file__)) + \"\\\\wikipedia_de_chemistry_nopic_2020-04.zim\"\n #args.articleCount = ????\n args.wiki = path.dirname(path.realpath(__file__)) + \"\\\\wikipedia_de_all_nopic_2020-04.zim\"\n #args.articleCount = 3979758\n\n if args.world is not None:\n for progress in fill(args.booksPerBarrel,\n args.pos,\n world = args.world,\n skipArticles = bookSkip,\n skipChunk = args.chunkSkip,\n filePath = args.wiki,\n totalArticleCount = args.articleCount):\n print(progress)\n else:\n for progress in 
fill(args.booksPerBarrel,\n args.pos,\n world = False,\n skipArticles = bookSkip,\n skipChunk = args.chunkSkip,\n filePath = args.wiki):\n pass\n\n" }, { "alpha_fraction": 0.5589123964309692, "alphanum_fraction": 0.5775885581970215, "avg_line_length": 47.123348236083984, "blob_id": "8011b0de9b6d575d6092a77828dd609171b609fb", "content_id": "052cee5c8fd4d3722b598738b6cea10085b58884", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10926, "license_type": "no_license", "max_line_length": 402, "num_lines": 227, "path": "/chunkGenerator.py", "repo_name": "strawberryblackhole/hippopotamus", "src_encoding": "UTF-8", "text": "from amulet.api.block import Block\nimport amulet_nbt\nfrom amulet.api.block_entity import BlockEntity\nfrom htmlParser import getFormatedArticle\nfrom functools import partial\nimport multiprocessing\nfrom multiprocessing.pool import Pool\nfrom multiprocessing.pool import ThreadPool\nfrom ZIMply.zimply import ZIMFile\nimport time\nimport re\nimport json\n\ndef getBlock(world, block):\n \"\"\"turns a block object into a usable block object, no idea what this actually does\"\"\"\n tmp = world.world_wrapper.translation_manager.get_version(\n \"java\",\n (1, 15, 2)\n ).block.to_universal(\n block\n )[0]\n return world.palette.get_add_block(tmp)\n\ndef createBlocks(world):\n \"\"\"generates all needed Block objects\"\"\"\n\n barrel = getBlock(world, Block(\"minecraft\", \"barrel\", {\"facing\" : amulet_nbt.TAG_String(\"up\"), \"open\" : amulet_nbt.TAG_String(\"false\")}))\n\n wool = getBlock(world, Block(\"minecraft\", \"red_wool\"))\n\n air = getBlock(world, Block(\"minecraft\", \"air\"))\n\n stone = getBlock(world, Block(\"minecraft\", \"stone\"))\n\n glowstone = getBlock(world, Block(\"minecraft\", \"glowstone\"))\n\n lantern = getBlock(world, Block(\"minecraft\", \"lantern\", {\"hanging\" : amulet_nbt.TAG_String(\"false\")}))\n\n sign_north = getBlock(world, Block(\"minecraft\", 
\"acacia_wall_sign\", {\"facing\" : amulet_nbt.TAG_String(\"north\")}))\n sign_south = getBlock(world, Block(\"minecraft\", \"acacia_wall_sign\", {\"facing\" : amulet_nbt.TAG_String(\"south\")}))\n\n return [barrel, wool, glowstone, sign_north, sign_south, air, stone, lantern]\n\n\ndef generateBarrelPositionList():\n \"\"\"Generates a list of coordinates in a chunk (16x16) where barrels should be\"\"\"\n barrels = []\n\n for row in [0,8]:\n for y in range(5,7):\n for x in range(1,8,2):\n subList = [(x, y, z) for z in range(1 + row, 7 + row)]\n barrels.extend(subList)\n for x in range(8,15,2):\n subList = [(x, y, z) for z in range(1 + row, 7 + row)]\n barrels.extend(subList)\n return barrels\n\ndef generateSignEntity(x, y, z, direction):\n \"\"\"Generates the entity to make the sign display its position\"\"\"\n return BlockEntity(\"java\", \"acacia_wall_sign\", x, y, z,\\\n amulet_nbt.NBTFile(\\\n value = amulet_nbt.TAG_Compound(\\\n {\\\n \"utags\": amulet_nbt.TAG_Compound(\\\n {\\\n \"keepPacked\": amulet_nbt.TAG_Byte(0),\\\n \"Text4\": amulet_nbt.TAG_String(\"{\\\"text\\\":\\\"\\\"}\"),\\\n \"Text3\": amulet_nbt.TAG_String(\"{\\\"text\\\":\\\"\\\"}\"),\\\n \"Text2\": amulet_nbt.TAG_String(\"{\\\"text\\\":\\\"%d - %d\\\"}\"%(z + direction, z + direction * 6)), \\\n \"Text1\": amulet_nbt.TAG_String(\"{\\\"text\\\":\\\"%d\\\"}\"%x)\\\n }),\\\n \"Color\": amulet_nbt.TAG_String(\"black\")\\\n })))\n\ndef fillSigns(chunk, world, dimension, sign_north, sign_south):\n \"\"\"Generates all signs in the chunk and fills them with text\"\"\"\n for z in [0, 8]:\n for x in list(range(1,8,2)) + list(range(8,15,2)):\n chunk.blocks[x,6,z] = sign_north\n chunk.block_entities.insert(generateSignEntity(x + chunk.cx * 16, 6, z + chunk.cz * 16, 1))\n\n for z in [7, 15]:\n for x in list(range(1,8,2)) + list(range(8,15,2)):\n chunk.blocks[x,6,z] = sign_south\n chunk.block_entities.insert(generateSignEntity(x + chunk.cx * 16, 6, z + chunk.cz * 16, -1))\n\n\ndef fillbarrels(chunk, 
barrelPositionList, barrelBlock, currentArticle, booksPerBarrel, zimFilePath, chunkList, target_pos):\n \"\"\"Generates all barrels in the chunk and fills them with books/articles\"\"\"\n \n for barrelPos in barrelPositionList:\n books = []\n titles = []\n\n\n start = time.perf_counter()\n\n if booksPerBarrel > 30:\n pool = Pool(processes=4) #on my laptop ~4 processes was faster than any amount of threads (4 = logic core count)\n else:\n pool = ThreadPool(processes=3)#the article reading is mostly cpu limited, so going high on process count doesnt help\n outputs = pool.map(partial(tryGetArticle, zimFilePath = zimFilePath, barrelPositionList = barrelPositionList, booksPerBarrel = booksPerBarrel, chunkList = chunkList, target_pos = target_pos), range(currentArticle,currentArticle + booksPerBarrel))\n pool.close()\n #outputs = []\n #for id in range(currentArticle, currentArticle + booksPerBarrel):\n # outputs.append(tryGetArticle(id, zimFilePath))\n\n currentArticle += booksPerBarrel\n for output in outputs:\n if output[0] == None:\n continue\n titles.append(output[1])\n books.append(output[0])\n\n stop = time.perf_counter()\n #print(\"generating a book\", (stop-start)/booksPerBarrel)\n\n chunk.blocks[barrelPos] = barrelBlock\n barrelEntity = BlockEntity(\"java\", \"barrel\", barrelPos[0] + chunk.cx * 16, barrelPos[1], barrelPos[2] + chunk.cz * 16,\\\n amulet_nbt.NBTFile(\\\n value = amulet_nbt.TAG_Compound(\\\n {\\\n \"utags\": amulet_nbt.TAG_Compound(\\\n {\\\n \"keepPacked\": amulet_nbt.TAG_Byte(0),\\\n \"isMovable\": amulet_nbt.TAG_Byte(1),\\\n \"Findable\": amulet_nbt.TAG_Byte(0),\\\n \"CustomName\": amulet_nbt.TAG_String(\"{\\\"text\\\":\\\"x:%d y:%d z:%d\\\"}\"%(barrelPos[0] + chunk.cx * 16, barrelPos[1], barrelPos[2] + chunk.cz * 16)),\\\n \"Items\": amulet_nbt.TAG_List(\\\n value = [\n amulet_nbt.TAG_Compound(\\\n {\\\n \"Slot\": amulet_nbt.TAG_Byte(iBook),\\\n \"Count\": amulet_nbt.TAG_Byte(1),\\\n \"id\": 
amulet_nbt.TAG_String(\"minecraft:written_book\"),\\\n \"tag\": amulet_nbt.TAG_Compound(\\\n {\n \"pages\": amulet_nbt.TAG_List(\\\n value=[amulet_nbt.TAG_String(page) for page in books[iBook]],\\\n list_data_type = 8\\\n ),\\\n \"title\": amulet_nbt.TAG_String(titles[iBook]),\\\n \"author\": amulet_nbt.TAG_String(\"Pos: x:%d y:%d z:%d, ID: %d\"%(barrelPos[0] + chunk.cx * 16, barrelPos[1], barrelPos[2] + chunk.cz * 16, currentArticle + iBook)),\n })\n })\n for iBook in range(len(books)) \n ], list_data_type = 9\\\n )\n })\\\n })))\n chunk.block_entities.insert(barrelEntity)\n\n\ndef tryGetArticle(id, zimFilePath, barrelPositionList, booksPerBarrel, chunkList, target_pos):\n \"\"\"Tries to find the article with the given id, returns [False, False] if no article was found, else article and its title are returned\"\"\"\n\n start = time.perf_counter()\n zimFile = ZIMFile(zimFilePath,\"utf-8\")\n\n stop = time.perf_counter()\n #print(\"some overhead \", stop - start)\n\n start = time.perf_counter()\n article = zimFile._get_article_by_index(id, follow_redirect=False)\n if article != None:\n if article.mimetype == \"text/html\":\n articleTitle, articleContent = getFormatedArticle(article.data.decode(\"utf-8\"), zimFile, barrelPositionList, booksPerBarrel, chunkList, target_pos)\n\n re_pattern = re.compile(u'[^\\u0000-\\uD7FF\\uE000-\\uFFFF]', re.UNICODE)\n articleContent = [re_pattern.sub(u'\\uFFFD', page).replace(u'\\xa0', u' ') for page in articleContent] # seems like mc cant handle 💲. (found in the article about the $ sign), this lead me to the assumption, that mc cant handle any surrogate unicode pair. 
https://stackoverflow.com/questions/3220031/how-to-filter-or-replace-unicode-characters-that-would-take-more-than-3-bytes/3220210#3220210\n\n stop = time.perf_counter() \n #print(\"parsing \", stop - start)\n\n return articleContent, json.dumps(article.url.replace(u'\\xa0', u' '), ensure_ascii=False)[1:-1]\n if article.is_redirect == True:\n coordinates = getArticleLocationById(article.mimetype, barrelPositionList, booksPerBarrel, chunkList, target_pos)\n return [\"{\\\"text\\\":\\\"Redirect to article with ID %d at x:%d y:%d z:%d\\\"}\"%tuple([id] + coordinates)], json.dumps(article.url.replace(u'\\xa0', u' '), ensure_ascii=False)[1:-1]\n return None, None\n\ndef getArticleLocationById(id, barrelPositionList, booksPerBarrel, chunkList, target_pos):\n\n booksPerChunk = len(barrelPositionList) * booksPerBarrel\n chunk = int(id) // booksPerChunk\n bookNumberInChunk = (int(id) - chunk * booksPerChunk)\n barrel = (bookNumberInChunk - 1)// booksPerBarrel #-1 because if booksNumberInChunk == booksPerBarrel, it should be 0\n\n return [chunkList[chunk][0] * 16 + barrelPositionList[barrel][0] + target_pos[0], barrelPositionList[barrel][1], chunkList[chunk][1] * 16 + barrelPositionList[barrel][2] + target_pos[1]]\n\n\ndef fillChunk(chunk, barrelPositionList, world, dimension, currentArticle, booksPerBarrel, zimfilePath, chunkList, target_pos):\n \"\"\"Fills the chunk with all blocks and content\"\"\"\n barrel, wool, glowstone, sign_north, sign_south, air, stone, lantern = createBlocks(world)\n\n chunk.blocks[:,5:9:,:] = air\n\n chunk.blocks[:,3,:] = stone\n chunk.blocks[:,9,:] = stone\n\n for innerRow in [1,5,14,10]:\n for positionInRow in [6,9]:\n chunk.blocks[innerRow,7,positionInRow] = lantern\n for outerRow in [3,7,8,12]:\n for positionInRow in [1,14]:\n chunk.blocks[outerRow,7,positionInRow] = lantern\n\n fillSigns(chunk, world, dimension, sign_north, sign_south)\n\n chunk.blocks[:,4,:] = wool\n\n chunk.blocks[0,4,7:9] = glowstone\n chunk.blocks[0,4,0] = glowstone\n 
chunk.blocks[0,4,15] = glowstone\n chunk.blocks[15,4,7:9] = glowstone\n chunk.blocks[15,4,0] = glowstone\n chunk.blocks[15,4,15] = glowstone\n\n fillbarrels(chunk, barrelPositionList, barrel, currentArticle, booksPerBarrel, zimfilePath, chunkList, target_pos)\n\n chunk.changed = True\n\ndef placeWall(chunk, orientation, world):\n \"\"\"Places a wall on the wanted side of the chunk\"\"\"\n barrel, wool, glowstone, sign_north, sign_south, air, stone, lantern = createBlocks(world)\n chunk.blocks[orientation[0],3:9,orientation[1]] = stone\n chunk.changed = True" }, { "alpha_fraction": 0.5209668874740601, "alphanum_fraction": 0.5345886945724487, "avg_line_length": 41.305084228515625, "blob_id": "b93f5aa3eaa7003b71328097751f06dde037d69a", "content_id": "bcecada82b7837c3dd13d96285b24f6a45d0b892", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7488, "license_type": "no_license", "max_line_length": 190, "num_lines": 177, "path": "/htmlParser.py", "repo_name": "strawberryblackhole/hippopotamus", "src_encoding": "UTF-8", "text": "from html.parser import HTMLParser\nfrom bs4 import BeautifulSoup\nimport json\n\nclass MyHTMLParser(HTMLParser):\n def __init__(self):\n HTMLParser.__init__(self)\n\n def feed(self, in_html, zimFile, barrelPositionList, booksPerBarrel, chunkList, target_pos):\n self._data = [\"\"]\n self._formats = [[[],[]]]\n self._attrs = []\n self._title = \"\"\n self._zimFile = zimFile\n self._barrelPositionList = barrelPositionList\n self._booksPerBarrel = booksPerBarrel\n self._chunkList = chunkList\n self._target_pos = target_pos\n super(MyHTMLParser, self).feed(in_html)\n articleContent = self._data[0] \n articleFormating = self._formats[0]\n\n pages = ['{\"extra\":[{\"text\":\"']\n charsOnPage = 0\n for iChar in range(len(articleContent)):\n #if page not too long\n if charsOnPage < 200:\n \n #if the formating has to be defined\n if charsOnPage == 0 or articleFormating[0][iChar] != 
articleFormating[0][iChar -1] or articleFormating[1][iChar] != articleFormating[1][iChar -1]:\n pages[-1] += '\"},{'\n if articleFormating[0][iChar] > 0:\n pages[-1] += '\"bold\":true,'\n if articleFormating[1][iChar] > 0:\n pages[-1] += '\"italic\":true,'\n pages[-1] += '\"text\":\"'\n \n pages[-1] += json.dumps(articleContent[iChar], ensure_ascii=False)[1:-1]\n charsOnPage += 1\n if articleContent[iChar] == \"\\n\":\n charsOnPage += 12\n\n else:\n pages[-1] += '\"}],\"text\":\"\"}'\n pages.append('{\"extra\":[{')\n\n if articleFormating[0][iChar] > 0:\n pages[-1] += '\"bold\":true,'\n if articleFormating[1][iChar] > 0:\n pages[-1] += '\"italic\":true,'\n\n\n pages[-1] +='\"text\":\"' + json.dumps(articleContent[iChar], ensure_ascii=False)[1:-1]\n charsOnPage = 0 \n\n pages[-1] += ' The original work has been modified.\"}],\"text\":\"\"}'\n\n return json.dumps(self._title, ensure_ascii=False), pages\n\n def handle_data(self, data):\n self._data[-1] += data\n for formating in self._formats[-1]:\n formating.extend([0]*len(data))\n\n def handle_starttag(self, tag, attrs):\n self._data.append(\"\")\n self._formats.append([[],[]])\n self._attrs.append(attrs)\n\n def remove_data(self, replacement = \"\", replacementFormatings = [0,0]):\n self._data[-1] = replacement\n self._formats[-1] = [[0] * len(replacement), [0] * len(replacement)]\n self.collaps_last_block_and_format(formatings=replacementFormatings)\n\n def collaps_last_block_and_format(self, prefix = \"\", postfix = \"\", formatings = [0,0]):\n\n self._data[-1] = prefix + self._data[-1] + postfix\n \n #extend format by pre/postfix length\n for iFormat in range(len(self._formats[-1])):\n #turn on formating, but dont turn it off (because allready collapsed formats should keep their formating and should not be overwritten)\n for iElement in range(len(self._formats[-1][iFormat])):\n self._formats[-1][iFormat][iElement] += formatings[iFormat]\n \n self._formats[-1][iFormat][:0] = [formatings[iFormat]] * len(prefix) \n 
self._formats[-1][iFormat].extend([formatings[iFormat]] * len(postfix))\n\n #collaps the last array entry\n self._data[-2] += self._data[-1]\n for iFormat in range(len(self._formats[-2])):\n self._formats[-2][iFormat].extend(self._formats[-1][iFormat])\n\n #delete last array entry\n self._data.pop()\n self._formats.pop()\n self._attrs.pop()\n\n def handle_endtag(self, tag): \n if tag == 'a' :\n foundiAtt = -1\n for iAtt in range(len(self._attrs[-1])):\n try:\n self._attrs[-1][iAtt].index(\"href\")\n foundiAtt = iAtt\n break\n except ValueError:\n continue\n\n\n if foundiAtt != -1:\n url = self._attrs[-1][iAtt][1].split(\"#\")[0]\n entry, idx = self._zimFile._get_entry_by_url(\"A\", url)\n if(idx != None):\n location = getArticleLocationById(idx,self._barrelPositionList, self._booksPerBarrel, self._chunkList, self._target_pos)\n self.collaps_last_block_and_format(\"\", \"[ID %d at x:%d y:%d z:%d]\"%tuple([idx] + location))\n else:\n self.collaps_last_block_and_format(\"\", \"[%s]\"%url)\n else:\n self.collaps_last_block_and_format() \n elif tag == 'br' :\n self.collaps_last_block_and_format(\"\\n\", \"\")\n elif tag == 'div' :\n if self._data[-1] != \"\" and self._data[-1][-1] != \"\\n\":\n self.collaps_last_block_and_format(\"\\n \", \"\\n\")\n else:\n self.collaps_last_block_and_format()\n elif tag == 'h1' :\n if ('class', 'section-heading') in self._attrs[-1]: #if its the title of the article\n self._title = self._data[-1]\n self.collaps_last_block_and_format(\"\", \"\\n\", [1,0])\n else:\n self.collaps_last_block_and_format(\"\\n\\n\", \"\\n\", [1,0])\n elif tag == 'h2' :\n self.collaps_last_block_and_format(\"\\n\\n\", \"\\n\", [1,0])\n elif tag == 'h3' :\n self.collaps_last_block_and_format(\"\\n\\n\", \"\\n\")\n elif tag == 'li' :\n self.collaps_last_block_and_format(\"\\n -\", \"\")\n elif tag == 'p' :\n if self._data[-1] != \"\":\n self.collaps_last_block_and_format(\"\\n \", \"\\n\")\n else:\n self.collaps_last_block_and_format()\n elif tag == 'ol' :\n 
self.collaps_last_block_and_format(\"\\n\")\n elif tag == 'ul' :\n self.collaps_last_block_and_format(\"\\n\")\n elif tag == 'script' :\n self.remove_data()\n elif tag == 'style' :\n self.remove_data()\n elif tag == 'table' :\n self.remove_data(\"\\nCan't display table\\n\", [0,1])\n elif tag == 'title' :\n self.remove_data()\n else:\n self.collaps_last_block_and_format()\n\ndef getArticleLocationById(id, barrelPositionList, booksPerBarrel, chunkList, target_pos):\n\n booksPerChunk = len(barrelPositionList) * booksPerBarrel\n chunk = int(id) // booksPerChunk\n bookNumberInChunk = (int(id) - chunk * booksPerChunk)\n barrel = (bookNumberInChunk - 1)// booksPerBarrel #-1 because if booksNumberInChunk == booksPerBarrel, it should be 0\n\n return [chunkList[chunk][0] * 16 + barrelPositionList[barrel][0] + target_pos[0], barrelPositionList[barrel][1], chunkList[chunk][1] * 16 + barrelPositionList[barrel][2] + target_pos[1]]\n \n\n\ndef getFormatedArticle(html, zimFile, barrelPositionList, booksPerBarrel, chunkList, target_pos):\n parser = MyHTMLParser()\n soup = BeautifulSoup(html, features =\"html.parser\")\n title, text = parser.feed(str(soup).replace(\"\\n\", \"\").replace(\"\\t\", \"\"), zimFile, barrelPositionList, booksPerBarrel, chunkList, target_pos) \n #text = parser.feed(html.replace(\"\\n\", \"\").replace(\"\\t\", \"\")) # some things break when not using bfs\n parser.close()\n\n return title, text\n" }, { "alpha_fraction": 0.6529411673545837, "alphanum_fraction": 0.6676470637321472, "avg_line_length": 26.219999313354492, "blob_id": "b3346617ccf245c26605c420a6e6e8945bf0e55d", "content_id": "7ff1997941a5c26b16488adf71d449f07fb81e8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1360, "license_type": "no_license", "max_line_length": 118, "num_lines": 50, "path": "/parserTester.py", "repo_name": "strawberryblackhole/hippopotamus", "src_encoding": "UTF-8", "text": "from amulet.api.selection import 
SelectionGroup\nfrom amulet.api.block import Block\nfrom amulet.api.data_types import Dimension\nfrom amulet import log\nimport amulet_nbt\nfrom amulet.api.block_entity import BlockEntity\nfrom ZIMply.zimply import ZIMFile\nimport os\nimport math\nimport time\n\nfrom fillWithWiki import getFormatedArticle\n\nzimfile = ZIMFile(os.path.dirname(os.path.realpath(__file__)) + \"\\\\wikipedia_de_basketball_nopic_2020-04.zim\",\"utf-8\")\narticleCount = list(zimfile)[-1][2]\n\ncount = 0\n\narticles = list(zimfile)\n\nfor article in range(articleCount):\n print(article)\n\n start = time.perf_counter()\n\n article = [x for x in articles if x[2] == article]\n\n print(\"article search\", time.perf_counter() - start)\n\n if len(article) > 1:\n raise Exception()\n foundArticle = len(article) == 1\n \n articleStop = 0\n if foundArticle:\n article = article[0]\n articleTitle = article[1]\n articleId = article[2] \n\n start = time.perf_counter()\n a = zimfile._get_article_by_index(articleId).data.decode(\"utf-8\")\n print(\"article read\", time.perf_counter() - start)\n\n start = time.perf_counter()\n formatedArticle = getFormatedArticle(a)\n print(\"article parse\", time.perf_counter() - start)\n print(formatedArticle)\n if count > 4:\n break\n count += 1" } ]
5
ViktorMihalik/Save-the-world
https://github.com/ViktorMihalik/Save-the-world
248cb70ded8a35d02ca9ebe1c1e7df20c6967d12
99c9693cd60e19029149852dc17f700deceaac71
3c3885cd0d99a6fc8a170731700494849878e36a
refs/heads/main
"2023-02-21T19:49:24.981766"
"2021-01-29T11:10:51"
"2021-01-29T11:10:51"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4301513433456421, "alphanum_fraction": 0.43779104948043823, "avg_line_length": 37.56195831298828, "blob_id": "3c4d2793b4585812f9cb8f554e80a13d51d3c02f", "content_id": "1fcdc9bbc75c0fdb9433f03b3027f288c2d3674d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15091, "license_type": "no_license", "max_line_length": 240, "num_lines": 347, "path": "/Save the world.py", "repo_name": "ViktorMihalik/Save-the-world", "src_encoding": "UTF-8", "text": "import random\r\n\r\n# ///Welcoming screen///\r\n\r\nprint(\"\"\"\r\nOoooh welcome unknown player. I'm going to destroy all humans! But of course I have to give you a change to defend yourself.\r\nSo you suppose to be the savior of the earth? Let ma laugh- HA-HA-HA.\r\nIf you would like to save the earth you have to beat me in 4 games. Okay than let's play a game.\r\n\"\"\")\r\n\r\nplayer_name = input(\"But at first 'savior' tell my your name: \")\r\n\r\n# /// Game over///\r\n\r\ngame_over = (\"\"\"World will be destroyed and it's your fault {}\r\n\r\n ▄▀▀█▄▄ ▄▀▀▀▀▄ ▄▀▀▀▀▄ ▄▀▀▄ ▄▀▄ ▄▀▀█▄▄▄▄ ▄▀▀█▄▄ \r\n█ ▄▀ █ █ █ █ █ █ █ ▀ █ ▐ ▄▀ ▐ █ ▄▀ █ \r\n▐ █ █ █ █ █ █ ▐ █ █ █▄▄▄▄▄ ▐ █ █ \r\n █ █ ▀▄ ▄▀ ▀▄ ▄▀ █ █ █ ▌ █ █ \r\n ▄▀▄▄▄▄▀ ▀▀▀▀ ▀▀▀▀ ▄▀ ▄▀ ▄▀▄▄▄▄ ▄▀▄▄▄▄▀ \r\n█ ▐ █ █ █ ▐ █ ▐ \r\n▐ ▐ ▐ ▐ ▐ \r\n\r\n _.-^^---....,,-- \r\n _-- --_ \r\n< >)\r\n| | \r\n \\._ _./ \r\n ```--. . , ; .--''' \r\n | | | \r\n .-=|| | |=-. \r\n `-=#$%&%$#=-' \r\n | ; :| \r\n _____.,-#%&$@%#&#~,._____\r\n\"\"\".format(player_name))\r\n\r\ngood= (\"\"\"\r\n ..--+++--..\r\n .-' | `-.\r\n +' | `+\r\n ' | `\r\n ' | `\r\n: | :\r\n: +'|`+ :\r\n. +' | `+ ;\r\n + +' | `+ +\r\n `. 
+' | `+ .'\r\n `._ | _.'\r\n `--.._|_..--' \r\nI decided not to destroy the world...\r\n\"\"\")\r\n# ///rock, paper, scissors///\r\n# header\r\n\r\nprint(\"\"\"\r\n\r\n###### ####### ##### # # ###### # ###### ####### ###### ##### ##### ### ##### ##### ####### ###### ##### \r\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \r\n# # # # # # # # # # # # # # # # # # # # # # # # # # \r\n###### # # # ### ###### # # ###### ##### ###### ##### # # ##### ##### # # ###### ##### \r\n# # # # # # # # ####### # # # # # # # # # # # # # # \r\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \r\n# # ####### ##### # # # # # # ####### # # ##### ##### ### ##### ##### ####### # # ##### \r\n\r\n\"\"\")\r\n\r\nmaxpoints_rsp = 3 # We can set higher points for win\r\nprint(\"{}, in this first game called rock, paper, scissor the one who scores {} points first wins \".format(player_name,maxpoints_rsp))\r\n\r\n# Game mechanics\r\ndef player_wins(i,j): \r\n \r\n if (i == \"rock\" and j == \"scissors\") or (i == \"scissors\" and j == \"paper\") or (i == \"paper\" and j == \"rock\"):\r\n return True\r\n elif (i == \"rock\" and j == \"paper\") or (i == \"scissors\" and j == \"rock\") or (i == \"paper\" and j == \"scissors\"):\r\n return False\r\n\r\n#game body\r\n\r\nplayer_point_RPS = 0\r\ncomputer_points_RSP = 0\r\n\r\nwhile computer_points_RSP or player_point_RPS != maxpoints_rsp:\r\n player_choice= input(\"Comon choose rock, paper or scissors: \")\r\n rsp= \"rock\",\"paper\",\"scissors\"\r\n computer_choice = random.choice(rsp)\r\n\r\n if player_choice not in rsp:\r\n player_choice= input(\"You can choose only choose rock, paper or scissors: \")\r\n \r\n if player_wins(player_choice,computer_choice) is True:\r\n player_point_RPS += 1\r\n print(\"I choose {} so point for you.\".format(computer_choice))\r\n\r\n\r\n if player_wins (player_choice,computer_choice) is False:\r\n computer_points_RSP += 1\r\n print(\"{} beat {} so point for 
me.\".format(computer_choice,player_choice))\r\n \r\n \r\n if player_choice == computer_choice:\r\n print(\"Hey {}, you've red my mind I choose {} to.\".format(player_name,computer_choice))\r\n pass\r\n\r\n print(\"Player points: {} \".format(player_point_RPS))\r\n print(\"Computer point: {}\".format(computer_points_RSP))\r\n print()\r\n\r\n if player_point_RPS == maxpoints_rsp:\r\n print(\"GRHHHH How it’s possible that you beat me?!\")\r\n break\r\n if computer_points_RSP == maxpoints_rsp:\r\n print(game_over)\r\n \r\n exit()\r\n\r\n# ///Card game///\r\n# header\r\nprint(\"\"\"\r\n.------..------..------..------..------..------..------..------.\r\n|C.--. ||A.--. ||R.--. ||D.--. ||W.--. ||A.--. ||R.--. ||S.--. |\r\n| :/\\: || (\\/) || :(): || :/\\: || :/\\: || (\\/) || :(): || :/\\: |\r\n| :\\/: || :\\/: || ()() || (__) || :\\/: || :\\/: || ()() || :\\/: |\r\n| '--'C|| '--'A|| '--'R|| '--'D|| '--'W|| '--'A|| '--'R|| '--'S|\r\n`------'`------'`------'`------'`------'`------'`------'`------'\r\n\"\"\")\r\n\r\nwin_points = 3 # Setting points to win\r\nprint(\"Okay beginner's luck. {} let’s play another game called CARD WARS.\\n\"\r\n\"The rules are easy- who have higher card get a point('A' is the highest card). The one who get the first {} points is winner. Of course, it will be me.\".format(player_name,win_points))\r\n\r\n# Game mechanics\r\n\r\ncards = [\"6\",\"7\",\"8\",\"9\",\"10\",\"J\",\"Q\",\"K\",\"A\"] # Wee can change type of cards\r\n\r\ndef flip_coin(): # In case of equal points\r\n\r\n player_coin = input(\"It's a draw, let's flip a coin. 
What's your choice tail or head: \").lower()\r\n coin = \"tail\",\"head\"\r\n coin_side = random.choice(coin)\r\n if player_coin not in coin:\r\n player_coin= input(\"please choose betwen tail or head: \").lower()\r\n\r\n if player_coin in coin:\r\n if player_coin==coin_side:\r\n print(\"Yeah good guees It's {}, player wins.\".format(coin_side))\r\n \r\n\r\n else:\r\n print(\"It's {}, I'm a winner.\".format(coin_side))\r\n print(game_over)\r\n \r\n exit()\r\n\r\ndef shuffling_cards(r):\r\n hand= []\r\n for i in range(r):\r\n random_cards = [str(random.choice(cards))]\r\n hand = hand+random_cards\r\n return hand\r\n \r\n#game body\r\nplayer_point_CRD = 0\r\ncomputer_points_CRD = 0\r\n\r\nplayers_cards= shuffling_cards(5)\r\ncomputer_cards= shuffling_cards(5)\r\n\r\nwhile computer_points_CRD or player_point_CRD != win_points:\r\n \r\n computer_choice_CRD = str(random.choice(computer_cards))\r\n\r\n player_choice=input(\"Please choose one card from your hand {}:\".format(players_cards)).upper()\r\n \r\n if player_choice not in players_cards:\r\n player_choice=input(\"Again check your card and choose one {}:\".format(players_cards)).upper()\r\n \r\n if cards.index(computer_choice_CRD) > cards.index(player_choice):\r\n print(\"{} is higher than {} that means point for me\".format(computer_choice_CRD,player_choice))\r\n computer_points_CRD+=1\r\n if cards.index(computer_choice_CRD) < cards.index(player_choice):\r\n print(\"Okay I have {}, point for you\".format(computer_choice_CRD))\r\n player_point_CRD+=1\r\n if cards.index(computer_choice_CRD) == cards.index(player_choice):\r\n print(\"My is also {}, boring no one get a point\".format(computer_choice_CRD))\r\n \r\n players_cards.remove(player_choice)\r\n computer_cards.remove(computer_choice_CRD)\r\n print(\"{}'s points:{} and computer:{}\".format(player_name,player_point_CRD,computer_points_CRD))\r\n print()\r\n\r\n if not players_cards:\r\n if player_point_CRD > computer_points_CRD:\r\n print(\"Hmm you won another 
game. Maybe I have to take it seriously\")\r\n break\r\n if computer_points_CRD > player_point_CRD:\r\n print(game_over)\r\n \r\n exit()\r\n if computer_points_CRD == player_point_CRD:\r\n flip_coin()\r\n break\r\n\r\n if player_point_CRD == win_points:\r\n print(\"Hmm you won another game. Maybe I have to take it seriously\")\r\n break\r\n \r\n if computer_points_CRD == win_points:\r\n print(game_over)\r\n \r\n exit()\r\n\r\n# ///Guess the number///\r\n# header\r\nprint(\"\"\" \r\n _ _ _ _ __ __ ____ ______ _____ ____ _____ \r\n | \\ | | | | | \\/ | _ \\| ____| __ \\ / __ \\ / ____|\r\n | \\| | | | | \\ / | |_) | |__ | |__) | | | | (___ \r\n | . ` | | | | |\\/| | _ <| __| | _ /| | | |\\___ \\ \r\n | |\\ | |__| | | | | |_) | |____| | \\ \\| |__| |____) |\r\n |_| \\_|\\____/|_| |_|____/|______|_| \\_\\\\____/|_____/ \r\n \r\n \"\"\")\r\nprint()\r\nprint ( \"My systems do not report any armageddon, What a pitty. But it's time to change that\" \r\n \"Guess the number I am thinking of betwen 1 and 20. But be careful {} you only have 4 attempts\". format(player_name))\r\nprint()\r\n# Game mechanics\r\nguesses = 3\r\nnumber = random.randrange(1,20)\r\nplayer_guess= int(input(\"Guess the number I am thinking of: \"))\r\n\r\n#game body\r\nif player_guess == number:\r\n print(\"You are a wizard {}, you guess it for the first time. \".format(player_name))\r\n\r\nwhile player_guess != number:\r\n \r\n if player_guess < number: \r\n player_guess= int(input(\"Don't be humble it's higher. C'mon guess again: \"))\r\n guesses -= 1\r\n if player_guess > number: \r\n player_guess= int(input(\"No no no, go lower. 
C'mon guess again: \"))\r\n guesses -= 1\r\n \r\n if guesses == 0 and player_guess != number:\r\n print(\"My number was {}\".format(number))\r\n print(game_over)\r\n quit()\r\n \r\n if player_guess == number and guesses<=1:\r\n print(\"IT was closed but you save the world for now {}.\".format(player_name))\r\n break\r\n\r\n if player_guess == number and guesses>1:\r\n print(\"That is correct. I think I chose very easy game for you.\".format(guesses))\r\n break\r\n\r\n# ///Player can decides if he can end the game or continue///\r\nprint(\"Okay that wasn't bad. Look I have a little proposition for you before our last game. You can quit now and I will not destroy the world (maybe)\\n\"\r\n \"On the other hand you can take a risk and earn unknow reaward\")\r\nprint()\r\nplayer_choose = input(\"So what's you choice? Would you like to continue? Choose yes or no : \").lower()\r\n\r\nif player_choose == \"no\":\r\n consequences = good,game_over\r\n computer_chose = random.choice(consequences)\r\n print(computer_chose)\r\n quit()\r\n\r\nchoice= \"yes\",\"no\"\r\n\r\nif player_choose not in choice:\r\n player_choose = input(\"I know that it can be hard for you bud just write me yes or no: \")\r\n\r\nelse :\r\n pass\r\n\r\n# ///Player decided to continue///\r\n\r\n# ///Dices///\r\n# header\r\nprint(\"\"\"\r\n ▄████████ ▄████████ ▄████████ ▄███████▄ ▄██ ▄ ████████▄ ▄█ ▄████████ ▄████████ ▄████████ \r\n███ ███ ███ ███ ███ ███ ██▀ ▄██ ███ ██▄ ███ ▀███ ███ ███ ███ ███ ███ ███ ███ \r\n███ █▀ ███ ███ ███ ███ ▄███▀ ███▄▄▄███ ███ ███ ███▌ ███ █▀ ███ █▀ ███ █▀ \r\n███ ▄███▄▄▄▄██▀ ███ ███ ▀█▀▄███▀▄▄ ▀▀▀▀▀▀███ ███ ███ ███▌ ███ ▄███▄▄▄ ███ \r\n███ ▀▀███▀▀▀▀▀ ▀███████████ ▄███▀ ▀ ▄██ ███ ███ ███ ███▌ ███ ▀▀███▀▀▀ ▀███████████ \r\n███ █▄ ▀███████████ ███ ███ ▄███▀ ███ ███ ███ ███ ███ ███ █▄ ███ █▄ ███ \r\n███ ███ ███ ███ ███ ███ ███▄ ▄█ ███ ███ ███ ▄███ ███ ███ ███ ███ ███ ▄█ ███ \r\n████████▀ ███ ███ ███ █▀ ▀████████▀ ▀█████▀ ████████▀ █▀ ████████▀ ██████████ ▄████████▀ \r\n ███ ███ 
\r\n\r\n\"\"\")\r\n\r\nmax_score = 5 # Score to winn the game\r\nprint(\"I have to admit you are brave. The last {} points wil decides the fate of humanity\\n Let's get this over.\".format(max_score))\r\n\r\n# Game mechanics\r\nrolling_dice = random.randint(1,6)\r\n\r\nplayers_point = 0\r\ncumpoters_point = 0\r\nplayer_total= random.randint(1,6)\r\ncomputer_total= random.randint(1,6)\r\n\r\n#game body\r\nwhile players_point or cumpoters_point!=max_score:\r\n print()\r\n input(\"press enter to roll the dice\")\r\n player_roll1 = random.randint(1,6)\r\n player_roll2 = random.randint(1,6)\r\n player_roll3 = random.randint(1,6)\r\n player_total = player_roll1+player_roll2+player_roll3\r\n\r\n computer_roll1 = random.randint(1,6)\r\n computer_roll2 = random.randint(1,6)\r\n computer_roll3 = random.randint(1,6)\r\n computer_total = computer_roll1+computer_roll2+computer_roll3\r\n print()\r\n\r\n print(\"{0} you roll({1}, {2} and {3} = {7} total ) and computer has ({4}, {5} and {6} = {8} total )\".format(player_name,player_roll1,player_roll2, player_roll3, computer_roll1, computer_roll2,computer_roll3,player_total,computer_total))\r\n \r\n if player_total > computer_total:\r\n players_point += 1\r\n\r\n elif player_total == computer_total:\r\n players_point+0\r\n\r\n else:\r\n cumpoters_point+=1\r\n\r\n\r\n print(\"{}'s points: {} and computer: {}\".format(player_name,players_point,cumpoters_point))\r\n\r\n if players_point == max_score:\r\n print(good)\r\n print(\"Not becouse you won. I'm just not in the mood to destry the world. 
however promis is promis here is your reward:\")\r\n currency = \"€\",\"£\",\"৳\",\"ƒ\",\"₹\",\"₡\",\"Kč\",\"₣\",\"₪\",\"¥\",\".ރ\",\"₮\",\"₲\",\"₽\",\"฿\",\"Ft\"\r\n print(\"{0},{1}{2}{3},{4}{5}{6} {7}\".format(random.randint(1,5),random.randint(0,9),random.randint(0,9),random.randint(0,9),random.randint(0,9),\r\n random.randint(0,9),random.randint(0,9),random.choice(currency)))\r\n quit()\r\n\r\n if cumpoters_point == max_score:\r\n print(game_over)\r\n quit()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" } ]
1
flinteller/unit_eleven
https://github.com/flinteller/unit_eleven
0bc8a13ba433463d421366fc757d4ec64d99f119
cd9fb7a352c95bf140a9be14e8e3b8202274948e
8487bba1c4e0462a58031d0f8874fc88f92a5bb1
refs/heads/master
"2020-04-09T20:14:01.690470"
"2019-01-23T18:58:46"
"2019-01-23T18:58:46"
160,567,793
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5656382441520691, "alphanum_fraction": 0.5747126340866089, "avg_line_length": 26.966102600097656, "blob_id": "a46860af4077867a47354b5e109e2715d09b529e", "content_id": "68e428f76f4e663573b204bd74f8fc499a565eef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1653, "license_type": "no_license", "max_line_length": 87, "num_lines": 59, "path": "/paddle.py", "repo_name": "flinteller/unit_eleven", "src_encoding": "UTF-8", "text": "import pygame\nimport random\n\n\nclass Paddle(pygame.sprite.Sprite):\n\n def __init__(self, main_surface, color, height, width):\n \"\"\"\n This function creates creates a surface using each other params\n :param main_surface:\n :param color:\n :param height:\n :param width:\n \"\"\"\n # initialize sprite super class\n super().__init__()\n # finish setting the class variables to the parameters\n self.main_surface = main_surface\n self.color = color\n self.height = height\n self.width = width\n\n # Create a surface with the correct height and width\n self.image = pygame.Surface((width, height))\n\n # Get the rect coordinates\n self.rect = self.image.get_rect()\n\n # Fill the surface with the correct color\n self.image.fill(color)\n\n def move_left(self):\n \"\"\"\n This function moves the paddle left and stops the paddle form going off screen\n :return:\n \"\"\"\n self.rect.x = self.rect.x - 7\n\n if self.rect.left < 0:\n self.rect.x = 1\n\n def move_right(self):\n \"\"\"\n This function moves the paddle right and stops the paddle form going off screen\n :return:\n \"\"\"\n self.rect.x = self.rect.x + 7\n\n if self.rect.right > 400:\n self.rect.x = 335\n\n def resize(self):\n \"\"\"\n This function creates a new surface with a random size and keeps its color\n :return:\n \"\"\"\n self.width = random.randint(20, 100)\n self.image = pygame.Surface((self.width, self.height))\n self.image.fill(self.color)\n\n\n\n" }, { "alpha_fraction": 0.5798664689064026, 
"alphanum_fraction": 0.5819209218025208, "avg_line_length": 32.517242431640625, "blob_id": "b411795660e02007e1aebabb061ece52c4420415", "content_id": "e7e9f0b145fbc3388c2345aa6881fa6e11464926", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1947, "license_type": "no_license", "max_line_length": 112, "num_lines": 58, "path": "/ball.py", "repo_name": "flinteller/unit_eleven", "src_encoding": "UTF-8", "text": "import pygame\n\n\n\nclass Ball(pygame.sprite.Sprite):\n\n def __init__(self, color, window_width, window_height, radius):\n # initialize sprite super class\n super().__init__()\n # finish setting the class variables to the parameters\n self.color = color\n self.radius = radius\n self.window_width = window_width\n self.window_height = window_height\n self.speedx = 6\n self.speedy = 8\n\n # Create a surface, get the rect coordinates, fill the surface with a white color (or whatever color the\n # background of your breakout game will be.\n self.image = pygame.image.load(\"chrome copy.png\")\n\n self.rect = self.image.get_rect()\n\n # Add a circle to represent the ball to the surface just created.\n\n def move(self):\n \"\"\"\n This makes the ball move and keeps it on the screen\n :return:\n \"\"\"\n self.rect.top += self.speedy\n self.rect.left += self.speedx\n if self.rect.top < 0:\n self.speedy = -self.speedy\n elif self.rect.left < 0 or self.rect.right > self.window_width:\n self.speedx = -self.speedx\n\n def collide(self, paddle_group, brick_group):\n \"\"\"\n This detects collisions and plays a sound accordingly\n :param paddle_group:\n :param brick_group:\n :return:\n \"\"\"\n if pygame.sprite.spritecollide(self, brick_group, True):\n self.speedx = self.speedx\n self.speedy = -self.speedy\n pygame.mixer.init()\n pygame.init()\n sound = pygame.mixer.Sound(\"Bleep-sound.wav\")\n sound.play()\n if pygame.sprite.spritecollide(self, paddle_group, False):\n self.speedx = self.speedx\n self.speedy = 
-self.speedy\n pygame.mixer.init()\n pygame.init()\n sound = pygame.mixer.Sound(\"Paddle_bounce_sound.wav\")\n sound.play()\n\n\n\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.5724930763244629, "avg_line_length": 30.523488998413086, "blob_id": "f14ca6815dc835267c43d4f61481947840142c93", "content_id": "296728d82e452017d641484a0dad1765dd6b3efb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4697, "license_type": "no_license", "max_line_length": 114, "num_lines": 149, "path": "/breakout.py", "repo_name": "flinteller/unit_eleven", "src_encoding": "UTF-8", "text": "import pygame\nimport sys\nfrom pygame.locals import *\nimport brick\nimport ball\nimport paddle\n\n\ndef main():\n # Constants that will be used in the program\n APPLICATION_WIDTH = 400\n APPLICATION_HEIGHT = 600\n PADDLE_Y_OFFSET = 30\n BRICKS_PER_ROW = 10\n BRICK_SEP = 4 # The space between each brick\n BRICK_Y_OFFSET = 70\n BRICK_WIDTH = (APPLICATION_WIDTH - (BRICKS_PER_ROW -1) * BRICK_SEP) / BRICKS_PER_ROW\n BRICK_HEIGHT = 8\n PADDLE_HEIGHT = 10\n PADDLE_WIDTH = 60\n RADIUS_OF_BALL = 10\n NUM_TURNS = 3\n\n # Sets up the colors\n BLUE = (30, 144, 255)\n RED = (255, 48, 48)\n YELLOW = (255, 215, 0)\n GREEN =(0, 201, 87)\n WHITE = (255, 255, 255)\n\n pygame.init()\n main_window = pygame.display.set_mode((APPLICATION_WIDTH, APPLICATION_HEIGHT), 32, 0)\n pygame.display.set_caption(\"AD Blocker\")\n pygame.display.update()\n\n # Step 1: Use loops to draw the rows of bricks. 
The top row of bricks should be 70 pixels away from the top of\n # the screen (BRICK_Y_OFFSET)\n brick_group = pygame.sprite.Group()\n paddle_group = pygame.sprite.Group()\n\n x_pos = 0\n y_pos = BRICK_Y_OFFSET\n\n # Places bricks with correct colors\n colors = [BLUE, RED, YELLOW, BLUE, GREEN]\n for color in colors:\n for y in range(2):\n for z in range(10):\n my_brick = brick.Brick(BRICK_WIDTH, BRICK_HEIGHT, color)\n brick_group.add(my_brick)\n my_brick.rect.y = y_pos\n my_brick.rect.x = x_pos\n main_window.blit(my_brick.image, my_brick.rect)\n x_pos += (BRICK_SEP + BRICK_WIDTH)\n x_pos = 0\n y_pos += BRICK_HEIGHT + BRICK_SEP\n\n # Places ball and passes it parameters\n my_ball = ball.Ball(RED, APPLICATION_WIDTH, APPLICATION_HEIGHT, RADIUS_OF_BALL)\n my_ball.rect.x = 200\n my_ball.rect.y = 200\n\n # Places paddle and passes it parameters\n my_paddle = paddle.Paddle(main_window, GREEN, PADDLE_HEIGHT, PADDLE_WIDTH)\n paddle_group.add(my_paddle)\n my_paddle.rect.x = APPLICATION_WIDTH / 2\n my_paddle.rect.y = APPLICATION_HEIGHT - PADDLE_Y_OFFSET\n pygame.display.update()\n\n # Event detection loop\n while True:\n for event in pygame.event.get():\n if event == QUIT:\n pygame.quit()\n sys.exit()\n if pygame.key.get_pressed()[K_LEFT]:\n my_paddle.move_left()\n if pygame.key.get_pressed()[K_RIGHT]:\n my_paddle.move_right()\n if pygame.key.get_pressed()[K_SPACE]:\n my_paddle.resize()\n if my_ball.rect.bottom > 590:\n NUM_TURNS -= 1\n pygame.mixer.init()\n pygame.init()\n sound = pygame.mixer.Sound(\"Error_sound.wav\")\n sound.play()\n my_ball.rect.x = 200\n my_ball.rect.y = 20\n\n main_window.fill(WHITE)\n\n # Prints number of lives\n mouse_font = pygame.font.SysFont(\"Verdana\", 32)\n mouse_label = mouse_font.render(\"Lives: \" + str(NUM_TURNS), 1, BLUE)\n main_window.blit(mouse_label, (30, 30))\n pygame.display.update()\n\n # Prints message if you win\n if len(brick_group) == 0:\n mouse_font = pygame.font.SysFont(\"Verdana\", 32)\n mouse_label = mouse_font.render(\"You 
Win!!!\", 1, BLUE)\n main_window.blit(mouse_label, (135, 200))\n pygame.mixer.init()\n pygame.init()\n sound = pygame.mixer.Sound(\"Win_sound.wav\")\n sound.play()\n pygame.display.update()\n\n if len(brick_group) == 0:\n pygame.time.wait(2000)\n break\n\n # Prints message if you loose\n if NUM_TURNS == 1 and my_ball.rect.bottom > 585:\n mouse_font = pygame.font.SysFont(\"Verdana\", 32)\n mouse_label = mouse_font.render(\"Game Over\", 1, RED)\n main_window.blit(mouse_label, (135, 200))\n pygame.mixer.init()\n pygame.init()\n sound = pygame.mixer.Sound(\"Game_over_sound.wav\")\n sound.play()\n pygame.display.update()\n\n if NUM_TURNS == 0:\n pygame.time.wait(2000)\n break\n\n # Moves and blits ball\n my_ball.move()\n main_window.blit(my_ball.image, my_ball.rect)\n\n if my_ball.rect.bottom > my_ball.window_height:\n NUM_TURNS -= 1\n\n # Blits each brick\n for a_brick in brick_group:\n main_window.blit(a_brick.image, a_brick.rect)\n\n # Calls collision function\n my_ball.collide(paddle_group, brick_group)\n\n # Blits paddle\n main_window.blit(my_paddle.image, my_paddle.rect)\n\n pygame.display.update()\n\n\nmain()\n" } ]
3
rafatmyo/Definition-Creator
https://github.com/rafatmyo/Definition-Creator
c2abf44975278a7056ce93ab7a3c9ce0270f69b2
0518cc3a80c29c8d63628ded848f0404580b0018
ddc748b256c2fc7ee0b5eb61ef08c470afad4069
refs/heads/master
"2021-08-17T10:04:28.695389"
"2017-11-21T03:10:46"
"2017-11-21T03:10:46"
111,492,195
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.54995197057724, "alphanum_fraction": 0.5537944436073303, "avg_line_length": 30.08955192565918, "blob_id": "04a747de73714aa0b95fa7fb40783d7383b5d0b9", "content_id": "075e44c062c8ff67655d9917a9994cc006111a6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2082, "license_type": "no_license", "max_line_length": 102, "num_lines": 67, "path": "/program.py", "repo_name": "rafatmyo/Definition-Creator", "src_encoding": "UTF-8", "text": "import re\n\ndef main():\n try:\n print('Exit with Ctrl-C\\n')\n\n while True:\n print('Please enter your article:')\n input_text = input()\n\n # Remove pronounciation, language origin, date of birth, etc.\n simplified_text = re.sub('[\\(\\[].*?[\\)\\]]', '', input_text)\n\n # Split the term and it's definition around 'is' or 'was'\n is_split = simplified_text.partition(' is ')\n was_split = simplified_text.partition(' was ')\n\n text_before_is = is_split[0]\n text_before_was = was_split[0]\n\n\n # Found the keyword 'is' first\n if len(text_before_is) < len(text_before_was) and is_split[1]:\n\n # Strip surrounding whitespace from the term and it's definition\n term = text_before_is.strip()\n definition = is_split[2].partition('.')[0].strip()\n # Process the completed term and definition\n handle_phrase(term + ': ' + definition + '.')\n\n\n # Found the keyword 'was' first\n elif len(text_before_is) > len(text_before_was) and was_split[1]:\n\n # Strip surrounding whitespace from the term and it's definition\n term = text_before_was.strip()\n definition = was_split[2].partition('.')[0].strip()\n # Process the completed term and definition\n handle_phrase(term + ': ' + definition + '.')\n\n\n # Handle incomplete entry\n else:\n print(\"\\nA definition was not created because an 'is' or 'was' could not be found.\\n\")\n\n\n # Allow loop to end gracefully\n except KeyboardInterrupt:\n pass\n\n # For debugging\n except Exception as e:\n print(e)\n\n\n# Process the 
final phrase\ndef handle_phrase(full_phrase):\n print('\\nDefinition created:')\n print(full_phrase + '\\n')\n\n with open('definitions.txt', 'a') as text_file:\n text_file.write(full_phrase + '\\n\\n')\n\n\n# Run this code if being executed directly\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.8173912763595581, "alphanum_fraction": 0.8173912763595581, "avg_line_length": 56, "blob_id": "47c1923b0bd7786fa202fdbba66e0626ba3956bd", "content_id": "dccf2c18d0ddec6b7d3dab5109677bf9d7e3cdba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 115, "license_type": "no_license", "max_line_length": 92, "num_lines": 2, "path": "/README.md", "repo_name": "rafatmyo/Definition-Creator", "src_encoding": "UTF-8", "text": "# Definition Creator\nPYhon Script that creates definitions from Wikipedia articles and saves them in a text file. \n" } ]
2
thejakeboyd/SEproject
https://github.com/thejakeboyd/SEproject
ef16e90f129d8594bfb8e91220237df6fb30d6a5
3aaaeca9708a8e4cd96e73610a327253d7a760c4
4175fe503e8f67b80b133b5f74b7fcdc8c4feec0
refs/heads/main
"2023-04-19T00:37:52.317913"
"2021-04-30T03:15:13"
"2021-04-30T03:15:13"
363,013,025
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49284881353378296, "alphanum_fraction": 0.5292466878890991, "avg_line_length": 43.7754020690918, "blob_id": "d0d01b75ce372ca2b80544723105fe6370b71782", "content_id": "a45d5a5b006b6c3e0419ef54a2c824bf795b4c71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33491, "license_type": "no_license", "max_line_length": 159, "num_lines": 748, "path": "/CapitalAirlines.py", "repo_name": "thejakeboyd/SEproject", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport random\n\n\ntotalcustomers = 0\nseats = []\nseatstaken = []\nbonly = [109, 110, 111, 112, 113, 114, 115,116, 117, 119, 119, 120]\nfor x in range(1, 121):\n seats.append(x)\nalph = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T']\ncustomers = []\nsatisfaction = []\n\ndef family():\n familywin = Tk()\n familywin.configure(bg='grey')\n familywin.title('Capital Flights')\n Label(familywin, text='Family', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(familywin, text='How many people are in your group?: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(familywin, text='Enter First Passenger Name: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(familywin, text='Enter Second Passenger Name: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n Label(familywin, text='Enter Third Passenger Name: ', bg='grey', font=('Arial', 35)).grid(row=4, column=0)\n Label(familywin, text='Enter Fourth Passenger Name: ', bg='grey', font=('Arial', 35)).grid(row=5, column=0)\n Label(familywin, text='Enter Fifth Passenger Name: ', bg='grey', font=('Arial', 35)).grid(row=6, column=0)\n numgroup = Entry(familywin)\n numgroup.grid(row=1, column=1)\n familyname1 = Entry(familywin)\n familyname1.grid(row=2, column=1)\n familyname2 = Entry(familywin)\n familyname2.grid(row=3, column=1)\n familyname3 = Entry(familywin)\n familyname3.grid(row=4, column=1)\n 
familyname4 = Entry(familywin)\n familyname4.grid(row=5, column=1)\n familyname5 = Entry(familywin)\n familyname5.grid(row=6, column=1)\n Label(familywin, text=\"Leave Name blank if N/A\", bg='white', font=('Arial', 20)).grid(row=7, column=0)\n Button(familywin, text='SUBMIT', command=lambda: ticket3(numgroup, familyname1, familyname2, familyname3, familyname4, familyname5, familywin)).grid(row=8,\n column=1)\n\ndef tourist():\n touristwin = Tk()\n touristwin.configure(bg='grey')\n touristwin.title('Capital Flights')\n Label(touristwin, text='Tourists', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(touristwin, text='Enter First Passenger Name: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(touristwin, text='Enter Second Passenger Name: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n touristname1 = Entry(touristwin)\n touristname1.grid(row=2, column=1)\n touristname2 = Entry(touristwin)\n touristname2.grid(row=3, column=1)\n Button(touristwin, text='SUBMIT', command=lambda: ticket2(touristname1, touristname2, touristwin)).grid(row=4, column=1)\n\ndef business():\n businesswin = Tk()\n businesswin.configure(bg='grey')\n businesswin.title('Capital Flights')\n Label(businesswin, text='Business Customer', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(businesswin, text='Enter your Name: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n bussinessname = Entry(businesswin)\n bussinessname.grid(row=2, column=1)\n Button(businesswin, text='SUBMIT', command=lambda : ticket(bussinessname, businesswin)).grid(row=3, column=1)\n\ndef manager3(manwin2):\n manwin2.destroy()\n manwin3 = Tk()\n manwin3.title(\"FLIGHT REPORT\")\n Label(manwin3, text='TOTAL CUSTOMERS: ', bg='grey').grid(row=0, column=0)\n Label(manwin3, text=len(seatstaken)).grid(row=1, column=0)\n Label(manwin3, text='AVG SATISFACTION: ', bg='grey').grid(row=2, column=0)\n final = sum(satisfaction) / len(satisfaction)\n Label(manwin3, 
text=final).grid(row=3, column=0)\n\ndef manager2(manwin):\n manwin.destroy()\n manwin2 = Tk()\n manwin2.configure(bg='grey')\n manwin2.title('MANAGER SCREEN')\n x=0\n y=6\n i=1\n j=0\n Label(manwin2, text='ROW', bg='grey').grid(row=0, column=0)\n while i <= 20:\n Label(manwin2, text=alph[j]).grid(row=i, column=0)\n i += 1\n j += 1\n i=1\n while y <= 120:\n Label(manwin2, text=seats[x:y]).grid(row=i, column=1)\n x += 6\n y += 6\n i += 1\n Label(manwin2, text='SEATS TAKEN: ', bg='grey').grid(row=0, column=3)\n Label(manwin2, text='ROW', bg='grey').grid(row=0, column=0)\n Label(manwin2, text=seatstaken).grid(row=2, column=3)\n Label(manwin2, text='TOTAL CUSTOMERS: ', bg='grey').grid(row=4, column=3)\n Label(manwin2, text=len(seatstaken)).grid(row=6, column=3)\n Button(manwin2, text='FINISH FLIGHT // GENERATE REPORT', command=lambda : manager3(manwin2)).grid(row=8, column=3)\n\n\ndef manager():\n manwin = Tk()\n manwin.title(\"MANAGER LOGIN\")\n Label(manwin, text='User Name').grid(row=0, column=0)\n username = StringVar()\n usernameEntry = Entry(manwin, textvariable=username).grid(row=0, column=1)\n Label(manwin, text='Password'). 
grid(row=1, column=0)\n password = StringVar()\n passwordEntry = Entry(manwin, textvariable=password, show='*').grid(row=1, column=1)\n Button(manwin, text='LOGIN', command=lambda : manager2(manwin)).grid(row=2, column=1)\n\n\ndef customer():\n customerwin = Tk()\n customerwin.configure(bg='grey')\n customerwin.title(\"Capital Flights\")\n Label(customerwin, text='Customer Login', bg='grey', font=('Arial', 65)).grid(row=2, column=0)\n Button(customerwin, text='Business', command=lambda : business()).grid(row=3, column=0)\n Button(customerwin, text='Tourist', command=lambda : tourist()).grid(row=4, column=0)\n Button(customerwin, text='Family', command=lambda : family()).grid(row=5, column=0)\n\ndef ticket(businessname, businesswin):\n ticketwin = Tk()\n ticketwin.configure(bg='grey')\n ticketwin.title(\"TICKET\")\n Label(ticketwin, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name = businessname.get()\n businesswin.destroy()\n Label(ticketwin, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 108\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(0)\n except IndexError:\n var = 0\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin, text=row, 
bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(-5)\n\ndef ticket2(touristname1, touristname2, touristwin):\n ticketwin2 = Tk()\n ticketwin2.configure(bg='grey')\n ticketwin2.title(\"TICKET\")\n Label(ticketwin2, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin2, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin2, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin2, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name = touristname1.get()\n Label(ticketwin2, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 0\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin2, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin2, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n except IndexError:\n var = 5\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin2, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin2, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n ticketwin3 = Tk()\n ticketwin3.configure(bg='grey')\n ticketwin3.title(\"TICKET\")\n Label(ticketwin3, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin3, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin3, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, 
column=0)\n Label(ticketwin3, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name = touristname2.get()\n touristwin.destroy()\n Label(ticketwin3, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 1\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin3, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin3, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(10)\n except IndexError:\n var = 4\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin3, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin3, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(10)\n\ndef ticket3(numgroup, familyname1, familyname2, familyname3, familyname4, familyname5, familywin):\n numgroup = numgroup.get()\n if numgroup == '3':\n ticketwin4 = Tk()\n ticketwin4.configure(bg='grey')\n ticketwin4.title(\"TICKET\")\n Label(ticketwin4, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin4, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin4, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin4, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name = familyname1.get()\n Label(ticketwin4, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 2\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n 
else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n except IndexError:\n var = 3\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n ticketwin5 = Tk()\n ticketwin5.configure(bg='grey')\n ticketwin5.title(\"TICKET\")\n Label(ticketwin5, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin5, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin5, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin5, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name2 = familyname2.get()\n Label(ticketwin5, text=name2, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 3\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n except IndexError:\n var = 8\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin5, text=row, bg='grey', font=('Arial', 
35)).grid(row=2, column=1)\n Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n ticketwin6 = Tk()\n ticketwin6.configure(bg='grey')\n ticketwin6.title(\"TICKET\")\n Label(ticketwin6, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin6, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin6, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin6, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name3 = familyname3.get()\n familywin.destroy()\n Label(ticketwin6, text=name3, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 8\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n except IndexError:\n var = 9\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(10)\n if numgroup == '4':\n ticketwin4 = Tk()\n ticketwin4.configure(bg='grey')\n ticketwin4.title(\"TICKET\")\n Label(ticketwin4, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin4, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin4, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin4, text='Seat Number: ', bg='grey', 
font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name = familyname1.get()\n Label(ticketwin4, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 2\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n except IndexError:\n var = 3\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n ticketwin5 = Tk()\n ticketwin5.configure(bg='grey')\n ticketwin5.title(\"TICKET\")\n Label(ticketwin5, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin5, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin5, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin5, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name2 = familyname2.get()\n Label(ticketwin5, text=name2, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 3\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, 
column=1)\n satisfaction.append(5)\n except IndexError:\n var = 8\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n ticketwin6 = Tk()\n ticketwin6.configure(bg='grey')\n ticketwin6.title(\"TICKET\")\n Label(ticketwin6, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin6, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin6, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin6, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name3 = familyname3.get()\n Label(ticketwin6, text=name3, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 8\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n except IndexError:\n var = 9\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n ticketwin7 = Tk()\n ticketwin7.configure(bg='grey')\n ticketwin7.title(\"TICKET\")\n Label(ticketwin7, text='PLANE 
TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin7, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin7, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin7, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name4 = familyname4.get()\n familywin.destroy()\n Label(ticketwin7, text=name4, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 9\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin7, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin7, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n except IndexError:\n var = 14\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin7, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin7, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n satisfaction.append(10)\n if numgroup == '5':\n ticketwin4 = Tk()\n ticketwin4.configure(bg='grey')\n ticketwin4.title(\"TICKET\")\n Label(ticketwin4, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin4, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin4, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin4, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name = familyname1.get()\n Label(ticketwin4, text=name, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 2\n SI = 0\n while 
SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n except IndexError:\n var = 3\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin4, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin4, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n ticketwin5 = Tk()\n ticketwin5.configure(bg='grey')\n ticketwin5.title(\"TICKET\")\n Label(ticketwin5, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin5, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin5, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin5, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name2 = familyname2.get()\n Label(ticketwin5, text=name2, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 3\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n except IndexError:\n var = 8\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 
6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin5, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin5, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n ticketwin6 = Tk()\n ticketwin6.configure(bg='grey')\n ticketwin6.title(\"TICKET\")\n Label(ticketwin6, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin6, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin6, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin6, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name3 = familyname3.get()\n Label(ticketwin6, text=name3, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 8\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n except IndexError:\n var = 9\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin6, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin6, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n ticketwin7 = Tk()\n ticketwin7.configure(bg='grey')\n ticketwin7.title(\"TICKET\")\n Label(ticketwin7, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin7, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin7, text='Row: ', bg='grey', 
font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin7, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name4 = familyname4.get()\n Label(ticketwin7, text=name4, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 9\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin7, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin7, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n except IndexError:\n var = 14\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin7, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin7, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(5)\n ticketwin8 = Tk()\n ticketwin8.configure(bg='grey')\n ticketwin8.title(\"TICKET\")\n Label(ticketwin8, text='PLANE TICKET', bg='grey', font=('Arial', 65)).grid(row=0, column=0)\n Label(ticketwin8, text='Name: ', bg='grey', font=('Arial', 35)).grid(row=1, column=0)\n Label(ticketwin8, text='Row: ', bg='grey', font=('Arial', 35)).grid(row=2, column=0)\n Label(ticketwin8, text='Seat Number: ', bg='grey', font=('Arial', 35)).grid(row=3, column=0)\n customers.append(1)\n name5 = familyname5.get()\n familywin.destroy()\n Label(ticketwin8, text=name5, bg='grey', font=('Arial', 35)).grid(row=1, column=1)\n try:\n var = 13\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken and seats[var] not in bonly:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 6\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin8, text=row, bg='grey', 
font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin8, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n except IndexError:\n var = 14\n SI = 0\n while SI == 0:\n if seats[var] not in seatstaken:\n seatstaken.append(seats[var])\n SI = 1\n else:\n var += 1\n alphI = (var) // 6\n row = alph[alphI]\n print(row, seats[var])\n Label(ticketwin8, text=row, bg='grey', font=('Arial', 35)).grid(row=2, column=1)\n Label(ticketwin8, text=seats[var], bg='grey', font=('Arial', 35)).grid(row=3, column=1)\n satisfaction.append(10)\n\n\n\n\nmainroot = Tk()\nmainroot.configure(bg='grey')\nmainroot.title(\"Capital Flights\")\nLabel(mainroot, text='Capital Flights', bg='grey', font=('Arial', 80)).grid(row=2, column=0)\nButton(mainroot, text='Customer', command=lambda : customer()).grid(row=3, column=0)\nButton(mainroot, text='Manager', command=lambda : manager()).grid(row=6, column=0)\n\nmainroot.mainloop()" } ]
1
prashantc29/kaggle-titanic
https://github.com/prashantc29/kaggle-titanic
34978d8cb65569a665eac8d96af2a792b5fa0ddc
3c1656a0503223b72c1ab07148fd59d96c0908c6
93717a99970ccb171e25bdc3a2620252c76cf60d
refs/heads/master
"2021-03-12T19:37:38.786104"
"2014-01-29T17:46:53"
"2014-01-29T17:46:53"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6686111092567444, "alphanum_fraction": 0.6811110973358154, "avg_line_length": 28.983333587646484, "blob_id": "bc87403605b34ceb94633ae2cbb57af10bf9680f", "content_id": "cdf0af1aafe868e71d51767e0b27cc2c793d7809", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7200, "license_type": "no_license", "max_line_length": 105, "num_lines": 240, "path": "/generic.py", "repo_name": "prashantc29/kaggle-titanic", "src_encoding": "UTF-8", "text": "\n# This code will compare different algorithms for the dataset\n# To add a new algorithm use the preprocessed data and write a new method and call the method in the end.\n\nimport re\t\nimport csv \nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom datetime import datetime\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import svm\n\n# Add time along with the log\ndef log(logname,string):\n print str(datetime.now()) + \"\\t\" + logname + \"\\t\" + string\n\n##################################################\n# METHODS FOR PREPROCESSING THE DATA\n##################################################\n\n# Convert the gender\ndef convertGender(gender):\n\tif gender == 'female':\n\t\tgender = 0\n\tif gender == 'male':\n\t\tgender = 1\n\treturn gender\n\n# Convert the embarked field\ndef convertEmbarked(embarked):\n\tif embarked == 'C':\n\t\tembarked = 0\n\tif embarked == 'Q':\n\t\tembarked = 1\n\tif embarked == 'S':\n\t\tembarked = 2\n\telse:\n\t\tembarked = '2'\n\treturn embarked\n\n# return title\ndef getTitle(name):\n\tfor word in name.split():\n\t\tif word.endswith('.'):\n\t\t\ttitle=word\n\t\t\tbreak\n\treturn title\n\n# convert title to hash \n# TODO need to improve\ndef getTitleHash(title,gender):\n\thas = ord(title[0]) + len(title) + int(gender)\n\treturn has\n\n# 
returns one if the passenger had a family\ndef getFamily(sibsp,parch):\n\t#if int(sibsp) + int(parch) > 0:\n\t#\tfamily = 1\n\t#else:\n\t#\tfamily = 0\n\t#return family\n\treturn int(sibsp) + int(parch)\n\n# Pull out the dept from the ticket number\ndef getTicketCode(ticket):\n deptName = re.sub(r\"$\\d+\\W+|\\b\\d+\\b|\\W+\\d+$\", \"\", ticket)\n if len(deptName) == 0:\n deptName = 'none'\n deptCode = ord(deptName[0]) + len(deptName)\n return deptCode\n\n# Return the same fare if it is non-empty.\n# Else return the average fro the given 'ticket class'\n# The average fare is already calculated in a spreadsheet\ndef getFare(fare,ticketclass):\n\tif fare != '':\n\t\treturn fare\n\tif ticketclass == 1:\n\t\treturn '94'\n\tif ticketclass == 2:\n\t\treturn '22'\n\tif ticketclass == '3':\n\t\treturn '12'\n\n##################################################\n# METHODS FOR DIFFERENT ALGORITHMS\n#\n# Tips to add new algorithm:\n#\t1. Copy the following random forest code. \n#\t2. Change the place holders accordingly\n##################################################\n\ndef randomforest(trainfeatures,trainlabels,testfeatures):\n\tRandomForest = RandomForestClassifier(n_estimators = 1000)\n\treturn runalgorithm(RandomForest,trainfeatures,trainlabels,testfeatures)\n\ndef supportVectorMachine(trainfeatures,trainlabels,testfeatures):\n\tsupportVectorMachine = svm.SVC()\n\treturn runalgorithm(supportVectorMachine,trainfeatures,trainlabels,testfeatures)\n\ndef decisiontree(trainfeatures,trainlabels,testfeatures):\n\ttree = DecisionTreeClassifier(random_state = 1000)\n\treturn runalgorithm(tree,trainfeatures,trainlabels,testfeatures)\n\ndef naivebayes(trainfeatures,trainlabels,testfeatures):\n\tnb = GaussianNB()\n\treturn runalgorithm(nb,trainfeatures,trainlabels,testfeatures)\n\ndef adaboost(trainfeatures,trainlabels,testfeatures):\n\tadaBoost = AdaBoostClassifier(RandomForestClassifier(n_estimators = 1000),\n algorithm=\"SAMME\",\n n_estimators=200)\n\treturn 
runalgorithm(adaBoost,trainfeatures,trainlabels,testfeatures)\n\n# Generic code for running any algorithm called from above algorithms\ndef runalgorithm(algorithm,trainfeatures,trainlabels,testfeatures):\n\tlogname = runalgorithm.__name__\n\talgorithmName = algorithm.__class__.__name__\n\t\n\tlog(logname,algorithmName + \" Fitting train data\")\n algorithm = algorithm.fit(trainfeatures,trainlabels)\n\tlog(logname,algorithmName + \" DONE Fitting train data\")\n\t\n\tlog(logname,algorithmName + \" Scoring train data\")\n\tscores = cross_val_score(algorithm, trainfeatures, trainlabels)\n\tscore = scores.mean()\n\tscore = str(score)\n\tlog(logname,algorithmName + \" Score : \" + score)\n\tlog(logname,algorithmName + \" DONE Scoring train data\")\n\t\n\tlog(logname,algorithmName + \" Predicting test data\")\n\tOutput = algorithm.predict(testfeatures)\t\n\tlog(logname,algorithmName + \" DONE Predicting test data\")\n\twriteFile = algorithmName + \".csv\"\n\tlog(logname,algorithmName + \" Writing results to \" + writeFile)\n\tnp.savetxt(writeFile,Output,delimiter=\",algorithmName + \" ,fmt=\"%s\")\n\tlog(logname,algorithmName + \" DONE Writing results to \" + writeFile)\n\treturn score\n\n##################################################\n# MAIN METHOD\n##################################################\nif __name__ == '__main__':\t\n\t\n\tlogname = \"__main__\"\n\n\tlog(logname,\"Reading Train Data\")\n\t\n\ttrain = csv.reader(open('train.csv','rb'))\n\theader = train.next()\n\t\n\t######READING TRAIN DATA################\t\n\ttrain_data=[]\n\tfor row in train:\n\t train_data.append(row)\n\t\n\ttrain_data = np.array(train_data)\n\t\n\tlog(logname,\"DONE Reading Train Data\")\n\t\n\tlog(logname,\"Preprocessing Train Data\")\n\t# replace categorical attributes\n\tfor row in train_data:\n\t\t\n\t\trow[4] = convertGender(row[4])\n\t\ttitle = getTitle(row[3])\n\t\trow[3] = getTitleHash(title,row[4])\n\t\trow[6] = getFamily(row[6],row[7])\n\t\trow[8] = 
getTicketCode(row[8])\n\t\trow[9] = getFare(row[9],row[2])\n\t\trow[11] = convertEmbarked(row[11])\n\t\t\n\ttrainfeatures = train_data[0::,[2,3,4,6,8,11]]\n\ttrainlabels = train_data[0::,1]\n\ttrainfeatures = trainfeatures.astype(np.float)\n\tlog(logname,\"DONE Preprocessing Train Data\")\n\n\n\t######READING TEST DATA################\t\n\tlog(logname,\"Reading Test Data\")\n\ttest = csv.reader(open('test.csv','rb'))\n\theader = test.next()\n\t\n\ttest_data=[]\n\tfor row in test:\n\t test_data.append(row)\n\ttest_data = np.array(test_data)\n\tlog(logname,\"DONE Reading Test Data\")\n\t\n\t# replace categorical attributes\n\tlog(logname,\"Preprocessing Test Data\")\n\tfor row in test_data:\n\t\t\n\t\trow[3] = convertGender(row[3])\n\t\ttitle = getTitle(row[2])\n\t\trow[2] = getTitleHash(title,row[3])\n\t\trow[5] = getFamily(row[5],row[6])\n\t\trow[7] = getTicketCode(row[7])\n\t\trow[8] = getFare(row[8],row[1])\n\t\trow[10] = convertEmbarked(row[10])\n\t\t\n\n\ttestfeatures = test_data[0::,[1,2,3,5,7,10]]\n\ttestfeatures = testfeatures.astype(np.float)\n\tlog(logname,\"DONE Preprocessing Test Data\")\n\t\n\t####################### TRAIN AND TEST ##########################\n\n\tscores = {}\n\n\tlog(logname,\"Calling Random Forest\")\n\tscore = randomforest(trainfeatures,trainlabels,testfeatures)\n\tscores['Random Forest'] = score\n\tlog(logname,\"DONE WITH Random Forest\")\n\n#\tlog(logname,\"Calling AdaBoost\")\n#\tscore = adaboost(trainfeatures,trainlabels,testfeatures)\n#\tscores['AdaBoost'] = score\n#\tlog(logname,\"DONE WITH AdaBoost\")\n\t\n\tlog(logname,\"Calling Decision Tree\")\n\tscore = decisiontree(trainfeatures,trainlabels,testfeatures)\n\tscores['Decision Tree'] = score\n\tlog(logname,\"DONE WITH Decision Tree\")\n\n\tlog(logname,\"Calling Naive Bayes\")\n\tscore = naivebayes(trainfeatures,trainlabels,testfeatures)\n\tscores['Naive Bayes'] = score\n\tlog(logname,\"DONE WITH Naive Bayes\")\n\n\tlog(logname,\"Calling SVM\")\n\tscore = 
supportVectorMachine(trainfeatures,trainlabels,testfeatures)\n\tscores['SVM'] = score\n\tlog(logname,\"DONE WITH SVM\")\n\t\n\tprint \"\\nSCORES\\n\"\n\tfor k, v in scores.iteritems():\n\t\tprint k + \"\\t\" + v\n\t\t\n" }, { "alpha_fraction": 0.6964285969734192, "alphanum_fraction": 0.708791196346283, "avg_line_length": 20.41176414489746, "blob_id": "42f0a29e09bb38ed0f7bdac29138ae2e961bea5f", "content_id": "c4b4519f21d2ebf2224e47168dfc1307fa32b9ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 728, "license_type": "no_license", "max_line_length": 71, "num_lines": 34, "path": "/test.py", "repo_name": "prashantc29/kaggle-titanic", "src_encoding": "UTF-8", "text": "import csv as csv\nimport numpy as np\n\n\n#open file\n\ntrain = csv.reader(open('train.csv','rb'))\nheader = train.next();\n\n#print header\n\ndata=[]\nfor row in train:\n\tdata.append(row)\ndata = np.array(data)\n\n\n#calculate statistics number of survived and total number of passengers\nnumberOfPassengers = np.size(data[0::,1].astype(np.float))\n#print(\"Number of passengers \",numberOfPassengers)\n\nnumOfSurvived = np.sum(data[0::,1].astype(np.float))\n#print(\"Number of survived passengers \",numOfSurvived)\n\ntest = csv.reader(open('test.csv','rb'))\nwriteFile = csv.writer(open(\"genderbased.csv\",\"wb\"))\n\nfor row in test:\n\tif row[3] == 'female':\n\t\trow.insert(0,'1')\n\t\twriteFile.writerow(row)\n\telse:\n\t\trow.insert(0,'0')\n\t\twriteFile.writerow(row)\n" } ]
2
sudheermouni/NeckTie
https://github.com/sudheermouni/NeckTie
702ac718c4162e790f7b08599fd53fef907218e9
7d90fc0ef3a61c0acec452d1dae9de61e4aa2427
a50a96b175bd853ae9e24692323a51569ed961d5
refs/heads/main
"2023-08-19T12:43:47.281189"
"2021-10-29T04:22:49"
"2021-10-29T04:22:49"
422,309,967
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7018710970878601, "alphanum_fraction": 0.7218295335769653, "avg_line_length": 21.679244995117188, "blob_id": "f200f7ee6699397bbce0df8b329265dc4f8f6223", "content_id": "ce0d09826d71148020848ce2594a7a01044ba009", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2405, "license_type": "no_license", "max_line_length": 113, "num_lines": 106, "path": "/README.md", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "# NeckTie\n\n# Choice of Framework & Library:\nDjango is the best framework for web applications, as it allows developers to use modules for faster development.\nAs a developer, you can make use of these modules to create apps, websites from an existing source. \nIt speeds up the development process greatly, as you do not have to code everything from scratch.\n\n#### Advantages over other framework\n1. It fallows MVT architecture\n2. it supports all types of databases\n3. it has admin dashboard\n4. more secure\n\n\nSetup\n-----\n\nThe django `python manage.py` commands only work after the\nsetup steps are completed.\n\nVirtual Environment\n-------------------\n\n#### macOS\n\nCreate the virtualenv:\n```\npython -m venv venv\nsource venv/bin/activate\npip install -r requirements.txt\n```\n\n\n#### Windows\n\nCreate virtualenv:\n```\npython -m venv venv\nvenv\\Scripts\\activate\npip install -r requirements.txt\n```\n\n\nTests\n-----\n\nTo run tests, activate your virtual environment and run:\n```\npython manage.py test path_of_file\n```\n\n\nRunning the Server Locally\n--------------------------\n\nTo run the server locally, run the following commands:\n```\npython manage.py makemigrations\npython manage.py migrate\npython manage.py runserver localhost:8000\n```\n\nIf the dependencies in the requirements.txt file change, then\nyou will need to re-install dependencies with pip. 
See the\ntroubleshooting section at the bottom of this document if you\nget stuck.\n\nJwt Authentication:\n------------------\nBefore viewing api we should be authenticated. For authentication purpose used **simplejwt**\n```\npip install djangorestframework-simplejwt==4.4.0\npip install PyJWT==1.7.1\n```\ncommand prompt\n\nAccess token\n```\nhttp post http://127.0.0.1:8000/api/token/ username=sudheer password=123456\n```\nwe should get access token. Then we need to pass token for access all api's\ncommand prompt\n```\nhttp http://127.0.0.1:8000/v1/doctors \"Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ0b2tlbl90eXBlIjoiYWNjZXNzIiwiZXhwIjoxNjM1NDc2NDEwLCJqdGkiOiIxNTdkNjE2MTZmZWU0OTYyYmM0NzMyYjQ5OWI5Y2RjMSIsInVzZXJfaWQiOjF9.2AwaH-Al_n2hOKpjBcXB6XTbNWJgZncGdS-IBMvbFR4\"\n```\n\nLoading Fixtures\n----------------\nIt will load sample data to the models\n```\npython manage.py loaddata necktieapp/fixtures/*.json\n```\n\nCustom managemant command\n------------------------\n Creating bul records for doctors model\n```\npython manage.py bulk_create number_of_records\nex: python manage.py bulk_create 3\n```\nAPI Documentation\n-----------------\n\nTo view API documentation, when running locally on port 8000, open:\n * http://localhost:8000/doctors\n * http://localhost:8000/patients\n\n" }, { "alpha_fraction": 0.7361111044883728, "alphanum_fraction": 0.7361111044883728, "avg_line_length": 26.69230842590332, "blob_id": "eec2c2b08b60f5f853d00feeebf1305bc390988a", "content_id": "c98a952cdac90705d84d94c5635e8495a8495cfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 360, "license_type": "no_license", "max_line_length": 91, "num_lines": 13, "path": "/Necktie/necktieapp/models/patent_doctorTb.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from django.db import models\n\nfrom .doctors import Doctors\nfrom .patients import Patient\n\n\nclass PatentDoctorTb(models.Model):\n '''\n 
we can add extra fields here\n '''\n\n doctor = models.ForeignKey(Doctors, blank=False, null=False, on_delete=models.CASCADE)\n patient = models.ForeignKey(Patient, blank=False, null=False, on_delete=models.CASCADE)\n" }, { "alpha_fraction": 0.5600858330726624, "alphanum_fraction": 0.6266094446182251, "avg_line_length": 24.88888931274414, "blob_id": "a640033d9bef27d7e243b45ea38496db051e82b5", "content_id": "da904aa7daacdb38085b404b5a5eaf0c84115d69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 466, "license_type": "no_license", "max_line_length": 126, "num_lines": 18, "path": "/Necktie/necktieapp/migrations/0006_alter_patient_doctor.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.8 on 2021-10-28 06:10\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('necktieapp', '0005_auto_20211028_1129'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='patient',\n name='doctor',\n field=models.ManyToManyField(blank=True, null=True, through='necktieapp.PatentDoctorTb', to='necktieapp.Doctors'),\n ),\n ]\n" }, { "alpha_fraction": 0.567956805229187, "alphanum_fraction": 0.5958595871925354, "avg_line_length": 40.14814758300781, "blob_id": "a15d32703b21cf9b3fe23d2efb084f78dcd54033", "content_id": "b58c768ec7382afe406e9d688fa32bcdadc85601", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1111, "license_type": "no_license", "max_line_length": 117, "num_lines": 27, "path": "/Necktie/necktieapp/migrations/0003_patient.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.8 on 2021-10-27 16:40\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('necktieapp', '0002_alter_doctors_d_phone'),\n ]\n\n 
operations = [\n migrations.CreateModel(\n name='Patient',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('p_surname', models.CharField(blank=True, max_length=20, null=True)),\n ('p_fullname', models.CharField(blank=True, max_length=20, null=True)),\n ('p_username', models.CharField(max_length=40)),\n ('p_phone', models.CharField(blank=True, max_length=10, null=True)),\n ('p_country', models.CharField(blank=True, max_length=50, null=True)),\n ('p_state', models.CharField(blank=True, max_length=50, null=True)),\n ('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='necktieapp.doctors')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.649142861366272, "alphanum_fraction": 0.6617143154144287, "avg_line_length": 30.25, "blob_id": "700864cd9fab4343bf350e48613cdd52cd2cf898", "content_id": "7eb9777bd60c37cde98c4c180e9241ac68eb0bd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 875, "license_type": "no_license", "max_line_length": 86, "num_lines": 28, "path": "/Necktie/necktieapp/models/doctors.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom model_utils import Choices\n\nSPECIALIZATIONS = Choices(\n (\"CD\", \"Cardiology\"),\n (\"GS\", \"General Surgery\"),\n (\"EC\", \"Endocrinology\"),\n (\"NT\", \"Neonatology\"),\n)\n\n\nclass Doctors(models.Model):\n d_surname = models.CharField(max_length=20, blank=True, null=True)\n d_firstname = models.CharField(max_length=20, blank=True, null=True)\n d_username = models.CharField(max_length=40, blank=False, null=False, unique=True)\n d_phone = models.CharField(max_length=10, blank=True, null=True)\n d_address = models.TextField(blank=True, null=True)\n d_country = models.CharField(max_length=30)\n d_specialization = models.CharField(\n choices=SPECIALIZATIONS,\n max_length=4,\n blank=False,\n null=False,\n )\n d_pincode 
= models.IntegerField()\n\n def __str__(self):\n return self.d_username\n" }, { "alpha_fraction": 0.7289719581604004, "alphanum_fraction": 0.7850467562675476, "avg_line_length": 53, "blob_id": "d0b692817ee3d629ca603a7420377852d3e3600e", "content_id": "6f53419f09e36b53201f6adb9aeb434adf3ede05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 54, "num_lines": 2, "path": "/Necktie/necktieapp/views/__init__.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from .doctor_view import DoctorViewset # noqa: F401\nfrom .patient_view import PatientViewset # noqa: F401" }, { "alpha_fraction": 0.5783365368843079, "alphanum_fraction": 0.5967118144035339, "avg_line_length": 32.35483932495117, "blob_id": "97342a2a2fc736f1ee81ad1a09c31688ce5e283f", "content_id": "7cc926405a9269a48961ba9835a442a01d815a5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1034, "license_type": "no_license", "max_line_length": 117, "num_lines": 31, "path": "/Necktie/necktieapp/migrations/0004_auto_20211027_2226.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.8 on 2021-10-27 16:56\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('necktieapp', '0003_patient'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='patient',\n name='doctor',\n ),\n migrations.CreateModel(\n name='PatentDoctorTb',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='necktieapp.doctors')),\n ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='necktieapp.patient')),\n ],\n ),\n migrations.AddField(\n 
model_name='patient',\n name='doctor',\n field=models.ManyToManyField(through='necktieapp.PatentDoctorTb', to='necktieapp.Doctors'),\n ),\n ]\n" }, { "alpha_fraction": 0.48659002780914307, "alphanum_fraction": 0.545976996421814, "avg_line_length": 21.69565200805664, "blob_id": "8cd6cb4c806490619f74852ab028c162534c3112", "content_id": "d574e42b1ff8a359ed5bac1d6d2eb8647a9ed0f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "no_license", "max_line_length": 50, "num_lines": 23, "path": "/Necktie/necktieapp/migrations/0005_auto_20211028_1129.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.8 on 2021-10-28 05:59\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('necktieapp', '0004_auto_20211027_2226'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='doctors',\n old_name='d_state',\n new_name='d_specialization',\n ),\n migrations.RenameField(\n model_name='doctors',\n old_name='d_surnam',\n new_name='d_surname',\n ),\n ]\n" }, { "alpha_fraction": 0.5469798445701599, "alphanum_fraction": 0.7348993420600891, "avg_line_length": 18.866666793823242, "blob_id": "cca37581b4f71d454a45d5c5c2dfa74c121fbd19", "content_id": "5128c376a3b832403b9c40d55fdbf5c21359afc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 298, "license_type": "no_license", "max_line_length": 36, "num_lines": 15, "path": "/Necktie/requirements.txt", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "asgiref==3.4.1\ncertifi==2021.10.8\ncharset-normalizer==2.0.7\nDjango==3.2.8\ndjango-filter==21.1\ndjango-model-utils==4.2.0\ndjangorestframework==3.12.4\ndjangorestframework-simplejwt==4.4.0\nidna==3.3\nPyJWT==1.7.1\npytz==2021.3\nrequests==2.26.0\nsqlparse==0.4.2\ntyping-extensions==3.10.0.2\nurllib3==1.26.7\n" }, { "alpha_fraction": 
0.8865979313850403, "alphanum_fraction": 0.8865979313850403, "avg_line_length": 48, "blob_id": "f0a5656d3d02af9ead4d791e61508f6c33c21476", "content_id": "137fb841a6abb98c9260f69843344ad44e58dc41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "no_license", "max_line_length": 49, "num_lines": 2, "path": "/Necktie/necktieapp/serializers/__init__.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from .doctor_serializer import DoctorSerializer\nfrom .patient_serializer import PatientSerializer" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5905707478523254, "avg_line_length": 21.38888931274414, "blob_id": "64b09565a6d402d3265c025d86137ff17ddd388e", "content_id": "385d84c39da95094ccb5de570a9382a736de68f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 403, "license_type": "no_license", "max_line_length": 73, "num_lines": 18, "path": "/Necktie/necktieapp/migrations/0002_alter_doctors_d_phone.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.8 on 2021-10-27 16:39\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('necktieapp', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='doctors',\n name='d_phone',\n field=models.CharField(blank=True, max_length=10, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.7018072009086609, "alphanum_fraction": 0.7198795080184937, "avg_line_length": 40.5625, "blob_id": "ab45b0391a7834d7d2e8de1ff4d343c7b38f01c8", "content_id": "40b022f2f405641156764cf666259a1866505f4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 664, "license_type": "no_license", "max_line_length": 93, "num_lines": 16, "path": "/Necktie/necktieapp/models/patients.py", "repo_name": 
"sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from django.db import models\n\nfrom .doctors import Doctors\n\n\nclass Patient(models.Model):\n p_surname = models.CharField(max_length=20, blank=True, null=True)\n doctor = models.ManyToManyField(Doctors, through=\"PatentDoctorTb\", null=True, blank=True)\n p_fullname = models.CharField(max_length=20, blank=True, null=True)\n p_username = models.CharField(max_length=40, blank=False, null=False)\n p_phone = models.CharField(max_length=10, blank=True, null=True)\n p_country = models.CharField(max_length=50, blank=True, null=True)\n p_state = models.CharField(max_length=50, blank=True, null=True)\n\n def __str__(self):\n return self.p_username" }, { "alpha_fraction": 0.7676470875740051, "alphanum_fraction": 0.7676470875740051, "avg_line_length": 41.5, "blob_id": "c18fc9cdc524e0346c0c60b0fe6be333488e748c", "content_id": "263fd3dcfab47fa233b9b1f8d26af02d140b44c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 680, "license_type": "no_license", "max_line_length": 89, "num_lines": 16, "path": "/Necktie/necktieapp/views/patient_view.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import viewsets, filters\nfrom rest_framework.permissions import IsAuthenticated\nfrom necktieapp.models import Patient\nfrom necktieapp.serializers import PatientSerializer\n\n\nclass PatientViewset(viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n\n queryset = Patient.objects.all()\n serializer_class = PatientSerializer\n filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]\n filterset_fields = ['id', 'p_surname', 'p_username']\n search_fields = ['id', 'p_surname', 'p_username']\n ordering_fields = ['id', 'p_surname', 'p_username']\n" }, { "alpha_fraction": 0.8296703100204468, "alphanum_fraction": 0.8296703100204468, 
"avg_line_length": 25, "blob_id": "a6190465c7b62af259f084c35d3123a45c86f10b", "content_id": "6ca46f40ace8f404ddcd3283439bf0a8b9e0a6c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/Necktie/necktieapp/admin.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Doctors, Patient, PatentDoctorTb\n\n\nadmin.site.register(Doctors)\nadmin.site.register(Patient)\nadmin.site.register(PatentDoctorTb)\n" }, { "alpha_fraction": 0.7815384864807129, "alphanum_fraction": 0.7846153974533081, "avg_line_length": 26.16666603088379, "blob_id": "d75a5c0107f9e8ccd61014574856361e91a31e44", "content_id": "e30253772d89414ef4183c09710bf8eaa2373e2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 325, "license_type": "no_license", "max_line_length": 50, "num_lines": 12, "path": "/Necktie/necktieapp/urls.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from rest_framework.routers import DefaultRouter\nfrom django.conf.urls import url, include\nfrom necktieapp import views\n\nrouter = DefaultRouter(trailing_slash=False)\n\nrouter.register(r'doctors', views.DoctorViewset)\nrouter.register(r'patients', views.PatientViewset)\n\nurlpatterns = [\n url(r'^v1/', include(router.urls)),\n]" }, { "alpha_fraction": 0.7736389636993408, "alphanum_fraction": 0.7736389636993408, "avg_line_length": 42.625, "blob_id": "a345b568327922fc72954cafec56d0bceca0fabe", "content_id": "6b2b251ecb126571645d2a80969cfd00bf3e22f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "no_license", "max_line_length": 89, "num_lines": 16, "path": "/Necktie/necktieapp/views/doctor_view.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", 
"text": "from django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import viewsets, filters\nfrom rest_framework.permissions import IsAuthenticated\nfrom necktieapp.models import Doctors\nfrom necktieapp.serializers import DoctorSerializer\n\n\nclass DoctorViewset(viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n\n queryset = Doctors.objects.all()\n serializer_class = DoctorSerializer\n filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]\n filterset_fields = ['id', 'd_specialization', 'd_username']\n search_fields = ['id', 'd_specialization', 'd_username']\n ordering_fields = ['id', 'd_specialization', 'd_username']\n" }, { "alpha_fraction": 0.6450381875038147, "alphanum_fraction": 0.6517175436019897, "avg_line_length": 28.94285774230957, "blob_id": "23d0dafcdd40d20ee4bc94b82279bf0406f8ed9c", "content_id": "e444bb05fbd4ae4a6128602c91a8f7f4bfa8becf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 108, "num_lines": 35, "path": "/Necktie/necktieapp/management/commands/bulk_create.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "import random\nimport string\n\nfrom django.core.management.base import BaseCommand\nfrom django.utils.crypto import get_random_string\n\nfrom necktieapp.models import Doctors\n\nsample_data = {\n 'd_surname': get_random_string(),\n 'd_firstname': get_random_string(),\n 'd_username': \"\",\n 'd_phone': get_random_string(),\n 'd_address': get_random_string(),\n 'd_country': get_random_string(),\n 'd_specialization': \"CD\",\n 'd_pincode': 524101,\n}\n\n\nclass Command(BaseCommand):\n help = 'Create random doctors'\n\n def add_arguments(self, parser):\n parser.add_argument('total', type=int, help='Indicates the number of users to be created')\n\n def handle(self, *args, **kwargs):\n total = kwargs['total']\n list_instances = []\n 
Doctors.objects.all().delete()\n for i in range(total):\n sample_data['d_username'] = ''.join(random.choices(string.ascii_uppercase + string.digits, k=8))\n list_instances.append(Doctors(**sample_data))\n\n Doctors.objects.bulk_create(list_instances)\n" }, { "alpha_fraction": 0.6106870174407959, "alphanum_fraction": 0.6420695781707764, "avg_line_length": 30.83783721923828, "blob_id": "b1d86876cae43ea46ab64f630c45a440c679fba6", "content_id": "559084fc8b9562ba88396416b5c94166e75f1b1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1179, "license_type": "no_license", "max_line_length": 62, "num_lines": 37, "path": "/Necktie/necktieapp/tests/test_doctors.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from django.test import TestCase, TransactionTestCase\nfrom necktieapp.models import Doctors\n\nsample_data = {\n 'd_surname': \"sudheer\",\n 'd_firstname': \"mandi\",\n 'd_username': \"smre\",\n 'd_phone': \"7702231789\",\n 'd_address': \"Ramalingapuram\",\n 'd_country': \"India\",\n 'd_specialization': \"CD\",\n 'd_pincode': 524101,\n}\n\n\nclass TestDoctor(TransactionTestCase):\n fixtures = [\"doctors.json\"]\n\n def test_create_new_record(self):\n model_instance = Doctors.objects.create(**sample_data)\n self.assertIsInstance(model_instance, Doctors)\n self.assertEqual(model_instance.d_username, \"smre\")\n\n def test_update_record(self):\n instance = Doctors.objects.get(id=1)\n instance.d_phone = \"9177935906\"\n instance.save()\n self.assertEqual(instance.d_phone, \"9177935906\")\n\n def test_should_not_save_duplicate_username(self):\n before_count = Doctors.objects.count()\n sample_data[\"d_username\"] = \"smreddy\"\n try:\n Doctors.objects.create(**sample_data)\n except Exception as e:\n after_count = Doctors.objects.count()\n self.assertEqual(before_count, after_count)\n\n" }, { "alpha_fraction": 0.7309644818305969, "alphanum_fraction": 0.7309644818305969, "avg_line_length": 
27.285715103149414, "blob_id": "d80463d0974fe429627fd8ff1981b9cddccd15be", "content_id": "2b1e699ab7358276824c6203f336399ab8500534", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/Necktie/necktieapp/serializers/doctor_serializer.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom necktieapp.models import Doctors\n\nclass DoctorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Doctors\n fields = \"__all__\"" }, { "alpha_fraction": 0.5448833107948303, "alphanum_fraction": 0.5664272904396057, "avg_line_length": 38.78571319580078, "blob_id": "401f1f5730f26248ae8108ac2fb62637bedcadd4", "content_id": "40fe5b785c0b1f0df5af058a7b8b86c61293e4b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1114, "license_type": "no_license", "max_line_length": 167, "num_lines": 28, "path": "/Necktie/necktieapp/migrations/0001_initial.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.8 on 2021-10-27 15:55\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Doctors',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('d_surnam', models.CharField(blank=True, max_length=20, null=True)),\n ('d_firstname', models.CharField(blank=True, max_length=20, null=True)),\n ('d_username', models.CharField(max_length=40, unique=True)),\n ('d_phone', models.IntegerField(blank=True, null=True)),\n ('d_address', models.TextField(blank=True, null=True)),\n ('d_country', models.CharField(max_length=30)),\n ('d_state', models.CharField(choices=[('CD', 'Cardiology'), ('GS', 'General 
Surgery'), ('EC', 'Endocrinology'), ('NT', 'Neonatology')], max_length=4)),\n ('d_pincode', models.IntegerField()),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.7565789222717285, "alphanum_fraction": 0.7565789222717285, "avg_line_length": 24.33333396911621, "blob_id": "d384e7a8aed238f78292af225c4e39768b5e79a1", "content_id": "e670eeef6c973ec2d5c3a3f77b3c1f394efbab32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 56, "num_lines": 6, "path": "/Necktie/necktieapp/apps.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass NecktieappConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'necktieapp'\n" }, { "alpha_fraction": 0.844660222530365, "alphanum_fraction": 0.844660222530365, "avg_line_length": 33.33333206176758, "blob_id": "5a0368fdc2dda0ed43b2d116a42e43524aedd0b2", "content_id": "f88ec40e91781e348589b3eb9ac8eae5212042f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 43, "num_lines": 3, "path": "/Necktie/necktieapp/models/__init__.py", "repo_name": "sudheermouni/NeckTie", "src_encoding": "UTF-8", "text": "from .doctors import Doctors\nfrom .patients import Patient\nfrom .patent_doctorTb import PatentDoctorTb\n" } ]
22
AlexsandroMO/Bitcoin
https://github.com/AlexsandroMO/Bitcoin
cca28beeb712b63f31c7ef1c54aced47d8de3153
e19498660d5e3a9fdaee7fdb17e9a5464ebdac8d
978e8e7397237a269ce55dff551aee65948d3803
refs/heads/master
"2020-09-26T02:47:41.616660"
"2019-12-05T16:48:48"
"2019-12-05T16:48:48"
226,146,249
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 14, "blob_id": "06ad3af4c469b37053fdcdafe35a375719ac16ff", "content_id": "65c71bdcf4dd2064d623dfdb5818ca4a0225c49d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 30, "license_type": "no_license", "max_line_length": 19, "num_lines": 2, "path": "/README.md", "repo_name": "AlexsandroMO/Bitcoin", "src_encoding": "UTF-8", "text": "# Bitcoin\nBitcoin Application\n" }, { "alpha_fraction": 0.6198402047157288, "alphanum_fraction": 0.6225033402442932, "avg_line_length": 22.62295150756836, "blob_id": "b3af72be26890d7b8f338fc9e646a9ad53e5c2c3", "content_id": "2a6f8482b7c2881a3cdfdefc34852639e802d89c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1502, "license_type": "no_license", "max_line_length": 88, "num_lines": 61, "path": "/Write_SQL.py", "repo_name": "AlexsandroMO/Bitcoin", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport pandasql as pdsql\r\nimport sqlite3\r\nfrom datetime import date\r\nfrom datetime import datetime\r\nimport CreateTable_SQL\r\n\r\n#--------------------------------------------\r\n#Adicionar dados no banco - VarBitcoin\r\n\r\ndef add_var_bitcoin(btc_last,btc_buy,btc_sell,date_btc):\r\n\r\n # CreateTable_SQL.create_VarBTC()\r\n # CreateTable_SQL.create_Wallet()\r\n\r\n conn = sqlite3.connect('DB/DB_COINS.db')\r\n c = conn.cursor()\r\n\r\n qsl_datas = f\"\"\"INSERT INTO VARBTC(VAR_BTC_LAST,VAR_BTC_BUY,VAR_BTC_SELL,VAR_BTC_DATE)\r\n VALUES ({btc_last},{btc_buy},{btc_sell},'{date_btc}');\r\n \"\"\"\r\n\r\n c.execute(qsl_datas)\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\ndef add_var_wallet(my_wallet_control,profit,date_today):\r\n\r\n # CreateTable_SQL.create_VarBTC()\r\n # CreateTable_SQL.create_Wallet()\r\n\r\n conn = sqlite3.connect('DB/DB_COINS.db')\r\n c = conn.cursor()\r\n\r\n qsl_datas = f\"\"\"INSERT INTO 
WALLET(VAR_WALLET,WIN_LOSE,DATE_NEGOCIATION)\r\n VALUES ({my_wallet_control}, {profit},'{date_today}');\r\n \"\"\"\r\n\r\n c.execute(qsl_datas)\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef add_var_wallet_start(wallet,win_lose,date_today):\r\n\r\n # CreateTable_SQL.create_VarBTC()\r\n # CreateTable_SQL.create_Wallet()\r\n\r\n conn = sqlite3.connect('DB/DB_COINS.db')\r\n c = conn.cursor()\r\n\r\n qsl_datas = f\"\"\"INSERT INTO COINCOIN(VAR_WALLET,WIN_LOSE,DATE_NEGOCIATION)\r\n VALUES ({wallet},{win_lose},'{date_today}');\r\n \"\"\"\r\n\r\n c.execute(qsl_datas)\r\n\r\n conn.commit()\r\n conn.close()\r\n" }, { "alpha_fraction": 0.7491039633750916, "alphanum_fraction": 0.7491039633750916, "avg_line_length": 28.77777862548828, "blob_id": "7dc52b208445a00b62cc19ea2ff9dfb316c0b74e", "content_id": "0ba546279f9a8775de22e9accf0fb9f035cd4ef1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 279, "license_type": "no_license", "max_line_length": 74, "num_lines": 9, "path": "/coin/admin.py", "repo_name": "AlexsandroMO/Bitcoin", "src_encoding": "UTF-8", "text": "\r\nfrom django.contrib import admin\r\nfrom .models import TypeWallet, MYWallet\r\n\r\n\r\nclass ListaMYWallet(admin.ModelAdmin):\r\n list_display = ('name_wallet','var_wallet','type_wallet','log_create')\r\n\r\nadmin.site.register(TypeWallet)\r\nadmin.site.register(MYWallet, ListaMYWallet)\r\n" }, { "alpha_fraction": 0.5416036248207092, "alphanum_fraction": 0.5438728928565979, "avg_line_length": 16.91428565979004, "blob_id": "7043643f75f6fb6c044955122b53fcfe4704a823", "content_id": "7cab28d870c7fa81582041e31a6a1b8fa3282e35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1322, "license_type": "no_license", "max_line_length": 56, "num_lines": 70, "path": "/CreateTable_SQL.py", "repo_name": "AlexsandroMO/Bitcoin", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport pandasql as 
pdsql\r\nimport sqlite3\r\nfrom datetime import date\r\nfrom datetime import datetime\r\nimport os\r\n\r\n\r\ndef verify():\r\n directory = 'DB'\r\n dir = directory \r\n\r\n if not os.path.exists(directory): \r\n os.makedirs(dir)\r\n\r\n\r\n#-------------------------------------------------------\r\n#Criar Tabela VARBITCOIN\r\ndef create_VarBTC():\r\n verify()\r\n\r\n conn = sqlite3.connect('DB/DB_COINS.db')\r\n c = conn.cursor()\r\n\r\n table_createdb = f\"\"\"\r\n \r\n CREATE TABLE IF NOT EXISTS VARBTC (\r\n ID INTEGER PRIMARY KEY,\r\n VAR_BTC_LAST DOUBLE NOT NULL,\r\n VAR_BTC_BUY DOUBLE NOT NULL,\r\n VAR_BTC_SELL DOUBLE NOT NULL,\r\n VAR_BTC_DATE DATE NOT NULL\r\n )\r\n \r\n \"\"\"\r\n\r\n c.execute(table_createdb)\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n#-------------------------------------------------------\r\n#Criar Tabela Wallet\r\ndef create_Wallet():\r\n verify()\r\n conn = sqlite3.connect('DB/DB_COINS.db')\r\n c = conn.cursor()\r\n\r\n table_createdb = f\"\"\"\r\n \r\n CREATE TABLE IF NOT EXISTS COINCOIN (\r\n ID INTEGER PRIMARY KEY,\r\n VAR_WALLET DOUBLE NOT NULL,\r\n WIN_LOSE DOUBLE NOT NULL,\r\n DATE_NEGOCIATION DATE NOT NULL\r\n \r\n )\r\n \r\n \"\"\"\r\n\r\n c.execute(table_createdb)\r\n\r\n conn.commit()\r\n conn.close()\r\n#-------------------------\r\n\r\n\r\n\r\n#db = TinyDB('db.json')\r\n#Ft = Query()" }, { "alpha_fraction": 0.5841924548149109, "alphanum_fraction": 0.5893470644950867, "avg_line_length": 15.696969985961914, "blob_id": "8b93e9ca810f1da792110a51b5d2f576bdf47733", "content_id": "e098b567ba338883105afdbd3ecd01bf6d617518", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 582, "license_type": "no_license", "max_line_length": 46, "num_lines": 33, "path": "/Read_SQL.py", "repo_name": "AlexsandroMO/Bitcoin", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport pandasql as pdsql\r\nimport sqlite3\r\nfrom datetime import date\r\nfrom datetime import 
datetime\r\n\r\n\r\n\r\ndef read_sql_btc():\r\n\r\n conn = sqlite3.connect('DB/DB_COINS.db')\r\n\r\n sql_datas = f\"\"\"\r\n SELECT * FROM VARBTC;\r\n \"\"\"\r\n\r\n read_db = pd.read_sql_query(sql_datas, conn)\r\n conn.close()\r\n\r\n return read_db\r\n\r\ndef read_sql_wallet():\r\n\r\n conn = sqlite3.connect('DB/DB_COINS.db')\r\n\r\n sql_datas = f\"\"\"\r\n SELECT * FROM COINCOIN;\r\n \"\"\"\r\n\r\n read_db = pd.read_sql_query(sql_datas, conn)\r\n conn.close()\r\n\r\n return read_db" }, { "alpha_fraction": 0.6601941585540771, "alphanum_fraction": 0.6601941585540771, "avg_line_length": 10.235294342041016, "blob_id": "081d7f1189048f930acd86637bc6db2a5f920e7d", "content_id": "5048aaf4616468ac80b6caa2cb3abc3170052b60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 33, "num_lines": 17, "path": "/coin/tests.py", "repo_name": "AlexsandroMO/Bitcoin", "src_encoding": "UTF-8", "text": "from django.test import TestCase\r\n\r\n# pip install django-crispy-forms\r\n\r\n\r\n'''Upload documents on Github\r\n\r\ngit clone <nome>\r\n\r\n<entra na pasta criada>\r\n\r\ngit add .\r\n\r\ngit commit -m \"texto\"\r\n\r\ngit push\r\n'''" } ]
6
kawa-kokosowa/urlink
https://github.com/kawa-kokosowa/urlink
caeed443938d9a2b57e42a6771bf952938e405b4
f240e18ee3ee9486451ee808f6b59ea89c048829
5000191025e698b59d5e5cef4f894f72479fd47e
refs/heads/master
"2021-06-19T10:56:48.191526"
"2017-05-24T06:42:23"
"2017-05-24T06:42:23"
69,394,533
6
1
null
null
null
null
null
[ { "alpha_fraction": 0.617829442024231, "alphanum_fraction": 0.6267442107200623, "avg_line_length": 30.08433723449707, "blob_id": "87c9ce1367187e73a42fe9be89012e3d93c4d414", "content_id": "4572ec297bb059f839d87e6d4ca911b1e7ccaa10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2580, "license_type": "no_license", "max_line_length": 104, "num_lines": 83, "path": "/models.py", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "# builtin\nimport datetime\n\n# 3rd party\nimport flask_sqlalchemy\nimport flask_user\n\n\ndb = flask_sqlalchemy.SQLAlchemy()\n\n\nclass User(db.Model, flask_user.UserMixin):\n \"\"\"Generic User data model for flask_user as seen\n in their documentation.\n\n http://pythonhosted.org/Flask-User/basic_app.html\n\n \"\"\"\n\n id = db.Column(db.Integer, primary_key=True)\n\n # User authentication information\n password = db.Column(db.String(255), nullable=False, server_default='')\n reset_password_token = db.Column(db.String(100), nullable=False, server_default='')\n\n # User email information\n email = db.Column(db.String(255), nullable=False, unique=True)\n confirmed_at = db.Column(db.DateTime())\n\n # User information\n active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')\n first_name = db.Column(db.String(100), nullable=False, server_default='')\n last_name = db.Column(db.String(100), nullable=False, server_default='')\n\n\nclass Url(db.Model):\n \"\"\"A URL belonging to a user, accompanied by a description\n of 140 characters or less.\n\n Belongs to /urls/x\n\n \"\"\"\n\n __tablename__ = 'urls'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey(User.id)) # should never be null :o\n created = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n url = db.Column(db.String()) # should neve rbe null :o\n description = db.Column(db.String(140))\n title = db.Column(db.String())\n content_type = 
db.Column(db.String()) # isn't this a certain number of bytes max? should b required\n user = db.relationship('User', foreign_keys='Url.user_id', lazy='subquery')\n\n def __init__(self, user_id, url, description, content_type=None, title=None):\n self.user_id = user_id\n self.url = url\n self.description = description\n # these are derived from util.fetch_searchable_data()\n self.title = title\n self.content_type = content_type\n\n def __repr__(self):\n return '<URL #%s %s (%s)>' % (self.id, self.title, self.url)\n\n def to_dict(self):\n \"\"\"Create a dictionary representing this URL.\n\n Returns:\n dict: contains the id, url, and description of\n this URL.\n\n \"\"\"\n\n data_to_return = {\n 'id': self.id,\n # TODO:\n # 'created': self.created,\n 'url': self.url,\n 'description': self.description,\n 'title': self.title,\n 'content_type': self.content_type,\n }\n return data_to_return\n" }, { "alpha_fraction": 0.6167227625846863, "alphanum_fraction": 0.6180321574211121, "avg_line_length": 26.699481964111328, "blob_id": "133d028a858d790f92a57aa7dfb530627d3b78dd", "content_id": "f464729304d7cf8868f8a63a06e784320cdfdd2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5346, "license_type": "no_license", "max_line_length": 83, "num_lines": 193, "path": "/app.py", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "\"\"\"urlink Flask App\n\n\"\"\"\n\n# builtin\nimport os\n\n# local\nimport models\nimport config\nimport urlhelper\n\n# 3rd party/pip\nimport flask\nimport flask_mail\nimport flask_user\nimport flask_login\nimport flask_script\nimport flask_migrate\nimport sqlalchemy\nimport wtforms\n\n\n# flask app setup\napp = flask.Flask(__name__)\napp.config.from_object(config)\nmigrate = flask_migrate.Migrate(app, models.db)\n\nmanager = flask_migrate.Manager(app)\nmanager.add_command('db', flask_migrate.MigrateCommand)\n\nmodels.db.init_app(app) # ???\n# flask user\nmail = 
flask_mail.Mail(app)\ndb_adapter = flask_user.SQLAlchemyAdapter(models.db, models.User)\nuser_manager = flask_user.UserManager(db_adapter, app)\n\n\nclass AddUrlForm(wtforms.Form):\n \"\"\"Validation and fields for the form/page which allows a user\n to save/add a URL/link.\n\n \"\"\"\n\n url = wtforms.StringField(\n 'url',\n [wtforms.validators.URL(require_tld=True),],\n render_kw={\n \"placeholder\": \"URL/Link\",\n \"class\": \"form-control input-lg\",\n \"id\": \"url\",\n \"autofocus\": True\n },\n )\n description = wtforms.TextAreaField(\n 'description',\n [wtforms.validators.Length(max=140),],\n render_kw={\n \"placeholder\": \"Description/about URL\",\n \"class\": \"form-control input-lg\",\n \"id\": \"description\",\n \"maxlength\": 140,\n },\n )\n\n\nclass SearchForm(wtforms.Form):\n \"\"\"For live searching/filtering the bookmarks.\n\n Uses the /autocomplete endpoint (see: autocomplete()).\n\n \"\"\"\n\n autocomp = wtforms.TextField('autocomp', id='autocomplete')\n\n\n# TODO: newest first.\[email protected]('/')\ndef home_page():\n \"\"\"Rendered Jinja/HTML page for live-searching bookmarks.\n\n Form on this page can use normal form submission, however,\n this page includes jQuery which implements the live-searching\n feature, it updates the page with values from `/autocomplete`,\n i.e., autocomplete().\n\n If the user isn't logged in, they are redirected to the about page.\n\n \"\"\"\n\n if flask_login.current_user.is_authenticated:\n # this form doesn't need validating\n search_form = SearchForm(flask.request.form)\n\n\n # if we have at least search term, the user has GET'd search form\n search_term = flask.request.args.get('term')\n search_type = flask.request.args.get('type')\n if search_term:\n urls = url_search(search_term, search_type=search_type)\n else:\n urls = models.Url.query.filter_by(\n user=flask_login.current_user\n ).all()\n\n content_types = set([url.content_type for url in urls if url.content_type])\n return flask.render_template(\n 
\"ur_links.html\",\n search_form=search_form,\n urls=urls,\n options=content_types,\n )\n else:\n return flask.render_template(\"landing.html\")\n\n\ndef url_search(search_term, search_type=None):\n\n if search_type:\n search_results = models.Url.query.filter(\n models.Url.user_id == flask_login.current_user.id,\n sqlalchemy.or_(\n models.Url.url.ilike(\"%\" + search_term + \"%\"),\n models.Url.description.ilike(\"%\" + search_term + \"%\"),\n ),\n models.Url.content_type == search_type,\n )\n else:\n search_results = models.Url.query.filter(\n models.Url.user_id == flask_login.current_user.id,\n sqlalchemy.or_(\n models.Url.url.ilike(\"%\" + search_term + \"%\"),\n models.Url.description.ilike(\"%\" + search_term + \"%\"),\n ),\n )\n\n return search_results\n\n\[email protected]('/autocomplete', methods=['GET'])\n@flask_user.login_required\ndef autocomplete():\n \"\"\"Provides JSON response of URLs where\n the search term is in the description.\n\n Query for URLs owned by the current user, whose descriptions\n in the database contain `term`.\n\n Returns:\n json: A list of dictionaries describing each\n matching URL.\n\n \"\"\"\n\n search_term = flask.request.args.get('term')\n search_type = flask.request.args.get('type')\n urls = url_search(search_term, search_type=search_type)\n urls = [url.to_dict() for url in urls]\n return flask.jsonify(urls)\n\n\[email protected]('/urls/add', methods=['POST', 'GET'])\n@flask_user.login_required\ndef add_url():\n \"\"\"Process and provide the form for adding a new URL to the\n current user's urls.\n\n \"\"\"\n\n form = AddUrlForm(flask.request.form)\n\n # Either process the form from POST or show the form.\n if flask.request.method == 'POST' and form.validate():\n # There's no reason to prevent the URL from being created\n # using the POST'd information. 
Create and show the URL.\n url = flask.request.form['url']\n searchable_data = urlhelper.fetch_searchable_data(url)\n new_url = models.Url(\n user_id=flask_login.current_user.id,\n url=url,\n description=flask.request.form['description'],\n **searchable_data,\n )\n models.db.session.add(new_url)\n models.db.session.commit()\n return flask.redirect(flask.url_for('home_page'))\n else:\n return flask.render_template(\"add_url.html\", form=form)\n\n\n# Create the database\nif __name__=='__main__':\n manager.run()\n" }, { "alpha_fraction": 0.7865168452262878, "alphanum_fraction": 0.7902621626853943, "avg_line_length": 37.14285659790039, "blob_id": "5abc28aabfd8e4583c10a6b70238aaca2d08e86d", "content_id": "09d420d24a9d7c4b01453c3c85ebad67cda2745a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 267, "license_type": "no_license", "max_line_length": 53, "num_lines": 7, "path": "/requirements.txt", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "# This is the file Heroku installs. I wish I could\n# call it something like requirements_heroku.txt, but\n# Heroku specifically looks for requirements.txt in\n# root of directory. 
I may change this upon further\n# investigation.\n-r requirements_no_postgres.txt\npsycopg2\n" }, { "alpha_fraction": 0.5976027250289917, "alphanum_fraction": 0.6695205569267273, "avg_line_length": 21.461538314819336, "blob_id": "dd70fe307845b32be43c2400612f97190dc3567a", "content_id": "6866070e78d766152ef3b66b3c410e180c69bd6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "no_license", "max_line_length": 73, "num_lines": 26, "path": "/migrations/versions/a77719286100_.py", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: a77719286100\nRevises: ae0cb4fef303\nCreate Date: 2016-10-03 13:03:02.448316\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'a77719286100'\ndown_revision = 'ae0cb4fef303'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('urls', sa.Column('title', sa.String(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('urls', 'title')\n ### end Alembic commands ###\n" }, { "alpha_fraction": 0.7216783165931702, "alphanum_fraction": 0.7230769395828247, "avg_line_length": 27.579999923706055, "blob_id": "2fbb5084931944b7a74005841f1f82a2349b26ea", "content_id": "9125024627577977cce00798c672f053a6765fda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1430, "license_type": "no_license", "max_line_length": 89, "num_lines": 50, "path": "/README.md", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "# urlink\n\nSimple-as-possible non-social bookmark service, including live search\nand elegant frontend.\n\n[Use urlink LIVE on urlink.link!](http://urlink.link)\n\nI've also included a Chrome Extension for urlink (`/chrome_extension`).\n\n![urlink screen recording](https://github.com/lily-seabreeze/urlink/blob/master/demo.gif)\n\nThis is an early work in progress (call it \"alpha\").\n\n## The tools\n\n * Python 3\n * `flask`, `flask_user`, `flask_login`, `flask_sqlalchemy`,\n `flask_migrate`\n * Bootstrap\n * jQuery\n\n## Quickstart\n\n```\nmkvirtualenv urlink -p python3\npip install -r requirements_no_postgres.txt\nexport SECRET_KEY=\"asfdsfasdfasdffsdsdfa\"\npython app.py db init\npython app.py db upgrade\ngunicorn app:app\n```\n## Environmental Variables\n\nThis application is configured through environmental variables.\n\nRequired (everything else is optional):\n\n * `SECRET_KEY` (you must override)\n * `SQLALCHEMY_DATABASE_URI` (optional; defaults to sqlite)\n\nYou will want to look at `flask-mail` if you want to provide registration\nemail verification, as seen below:\n\n * `MAIL_USERNAME` (you always need to set this!)\n * `MAIL_PASSWORD` (you always need to set this!)\n * `MAIL_DEFAULT_SENDER` (don't set if you're using gmail)\n * `MAIL_SERVER` (don't set if you're using gmail)\n * `MAIL_PORT` (don't set if you're using gmail)\n * `MAIL_USE_SSL` (don't set if you're using gmail)\n 
* `MAIL_USE_TLS` (don't set if you're using gmail)\n\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 19, "blob_id": "70238d22b9c3506e6906a78ee9ed2b0faad4f2ba", "content_id": "be2fe7a05296b4c4596954e863fa3d522ed53f9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 40, "license_type": "no_license", "max_line_length": 31, "num_lines": 2, "path": "/requirements_dev.txt", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "-r requirements_no_postgres.txt\npy.test\n" }, { "alpha_fraction": 0.7446808218955994, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 93, "blob_id": "d70b552f31a344010bc50c5c86efceb926d66513", "content_id": "740b0532e007850e5a9b69c7d95fdc36a462e03d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 94, "license_type": "no_license", "max_line_length": 93, "num_lines": 1, "path": "/CONTRIBUTING.md", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "if you change the DB run `python app.py db migrate` (and possibly `python app.py db upgrade`)\n" }, { "alpha_fraction": 0.628670871257782, "alphanum_fraction": 0.6375897526741028, "avg_line_length": 29.85234832763672, "blob_id": "6fdd470302944051a8207bcadcabbdc6a920c248", "content_id": "a39ec36ff8de1b495cb54e130978e9864e7fe85c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4597, "license_type": "no_license", "max_line_length": 86, "num_lines": 149, "path": "/urlhelper.py", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "\"\"\"Get as much info as possible about a URL.\n\n\"\"\"\n\nimport mimetypes\n\nimport requests\nimport bs4\n\nMAXIMUM_REDIRECTS = 4\nFIELDS = [\n {\n 'name': 'title',\n 'soup_find': ('title', {}),\n },\n]\n\n\n_session = requests.Session()\n\n\nclass MaxRedirectError(Exception):\n\n def 
__init__(self):\n self.message = (\n \"Head request redirected %d times (max is %d)\"\n % (MAXIMUM_REDIRECTS + 1, MAXIMUM_REDIRECTS)\n )\n\n\nclass HttpError(Exception):\n\n def __init__(self, status_code):\n self.status_code = status_code\n self.message = \"Encountered HTTP error %d\" % status_code\n\n\ndef head_until_no_redirect(url, maximum_redirects=MAXIMUM_REDIRECTS):\n \"\"\"Keep fetching the redirect URL until 200 (not 301) or fail.\n\n Return:\n url, Response:\n None:\n\n \"\"\"\n\n if maximum_redirects:\n response = _session.head(url)\n\n if response.status_code == 301:\n maximum_redirects -= 1\n return head_until_no_redirect(\n response.headers['Location'],\n maximum_redirects\n )\n elif response.status_code == 200:\n return url, response\n else:\n raise HttpError(response.status_code)\n # maximum redirects is 0; we recursively reached the end\n else:\n raise MaxRedirectError()\n\n\ndef searchable_data_from_soup(soup):\n tags_to_return = {}\n for field in FIELDS:\n arg, kwargs = field['soup_find']\n found_tag = soup.find(arg, **kwargs)\n if found_tag:\n tags_to_return[field['name']] = found_tag.text\n\n return tags_to_return\n\n\n# TODO: this docstring sucks, also clean all of this up\ndef fetch_searchable_data(url):\n \"\"\"Fetch the title and meta tags of a remote\n HTML document, or fail and return None.\n\n May be expanded in the future.\n\n Note:\n does note check file extension for mimetype first, becuase more\n searchable data is hoped for than simply content_type\n\n Arguments:\n url (str): ---\n\n Returns:\n dict: Dictionary of searchable data...\n\n \"\"\"\n\n searchable_data = {}\n\n # Try to get the HTTP header for this resource. 
This may fail\n # so as a last-ditch effort try to get a type from the URL's\n # file extension.\n\n # first try file extension, if can't tell type then determine with head...\n # once you can get first x bytes for <head> info (meta, title, etc).\n try:\n # note that the new url is the final url we were directed to\n url, head_response = head_until_no_redirect(url)\n except (HttpError, MaxRedirectError) as e:\n # we can at least try to guess the mimetype from file extension\n mimetype = mimetypes.guess_type(url)\n return {\"content_type\": mimetype[0]} if mimetype else None\n\n # Determine resource's type from the 'Content-Type' HTTP header.\n headers_from_url = head_response.headers\n content_type = headers_from_url['Content-Type'].split(';', 1)[0]\n\n # TODO: should be able to handle extrapolating meta\n # from images, PDFs, music, etc.\n #\n # Bail if we can't extrapolate any further information\n # about this Content-Type (because beyond here we are just\n # extrapolating HTML information).\n if content_type != \"text/html\":\n return {\"content_type\": content_type}\n\n # ...now we know the content_type is text/html!\n searchable_data['content_type'] = \"text/html\"\n\n # First try to only request the first 400 bytes to get all of the\n # desired tags (which will be used to create searchable data).\n #\n # If this fails we request bytes 401 onward and combine,\n # extrapolating what we can\n response = _session.get(url, headers={'Range': 'bytes=0-400'})\n soup = bs4.BeautifulSoup(response.text, 'html.parser')\n more_searchable_data = searchable_data_from_soup(soup)\n\n # we couldn't find all of the tags we wanted in\n # the first 400 bytes of the response\n if not len(more_searchable_data) == len(FIELDS):\n # Store the old response text so we can skip getting it again\n old_response_text = response.text\n # Get the full page, but skip the part we already have (skip the\n # first 400 bytes), combining this new part with\n # the old_response_text!\n # FIXME: 
could be stream of data! Set an upper limit on bytes range!\n new_response = _session.get(url, headers={'Range': 'bytes=401-'})\n soup = bs4.BeautifulSoup(old_response_text + new_response.text, 'html.parser')\n\n searchable_data.update(searchable_data_from_soup(soup))\n return searchable_data\n" }, { "alpha_fraction": 0.8641975522041321, "alphanum_fraction": 0.8765432238578796, "avg_line_length": 10.571428298950195, "blob_id": "9770715c09d97eaed521610ff18293757cef0650", "content_id": "e984b29a88d26de5662bbdd600b2b68e21988733", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 81, "license_type": "no_license", "max_line_length": 16, "num_lines": 7, "path": "/requirements_no_postgres.txt", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "flask\nflask-migrate\nflask_sqlalchemy\nflask_user\ngunicorn\nrequests\nbeautifulsoup4\n" }, { "alpha_fraction": 0.721112072467804, "alphanum_fraction": 0.721112072467804, "avg_line_length": 26.404762268066406, "blob_id": "90c963173767dcedf1ea73fe8fd764fd7bfd2886", "content_id": "e8f7dffe86b48ae2c5dab10eafd4111989acad52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1151, "license_type": "no_license", "max_line_length": 68, "num_lines": 42, "path": "/config.py", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "\"\"\"Really sloppy configuration that will be overhauled\nto include environment-specific configs (develop, test, production).\n\nMostly due to a Heroku headache.\n\n\"\"\"\n\nimport os\n\n\nDEBUG = False\nTESTING = False\n\nSECRET_KEY = os.getenv('SECRET_KEY')\nSQLALCHEMY_DATABASE_URI = os.getenv(\n 'SQLALCHEMY_DATABASE_URI', # if not this... 
then below\n os.getenv('DATABASE_URL', 'sqlite:////tmp/debug.db'), # heroku\n)\n\n# flask-user\nUSER_ENABLE_USERNAME = False\nUSER_ENABLE_CHANGE_USERNAME = False\n# flask-mail settings for flask-user\n# (email confirmation, password reset)\n# setup for gmail by default\n# NOTE, FIXME, TODO: the only reason this is false\n# is because gmail isn't allowing me use their smtp\n# anymore!\nUSER_ENABLE_CONFIRM_EMAIL = False\n# this email stuff is all moot because of above note\n# will renable once have smtp service\n\"\"\"\nMAIL_USERNAME = os.getenv('MAIL_USERNAME') # [email protected]\nMAIL_PASSWORD = os.getenv('MAIL_PASSWORD')\nMAIL_DEFAULT_SENDER = '\"urlink\" <[email protected]>'\nMAIL_SERVER = os.getenv('MAIL_SERVER')\nMAIL_PORT = int(os.getenv('MAIL_PORT'))\nMAIL_USE_SSL = True\nMAIL_USE_TLS = True\n\"\"\"\n# this is used by email:\nUSER_APP_NAME = 'urlink'\n" }, { "alpha_fraction": 0.6788991093635559, "alphanum_fraction": 0.6788991093635559, "avg_line_length": 26.25, "blob_id": "64bc52e7e970ea493e9d3de2271a6ade72c17099", "content_id": "955438194ed6c484bdb33b0f69696323dfd7619c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 109, "license_type": "no_license", "max_line_length": 45, "num_lines": 4, "path": "/chrome_extension/popup.js", "repo_name": "kawa-kokosowa/urlink", "src_encoding": "UTF-8", "text": "chrome.tabs.getSelected(null, function(tab) {\n d = document;\n d.getElementById(\"url\").value = tab.url;\n});\n" }, { "alpha_fraction": 0.5670840740203857, "alphanum_fraction": 0.5670840740203857, "avg_line_length": 17.032258987426758, "blob_id": "c2e8b3fc5d302904a538803d075556c1e5aad74d", "content_id": "e4252a4a530e52d8ec2576559653606c198f251c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 71, "num_lines": 31, "path": "/tests.py", "repo_name": "kawa-kokosowa/urlink", "src_encoding": 
"UTF-8", "text": "import unittest\nimport os\n\nimport tempfile\n\nimport app\n\n\nclass UrlinkTestCase(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Deploy the test DB (sqlite).\n\n \"\"\"\n\n self.db_handle, app.app.config['DATABASE'] = tempfile.mkstemp()\n self.app = app.app.test_client()\n with app.app.app_context():\n app.init_db() # nope\n\n def tearDown(self):\n \"\"\"Delete the test DB (sqlite).\n\n \"\"\"\n\n os.close(self.db_fd)\n os.unlink(app.app.config['DATABASE'])\n\n\nif __name__ == '__main__':\n unittest.main()\n" } ]
12
nakulrathore/Machine-Learning-Projects-ud120
https://github.com/nakulrathore/Machine-Learning-Projects-ud120
3efca419df33cd6e8cf18a66211fd5558a9fa65d
d08a01cd4aae47f958c3ad5a32da712641a5e723
f6a41b8b1a1451e6dd1068b701c07b281ed92c2d
refs/heads/master
"2021-01-21T16:32:13.011348"
"2016-03-27T19:55:00"
"2016-03-27T19:55:00"
54,571,668
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.614984393119812, "alphanum_fraction": 0.6389178037643433, "avg_line_length": 32.39285659790039, "blob_id": "21ad52b6c2b0d382f3a28697ad97297042a226d4", "content_id": "f3fc3f5ce065f0d5f421e4d41b2ae21673b5ac98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 961, "license_type": "no_license", "max_line_length": 81, "num_lines": 28, "path": "/outliers/outlier_cleaner.py", "repo_name": "nakulrathore/Machine-Learning-Projects-ud120", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n\r\n\r\ndef outlierCleaner(predictions, ages, net_worths):\r\n \"\"\"\r\n clean away the 10% of points that have the largest\r\n residual errors (different between the prediction\r\n and the actual net worth)\r\n\r\n return a list of tuples named cleaned_data where \r\n each tuple is of the form (age, net_worth, error)\r\n \"\"\"\r\n \r\n ### your code goes here\r\n errors = abs(predictions - net_worths)\r\n #print predictions[0][0]\r\n #print net_worths[0][0] \r\n #print errors[0][0]\r\n #using zip\r\n not_cleaned_data = zip(ages,net_worths,errors)\r\n #print cleaned_data\r\n #sorting ,ref: http://stackoverflow.com/questions/13669252/what-is-key-lambda\r\n not_cleaned_data.sort(key=lambda tup: tup[2])\r\n #print not_cleaned_data\r\n #keeping only 90% data means, 0.9*lenth of net_worths\r\n cleaned_data = not_cleaned_data[:int(len(net_worths)*0.9)]\r\n #print cleaned_data\r\n return cleaned_data" }, { "alpha_fraction": 0.5726443529129028, "alphanum_fraction": 0.59452885389328, "avg_line_length": 30.294116973876953, "blob_id": "35cb3aaf9125b8d76f4a0a19a12f837c5d69e72c", "content_id": "c80059250befc2cae90e9e9e763e6d64fec9bdb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1645, "license_type": "no_license", "max_line_length": 144, "num_lines": 51, "path": "/outliers/enron_outliers.py", "repo_name": "nakulrathore/Machine-Learning-Projects-ud120", 
"src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n\r\nimport pickle\r\nimport sys\r\nimport matplotlib.pyplot\r\nsys.path.append(\"../tools/\")\r\nfrom feature_format import featureFormat, targetFeatureSplit\r\n\r\n\r\n### read in data dictionary, convert to numpy array\r\ndata_dict = pickle.load( open(\"../final_project/final_project_dataset.pkl\", \"r\") )\r\nfeatures = [\"salary\", \"bonus\"]\r\ndata_dict.pop('TOTAL',0)\r\ndata = featureFormat(data_dict, features)\r\n#print data[0]\r\n\r\n\r\n### your code below\r\n\r\nfor point in data:\r\n salary = point[0]\r\n bonus = point[1]\r\n matplotlib.pyplot.scatter( salary, bonus )\r\n\r\nmatplotlib.pyplot.xlabel(\"salary\")\r\nmatplotlib.pyplot.ylabel(\"bonus\")\r\nmatplotlib.pyplot.show()\r\nlen_data = len(data_dict)\r\n#print data_dict.items()[0][1]['salary']\r\nprint \"__________________________________________________\\n\\n\"\r\nprint \"biggest enron outlier, , comment line:13 for this\"\r\ntemp = 0\r\nname = \"\"\r\nfor i in range(0, len_data):\r\n if data_dict.items()[i][1]['bonus'] > temp and data_dict.items()[i][1]['bonus'] != 'NaN':\r\n temp = data_dict.items()[i][1]['bonus']\r\n name = data_dict.items()[i][0]\r\nprint temp\r\nprint \"biggest enron outlier is :\", name,\"\\n\"\r\n\r\n\r\nprint \"__________________________________________________\\n\\n\"\r\nprint \"more enron outliers, , un-comment line:13 for this\\n\"\r\n\r\nprint \"serching for some other outliers in data....\\n\"\r\nfor i in range(0, len_data):\r\n if data_dict.items()[i][1]['bonus'] > 5000000 and data_dict.items()[i][1]['salary'] > 1000000 and data_dict.items()[i][1]['bonus'] != 'NaN':\r\n temp = data_dict.items()[i][1]['bonus']\r\n name = data_dict.items()[i][0]\r\n print temp\r\n print name,\"\\n\"" }, { "alpha_fraction": 0.6507628560066223, "alphanum_fraction": 0.6576976180076599, "avg_line_length": 32.98058319091797, "blob_id": "6cc5b288fa946691d358c97bb414c9290f43f4bf", "content_id": "ecb37a5feda247de78a2e9eaa9408eac33aae661", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3605, "license_type": "no_license", "max_line_length": 123, "num_lines": 103, "path": "/regression/finance_regression.py", "repo_name": "nakulrathore/Machine-Learning-Projects-ud120", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\r\n\r\n\"\"\"\r\n Starter code for the regression mini-project.\r\n \r\n Loads up/formats a modified version of the dataset\r\n (why modified? we've removed some trouble points\r\n that you'll find yourself in the outliers mini-project).\r\n\r\n Draws a little scatterplot of the training/testing data\r\n\r\n You fill in the regression code where indicated:\r\n\"\"\" \r\n\r\n\r\nimport sys\r\nimport pickle\r\nsys.path.append(\"../tools/\")\r\nfrom feature_format import featureFormat, targetFeatureSplit\r\ndictionary = pickle.load( open(\"../final_project/final_project_dataset_modified.pkl\", \"r\") )\r\n\r\n### list the features you want to look at--first item in the \r\n### list will be the \"target\" feature\r\nfeatures_list = [\"bonus\", \"salary\"]\r\n#features_list = [\"bonus\", \"long_term_incentive\"]\r\ndata = featureFormat( dictionary, features_list, remove_any_zeroes=True)\r\ntarget, features = targetFeatureSplit( data )\r\n\r\n### training-testing split needed in regression, just like classification\r\nfrom sklearn.cross_validation import train_test_split\r\nfeature_train, feature_test, target_train, target_test = train_test_split(features, target, test_size=0.5, random_state=42)\r\ntrain_color = \"b\"\r\ntest_color = \"r\"\r\n\r\n\r\n\r\n### Your regression goes here!\r\n### Please name it reg, so that the plotting code below picks it up and \r\n### plots it correctly. 
Don't forget to change the test_color above from \"b\" to\r\n### \"r\" to differentiate training points from test points.\r\nprint \"\"\r\n\r\nprint \"__________________________________________________\\n\\n\"\r\nprint \"Etracting Slope And Intercept\"\r\nfrom sklearn.linear_model import LinearRegression\r\nreg = LinearRegression()\r\nreg.fit(feature_train, target_train)\r\nslope = reg.coef_\r\nprint \"Slope is : \",slope[0]\r\nicept = reg.intercept_\r\nprint \"Intercept is : \",icept\r\n\r\nprint \"__________________________________________________\\n\\n\"\r\nprint \"Regression Score : Training Data\"\r\ntraining_score = reg.score(feature_train,target_train)\r\nprint \"Regression Score on Training Data : \",training_score\r\n\r\nprint \"__________________________________________________\\n\\n\"\r\nprint \"Regression Score : Test Data\"\r\ntest_score = reg.score(feature_test,target_test)\r\nprint \"Regression Score on Test Data : \",test_score\r\n\r\nprint \"__________________________________________________\\n\\n\"\r\nprint \"Regressing Bonus against LTI(long_term_incentive)\"\r\nprint \"using __ features_list = ['bonus', 'long_term_incentive'] __\"\r\nprint \"Regression Score on Test Data : -0.59271289995\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n### draw the scatterplot, with color-coded training and testing points\r\nimport matplotlib.pyplot as plt\r\nfor feature, target in zip(feature_test, target_test):\r\n plt.scatter( feature, target, color=test_color ) \r\nfor feature, target in zip(feature_train, target_train):\r\n plt.scatter( feature, target, color=train_color ) \r\n\r\n### labels for the legend\r\nplt.scatter(feature_test[0], target_test[0], color=test_color, label=\"test\")\r\nplt.scatter(feature_test[0], target_test[0], color=train_color, label=\"train\")\r\n\r\n\r\n\r\n\r\n### draw the regression line, once it's coded\r\ntry:\r\n plt.plot( feature_test, reg.predict(feature_test) )\r\nexcept NameError:\r\n pass\r\n#reg.fit(feature_test, 
target_test)\r\n#plt.plot(feature_train, reg.predict(feature_train), color=\"b\") \r\nplt.xlabel(features_list[1])\r\nplt.ylabel(features_list[0])\r\nplt.legend()\r\nplt.show()\r\n\r\nprint \"__________________________________________________\\n\"\r\nprint \"##for get this correct , uncomment last two comments\\n##(right before plt.xlabel(features_list[1]):)\"\r\nprint \"sneak peek\"\r\nslope = reg.coef_\r\nprint \"Sneak peek, Slope is : \",slope[0]\r\n\r\n" }, { "alpha_fraction": 0.7692307829856873, "alphanum_fraction": 0.8153846263885498, "avg_line_length": 31.5, "blob_id": "4d48374ba983493bc33076c9de0287d1b39ea10e", "content_id": "49a7d5d36b163e5512c0b4497f2a012e49ff1676", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 65, "license_type": "no_license", "max_line_length": 47, "num_lines": 2, "path": "/README.md", "repo_name": "nakulrathore/Machine-Learning-Projects-ud120", "src_encoding": "UTF-8", "text": "# ud120-projects\nIntro to Machine Learning , Pattern Recognition\n" } ]
4
Shally1130/CS7641-assignment3
https://github.com/Shally1130/CS7641-assignment3
ca11243ab2c19907bfaf5733b211ecd23378d093
a3b72a808de3465dd2e72e887de028c45800c4d8
252575ae85fb3c1bd754c6812c63ab8339a0c47b
refs/heads/master
"2020-04-04T15:18:24.839408"
"2018-11-03T23:33:19"
"2018-11-03T23:33:19"
156,032,615
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6026653051376343, "alphanum_fraction": 0.6268988251686096, "avg_line_length": 35.80583953857422, "blob_id": "c7ba3e22fc0bf84dc49ce331164671667476a544", "content_id": "ecdf35d505c37d41be7a9bcb81ebae652720b934", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25213, "license_type": "no_license", "max_line_length": 100, "num_lines": 685, "path": "/abalone.py", "repo_name": "Shally1130/CS7641-assignment3", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport datetime as datetime\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.decomposition import PCA\nfrom sklearn.decomposition import FastICA\nfrom sklearn.random_projection import GaussianRandomProjection\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics.cluster import normalized_mutual_info_score\nfrom sklearn.metrics import silhouette_samples, silhouette_score\n\n#################################################\n#Data set 1: wine quality data set\n\ndata = pd.read_csv('abalone.csv')\nX = data.iloc[:,:8]\ny = data.iloc[:,8]\nfeatures = list(X.columns.values)\n\nscaler = MinMaxScaler(feature_range=[0,100])\nscaler.fit(X)\nX_norm = pd.DataFrame(scaler.transform(X))\nprint(X_norm)\n\n#################################################\n#K means clustering\n\nrange_n_clusters = [5,10,15,20,25]\n\nfor n_clusters in range_n_clusters:\n # Create a subplot with 1 row and 2 columns\n fig, (ax1, ax2) = plt.subplots(1, 2)\n fig.set_size_inches(18, 7)\n\n # The 1st subplot is the silhouette plot\n # The silhouette coefficient can range from -1, 1 but in this example all\n # lie within [-0.1, 1]\n ax1.set_xlim([-0.2, 1])\n # The (n_clusters+1)*10 is for inserting blank space between silhouette\n # plots of individual clusters, to demarcate them 
clearly.\n ax1.set_ylim([0, len(X_norm) + (n_clusters + 1) * 10])\n\n # Initialize the clusterer with n_clusters value and a random generator\n # seed of 10 for reproducibility.\n clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_norm)\n cluster_labels = clusterer.labels_\n print(\"NMI score: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\n # The silhouette_score gives the average value for all the samples.\n # This gives a perspective into the density and separation of the formed\n # clusters\n silhouette_avg = silhouette_score(X_norm, cluster_labels)\n print(\"For n_clusters =\", n_clusters,\n \"The average silhouette_score is :\", silhouette_avg)\n\n # Compute the silhouette scores for each sample\n sample_silhouette_values = silhouette_samples(X_norm, cluster_labels)\n\n y_lower = 10\n for i in range(n_clusters):\n # Aggregate the silhouette scores for samples belonging to\n # cluster i, and sort them\n ith_cluster_silhouette_values = \\\n sample_silhouette_values[cluster_labels == i]\n\n ith_cluster_silhouette_values.sort()\n\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n\n cmap = cm.get_cmap(\"Spectral\")\n color = cmap(float(i) / n_clusters)\n ax1.fill_betweenx(np.arange(y_lower, y_upper),\n 0, ith_cluster_silhouette_values,\n facecolor=color, edgecolor=color, alpha=0.7)\n\n # Label the silhouette plots with their cluster numbers at the middle\n ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))\n\n # Compute the new y_lower for next plot\n y_lower = y_upper + 10 # 10 for the 0 samples\n\n ax1.set_title(\"The silhouette plot for the various clusters.\")\n ax1.set_xlabel(\"The silhouette coefficient values\")\n ax1.set_ylabel(\"Cluster label\")\n\n # The vertical line for average silhouette score of all the values\n ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\")\n\n ax1.set_yticks([]) # Clear the yaxis labels / ticks\n ax1.set_xticks([-0.2, -0.1, 0, 0.2, 0.4, 
0.6, 0.8, 1])\n\n # 2nd Plot showing the actual clusters formed\n cmap = cm.get_cmap(\"Spectral\")\n colors = cmap(cluster_labels.astype(float) / n_clusters)\n ax2.scatter( X_norm.iloc[:, 7], X_norm.iloc[:, 4], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\n # Labeling the clusters\n centers = clusterer.cluster_centers_\n\n # Draw white circles at cluster centers\n ax2.scatter(centers[:, 7], centers[:, 4], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\n for i, c in enumerate(centers):\n ax2.scatter( c[7], c[4], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\n ax2.set_title(\"The visualization of the clustered data.\")\n ax2.set_xlabel(\"Feature space for the 1st feature\")\n ax2.set_ylabel(\"Feature space for the 2nd feature\")\n\n plt.suptitle((\"Silhouette analysis for KMeans clustering on sample data \"\n \"with n_clusters = %d\" % n_clusters),\n fontsize=14, fontweight='bold')\n\n plt.show()\n\n#################################################\n#Expectation Maximization clustering\n\nfor n_clusters in range_n_clusters:\n fig = plt.gcf()\n fig.set_size_inches(7, 7)\n ax = fig.add_subplot(111)\n \n # Initialize the clusterer with n_clusters value and a random generator\n # seed of 10 for reproducibility.\n clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_norm)\n cluster_labels = clusterer.predict(X_norm)\n print(\"NMI score: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\n # 2nd Plot showing the actual clusters formed\n cmap = cm.get_cmap(\"Spectral\")\n colors = cmap(cluster_labels.astype(float) / n_clusters)\n plt.scatter( X_norm.iloc[:, 7], X_norm.iloc[:, 4], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\n # Labeling the clusters\n centers = clusterer.means_\n\n # Draw white circles at cluster centers\n plt.scatter(centers[:, 7], centers[:, 4], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\n for i, c in enumerate(centers):\n ax.scatter( c[7], c[4], 
marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\n ax.set_title(\"The visualization of the clustered data.\")\n ax.set_xlabel(\"Feature space for the 1st feature\")\n ax.set_ylabel(\"Feature space for the 2nd feature\")\n\n plt.suptitle((\"Clusters plot for EM clustering on sample data \"\n \"with n_clusters = %d\" % n_clusters),\n fontsize=14, fontweight='bold')\n\n plt.show()\n\n#################################################\n#PCA feature transformation\n \npca = PCA(n_components=8, random_state=10)\nX_r = pca.fit(X).transform(X)\nX_pca = X_r\nprint('explained variance ratio (first two components): %s'\n % str(pca.explained_variance_ratio_))\n\nplt.figure()\ncolors = [\"b\",\"g\",\"r\",\"c\",\"m\",\"y\",\"k\"]\nlw = 5\n\nfor color, i in zip(colors, [5,10,15,20,25]):\n plt.scatter(X_r[y == i, 1], X_r[y == i, 2], color=color, alpha=.8, lw=lw, label=i)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('PCA of Abalone dataset')\n\n#################################################\n#ICA feature transformation\n \nica = FastICA(n_components=8, random_state=10)\nX_r = ica.fit(X).transform(X)\nX_ica = X_r\n\nplt.figure()\ncolors = [\"b\",\"g\",\"r\",\"c\",\"m\",\"y\",\"k\"]\nlw = 5\n\nfor color, i in zip(colors, [5,10,15,20,25]):\n plt.scatter(X_r[y == i, 1], X_r[y == i, 2], color=color, alpha=.8, lw=lw, label=i)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('ICA of Abalone dataset')\n\n#################################################\n#Random Projection feature transformation\n\nrca = GaussianRandomProjection(n_components=8, random_state=10)\nX_r = rca.fit_transform(X)\nX_rca = X_r\n\nplt.figure()\ncolors = [\"b\",\"g\",\"r\",\"c\",\"m\",\"y\",\"k\"]\nlw = 5\n\nfor color, i in zip(colors, [5,10,15,20,25]):\n plt.scatter(X_r[y == i, 1], X_r[y == i, 2], color=color, alpha=.8, lw=lw, label=i)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('Random Projection of Abalone 
dataset')\n\n#################################################\n#Univariate feature selection (K best)\n\nfrom sklearn.feature_selection import chi2\nfrom sklearn.feature_selection import mutual_info_classif\n\nX_new = SelectKBest(chi2, k=5).fit_transform(X, y)\nX_fs = X_new\n\nplt.figure()\ncolors = [\"b\",\"g\",\"r\",\"c\",\"m\",\"y\",\"k\"]\nlw = 5\n\nfor color, i in zip(colors, [5,10,15,20,25]):\n plt.scatter(X_new[y == i, 1], X_new[y == i, 2], color=color, alpha=.8, lw=lw, label=i)\nplt.legend(loc='best', shadow=False, scatterpoints=1)\nplt.title('Chi square feature selection of Abalone dataset')\nplt.show()\n\n#################################################\n#Rerun clustering on transformed features\n# range_n_clusters = [5,10,15,20,25]\n# # X_test=pd.DataFrame(X_pca)\n# X_test=pd.DataFrame(X_ica)\n# # X_test=pd.DataFrame(X_rca)\n# # X_test=pd.DataFrame(X_fs)\n# for n_clusters in range_n_clusters:\n# fig = plt.gcf()\n# fig.set_size_inches(7, 7)\n# ax = fig.add_subplot(111)\n \n# clusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)\n# cluster_labels = clusterer.labels_\n\n# silhouette_avg = silhouette_score(X_test, cluster_labels)\n# print(\"For n_clusters =\", n_clusters,\n# \"The average silhouette_score is :\", silhouette_avg)\n# print(\"The NMI score is: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n \n# cmap = cm.get_cmap(\"Spectral\") \n# colors = cmap(cluster_labels.astype(float) / n_clusters)\n# ax.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,\n# c=colors, edgecolor='k')\n\n# centers = clusterer.cluster_centers_\n\n# ax.scatter(centers[:, 1], centers[:, 2], marker='o',\n# c=\"white\", alpha=1, s=200, edgecolor='k')\n\n# for i, c in enumerate(centers):\n# ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,\n# s=50, edgecolor='k')\n\n# ax.set_title(\"The visualization of the clustered data.\")\n# ax.set_xlabel(\"Feature space for the 1st feature\")\n# ax.set_ylabel(\"Feature space for 
the 2nd feature\")\n\n# plt.suptitle((\"KMeans clustering using ICA feature transformation \"\n# \"with n_clusters = %d\" % n_clusters),\n# fontsize=14, fontweight='bold')\n\n# plt.show()\n \n# for n_clusters in range_n_clusters:\n# fig = plt.gcf()\n# fig.set_size_inches(7, 7)\n# ax = fig.add_subplot(111)\n\n# clusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)\n# cluster_labels = clusterer.predict(X_test)\n# print(\"NMI score: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\n# cmap = cm.get_cmap(\"Spectral\") \n# colors = cmap(cluster_labels.astype(float) / n_clusters)\n# plt.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,\n# c=colors, edgecolor='k')\n\n# centers = clusterer.means_\n\n# plt.scatter(centers[:, 1], centers[:, 2], marker='o',\n# c=\"white\", alpha=1, s=200, edgecolor='k')\n\n# for i, c in enumerate(centers):\n# ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,\n# s=50, edgecolor='k')\n\n# ax.set_title(\"The visualization of the clustered data.\")\n# ax.set_xlabel(\"Feature space for the 1st feature\")\n# ax.set_ylabel(\"Feature space for the 2nd feature\")\n# plt.suptitle((\"Clusters plot for EM clustering on PCA data \"\n# \"with n_clusters = %d\" % n_clusters), fontsize=14, fontweight='bold')\n\n# plt.show()\n\n\n#################################################\n# Rerun clustering on transformed features\nrange_n_clusters = [2,4,6,8,10]\nX_test=pd.DataFrame(X_pca)\nn_clusters = 20\n# for n_clusters in range_n_clusters:\nfig = plt.gcf()\nfig.set_size_inches(7, 7)\nax = fig.add_subplot(111)\n\nclusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)\ncluster_labels = clusterer.labels_\n\nsilhouette_avg = silhouette_score(X_test, cluster_labels)\nprint(\"For n_clusters =\", n_clusters,\n \"The average silhouette_score is :\", silhouette_avg)\nprint(\"kmeans pca The NMI score is: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\ncmap = 
cm.get_cmap(\"Spectral\") \ncolors = cmap(cluster_labels.astype(float) / n_clusters)\nax.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\ncenters = clusterer.cluster_centers_\n\nax.scatter(centers[:, 1], centers[:, 2], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\nfor i, c in enumerate(centers):\n ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\nax.set_title(\"The visualization of the clustered data.\")\nax.set_xlabel(\"Feature space for the 1st feature\")\nax.set_ylabel(\"Feature space for the 2nd feature\")\n\nplt.suptitle((\"KMeans clustering using PCA feature transformation \"\n \"with n_clusters = %d\" % n_clusters),\n fontsize=14, fontweight='bold')\n\nplt.show()\n\n################################################################\nn_clusters = 20\nX_test=pd.DataFrame(X_ica)\n# for n_clusters in range_n_clusters:\nfig = plt.gcf()\nfig.set_size_inches(7, 7)\nax = fig.add_subplot(111)\n\nclusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)\ncluster_labels = clusterer.labels_\n\nsilhouette_avg = silhouette_score(X_test, cluster_labels)\nprint(\"For n_clusters =\", n_clusters,\n \"The average silhouette_score is :\", silhouette_avg)\nprint(\"kmeans ica The NMI score is: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\ncmap = cm.get_cmap(\"Spectral\") \ncolors = cmap(cluster_labels.astype(float) / n_clusters)\nax.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\ncenters = clusterer.cluster_centers_\n\nax.scatter(centers[:, 1], centers[:, 2], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\nfor i, c in enumerate(centers):\n ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\nax.set_title(\"The visualization of the clustered data.\")\nax.set_xlabel(\"Feature space for the 1st feature\")\nax.set_ylabel(\"Feature space for the 2nd 
feature\")\n\nplt.suptitle((\"KMeans clustering using ICA feature transformation \"\n \"with n_clusters = %d\" % n_clusters),\n fontsize=14, fontweight='bold')\nplt.show()\n\n\n# ###################################################################\nn_clusters = 20\nX_test=pd.DataFrame(X_fs)\n# for n_clusters in range_n_clusters:\nfig = plt.gcf()\nfig.set_size_inches(7, 7)\nax = fig.add_subplot(111)\n\nclusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)\ncluster_labels = clusterer.labels_\n\nsilhouette_avg = silhouette_score(X_test, cluster_labels)\nprint(\"For n_clusters =\", n_clusters,\n \"The average silhouette_score is :\", silhouette_avg)\nprint(\"kmeans fs The NMI score is: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\ncmap = cm.get_cmap(\"Spectral\") \ncolors = cmap(cluster_labels.astype(float) / n_clusters)\nax.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\ncenters = clusterer.cluster_centers_\n\nax.scatter(centers[:, 1], centers[:, 2], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\nfor i, c in enumerate(centers):\n ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\nax.set_title(\"The visualization of the clustered data.\")\nax.set_xlabel(\"Feature space for the 1st feature\")\nax.set_ylabel(\"Feature space for the 2nd feature\")\n\nplt.suptitle((\"KMeans clustering using feature selection transformation \"\n \"with n_clusters = %d\" % n_clusters),\n fontsize=14, fontweight='bold')\nplt.show()\n\n\n# ###################################################################\nn_clusters = 20\nX_test=pd.DataFrame(X_rca)\n# for n_clusters in range_n_clusters:\nfig = plt.gcf()\nfig.set_size_inches(7, 7)\nax = fig.add_subplot(111)\n\nclusterer = KMeans(n_clusters=n_clusters, random_state=10).fit(X_test)\ncluster_labels = clusterer.labels_\n\nsilhouette_avg = silhouette_score(X_test, cluster_labels)\nprint(\"For n_clusters =\", 
n_clusters,\n \"The average silhouette_score is :\", silhouette_avg)\nprint(\"kmeans rca The NMI score is: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\ncmap = cm.get_cmap(\"Spectral\") \ncolors = cmap(cluster_labels.astype(float) / n_clusters)\nax.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\ncenters = clusterer.cluster_centers_\n\nax.scatter(centers[:, 1], centers[:, 2], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\nfor i, c in enumerate(centers):\n ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\nax.set_title(\"The visualization of the clustered data.\")\nax.set_xlabel(\"Feature space for the 1st feature\")\nax.set_ylabel(\"Feature space for the 2nd feature\")\n\nplt.suptitle((\"KMeans clustering using RCA transformation \"\n \"with n_clusters = %d\" % n_clusters),\n fontsize=14, fontweight='bold')\n\nplt.show()\n\n###################################################################\nn_clusters = 20\nX_test=pd.DataFrame(X_rca)\n# for n_clusters in range_n_clusters:\nfig = plt.gcf()\nfig.set_size_inches(7, 7)\nax = fig.add_subplot(111)\n\nclusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)\ncluster_labels = clusterer.predict(X_test)\nprint(\"RCA NMI score: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\ncmap = cm.get_cmap(\"Spectral\") \ncolors = cmap(cluster_labels.astype(float) / n_clusters)\nplt.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\ncenters = clusterer.means_\n\nplt.scatter(centers[:, 1], centers[:, 2], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\nfor i, c in enumerate(centers):\n ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\nax.set_title(\"The visualization of the clustered data.\")\nax.set_xlabel(\"Feature space for the 1st feature\")\nax.set_ylabel(\"Feature space for the 
2nd feature\")\nplt.suptitle((\"Clusters plot for EM clustering on RCA data \"\n \"with n_clusters = %d\" % n_clusters), fontsize=14, fontweight='bold')\n\nplt.show()\n\n##################################################################\nn_clusters = 20\nX_test=pd.DataFrame(X_ica)\nfig = plt.gcf()\nfig.set_size_inches(7, 7)\nax = fig.add_subplot(111)\n\nclusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)\ncluster_labels = clusterer.predict(X_test)\nprint(\"ICA NMI score: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\ncmap = cm.get_cmap(\"Spectral\") \ncolors = cmap(cluster_labels.astype(float) / n_clusters)\nplt.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\ncenters = clusterer.means_\n\nplt.scatter(centers[:, 1], centers[:, 2], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\nfor i, c in enumerate(centers):\n ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\nax.set_title(\"The visualization of the clustered data.\")\nax.set_xlabel(\"Feature space for the 1st feature\")\nax.set_ylabel(\"Feature space for the 2nd feature\")\nplt.suptitle((\"Clusters plot for EM clustering on ICA data \"\n \"with n_clusters = %d\" % n_clusters), fontsize=14, fontweight='bold')\n\nplt.show()\n\n##################################################################\nn_clusters = 20\nX_test=pd.DataFrame(X_fs)\n# for n_clusters in range_n_clusters:\nfig = plt.gcf()\nfig.set_size_inches(7, 7)\nax = fig.add_subplot(111)\n\nclusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)\ncluster_labels = clusterer.predict(X_test)\nprint(\"FS NMI score: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\ncmap = cm.get_cmap(\"Spectral\") \ncolors = cmap(cluster_labels.astype(float) / n_clusters)\nplt.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\ncenters = 
clusterer.means_\n\nplt.scatter(centers[:, 1], centers[:, 2], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\nfor i, c in enumerate(centers):\n ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\nax.set_title(\"The visualization of the clustered data.\")\nax.set_xlabel(\"Feature space for the 1st feature\")\nax.set_ylabel(\"Feature space for the 2nd feature\")\nplt.suptitle((\"Clusters plot for EM clustering on feature selection data \"\n \"with n_clusters = %d\" % n_clusters), fontsize=14, fontweight='bold')\n\nplt.show()\n\n#####################################################\nn_clusters = 20\nX_test=pd.DataFrame(X_pca)\n# for n_clusters in range_n_clusters:\nfig = plt.gcf()\nfig.set_size_inches(7, 7)\nax = fig.add_subplot(111)\n\nclusterer = GaussianMixture(n_components=n_clusters, random_state=10).fit(X_test)\ncluster_labels = clusterer.predict(X_test)\nprint(\"PCA NMI score: %.6f\" % normalized_mutual_info_score(y, cluster_labels))\n\ncmap = cm.get_cmap(\"Spectral\") \ncolors = cmap(cluster_labels.astype(float) / n_clusters)\nplt.scatter( X_test.iloc[:, 1], X_test.iloc[:, 2], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\ncenters = clusterer.means_\n\nplt.scatter(centers[:, 1], centers[:, 2], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\nfor i, c in enumerate(centers):\n ax.scatter( c[1], c[2], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\nax.set_title(\"The visualization of the clustered data.\")\nax.set_xlabel(\"Feature space for the 1st feature\")\nax.set_ylabel(\"Feature space for the 2nd feature\")\nplt.suptitle((\"Clusters plot for EM clustering on PCA data \"\n \"with n_clusters = %d\" % n_clusters), fontsize=14, fontweight='bold')\n\nplt.show()\n\n#################################################\n#Rerun ANN on transformed features\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import learning_curve\n\ndef plot_learning_curve(estimator, 
title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n time = datetime.datetime.now()\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n print(\"{}: {}\".format(title, datetime.datetime.now() - time))\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n plt.show()\n\nclf = MLPClassifier(hidden_layer_sizes=(8, 8, 8), random_state=0, solver=\"lbfgs\")\n# time = datetime.datetime.now()\nplot_learning_curve(clf, \"MLP using PCA transformed features\", X_pca, y, ylim=[0,1])\n# print(\"MLP using PCA transformed features Performence: {}\".format(datetime.datetime.now() - time))\n# time = datetime.datetime.now()\nplot_learning_curve(clf, \"MLP using ICA transformed features\", X_ica, y, ylim=[0,1])\n# print(\"MLP using ICA transformed features Performence: {}\".format(datetime.datetime.now() - time))\n# time = datetime.datetime.now()\nplot_learning_curve(clf, \"MLP using RCA transformed features\", X_rca, y, ylim=[0,1])\n# print(\"MLP using RCA transformed features Performence: {}\".format(datetime.datetime.now() - time))\n# time = datetime.datetime.now()\nplot_learning_curve(clf, \"MLP using Selected 5 
features\", X_fs, y, ylim=[0,1])\n# print(\"MLP using Selected 5 features Performence: {}\".format(datetime.datetime.now() - time))\n\n#################################################\n#Rerun ANN on transformed features with clusters new feature\n\nclf = MLPClassifier(hidden_layer_sizes=(8, 8, 8), random_state=0, solver=\"lbfgs\")\n\nclusterer = KMeans(n_clusters=20, random_state=10).fit(X_pca)\ny_kmeans = clusterer.labels_\nX_df = pd.DataFrame(X_pca)\nX_df[11] = y_kmeans\nplot_learning_curve(clf, \"MLP using PCA transformed features\", X_df, y, ylim=[0,1])\n\nclusterer = KMeans(n_clusters=20, random_state=10).fit(X_ica)\ny_kmeans = clusterer.labels_\nX_df = pd.DataFrame(X_ica)\nX_df[11] = y_kmeans\nplot_learning_curve(clf, \"MLP using ICA transformed features\", X_df, y, ylim=[0,1])\n\nclusterer = KMeans(n_clusters=20, random_state=10).fit(X_rca)\ny_kmeans = clusterer.labels_\nX_df = pd.DataFrame(X_rca)\nX_df[11] = y_kmeans\nplot_learning_curve(clf, \"MLP using RCA transformed features\", X_df, y, ylim=[0,1])\n\nclusterer = KMeans(n_clusters=20, random_state=10).fit(X_fs)\ny_kmeans = clusterer.labels_\nX_df = pd.DataFrame(X_fs)\nX_df[11] = y_kmeans\nplot_learning_curve(clf, \"MLP using selected 5 features\", X_df, y, ylim=[0,1])\n\n" }, { "alpha_fraction": 0.6959999799728394, "alphanum_fraction": 0.7519999742507935, "avg_line_length": 61.5, "blob_id": "47bde34d1c9ccb62882fb5846bd8d50158472b4c", "content_id": "69509bbc83661084ae18d62e7317cfa02deb422e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 250, "license_type": "no_license", "max_line_length": 91, "num_lines": 4, "path": "/README.md", "repo_name": "Shally1130/CS7641-assignment3", "src_encoding": "UTF-8", "text": "# CS7641-assignment3\n1. The environment: python 3.6, scikit-learn 0.18.1\n2. Used libraries: numpy, scipy, scikit-learn, pandas, datetime, pandas, matplotlib.\n3. 
Download the all file in one folder, simple run \"python wine.py\" or \"python abalone.py\".\n" } ]
2
kamranrafiq/Automate-Boring-Work
https://github.com/kamranrafiq/Automate-Boring-Work
b788df6d8e2e6e9227e399ee07fc2fc9eb331dc5
2f5e5d2276679f8588743155ba6c3be57656b7d8
9796b91d90fe74e9a4b325b6f8dbdca2b0ae50c4
refs/heads/master
"2020-07-15T16:33:50.538144"
"2019-09-03T22:50:57"
"2019-09-03T22:50:57"
205,608,524
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7732558250427246, "alphanum_fraction": 0.7732558250427246, "avg_line_length": 33.400001525878906, "blob_id": "a68db97d10a96acb46ae25ede82e8dbd48521933", "content_id": "d78d8ac205bda6f84381a5686ef5e9ff87651f07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 172, "license_type": "no_license", "max_line_length": 106, "num_lines": 5, "path": "/README.md", "repo_name": "kamranrafiq/Automate-Boring-Work", "src_encoding": "UTF-8", "text": "# Automate-Boring-Work\n\nStarted playing with Python! Loving it.\n\nCreating basic scripts for day to day work and trying to automate manual work. More to come... Stay tuned!\n" }, { "alpha_fraction": 0.5698323845863342, "alphanum_fraction": 0.5865921974182129, "avg_line_length": 24.714284896850586, "blob_id": "4a661a4a76a28d1c41468dee19cf4084cc97e6a2", "content_id": "6141efd71feefe27ffacc6acd5db6ff89c8d021d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 64, "num_lines": 7, "path": "/Get_Service_By_Ports.py", "repo_name": "kamranrafiq/Automate-Boring-Work", "src_encoding": "UTF-8", "text": "import socket\nfor x in range(0, 65):\n try:\n ports = socket.getservbyport(x)\n print(\"Port Number \",x, \" runs service \", ports.upper())\n except:\n continue" }, { "alpha_fraction": 0.6749634146690369, "alphanum_fraction": 0.6881405711174011, "avg_line_length": 27.45833396911621, "blob_id": "9fde08a7eaee066c4faea06ffbf01fc9b32c9f2d", "content_id": "e0c46d60cea19d20fd9ba93fc4d00eceb634343b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 683, "license_type": "no_license", "max_line_length": 124, "num_lines": 24, "path": "/Guess_game.py", "repo_name": "kamranrafiq/Automate-Boring-Work", "src_encoding": "UTF-8", "text": "import random\n\nname = input(\"What's your name? \")\nprint(\"Well! 
\" + name + \" I am thinking of a number between 1 and 20.\\nYou have 6 tries to guess that number.\")\n\nsecret_number = random.randint(1,20)\n\nfor guesstaken in range(1,7):\n\tguess = int(input(\"Take a guess: \"))\n\n\tif guess < secret_number:\n\t\tprint(\"Your guess is low. Try again\")\n\n\telif guess > secret_number:\n\t\tprint(\"Your guess is high. Try again\")\n\n\telse:\n\t\tbreak\n\nif guess == secret_number:\n\tprint(\"Your guess is correct! You guessed the correct number in \" + str(guesstaken) + \" guesses\")\n\nelse:\n\tprint(\"Sorry! You were not able to guess the correct number. The number i was thinking of was \" + str(secret_number) + \".\")\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 35.20000076293945, "blob_id": "64c1a76b59ab6dcb007aa3c9567bc2636f8ecfc6", "content_id": "2583c6c986edb220b9799d4b6ab8eacdd94408b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 67, "num_lines": 5, "path": "/Input_port_to_get_service.py", "repo_name": "kamranrafiq/Automate-Boring-Work", "src_encoding": "UTF-8", "text": "import socket\nport_numbers = input(\"Enter the port number: \")\n\nservice = socket.getservbyport(int(port_numbers))\nprint (\"Port number\",port_numbers,\"runs service:\", service.upper())" } ]
4
rafaelawon/pwnable
https://github.com/rafaelawon/pwnable
0f73b52ce81de5b42f11cf1c13fef1e7d0ca9e0b
0bfaa4805b275a0e8a944f54f873fcf626f0d4e5
c76b7ab6d6787a5ae08ab704f14fc354d56d1a81
refs/heads/master
"2020-07-09T23:05:32.736739"
"2019-08-24T05:36:44"
"2019-08-24T05:36:44"
204,104,302
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7638888955116272, "alphanum_fraction": 0.8055555820465088, "avg_line_length": 71, "blob_id": "7168bd3a950e5f3aff92c90e17d29179974b7220", "content_id": "067555f28e4f455f75c0d5b761ce62ffbaf8bdfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 72, "license_type": "no_license", "max_line_length": 71, "num_lines": 1, "path": "/tip.md", "repo_name": "rafaelawon/pwnable", "src_encoding": "UTF-8", "text": "how to solve pwntool installation error: https://pwnwiz.tistory.com/284\n" }, { "alpha_fraction": 0.5872340202331543, "alphanum_fraction": 0.651063859462738, "avg_line_length": 18.41666603088379, "blob_id": "44b3378eebb4cc5eaa70eda8dd9b24fa173d118d", "content_id": "eb4aaa8f4cd125b73bb266eeacf5863bf9500d4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 61, "num_lines": 12, "path": "/won/input.py", "repo_name": "rafaelawon/pwnable", "src_encoding": "UTF-8", "text": "from pwn import *\n\ncontext.log_level = 'debug'\n\nargvs = [\"\" for i in range(100)]\nargvs[0] = \"./input\"\nargvs[65] = \"\\x00\"\nargvs[66] = \"\\x20\\x0a\\x0d\"\n\ntarget = process(executable='/home/input2/input', arvs=argvs)\n\ntarget.interactive()\n\n\n" } ]
2
JohvanyROB/au422_tp3_IPSA
https://github.com/JohvanyROB/au422_tp3_IPSA
7e7c80b686ef3489114b55179e1aa0c6270d654e
d9cda40fadfc3e77eb85a51bff076fdaa53457a7
a00f150d2c17de576e6daf52471008911ff5c253
refs/heads/main
"2023-03-29T11:52:48.037463"
"2023-03-15T13:48:02"
"2023-03-15T13:48:02"
356,914,214
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5258053541183472, "alphanum_fraction": 0.531347393989563, "avg_line_length": 29.38947296142578, "blob_id": "ada71d1aa7fb388d044a29feba05b7498c56998e", "content_id": "fce5152c38a718c46ae52881916ef8786bbbc57c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2887, "license_type": "permissive", "max_line_length": 97, "num_lines": 95, "path": "/au_422_navigation/scripts/path_planning_rrt.py", "repo_name": "JohvanyROB/au422_tp3_IPSA", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport rospy\nfrom geometry_msgs.msg import PoseStamped, Pose2D\nfrom nav_msgs.srv import GetMap\nfrom nav_msgs.msg import Path\nimport tf\n\nfrom sys import exit\nimport cv2\n\n\nclass RRT:\n def __init__(self, K=0, dq=0):\n \"\"\" Attributes \"\"\"\n self.robot_pose = Pose2D()\n self.path = []\n self.listener = tf.TransformListener()\n #TO DO: add your attributes here.... \n\n \"\"\" Publishers and Subscribers \"\"\"\n rospy.Timer(rospy.Duration(secs=0.5), self.poseCb)\n rospy.Subscriber(\"/move_base_simple/goal\", PoseStamped, self.goal_pose_cb)\n self.pathPub = rospy.Publisher(\"/path\", Path, queue_size=1)\n\n \"\"\" Load the map and create the related image\"\"\"\n self.getMap()\n\n\n # **********************************\n def getMap(self):\n \"\"\" Call the static_map service and then get the map \"\"\"\n print(\"Waiting for map service to be available...\")\n rospy.wait_for_service('/static_map')\n try:\n get_map = rospy.ServiceProxy('/static_map', GetMap)\n self.map = get_map().map\n print(\"Map received !\")\n except rospy.ServiceException as e:\n print(f\"Map service call failed: {e}\")\n exit()\n\n \n # **********************************\n def poseCb(self, event):\n \"\"\" Get the current position of the robot each 500ms \"\"\"\n try:\n trans, rot = self.listener.lookupTransform(\"/map\", \"/base_footprint\", rospy.Time(0))\n self.robot_pose.x = 
trans[0]\n self.robot_pose.y = trans[1]\n print(f\"Robot's pose: {self.robot_pose.x}, {self.robot_pose.y}\")\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n print(f\"Could not transform /base_footprint to /map\")\n\n\n # **********************************\n def goal_pose_cb(self, msg):\n \"\"\" TODO - Get the goal pose \"\"\"\n #get the goal pose here\n #....\n\n self.run()\n\n\n # **********************************\n def run(self):\n \"\"\" TODO - Implement the RRT algorithm \"\"\"\n pass\n\n\n # **********************************\n def publishPath(self):\n \"\"\" Send the computed path so that RVIZ displays it \"\"\"\n \"\"\" TODO - Transform the waypoints from pixels coordinates to meters in the map frame \"\"\"\n msg = Path()\n msg.header.frame_id = \"map\"\n msg.header.stamp = rospy.Time.now()\n path_RVIZ = []\n for pose_img in self.path:\n pose = PoseStamped()\n #pose.pose.position.x = ....\n #pose.pose.position.y = ...\n path_RVIZ.append(pose)\n msg.poses = path_RVIZ\n self.pathPub.publish(msg)\n\n\nif __name__ == '__main__':\n \"\"\" DO NOT TOUCH \"\"\"\n rospy.init_node(\"RRT\", anonymous=True)\n\n rrt = RRT()\n\n rospy.spin()\n" }, { "alpha_fraction": 0.7191780805587769, "alphanum_fraction": 0.75, "avg_line_length": 15.277777671813965, "blob_id": "6d199888f861b42cf034800c70266ff0e4cc741a", "content_id": "a64aaf40462f441da8c0e9cd545d10a6b6a72957", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 292, "license_type": "permissive", "max_line_length": 39, "num_lines": 18, "path": "/au_422_simu/CMakeLists.txt", "repo_name": "JohvanyROB/au422_tp3_IPSA", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 3.0.2)\nproject(au_422_simu)\n\nfind_package(catkin REQUIRED COMPONENTS\n gazebo_ros\n)\n\ncatkin_package(\n# INCLUDE_DIRS include\n# LIBRARIES au_422_simu\n# CATKIN_DEPENDS gazebo_ros\n# DEPENDS system_lib\n)\n\ninclude_directories(\n# 
include\n ${catkin_INCLUDE_DIRS}\n)" }, { "alpha_fraction": 0.5935903191566467, "alphanum_fraction": 0.607059895992279, "avg_line_length": 30.202898025512695, "blob_id": "9024e2c37decc6733f510e9f3819a6ba42e89024", "content_id": "fcbdc1d0e8aca3e27c7702af28e2ec02e174f002", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2153, "license_type": "permissive", "max_line_length": 113, "num_lines": 69, "path": "/au_422_navigation/scripts/agent.py", "repo_name": "JohvanyROB/au422_tp3_IPSA", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport rospy\n\nfrom geometry_msgs.msg import Pose2D, Twist\nfrom nav_msgs.msg import Path\nimport tf\n\nclass Agent:\n def __init__(self):\n self.listener = tf.TransformListener()\n\n self.robot_pose = Pose2D()\n self.goal_received, self.reached = False, False\n\n self.path_sub = rospy.Subscriber(\"path\", Path, self.plannerCb, queue_size=1)\n self.vel_pub = rospy.Publisher(\"cmd_vel\", Twist, queue_size=1)\n self.timer_pose = rospy.Timer(rospy.Duration(0.5), self.poseCb)\n self.timer_follower = rospy.Timer(rospy.Duration(0.1), self.moveToGoal)\n\n\n def poseCb(self, event):\n \"\"\" Get the current position of the robot each 500ms \"\"\"\n try:\n trans, rot = self.listener.lookupTransform(\"/map\", \"/base_footprint\", rospy.Time(0))\n self.robot_pose.x = trans[0]\n self.robot_pose.y = trans[1]\n self.robot_pose.theta = tf.transformations.euler_from_quaternion(rot)[2]\n print(f\"Robot's pose: {self.robot_pose.x:.2f}, {self.robot_pose.y:.2f}, {self.robot_pose.theta:.2f}\")\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n print(f\"Could not transform /base_footprint to /map\")\n\n\n def plannerCb(self, msg):\n self.reached, self.goal_received = False, True\n self.path = msg.poses[1:] #remove the robot's pose\n\n\n def moveToGoal(self, event):\n if not self.reached and self.goal_received:\n pass\n 
#Add your strategy here\n\n \n def send_velocities(self):\n self.linear = self.constrain(self.linear, min=-2.0, max=2.0)\n self.angular = self.constrain(self.angular)\n\n cmd_vel = Twist()\n cmd_vel.linear.x = self.linear\n cmd_vel.angular.z = self.angular\n self.vel_pub.publish(cmd_vel)\n\n\n def constrain(self, val, min=-1.0, max=1.0):\n if val < min:\n return min\n elif val > max:\n return max\n return val\n \n\nif __name__ == \"__main__\":\n rospy.init_node(\"agent_node\", anonymous=True) \n\n node = Agent()\n\n rospy.spin()\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7528409361839294, "avg_line_length": 15.809523582458496, "blob_id": "a939a270516c815df0541ad355a9cbdbbc4bd4f5", "content_id": "2797982b56bedbb6069363a93a9c41efbfaa2869", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 352, "license_type": "permissive", "max_line_length": 49, "num_lines": 21, "path": "/au_422_navigation/CMakeLists.txt", "repo_name": "JohvanyROB/au422_tp3_IPSA", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 3.0.2)\nproject(au_422_navigation)\n\nfind_package(catkin REQUIRED COMPONENTS\n geometry_msgs\n nav_msgs\n rospy\n tf\n)\n\ncatkin_package(\n# INCLUDE_DIRS include\n# LIBRARIES au_422_navigation\n# CATKIN_DEPENDS geometry_msgs nav_msgs rospy tf\n# DEPENDS system_lib\n)\n\ninclude_directories(\n# include\n ${catkin_INCLUDE_DIRS}\n)" }, { "alpha_fraction": 0.7118958830833435, "alphanum_fraction": 0.7230483293533325, "avg_line_length": 25.899999618530273, "blob_id": "2676b82b3f397e1de54571e21be12bd017603436", "content_id": "3f821ce703a132b1e2fb95619c28e0508a97651c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 538, "license_type": "permissive", "max_line_length": 97, "num_lines": 20, "path": "/README.md", "repo_name": "JohvanyROB/au422_tp3_IPSA", "src_encoding": "UTF-8", "text": "# 
AU422\n\nConnect to [RDS](https://app.theconstructsim.com/#/) with your logins.\n\nGo to \"My rosjects\" and create a new project (Select the ROS **Noetic** distribution) and run it.\n\n\n\nOpen the web shell and follow the instructions **ONE AFTER THE OTHER** :\n\n```bash\nsudo apt update\nsudo apt install ros-noetic-slam-karto ros-noetic-map-server -y\ncd ~/catkin_ws/src && git clone https://github.com/JohvanyROB/AU422.git\ncd ~/catkin_ws && catkin_make && source ~/catkin_ws/devel/setup.bash\n```\n\n\n\nYou can now come back to the project subject.\n" } ]
5
Garlinsk/Password-Locker
https://github.com/Garlinsk/Password-Locker
6991faa787366795a3ff700d0dbb47567c0fab4b
6c22cde511d066ae8ea16f23466e5f0415f12973
59e833e345505d91a80613c05b608e469206c5b0
refs/heads/main
"2023-07-07T08:13:06.323463"
"2021-08-02T20:40:46"
"2021-08-02T20:40:46"
391,172,920
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6166219711303711, "alphanum_fraction": 0.6216487884521484, "avg_line_length": 32.11111068725586, "blob_id": "6d055de5f01ff34e9d81713c8aae8940f7e57536", "content_id": "54e3b5c687857108df0f2860267781672a82402b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2984, "license_type": "permissive", "max_line_length": 89, "num_lines": 90, "path": "/passlock_test.py", "repo_name": "Garlinsk/Password-Locker", "src_encoding": "UTF-8", "text": "import unittest #Importing the unittest module\nfrom passlock import User #Importing the user class\nfrom passlock import Credentials\nimport pyperclip\n\n\nclass TestCredentials(unittest.Testcase):\n \"\"\"\n Test class that defines test cases for the User class.\n\n Args:\n unittest.TestCase: TestCase class that helps in creating test cases\n \"\"\"\n def setUp(self):\n \"\"\"\n Method that runs before each individual test methods run.\n \"\"\"\n self.new_user = User(\"FrankGarlinsk\",\"1234zx\", \"[email protected]\")\n self.new_credential = Credentials('email','FrankGarlinsk','1234zx')\n\n\n def test_init(self):\n \"\"\"\n test class to check if the object has been initialized correctly\n \"\"\"\n def save_details(self):\n \"\"\"\n method to store a new credential to the credentials list\n \"\"\"\n Credentials.credentials_list.append(self)\n self.assertEqual(self.new_user.username,\"FrankGarlinsk\")\n self.assertEqual(self.new_user.password,\"1234zx\")\n self.assertEqual(self.new_user.email,\"[email protected]\")\n\n def test_save_user(self):\n \"\"\"\n test case to test if a new user instance has been saved into the User list\n \"\"\"\n self.new_user.save_user() # saving the new user\n self.assertEqual(len(User.user_list),1) \n\n def test_save_multiple_user(self):\n \"\"\"\n test_save_multiple_user to check if we can save multiple user\n objects to our user_list\n \"\"\"\n self.new_user.save_user()\n test_user = 
User(\"Test\",\"user\",\"1235zx\",\"[email protected]\") # new user\n test_user.save_user()\n self.assertEqual(len(User.user_list),2) \n\n def test_del_user(self):\n \"\"\"\n test class to test delete user method\n \"\"\"\n self.new_user.delete_user()# Deleting a contact object\n self.assertEqual(len(User.user_list),0) \n\n def test_find_user_by_username(self):\n '''\n test to check if we can find a user by username and display information\n '''\n\n self.new_user.save_user()\n test_user = User(\"Test\",\"user\",\"1235zx\",\"[email protected]\") # new user\n test_user.save_user()\n\n found_user = User.find_by_username(\"FrankGarlinsk\")\n\n self.assertEqual(found_user.email,test_user.email)\n\n def save_credential_test(self):\n \"\"\"\n test case to test if the crential object is saved into the credentials list.\n \"\"\"\n self.new_credential.save_details()\n self.assertEqual(len(Credentials.credentials_list),1)\n \n def test_save_many_accounts(self):\n '''\n test to check if we can save multiple credentials objects to our credentials list\n '''\n self.new_credential.save_details()\n test_credential = Credentials(\"Twitter\",\"mikeycharles\",\"Mfh45hfk\") \n test_credential.save_details()\n self.assertEqual(len(Credentials.credentials_list),2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n " }, { "alpha_fraction": 0.6351515054702759, "alphanum_fraction": 0.6375757455825806, "avg_line_length": 19.649999618530273, "blob_id": "83338c9b79e41f3d399302178aee3bf37d742223", "content_id": "3a6233dd9175316597655c9d878838b83d6b7848", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 825, "license_type": "permissive", "max_line_length": 76, "num_lines": 40, "path": "/run.py", "repo_name": "Garlinsk/Password-Locker", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3.9\nfrom os import name\nfrom passlock import User\n\ndef create_contact(fname,lname,phone,email):\n '''\n Function to create a new user\n 
'''\n new_user = User(fname,lname,phone,email)\n return new_user\n\ndef save_users(user):\n '''\n Function to save user\n '''\n user.save_user()\n\ndef del_user(user):\n '''\n Function to delete a user\n '''\n user.delete_user()\n\ndef find_user(name):\n '''\n Function that finds a contact by number and returns the user\n '''\n return name.find_by_username(name)\n\ndef check_existing_users(name):\n '''\n Function that check if a user exists with that name and return a Boolean\n '''\n return User.user_exist(name) \n\ndef display_users():\n '''\n Function that returns all the saved users\n '''\n return User.display_users()" }, { "alpha_fraction": 0.5731402635574341, "alphanum_fraction": 0.5731402635574341, "avg_line_length": 23.390243530273438, "blob_id": "19aa03d7fc30be532d9a2b44f82b1782879f35c9", "content_id": "74b5394234696e213f0f9b2be260cfcafdceee67", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2003, "license_type": "permissive", "max_line_length": 82, "num_lines": 82, "path": "/passlock.py", "repo_name": "Garlinsk/Password-Locker", "src_encoding": "UTF-8", "text": "from os import name\nimport pyperclip\n\nclass User:\n\n \"\"\"\n Class that generates new instances of user.\n \"\"\"\n\n user_list = [] # Empty user list\n\n def __init__(self, username, password, email):\n\n # docstring removed for simplicity\n\n self.username = username\n self.password = password\n self.email = email\n\n\n def save_user(self):\n\n \"\"\"\n A method that saves a new user instace into the user list\n \"\"\"\n User.user_list.append(self)\n \n def save_multiple_user(self):\n \"\"\"\n save_multiple_user method is to check if we can save multiple user\n objects to our user_list\n \"\"\"\n self.new_user.save_user()\n\n def delete_user(self):\n '''\n delete_account method deletes a saved account from the list\n '''\n User.user_list.remove(self)\n\nclass Credentials():\n \"\"\"\n Create credentials class to help create 
new objects of credentials\n \"\"\"\n credentials_list = []\n\n @classmethod\n def find_by_username(cls,user):\n '''\n Method that takes in a name and returns a username that matches that name.\n\n Args:\n user: Username to search for\n Returns :\n Name of person that matches the user.\n '''@classmethod\n def copy_email(cls,user):\n user_found = user.find_by_username(user)\n pyperclip.copy(user_found.email)\n \n\n for user in cls.user_list:\n if user.user_name == user:\n return user \n\n\n def __init__(self,account,userName, password):\n \"\"\"\n method that defines user credentials to be stored\n \"\"\"\n\n def save_details(self):\n \"\"\"\n method to store a new credential to the credentials list\n \"\"\"\n Credentials.credentials_list.append(self) \n\n \n @classmethod\n def copy_email(cls,user):\n user_found = User.find_by_username(name)\n pyperclip.copy(user_found.email) " }, { "alpha_fraction": 0.737021267414093, "alphanum_fraction": 0.7446808218955994, "avg_line_length": 25.659090042114258, "blob_id": "ec39c346fa35b65441cfdcbbe1ae48523d917134", "content_id": "aa2a970abf853889f5519521f18fd6e6951ee3b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1175, "license_type": "permissive", "max_line_length": 289, "num_lines": 44, "path": "/README.md", "repo_name": "Garlinsk/Password-Locker", "src_encoding": "UTF-8", "text": "### PASSWORD-LOCKER\n\n## Author\n\n[Franklin-Kuloba]\n\n## Description\n\nThis project is a python application that manages login and signup credentials of a person for various accounts i.e. username and passwords for each account. 
It also stores the passwords and generates a unique password for a user if they do not want to generate new passwords by themselves\n\n\n## Setup/Installation Requirements\nThe application requires the following installations to operate.\n\n* Python3.9\n* pyperclip\n* pip\n\n## Cloning \n\n* Use terminal {ctrl+Alt+T} \n* git clone https://github.com/Garlinsk/Password-Locker.git\n* cd Password-Locker\n* based on the text editor you use,atom . or code .\n\n## Run Application\n* To run the app,open the cloned file in the terminal and run the following commands:\n * $ chmod +x interface.py\n $ ./interface.py\n* Run test for the app $Python3 passlock_test.py \n\n## Known Bugs\n* There are no known bugs currently but pull requests are allowed incase you spot a bug.\n\n## Technologies Used\n* Python3.9\n\n## Support and contact details\nAny question or contributions,kindly email me @ [[email protected]]\n\n### License\n\n* MIT License:\n* Copyright (c) 2021 [Franklin-Kuloba] \n " } ]
4
sarobe/VGDLEntityCreator
https://github.com/sarobe/VGDLEntityCreator
d3f48a4e548d41cea92b1847babafa46d315d72b
cd5abe29b8f5f7ad7a7d035d872a11654f9f183a
db3e433ca206b3b56c6bb7c510c3871b2406e994
refs/heads/master
"2020-05-20T00:31:59.742230"
"2013-05-22T12:00:27"
"2013-06-02T00:28:02"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5198326706886292, "alphanum_fraction": 0.5261356234550476, "avg_line_length": 37.8291130065918, "blob_id": "e641ce8dc17f90f4cbab669a9cac7309b4319ed9", "content_id": "9c12bec43aec68b2a5b0faaccc84bab63e4eefff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18404, "license_type": "no_license", "max_line_length": 111, "num_lines": 474, "path": "/vgdl/core.py", "repo_name": "sarobe/VGDLEntityCreator", "src_encoding": "UTF-8", "text": "'''\nVideo game description language -- parser, framework and core game classes.\n\n@author: Tom Schaul\n'''\n\nfrom random import choice\nfrom collections import defaultdict\n\nimport pygame\n\nfrom tools import Node, indentTreeParser\nfrom vgdl.tools import roundedPoints\n\n\nclass VGDLParser(object):\n \"\"\" Parses a string into a Game object. \"\"\" \n verbose = False\n \n @staticmethod \n def playGame(game_str, map_str):\n \"\"\" Parses the game and level map strings, and starts the game. \"\"\"\n g = VGDLParser().parseGame(game_str)\n g.buildLevel(map_str)\n g.startGame()\n \n @staticmethod\n def playSubjectiveGame(game_str, map_str):\n from pybrain.rl.experiments.episodic import EpisodicExperiment\n from vgdl.interfaces import GameTask\n from vgdl.subjective import SubjectiveGame\n from vgdl.agents import InteractiveAgent, UserTiredException\n g = VGDLParser().parseGame(game_str)\n g.buildLevel(map_str) \n senv = SubjectiveGame(g, actionDelay=100, recordingEnabled=True)\n task = GameTask(senv) \n iagent = InteractiveAgent()\n exper = EpisodicExperiment(task, iagent)\n try:\n exper.doEpisodes(1)\n except UserTiredException:\n pass\n \n def parseGame(self, tree):\n \"\"\" Accepts either a string, or a tree. 
\"\"\"\n if not isinstance(tree, Node):\n tree = indentTreeParser(tree).children[0]\n sclass, args = self._parseArgs(tree.content) \n self.game = sclass(**args) \n for c in tree.children:\n if c.content == \"SpriteSet\":\n self.parseSprites(c.children)\n if c.content == \"InteractionSet\":\n self.parseInteractions(c.children)\n if c.content == \"LevelMapping\":\n self.parseMappings(c.children)\n if c.content == \"TerminationSet\":\n self.parseTerminations(c.children)\n return self.game\n \n def _eval(self, estr):\n \"\"\" Whatever is visible in the global namespace (after importing the ontologies)\n can be used in the VGDL, and is evaluated. \n \"\"\"\n from vgdl.ontology import *\n return eval(estr)\n\n def parseInteractions(self, inodes): \n for inode in inodes:\n if \">\" in inode.content:\n pair, edef = [x.strip() for x in inode.content.split(\">\")]\n eclass, args = self._parseArgs(edef)\n self.game.collision_eff.append(tuple([x.strip() for x in pair.split(\" \") if len(x)>0]\n +[eclass, args]))\n if self.verbose: \n print \"Collision\", pair, \"has effect:\", edef \n \n def parseTerminations(self, tnodes):\n for tn in tnodes:\n sclass, args = self._parseArgs(tn.content)\n if self.verbose:\n print \"Adding:\", sclass, args \n self.game.terminations.append(sclass(**args))\n \n def parseSprites(self, snodes, parentclass=None, parentargs={}, parenttypes=[]):\n for sn in snodes:\n assert \">\" in sn.content\n key, sdef = [x.strip() for x in sn.content.split(\">\")]\n sclass, args = self._parseArgs(sdef, parentclass, parentargs.copy())\n stypes = parenttypes+[key]\n if 'singleton' in args:\n if args['singleton']==True:\n self.game.singletons.append(key)\n args = args.copy()\n del args['singleton']\n \n if len(sn.children) == 0:\n if self.verbose:\n print \"Defining:\", key, sclass, args, stypes \n self.game.sprite_constr[key] = (sclass, args, stypes)\n if key in self.game.sprite_order:\n # last one counts\n self.game.sprite_order.remove(key)\n 
self.game.sprite_order.append(key)\n else: \n self.parseSprites(sn.children, sclass, args, stypes)\n \n def parseMappings(self, mnodes): \n for mn in mnodes:\n c, val = [x.strip() for x in mn.content.split(\">\")]\n assert len(c) == 1, \"Only single character mappings allowed.\"\n # a char can map to multiple sprites\n keys = [x.strip() for x in val.split(\" \") if len(x)>0]\n if self.verbose: \n print \"Mapping\", c, keys \n self.game.char_mapping[c] = keys\n \n def _parseArgs(self, s, sclass=None, args=None):\n if not args: \n args = {}\n sparts = [x.strip() for x in s.split(\" \") if len(x) > 0]\n if len(sparts) == 0:\n return sclass, args\n if not '=' in sparts[0]:\n sclass = self._eval(sparts[0])\n sparts = sparts[1:]\n for sp in sparts:\n k, val = sp.split(\"=\")\n try:\n args[k] = self._eval(val)\n except:\n args[k] = val\n return sclass, args\n \n \nclass BasicGame(object):\n \"\"\" This regroups all the components of a game's dynamics, after parsing. \"\"\" \n MAX_SPRITES = 10000\n \n default_mapping = {'w': ['wall'],\n 'A': ['avatar'],\n }\n \n block_size = 10\n frame_rate = 20\n \n def __init__(self, **kwargs):\n from vgdl.ontology import Immovable, DARKGRAY, MovingAvatar\n for name, value in kwargs.items():\n if hasattr(self, name):\n self.__dict__[name] = value\n else:\n print \"WARNING: undefined parameter '%s' for game! 
\"%(name)\n \n # contains mappings to constructor (just a few defaults are known)\n self.sprite_constr = {'wall': (Immovable, {'color': DARKGRAY}, ['wall']),\n 'avatar': (MovingAvatar, {}, ['avatar']),\n }\n # z-level of sprite types (in case of overlap) \n self.sprite_order = ['wall', \n 'avatar',\n ] \n # contains instance lists\n self.sprite_groups = defaultdict(list)\n # which sprite types (abstract or not) are singletons?\n self.singletons = []\n # collision effects (ordered by execution order)\n self.collision_eff = []\n # for reading levels\n self.char_mapping = {}\n # termination criteria\n self.terminations = [Termination()]\n self.num_sprites = 0\n self.kill_list=[] \n self.is_stochastic = False\n \n def buildLevel(self, lstr): \n from vgdl.ontology import stochastic_effects\n lines = [l for l in lstr.split(\"\\n\") if len(l)>0]\n lengths = map(len, lines)\n assert min(lengths)==max(lengths), \"Inconsistent line lengths.\"\n self.width = lengths[0]\n self.height = len(lines)\n assert self.width > 1 and self.height > 1, \"Level too small.\"\n # rescale pixels per block to adapt to the level \n self.block_size = max(2,int(800./max(self.width, self.height)))\n self.screensize = (self.width*self.block_size, self.height*self.block_size)\n \n # create sprites\n for row, l in enumerate(lines):\n for col, c in enumerate(l):\n if c in self.char_mapping:\n pos = (col*self.block_size, row*self.block_size)\n self._createSprite(self.char_mapping[c], pos)\n elif c in self.default_mapping:\n pos = (col*self.block_size, row*self.block_size)\n self._createSprite(self.default_mapping[c], pos)\n self.kill_list=[]\n for _, _, effect, _ in self.collision_eff:\n if effect in stochastic_effects:\n self.is_stochastic = True\n \n # guarantee that avatar is always visible \n self.sprite_order.remove('avatar')\n self.sprite_order.append('avatar') \n \n def emptyBlocks(self):\n alls = [s for s in self]\n res = []\n for col in range(self.width):\n for row in range(self.height):\n r = 
pygame.Rect((col*self.block_size, row*self.block_size), (self.block_size, self.block_size))\n free = True\n for s in alls:\n if r.colliderect(s.rect):\n free = False\n break\n if free:\n res.append((col*self.block_size, row*self.block_size))\n return res\n \n def randomizeAvatar(self):\n if len(self.getAvatars()) == 0: \n self._createSprite(['avatar'], choice(self.emptyBlocks())) \n \n def _createSprite(self, keys, pos):\n res = []\n for key in keys:\n if self.num_sprites > self.MAX_SPRITES:\n print \"Sprite limit reached.\"\n return\n sclass, args, stypes = self.sprite_constr[key]\n # verify the singleton condition\n anyother = False\n for pk in stypes[::-1]:\n if pk in self.singletons:\n if self.numSprites(pk) > 0:\n anyother = True\n break\n if anyother:\n continue\n s = sclass(pos=pos, size=(self.block_size, self.block_size), **args)\n s.stypes = stypes\n s.name = key\n self.sprite_groups[key].append(s)\n self.num_sprites += 1\n if s.is_stochastic:\n self.is_stochastic = True\n res.append(s)\n return res\n \n def _initScreen(self, size):\n from vgdl.ontology import LIGHTGRAY\n pygame.init() \n self.screen = pygame.display.set_mode(size)\n self.background = pygame.Surface(size)\n self.background.fill(LIGHTGRAY)\n self.screen.blit(self.background, (0,0)) \n \n def __iter__(self):\n \"\"\" Iterator over all sprites \"\"\"\n for key in self.sprite_order:\n if key not in self.sprite_groups:\n # abstract type\n continue\n for s in self.sprite_groups[key]:\n yield s\n \n def numSprites(self, key):\n \"\"\" Abstract sprite groups are computed on demand only \"\"\"\n deleted = len([s for s in self.kill_list if key in s.stypes])\n if key in self.sprite_groups:\n return len(self.sprite_groups[key])-deleted\n else: \n return len([s for s in self if key in s.stypes])-deleted\n \n def getSprites(self, key):\n if key in self.sprite_groups:\n return [s for s in self.sprite_groups[key] if s not in self.kill_list]\n else:\n return [s for s in self if key in s.stypes and s not in 
self.kill_list]\n \n def getAvatars(self):\n \"\"\" The currently alive avatar(s) \"\"\"\n return [s for s in self if isinstance(s, Avatar) and s not in self.kill_list]\n \n def _clearAll(self, onscreen=True):\n for s in set(self.kill_list):\n if onscreen:\n s._clear(self.screen, self.background, double=True)\n self.sprite_groups[s.name].remove(s)\n if onscreen:\n for s in self:\n s._clear(self.screen, self.background)\n self.kill_list = [] \n \n def _drawAll(self):\n for s in self:\n s._draw(self.screen)\n \n def _updateCollisionDict(self):\n # create a dictionary that maps type pairs to a list of sprite pairs\n self.lastcollisions = defaultdict(list)\n nonstatics = [s for s in self if not s.is_static]\n statics = [s for s in self if s.is_static]\n for i, s1 in enumerate(nonstatics):\n for s2 in (nonstatics+statics)[i+1:]:\n assert s1 != s2\n if s1.rect.colliderect(s2.rect):\n for key1 in s1.stypes:\n for key2 in s2.stypes:\n self.lastcollisions[(key1, key2)].append((s1, s2))\n self.lastcollisions[(key2, key1)].append((s2, s1))\n # detect end-of-screen\n if not pygame.Rect((0,0), self.screensize).contains(s1.rect):\n for key1 in s1.stypes:\n self.lastcollisions[(key1, 'EOS')].append((s1, None))\n \n def _eventHandling(self):\n for g1, g2, effect, args in self.collision_eff:\n for s1, s2 in set(self.lastcollisions[(g1, g2)]):\n # TODO: this is not a bullet-proof way, but seems to work\n if s1 not in self.kill_list:\n effect(s1, s2, self, **args)\n \n def startGame(self): \n self._initScreen(self.screensize)\n clock = pygame.time.Clock()\n self.time = 0\n self.kill_list=[]\n pygame.display.flip()\n ended = False\n win = False\n while not ended:\n clock.tick(self.frame_rate) \n self.time += 1\n self._clearAll() \n # gather events\n pygame.event.pump()\n self.keystate = pygame.key.get_pressed() \n # termination criteria\n for t in self.terminations:\n ended, win = t.isDone(self)\n if ended:\n break \n # update sprites \n for s in self:\n s.update(self) \n # handle 
collision effects\n self._updateCollisionDict()\n self._eventHandling()\n self._drawAll() \n pygame.display.update(VGDLSprite.dirtyrects)\n VGDLSprite.dirtyrects = []\n \n if win:\n print \"Dude, you're a born winner!\"\n else:\n print \"Dang. Try again...\" \n pygame.time.wait(50) \n \n\nclass VGDLSprite(object):\n \"\"\" Base class for all sprite types. \"\"\"\n \n COLOR_DISC = [20,80,140,200]\n dirtyrects = []\n \n is_static= False\n is_avatar= False\n is_stochastic = False\n color = None\n cooldown = 0 # pause ticks in-between two moves \n speed = None \n mass = 1\n physicstype=None\n shrinkfactor=0\n \n def __init__(self, pos, size=(10,10), color=None, speed=None, cooldown=None, physicstype=None, **kwargs):\n self.rect = pygame.Rect(pos, size)\n self.lastrect = self.rect\n if physicstype is not None:\n self.physicstype = physicstype \n elif self.physicstype is None:\n from vgdl.ontology import GridPhysics\n self.physicstype = GridPhysics\n self.physics = self.physicstype(size)\n if speed is not None:\n self.speed = speed\n if cooldown is not None:\n self.cooldown = cooldown\n if color:\n self.color = color\n elif self.color is None:\n self.color = (choice(self.COLOR_DISC), choice(self.COLOR_DISC), choice(self.COLOR_DISC))\n for name, value in kwargs.items():\n if hasattr(self, name):\n self.__dict__[name] = value\n else:\n print \"WARNING: undefined parameter '%s' for sprite '%s'! \"%(name, self.__class__.__name__)\n # how many timesteps ago was the last move?\n self.lastmove = 0 \n \n def update(self, game):\n \"\"\" The main place where subclasses differ. 
\"\"\"\n self.lastrect = self.rect\n # no need to redraw if nothing was updated\n self.lastmove += 1\n if not self.is_static:\n self.physics.passiveMovement(self)\n \n def _updatePos(self, orientation, speed=None):\n if speed is None:\n speed = self.speed\n if not(self.cooldown > self.lastmove or abs(orientation[0])+abs(orientation[1])==0):\n self.rect = self.rect.move((orientation[0]*speed, orientation[1]*speed))\n self.lastmove = 0\n \n def _velocity(self):\n \"\"\" Current velocity vector. \"\"\"\n if self.speed is None or self.speed==0 or not hasattr(self, 'orientation'):\n return (0,0)\n else:\n return (self.orientation[0]*self.speed, self.orientation[1]*self.speed)\n \n @property\n def lastdirection(self):\n return (self.rect[0]-self.lastrect[0], self.rect[1]-self.lastrect[1]) \n \n def _draw(self, screen):\n from vgdl.ontology import LIGHTGREEN\n if self.shrinkfactor != 0:\n shrunk = self.rect.inflate(-self.rect.width*self.shrinkfactor, \n -self.rect.height*self.shrinkfactor)\n else:\n shrunk = self.rect\n \n if self.is_avatar:\n rounded = roundedPoints(shrunk)\n pygame.draw.polygon(screen, self.color, rounded)\n pygame.draw.lines(screen, LIGHTGREEN, True, rounded, 2)\n r = self.rect.copy()\n elif not self.is_static:\n rounded = roundedPoints(shrunk)\n pygame.draw.polygon(screen, self.color, rounded)\n r = self.rect.copy()\n else:\n r = screen.fill(self.color, shrunk)\n VGDLSprite.dirtyrects.append(r)\n \n def _clear(self, screen, background, double=False):\n r = screen.blit(background, self.rect, self.rect)\n VGDLSprite.dirtyrects.append(r)\n if double: \n r = screen.blit(background, self.lastrect, self.lastrect)\n VGDLSprite.dirtyrects.append(r) \n\n def __repr__(self):\n return self.name+\" at (%s,%s)\"%(self.rect.left, self.rect.top)\n\n\nclass Avatar(object):\n \"\"\" Abstract superclass of all avatars. \"\"\"\n shrinkfactor=0.15\n \nclass Termination(object):\n \"\"\" Base class for all termination criteria. 
\"\"\"\n def isDone(self, game):\n \"\"\" returns whether the game is over, with a win/lose flag \"\"\"\n from pygame.locals import K_ESCAPE, QUIT \n if game.keystate[K_ESCAPE] or pygame.event.peek(QUIT):\n return True, False \n else:\n return False, None" }, { "alpha_fraction": 0.8280254602432251, "alphanum_fraction": 0.8407643437385559, "avg_line_length": 78, "blob_id": "532a368084d5f56a34f83f32de13de8df1bd7a07", "content_id": "16dd3de27b941392322111e08d741211c3a778d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 79, "num_lines": 2, "path": "/vgdl/examples/gridphysics/mazes/__init__.py", "repo_name": "sarobe/VGDLEntityCreator", "src_encoding": "UTF-8", "text": "from vgdl.examples.gridphysics.mazes.mazegames import polarmaze_game, maze_game\nfrom vgdl.examples.gridphysics.mazes.simple import maze_level_1, maze_level_2" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 21, "blob_id": "2b15eb4b277ae7e953ec03909d6f2201800fe519", "content_id": "cc0a11b906b14b5c21cde5058dfb34f000d37a02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "no_license", "max_line_length": 21, "num_lines": 1, "path": "/entitycreator/__init__.py", "repo_name": "sarobe/VGDLEntityCreator", "src_encoding": "UTF-8", "text": "__author__ = 'Samuel Roberts'\n" }, { "alpha_fraction": 0.6313925981521606, "alphanum_fraction": 0.6449645757675171, "avg_line_length": 26.6358699798584, "blob_id": "a75aeb7c8c7023cbf3a6af05e908739ded3bdaf0", "content_id": "1589d40c514e7e1e1919dde86b20eb0ddb56e40c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5084, "license_type": "no_license", "max_line_length": 107, "num_lines": 184, "path": "/entitycreator/lunarlandertest.py", "repo_name": "sarobe/VGDLEntityCreator", 
"src_encoding": "UTF-8", "text": "import random\nfrom vgdl.core import VGDLParser\n\n\nUP = (0, -1)\nDOWN = (0, 1)\nLEFT = (-1, 0)\nRIGHT = (1, 0)\n\nBASEDIRS = [UP, LEFT, DOWN, RIGHT]\n\ngravity = 0.5\n\nREPEATS = 1\nACTIONS = 5\n\nended = False\nwin = False\n\n\ndef runLunarLander():\n # import lunar lander\n from vgdl.examples.continuousphysics.lander import lander_game, lander_level\n\n # build the game\n g = VGDLParser().parseGame(lander_game)\n g.buildLevel(lander_level)\n\n # TODO: Determine how to not need to bring up the pygame display in order to run the game.\n g._initScreen([1, 1])\n\n ship = g.getAvatars()[0]\n\n # store initial ship state\n initState = [ship.rect.x, ship.rect.y, ship.speed, ship.orientation]\n\n print \"starting position: \" + str(ship)\n print \"starting state: \" + str(initState)\n # get random actions\n actions = generateInput(ACTIONS)\n\n states = [initState]\n # move ship based on random actions\n print actions\n for a in actions:\n for i in range(REPEATS):\n ship.action = a\n updateGame(g, a)\n if ended:\n print a, i\n break\n states.append(makeState(ship))\n\n endState = states[len(states)-1]\n\n # confirm final position\n print \"first final position after actions: \" + str(ship)\n print \"final state: \" + str(endState)\n\n # reroll ship back to initial state\n setState(ship, initState)\n\n # vary action sequence\n # first pick a point to vary\n random.seed(10466)\n varyIndex = random.randint(0, len(actions) - 1)\n\n # then change that action\n oldAction = actions[varyIndex]\n actions[varyIndex] = BASEDIRS[random.randint(0, len(BASEDIRS) - 1)]\n\n # print out the change and the full list of actions\n print \"changed action \" + str(varyIndex) + \" to \" + str(actions[varyIndex])\n print \"new actions: \" + str(actions)\n\n # predict through simple calculation how the final position should be\n predictState = predictOutcome(states, actions, oldAction, varyIndex)\n print \"predicted state \" + str(predictState)\n\n # find out 
where the actual final position is\n for a in actions:\n for i in range(REPEATS):\n updateGame(g, a)\n if ended:\n print a, i\n break\n\n endState = makeState(ship)\n print \"actual ending position: \" + str(ship)\n print \"ending state: \" + str(endState)\n\n # get error\n error = [endState[0] - predictState[0], endState[1] - predictState[1]]\n print \"prediction error: \" + str(error)\n\n\ndef predictOutcome(states, actions, oldAction, newActionIndex):\n # determine the effective change in action\n newAction = actions[newActionIndex]\n changedAction = [newAction[0] - oldAction[0], newAction[1] - oldAction[1]]\n\n # try one! adjust position only without compensating for velocity!\n # endState = states[len(states)-1]\n # finalX = endState[0] + (changedAction[0] * REPEATS)\n # finalY = endState[1] + (changedAction[1] * REPEATS)\n # didn't work, unsurprisingly\n\n # try two! compensate for velocity!\n endState = states[len(states)-1]\n\n stateOfChange = states[newActionIndex]\n velocityBeforeChange = [stateOfChange[2] * stateOfChange[3][0], stateOfChange[2] * stateOfChange[3][1]]\n velocityCausedByChange = [changedAction[0] * REPEATS, changedAction[1] * REPEATS]\n changedVelocity = [velocityBeforeChange[0] + velocityCausedByChange[0],\n velocityBeforeChange[1] + velocityCausedByChange[1]]\n\n # compensate for gravity\n changedVelocity[1] += gravity * REPEATS\n\n # calculate the ending impact this has on the position after this action\n diffX = changedVelocity[0]\n diffY = changedVelocity[1]\n\n finalX = endState[0] + diffX\n finalY = endState[1] + diffY\n\n return [finalX, finalY]\n\n\ndef makeState(ship):\n return [ship.rect.x, ship.rect.y, ship.speed, ship.orientation]\n\n\ndef setState(ship, state):\n ship.rect.x = state[0]\n ship.rect.y = state[1]\n ship.speed = state[2]\n ship.orientation = state[3]\n\n\ndef generateInput(totalActions):\n random.seed(1234)\n actions = []\n for i in range(totalActions):\n actions.append(BASEDIRS[random.randint(0, len(BASEDIRS) - 
1)])\n\n return actions\n\n\ndef setKeystate(game, action):\n from pygame.locals import K_LEFT, K_RIGHT, K_UP, K_DOWN\n\n # an incredible kludge that makes me somewhat ashamed\n keystate = [False] * 350\n\n if action == RIGHT:\n keystate[K_RIGHT] = True\n elif action == LEFT:\n keystate[K_LEFT] = True\n if action == UP:\n keystate[K_UP] = True\n elif action == DOWN:\n keystate[K_DOWN] = True\n\n return keystate\n\ndef updateGame(game, action):\n game.keystate = setKeystate(game, action)\n setKeystate(game, action)\n # termination criteria\n for t in game.terminations:\n ended, win = t.isDone(game)\n if ended:\n break\n # update sprites\n for s in game:\n s.update(game)\n # handle collision effects\n game._updateCollisionDict()\n game._eventHandling()\n\n\nif __name__ == '__main__':\n runLunarLander()" }, { "alpha_fraction": 0.7388059496879578, "alphanum_fraction": 0.7388059496879578, "avg_line_length": 32.5, "blob_id": "ae5ded88d693459f2095ae8b745b2ffcad13676d", "content_id": "adf6b0897e51da00af5dff0823ef7195f9bcdc9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 134, "license_type": "no_license", "max_line_length": 96, "num_lines": 4, "path": "/README.md", "repo_name": "sarobe/VGDLEntityCreator", "src_encoding": "UTF-8", "text": "VGDLEntityCreator\n=================\n\nAn investigation into generating self-motivated entities within the context of the VGDL project.\n" } ]
5
yz3007/bigdata
https://github.com/yz3007/bigdata
d2581d45aef1286f77a65f45e6e2082d8010ba24
7f084726bec92ad4c3a17702d8e2111ab35b72ec
7aab9305e15e23f28c9e37cb8efa0515edc78312
refs/heads/master
"2021-01-12T07:48:31.234921"
"2016-12-21T02:53:04"
"2016-12-21T02:53:04"
77,021,822
1
0
null
"2016-12-21T05:49:07"
"2016-12-20T17:03:24"
"2016-12-21T02:53:04"
null
[ { "alpha_fraction": 0.7803203463554382, "alphanum_fraction": 0.7848970293998718, "avg_line_length": 108.125, "blob_id": "89f8105fa8417d6328bae3fea110484600a53c8f", "content_id": "9bcfd298b407d184a8e3a1e2b9473bb09ac32a80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 888, "license_type": "no_license", "max_line_length": 386, "num_lines": 8, "path": "/README.md", "repo_name": "yz3007/bigdata", "src_encoding": "UTF-8", "text": "# bigdata EECSE 6893 \nBasically, the program is a python program. You can download the data from here:https://www.kaggle.com/c/expedia-hotel-recommendations/data. The test data dont contain the hotel_cluster , thus if you want to check the accuracy of the recommendation ,you have to split the train data into two parts. Here, training data is “traindata.csv” ,the test data is “testdata.csv”. When you plan \nto run theprogram you have to put the two data files and the program file in the same directory. After running the program, there will\nbe a file called “result.csv” produced. The result file would looks like:\n\nThe first number represent the user id and the five numbers behind represent the clusters recommended.\nWe also write a program to check the accuracy of the result. Similarly, you have to put it in the same \ndirectory with the data file when you run it. 
\n" }, { "alpha_fraction": 0.3954451382160187, "alphanum_fraction": 0.43478259444236755, "avg_line_length": 27.41176414489746, "blob_id": "61d8d390db437500458ed6bcd579ef2ca1cd5111", "content_id": "3ed9ebfa3159981446f10b8d333de62588ccf9e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 483, "license_type": "no_license", "max_line_length": 78, "num_lines": 17, "path": "/accuracy.py", "repo_name": "yz3007/bigdata", "src_encoding": "UTF-8", "text": "with open('result.csv', 'r') as result, open(\"testdata.csv\", 'r') as testdata:\n k = 0.0\n b = 0.0\n result.readline()\n testdata.readline()\n while 1:\n b = b+1\n r1 = result.readline()\n if r1 == '':\n break\n else:\n r = r1.strip().split(\" \")\n t = testdata.readline().strip().split(\",\")\n print t[23],r[1:],t[23] in r[1:]\n if t[23] in r[1:]:\n k = k+1\n print \"Accuracy:\",k/b\n" }, { "alpha_fraction": 0.5361344814300537, "alphanum_fraction": 0.5691176652908325, "avg_line_length": 30.953020095825195, "blob_id": "1fec7fa7debad7ae8d80140d555bcc4fce94673b", "content_id": "25bae11273040c8b8028b98c588231e17c991c50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4760, "license_type": "no_license", "max_line_length": 150, "num_lines": 149, "path": "/recommend.py", "repo_name": "yz3007/bigdata", "src_encoding": "UTF-8", "text": "#usr/bin/python2.7\nfrom pyspark import SparkContext\nfrom pyspark import SparkConf\nfrom operator import add\n\nconf = SparkConf().setAppName(\"expedia_hotel\")\nsc = SparkContext(conf=conf)\n\n\narr = sc.textFile(\"./traindata.csv\")\nprint arr.take(2)\narr = arr.map(lambda x:x.split(\",\"))\n\n\ndef get_best_hotels_od_ulc(arr):\n if arr[5] != '' and arr[6] != '':\n return ((arr[5], arr[6],arr[23]),1)\n else:\n return ((arr[5], arr[6],arr[23]),0)\n\ndef get_best_hotels_search_dest(arr):\n if arr[16] != '' and arr[21] != '' and arr[22] != '' and int(arr[0][:4]) == 2014:\n return 
((arr[16], arr[21], arr[22], arr[23]), int(arr[18]) * 17 + 3)\n else:\n return ((arr[16], arr[21], arr[22], arr[23]), 0)\n\ndef get_best_hotels_search_dest1(arr):\n if arr[16] != '':\n return ((arr[16], arr[23]) ,int(arr[18]) * 17 + 3)\n else:\n return ((arr[16], arr[23]), 0)\n\ndef get_best_hotel_country(arr):\n if arr[21] != '':\n return ((arr[21], arr[23]), 1 + 5 * int(arr[18]))\n else:\n return ((arr[21], arr[23]), 0)\n\ndef get_popular_hotel_cluster(arr):\n return (arr[23],1)\n\n\nbest_hotels_od_ulc = arr.map(lambda x:get_best_hotels_od_ulc(x))\nbest_hotels_od_ulc = best_hotels_od_ulc.foldByKey(0, add).cache()\n\nbest_hotels_search_dest = arr.map(lambda x:get_best_hotels_search_dest(x))\nbest_hotels_search_dest = best_hotels_search_dest.foldByKey(0, add).cache()\n\n\nbest_hotels_search_dest1 = arr.map(lambda x:get_best_hotels_search_dest1(x))\nbest_hotels_search_dest1 = best_hotels_search_dest1.foldByKey(0, add).cache()\n\nbest_hotel_country = arr.map(lambda x:get_best_hotel_country(x))\nbest_hotel_country = best_hotel_country.foldByKey(0, add).cache()\n\npopular_hotel_cluster = arr.map(lambda x:get_popular_hotel_cluster(x))\npopular_hotel_cluster = popular_hotel_cluster.foldByKey(0, add).cache()\n\n\npath = 'result.csv'\nout = open(path, \"w\")\nf = open(\"./testdata.csv\", \"r\")\nschema = f.readline()\ntotal = 0\nout.write(\"id,hotel_cluster\\n\")\ntopclasters = popular_hotel_cluster.sortBy(lambda x: -x[1]).map(lambda x:x[0]).take(5)\nidnumber = 0\nwhile 1:\n line = f.readline().strip()\n total += 1\n\n if total % 10 == 0:\n print('Write {} lines...'.format(total))\n\n if total % 999 == 0:\n break\n\n arr = line.split(\",\")\n id = idnumber\n idnumber = idnumber + 1\n print arr\n user_location_city = arr[5]\n orig_destination_distance = arr[6]\n srch_destination_id = arr[16]\n hotel_country = arr[21]\n hotel_market = arr[22]\n\n out.write(str(id) + ',')\n filled = []\n\n\n Topitems = best_hotels_od_ulc.filter(lambda x:(x[0][0] == 
user_location_city)&(x[0][1] == orig_destination_distance))\n Topitems = Topitems.sortBy(lambda x: -x[1]).map(lambda x:x[0][2])\n topitems = Topitems.take(5)\n for i in range(len(topitems)):\n if topitems[i] in filled:\n continue\n if len(filled) == 5:\n break\n out.write(' ' + topitems[i])\n filled.append(topitems[i])\n\n if len(filled) < 5:\n Topitems = best_hotels_search_dest.filter(lambda x: (x[0][0] == srch_destination_id) & (x[0][1] == hotel_country) & (x[0][2] == hotel_market))\n Topitems = Topitems.sortBy(lambda x: -x[1]).map(lambda x: x[0][3])\n topitems = Topitems.take(5)\n for i in range(len(topitems)):\n if topitems[i] in filled:\n continue\n if len(filled) == 5:\n break\n out.write(' ' + topitems[i])\n filled.append(topitems[i])\n\n if len(filled) < 5:\n if len(topitems) != 0:\n Topitems = best_hotels_search_dest1.filter(lambda x: (x[0][0] == srch_destination_id))\n Topitems = Topitems.sortBy(lambda x: -x[1]).map(lambda x: x[0][1])\n topitems = Topitems.take(5)\n for i in range(len(topitems)):\n if topitems[i] in filled:\n continue\n if len(filled) == 5:\n break\n out.write(' ' + topitems[i])\n filled.append(topitems[i])\n\n if len(filled) < 5:\n Topitems = best_hotel_country.filter(lambda x: (x[0][0] == hotel_country))\n Topitems = Topitems.sortBy(lambda x: -x[1]).map(lambda x: x[0][1])\n for i in range(len(topitems)):\n if topitems[i] in filled:\n continue\n if len(filled) == 5:\n break\n out.write(' ' + topitems[i])\n filled.append(topitems[i])\n\n if len(filled) < 5:\n for i in range(5):\n if topclasters[i] in filled:\n continue\n if len(filled) == 5:\n break\n out.write(' ' + topclasters[i])\n filled.append(topclasters[i])\n out.write(\"\\n\")\nout.close()\nprint('Completed!')" } ]
3
TrackingBird/pfm2png
https://github.com/TrackingBird/pfm2png
086e8b73c2f427880eabfa09f0afaffe256a5485
05de323a395a19c767d35f91409380f07bf8e5f0
68dc2ef7b2fde5ae8606062c3fe125af83da4097
refs/heads/master
"2020-03-28T15:34:45.966499"
"2018-09-13T08:34:21"
"2018-09-13T08:34:21"
148,607,528
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7419354915618896, "alphanum_fraction": 0.774193525314331, "avg_line_length": 14.5, "blob_id": "d2a5995023047857e4786f8f33067c11186f01bf", "content_id": "cf59d16cc171e9e2d42b11b95143154b45b2fe65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 31, "license_type": "no_license", "max_line_length": 20, "num_lines": 2, "path": "/README.md", "repo_name": "TrackingBird/pfm2png", "src_encoding": "UTF-8", "text": "# pfm2png\nconvert pfm into png\n" }, { "alpha_fraction": 0.5737618803977966, "alphanum_fraction": 0.581138014793396, "avg_line_length": 24.648649215698242, "blob_id": "f16ce703b30560d0c528410a905073840769faf3", "content_id": "dd970f1f91c6b64efbfc8f4e9f5edab9b9bab4e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1898, "license_type": "no_license", "max_line_length": 78, "num_lines": 74, "path": "/pfm2png.py", "repo_name": "TrackingBird/pfm2png", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport re\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nimport cv2\n \n'''\nLoad a PFM file into a Numpy array. Note that it will have\na shape of H x W, not W x H. 
Returns a tuple containing the\nloaded image and the scale factor from the file.\n'''\n \n \ndef load_pfm(file):\n color = None\n width = None\n height = None\n scale = None\n endian = None\n \n header = file.readline().rstrip().decode('UTF-8')\n #print(header)\n if header == 'PF':\n color = True\n elif header == 'Pf':\n color = False\n else:\n raise Exception('Not a PFM file.')\n \n dim_match = re.match(r'^(\\d+)\\s(\\d+)\\s$', file.readline().decode('UTF-8'))\n if dim_match:\n width, height = map(int, dim_match.groups())\n else:\n raise Exception('Malformed PFM header.')\n \n scale = float(file.readline().decode('UTF-8').rstrip())\n if scale < 0: # little-endian\n endian = '<'\n scale = -scale\n else:\n endian = '>' # big-endian\n \n data = np.fromfile(file, endian + 'f')\n shape = (height, width, 3) if color else (height, width)\n return np.reshape(data, shape), scale\n \nif len(sys.argv)==1:\n print('Usage: {} scale [files]'.format('pfmToPng'))\n sys.exit()\n \nscale_factor = int(sys.argv[1])\ntemp = sys.argv[2:]\nfiles = []\nfor f in temp:\n if os.path.exists(f):\n files.append(f)\n else:\n print('Skipping {}, file not found'.format(f))\n \nfor i,f in enumerate(files):\n with open(f,'rb') as f_in:\n disp, scale = load_pfm(f_in)\n disp[np.where(disp>0)]=0\n disp = scale_factor * np.flipud(disp)\n disp = disp.astype(np.uint16)\n cv2.imwrite(f.replace('.pfm','.png'),disp)\n print('{}/{}'.format(i,len(files)),end='\\r')\n # pippo = Image.open(path)\n # plt.imshow(pippo)\n # plt.show()\n os.remove(f)\nprint('DONE!')\n" } ]
2
briis/unifiprotect
https://github.com/briis/unifiprotect
516337d0a0d3ec10d7b4b34dd4b9f1437741c9ec
7ba7cdf52826ea0232facfb89c0df1daf003a345
33c6b77cbbbeb00bbf2f992d7a03cfba6102cde1
refs/heads/master
"2023-07-15T20:08:36.782299"
"2022-08-25T17:53:39"
"2022-08-25T17:53:39"
230,199,787
572
53
MIT
"2019-12-26T05:20:12"
"2022-01-14T13:57:15"
"2022-01-14T23:45:07"
Python
[ { "alpha_fraction": 0.6390249729156494, "alphanum_fraction": 0.6404948830604553, "avg_line_length": 32.875518798828125, "blob_id": "10cb6f976245051c3d7c88deaa99395a769d67f8", "content_id": "e61b8dfc3b3803e51ebcad04969a592410a452cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8164, "license_type": "permissive", "max_line_length": 88, "num_lines": 241, "path": "/custom_components/unifiprotect/__init__.py", "repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "\"\"\"UniFi Protect Platform.\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nfrom datetime import timedelta\nimport logging\n\nfrom aiohttp import CookieJar\nfrom aiohttp.client_exceptions import ServerDisconnectedError\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import (\n CONF_HOST,\n CONF_PASSWORD,\n CONF_PORT,\n CONF_USERNAME,\n CONF_VERIFY_SSL,\n EVENT_HOMEASSISTANT_STOP,\n Platform,\n)\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady\nfrom homeassistant.helpers import entity_registry as er\nfrom homeassistant.helpers.aiohttp_client import async_create_clientsession\nfrom pyunifiprotect import NotAuthorized, NvrError, ProtectApiClient\nfrom pyunifiprotect.data import ModelType\n\nfrom .const import (\n CONF_ALL_UPDATES,\n CONF_DOORBELL_TEXT,\n CONF_OVERRIDE_CHOST,\n CONFIG_OPTIONS,\n DEFAULT_SCAN_INTERVAL,\n DEVICES_FOR_SUBSCRIBE,\n DEVICES_THAT_ADOPT,\n DOMAIN,\n MIN_REQUIRED_PROTECT_V,\n OUTDATED_LOG_MESSAGE,\n PLATFORMS,\n)\nfrom .data import ProtectData\nfrom .services import async_cleanup_services, async_setup_services\n\n_LOGGER = logging.getLogger(__name__)\n\nSCAN_INTERVAL = timedelta(seconds=DEFAULT_SCAN_INTERVAL)\n\n\n@callback\nasync def _async_migrate_data(\n hass: HomeAssistant, entry: ConfigEntry, protect: ProtectApiClient\n) -> None:\n # already up to date, skip\n if 
CONF_ALL_UPDATES in entry.options:\n return\n\n _LOGGER.info(\"Starting entity migration...\")\n\n # migrate entry\n options = dict(entry.options)\n data = dict(entry.data)\n options[CONF_ALL_UPDATES] = False\n if CONF_DOORBELL_TEXT in options:\n del options[CONF_DOORBELL_TEXT]\n hass.config_entries.async_update_entry(entry, data=data, options=options)\n\n # migrate entities\n registry = er.async_get(hass)\n mac_to_id: dict[str, str] = {}\n mac_to_channel_id: dict[str, str] = {}\n bootstrap = await protect.get_bootstrap()\n for model in DEVICES_THAT_ADOPT:\n attr = model.value + \"s\"\n for device in getattr(bootstrap, attr).values():\n mac_to_id[device.mac] = device.id\n if model != ModelType.CAMERA:\n continue\n\n for channel in device.channels:\n channel_id = str(channel.id)\n if channel.is_rtsp_enabled:\n break\n mac_to_channel_id[device.mac] = channel_id\n\n count = 0\n entities = er.async_entries_for_config_entry(registry, entry.entry_id)\n for entity in entities:\n new_unique_id: str | None = None\n if entity.domain != Platform.CAMERA.value:\n parts = entity.unique_id.split(\"_\")\n if len(parts) >= 2:\n device_or_key = \"_\".join(parts[:-1])\n mac = parts[-1]\n\n device_id = mac_to_id[mac]\n if device_or_key == device_id:\n new_unique_id = device_id\n else:\n new_unique_id = f\"{device_id}_{device_or_key}\"\n else:\n parts = entity.unique_id.split(\"_\")\n if len(parts) == 2:\n mac = parts[1]\n device_id = mac_to_id[mac]\n channel_id = mac_to_channel_id[mac]\n new_unique_id = f\"{device_id}_{channel_id}\"\n else:\n device_id = parts[0]\n channel_id = parts[2]\n extra = \"\" if len(parts) == 3 else \"_insecure\"\n new_unique_id = f\"{device_id}_{channel_id}{extra}\"\n\n if new_unique_id is None:\n continue\n\n _LOGGER.debug(\n \"Migrating entity %s (old unique_id: %s, new unique_id: %s)\",\n entity.entity_id,\n entity.unique_id,\n new_unique_id,\n )\n try:\n registry.async_update_entity(entity.entity_id, new_unique_id=new_unique_id)\n except ValueError:\n 
_LOGGER.warning(\n \"Could not migrate entity %s (old unique_id: %s, new unique_id: %s)\",\n entity.entity_id,\n entity.unique_id,\n new_unique_id,\n )\n else:\n count += 1\n\n _LOGGER.info(\"Migrated %s entities\", count)\n if count != len(entities):\n _LOGGER.warning(\"%s entities not migrated\", len(entities) - count)\n\n\n@callback\ndef _async_import_options_from_data_if_missing(\n hass: HomeAssistant, entry: ConfigEntry\n) -> None:\n options = dict(entry.options)\n data = dict(entry.data)\n modified = False\n for importable_option in CONFIG_OPTIONS:\n if importable_option not in entry.options and importable_option in entry.data:\n options[importable_option] = entry.data[importable_option]\n del data[importable_option]\n modified = True\n\n if modified:\n hass.config_entries.async_update_entry(entry, data=data, options=options)\n\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n \"\"\"Set up the UniFi Protect config entries.\"\"\"\n _async_import_options_from_data_if_missing(hass, entry)\n\n session = async_create_clientsession(hass, cookie_jar=CookieJar(unsafe=True))\n protect = ProtectApiClient(\n host=entry.data[CONF_HOST],\n port=entry.data[CONF_PORT],\n username=entry.data[CONF_USERNAME],\n password=entry.data[CONF_PASSWORD],\n verify_ssl=entry.data[CONF_VERIFY_SSL],\n session=session,\n subscribed_models=DEVICES_FOR_SUBSCRIBE,\n override_connection_host=entry.options.get(CONF_OVERRIDE_CHOST, False),\n ignore_stats=not entry.options.get(CONF_ALL_UPDATES, False),\n )\n _LOGGER.debug(\"Connect to UniFi Protect\")\n data_service = ProtectData(hass, protect, SCAN_INTERVAL, entry)\n\n try:\n nvr_info = await protect.get_nvr()\n except NotAuthorized as err:\n raise ConfigEntryAuthFailed(err) from err\n except (asyncio.TimeoutError, NvrError, ServerDisconnectedError) as err:\n raise ConfigEntryNotReady from err\n\n if nvr_info.version < MIN_REQUIRED_PROTECT_V:\n _LOGGER.error(\n OUTDATED_LOG_MESSAGE,\n nvr_info.version,\n 
MIN_REQUIRED_PROTECT_V,\n )\n return False\n\n await _async_migrate_data(hass, entry, protect)\n if entry.unique_id is None:\n hass.config_entries.async_update_entry(entry, unique_id=nvr_info.mac)\n\n await data_service.async_setup()\n if not data_service.last_update_success:\n raise ConfigEntryNotReady\n\n hass.data.setdefault(DOMAIN, {})[entry.entry_id] = data_service\n hass.config_entries.async_setup_platforms(entry, PLATFORMS)\n async_setup_services(hass)\n\n entry.async_on_unload(entry.add_update_listener(_async_options_updated))\n entry.async_on_unload(\n hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, data_service.async_stop)\n )\n\n return True\n\n\nasync def _async_options_updated(hass: HomeAssistant, entry: ConfigEntry) -> None:\n \"\"\"Update options.\"\"\"\n await hass.config_entries.async_reload(entry.entry_id)\n\n\nasync def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n \"\"\"Unload UniFi Protect config entry.\"\"\"\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n data: ProtectData = hass.data[DOMAIN][entry.entry_id]\n await data.async_stop()\n hass.data[DOMAIN].pop(entry.entry_id)\n async_cleanup_services(hass)\n\n return bool(unload_ok)\n\n\nasync def async_migrate_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n \"\"\"Migrate old entry.\"\"\"\n _LOGGER.debug(\"Migrating from version %s\", config_entry.version)\n\n if config_entry.version == 1:\n new = {**config_entry.data}\n # keep verify SSL false for anyone migrating to maintain backwards compatibility\n new[CONF_VERIFY_SSL] = False\n if CONF_DOORBELL_TEXT in new:\n del new[CONF_DOORBELL_TEXT]\n\n config_entry.version = 2\n hass.config_entries.async_update_entry(config_entry, data=new)\n\n _LOGGER.info(\"Migration to version %s successful\", config_entry.version)\n\n return True\n" }, { "alpha_fraction": 0.7645003199577332, "alphanum_fraction": 0.7812718152999878, "avg_line_length": 64.04545593261719, "blob_id": 
"4e2dc338148787ecb3c7a94f8ae3579b4afe9e39", "content_id": "c8bbe95520da8dae9aad2127a9c134f789368231", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1431, "license_type": "permissive", "max_line_length": 286, "num_lines": 22, "path": "/info.md", "repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "# // UniFi Protect for Home Assistant\n\n![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/briis/unifiprotect?include_prereleases&style=flat-square) [![hacs_badge](https://img.shields.io/badge/HACS-Default-orange.svg?style=flat-square)](https://github.com/custom-components/hacs)\n\n> **NOTE** If you are NOT running UniFi Protect V1.20.0 or higher, you must use the **V0.9.1** of this Integration.\n> Please the [CHANGELOG](https://github.com/briis/unifiprotect/blob/master/CHANGELOG.md) very carefully before you upgrade as there are many breaking changes going from V0.9.1 to 0.10.0\n>\n> You will also need Home Assistant **2021.11+** to upgrade to V0.10.0 as well.\n\nThe UniFi Protect Integration adds support for retrieving Camera feeds and Sensor data from a UniFi Protect installation on either a Ubiquiti CloudKey+, Ubiquiti UniFi Dream Machine Pro (UDMP) or UniFi Protect Network Video Recorder (UNVR).\n\nThere is support for the following entity types within Home Assistant:\n* Camera\n* Sensor\n* Binary Sensor\n* Switch\n* Select\n* Number\n\nIt supports both regular Ubiquiti Cameras and the UniFi Doorbell. 
Camera feeds, Motion Sensors, Doorbell Sensors, Motion Setting Sensors and Switches will be created automatically for each Camera found, once the Integration has been configured.\n\nGo to [Github](https://github.com/briis/unifiprotect) for Pre-requisites and Setup Instructions.\n" }, { "alpha_fraction": 0.6585034132003784, "alphanum_fraction": 0.6588435173034668, "avg_line_length": 31.07272720336914, "blob_id": "79c3f28d421153a0429172a7a0ba44f847a39b4c", "content_id": "1bda37c09bfcf0e6b3b4344baa77066f5a6c7f13", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8820, "license_type": "permissive", "max_line_length": 88, "num_lines": 275, "path": "/custom_components/unifiprotect/binary_sensor.py", "repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "\"\"\"This component provides binary sensors for UniFi Protect.\"\"\"\nfrom __future__ import annotations\n\nfrom copy import copy\nfrom dataclasses import dataclass\nimport logging\n\nfrom homeassistant.components.binary_sensor import (\n BinarySensorDeviceClass,\n BinarySensorEntity,\n BinarySensorEntityDescription,\n)\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import ATTR_LAST_TRIP_TIME, ATTR_MODEL\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.helpers.entity import EntityCategory\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom pyunifiprotect.data import NVR, Camera, Event, Light, MountType, Sensor\n\nfrom .const import DOMAIN\nfrom .data import ProtectData\nfrom .entity import (\n EventThumbnailMixin,\n ProtectDeviceEntity,\n ProtectNVREntity,\n async_all_device_entities,\n)\nfrom .models import ProtectRequiredKeysMixin\nfrom .utils import get_nested_attr\n\n_LOGGER = logging.getLogger(__name__)\n_KEY_DOOR = \"door\"\n\n\n@dataclass\nclass ProtectBinaryEntityDescription(\n ProtectRequiredKeysMixin, BinarySensorEntityDescription\n):\n 
\"\"\"Describes UniFi Protect Binary Sensor entity.\"\"\"\n\n ufp_last_trip_value: str | None = None\n\n\nMOUNT_DEVICE_CLASS_MAP = {\n MountType.GARAGE: BinarySensorDeviceClass.GARAGE_DOOR,\n MountType.WINDOW: BinarySensorDeviceClass.WINDOW,\n MountType.DOOR: BinarySensorDeviceClass.DOOR,\n}\n\n\nCAMERA_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (\n ProtectBinaryEntityDescription(\n key=\"doorbell\",\n name=\"Doorbell\",\n device_class=BinarySensorDeviceClass.OCCUPANCY,\n icon=\"mdi:doorbell-video\",\n ufp_required_field=\"feature_flags.has_chime\",\n ufp_value=\"is_ringing\",\n ufp_last_trip_value=\"last_ring\",\n ),\n ProtectBinaryEntityDescription(\n key=\"dark\",\n name=\"Is Dark\",\n icon=\"mdi:brightness-6\",\n ufp_value=\"is_dark\",\n ),\n)\n\nLIGHT_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (\n ProtectBinaryEntityDescription(\n key=\"dark\",\n name=\"Is Dark\",\n icon=\"mdi:brightness-6\",\n ufp_value=\"is_dark\",\n ),\n ProtectBinaryEntityDescription(\n key=\"motion\",\n name=\"Motion Detected\",\n device_class=BinarySensorDeviceClass.MOTION,\n ufp_value=\"is_pir_motion_detected\",\n ufp_last_trip_value=\"last_motion\",\n ),\n)\n\nSENSE_SENSORS: tuple[ProtectBinaryEntityDescription, ...] 
= (\n ProtectBinaryEntityDescription(\n key=_KEY_DOOR,\n name=\"Contact\",\n device_class=BinarySensorDeviceClass.DOOR,\n ufp_value=\"is_opened\",\n ufp_last_trip_value=\"open_status_changed_at\",\n ufp_enabled=\"is_contact_sensor_enabled\",\n ),\n ProtectBinaryEntityDescription(\n key=\"battery_low\",\n name=\"Battery low\",\n device_class=BinarySensorDeviceClass.BATTERY,\n entity_category=EntityCategory.DIAGNOSTIC,\n ufp_value=\"battery_status.is_low\",\n ),\n ProtectBinaryEntityDescription(\n key=\"motion\",\n name=\"Motion Detected\",\n device_class=BinarySensorDeviceClass.MOTION,\n ufp_value=\"is_motion_detected\",\n ufp_last_trip_value=\"motion_detected_at\",\n ufp_enabled=\"is_motion_sensor_enabled\",\n ),\n ProtectBinaryEntityDescription(\n key=\"tampering\",\n name=\"Tampering Detected\",\n device_class=BinarySensorDeviceClass.TAMPER,\n ufp_value=\"is_tampering_detected\",\n ufp_last_trip_value=\"tampering_detected_at\",\n ),\n)\n\nMOTION_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (\n ProtectBinaryEntityDescription(\n key=\"motion\",\n name=\"Motion\",\n device_class=BinarySensorDeviceClass.MOTION,\n ufp_value=\"is_motion_detected\",\n ufp_last_trip_value=\"last_motion\",\n ),\n)\n\n\nDISK_SENSORS: tuple[ProtectBinaryEntityDescription, ...] 
= (\n ProtectBinaryEntityDescription(\n key=\"disk_health\",\n name=\"Disk {index} Health\",\n device_class=BinarySensorDeviceClass.PROBLEM,\n entity_category=EntityCategory.DIAGNOSTIC,\n ),\n)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Set up binary sensors for UniFi Protect integration.\"\"\"\n data: ProtectData = hass.data[DOMAIN][entry.entry_id]\n entities: list[ProtectDeviceEntity] = async_all_device_entities(\n data,\n ProtectDeviceBinarySensor,\n camera_descs=CAMERA_SENSORS,\n light_descs=LIGHT_SENSORS,\n sense_descs=SENSE_SENSORS,\n )\n entities += _async_motion_entities(data)\n entities += _async_nvr_entities(data)\n\n async_add_entities(entities)\n\n\n@callback\ndef _async_motion_entities(\n data: ProtectData,\n) -> list[ProtectDeviceEntity]:\n entities: list[ProtectDeviceEntity] = []\n for device in data.api.bootstrap.cameras.values():\n for description in MOTION_SENSORS:\n entities.append(ProtectEventBinarySensor(data, device, description))\n _LOGGER.debug(\n \"Adding binary sensor entity %s for %s\",\n description.name,\n device.name,\n )\n\n return entities\n\n\n@callback\ndef _async_nvr_entities(\n data: ProtectData,\n) -> list[ProtectDeviceEntity]:\n entities: list[ProtectDeviceEntity] = []\n device = data.api.bootstrap.nvr\n for index, _ in enumerate(device.system_info.storage.devices):\n for description in DISK_SENSORS:\n entities.append(\n ProtectDiskBinarySensor(data, device, description, index=index)\n )\n _LOGGER.debug(\n \"Adding binary sensor entity %s\",\n (description.name or \"{index}\").format(index=index),\n )\n\n return entities\n\n\nclass ProtectDeviceBinarySensor(ProtectDeviceEntity, BinarySensorEntity):\n \"\"\"A UniFi Protect Device Binary Sensor.\"\"\"\n\n device: Camera | Light | Sensor\n entity_description: ProtectBinaryEntityDescription\n\n @callback\n def _async_update_device_from_protect(self) -> None:\n 
super()._async_update_device_from_protect()\n\n if self.entity_description.key == \"doorbell\":\n new_value = self.entity_description.get_ufp_value(self.device)\n if new_value != self.is_on:\n _LOGGER.debug(\n \"Changing doorbell sensor from %s to %s\", self.is_on, new_value\n )\n\n self._attr_is_on = self.entity_description.get_ufp_value(self.device)\n if self.entity_description.ufp_last_trip_value is not None:\n last_trip = get_nested_attr(\n self.device, self.entity_description.ufp_last_trip_value\n )\n attrs = self.extra_state_attributes or {}\n self._attr_extra_state_attributes = {\n **attrs,\n ATTR_LAST_TRIP_TIME: last_trip,\n }\n\n # UP Sense can be any of the 3 contact sensor device classes\n if self.entity_description.key == _KEY_DOOR and isinstance(self.device, Sensor):\n self.entity_description.device_class = MOUNT_DEVICE_CLASS_MAP.get(\n self.device.mount_type, BinarySensorDeviceClass.DOOR\n )\n\n\nclass ProtectDiskBinarySensor(ProtectNVREntity, BinarySensorEntity):\n \"\"\"A UniFi Protect NVR Disk Binary Sensor.\"\"\"\n\n entity_description: ProtectBinaryEntityDescription\n\n def __init__(\n self,\n data: ProtectData,\n device: NVR,\n description: ProtectBinaryEntityDescription,\n index: int,\n ) -> None:\n \"\"\"Initialize the Binary Sensor.\"\"\"\n description = copy(description)\n description.key = f\"{description.key}_{index}\"\n description.name = (description.name or \"{index}\").format(index=index)\n self._index = index\n super().__init__(data, device, description)\n\n @callback\n def _async_update_device_from_protect(self) -> None:\n super()._async_update_device_from_protect()\n\n disks = self.device.system_info.storage.devices\n disk_available = len(disks) > self._index\n self._attr_available = self._attr_available and disk_available\n if disk_available:\n disk = disks[self._index]\n self._attr_is_on = not disk.healthy\n self._attr_extra_state_attributes = {ATTR_MODEL: disk.model}\n\n\nclass ProtectEventBinarySensor(EventThumbnailMixin, 
ProtectDeviceBinarySensor):\n \"\"\"A UniFi Protect Device Binary Sensor with access tokens.\"\"\"\n\n device: Camera\n\n @callback\n def _async_get_event(self) -> Event | None:\n \"\"\"Get event from Protect device.\"\"\"\n\n event: Event | None = None\n if self.device.is_motion_detected and self.device.last_motion_event is not None:\n event = self.device.last_motion_event\n\n return event\n" }, { "alpha_fraction": 0.7024221420288086, "alphanum_fraction": 0.7214533090591431, "avg_line_length": 56.79999923706055, "blob_id": "603fb01740e2d42f2ec22218318a57b9edb6d478", "content_id": "fc28e8f92c1e6d35cfae54f9d0fb0357c5b7083c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 578, "license_type": "permissive", "max_line_length": 116, "num_lines": 10, "path": "/.devcontainer/Dockerfile", "repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "FROM ghcr.io/ludeeus/devcontainer/integration:stable\n\nRUN apt update \\\n && sudo apt install -y libpcap-dev ffmpeg vim curl jq libturbojpeg0 \\\n && mkdir -p /opt \\\n && cd /opt \\\n && git clone --depth=1 -b 2021.12.7 https://github.com/home-assistant/core.git hass \\\n && python3 -m pip --disable-pip-version-check install --upgrade ./hass \\\n && python3 -m pip install pyunifiprotect mypy black isort pyupgrade pylint pylint_strict_informational \\\n && ln -s /workspaces/unifiprotect/custom_components/unifiprotect /opt/hass/homeassistant/components/unifiprotect\n" }, { "alpha_fraction": 0.6840721964836121, "alphanum_fraction": 0.6844226121902466, "avg_line_length": 32.180233001708984, "blob_id": "249562a110b4e0dd08580631a2d9b9aea73b6f20", "content_id": "5cac350ebfff0a0cc1c9f3e9647a86b383d3041e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5707, "license_type": "permissive", "max_line_length": 88, "num_lines": 172, "path": "/custom_components/unifiprotect/services.py", 
"repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "\"\"\"UniFi Protect Integration services.\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport functools\nfrom typing import Any\n\nfrom homeassistant.config_entries import ConfigEntryState\nfrom homeassistant.const import ATTR_DEVICE_ID\nfrom homeassistant.core import HomeAssistant, ServiceCall, callback\nfrom homeassistant.exceptions import HomeAssistantError\nfrom homeassistant.helpers import config_validation as cv, device_registry as dr\nfrom homeassistant.helpers.service import async_extract_referenced_entity_ids\nfrom pydantic import ValidationError\nfrom pyunifiprotect.api import ProtectApiClient\nfrom pyunifiprotect.exceptions import BadRequest\nimport voluptuous as vol\n\nfrom .const import ATTR_MESSAGE, DOMAIN\nfrom .data import ProtectData\n\nSERVICE_ADD_DOORBELL_TEXT = \"add_doorbell_text\"\nSERVICE_REMOVE_DOORBELL_TEXT = \"remove_doorbell_text\"\nSERVICE_SET_DEFAULT_DOORBELL_TEXT = \"set_default_doorbell_text\"\n\nALL_GLOBAL_SERIVCES = [\n SERVICE_ADD_DOORBELL_TEXT,\n SERVICE_REMOVE_DOORBELL_TEXT,\n SERVICE_SET_DEFAULT_DOORBELL_TEXT,\n]\n\nDOORBELL_TEXT_SCHEMA = vol.All(\n vol.Schema(\n {\n **cv.ENTITY_SERVICE_FIELDS,\n vol.Required(ATTR_MESSAGE): cv.string,\n },\n ),\n cv.has_at_least_one_key(ATTR_DEVICE_ID),\n)\n\n\ndef _async_all_ufp_instances(hass: HomeAssistant) -> list[ProtectApiClient]:\n \"\"\"All active UFP instances.\"\"\"\n return [\n data.api for data in hass.data[DOMAIN].values() if isinstance(data, ProtectData)\n ]\n\n\n@callback\ndef _async_unifi_mac_from_hass(mac: str) -> str:\n # MAC addresses in UFP are always caps\n return mac.replace(\":\", \"\").upper()\n\n\n@callback\ndef _async_get_macs_for_device(device_entry: dr.DeviceEntry) -> list[str]:\n return [\n _async_unifi_mac_from_hass(cval)\n for ctype, cval in device_entry.connections\n if ctype == dr.CONNECTION_NETWORK_MAC\n ]\n\n\n@callback\ndef _async_get_ufp_instances(\n hass: HomeAssistant, 
device_id: str\n) -> tuple[dr.DeviceEntry, ProtectApiClient]:\n device_registry = dr.async_get(hass)\n if not (device_entry := device_registry.async_get(device_id)):\n raise HomeAssistantError(f\"No device found for device id: {device_id}\")\n\n if device_entry.via_device_id is not None:\n return _async_get_ufp_instances(hass, device_entry.via_device_id)\n\n macs = _async_get_macs_for_device(device_entry)\n ufp_instances = [\n i for i in _async_all_ufp_instances(hass) if i.bootstrap.nvr.mac in macs\n ]\n\n if not ufp_instances:\n # should not be possible unless user manually enters a bad device ID\n raise HomeAssistantError( # pragma: no cover\n f\"No UniFi Protect NVR found for device ID: {device_id}\"\n )\n\n return device_entry, ufp_instances[0]\n\n\n@callback\ndef _async_get_protect_from_call(\n hass: HomeAssistant, call: ServiceCall\n) -> list[tuple[dr.DeviceEntry, ProtectApiClient]]:\n referenced = async_extract_referenced_entity_ids(hass, call)\n\n instances: list[tuple[dr.DeviceEntry, ProtectApiClient]] = []\n for device_id in referenced.referenced_devices:\n instances.append(_async_get_ufp_instances(hass, device_id))\n\n return instances\n\n\nasync def _async_call_nvr(\n instances: list[tuple[dr.DeviceEntry, ProtectApiClient]],\n method: str,\n *args: Any,\n **kwargs: Any,\n) -> None:\n try:\n await asyncio.gather(\n *(getattr(i.bootstrap.nvr, method)(*args, **kwargs) for _, i in instances)\n )\n except (BadRequest, ValidationError) as err:\n raise HomeAssistantError(str(err)) from err\n\n\nasync def add_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None:\n \"\"\"Add a custom doorbell text message.\"\"\"\n message: str = call.data[ATTR_MESSAGE]\n instances = _async_get_protect_from_call(hass, call)\n await _async_call_nvr(instances, \"add_custom_doorbell_message\", message)\n\n\nasync def remove_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None:\n \"\"\"Remove a custom doorbell text message.\"\"\"\n message: str = 
call.data[ATTR_MESSAGE]\n instances = _async_get_protect_from_call(hass, call)\n await _async_call_nvr(instances, \"remove_custom_doorbell_message\", message)\n\n\nasync def set_default_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None:\n \"\"\"Set the default doorbell text message.\"\"\"\n message: str = call.data[ATTR_MESSAGE]\n instances = _async_get_protect_from_call(hass, call)\n await _async_call_nvr(instances, \"set_default_doorbell_message\", message)\n\n\ndef async_setup_services(hass: HomeAssistant) -> None:\n \"\"\"Set up the global UniFi Protect services.\"\"\"\n services = [\n (\n SERVICE_ADD_DOORBELL_TEXT,\n functools.partial(add_doorbell_text, hass),\n DOORBELL_TEXT_SCHEMA,\n ),\n (\n SERVICE_REMOVE_DOORBELL_TEXT,\n functools.partial(remove_doorbell_text, hass),\n DOORBELL_TEXT_SCHEMA,\n ),\n (\n SERVICE_SET_DEFAULT_DOORBELL_TEXT,\n functools.partial(set_default_doorbell_text, hass),\n DOORBELL_TEXT_SCHEMA,\n ),\n ]\n for name, method, schema in services:\n if hass.services.has_service(DOMAIN, name):\n continue\n hass.services.async_register(DOMAIN, name, method, schema=schema)\n\n\ndef async_cleanup_services(hass: HomeAssistant) -> None:\n \"\"\"Cleanup global UniFi Protect services (if all config entries unloaded).\"\"\"\n loaded_entries = [\n entry\n for entry in hass.config_entries.async_entries(DOMAIN)\n if entry.state == ConfigEntryState.LOADED\n ]\n if len(loaded_entries) == 1:\n for name in ALL_GLOBAL_SERIVCES:\n hass.services.async_remove(DOMAIN, name)\n" }, { "alpha_fraction": 0.6795711517333984, "alphanum_fraction": 0.6795711517333984, "avg_line_length": 28.456140518188477, "blob_id": "12c86c4859f83eeb4f64f10d867f249e398b6130", "content_id": "f0a759e62811b5c36cbec73dc58bb78e37df6783", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1679, "license_type": "permissive", "max_line_length": 83, "num_lines": 57, "path": "/custom_components/unifiprotect/button.py", 
"repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "\"\"\"Support for Ubiquiti's UniFi Protect NVR.\"\"\"\nfrom __future__ import annotations\n\nimport logging\n\nfrom homeassistant.components.button import ButtonDeviceClass, ButtonEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom pyunifiprotect.data.base import ProtectAdoptableDeviceModel\n\nfrom .const import DEVICES_THAT_ADOPT, DOMAIN\nfrom .data import ProtectData\nfrom .entity import ProtectDeviceEntity\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Discover devices on a UniFi Protect NVR.\"\"\"\n data: ProtectData = hass.data[DOMAIN][entry.entry_id]\n\n async_add_entities(\n [\n ProtectButton(\n data,\n device,\n )\n for device in data.get_by_types(DEVICES_THAT_ADOPT)\n ]\n )\n\n\nclass ProtectButton(ProtectDeviceEntity, ButtonEntity):\n \"\"\"A Ubiquiti UniFi Protect Reboot button.\"\"\"\n\n _attr_entity_registry_enabled_default = False\n _attr_device_class = ButtonDeviceClass.RESTART\n\n def __init__(\n self,\n data: ProtectData,\n device: ProtectAdoptableDeviceModel,\n ) -> None:\n \"\"\"Initialize an UniFi camera.\"\"\"\n super().__init__(data, device)\n self._attr_name = f\"{self.device.name} Reboot Device\"\n\n async def async_press(self) -> None:\n \"\"\"Press the button.\"\"\"\n\n _LOGGER.debug(\"Rebooting %s with id %s\", self.device.model, self.device.id)\n await self.device.reboot()\n" }, { "alpha_fraction": 0.7516189813613892, "alphanum_fraction": 0.7668944597244263, "avg_line_length": 79.2700424194336, "blob_id": "b304be64a5884248c35b341007833be7f2b4561e", "content_id": "c944fbdac8060e00fd259314560a359890144a6d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", 
"length_bytes": 95132, "license_type": "permissive", "max_line_length": 776, "num_lines": 1185, "path": "/CHANGELOG.md", "repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "# // Changelog\n\n## 0.12.0\n\n0.12.0 was originally planned as a beta only release, but after giving it more thought, I figured it would be be great to mark it as stable for the folks that cannot upgrade to the HA core version in 2022.2.\n\nThis release is primarily fixes from the HA core process. There is also full support added for the G4 Doorbell Pro, the UP Sense.\n\nThis will be the **last** HACS release. After this point, the HACS repo is considered deprecated. We will still take issues in the repo as if people prefer to make them here instead of the HA core repo. But after a month or 2 we plan to archive the repo and have the integration removed from HACS.\n\n### Differences between HACS version 0.12.0 and HA 2022.2.0b1 version:\n\n#### HACS Only\n\n* Migration code for updating from `0.10.x` or older still exists; this code has been _removed_ in the HA core version\n\n#### HA Core Only\n\n* Full language support. All of the languages HA core supports via Lokalise has been added to the ingration.\n\n* Auto-discovery. If you have a Dream machine or a Cloud Key/UNVR on the same VLAN, the UniFi Protect integration will automatically be discovered and prompted for setup.\n\n* UP Doorlock support. The HA core version has full support for the newly release EA UP Doorlock.\n\n### Changes\n\n* `CHANGE`: **BREAKING CHANGE** Removes all deprecations outlined in the 0.11.x release.\n\n* `CHANGE`: **BREAKING CHANGE** The \"Chime Duration\" number entity has been replaced with a \"Chime Type\" select entity. This makes Home Assistant work the same way as UniFi Protect. (https://github.com/briis/unifiprotect/issues/451)\n\n* `CHANGE`: **BREAKING CHANGE** Smart Sensor support has been overhauled and improved. 
If you have Smart Sensors, it is _highly recommended to delete your UniFi Protect integration config and re-add it_. Some of the categories for the sensors have changed and it is not easy to change those without re-adding the integration. The sensors for the Smart Sensor are may also appear unavaiable if that sensor is not configured to be abled. For example, if your have motion disabled on your Sensor in UniFi Protect, the motion sensor will be unavaiable in Home Assistnat. Full list of new Smart Sensor entites:\n\n * Alarm Sound and Tampering binary sensors\n * Motion Sensitivity number\n * Mount Type and Paired Camera selects\n * Status Light switch\n * Configuration switches for various sensors:\n * Motion Detection switch\n * Temperature Sensor switch\n * Humidity Sensor switch\n * Light Sensor switch\n * Alarm Sound Detection switch\n\n* `CHANGE`: **BREAKING CHANGE** Removes `profile_ws` debug service. Core plans to add a more centralized way of getting debug information from an integration. This will be back in some form after that feature is added (estimate: 1-2 major core releases).\n\n* `CHANGE`: **BREAKING CHANGE** Removes `event_thumbnail` attribute and associated `ThumbnailProxyView`. After a lot of discussion, core does not want to add more attributes with access tokens inside of attributes. We plan to add back event thumbnails in some form again. If you would like to follow along with the dicussion, checkout the [architecure dicussion for it](https://github.com/home-assistant/architecture/discussions/705).\n\n* `CHANGE`: Switches Doorbell binary_sensor to use `is_ringing` attr, should great improve relaiability of the sensor\n\n* `CHANGE`: Dynamic select options for Doorbell Text\n\n* `CHANGE`: Improves names for a number of entities\n\n* `CHANGE`: Adds a bunch of extra debug logging for entity updates\n\n* `NEW`: Adds full support for the package camera for the G4 Doorbell Pro. 
It should now always be enabled by default (if you are upgrading from an older version, it will still be disabled). The snapshot for the Package Camera has also been fixed. Since the camera if only 2 FPS, _streaming is disabled_ to prevent buffering.\n\n* `FIX`: Overhaul of the Websocket code. Websocket reconnects should be drastically improved. Hopefully all reconnnect issues should be gone now.\n\n* `FIX`: Fixes NVR memory sensor if no data is reported\n\n* `FIX`: Fixes spelling typo with Recording Capacity sensor (https://github.com/briis/unifiprotect/issues/440)\n\n* `FIX`: Fixes `is_connected` check for cameras\n\n* `FIX`: Adds back `last_trip_time` attribute to camera motion entity\n\n* `FIX`: Fixes NVR memory sensor if no data is reported\n\n* `FIX`: Fixes spelling typo with Recording Capacity sensor (https://github.com/briis/unifiprotect/issues/440)\n\n* `FIX`: Further improves relibility of Doorbell binary_sensor\n\n* `FIX`: Fixes voltage unit for doorbell voltage sensor\n\n* `FIX`: Fixes `connection_host` for Cameras so it can have DNS hosts in addition to IPs.\n\n* `FIX`: Improves relibility of entities when UniFi Protect goes offline and/or a device goes offline. Everything recovery seemlessly when UniFi Protect upgrades or firmware updates are applied (fixes https://github.com/briis/unifiprotect/issues/432).\n\n* `FIX`: Improves relibility of `media_player` entities so they should report state better and be able to play longer audio clips.\n\n* `FIX`: Fixes stopping in progress audio for `media_player` entities.\n\n* `FIX`: Allows DNS hosts in addition to IP addresses (fixes https://github.com/briis/unifiprotect/issues/431).\n\n* `FIX`: Fixes selection of default camera entity for when it is not the High Quality channel.\n\n* `FIX`: Fixes https://github.com/briis/unifiprotect/issues/428. 
All string enums are now case insensitive.\n\n* `FIX`: Fixes https://github.com/briis/unifiprotect/issues/427, affected cameras will automatically be converted to Detections recording mode.\n\n## 0.12.0-beta10\n\nThis is the last planned release for the HACS version. This release primarily adds new features for the G4 Doorbell Pro and the Smart Sensor. This release does unfortunatly have a couple of breaking changes for people with doorbells and Smart Sensors which are avoidable due to how soon the Home Assistant core release is.\n\n* `CHANGE`: **BREAKING CHANGE** The \"Chime Duration\" number entity has been replaced with a \"Chime Type\" select entity. This makes Home Assistant work the same way as UniFi Protect. (https://github.com/briis/unifiprotect/issues/451)\n\n* `CHANGE`: **BREAKING CHANGE** Smart Sensor support has been overhauled and improved. If you have Smart Sensors, it is _highly recommended to delete your UniFi Protect integration config and re-add it_. Some of the categories for the sensors have changed and it is not easy to change those without re-adding the integration. The sensors for the Smart Sensor are may also appear unavaiable if that sensor is not configured to be abled. For example, if your have motion disabled on your Sensor in UniFi Protect, the motion sensor will be unavaiable in Home Assistnat. Full list of new Smart Sensor entites:\n\n * Alarm Sound and Tampering binary sensors\n * Motion Sensitivity number\n * Mount Type and Paired Camera selects\n * Status Light switch\n * Configuration switches for various sensors:\n * Motion Detection switch\n * Temperature Sensor switch\n * Humidity Sensor switch\n * Light Sensor switch\n * Alarm Sound Detection switch\n\n* `CHANGE`: Adds full support for the package camera for the G4 Doorbell Pro. It should now always be enabled by default (if you are upgrading from an older version, it will still be disabled). The snapshot for the Package Camera has also been fixed. 
Since the camera if only 2 FPS, _streaming is disabled_ to prevent buffering.\n\n* `FIX`: Overhaul of the Websocket code. Websocket reconnects should be drastically improved. Hopefully all reconnnect issues should be gone now.\n\n## 0.12.0-beta9\n\nHome Assistant core port complete! The version that is in `2022.2` will officially have all of the same features. This is the final backport version to make sure the two versions are equal. The only difference between `0.12.0-beta9` and the code in `2022.2` is\n\n* Migration code from `< 0.11.x` has been dropped. You must be on at least `0.11.0` or newer to migrate to the Home Assistant core version.\n\nAdditionally, we could not add _every_ feature from the HACS version to the HA core version so there are 2 additional breaking changes in this release (sorry!):\n\n* `CHANGE`: **BREAKING CHANGE** Removes `profile_ws` and `take_sample` debug services. Core plans to add a more centralized way of getting debug information from an integration. This will be back in some form after that feature is added (estimate: 1-2 major core releases).\n\n* `CHANGE`: **BREAKING CHANGE** Removes `event_thumbnail` attribute and associated `ThumbnailProxyView`. After a lot of discussion, core does not want to add more attributes with access tokens inside of attributes. We plan to add back event thumbnails in some form again. If you would like to follow along with the dicussion, checkout the [architecure dicussion for it](https://github.com/home-assistant/architecture/discussions/705).\n\nGoing forward, there will be some new features for the 0.12.0-beta / core version that will be developed for core version and then be backported to the HACS version. 
These include improvements for the G4 Doorbell Pro and the UP Sense devices.\n\n## 0.12.0-beta8\n\n* `FIX`: Fixes NVR memory sensor if no data is reported\n\n* `FIX`: Fixes spelling typo with Recording Capacity sensor (https://github.com/briis/unifiprotect/issues/440)\n\n* `FIX`: Fixes `is_connected` check for cameras\n\n* `FIX`: Adds back `last_trip_time` attribute to camera motion entity\n\n## 0.12.0-beta7\n\n* `FIX`: Fixes NVR memory sensor if no data is reported\n\n* `FIX`: Fixes spelling typo with Recording Capacity sensor (https://github.com/briis/unifiprotect/issues/440)\n\n* `FIX`: Fixes is_connected check for cameras\n\n* `FIX`: Adds back `last_trip_time` attribute to camera motion entity\n\n## 0.12.0-beta7\n\n* `FIX`: Improve relibility of Websocket reconnects\n\n* `FIX`: Further improves relibility of Doorbell binary_sensor\n\n## 0.12.0-beta6\n\n* `CHANGE`: Switches Doorbell binary_sensor to use `is_ringing` attr, should great improve relaiability of the sensor\n\n* `NEW`: Adds `take_sample` service to help with debugging/issue reporting\n\n* `FIX`: Fixes voltage unit for doorbell voltage sensor\n\nBackports changes from Home Assistant core merge process:\n\n* `CHANGE`: Dynamic select options for Doorbell Text\n\n* `CHANGE`: Improves names for a number of entities\n\n* `CHANGE`: Adds a bunch of extra debug logging for entity updates\n\n## 0.12.0-beta5\n\n* `FIX`: Fixes `connection_host` for Cameras so it can have DNS hosts in addition to IPs.\n\n## 0.12.0-beta4\n\nBackports fixes from Home Assistant core merge process:\n\n* `FIX`: Improves relibility of entities when UniFi Protect goes offline and/or a device goes offline. 
Everything recovery seemlessly when UniFi Protect upgrades or firmware updates are applied (fixes https://github.com/briis/unifiprotect/issues/432).\n\n* `FIX`: Improves relibility of `media_player` entities so they should report state better and be able to play longer audio clips.\n\n* `FIX`: Fixes stopping in progress audio for `media_player` entities.\n\n* `FIX`: Allows DNS hosts in addition to IP addresses (fixes https://github.com/briis/unifiprotect/issues/431).\n\n* `FIX`: Fixes selection of default camera entity for when it is not the High Quality channel.\n\n## 0.12.0-beta3\n\n* `FIX`: Fixes https://github.com/briis/unifiprotect/issues/428. All string enums are now case insensitive.\n\n## 0.12.0-beta2\n\n* `FIX`: Fixes https://github.com/briis/unifiprotect/issues/427, affected cameras will automatically be converted to Detections recording mode.\n\n## 0.12.0-beta1\n\nThe 0.12.0-beta is designed to be a \"beta only\" release. There will not be a stable release for it. It is designed to test the final changes needed to merge the unifiprotect into Home Assistant core.\n\n* `CHANGE`: **BREAKING CHANGE** Removes all deprecations outlined in the 0.11.x release.\n\n## 0.11.2\n\n* `FIX`: Setting up camera entities will no longer error if a camera does not have a channel. Will now result in log and continue\n\n* `FIX`: Unadopted entities are ignored (fixes #420)\n\n* `FIX`: Event thumbnails now return instantly using newer endpoint from UniFi Protect. They appear to come back as a camera snapshot until after the events ends, but they should always return an image now.\n\n## 0.11.1\n\n### Deprecations\n\nAs an amended to the deprecations from 0.11.0, the `last_tripped_time` is _no longer_ deprecated as `last_changed` is not a full replacement (#411)\n\n### Other changes\n\n* `FIX`: Bumps version of `pyunifiprotect` to 1.3.4. 
This will fix talkback for all cameras that was not working as expected\n\n## 0.11.0\n\n### Deprecations\n\n0.11 is last major release planned before we merge the `unifiprotect` integration into core. As a result, a number of features are being removed when we merged into core.\n\nThe following services will be removed in the next version:\n\n* `unifiprotect.set_recording_mode` -- use the select introduced in 0.10 instead\n* `unifiprotect.set_ir_mode` -- use the select entity introduced in 0.10 instead\n* `unifiprotect.set_status_light` -- use the switch entity on the camera device instead\n* `unifiprotect.set_hdr_mode` -- use the switch entity on the camera device instead\n* `unifiprotect.set_highfps_video_mode` -- use the switch entity on the camera device instead\n* `unifiprotect.set_doorbell_lcd_message` -- use the select entity introduced in 0.10 instead\n* `unifiprotect.set_mic_volume` -- use the number entity introduced in 0.10 instead\n* `unifiprotect.set_privacy_mode` -- use the switch entity introduced in 0.10 instead\n* `unifiprotect.set_zoom_position` -- use the number entity introduced in 0.10 instead\n* `unifiprotect.set_wdr_value` -- use the number entity introduced in 0.10 instead\n* `unifiprotect.light_settings` -- use the select entity introduced in 0.10 instead\n* `unifiprotect.set_viewport_view` -- use the select entity introduced in 0.10 instead\n\nThe following events will be removed in the next version:\n\n* `unifiprotect_doorbell` -- use a State Changed event on \"Doorbell\" binary sensor on the device instead\n* `unifiprotect_motion` -- use a State Changed event on the \"Motion\" binary sensor on the device instead\n\nThe following entities will be removed in the next version:\n\n* The \"Motion Recording\" sensor for cameras (in favor of the \"Recording Mode\" select)\n* The \"Light Turn On\" sensor for flood lights (in favor of the \"Lighting\" select)\n\nAll of following attributes should be duplicated data that can be gotten from other 
devices/entities and as such, they will be removed in the next version.\n\n* `device_model` will be removed from all entities -- provided in the UI as part of the \"Device Info\"\n* `last_tripped_time` will be removed from binary sensor entities -- use the `last_changed` value provided by the [HA state instead](https://www.home-assistant.io/docs/configuration/state_object/)\n* `up_since` will be removed from camera and light entities -- now has its own sensor. The sensor is disabled by default so you will need to enable it if you want to use it.\n* `enabled_at` will be removed from light entities -- now has its own sensor\n* `camera_id` will be removed from camera entities -- no services need the camera ID anymore so it does not need to be exposed as an attribute. You can still get device IDs for testing/debugging from the Configuration URL in the \"Device Info\" section\n* `chime_duration`, `is_dark`, `mic_sensitivity`, `privacy_mode`, `wdr_value`, and `zoom_position` will be removed from camera entities -- all of them have now have their own sensors\n* `event_object` will be removed from the Motion binary sensor. Use the dedicated Detected Object sensor.\n\n### Breaking Changes in this release\n\n* `CHANGE`: **BREAKING CHANGE** The internal name of the Privacy Zone controlled by the \"Privacy Mode\" switch has been changed. Make sure you turn off all of your privacy mode switches before upgrading. If you do not, you will need to manually delete the old Privacy Zone from your UniFi Protect app.\n\n* `CHANGE`: **BREAKING CHANGE** WDR `number` entity has been removed from Cameras that have HDR. This is inline with changes made to Protect as you can no longer control WDR for cameras with HDR.\n\n* `CHANGE`: **BREAKING CHANGE** the `event_length` attribute has been removed from the motion and door binary sensors. 
The value was previously calculated in memory and not reliable between restarts.\n\n* `CHANGE`: **BREAKING CHANGE** the `event_object` attribute for binary motion sensors has changed the value for no object detected from \"None Identified\" (string) to \"None\" (NoneType/null)\n\n* `CHANGE`: **BREAKING CHANGE** The Doorbell Text select entity for Doorbells has been overhauled. The Config Flow option for Doorbell Messages has been removed. You now can use the the `unifiprotect.add_doorbell_text` and `unifiprotect.remove_doorbell_text` services to add/remove Doorbell messages. This will persist the messages in UniFi Protect and the choices will now be the same ones that appear in the UniFi Protect iOS/Android app. **NOTE**: After running one of these services, you must restart Home Assistant for the updated options to appear.\n\n### Other Changes in this release\n\n* `CHANGE`: Migrates `UpvServer` to new `ProtectApiClient` from `pyunifiprotect`.\n * This should lead to a number of behind-the-scenes reliability improvements.\n * Should fix/close the following issues: #248, #255, #297, #317, #341, and #360 (TODO: Verify)\n\n* `CHANGE`: Overhaul Config Flow\n * Adds Reauthentication support\n * Adds \"Verify SSL\"\n * Updates Setup / Reauth / Options flows to pre-populate forms from existing settings\n * Removes changing username/password as part of the options flow as it is redundant with Reauthentication support\n * Removes Doorbell Text option since it is handled directly by UniFi Protect now\n * Adds new config option to update all metrics (storage stat usage, uptimes, CPU usage, etc.) in realtime. **WARNING**: Enabling this option will greatly increase your CPU usage. ~2x is what we were seeing in our testing. It is recommended to leave it disabled for now as we do not have a lot of diagnostic sensors using this data yet.\n\n* `CHANGE`: The state of the camera entities now reflects on whether the camera is actually recording. 
If you set your Recording Mode to \"Detections\", your camera will switch back and forth between \"Idle\" and \"Recording\" based on if the camera is actually recording.\n * Closes #337\n\n* `CHANGE`: Configuration URLs for UFP devices will now take you directly to the device in the UFP Web UI.\n\n* `CHANGE`: Default names for all entities have been updated from `entity_name device_name` to `device_name entity_name` to match how Home Assistant expects them in 2021.11+\n\n* `CHANGE`: The Bluetooth strength sensor for the UP Sense is now disabled by default (will not effect anyone that already has the sensor).\n\n* `NEW`: Adds `unifiprotect.set_doorbell_message` service. This is just like the `unifiprotect.set_doorbell_lcd_message`, but it is not deprecated and it requires the Doorbell Text Select entity instead of the Camera entity. Should **only** be used to set dynamic doorbell text messages (i.e. setting the current outdoor temperate on your doorbell). If you want to use static custom messages, use the Doorbell Text Select entity and the `unifiprotect.add_doorbell_text` / `unifiprotect.remove_doorbell_text` service. `unifiprotect.set_doorbell_lcd_message` is still deprecated and will still be removed in the next release.\n * Closes #396\n\n* `NEW`: Adds \"Override Connection Host\" config option. This will force your RTSP(S) connection IP address to be the same as everything else. Should only be used if you need to forcibly use a different IP address.\n * For sure closes #248\n\n* `NEW`: Added Dark Mode brand images to https://github.com/home-assistant/brands.\n\n* `NEW`: Adds `phy_rate` and `wifi_signal` sensors so all connection states (BLE, WiFi and Wired) should have a diagnostic sensor. Disabled by default. Requires \"Realtime metrics\" option to update in realtime.\n\n* `NEW`: Added Detected Object sensor for cameras with smart detections. Values are `none`, `person` or `vehicle`. 
Contains `event_score` and `event_thumb` attributes.\n * Closes #342\n\n* `NEW`: Adds Paired Camera select entity for Viewports\n\n* `NEW`: Adds \"Received Data\", \"Transferred Data\", \"Oldest Recording\", \"Storage Used\", and \"Disk Write Rate\" sensors for cameras. Disabled by default. Requires \"Realtime metrics\" option to update in realtime.\n\n* `NEW`: (requires UniFi Protect 1.20.1) Adds \"Voltage\" sensor for doorbells. Disabled by default.\n\n* `NEW`: Adds \"System Sounds\" switch for cameras with speakers\n\n* `NEW`: Adds switches to toggle overlay information for video feeds on all cameras\n\n* `NEW`: Adds switches to toggle smart detection types on cameras with smart detections\n\n* `NEW`: Adds event thumbnail proxy view.\n * URL is `/api/ufp/thumbnail/{thumb_id}`. `thumb_id` is the ID of the thumbnail from UniFi Protect.\n * `entity_id` is a required query parameters. `entity_id` be for an sensor that has event thumbnails on it (like the Motion binary sensor)\n * `token` is a required query parameter is you are _not_ authenticated. It is an attribute on the motion sensor for the Camera\n * `w` and `h` are optional query string params for thumbnail resizing.\n\n* `NEW`: Adds `event_thumbnail` attribute to Motion binary sensor that uses above mentioned event thumbnail proxy view.\n\n* `NEW`: Adds NVR sensors. All of them are disabled by default. All of the sensors will only update every ~15 minutes unless the \"Realtime metrics\" config option is turned on. List of all sensors:\n * Disk Health (one per disk)\n * System Info: CPU Temp, CPU, Memory and Storage Utilization\n * Uptime\n * Recording Capacity (in seconds)\n * Distributions of stored video for Resolution (4K/HD/Free)\n * Distributions of stored video for Type (Continuous/Detections/Timelapse)\n\n* More clean up and improvements for upcoming Home Assistant core merge.\n\n* Adds various new blueprints to help users automate UniFi Protect. 
New [Blueprints can be found in the README](https://github.com/briis/unifiprotect#automating-services)\n\n## 0.11.0-beta.5\n\n* `FIX`: Fixes motion events and sensors for UP-Sense devices (#405)\n\n* `FIX`: Fixes error on start up for G4 Domes (#408)\n\n## 0.11.0-beta.4\n\n* `NEW`: Adds `unifiprotect.set_doorbell_message` service. This is just like the `unifiprotect.set_doorbell_lcd_message`, but it is not deprecated and it requires the Doorbell Text Select entity instead of the Camera entity. Should **only** be used to set dynamic doorbell text messages (i.e. setting the current outdoor temperate on your doorbell). If you want to use static custom messages, use the Doorbell Text Select entity and the `unifiprotect.add_doorbell_text` / `unifiprotect.remove_doorbell_text` service. `unifiprotect.set_doorbell_lcd_message` is still deprecated and will still be removed in the next release.\n * Closes #396\n\n* `NEW`: Adds \"Override Connection Host\" config option. This will force your RTSP(S) connection IP address to be the same as everything else. Should only be used if you need to forcibly use a different IP address.\n * For sure closes #248\n\n* `FIX`: Reset event_thumbnail attribute for Motion binary sensor after motion has ended\n\n* `FIX`: Change unit for signal strength from db to dbm. (fixes Camera Wifi Signal Strength should be dBm not dB)\n * Closes #394\n\n* `NEW`: Added Dark Mode brand images to https://github.com/home-assistant/brands.\n\n## 0.11.0-beta.3\n\n* `DEPRECATION`: The Motion binary sensor will stop showing details about smart detections in the next version. Use the new separate Detected Object sensor. `event_object` attribute will be removed as well.\n\n* `NEW`: Adds `phy_rate` and `wifi_signal` sensors so all connection states (BLE, WiFi and Wired) should have a diagnostic sensor. Disabled by default. Requires \"Realtime metrics\" option to update in realtime.\n\n* `NEW`: Added Detected Object sensor for cameras with smart detections. 
Values are `none`, `person` or `vehicle`. Contains `event_score` and `event_thumb` attributes.\n * Closes #342\n\n* `NEW`: Adds Paired Camera select entity for Viewports\n\n* `NEW`: Adds \"Received Data\", \"Transferred Data\", \"Oldest Recording\", \"Storage Used\", and \"Disk Write Rate\" sensors for cameras. Disabled by default. Requires \"Realtime metrics\" option to update in realtime.\n\n* `NEW`: (requires UniFi Protect 1.20.1) Adds \"Voltage\" sensor for doorbells. Disabled by default.\n\n* `NEW`: Adds \"System Sounds\" switch for cameras with speakers\n\n* `NEW`: Adds switches to toggle overlay information for video feeds on all cameras\n\n* `NEW`: Adds switches to toggle smart detection types on cameras with smart detections\n\n## 0.11.0-beta.2\n\n* `CHANGE`: Allows `device_id` parameter for global service calls to be any device from a UniFi Protect instance\n\n* `NEW`: Adds event thumbnail proxy view.\n * URL is `/api/ufp/thumbnail/{thumb_id}`. `thumb_id` is the ID of the thumbnail from UniFi Protect.\n * `entity_id` is a required query parameters. `entity_id` be for an sensor that has event thumbnails on it (like the Motion binary sensor)\n * `token` is a required query parameter is you are _not_ authenticated. It is an attribute on the motion sensor for the Camera\n * `w` and `h` are optional query string params for thumbnail resizing.\n\n* `NEW`: Adds `event_thumbnail` attribute to Motion binary sensor that uses above mentioned event thumbnail proxy view.\n\n* `NEW`: Adds NVR sensors. All of them are disabled by default. All of the sensors will only update every ~15 minutes unless the \"Realtime metrics\" config option is turned on. 
List of all sensors:\n * Disk Health (one per disk)\n * System Info: CPU Temp, CPU, Memory and Storage Utilization\n * Uptime\n * Recording Capacity (in seconds)\n * Distributions of stored video for Resolution (4K/HD/Free)\n * Distributions of stored video for Type (Continuous/Detections/Timelapse)\n\n* More clean up and improvements for upcoming Home Assistant core merge.\n\n## 0.11.0-beta.1\n\n### Deprecations\n\n0.11 is last major release planned before we merge the `unifiprotect` integration into core. As a result, a number of features are being removed when we merged into core.\n\nThe following services will be removed in the next version:\n\n* `unifiprotect.set_recording_mode` -- use the select introduced in 0.10 instead\n* `unifiprotect.set_ir_mode` -- use the select entity introduced in 0.10 instead\n* `unifiprotect.set_status_light` -- use the switch entity on the camera device instead\n* `unifiprotect.set_hdr_mode` -- use the switch entity on the camera device instead\n* `unifiprotect.set_highfps_video_mode` -- use the switch entity on the camera device instead\n* `unifiprotect.set_doorbell_lcd_message` -- use the select entity introduced in 0.10 instead\n* `unifiprotect.set_mic_volume` -- use the number entity introduced in 0.10 instead\n* `unifiprotect.set_privacy_mode` -- use the switch entity introduced in 0.10 instead\n* `unifiprotect.set_zoom_position` -- use the number entity introduced in 0.10 instead\n* `unifiprotect.set_wdr_value` -- use the number entity introduced in 0.10 instead\n* `unifiprotect.light_settings` -- use the select entity introduced in 0.10 instead\n* `unifiprotect.set_viewport_view` -- use the select entity introduced in 0.10 instead\n\nThe following events will be removed in the next version:\n\n* `unifiprotect_doorbell` -- use a State Changed event on \"Doorbell\" binary sensor on the device instead\n* `unifiprotect_motion` -- use a State Changed event on the \"Motion\" binary sensor on the device instead\n\nThe following 
entities will be removed in the next version:\n\n* The \"Motion Recording\" sensor for cameras (in favor of the \"Recording Mode\" select)\n* The \"Light Turn On\" sensor for flood lights (in favor of the \"Lighting\" select)\n\nAll of following attributes should be duplicated data that can be gotten from other devices/entities and as such, they will be removed in the next version.\n\n* `device_model` will be removed from all entities -- provided in the UI as part of the \"Device Info\"\n* `last_tripped_time` will be removed from binary sensor entities -- use the `last_changed` value provided by the [HA state instead](https://www.home-assistant.io/docs/configuration/state_object/)\n* `up_since` will be removed from camera and light entities -- now has its own sensor. The sensor is disabled by default so you will need to enable it if you want to use it.\n* `enabled_at` will be removed from light entities -- now has its own sensor\n* `camera_id` will be removed from camera entities -- no services need the camera ID anymore so it does not need to be exposed as an attribute. You can still get device IDs for testing/debugging from the Configuration URL in the \"Device Info\" section\n* `chime_duration`, `is_dark`, `mic_sensitivity`, `privacy_mode`, `wdr_value`, and `zoom_position` will be removed from camera entities -- all of them have now have their own sensors\n\n\n### Breaking Changes in this release\n\n* `CHANGE`: **BREAKING CHANGE** The internal name of the Privacy Zone controlled by the \"Privacy Mode\" switch has been changed. Make sure you turn off all of your privacy mode switches before upgrading. If you do not, you will need to manually delete the old Privacy Zone from your UniFi Protect app.\n\n* `CHANGE`: **BREAKING CHANGE** WDR `number` entity has been removed from Cameras that have HDR. 
This is inline with changes made to Protect as you can no longer control WDR for cameras with HDR.\n\n* `CHANGE`: **BREAKING CHANGE** the `event_length` attribute has been removed from the motion and door binary sensors. The value was previously calculated in memory and not reliable between restarts.\n\n* `CHANGE`: **BREAKING CHANGE** the `event_object` attribute for binary motion sensors has changed the value for no object detected from \"None Identified\" (string) to \"None\" (NoneType/null)\n\n* `CHANGE`: **BREAKING CHANGE** The Doorbell Text select entity for Doorbells has been overhauled. The Config Flow option for Doorbell Messages has been removed. You now can use the the `unifiprotect.add_doorbell_text` and `unifiprotect.remove_doorbell_text` services to add/remove Doorbell messages. This will persist the messages in UniFi Protect and the choices will now be the same ones that appear in the UniFi Protect iOS/Android app. **NOTE**: After running one of these services, you must restart Home Assistant for the updated options to appear.\n\n### Other Changes in this release\n\n* `CHANGE`: Migrates `UpvServer` to new `ProtectApiClient` from `pyunifiprotect`.\n * This should lead to a number of behind-the-scenes reliability improvements.\n * Should fix/close the following issues: #248, #255, #297, #317, #341, and #360 (TODO: Verify)\n\n* `CHANGE`: Overhaul Config Flow\n * Adds Reauthentication support\n * Adds \"Verify SSL\"\n * Updates Setup / Reauth / Options flows to pre-populate forms from existing settings\n * Removes changing username/password as part of the options flow as it is redundant with Reauthentication support\n * Removes Doorbell Text option since it is handled directly by UniFi Protect now\n * Adds new config option to update all metrics (storage stat usage, uptimes, CPU usage, etc.) in realtime. **WARNING**: Enabling this option will greatly increase your CPU usage. ~2x is what we were seeing in our testing. 
It is recommended to leave it disabled for now as we do not have a lot of diagnostic sensors using this data yet.\n\n* `CHANGE`: The state of the camera entities now reflects on whether the camera is actually recording. If you set your Recording Mode to \"Detections\", your camera will switch back and forth between \"Idle\" and \"Recording\" based on if the camera is actually recording.\n * Closes #337\n\n* `CHANGE`: Configuration URLs for UFP devices will now take you directly to the device in the UFP Web UI.\n\n* `CHANGE`: Default names for all entities have been updated from `entity_name device_name` to `device_name entity_name` to match how Home Assistant expects them in 2021.11+\n\n* `CHANGE`: The Bluetooth strength sensor for the UP Sense is now disabled by default (will not effect anyone that already has the sensor).\n\n* `NEW`: Adds all of the possible enabled UFP Camera channels as different camera entities; only the highest resolution secure (RTSPS) one is enabled by default. If you need RTSP camera entities, you can enable one of the given insecure camera entities.\n\n* `NEW`: Added the following attributes to Camera entity: `width`, `height`, `fps`, `bitrate` and `channel_id`\n\n* `NEW`: Added status light switch for Flood Light devices\n\n* `NEW`: Added \"On Motion - When Dark\" option for Flood Light Lighting switch\n\n* `NEW`: Added \"Auto-Shutoff Timer\" number entity for Flood Lights\n\n* `NEW`: Added \"Motion Sensitivity\" number entity for Flood Lights\n\n* `NEW`: Added \"Chime Duration\" number entity for Doorbells\n\n* `NEW`: Added \"Uptime\" sensor entity for all UniFi Protect adoptable devices. This is disabled by default.\n\n* `NEW`: Added `unifiprotect.set_default_doorbell_text` service to allow you to set your default Doorbell message text. **NOTE**: After running this service, you must restart Home Assistant for the default to be reflected in the options.\n\n* `NEW`: Added \"SSH Enabled\" switch for all adoptable UniFi Protect devices. 
This switch is disabled by default.\n\n* `NEW`: (requires 2021.12+) Added \"Reboot Device\" button for all adoptable UniFi Protect devices. This button is disabled by default. Use with caution as there is no confirm. \"Pressing\" it instantly reboots your device.\n\n* `NEW`: Added media player entity for cameras with speaker. Speaker will accept any ffmpeg playable audio file URI (URI must be accessible from _Home Assistant_, not your Camera). TTS works great!\n * TODO: Investigate for final release. This _may_ not work as expected on G4 Doorbells. Not sure yet if it is because of the recent Doorbell issues or because Doorbells are different.\n * Implements #304\n\n\n## 0.10.0\n\nReleased: 2021-11-24\n\n> **YOU MUST BE RUNNING V1.20.0 OF UNIFI PROTECT, TO USE THIS VERSION OF THE INTEGRATION. IF YOU ARE STILL ON 1.19.x STAY ON THE 0.9.2 RELEASE.\n\nAs UniFi Protect V1.20.0 is now released, we will also ship the final release of 0.10.0. If you were not on the beta, please read these Release Notes carefully, as there are many changes for this release, and many Breaking Changes.\n\n### Supported Versions\n\nThis release requires the following minimum Software and Firmware version:\n\n* **Home Assistant**: `2021.09.0`\n* **UniFi Protect**: `1.20.0`\n\n### Upgrade Instructions\n\n> If you are already running V0.10.0-beta.3 or higher of this release, there should not be any breaking changes, and you should be able to do a normal upgrade from HACS.\n\nDue to the many changes and entities that have been removed and replaced, we recommend the following process to upgrade from an earlier Beta or from an earlier release:\n\n* Upgrade the Integration files, either through HACS (Recommended) or by copying the files manually to your `custom_components/unifiprotect` directory.\n* Restart Home Assistant\n* Remove the UniFi Protect Integration by going to the Integrations page, click the 3 dots in the lower right corner of the UniFi Protect Integration and select *Delete*\n* While 
still on this page, click the `+ ADD INTEGRATION` button in the lower right corner, search for UniFi Protect, and start the installation, supplying your credentials.\n\n### Changes in this release\n\n* `CHANGE`: **BREAKING CHANGE** The support for *Anonymous Snapshots* has been removed as of this release. This has always been a workaround in a time where this did not work as well as it does now. If you have this flag set, you don't have to do anything, as snapshots are automatically moved to the supported method.\n\n* `NEW`: **BREAKING CHANGE** Also as part of Home Assistant 2021.11 a new [Entity Category](https://www.home-assistant.io/blog/2021/11/03/release-202111/#entity-categorization) is introduced. This makes it possible to classify an entity as either `config` or `diagnostic`. A `config` entity is used for entities that can change the configuration of a device and a `diagnostic` entity is used for devices that report status, but does not allow changes. These two entity categories have been applied to selected entities in this Integration. If you are not on HA 2021.11+ then this will not have any effect on your installation.\n\n* `CHANGE`: **BREAKING CHANGE** There has been a substantial rewrite of the underlying IO API Module (`pyunifiprotect`) over the last few months. The structure is now much better and makes it easier to maintain going forward. It will take too long to list all the changes, but one important change is that we have removed the support for Non UnifiOS devices. These are CloudKey+ devices with a FW lower than 2.0.24. I want to give a big thank you to @AngellusMortis and @bdraco for making this happen.\n\n* `CHANGE`: **BREAKING CHANGE** As this release has removed the support for Non UnifiOS devices, we could also remove the Polling function for Events as this is served through Websockets. 
This also means that the Scan Interval is no longer present in the Configuration.\n\n* `CHANGE`: **BREAKING CHANGE** To future proof the Select entities, we had to change the way the Unique ID is populated. The entity names are not changing, but the Unique ID's are. If you have installed a previous beta of V0.10.0 you will get a duplicate of all Select entities, and the ones that were there before, will be marked as unavailable. You can either remove them manually from the Integration page, or even easier, just delete the UniFi Protect integration, and add it again. (The latter is the recommended method)\n\n* `CHANGE`: **BREAKING CHANGE** All switches called `switch.ir_active_CAMERANAME` have been removed from the system. They are being migrated to a `Select Entity` which you can read more about below. If you have automations that turn these switches on and off, you will have to replace this with the `select.select_option` service, using the valid options described below for the `option` data.\n\n* `CHANGE`: **BREAKING CHANGE** The Service `unifiprotect.set_ir_mode` now supports the following values for ir_mode: `\"auto, autoFilterOnly, on, off\"`. This is a change from the previous valid options and if you have automations that use this service you will need to make sure that you only use these supported modes.\n\n* `CHANGE`: **BREAKING CHANGE** The Service `unifiprotect.save_thumbnail_image` has been removed from the Integration. This service proved to be unreliable as the Thumbnail image very often was not available, when this service was called. Please use the service `camera.snapshot` instead.\n\n* `CHANGE`: **BREAKING CHANGE** All switches called `switch.record_smart_CAMERANAME` and `switch.record_motion_CAMERANAME` have been removed from the system. They are being migrated to a `Select Entity` which you can read more about below. 
If you have automations that turns these switches on and off, you will have to replace this with the `select.select_option` service, using the valid options described below for the `option` data.\n\n* `CHANGE`: **BREAKING CHANGE** All switches for the *Floodlight devices* have been removed from the system. They are being migrated to a `Select Entity` which you can read more about below. If you have automations that turns these switches on and off, you will have to replace this with the `select.select_option` service, using the valid options described below for the `option` data.\n\n* `CHANGE`: **BREAKING CHANGE** The Service `unifiprotect.set_recording_mode` now only supports the following values for recording_mode: `\"never, detections, always\"`. If you have automations that uses the recording_mode `smart` or `motion` you will have to change this to `detections`.\n\n* `CHANGE`: Config Flow has been slimmed down so it will only ask for the minimum values we need during installation. If you would like to change this after that, you can use the Configure button on the Integration page.\n\n* `CHANGE`: It is now possible to change the UFP Device username and password without removing and reinstalling the Integration. On the Home Assistant Integration page, select CONFIGURE in the lower left corner of the UniFi Protect integration, and you will have the option to enter a new username and/or password.\n\n* `CHANGE`: We will now use RTSPS for displaying video. This is to further enhance security, and to ensure that the system will continue running if Ubiquiti decides to remove RTSP completely. This does not require any changes from your side.\n\n* `NEW`: For each Camera there will be a binary sensor called `binary_sensor.is_dark_CAMERANAME`. 
This sensor will be on if the camera is perceiving it as so dark that the Infrared lights will turn on (If enabled).\n\n* `CHANGE`: A significant number of 'under the hood' changes have been made by @bdraco, to bring the Integration up to Home Assistant standards and to prepare for the integration into HA Core. Thank you to @bdraco for all his advice, coding and review.\n\n* `CHANGE`: `pyunifiprotect` is V1.0.4 and has been completely rewritten by @AngellusMortis, with the support of @bdraco and is now a much more structured and easier to maintain module. There have also been a few interesting additions to the module, which you will see the fruit of in a coming release. This version is not utilizing the new module yet, but stay tuned for the 0.11.0 release, which most likely also will be the last release before we try the move to HA Core.\n\n* `NEW`: Device Configuration URL's are introduced in Home Assistant 2021.11. In this release we add URL Link to allow the user to visit the device for configuration or diagnostics from the *Devices* page. If you are not on HA 2021.11+ then this will not have any effect on your installation.\n\n* `NEW`: A switch is being created to turn on and off the Privacy Mode for each Camera. This makes it possible to set the Privacy mode for a Camera directly from the UI. This is a supplement to the already existing service `unifiprotect.set_privacy_mode`\n\n* `NEW`: Restarted the work on implementing the UFP Sense device. We don't have physical access to this device, but @Madbeefer is kind enough to do all the testing.\n * The following new sensors will be created for each UFP Sense device: `Battery %`, `Ambient Light`, `Humidity`, `Temperature` and `BLE Signal Strength`.\n * The following binary sensors will be created for each UFP Sense device: `Motion`, `Open/Close` and `Battery Low`. 
**Note** as of this release, these sensors are not working correctly, this is still work in progress.\n\n* `NEW`: For each Camera there will now be a `Select Entity` from where you can select the Infrared mode for each Camera. Valid options are `Auto, Always Enable, Auto (Filter Only, no LED's), Always Disable`. These are the same options you can use if you set this through the UniFi Protect App.\n\n* `NEW`: Added a new `Number` entity called `number.wide_dynamic_range_CAMERANAME`. You can now set the Wide Dynamic Range for a camera directly from the UI. This is a supplement to the already existing service `unifiprotect.set_wdr_value`.\n\n* `NEW`: Added `select.doorbell_text_DOORBELL_NAME` to be able to change the LCD Text on the Doorbell from the UI. In the configuration menu of the Integration there is now a field where you can type a list of Custom Texts that can be displayed on the Doorbell and then these options plus the two standard texts built-in to the Doorbell can now all be selected. The format of the custom text list has to ba a comma separated list, f.ex.: RING THE BELL, WE ARE SLEEPING, GO AWAY... etc.\n\n* `NEW`: Added a new `Number` entity called `number.microphone_level_CAMERANAME`. From here you can set the Microphone Sensitivity Level for a camera directly from the UI. This is a supplement to the already existing service `unifiprotect.set_mic_volume`.\n\n* `NEW`: Added a new `Number` entity called `number.zoom_position_CAMERANAME`. From here you can set the optical Zoom Position for a camera directly from the UI. This entity will only be added for Cameras that support optical zoom. This is a supplement to the already existing service `unifiprotect.set_zoom_position`.\n\n* `NEW`: For each Camera there will now be a `Select Entity` from where you can select the recording mode for each Camera. Valid options are `Always, Never, Detections`. Detections is what you use to enable motion detection. 
Whether they do People and Vehicle detection, depends on the Camera Type and the settings in the UniFi Protect App. We might later on implement a new Select Entity from where you can set the the Smart Detection options. Until then, this needs to be done from the UniFi Protect App. (as is the case today)\n\n* `NEW`: For each Floodlight there will now be a `Select Entity` from where you can select when the Light Turns on. This replaces the two switches that were in the earlier releases. Valid options are `On Motion, When Dark, Manual`.\n\n* `NEW`: Added a new event `unifiprotect_motion` that triggers on motion. You can use this instead of the Binary Sensor to watch for a motion event on any motion enabled device. The output from the event will look similar tom the below\n\n ```json\n {\n \"event_type\": \"unifiprotect_motion\",\n \"data\": {\n \"entity_id\": \"camera.outdoor\",\n \"smart_detect\": [\n \"person\"\n ],\n \"motion_on\": true\n },\n \"origin\": \"LOCAL\",\n \"time_fired\": \"2021-10-18T10:55:36.134535+00:00\",\n \"context\": {\n \"id\": \"b3723102b4fb71a758a423d0f3a04ba6\",\n \"parent_id\": null,\n \"user_id\": null\n }\n }\n ```\n\n\n## 0.10.0 Beta 5 Hotfix 1\n\nReleased: November 13th, 2021\n\n### Supported Versions\n\nThis release requires the following minimum Software and Firmware version:\n\n* **Home Assistant**: `2021.09.0`\n* **UniFi Protect**: `1.20.0-beta.7`\n\n### Upgrade Instructions\n\nDue to the many changes and entities that have been removed and replaced, we recommend the following process to upgrade from an earlier Beta or from an earlier release:\n\n* Upgrade the Integration files, either through HACS (Recommended) or by copying the files manually to your `custom_components/unifiprotect` directory.\n* Restart Home Assistant\n* Remove the UniFi Protect Integration by going to the Integrations page, click the 3 dots in the lower right corner of the UniFi Protect Integration and select *Delete*\n* While still on this page, click the `+ ADD 
INTEGRATION` button in the lower right corner, search for UnFi Protect, and start the installation, supplying your credentials.\n\n### Changes in this release\n\n* `CHANGE`: Updated `pyunifiprotect` to 1.0.2. Fixing errors that can occur when using Python 3.9 - Home Assistant uses that.\n\n## 0.10.0 Beta 5\n\nReleased: November 13th, 2021\n\n### Supported Versions\n\nThis release requires the following minimum Software and Firmware version:\n\n* **Home Assistant**: `2021.09.0`\n* **UniFi Protect**: `1.20.0-beta.7`\n\n### Upgrade Instructions\n\nDue to the many changes and entities that have been removed and replaced, we recommend the following process to upgrade from an earlier Beta or from an earlier release:\n\n* Upgrade the Integration files, either through HACS (Recommended) or by copying the files manually to your `custom_components/unifiprotect` directory.\n* Restart Home Assistant\n* Remove the UniFi Protect Integration by going to the Integrations page, click the 3 dots in the lower right corner of the UniFi Protect Integration and select *Delete*\n* While still on this page, click the `+ ADD INTEGRATION` button in the lower right corner, search for UnFi Protect, and start the installation, supplying your credentials.\n\n### Changes in this release\n\nAs there were still some changes we wanted to do before releasing this, we decided to do one more Beta, before freezing.\n\n* `CHANGE`: The support for *Anonymous Snapshots* has been removed as of this release. This had always been a workaround in a time where this did not work as well as it does now. If you have this flag set, you don't have to do anything, as snapshots are automatically moved to the supported method.\n* `CHANGE`: Config Flow has been slimmed down so it will only ask for the minimum values we need during installation. 
If you would like to change this after that, you can use the Configure button on the Integration page.\n* `CHANGE`: It is now possible to change the UFP Device username and password without removing and reinstalling the Integration. On the Home Assistant Integration page, select CONFIGURE in the lower left corner of the UniFi Protect integration, and you will have the option to enter a new username and/or password.\n* `NEW`: For each Camera there will be a binary sensor called `binary_sensor.is_dark_CAMERANAME`. This sensor will be on if the camera is perceiving it is as so dark that the Infrared lights will turn on (If enabled).\n* `CHANGE`: A significant number of 'under the hood' changes have been made, to bring the Integration up to Home Assistant standards and to prepare for the integration in to HA Core. Thank you to @bdraco for all his advise, coding and review.\n* `CHANGE`: `pyunifiprotect` has been completely rewritten by @AngellusMortis, with the support of @bdraco and is now a much more structured and easier to maintain module. There has also been a few interesting additions to the module, which you will see the fruit of in a coming release. This version is not utilizing the new module yet, but stay tuned for the 0.11.0 release, which most likely also will be the last release before we try the move to HA Core.\n\n## 0.10.0 Beta 4\n\nReleased: November 4th, 2021\n\n**REMINDER** This version is only valid for **V1.20.0-beta.2** or higher of UniFi Protect. 
If you are not on that version, stick with V0.9.1.\n\n### Upgrade Instructions\n\nDue to the many changes and entities that have been removed and replaced, we recommend the following process to upgrade from an earlier Beta or from an earlier release:\n\n* Upgrade the Integration files, either through HACS (Recommended) or by copying the files manually to your `custom_components/unifiprotect` directory.\n* Restart Home Assistant\n* Remove the UniFi Protect Integration by going to the Integrations page, click the 3 dots in the lower right corner of the UniFi Protect Integration and select *Delete*\n* While still on this page, click the `+ ADD INTEGRATION` button in the lower right corner, search for UnFi Protect, and start the installation, supplying your credentials.\n\n### Changes in this release\n\nThis will be the last beta with functional changes, so after this release it will only be bug fixes. The final release will come out when 1.20 of UniFi Protect is officially launched. Everything from Beta 1, 2 and 3 is included here, plus the following:\n\n* `NEW`: Device Configuration URL's are introduced in Home Assistant 2021.11. In this release we add URL Link to allow the user to visit the device for configuration or diagnostics from the *Devices* page. If you are not on HA 2021.11+ then this will not have any effect on your installation.\n* `NEW`: **BREAKING CHANGE** Also as part of Home Assistant 2021.11 a new [Entity Category](https://www.home-assistant.io/blog/2021/11/03/release-202111/#entity-categorization) is introduced. This makes it possible to classify an entity as either `config` or `diagnostic`. A `config` entity is used for entities that can change the configuration of a device and a `diagnostic` entity is used for devices that report status, but does not allow changes. These two entity categories have been applied to selected entities in this Integration. 
If you are not on HA 2021.11+ then this will not have any effect on your installation.<br>\nWe would like to have feedback from people on this choice. Have we categorized too many entities, should we not use this at all. Please come with the feedback.<br>\nEntities which have the entity_category set:\n * Are not included in a service call targetting a whole device or area.\n * Are, by default, not exposed to Google Assistant or Alexa. If entities are already exposed, there will be no change.\n * Are shown on a separate card on the device configuration page.\n * Do not show up on the automatically generated Lovelace Dashboards.\n* `NEW`: A switch is being created to turn on and off the Privacy Mode for each Camera. This makes it possible to set the Privacy mode for a Camera directly from the UI. This is a supplement to the already existing service `unifiprotect.set_privacy_mode`\n* `NEW`: Restarted the work on implementing the UFP Sense device. We don't have physical access to this device, but @Madbeefer is kindly enough to do all the testing.\n * The following new sensors will be created for each UFP Sense device: `Battery %`, `Ambient Light`, `Humidity`, `Temperature` and `BLE Signal Strength`.\n * The following binary sensors will be created for each UFP Sense device: `Motion`, `Open/Close` and `Battery Low`. **Note** as of this beta, these sensors are not working correctly, this is still work in progress.\n\n\n## 0.10.0 Beta 3\n\nReleased: October 27th, 2021\n\n**REMINDER** This version is only valid for **V1.20.0-beta.2** or higher of UniFi Protect. 
If you are not on that version, stick with V0.9.1.\n\n### Upgrade Instructions\n\nDue to the many changes and entities that have been removed and replaced, we recommend the following process to upgrade from an earlier Beta or from an earlier release:\n\n* Upgrade the Integration files, either through HACS (Recommended) or by copying the files manually to your `custom_components/unifiprotect` directory.\n* Restart Home Assistant\n* Remove the UniFi Protect Integration by going to the Integrations page, click the 3 dots in the lower right corner of the UniFi Protect Integration and select *Delete*\n* While still on this page, click the `+ ADD INTEGRATION` button in the lower right corner, search for UnFi Protect, and start the installation, supplying your credentials.\n\n### Changes in this release\n\nEverything from Beta 1 and 2 is included here, plus the following:\n\n* `CHANGE`: **BREAKING CHANGE** There has been a substansial rewite of the underlying IO API Module (`pyunifiprotect`) over the last few month. The structure is now much better and makes it easier to maintain going forward. It will take too long to list all the changes, but one important change is that we have removed the support for Non UnifiOS devices. These are CloudKey+ devices with a FW lower than 2.0.24. I want to give a big thank you to @AngellusMortis and @bdraco for making this happen.\n* `CHANGE`: **BREAKING CHANGE** As this release has removed the support for Non UnifiOS devices, we could also remove the Polling function for Events as this is served through Websockets. This also means that the Scan Interval is no longer present in the Configuration.\n* `CHANGE`: **BREAKING CHANGE** To future proof the Select entities, we had to change the the way the Unique ID is populated. 
The entity names are not changing, but the Unique ID's are If you have installed a previous beta of V0.10.0 you will get a duplicate of all Select entities, and the ones that were there before, will be marked as unavailable. You can either remove them manually from the Integration page, or even easier, just delete the UniFi Protect integration, and add it again. (The later is the recommended method)\n* `CHANGE`: **BREAKING CHANGE** All switches called `switch.ir_active_CAMERANAME` have been removed from the system. They are being migrated to a `Select Entity` which you can read more about below. If you have automations that turns these switches on and off, you will have to replace this with the `select.select_option` service, using the valid options described below for the `option` data.\n* `CHANGE`: **BREAKING CHANGE** The Service `unifiprotect.set_ir_mode` now supports the following values for ir_mode: `\"auto, autoFilterOnly, on, off\"`. This is a change from the previous valid options and if you have automations that uses this service you will need to make sure that you only use these supported modes.\n* `CHANGE`: **BREAKING CHANGE** The Service `unifiprotect.save_thumbnail_image` has been removed from the Integration. This service proved to be unreliable as the Thumbnail image very often was not available, when this service was called. Please use the service `camera.snapshot` instead.\n* `NEW`: For each Camera there will now be a `Select Entity` from where you can select the Infrared mode for each Camera. Valid options are `Auto, Always Enable, Auto (Filter Only, no LED's), Always Disable`. These are the same options you can use if you set this through the UniFi Protect App.\n* `NEW`: Added a new `Number` entity called `number.wide_dynamic_range_CAMERANAME`. You can now set the Wide Dynamic Range for a camera directly from the UI. 
This is a supplement to the already existing service `unifiprotect.set_wdr_value`.\n* `NEW`: Added `select.doorbell_text_DOORBELL_NAME` to be able to change the LCD Text on the Doorbell from the UI. In the configuration menu of the Integration there is now a field where you can type a list of Custom Texts that can be displayed on the Doorbell and then these options plus the two standard texts built-in to the Doorbell can now all be selected. The format of the custom text list has to ba a comma separated list, f.ex.: RING THE BELL, WE ARE SLEEPING, GO AWAY... etc.\n* `NEW`: Added a new `Number` entity called `number.microphone_level_CAMERANAME`. From here you can set the Microphone Sensitivity Level for a camera directly from the UI. This is a supplement to the already existing service `unifiprotect.set_mic_volume`.\n* `NEW`: Added a new `Number` entity called `number.zoom_position_CAMERANAME`. From here you can set the optical Zoom Position for a camera directly from the UI. This entity will only be added for Cameras that support optical zoom. This is a supplement to the already existing service `unifiprotect.set_zoom_position`.\n\n## 0.10.0 Beta 2\n\nReleased: October 24th, 2021\n\nEverything from Beta 1 is included here, plus the following:\n\n`CHANGE`: Changes to the underlying `pyunifiprotect` module done by @AngellusMortis to ensure all tests are passing and adding new functionality to be used in a later release.\n`NEW`: Added a new event `unifiprotect_motion` that triggers on motion. You can use this instead of the Binary Sensor to watch for a motion event on any motion enabled device. 
The output from the event will look similar tom the below\n\n ```json\n {\n \"event_type\": \"unifiprotect_motion\",\n \"data\": {\n \"entity_id\": \"camera.outdoor\",\n \"smart_detect\": [\n \"person\"\n ],\n \"motion_on\": true\n },\n \"origin\": \"LOCAL\",\n \"time_fired\": \"2021-10-18T10:55:36.134535+00:00\",\n \"context\": {\n \"id\": \"b3723102b4fb71a758a423d0f3a04ba6\",\n \"parent_id\": null,\n \"user_id\": null\n }\n }\n ```\n\n## 0.10.0 Beta 1\n\nReleased: October 17th, 2021\n\nThis is the first Beta release that will support **UniFi Protect 1.20.0**. There have been a few changes to the Protect API, that requires us to change this Integration. Unfortunately it cannot be avoided that these are Breaking Changes, so please read carefully below before you do the upgrade.\n\nWhen reading the Release Notes for UniFi Protect 1.20.0-beta.2 the following changes are directly affecting this Integration:\n\n* Integrate “Smart detections” and “Motion Detections” into “Detections”.\n* Generate only RTSPS links for better security. (RTSP streams are still available by removing S from RTSPS and by changing port 7441 to 7447.\n\n#### Changes implemented in this version:\n* `CHANGE`: **IMPORTANT** You MUST have at least UniFi Protect **V1.20.0-beta.1** installed for this Integration to work. There are checks on both new installations and upgraded installations to see if your UniFi Protect App is at the right version number. Please consult the HA Logfile for more information if something does not work.\nIf you are not running the 1.20.0 beta, DO NOT UPGRADE. If you did anyway, you can just uninstall and select the 0.9.1 release from HACS and all should be running again.\n* `CHANGE`: **BREAKING CHANGE** All switches called `switch.record_smart_CAMERANAME` and `switch.record_motion_CAMERANAME` have been removed from the system. They are being migrated to a `Select Entity` which you can read more about below. 
If you have automations that turns these switches on and off, you will have to replace this with the `select.select_option` service, using the valid options described below for the `option` data.\n* `CHANGE`: **BREAKING CHANGE** All switches for the *Floodlight devices* have been removed from the system. They are being migrated to a `Select Entity` which you can read more about below. If you have automations that turns these switches on and off, you will have to replace this with the `select.select_option` service, using the valid options described below for the `option` data.\n* `CHANGE`: **BREAKING CHANGE** The Service `unifiprotect.set_recording_mode` now only supports the following values for recording_mode: `\"never, detections, always\"`. If you have automations that uses the recording_mode `smart` or `motion` you will have to change this to `detections`.\n* `NEW`: For each Camera there will now be a `Select Entity` from where you can select the recording mode for each Camera. Valid options are `Always, Never, Detections`. Detections is what you use to enable motion detection. Whether they do People and Vehicle detection, depends on the Camera Type and the settings in the UniFi Protect App. We might later on implement a new Select Entity from where you can set the the Smart Detection options. Until then, this needs to be done from the UniFi Protect App. (as is the case today)\n* `NEW`: For each Floodlight there will now be a `Select Entity` from where you can select when the Light Turns on. This replaces the two switches that were in the earlier releases. Valid options are `On Motion, When Dark, Manual`.\n* `CHANGE`: We will now use RTSPS for displaying video. This is to further enhance security, and to ensure that the system will continue running if Ubiquiti decides to remove RTSP completely. This does not require any changes from your side.\n\n## 0.9.1\n\nReleased: October 17th, 2021\n\nThis will be the final release for devices not running the UnifiOS. 
With the next official release, there will no longer be support for the CloudKey+ running a firmware lower than 2.0.\n**NOTE** This release does not support UniFi Protect 1.20.0+. This will be supported in the next Beta release.\n\n* `FIX`: Issue #297. Improves determining reason for bad responses.\n\n## 0.9.0\n\nReleased: August 29th, 2021\n\n* `NEW`: This release adds support for the UFP Viewport device. This is done by adding the `select` platform, from where the views defined in Unifi Protect can be selected. When changing the selection, the Viewport will change its current view to the selected item. The `select` platform will only be setup if UFP Viewports are found in UniFi Protect. When you create a view in Unifi Protect, you must check the box *Shared with Others* in order to use the view in this integration.<br>\n**NOTE**: This new entity requires a minimum of Home Assistant 2021.7. If you are on an older version, the Integration will still work, but you will get an error during startup.\n* `NEW`: As part of the support for the UFP Viewport, there is also a new service being created, called `unifiprotect.set_viewport_view`. This service requires two parameters: The `entity_id` of the Viewport and the `view_id` of the View you want to set. `view_id` is a long string, but you can find the id number when looking at the Attributes for the `select` entity.\n* `FIX`: Issue #264, missing image_width variable is fixed in this release.\n* `CHANGE`: PR #276, Ensure setup is retried later when device is rebooting. Thanks to @bdraco\n* `CHANGE`: PR #271. Updated README, to ensure proper capitalization. Thanks to @jonbloom\n* `CHANGE`: PR #278. Allow requesting a custom snapshot width and height, to support 2021.9 release. Thanks to @bdraco. Fixing Issue #282\n\n## 0.9.0 Beta 2\n\nReleased: July 17th, 2021\n\n* `BREAKING`: If you installed Beta 1, then you will have a media_player entity that is no longer used. 
You can disable it, or reinstall the Integration to get completely rid of it.\n* `NEW`: This release adds support for the UFP Viewport device. This is done by adding the `select` platform, from where the views defined in Unifi Protect can be selected. When changing the selection, the Viewport will change it's current view to the selected item. The `select` platform will only be setup if UFP Viewports are found in Unfi Protect. When you create a view in Unifi Protect, you must check the box *Shared with Others* in order to use the view in this integration.<br>\n**NOTE**: This new entity requires a minimum of Home Assistant 2021.7\n\n* `NEW`: As part of the support for the UFP Viewport, there also a new service being created, called `unifiprotect.set_viewport_view`. This service requires two parameters: The `entity_id` of the Viewport and the `view_id` of the View you want to set. `view_id` is a long string, but you can find the id number when looking at the Attributes for the `select` entity.\n* `FIX`: Issue #264, missing image_width variable is fixed in this release.\n\n## 0.9.0 Beta 1\n\nReleased: July 6th, 2021\n\n* `NEW`: This release adds support for the UFP Viewport device. This is done by adding the `media_player` platform, from where the views defined in Unifi Protect can be selected as source. When selecting the source, the Viewport will change it's current view to the selected source. The `media_player` platform will only be setup if UFP Viewports are found in Unfi Protect.\n* `NEW`: As part of the support for the UFP Viewport, there also a new service being created, called `unifiprotect.set_viewport_view`. This service requires two parameters: The `entity_id` of the Viewport and the `view_id` of the View you want to set. 
`view_id` is a long string, but you can find the id number when looking at the Attributes for the media_player.\n\n## 0.8.9\n\nReleased: June 29th, 2021\n\n* `FIXED`: During startup of the Integration, it would sometimes log `Error Code: 500 - Error Status: Internal Server Error`. (Issue #249) This was caused by some values not being available at startup.\n* `CHANGE`: The service `unifiprotect.save_thumbnail_image` now creates the directories in the filename if they do not exist. Issue #250.\n* `FIX`: We have started the integration of the new UFP-Sense devices, but these are not available in Europe yet, so the integration is not completed, and will not be, before I can get my hands on one of these devices. Some users with the devices, got a crash when running the latest version, which is now fixed. The integration is not completed, this fix, just removes the errors that were logged. Thanks to @michaeladam for finding this.\n* `NEW`: When the doorbell is pressed, the integration now fires an event with the type `unifiprotect_doorbell`. You can use this in automations instead of monitoring the binary sensor. The event will look like below and only fire when the doorbell is pressed, so there will be no `false`event. If you have multiple doorbells you use the `entity_id` value in the `data` section to check which doorbell was pressed.\n\n ```json\n {\n \"event_type\": \"unifiprotect_doorbell\",\n \"data\": {\n \"ring\": true,\n \"entity_id\": \"binary_sensor.doorbell_kamera_doerklokke\"\n },\n \"origin\": \"LOCAL\",\n \"time_fired\": \"2021-06-26T08:16:58.882088+00:00\",\n \"context\": {\n \"id\": \"6b8cbcecb61d75cbaa5035e2624a3051\",\n \"parent_id\": null,\n \"user_id\": null\n }\n }\n ```\n\n## 0.8.8\n\nReleased: May 22nd, 2021\n\n* `NEW`: As stated a few times, there is a delay of 10-20 seconds in the Live Stream from UniFi Protect. 
There is not much this integration can do about it, but what we can do is, to disable the RTSP Stream, so that JPEG push is used instead. This gives an almost realtime experience, with the cost of NO AUDIO. As of this version you can disable the RTSP Stream from the Config menu.\n* `FIXED`: Issue #235, where the aspect ratio of the Doorbell image was wrong when displayed in Lovelace or in Notifications. Now the aspect ratio is read from the camera, so all cameras should have a correct ratio.\n\n\n## 0.8.7\n\nReleased: May 4th, 2021\n\n* `CHANGED`: Added **iot_class** to `manifest.json` as per HA requirements\n* `FIXED`: Ensure the event_object is not cleared too soon, when a smart detect event occurs. Issue #225. Thanks to @bdraco for the fix.\n* `CHANGED`: Updated README.md with information on how to turn on Debug logger. Thank you @blaines\n\n\n## 0.8.6\n\nReleased: April 25th, 2021\n\n* `FIXED`: If authentication failed during setup or startup of the Integration it did not return the proper boolean, and did not close the session properly.\n* `CHANGED`: Stop updates on stop event to prevent shutdown delay.\n* `CHANGED`: Updated several files to ensure compatability with 2021.5+ of Home Assistant. Thanks to @bdraco for the fix.\n\n## 0.8.5\n\nReleased: March 30th, 2021\n\n* `ADDED`: Have you ever wanted to silence your doorbell chime when you go to bed, or you put your child to sleep? - Now this is possible. A new service to enable/disable the attached Doorbel Chime is delivered with this release. The service is called `unifiprotect.set_doorbell_chime_duration` and takes two parameters: Entity ID of the Doorbell, Duration in milliseconds which is a number between 0 and 10000. 0 equals no chime. 300 is the standard for mechanical chimes and 10000 is only used in combination with a digital chime. The function does not really exist in the API, so this is a workaround. Let me know what values are best for on with the different chimes. 
You might still hear a micro second of a ding, but nothing that should wake anyone up. Fixing issue #211\n\n## 0.8.4\n\nReleased: March 18th, 2021\n\n* `FIXED`: Issues when activating Services that required an Integer as value, and using a Template to supply that value. Services Schemas have now been converted to use `vol.Coerce(int)` instead of just `int`.\n* `CHANGED`: All Services definitions have now been rewritten to use the new format introduced with the March 2021 Home Assistant release. **NOTE**: You might need to do a Hard Refresh of your browser to see the new Services UI.\n* `FIXED`: When using the switches or service to change recording mode for a camera, the recording settings where reset to default values. This is now fixed, so the settings you do in the App are not modfied by activating the Service or Recording mode switches.\n\n## 0.8.3\n\nReleased: March 3rd, 2021\n\n* `ADDED`: New service `unifiprotect.set_wdr_value` which can set the Wide Dynamic Range of a camera to an integer between 0 and 4. Where 0 is disabled and 4 is full.\n## 0.8.2\n\nReleased: February 4th, 2021\n\n* `FIXED`: Use the UniFi Servers MAc address as unique ID to ensure that it never changes. Previously we used the name, and that can be changed by the user. This will help with stability and prevent integrations from suddenly stop working if the name of the UDMP, UNVR4 or CKP was changed.\n* `FIXED`: Further enhance the fix applied in 0.8.1 to ensure the Integration loads even if the first update fails. Thanks to @bdraco for implementing this.\n* `FIXED`: Sometimes we would be missing event_on or event_ring_on if the websocket connected before the integration setup the binary sensor. We now always return the full processed data, eliminating this error. Another fix by @bdraco\n\n## 0.8.1\n\nReleased: January 28th, 2021\n\n* `FIXED`: The service `unifiprotect.set_status_light` did not function, as it was renamed in the IO module. 
This has now been fixed so that both the service and the Switch work again.\n* `FIXED`: Issue #181, Add a retry if the first update request fails on load of the Integration.\n\n## 0.8.0\n\nReleased: January 8th, 2021\n\nThis release adds support for the new Ubiquiti Floodlight device. If found on your Protect Server, it will add a new entity type `light`, that will expose the Floodlight as a light entity and add support for turning on and off, plus adjustment of brightness.\n\nThere will also be support for the PIR motion sensor built-in to the Floodlight, and you will be able to adjust PIR settings and when to detect motion.\n\nYou must have UniFi Protect V1.17.0-beta.10+ installed for Floodlight Support. Below that version, you cannot add the Floodlight as a device to UniFi Protect.\n\nTHANK YOU again to @bdraco for helping with some of the code and for extensive code review. Without you, a lot of this would not have been possible.\n\n\n* `ADDED`: New `light` entity for each Floodlight found. You can use the normal *light* services to turn on and off. Be aware that *brightness* in the Protect App only accepts a number from 1-6, so when you adjust brightness from Lovelace or the Service, the number here will be converted to a number between 1 and 6.\n* `ADDED`: A Motion Sensor is created for each Floodlight attached. It will trigger motion despite the state of the Light. It will however not re-trigger until the time set in the *Auto Shutoff Timer* has passed.\n* `ADDED`: New service `unifiprotect.light_settings`. Please see the README file for details on this Service.\n* `FIXED`: Missing \" in the Services description, prevented message to be displayed to the user. Thank you to @MarcJenningsUK for spotting and fixing this.\n* `CHANGED`: Bumped `pyunifiprotect` to 0.28.8\n\n**IMPORTANT**: With the official FW 2.0.24 for the CloudKey+ all UniFi Protect Servers are now migrated to UniFiOS. 
So as of this release, there will be no more development on the Non UniFiOS features. What is there will still be working, but new features will only be tested on UniFiOS. We only have access to very limited HW to test on, so it is not possible to maintain HW for backwards compatability testing.\n\n#### This release is tested on:\n\n*Tested* means that either new features work on the below versions or they don't introduce breaking changes.\n\n* CloudKey+ G2: FW Version 2.0.24 with Unifi Protect V1.17.0-beta.13\n* UDMP: FW Version 1.18.5 with Unifi Protect V1.17.0-beta.13\n\n## Release 0.7.1\n\nReleased: January 3rd, 2021\n\n* `ADDED`: New service `unifiprotect.set_zoom_position` to set the optical zoom level of a Camera. This only works on Cameras that support optical zoom.\n\n The services takes two parameters: **entity_id** of the camera, **position** which can be between 0 and 100 where 0 is no zoom and 100 is maximum zoom.\n\n A new attribue called `zoom_position` is added to each camera, showing the current zoom position. For cameras that does not support setting optical zoom, this will always be 0.\n\n#### This release is tested on:\n\n*Tested* means that either new features work on the below versions or they don't introduce breaking changes.\n\n* CloudKey+ G2: FW Version 2.0.24 with Unifi Protect V1.16.9\n* UDMP: FW Version 1.18.5 with Unifi Protect V1.17.0-beta.10\n## Release 0.7.0\n\nReleased: December 20th, 2020\n\n* `ADDED`: New service `unifiprotect.set_privacy_mode` to enable or disable a Privacy Zone, that blacks-out the camera. The effect is that you cannot view anything on screen. If recording is enabled, the camera will still record, but the only thing you will get is a black screen. 
You can enable/disable the microphone and set recording mode from this service, by specifying the values you see below.\nIf the camera already has one or more Privacy Zones set up, they will not be overwritten, and will still be there when you turn of this.\nUse this instead of physically turning the camera off or on.\n\n The services takes four parameters: **entity_id** of the camera, **privacy_mode** which can be true or false, **mic_level** which can be between 0 and 100 and **recording_mode** which can be never, motion, always or smart.\n\n Also a new attribute called `privacy_mode` is added to each camera, that shows if this mode is enabled or not. (Issue #159)\n\n* `CHANGED`: Some users are getting a warning that *verify_sll* is deprecated and should be replaced with *ssl*. We changed the pyunifiportect module to use `ssl` instead of `verify_sll` (Issue #160)\n\n* `ADDED`: Dutch translation to Config Flow is now added. Thank you to @copperek for doing it.\n\n* `FIXED`: KeyError: 'server_id' during startup of Unifi Protect. This error poped up occasionally during startup of Home Assistant. Thank you to @bdraco for fixing this. (Issue #147)\n\n* `FIXED`: From V1.17.x of UniFi Protect, Non Adopted Cameras would be created as camera.none and creating all kinds of errors. Now these cameras will be ignored, until they are properly adopted by the NVR. Thank you to @bdraco for helping fixing this.\n\n#### This release is tested on:\n\n*Tested* means that either new features work on the below versions or they don't introduce breaking changes.\n\n* CloudKey+ G2: FW Version 1.1.13 with Unifi Protect V1.13.37\n* UDMP: FW Version 1.18.4-3 with Unifi Protect V1.17.0-beta.6\n\n## Release 0.6.7\n\nReleased: December 15th, 2020\n\n`ADDED`: New attribute on each camera called `is_dark`. This attribute is true if the camera sees the surroundings as dark. 
If infrared mode is set to *auto*, then infrared mode would be turned on when this changes to true.\n\n`ADDED`: New Service `unifiprotect.set_mic_volume` to set the Sensitivity of the built-in Microphone on each Camera. Requires two parameters: *Camera Entity* and *level*, where level is a number between 0 and 100. If level is set to 0, the Camera will not react on Audio Events.\nOn each camera there is also now a new attribute called `mic_sensitivity` which displayes the current value.\n\nSee [README.md](https://github.com/briis/unifiprotect#create-input-slider-for-microphone-sensitivity) for instructions on how to setup an Input Slider in Lovelace to adjust the value.\n\n`CHANGED`: Updated the README.md documentation and added more information and a TOC.\n\n#### This release is tested on:\n\n*Tested* means that either new features work on the below versions or they don't introduce breaking changes.\n\n* CloudKey+ G2: FW Version 1.1.13 with Unifi Protect V1.13.37\n* UDMP: FW Version 1.18.3 with Unifi Protect V1.17.0-beta.6\n## Release 0.6.6\n\nWith the release of Unifi Protect V1.7.0-Beta 1, there is now the option of detecting Vehicles on top of the Person detection that is allready there. This is what Ubiquiti calls *Smart Detection*. Also you can now set recording mode to only look for Smart Detection events, meaning that motion is only activated if a person or a vehicle is detected on the cameras. Smart Detection requires a G4-Series camera and a UnifiOS device.\n\n**NOTE**: If you are not running Unifi Protect V1.17.x then the new features introduced here will not apply to your system. It has been tested on older versions of Unifi Protect, and should not break any existing installations.\n\n* **New** For all G4 Cameras, a new Switch will be created called *Record Smart*, where you can activate or de-active Smart Recording on the camera\n* **New** The service `unifiprotect.set_recording_mode` now has a new option for `recording_mode` called *smart*. 
This will turn on Smart Recording for the selected Camera. Please note this will only work on G4-Series cameras.\n* **Fix** When the G4 Doorbell disconnected or restarted, the Ring Sensor was triggered. This fix now ensures that this does not happen.\n\n### This release is tested on:\n\n*Tested* means that either new features work on the below versions or they don't introduce breaking changes.\n\n* CloudKey+ G2: FW Version 1.1.13 with Unifi Protect V1.13.37\n* UDMP: FW Version 1.18.3-5 with Unifi Protect V1.17.0-beta.1\n* UNVR: FW Version 1.3.15 with Unifi Protect V1.15.0\n\n## Release 0.6.5\n\n* **Hotfix** The recording of motion score and motion length got out of sync with the motion detections on/off state. With this fix, motion score and length are now updated together with the off state of the binary motion sensors. This was only an issue for Non UnifiOS devices (*CloudKey+ users with the latest original firmware version or below*).\n\n*This release is tested on*:\n* CloudKey+ G2: FW Version 1.1.13 with Unifi Protect V1.13.37\n* UDMP: FW Version 1.18.3-5 with Unifi Protect V1.16.8\n\n## Release 0.6.4\n\n* **Hotfix** for those who experience that motion sensors no longer work after upgrading to 0.6.3. Users affected will be those who are running a version of Unifi Protect that does not support SmartDetection.\n\n*This release is tested on*:\n* CloudKey+ G2: FW Version 1.1.13 with Unifi Protect V1.13.37\n* UDMP: FW Version 1.18.3-4 with Unifi Protect V1.16.7\n* UDMP FW Version 1.18.0 with Unifi Protect V1.14.11\n\n## Release 0.6.3\n\n@bdraco made some serious changes to the underlying IO module, that gives the following new features:\n\n* When running UnifiOS on the Ubiquiti Device, events are now fully constructed from Websockets.\n* Motion Events are now triggered regardless of the Recording Mode, meaning you can use your cameras as Motion Detectors. 
**Object detection** still requires that the Cameras recording mode is enabled (Motion or Always) as this information is only passed back when either of these are on.\n\n **BREAKING** If your Automations trigger on Motion Detection from a Camera, and you assume that Recording is enabled on a camera then you now need to make a check for that in the Condition Section of your automation.\n* Bumped pyunifiprotect to 0.24.3\n\n## Release 0.6.2\n\n* Changed text for Config Flow, to differ between UnifiOS and NON UNifiOS devices, instead of CloudKey and UDMP. This makes more sense, now that CloudKey+ also can run UnifiOS.\n* Changed the default port in Config Flow, from 7443 to 443, as this will be the most used port with the update to CloudKey+\n* Added a Debug option to Config Flow, so that we can capture the actual error message when trying to Authenticate.\n\n## Release 0.6.1\n@bdraco strikes again and fixed the following problems:\n\n* If the system is loaded, we miss events because the time has already passed.\n* If the doorbell is rung at the same time as motion, we don't see the ring event because the motion event obscures it.\n* If the hass clock and unifi clock are out of sync, we see the wrong events. (Still recommend to ensure that unifi and hass clocks are synchronized.)\n* The Doorbell is now correctly mapped as DEVICE_CLASS_OCCUPANCY.\n\n## Release 0.6.0\nThe Integration has now been rewritten to use **Websockets** for updating events, giving a lot of benefits:\n\n* Motion and doorbell updates should now happen right away\n* Reduces the amount of entity updates since we now only update cameras that change when we poll instead of them all.\n* Reduce the overall load on Home Assistant.\n\nUnfortunately, Websockets are **only available for UnifiOS** powered devices (UDMP & UNVR), so this will not apply to people running on the CloudKey. Here we will still need to do polling. 
Hopefully Ubiquity, will soon move the CloudKey to UnifiOS or add Websockets to this device also.\n\nAll Credits for this rewrite goes to:\n* @bdraco, who did the rewrite of both the IO module and the Integration\n* @adrum for the initial work on the Websocket support\n* @hjdhjd for reverse engineering the Websocket API and writing up the description\n\nThis could not have been done without all your work.\n\n### Other changes\n\n* When setting the LCD text on the Doorbell, this is now truncated to 30 Characters, as this is the maximum supported Characters. Thanks to @hjdhjd for documenting this.\n* Fixed an error were sometimes the External IP of the Server was used for the Internal Stream. Thanks to @adrum for fixing this.\n* Added Switch for changing HDR mode from Lovelace (Issue #128). This switch will only be created for Cameras that support HDR mode.\n* Added Switch for changing High FPS mode from Lovelace (Issue #128). This switch will only be created for Cameras that support High FPS mode.\n* Improved error handling.\n* Added German translation for Config Flow. Thank you @SeraphimSerapis\n\n## Release 0.5.8\nObject Detection was introduced with 1.14 of Unifi Protect for the UDMP/UNVR with the G4 series of Cameras. (I am unsure about the CloudKey+, but this release should not break on the CloudKey+ even without object detection). This release now adds a new Attribute to the Binary Motion Sensors that will display the object detected. I have currently only seen `person` being detected, but I am happy to hear if anyone finds other objects. See below on how this could be used.\nThis release also introduces a few new Services, as per user request. Please note that HDR and High FPS Services will require a version of Unifi Protect greater than 1.13.x. You will still be able to upgrade, but the functions might not work.\n\n* **New feature**: Turn HDR mode on or off, asked for in Issue #119. 
Only selected Cameras support HDR mode, but for those cameras that support it, you can now switch this on or off by calling the service: `unifiprotect.set_hdr_mode`. Please note that when you use this Service the stream will reset, so expect a drop out in the stream for a little while.\n* **New feature**: Turn High FPS video mode on or off. The G4 Cameras support High FPS video mode. With this release there is now a service to turn this on or off. Call the service `unifiprotect.set_highfps_video_mode`.\n* **New feature**: Set the LCD Message on the G4 Doorbell. There is now a new service called `unifiprotect.set_doorbell_lcd_message` from where you can set a Custom Text for the LCD. Closing Issue #104\n* **New attribute** `event_object` that will add the object detected when Motion occurs. It will contain the string `None Identified` if no specific object is detected. If a human is detected it will return `person` in this attribute, which you can test for in an automation. (See README.md for an example)\n\n## Release 0.5.6\nNew feature: Turn the Status Light on or off, asked for in Issue #102. With this release there is now the possibility to turn the Status light on each camera On or Off. This can be done in two ways:\n1. Use the service `unifiprotect.set_status_light`\n2. Use the new switch that will be created for each camera.\n\nDisabled the Websocket update, that was introduced in 0.5.5, as it is currently not being used, and caused error messages when HA was closing down, due to not being stopped.\n\n\n## Release 0.5.5\n\nThe latest beta of Unifi Protect includes the start of Ubiquiti's version of AI, and introduces a concept called Smart Detect, which currently can identify People on specific Camera models. 
When this is enabled on a Camera, the event type changes from a *motion* event to a *smartdetect* event, and as such these cameras will no longer trigger motion events.\n\nThis release is a quick fix for the people who have upgraded to the latest Unifi Protect Beta version. I will later introduce more Integration features based on these new Unifi Protect features.\n\n## Release 0.5.4\n\nA more permanent fix for Issue #88, where the snapshot images did not always get the current image. The API call has now been modified, so that it forces a refresh of the image when pulling it from the camera. Thank you to @rajeevan for finding the solution.\nIf you installed release 0.5.3 AND enabled *Anonymous Snapshots* you can now deselect that option again, and you will not have to enable the Anonymous Snapshot on each Camera.\n\n## Release 0.5.3\n\nFix for Issue #88 - The function for saving a Camera Snapshot works fine for most people, but it turns out that the image it saves is only refreshed every 10-15 seconds. There might be a way to force a new image, but as the Protect API is not documented I have not found this yet. If you need the guaranteed latest image from the Camera, there is a way around it, and that is to enable Anonymous Snapshots on each Camera, as this function always gets the latest image directly from the Camera.\n\nThis version introduces a new option where you can enable or disable anonymous snapshots in the Unifi Integration. If enabled, it will use a different function than if disabled, but it will only work if you login to each of your Cameras and enable the *Anonymous Snapshot*.\n\nTo use the Anonymous Snapshot, after this update has been installed, do the following:\n\n1. Login to each of your Cameras by going to http://CAMERA_IP. The Username is *ubnt* and the Camera Password can be found in Unifi Protect under *Settings*.\n2. 
If you have never logged in to the Camera before, it might take you through a Setup procedure - just make sure to keep it in *Unifi Video* mode, so that it is managed by Unifi Protect.\n3. Once you are logged in, you will see an option on the Front page for enabling Anonymous Snapshots. Make sure this is checked, and then press the *Save Changes* button.\n4. Repeat step 3 for each of your Cameras.\n5. Now go to the Integrations page in Home Assistant. Find the Unifi Protect Widget and press options.\n6. Select the checkbox *Use Anonymous Snapshots* and press *Submit*\n\nNow the Unfi Protect Integration will use the direct Snapshot from the Camera, without going through Unfi Protect first.\n\n## Release 0.5.2\n\n* Added exception handling when the http connection is dropped on a persistent connection. The Integration will now throw a `ConfigEntryNotReady` instead and retry.\n\n## Release 0.5.1\n\nTwo fixes are implemented in this release:\n1. Basic data for Cameras were pulled at the same interval as the Events for Motion and Doorbell. This caused an unnecessary load on the CPU. Now the base Camera data is only pulled every 60 seconds, to minimize that load.\n2. A user reported that when having more than 20 Cameras attached, the Binary Sensors stayed in an unavailable state. This was caused by the fact that a poll interval of 2 seconds for the events, was not enough to go through all cameras, so the state was never reported back to Home Assistant. With this release there is now an option on the *Integration Widget* to change the Scan Interval to a value between 2 and 30 seconds. You **ONLY** have to make this adjustment if you experience that the sensors stay unavailable - so typically if you have many Cameras attached. Default is still 2 seconds.\n3. The same user mentioned above, is running Unifi Protect on the new NVR4 device, and the Integration seems to work fine on this new platform. 
I have not heard from anyone else on this, but at least one user has success with that.\n4. Bumped pyunifiprotect to v0.16 which fixes the problem mentioned in point 1 above. Thank you to @bdraco for the fix.\n\n## Release 0.5 - Fully Integration based\n\nThe Beta release 0.4 has been out for a while now, and I belive we are at a stage where I will release it officially, and it will be called Version 0.5.\n\nAfter the conversion to use all the Async Libraries, it became obvious to also move away from *Yaml Configuration*, to the fully UI based Integration. As I wrote in the Tester Notes, I know there are some people with strong feelings about this, but I made the decision to make the move, and going forward **only** to support this way of adding Unifi Protect to Home Assistant.\n\n### ***** BREAKING CHANGES *****\nOnce setup, the base functionality will be the same as before, with the addition of a few minor changes. But behind the scene there are many changes in all modules, which also makes this a lot more ready for becoming an official Integration in Home Assistant Core.\n\nI want to send a BIG THANK YOU to @bdraco who has made a lot of code review, and helped me shape this to conform to Home Assistant standards. I learned so much from your suggestions and advice, so thank you!\n\nHere are the Breaking changes:\n\n1. Configuration can only be done from the *Integration* menu on the *Configuration* tab. So you will have to remove all references to *unifiprotect* from your configuration files.\n2. All entities will get the `unifiprotect_` prefix removed from them, so you will have to change automations and scripts where you use these entities. This is done to make sure that entities have a Unique Id and as such can be renamed from the UI as required by Home Assistant. 
I will give a 99% guarantee, that we do not need to change entity names again.\n\n### Upgrading and Installing\nIf you have not used Unifi Protect before, go to step 4.\n\nIf you are already runing a version of *Unifi Protect* with version 0.3.x or lower:\n\n1. Remove the previous installation\n * If you have installed through HACS, then go to HACS and remove the Custom Component\n * If you manually copied the files to your system, go to the `custom_components` directory and delete the `unifiprotect` directory.\n* Edit `configuration.yaml` and remove all references to *unifiprotect*. Some have split the setup in to multiple files, so remember to remove references to unifiprotect from these files also.\n* I recommend to restart Home Assistant at this point, but in theory it should not be necessary.\n\n4. Install the new version\n * If you use HACS, go there, and add Unifi Protect V0.5 or later.\n * If you do it manually, go to [Github](https://github.com/briis/unifiprotect/tree/master/custom_components/unifiprotect) and copy the files to `custom_components/unifiprotect`. Remember to include the `translations` directory and the files in here.\n* Restart Home Assistant\n* Now go to the *Integration* menu on the *Configuration* tab, and search for *Unifi Protect*. If it does not show up, try and clear your browser cache, and refresh your browser.\n* From there, it should be self explanatory.\n\nI will leave Release 0.3.4 as an option in HACS, so if you want to stick with the Yaml version, feel free to do so, but please note that I will not make any changes to this version going forward.\n\n## Version 0.3.2\n\n**NOTE** When upgrading Home Assistant to +0.110 you will receive warnings during startup about deprecated BinaryDevice and SwitchDevice. There has been a change to SwitchEntity and BinaryEntity in HA 0.110. 
For now this will not be changed, as not everybody is on 0.110 and if changing it this Component will break for users not on that version as it is not backwards compatible.\n\nWith this release the following items are new or have been fixed:\n\n* **BREAKING** Attribute `last_motion` has been replaced with `last_tripped_time` and attribute `motion_score` has been replaced with `event_score`. So if you use any of these attributes in an automation you will need to change the automation.\n* **NEW** There is now support for the Unifi Doorbell. If a Doorbell is discovered there will be an extra Binary Sensor created for each Doorbell, so a Doorbell Device will have both a Motion Binary Sensor and a Ring Binary Sensor. The later, turns True if the doorbell is pressed.<br>\n**BREAKING** As part of this implementation, it is no longer possible to define which binary sensors to load - all motion and doorbell *binary sensors* found are loaded. So the `monitored_condition` parameter is removed from the configuration for `binary_sensor` and needs to be removed from your `configuration.yaml` file if present.\n* **FIX** The Switch Integration was missing a Unique_ID\n" }, { "alpha_fraction": 0.7538905143737793, "alphanum_fraction": 0.759942352771759, "avg_line_length": 48.57143020629883, "blob_id": "f5cb62755ed81bdf96850a97317132be774544cc", "content_id": "6c90535ba760f9a1381819acdf2e84d3895d7ae5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3470, "license_type": "permissive", "max_line_length": 311, "num_lines": 70, "path": "/unifiprotect.markdown", "repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "---\ntitle: Ubiquiti UniFi Protect\ndescription: Instructions on how to configure UniFi Protect integration by Ubiquiti.\nha_category:\n - Hub\n - Camera\n - Light\n - Number\n - Sensor\n - Select\n - Switch\nha_release: 2021.11\nha_iot_class: Local Push\nha_config_flow: true\nha_quality_scale: 
platinum\nha_codeowners:\n - '@briis'\nha_domain: unifiprotect\nha_ssdp: true\nha_platforms:\n - camera\n - binary_sensor\n - sensor\n - light\n - switch\n - select\n - number\n---\n\nThe [UniFi Protect Integration](https://ui.com/camera-security) by [Ubiquiti Networks, inc.](https://www.ui.com/), adds support for retrieving Camera feeds and Sensor data from a UniFi Protect installation on either a Ubiquiti CloudKey+, Ubiquiti UniFi Dream Machine Pro or UniFi Protect Network Video Recorder.\n\nThere is support for the following device types within Home Assistant:\n* Camera\n * A camera entity for each camera found on the NVR device will be created\n* Sensor\n * A sensor for each camera found will be created. This sensor will hold the current recording mode.\n * A sensor for each Floodlight device found will be created. This sensor will hold the status of when light will turn on.\n* Binary Sensor\n * One to two binary sensors will be created per camera found. There will always be a binary sensor recording if motion is detected per camera. If the camera is a doorbell, there will also be a binary sensor created that records if the doorbell is pressed.\n* Switch\n * For each camera supporting High Dynamic Range (HDR) a switch will be created to turn this setting on or off.\n * For each camera supporting High Frame Rate recording a switch will be created to turn this setting on or off.\n * For each camera a switch will be created to turn the status light on or off.\n* Light\n * A light entity will be created for each UniFi Floodlight found. This works as a normal light entity, and has a brightness scale also.\n* Select\n * For each Camera found there will be a Select entity created from where you can set the cameras recording mode.\n * For each Doorbell found, there will be a Select entity created that makes it possible to set the LCD Text. 
If you make a list of Texts in the Integration configuration, you can both set the standard texts and custom text that you define here.\n * For each Camera found there will be a Select entity created from where you can set the behavior of the Infrared light on the Camera\n * For each Viewport found, there will be a Select entity from where you change the active View being displayed on the Viewport.\n * For each Floodlight device there be a Select entity to set the behavior of the built-in motion sensor.\n* Number\n * For each camera supporting WDR, a number entity will be setup to set the active value.\n * For each camera a number entity will be created from where you can set the microphone sensitivity level.\n * For each camera supporting Optical Zoom, a number entity will be setup to set the zoom position.\n\n\n{% include integrations/config_flow.md %}\n\n### Hardware\n\nThis Integration supports all Ubiquiti Hardware that can run UniFi Protect. Currently this includes:\n\n* UniFi Protect Network Video Recorder (**UNVR**)\n* UniFi Dream Machine Pro (**UDMP**)\n* UniFi Cloud Key Gen2 Plus (**CKGP**) Minimum required Firmware version is **2.0.24** Below that this Integration will not run on a CloudKey+\n\n### Software Versions\n* UniFi Protect minimum version is **1.20.0**\n* Home Assistant minimum version is **2021.9.0**\n" }, { "alpha_fraction": 0.6169264912605286, "alphanum_fraction": 0.6169264912605286, "avg_line_length": 20.380952835083008, "blob_id": "4f057a1bf237478527dda729d0d3942c26d885d3", "content_id": "14a64798ca232ec3774341940706ecc985c64460", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 449, "license_type": "permissive", "max_line_length": 48, "num_lines": 21, "path": "/custom_components/unifiprotect/utils.py", "repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "\"\"\"UniFi Protect Integration utils.\"\"\"\nfrom __future__ import annotations\n\nfrom enum import 
Enum\nfrom typing import Any\n\n\ndef get_nested_attr(obj: Any, attr: str) -> Any:\n \"\"\"Fetch a nested attribute.\"\"\"\n attrs = attr.split(\".\")\n\n value = obj\n for key in attrs:\n if not hasattr(value, key):\n return None\n value = getattr(value, key)\n\n if isinstance(value, Enum):\n value = value.value\n\n return value\n" }, { "alpha_fraction": 0.7553478479385376, "alphanum_fraction": 0.7679927945137024, "avg_line_length": 68.95803833007812, "blob_id": "8e76fed85dfd794978a2b7e45900f59173428276", "content_id": "f686915841d4acf4bbeb56cf6c61318acee10d5f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20016, "license_type": "permissive", "max_line_length": 398, "num_lines": 286, "path": "/README.md", "repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "# // UniFi Protect for Home Assistant\n\n\n## THIS REPOSITORY IS NOW ARCHIEVED AND READONLY.\nThe `unifiprotect` integration is now in Home Assistant core, so no more updates will be made to this repository. Please visit the official documentation for [UniFi Protect](https://www.home-assistant.io/integrations/unifiprotect/) to read more.\n\n-----\n## ⚠️ ⚠️ WARNING ABOUT Home Assistant v2022.2\nThe `unifiprotect` integration will be in Home Assistant core v2022.2. If you are running **0.10.x or older** of the HACS integration, **do not install v2022.2.x of Home Assistant core**.\n\nIf you are running 0.11.x or the 0.12.0, you should be safe to delete the HACS version as part of your upgrade. The 0.11.x branch is designed to be compatible with the 0.12.0-beta and the HA core version. The latest version of 0.12.0-beta will be the version of `unifiprotect` in HA core in v2022.0.\n\nThis repo is now **deprecated** in favor of the Home Assistant core version. 
This repo will be archived and removed from HACS after the 2022.4 release of Home Assistant.\n\n### Reporting Issues\n\nWe have disable reporting issues to the HACS Github repo for the `unifiprotect` integration. If you have an issue you would like to report for the `unifiprotect` integration, please make you are running the HA core version of the integration provided by 2022.2.0 or new and then report your issue on the [HA core repo](https://github.com/home-assistant/core/issues/new/choose). \n\nIf you would still like to discuss the HACS version of the `unifiprotect` integration, feel free to use the [dicussions section](https://github.com/briis/unifiprotect/discussions) or the [HA Community forums thread](https://community.home-assistant.io/t/custom-component-unifi-protect/158041/865).\n\n### Migration to HA Core Version Steps\n\nIf you have Smart Sensor devices and you are **not** running `0.12.0-beta10` or newer, it is recommended you just delete your UniFi Protect integration config and re-add it. If you do not have Smart Sensor devices, you can migrate to the Home Assistant core version by following the steps below:\n\n1. Upgrade to the 0.12.0 version for the HACS unifiprotect integration and restart Home Assistant.\n2. Remove your HACS `unifiprotect` integration from HACS (do not remove your `unifiprotect` config entry). It is safe to ignore the warning about needing to remove your config first.\n3. Do *not* restart HA yet.\n4. Upgrade to Home Assistant 2022.2.x\n\nYou **must** remove the HACS integration efore upgrading to 2022.2.0 first to prevent a conflicting version of `pyunifiprotect` from being installed.\n\n### Differences between HACS version 0.12.0 and HA 2022.2.0b1 version:\n\n#### HACS Only\n\n* Migration code for updating from `0.10.x` or older still exists; this code has been _removed_ in the HA core version\n\n#### HA Core Only\n\n* Full language support. 
All of the languages HA core supports via Lokalise has been added to the ingration.\n\n* Auto-discovery. If you have a Dream machine or a Cloud Key/UNVR on the same VLAN, the UniFi Protect integration will automatically be discovered and prompted for setup.\n\n* UP Doorlock support. The HA core version has full support for the newly release EA UP Doorlock.\n\n-----\n\n![GitHub release (latest by date)](https://img.shields.io/github/v/release/briis/unifiprotect?style=flat-square) [![hacs_badge](https://img.shields.io/badge/HACS-Default-orange.svg?style=flat-square)](https://github.com/custom-components/hacs) [![](https://img.shields.io/badge/COMMUNITY-FORUM-success?style=flat-square)](https://community.home-assistant.io/t/custom-component-unifi-protect/158041)\n\nThe UniFi Protect Integration adds support for retrieving Camera feeds and Sensor data from a UniFi Protect installation on either an Ubiquiti CloudKey+, Ubiquiti UniFi Dream Machine Pro or UniFi Protect Network Video Recorder.\n\nThere is support for the following device types within Home Assistant:\n* Camera\n * A camera entity for each camera channel and RTSP(S) combination found on the NVR device will be created\n* Sensor\n * **Cameras**: (only for cameras with Smart Detections) Currently detected object\n * **Sensors**: Sensors for battery level, light level, humidity and temperate\n * **All Devices** (Disabled by default): a sensor for uptime, BLE signal (only for bluetooth devices), link speed (only for wired devices), WiFi signal (only for WiFi devices)\n * **Cameras** (Disabled by default): sensors for bytes transferred, bytes received, oldest recording, storage used by camera recordings, write rate for camera recordings\n * **Doorbells** (Disabled by default, requires UniFi Protect 1.20.1+) current voltage sensor\n * **NVR** (Disabled by default): sensors for uptime, CPU utilization, CPU temp, memory utilization, storage utilization, percent distribution of timelapse, continuos, and detections 
video on disk, percentage of HD video, 4K video and free space of disk, estimated recording capacity\n* Binary Sensor\n * **Cameras** and **Flood Lights**: sensors for if it is dark, if motion is detected\n * **Doorbells**: sensor if the doorbell is currently being rung\n * **Sensors**: sensors for if the door is open, battery is low and if motion is detected\n * **NVR** (Disabled by default): a sensor for the disk health for each disk\n * **NOTE**: The disk numbers here are _not guaranteed to match up to the disk numbers shown in UniFiOS_\n* Switch\n * **Cameras**: switches to enabled/disable status light, HDR, High FPS mode, \"Privacy Mode\", System Sounds (if the camera has speakers), toggles for the Overlay information, toggles for smart detections objects (if the camera has smart detections)\n * **Privacy Mode**: Turning on Privacy Mode adds a privacy zone that blacks out the camera so nothing can be seen, turn microphone sensitivity to 0 and turns off recording\n * **Flood Lights**: switch to enable/disable status light\n * **All Devices** (Disabled by default): Switch to enable/disable SSH access\n* Light\n * A light entity will be created for each UniFi Floodlight found. 
This works as a normal light entity, and has a brightness scale also.\n* Select\n * **Cameras**: selects to choose between the recording mode and the current infrared settings (if the camera has IR LEDs)\n * **Doorbells**: select to choose between the currently disable text options on the LCD screen\n * **Flood Lights**: select to choose between the light turn on mode and the paired camera (used for motion detections)\n * **Viewports**: select to choose between the currently active Liveview display on the Viewport\n* Number\n * **Cameras**: number entities for the current WDR setting (only if the camera does not have HDR), current microphone sensitivity level, current optical zoom level (if camera has optical zoom),\n * **Doorbells**: number entity for the current chime duration\n * **Flood Lights**: number entities for the current motion sensitivity level and auto-shutdown duration after the light triggers on\n* Media Player\n * A media player entity is added for any camera that has speakers that allow talkback\n* Button\n * A button entity is added for every adoptable device (anything except the UniFiOS console) to allow you to reboot the device\n\nIt supports both regular Ubiquiti Cameras and the UniFi Doorbell. Camera feeds, Motion Sensors, Doorbell Sensors, Motion Setting Sensors and Switches will be created automatically for each Camera found, once the Integration has been configured.\n\n## Table of Contents\n\n1. [UniFi Protect Support](#unifi-protect-support)\n2. [Hardware Support](#hardware-support)\n3. [Prerequisites](#prerequisites)\n4. [Installation](#installation)\n5. [UniFi Protect Services](#special-unifi-protect-services)\n6. [UniFi Protect Events](#unifi-protect-events)\n7. 
[Automating Services](#automating-services)\n * [Send a notification when the doorbell is pressed](#send-a-notification-when-the-doorbell-is-pressed)\n * [Person Detection](#automate-person-detection)\n * [Input Slider for Doorbell Chime Duration](#create-input-slider-for-doorbell-chime-duration)\n8. [Enable Debug Logging](#enable-debug-logging)\n9. [Contribute to Development](#contribute-to-the-project-and-developing-with-a-devcontainer)\n\n## UniFi Protect Support\n\nIn general, stable/beta version of this integration mirror stable/beta versions of UniFi Protect. That means:\n\n**Stable versions of this integration require the latest stable version of UniFi Protect to run.**\n\n**Beta versions / `master` branch of this integration require the latest beta version of UniFi Protect to run (or the latest stable if there is no beta)**\n\nWe try our best to avoid breaking changes so you may need to use older versions of UniFi Protect with newer versions of the integration. Just keep in mind, we may not be able to support you if you do.\n\n## Docs for Old Versions\n\nIf you are not using the latest beta of the integration, you can view old versions of this README at any time in GitHub at `https://github.com/briis/unifiprotect/tree/{VERSION}`. Example, docs for v0.9.1 can be found at [https://github.com/briis/unifiprotect/tree/v0.9.1](https://github.com/briis/unifiprotect/tree/v0.9.1)\n\n## Minimal Versions\n\nAs of v0.10 of the integration, the following versions of HA and UniFi Protect are _required_ to even install the integration:\n\n* UniFi Protect minimum version is **1.20.0**\n* Home Assistant minimum version is **2021.11.0**\n\n## Hardware Support\n\nThis Integration supports all UniFiOS Consoles that can run UniFi Protect. 
Currently this includes:\n\n* UniFi Protect Network Video Recorder (**UNVR**)\n* UniFi Protect Network Video Recorder Pro (**UNVRPRO**)\n* UniFi Dream Machine Pro (**UDMP**)\n* UniFi Cloud Key Gen2 Plus (**CKGP**) firmware version v2.0.24+\n\nUbiquity released V2.0.24 as an official firmware release for the CloudKey+, and it is recommended that people upgrade to this UniFiOS based firmware for their CloudKey+, as this gives a much better realtime experience.\n\nCKGP with Firmware V1.x **do NOT run UniFiOS**, you must upgrade to firmware v2.0.24 or newer.\n\n**NOTE**: If you are still running a version of UniFi Protect without a UniFiOS Console, you can use a V0.8.x as it is the last version fully supported by NON UniFiOS devices. However, please note NON UniFiOS devices are not supported by us anymore.\n\n## Prerequisites\n\nBefore you install this Integration you need to ensure that the following two settings are applied in UniFi Protect:\n\n1. **Local User**\n * Login to your *Local Portal* on your UniFiOS device, and click on *Users*\n * In the upper right corner, click on *Add User*\n * Click *Add Admin*, and fill out the form. Specific Fields to pay attention to:\n * Role: Must be *Limited Admin*\n * Account Type: *Local Access Only*\n * CONTROLLER PERMISSIONS - Under UniFi Protect, select Administrators.\n * Click *Add* in at the bottom Right.\n\n **HINT**: A few users have reported that they had to restart their UDMP device after creating the local user for it to work. So if you get some kind of *Error 500* when setting up the Integration, try restart the UDMP.\n\n ![ADMIN_UNIFIOS](https://github.com/briis/unifiprotect/blob/master/images/screenshots/unifi_os_admin.png)\n\n2. **RTSP Stream**\n\n The Integration uses the RTSP Stream as the Live Feed source, so this needs to be enabled on each camera. With the latest versions of UniFi Protect, the stream is enabled per default, but it is recommended to just check that this is done. 
To check and enable the the feature\n * open UniFi Protect and click on *Devices*\n * Select *Manage* in the Menu bar at the top\n * Click on the + Sign next to RTSP\n * Enable minimum 1 stream out of the 3 available. UniFi Protect will select the Stream with the Highest resolution\n\n## Installation\n\nThis Integration is part of the default HACS store. Search for *unifi protect* under *Integrations* and install from there. After the installation of the files you must restart Home Assistant, or else you will not be able to add UniFi Protect from the Integration Page.\n\nIf you are not familiar with HACS, or haven't installed it, I would recommend to [look through the HACS documentation](https://hacs.xyz/), before continuing. Even though you can install the Integration manually, I would recommend using HACS, as you would always be reminded when a new release is published.\n\n**Please note**: All HACS does, is copying the needed files to Home Assistant, and placing them in the right directory. To get the Integration to work, you now need to go through the steps in the *Configuration* section.\n\nBefore you restart Home Assistant, make sure that the stream component is enabled. Open `configuration.yaml` and look for *stream:*. If not found add `stream:` somewhere in the file and save it.\n\n## Configuration\n\nTo add *UniFi Protect* to your Home Assistant installation, go to the Integrations page inside the configuration panel, click on `+ ADD INTEGRATION`, find *UniFi Protect*, and add your UniFi Protect server by providing the Host IP, Port Number, Username and Password.\n\n**Note**: If you can't find the *UniFi Protect* integration, hard refresh your browser, when you are on the Integrations page.\n\nIf the UniFi Protect Server is found on the network it will be added to your installation. 
After that, you can add more UniFi Protect Servers, should you have more than one installed.\n\n**You can only add UniFi Protect through the Integration page, Yaml configuration is no longer supported.**\n\n### MIGRATING FROM CLOUDKEY+ V1.x\n\nWhen you upgrade your CloudKey+ from FW V1.x to 2.x, your CK wil move to UniFiOS as core operating system. That also means that where you previously used port 7443 you now need to use port 443. There are two ways to fix this:\n\n* Delete the UniFi Protect Integration and re-add it, using port 443.\n* Edit the file `.storage/core.config_entries` in your Home Assistant instance. Search for UniFi Protect and change port 7443 to 443. Restart Home Assistant. (Make a backup first)\n\n### CONFIGURATION VARIABLES\n\n**host**:<br>\n *(string)(Required)*<br>\n Type the IP address of your *UniFi Protect NVR*. Example: `192.168.1.1`\n\n**port**:<br>\n *(int)(Optional)*<br>\n The port used to communicate with the NVR. Default is 443.\n\n**username**:<br>\n *(string)(Required)*<br>\n The local username you setup under the *Prerequisites* section.\n\n**password**:<br>\n *(string)(Required)*<br>\n The local password you setup under the *Prerequisites* section.\n\n**verify ssl**:<br>\n *(bool)(Required)*<br>\n If your UniFi Protect instance has a value HTTPS cert, you can enforce validation of the cert\n\n**deactivate rtsp stream**<br>\n *(bool)Optional*<br>\n If this box is checked, the camera stream will not use the RTSP stream, but instead jpeg push. This gives a realtime stream, but does not include Audio.\n\n**realtime metrics**<br>\n *(bool)Optional*<br>\n Enable processing of all Websocket events from UniFi Protect. This enables realtime updates for many sensors that are disabled by default. If this is disabled, those sensors will only update once every 15 minutes. 
**Will greatly increase CPU usage**, do not enable unless you plan to use it.\n\n**override connection host**\n *(bool)Optional*<br>\n By default uses the connection host provided by your UniFi Protect instance for connecting to cameras for RTSP(S) streams. If you would like to force the integration to use the same IP address you provided above, set this to true.\n\n## Special UniFi Protect Services\n\nThe Integration adds specific *UniFi Protect* services and supports the standard camera services. Below is a list of the *UniFi Protect* specific services:\n\nService | Parameters | Description\n:------------ | :------------ | :-------------\n`unifiprotect.add_doorbell_text` | `device_id` - A device for your current UniFi Protect instance (in case you have multiple).<br>`message` - custom message text to add| Adds a new custom message for Doorbells.\\*\n`unifiprotect.remove_doorbell_text` | `device_id` - A device for your current UniFi Protect instance (in case you have multiple).<br>`message` - custom message text to remove| Remove an existing custom message for Doorbells.\\*\n`unifiprotect.set_default_doorbell_text` | `device_id` - A device for your current UniFi Protect instance (in case you have multiple).<br>`message` - default text for doorbell| Sets the \"default\" text for when a message is reset or none is set.\\*\n`unifiprotect.set_doorbell_message` | `device_id` - A device for your current UniFi Protect instance (in case you have multiple).<br>`message` - text for doorbell| Dynamically sets text for doorbell.\\*\\*\n`unifiprotect.profile_ws_messages` | `device_id` - A device for your current UniFi Protect instance (in case you have multiple).<br>`duration` - how long to provide| Debug service to help profile the processing of Websocket messages from UniFi Protect.\n\n\\*: Adding, removing or changing a doorbell text option requires you to restart your Home Assistant instance to be able to use the new ones. 
This is a limitation of how downstream entities and integrations subscribe to options for select entities. They cannot be dynamic.\n\n\\*\\*: The `unifiprotect.set_doorbell_message` service should _only_ be used for setting the text of your doorbell dynamically. i.e. if you want to set the current time or outdoor temp on it. If you want to set a static message, use the select entity already provided. See the [Dynamic Doorbell](#dynamic-doorbell-messages) blueprint for an example.\n\n## Automating Services\n\nAs part of the integration, we provide a couple of blueprints that you can use or extend to automate stuff.\n\n### Doorbell Notifications\n\n[![Open your Home Assistant instance and show the blueprint import dialog with a specific blueprint pre-filled.](https://my.home-assistant.io/badges/blueprint_import.svg)](https://my.home-assistant.io/redirect/blueprint_import/?blueprint_url=https%3A%2F%2Fraw.githubusercontent.com%2Fbriis%2Funifiprotect%2Fmaster%2Fblueprints%2Fautomation%2Funifiprotect%2Fpush_notification_doorbell_event.yaml)\n\n### Motion Notifications\n\n[![Open your Home Assistant instance and show the blueprint import dialog with a specific blueprint pre-filled.](https://my.home-assistant.io/badges/blueprint_import.svg)](https://my.home-assistant.io/redirect/blueprint_import/?blueprint_url=https%3A%2F%2Fraw.githubusercontent.com%2Fbriis%2Funifiprotect%2Fmaster%2Fblueprints%2Fautomation%2Funifiprotect%2Fpush_notification_motion_event.yaml)\n\n### Smart Detection Notifications\n\n[![Open your Home Assistant instance and show the blueprint import dialog with a specific blueprint pre-filled.](https://my.home-assistant.io/badges/blueprint_import.svg)](https://my.home-assistant.io/redirect/blueprint_import/?blueprint_url=https%3A%2F%2Fraw.githubusercontent.com%2Fbriis%2Funifiprotect%2Fmaster%2Fblueprints%2Fautomation%2Funifiprotect%2Fpush_notification_smart_event.yaml)\n\n### Dynamic Doorbell Messages\n\n[![Open your Home Assistant instance and show the 
blueprint import dialog with a specific blueprint pre-filled.](https://my.home-assistant.io/badges/blueprint_import.svg)](https://my.home-assistant.io/redirect/blueprint_import/?blueprint_url=https%3A%2F%2Fraw.githubusercontent.com%2Fbriis%2Funifiprotect%2Fmaster%2Fblueprints%2Fautomation%2Funifiprotect%2Fdynamic_doorbell.yaml)\n\n### Enable Debug Logging\n\nIf logs are needed for debugging or reporting an issue, use the following configuration.yaml:\n\n```yaml\nlogger:\n default: error\n logs:\n pyunifiprotect: debug\n custom_components.unifiprotect: debug\n```\n\n### CONTRIBUTE TO THE PROJECT AND DEVELOPING WITH A DEVCONTAINER\n\n1. Fork and clone the repository.\n\n2. Open in VSCode and choose to open in devcontainer. Must have VSCode devcontainer prerequisites.\n\n3. Run the command container start from VSCode terminal\n\n4. A fresh Home Assistant test instance will install and will eventually be running on port 9123 with this integration running\n\n5. When the container is running, go to http://localhost:9123 and the add UniFi Protect from the Integration Page.\n" }, { "alpha_fraction": 0.6734979152679443, "alphanum_fraction": 0.6734979152679443, "avg_line_length": 34.196720123291016, "blob_id": "cbbfbc1327e70c9ab4689f5aa72d4c3b9f96fd23", "content_id": "45e2daa240cb6b88416dba7885a1ee9cb4c14f71", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2147, "license_type": "permissive", "max_line_length": 82, "num_lines": 61, "path": "/custom_components/unifiprotect/models.py", "repo_name": "briis/unifiprotect", "src_encoding": "UTF-8", "text": "\"\"\"The unifiprotect integration models.\"\"\"\nfrom __future__ import annotations\n\nfrom collections.abc import Callable, Coroutine\nfrom dataclasses import dataclass\nimport logging\nfrom typing import Any\n\nfrom homeassistant.helpers.entity import EntityDescription\nfrom pyunifiprotect.data import NVR, ProtectAdoptableDeviceModel\n\nfrom .utils import 
get_nested_attr\n\n_LOGGER = logging.getLogger(__name__)\n\n\n@dataclass\nclass ProtectRequiredKeysMixin:\n \"\"\"Mixin for required keys.\"\"\"\n\n ufp_required_field: str | None = None\n ufp_value: str | None = None\n ufp_value_fn: Callable[[ProtectAdoptableDeviceModel | NVR], Any] | None = None\n ufp_enabled: str | None = None\n\n def get_ufp_value(self, obj: ProtectAdoptableDeviceModel | NVR) -> Any:\n \"\"\"Return value from UniFi Protect device.\"\"\"\n if self.ufp_value is not None:\n return get_nested_attr(obj, self.ufp_value)\n if self.ufp_value_fn is not None:\n return self.ufp_value_fn(obj)\n\n # reminder for future that one is required\n raise RuntimeError( # pragma: no cover\n \"`ufp_value` or `ufp_value_fn` is required\"\n )\n\n def get_ufp_enabled(self, obj: ProtectAdoptableDeviceModel | NVR) -> bool:\n \"\"\"Return value from UniFi Protect device.\"\"\"\n if self.ufp_enabled is not None:\n return bool(get_nested_attr(obj, self.ufp_enabled))\n return True\n\n\n@dataclass\nclass ProtectSetableKeysMixin(ProtectRequiredKeysMixin):\n \"\"\"Mixin to for settable values.\"\"\"\n\n ufp_set_method: str | None = None\n ufp_set_method_fn: Callable[\n [ProtectAdoptableDeviceModel, Any], Coroutine[Any, Any, None]\n ] | None = None\n\n async def ufp_set(self, obj: ProtectAdoptableDeviceModel, value: Any) -> None:\n \"\"\"Set value for UniFi Protect device.\"\"\"\n assert isinstance(self, EntityDescription)\n _LOGGER.debug(\"Setting %s to %s for %s\", self.name, value, obj.name)\n if self.ufp_set_method is not None:\n await getattr(obj, self.ufp_set_method)(value)\n elif self.ufp_set_method_fn is not None:\n await self.ufp_set_method_fn(obj, value)\n" } ]
11
marina-kantar/Python-for-Everybody
https://github.com/marina-kantar/Python-for-Everybody
81a661593e7598063acbae58f6769237dc218763
a06967d0fe85c5527047933d09caa12a09e3e023
f0aa77e982837fafe48d16abaa049c5a116da7a0
refs/heads/master
"2022-07-14T00:29:44.438466"
"2020-05-13T14:35:01"
"2020-05-13T14:35:01"
260,200,286
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5737051963806152, "alphanum_fraction": 0.5816733241081238, "avg_line_length": 24.100000381469727, "blob_id": "86d25a14c38143d3f24451651f8f4d8007701cb0", "content_id": "3829934304e7398747d6a6a93c5158080d073718", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 251, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/mail_regex.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import re\nname = input ('Enter file name: ')\nif len(name) <= 1 : name = 'mbox-short.txt'\ny = list()\nhandle = open(name)\nfor line in handle :\n line = line.rstrip()\n y= y+ re.findall('^From: (\\S+@\\S+)', line)\n if len(y) < 1 : continue\nprint(y)\n" }, { "alpha_fraction": 0.6009615659713745, "alphanum_fraction": 0.6057692170143127, "avg_line_length": 25, "blob_id": "d84da25730a6ddaaf5614afdb23b508ac45773b9", "content_id": "1ee550acf64d15b419ad3053d3ec69ea6e210964", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/first_regex.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import re\nname = input('Enter file name: ')\nif len(name) <= 1 : name = 'mbox-short.txt'\nhandle = open(name)\nfor line in handle:\n line = line.rstrip()\n if re.search ('From: ', line):\n print(line)\n" }, { "alpha_fraction": 0.7651331424713135, "alphanum_fraction": 0.7675544619560242, "avg_line_length": 28.428571701049805, "blob_id": "b5c5e346e864995520759fc2816392d5a27e46cb", "content_id": "87b19fd504d727cdc7d28ee71ec662d1bf3f216d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "no_license", "max_line_length": 86, "num_lines": 14, "path": "/open_text_from_page.py", "repo_name": 
"marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "# Write a Python program to display the content of robot.txt for en.wikipedia.org.\n\nimport urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nhtml= urllib.request.urlopen('http://en.wikipedia.org/robots.txt', context=ctx).read()\nsoup =BeautifulSoup(html, 'html.parser')\n\nprint(soup)\n\n" }, { "alpha_fraction": 0.6828885674476624, "alphanum_fraction": 0.6954473853111267, "avg_line_length": 25.58333396911621, "blob_id": "9e3f53c30cafb23ebc767a2b9e9d96118d7608f3", "content_id": "e2c58f221a3f5613c0ed1335b4b90a9f13607154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 637, "license_type": "no_license", "max_line_length": 84, "num_lines": 24, "path": "/first_db.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import sqlite3\n\nconn = sqlite3.connect('music.sqlite')\ncur = conn.cursor ()\n\ncur.execute('DROP TABLE IF EXISTS Tracks')\ncur.execute('CREATE TABLE Tracks (title TEXT, plays INTEGER)')\n\ncur.execute('INSERT INTO Tracks (title, plays) VALUES (?, ?)', ('My Way', 15))\ncur.execute('INSERT INTO Tracks (title, plays) VALUES (?, ?)', ('Thunderstone', 20))\nconn.commit()\n\nprint('Tracks:')\ncur.execute('SELECT title, plays FROM Tracks')\nfor row in cur :\n print(row)\n\ncur.execute('DELETE FROM Tracks WHERE plays < 19')\nconn.commit()\nprint('New Tracks:')\ncur.execute('SELECT title, plays FROM Tracks')\nfor row in cur :\n print(row)\nconn.close()" }, { "alpha_fraction": 0.5318351984024048, "alphanum_fraction": 0.5617977380752563, "avg_line_length": 25.700000762939453, "blob_id": "4ddba2b0875c3c54ec364a973adb4e415934e207", "content_id": "0771f796e3a749d14b9141b5650a65a59d1b2338", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 267, "license_type": "no_license", "max_line_length": 63, "num_lines": 10, "path": "/time_regex.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import re\nname = input ('Enter file name: ')\nif len(name) <=1 : name = 'mbox-short.txt'\nhandle = open(name)\ny = list()\nfor line in handle :\n line = line.rstrip()\n y = y+ re.findall ('^From .+ ([0-9]+:[0-9]+:[0-9]+)', line)\n if len(y) < 1 : continue\nprint(y)\n" }, { "alpha_fraction": 0.44680851697921753, "alphanum_fraction": 0.5390070676803589, "avg_line_length": 30.44444465637207, "blob_id": "869e8d088bcc5df49de6ae124c653b09fd9739e1", "content_id": "10bc9a992c60fe6e8edf78ffb0d777faca34d980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 93, "num_lines": 9, "path": "/phone_regex.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import re\nname = input('Enter file name: ')\nif len(name) <= 1 : name = 'exp.txt'\nhandle = open(name)\nfor line in handle :\n line = line.rstrip()\n y = re.findall('[0-9][0-9][0-9][0-9]* [0-9][0-9][0-9][0-9]* [0-9][0-9][0-9][0-9]*', line)\n if len(y) < 1 : continue\n print(y)" }, { "alpha_fraction": 0.6202247142791748, "alphanum_fraction": 0.632584273815155, "avg_line_length": 26, "blob_id": "3618d16491d6ce088ec7ab6edbc15a84e696105d", "content_id": "e9d4103a51d51db27a794c21f54fe77f90483af4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 890, "license_type": "no_license", "max_line_length": 80, "num_lines": 33, "path": "/sql_assignment.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import sqlite3\nimport re\n\nconn = sqlite3.connect('domsql.sqlite')\ncur = conn.cursor()\n\ncur.execute('DROP TABLE IF EXISTS Counts')\ncur.execute('CREATE TABLE Counts (org TEXT, count 
INTEGER)')\n\nfname = input('Enter file name: ')\nif len(fname)< 1 : fname = 'mbox.txt'\nhandle = open(fname)\n\nfor line in handle :\n line = line.rstrip()\n dom = re.findall('^From:.+@([a-z.]+)', line)\n if len(dom) < 1 : continue\n org = dom[0]\n cur.execute('SELECT count FROM Counts WHERE org= ?', (org,))\n row = cur.fetchone()\n if row is None :\n cur.execute('INSERT INTO Counts (org, count) VALUES (?, 1)', (org,))\n else :\n cur.execute('UPDATE Counts SET count = count + 1 WHERE org = ?', (org,))\nconn.commit()\n\n# ogranici na 10\nsqlstr = 'SELECT org, count FROM Counts ORDER BY count'\nfor row in cur.execute(sqlstr):\n print(str(row[0]), row[1])\nconn.commit()\n \ncur.close()" }, { "alpha_fraction": 0.7089946866035461, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 25.071428298950195, "blob_id": "b46e9e19fb6017a35063f226f121aaf40c3b5c90", "content_id": "5ffae64bb45eb3746a05259267d020590a93e33a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 83, "num_lines": 14, "path": "/number_of_datasets.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "#Write a Python program to get the number of datasets currently listed on data.gov.\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nsource = requests.get('https://www.data.gov/').text\nsoup = BeautifulSoup(source, 'html.parser')\n\n#print(soup.prettify)\n\nx = soup.small.a.text\n#print(x)\nl =x.split()\nprint('Number of datasets currently listed on data.gov is: ', l[0])\n\n\n\n \n \n\n" }, { "alpha_fraction": 0.5046728849411011, "alphanum_fraction": 0.5358255505561829, "avg_line_length": 22, "blob_id": "0e5615165533917e2d595ee8050ff0fda60c5f60", "content_id": "9f7178c003236cac5372a52fd3251bda917b0652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, "license_type": "no_license", 
"max_line_length": 47, "num_lines": 14, "path": "/xml1.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\n\ndata = '''\n <person>\n <name> Chuck </name>\n <phone type=\"init\">\n +1 73 4465 789\n </phone>\n <email hide=\"yes\"/>\n </person>'''\n\ntree = ET.fromstring(data)\nprint('Name: ', tree.find('name').text)\nprint('Atrr: ', tree.find('email').get('hide'))" }, { "alpha_fraction": 0.6829745769500732, "alphanum_fraction": 0.694716215133667, "avg_line_length": 23.33333396911621, "blob_id": "f3de083b3b00840e6847d9e51acc512e2166b862", "content_id": "786bff98682c637d7604667d59e290c5efacc470", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 511, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/assignment_beaut_soup.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input ('Enter - ')\nhtml = urllib.request.urlopen(url, context=ctx).read()\nsoup = BeautifulSoup(html, 'html.parser')\nsuma = 0\ncount =0\n# sve linkove\ntags = soup ('span')\nfor tag in tags :\n #print(tag.contents[0])\n suma = suma + int(tag.contents[0])\n count = count + 1\nprint('Count ', count)\nprint('Sum', suma)\n" }, { "alpha_fraction": 0.6564157009124756, "alphanum_fraction": 0.6691410541534424, "avg_line_length": 23.179487228393555, "blob_id": "0ccd917391d95348439799252ee8ae0e06b9ee11", "content_id": "5d84ab2439257b89538db68d4b253ce683d12f51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 943, "license_type": "no_license", "max_line_length": 58, "num_lines": 39, "path": "/assignment2_beautiful_soup.py", "repo_name": 
"marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "# To run this, download the BeautifulSoup zip file\n# http://www.py4e.com/code3/bs4.zip\n# and unzip it in the same directory as this file\n\nimport urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter - ')\ncount = input ('Enter count: ')\nif int(count) < 1 :\n print('Error')\n count = 7\ncount = int(count)\n\nposition = input('Enter position: ')\nif int(position) < 1 :\n print('Error')\n position = 18\nposition = int(position)\n# Retrieve all of the anchor tags\n\n\nfor i in range(count):\n html = urllib.request.urlopen(url, context=ctx).read()\n soup = BeautifulSoup(html, 'html.parser')\n\n tags = soup('a')\n l = list()\n for tag in tags:\n x= tag.get('href', None)\n l.append(x)\n url = l[position-1]\nprint(tags[position-1].contents[0])\n" }, { "alpha_fraction": 0.5636363625526428, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 20.153846740722656, "blob_id": "0929d080211d1723011e8e481432cce97d86a592", "content_id": "b16b14cb906a07cba66888fb2b70e009f43e4524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 48, "num_lines": 13, "path": "/assigment-regulare.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import re\ny = list()\nzbir = 0\nname = input('Enter file name: ')\nif len(name) <=1 : name = 'regex_sum_468299.txt'\nhandle = open(name)\nfor line in handle :\n line = line.rstrip()\n y=y+ re.findall('[0-9]+', line)\n#print(y)\nfor i in y :\n zbir = zbir + int(i)\nprint(zbir)\n" }, { "alpha_fraction": 0.49145299196243286, "alphanum_fraction": 0.5085470080375671, "avg_line_length": 18.5, "blob_id": "bb64d7690e4e58225a0e78b39b9dd190fb0d63b2", 
"content_id": "91d489d9be38f0b1602b1ccfa04bb7be52cb8a75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 234, "license_type": "no_license", "max_line_length": 42, "num_lines": 12, "path": "/sorttuples.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "d = {'a' : 2 , 'c' : 1 , 'd' : 4, 'b' : 3}\nt =d.items()\nprint(t)\ns = sorted(d.items())\nprint(s)\nfor i, v in sorted(d.items()) :\n print(i, v)\no = list()\nfor i,v in d.items():\n o.append((v, i))\nprint(o)\no= sorted(o, reverse=True)\n" }, { "alpha_fraction": 0.6654929518699646, "alphanum_fraction": 0.6848591566085815, "avg_line_length": 20.074073791503906, "blob_id": "d93b1ae623c2ec3df3fa0c434d56987e3c043d99", "content_id": "d1c401c975b45f8db25c8850d99ce58031656ba4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "no_license", "max_line_length": 75, "num_lines": 27, "path": "/json_assignment.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import urllib.request, urllib.parse, urllib.error\nimport json\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter url: ')\nif len(url) < 1: url = 'http://py4e-data.dr-chuck.net/comments_468304.json'\n\nfhand = urllib.request.urlopen(url, context=ctx)\ndata = fhand.read()\n\ninfo = json.loads(data)\n#print('User count:', len(info))\n\nbr = 0\nsuma = 0\n\nfor item in info[\"comments\"]:\n br = br + 1\n suma = suma + item[\"count\"]\n\nprint('Count:', br)\nprint('Sum' , suma)" }, { "alpha_fraction": 0.6746166944503784, "alphanum_fraction": 0.6933560371398926, "avg_line_length": 20.740739822387695, "blob_id": "c7c4a597acdd3e9512d1a26cea422c1cfae91fb1", "content_id": "fcb3bb41f432ea02881bf716c788cbd7bcb35bc4", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 587, "license_type": "no_license", "max_line_length": 74, "num_lines": 27, "path": "/assignment_parsing_xml.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import urllib.request, urllib.parse, urllib.error\nimport xml.etree.ElementTree as ET\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter url: ')\nif len(url) < 1: url = 'http://py4e-data.dr-chuck.net/comments_468303.xml'\n\nfhand = urllib.request.urlopen(url, context=ctx)\ndata = fhand.read()\n\ntree = ET.fromstring(data)\n\nbr = 0\nsuma = 0\n\ncounts = tree.findall('.//count')\nfor item in counts :\n br = br + 1\n suma = suma + int(item.text)\n\nprint('Count:', br)\nprint('Sum' , suma)\n" }, { "alpha_fraction": 0.6422122120857239, "alphanum_fraction": 0.6557562351226807, "avg_line_length": 28.566667556762695, "blob_id": "01f9afc2b5f952eb09127dcbd333456861caf2c1", "content_id": "970978e7e4beffa3d6e8f1530d0c643c8c03ada3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 886, "license_type": "no_license", "max_line_length": 86, "num_lines": 30, "path": "/sqlite_py.py", "repo_name": "marina-kantar/Python-for-Everybody", "src_encoding": "UTF-8", "text": "import sqlite3\n\nconn = sqlite3.connect('emaildb.sqlite')\ncur = conn.cursor()\n\ncur.execute('DROP TABLE IF EXISTS Counts')\ncur.execute('CREATE TABLE Counts (email TEXT, counts INTEGER)')\n\nfname = input('Enter file name: ')\nif len(fname)< 1 : fname = 'mbox-short.txt'\nhandle = open(fname)\n\nfor line in handle :\n if not line.startswith('From: ') : continue\n piece = line.split()\n email = piece[1]\n cur.execute('SELECT counts FROM Counts WHERE email= ?', (email,))\n row = cur.fetchone()\n if row is None :\n cur.execute('INSERT INTO Counts (email, counts) VALUES (?, 1)', (email,))\n 
else :\n cur.execute('UPDATE Counts SET counts = counts + 1 WHERE email = ?', (email,))\n conn.commit()\n\n# ogranici na 10\nsqlstr = 'SELECT email, counts FROM Counts ORDER BY counts DESC LIMIT 10'\nfor row in cur.execute(sqlstr):\n print(str(row[0]), row[1])\n \ncur.close()" } ]
16
garyrh/blingee-grab
https://github.com/garyrh/blingee-grab
f5caf435284ca65a4a62c87c1a31aa2ebd749907
74d6f8a1db83336b2d2259b0612a1c1029ca3695
4d4c8cd00b43c894537791ddfb174e4ae38fc763
refs/heads/master
"2016-09-06T18:22:31.186922"
"2015-10-03T12:20:07"
"2015-10-03T12:20:07"
40,741,319
1
0
null
"2015-08-15T00:30:22"
"2015-08-15T11:19:56"
"2015-08-16T19:12:36"
Lua
[ { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6952908635139465, "avg_line_length": 24.64285659790039, "blob_id": "14a18c36435bfd1d64abf18378852c488d828ff9", "content_id": "8ac301c03f092ce3efadcc38f9ce8e2c0d180748", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 361, "license_type": "permissive", "max_line_length": 98, "num_lines": 14, "path": "/warrior-install.sh", "repo_name": "garyrh/blingee-grab", "src_encoding": "UTF-8", "text": "#!/bin/bash\nsudo apt-get update\nif ! pip search requests 2>/dev/null | grep -q -z1 -Poi \"\\- Python HTTP for Humans.[\\s]*INSTALLED\"\nthen\n echo \"Installing python-requests\"\n sudo pip install requests\nfi\nif ! dpkg-query -Wf'${Status}' python-lxml 2>/dev/null | grep -q '^i'\nthen\n echo \"Installing python-lxml\"\n sudo apt-get -y install python-lxml\nfi\n\nexit 0\n\n\n" }, { "alpha_fraction": 0.48177677392959595, "alphanum_fraction": 0.4874715209007263, "avg_line_length": 22.105262756347656, "blob_id": "9b3267419deb02ebb89deaaf7799c76e225a20aa", "content_id": "ebbbdd68af995f8e774f3654a1c32d5116a8cfbc", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 878, "license_type": "permissive", "max_line_length": 53, "num_lines": 38, "path": "/parse_html.py", "repo_name": "garyrh/blingee-grab", "src_encoding": "UTF-8", "text": "import sys\nimport requests\nfrom lxml import etree\n\ndef main():\n #print sys.argv\n the_file = sys.argv[1]\n pattern = sys.argv[2]\n if len(sys.argv) > 3:\n index = sys.argv[3]\n else:\n index = False\n html = \"\"\n while True:\n try:\n f = open(the_file, \"r\")\n html = f.read()\n f.close()\n break\n except EnvironmentError:\n # Hit the limit of file descriptors, etc.\n continue\n if html:\n myparser = etree.HTMLParser(encoding=\"utf-8\")\n tree = etree.HTML(html, parser=myparser)\n urls = tree.xpath(pattern)\n if urls and html:\n if index != 
False:\n print urls[int(index)]\n else:\n print urls\n else:\n print \"none\"\n else:\n print \"none\"\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5317946672439575, "alphanum_fraction": 0.5490877032279968, "avg_line_length": 38.167999267578125, "blob_id": "c6f3370a2f0c7b8af4ce63535c5097ce638bb63d", "content_id": "1afde00e9d555a46568ba31ea6aa9bd9d905c2c6", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14688, "license_type": "permissive", "max_line_length": 172, "num_lines": 375, "path": "/pipeline.py", "repo_name": "garyrh/blingee-grab", "src_encoding": "UTF-8", "text": "# encoding=utf8\nimport datetime\nfrom distutils.version import StrictVersion\nimport hashlib\nimport os.path\nimport random\nfrom seesaw.config import realize, NumberConfigValue\nfrom seesaw.item import ItemInterpolation, ItemValue\nfrom seesaw.task import SimpleTask, LimitConcurrent\nfrom seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \\\n UploadWithTracker, SendDoneToTracker\nimport shutil\nimport socket\nimport subprocess\nimport sys\nimport time\nimport string\nimport requests\nfrom lxml import etree\n\nimport seesaw\nfrom seesaw.externalprocess import WgetDownload\nfrom seesaw.pipeline import Pipeline\nfrom seesaw.project import Project\nfrom seesaw.util import find_executable\n\n\n# check the seesaw version\nif StrictVersion(seesaw.__version__) < StrictVersion(\"0.8.5\"):\n raise Exception(\"This pipeline needs seesaw version 0.8.5 or higher.\")\n\n\n###########################################################################\n# Find a useful Wget+Lua executable.\n#\n# WGET_LUA will be set to the first path that\n# 1. does not crash with --version, and\n# 2. 
prints the required version string\nWGET_LUA = find_executable(\n \"Wget+Lua\",\n [\"GNU Wget 1.14.lua.20130523-9a5c\"],\n [\n \"./wget-lua\",\n \"./wget-lua-warrior\",\n \"./wget-lua-local\",\n \"../wget-lua\",\n \"../../wget-lua\",\n \"/home/warrior/wget-lua\",\n \"/usr/bin/wget-lua\"\n ]\n)\n\nif not WGET_LUA:\n raise Exception(\"No usable Wget+Lua found.\")\n\ndef base36_encode(n):\n \"\"\"\n Encode integer value `n` using `alphabet`. The resulting string will be a\n base-N representation of `n`, where N is the length of `alphabet`.\n\n Copied from https://github.com/benhodgson/basin/blob/master/src/basin.py\n \"\"\"\n alphabet=\"0123456789abcdefghijklmnopqrstuvwxyz\"\n if not (isinstance(n, int) or isinstance(n, long)):\n raise TypeError('value to encode must be an int or long')\n r = []\n base = len(alphabet)\n while n >= base:\n r.append(alphabet[n % base])\n n = n / base\n r.append(str(alphabet[n % base]))\n r.reverse()\n return ''.join(r)\n\n###########################################################################\n# The version number of this pipeline definition.\n#\n# Update this each time you make a non-cosmetic change.\n# It will be added to the WARC files and reported to the tracker.\nVERSION = \"20151003.01\"\nTRACKER_ID = 'blingee'\nTRACKER_HOST = 'tracker.archiveteam.org'\n# Number of blingees per item\nNUM_BLINGEES = 100\n# Number of profiles per item\nNUM_PROFILES = 25\n\nUSER_AGENTS = ['Mozilla/5.0 (Windows NT 6.3; rv:24.0) Gecko/20100101 Firefox/39.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/39.0',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',\n 'Opera/9.80 (Windows NT 6.0; rv:2.0) Presto/2.12.388 Version/12.16',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.155 
Safari/537.36']\nUSER_AGENT = random.choice(USER_AGENTS)\nREQUESTS_HEADERS = {\"User-Agent\": USER_AGENT, \"Accept-Encoding\": \"gzip\"}\n\n###########################################################################\n# This section defines project-specific tasks.\n#\n# Simple tasks (tasks that do not need any concurrency) are based on the\n# SimpleTask class and have a process(item) method that is called for\n# each item.\nclass CheckIP(SimpleTask):\n def __init__(self):\n SimpleTask.__init__(self, \"CheckIP\")\n self._counter = 0\n\n def process(self, item):\n # NEW for 2014! Check if we are behind firewall/proxy\n if self._counter <= 0:\n item.log_output('Checking IP address.')\n ip_set = set()\n\n ip_set.add(socket.gethostbyname('twitter.com'))\n ip_set.add(socket.gethostbyname('facebook.com'))\n ip_set.add(socket.gethostbyname('youtube.com'))\n ip_set.add(socket.gethostbyname('microsoft.com'))\n ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))\n ip_set.add(socket.gethostbyname('archiveteam.org'))\n\n if len(ip_set) != 6:\n item.log_output('Got IP addresses: {0}'.format(ip_set))\n item.log_output(\n 'Are you behind a firewall/proxy? That is a big no-no!')\n raise Exception(\n 'Are you behind a firewall/proxy? 
That is a big no-no!')\n\n # Check only occasionally\n if self._counter <= 0:\n self._counter = 10\n else:\n self._counter -= 1\n\n\nclass PrepareDirectories(SimpleTask):\n def __init__(self, warc_prefix):\n SimpleTask.__init__(self, \"PrepareDirectories\")\n self.warc_prefix = warc_prefix\n\n def process(self, item):\n item_name = item[\"item_name\"]\n escaped_item_name = item_name.replace(':', '_').replace('/', '_').replace('~', '_')\n dirname = \"/\".join((item[\"data_dir\"], escaped_item_name))\n\n if os.path.isdir(dirname):\n shutil.rmtree(dirname)\n\n os.makedirs(dirname)\n\n item[\"item_dir\"] = dirname\n item[\"warc_file_base\"] = \"%s-%s-%s\" % (self.warc_prefix, escaped_item_name,\n time.strftime(\"%Y%m%d-%H%M%S\"))\n\n open(\"%(item_dir)s/%(warc_file_base)s.warc.gz\" % item, \"w\").close()\n\n\nclass MoveFiles(SimpleTask):\n def __init__(self):\n SimpleTask.__init__(self, \"MoveFiles\")\n\n def process(self, item):\n # NEW for 2014! Check if wget was compiled with zlib support\n if os.path.exists(\"%(item_dir)s/%(warc_file_base)s.warc\" % item):\n raise Exception('Please compile wget with zlib support!')\n\n os.rename(\"%(item_dir)s/%(warc_file_base)s.warc.gz\" % item,\n \"%(data_dir)s/%(warc_file_base)s.warc.gz\" % item)\n\n shutil.rmtree(\"%(item_dir)s\" % item)\n\n\ndef get_hash(filename):\n with open(filename, 'rb') as in_file:\n return hashlib.sha1(in_file.read()).hexdigest()\n\n\nCWD = os.getcwd()\nPIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))\nLUA_SHA1 = get_hash(os.path.join(CWD, 'blingee.lua'))\n\n\ndef stats_id_function(item):\n # NEW for 2014! 
Some accountability hashes and stats.\n d = {\n 'pipeline_hash': PIPELINE_SHA1,\n 'lua_hash': LUA_SHA1,\n 'python_version': sys.version,\n }\n\n return d\n\n\nclass WgetArgs(object):\n def realize(self, item):\n wget_args = [\n WGET_LUA,\n \"-U\", USER_AGENT,\n \"--header\", \"Accept-Language: en-US,en;q=0.8\",\n \"-nv\",\n \"--lua-script\", \"blingee.lua\",\n \"-o\", ItemInterpolation(\"%(item_dir)s/wget.log\"),\n \"--no-check-certificate\",\n \"--output-document\", ItemInterpolation(\"%(item_dir)s/wget.tmp\"),\n \"--truncate-output\",\n \"-e\", \"robots=off\",\n \"--rotate-dns\",\n \"--retry-connrefused\",\n \"--no-cookies\",\n \"--no-parent\",\n \"--timeout\", \"30\",\n \"--tries\", \"inf\",\n \"--domains\", \"blingee.com,s3.amazonaws.com,image.blingee.com,image.blingee.com.s3.amazonaws.com\",\n \"--span-hosts\",\n \"--waitretry\", \"30\",\n \"--warc-file\", ItemInterpolation(\"%(item_dir)s/%(warc_file_base)s\"),\n \"--warc-header\", \"operator: Archive Team\",\n \"--warc-header\", \"blingee-dld-script-version: \" + VERSION,\n \"--warc-header\", ItemInterpolation(\"blingee: %(item_name)s\")\n ]\n\n item_name = item['item_name']\n assert ':' in item_name\n item_type, item_value = item_name.split(':', 1)\n\n item['item_type'] = item_type\n item['item_value'] = item_value\n\n assert item_type in ('blingee',\n 'stamp',\n 'group',\n 'competition',\n 'challenge',\n 'badge',\n 'profile',\n '1blingee')\n\n if 'blingee' in item_type:\n if item_type == '1blingee':\n total_blingees = 1\n elif item_type == 'blingee':\n total_blingees = NUM_BLINGEES\n for val in xrange(int(item_value), int(item_value)+total_blingees):\n wget_args.append(\"http://blingee.com/blingee/view/{0}\".format(val))\n wget_args.append(\"http://bln.gs/b/{0}\".format(base36_encode(val)))\n wget_args.append(\"http://blingee.com/blingee/{0}/comments\".format(val))\n elif item_type == 'stamp':\n wget_args.append(\"http://blingee.com/stamp/view/{0}\".format(item_value))\n elif item_type == 'group':\n 
wget_args.extend([\"--recursive\", \"--level=inf\"])\n wget_args.append(\"http://blingee.com/group/{0}\".format(item_value))\n wget_args.append(\"http://blingee.com/group/{0}/members\".format(item_value))\n elif item_type == 'competition':\n wget_args.append(\"http://blingee.com/competition/view/{0}\".format(item_value))\n wget_args.append(\"http://blingee.com/competition/rankings/{0}\".format(item_value))\n elif item_type == 'challenge':\n wget_args.append(\"http://blingee.com/challenge/view/{0}\".format(item_value))\n wget_args.append(\"http://blingee.com/challenge/rankings/{0}\".format(item_value))\n elif item_type == 'badge':\n wget_args.append(\"http://blingee.com/badge/view/{0}\".format(item_value))\n wget_args.append(\"http://blingee.com/badge/winner_list/{0}\".format(item_value))\n elif item_type == 'profile':\n profile_parser = etree.HTMLParser(encoding=\"utf-8\")\n for val in xrange(int(item_value), int(item_value)+NUM_PROFILES):\n while True:\n print(\"Getting username for ID {0}...\".format(val))\n sys.stdout.flush()\n url = \"http://blingee.com/badge/view/42/user/{0}\".format(val)\n html = requests.get(url, headers=REQUESTS_HEADERS)\n status_code = html.status_code\n\n if status_code == 200 and html.text:\n tree = etree.HTML(html.text, parser=profile_parser)\n links = tree.xpath('//div[@id=\"badgeinfo\"]//a/@href')\n username = [link for link in links if \"/profile/\" in link]\n if not username:\n if \"Oops, Error\" in html.text:\n print(\"Skipping deleted/private/nonexistent profile.\")\n sys.stdout.flush()\n break\n else:\n print(\"Status code is 200, but couldn't find username! 
Sleeping.\")\n sys.stdout.flush()\n time.sleep(5)\n else:\n username = username[0]\n wget_args.append(\"http://blingee.com{0}\".format(username))\n wget_args.append(\"http://blingee.com{0}/statistics\".format(username))\n wget_args.append(\"http://blingee.com{0}/circle\".format(username))\n wget_args.append(\"http://blingee.com{0}/badges\".format(username))\n wget_args.append(\"http://blingee.com{0}/comments\".format(username))\n print(\"Username is {0}\".format(username.replace(\"/profile/\", \"\")))\n sys.stdout.flush()\n break\n else:\n print(\"Got status code {0}. Sleeping.\".format(html.status_code))\n sys.stdout.flush()\n time.sleep(5)\n time.sleep(1)\n\n else:\n raise Exception('Unknown item')\n\n if 'bind_address' in globals():\n wget_args.extend(['--bind-address', globals()['bind_address']])\n print('')\n print('*** Wget will bind address at {0} ***'.format(\n globals()['bind_address']))\n print('')\n\n return realize(wget_args, item)\n\n###########################################################################\n# Initialize the project.\n#\n# This will be shown in the warrior management panel. The logo should not\n# be too big. 
The deadline is optional.\nproject = Project(\n title=\"blingee\",\n project_html=\"\"\"\n <img class=\"project-logo\" alt=\"Project logo\" src=\"http://archiveteam.org/images/6/6e/Blingee_logo.png\" height=\"50px\" title=\"\"/>\n <h2>blingee.com <span class=\"links\"><a href=\"http://blingee.com/\">Website</a> &middot; <a href=\"http://tracker.archiveteam.org/blingee/\">Leaderboard</a></span></h2>\n <p>Saving all images and content from Blingee.</p>\n \"\"\",\n utc_deadline=datetime.datetime(2015, 8, 25, 0, 0, 0)\n)\n\npipeline = Pipeline(\n CheckIP(),\n GetItemFromTracker(\"http://%s/%s\" % (TRACKER_HOST, TRACKER_ID), downloader,\n VERSION),\n PrepareDirectories(warc_prefix=\"blingee\"),\n WgetDownload(\n WgetArgs(),\n max_tries=2,\n accept_on_exit_code=[0, 8],\n env={\n \"item_dir\": ItemValue(\"item_dir\"),\n \"item_value\": ItemValue(\"item_value\"),\n \"item_type\": ItemValue(\"item_type\"),\n }\n ),\n PrepareStatsForTracker(\n defaults={\"downloader\": downloader, \"version\": VERSION},\n file_groups={\n \"data\": [\n ItemInterpolation(\"%(item_dir)s/%(warc_file_base)s.warc.gz\")\n ]\n },\n id_function=stats_id_function,\n ),\n MoveFiles(),\n LimitConcurrent(NumberConfigValue(min=1, max=4, default=\"1\",\n name=\"shared:rsync_threads\", title=\"Rsync threads\",\n description=\"The maximum number of concurrent uploads.\"),\n UploadWithTracker(\n \"http://%s/%s\" % (TRACKER_HOST, TRACKER_ID),\n downloader=downloader,\n version=VERSION,\n files=[\n ItemInterpolation(\"%(data_dir)s/%(warc_file_base)s.warc.gz\")\n ],\n rsync_target_source_path=ItemInterpolation(\"%(data_dir)s/\"),\n rsync_extra_args=[\n \"--recursive\",\n \"--partial\",\n \"--partial-dir\", \".rsync-tmp\",\n ]\n ),\n ),\n SendDoneToTracker(\n tracker_url=\"http://%s/%s\" % (TRACKER_HOST, TRACKER_ID),\n stats=ItemValue(\"stats\")\n )\n)\n" }, { "alpha_fraction": 0.5929021835327148, "alphanum_fraction": 0.6003014445304871, "avg_line_length": 32.78703689575195, "blob_id": 
"520171f5ee5d36cd82b7f8de9104cee3edf5e0ac", "content_id": "0dcd5961120f7db2a961602958d1f51718ac7130", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Lua", "length_bytes": 14596, "license_type": "permissive", "max_line_length": 105, "num_lines": 432, "path": "/blingee.lua", "repo_name": "garyrh/blingee-grab", "src_encoding": "UTF-8", "text": "dofile(\"urlcode.lua\")\ndofile(\"table_show.lua\")\nrequire 'io'\n\nlocal url_count = 0\nlocal tries = 0\nlocal item_type = os.getenv('item_type')\nlocal item_value = os.getenv('item_value')\n\nlocal downloaded = {}\nlocal todo = {}\n\nread_file = function(file)\n if file then\n local f = assert(io.open(file))\n local data = assert(f:read(\"*all\"))\n f:close()\n return data\n else\n return \"\"\n end\nend\n\nsplit = function(inputstr, sep)\n -- https://stackoverflow.com/questions/1426954/split-string-in-lua\n inputstr = inputstr .. sep\n if sep == nil then\n sep = \"%s\"\n end\n local t = {}\n for str in string.gmatch(inputstr, \"([^\"..sep..\"]+)\") do\n table.insert(t, str)\n end\n return t\nend\n\nline_num = function(linenum, filename)\n local num = 0\n for line in io.lines(filename) do\n num = num + 1\n if num == linenum then\n return line\n end\n end\nend\n\ntrim = function(s)\n return (s:gsub(\"^%s*(.-)%s*$\", \"%1\"))\nend\n\nparse_html = function(file, selector, index)\n index = index or \"\"\n -- Make sure file exists.\n local f = assert(io.open(file, \"r\"))\n f:close()\n while true do\n local handle = io.popen(\"python ./parse_html.py \"..file..\" \"..selector..\" \"..index..\"; echo $?\")\n if handle then\n local html = handle:read(\"*a\") or \"\"\n handle:close()\n local output = split(html, \"\\n\")\n local matched = output[1]\n local err = output[2]\n if matched ~= nil and matched ~= \"\" and not string.match(matched, \"^[\\n%d]+$\") and err == \"0\" then\n if matched == \"none\" then\n return \"\"\n else\n return matched\n end\n else\n io.stdout:write(\"HTML 
parsing failed! Trying again...\\n\")\n io.stdout:flush()\n end\n else\n io.stdout:write(\"HTML parsing failed! Trying again...\\n\")\n io.stdout:flush()\n end\n end\nend\n\nis_resource = function(url)\n local patterns = {\"%.gif[%?%d]*$\",\n \"%.png[%?%d]*$\",\n \"%.jpe?g[%?%d]*$\",\n \"%.css[%?%d]*$\",\n \"%.js[%?%d]*$\",\n \"%.swf[%?%d]*$\"}\n for _,pattern in ipairs(patterns) do\n if string.match(url, pattern) then\n return true\n end\n end\n return false\nend\n\ncheck = function(url, parent, verdict)\n if downloaded[url] == true or todo[url] == true then\n return false\n\n -- url should actually be a url.\n elseif not string.match(url, \"^https?://\") then\n return false\n\n -- Ignore blingee language options\n elseif string.match(url, \"https?://de%.blingee%.com/\") or\n string.match(url, \"https?://es%.blingee%.com/\") or\n string.match(url, \"https?://fr%.blingee%.com/\") or\n string.match(url, \"https?://it%.blingee%.com/\") or\n string.match(url, \"https?://nl%.blingee%.com/\") or\n string.match(url, \"https?://pt%.blingee%.com/\") or\n string.match(url, \"https?://ru%.blingee%.com/\") or\n string.match(url, \"https?://ja%.blingee%.com/\") or\n string.match(url, \"https?://ko%.blingee%.com/\") then\n return false\n\n -- Groups: Skip avatars/thumbnails on group frontpage, topics, and managers.\n elseif parent and is_resource(url) and\n (string.match(parent[\"url\"], \"blingee%.com/group/%d+$\") or\n string.match(parent[\"url\"], \"blingee%.com/group/%d+-\") or\n string.match(parent[\"url\"], \"blingee%.com/group/%d+/managers\") or\n string.match(parent[\"url\"], \"blingee%.com/group/%d+/topic\") or\n string.match(parent[\"url\"], \"blingee%.com/group/%d+/member\")) then\n return false\n\n -- Groups: Except for resources, only grab urls that contain item_type.\n elseif parent and (item_type == \"group\" and\n string.match(parent[\"url\"], \"/group/\") and\n not is_resource(url) and\n not string.match(url, \"blingee%.com/group/\")) then\n return false\n\n -- 
Groups: Skip other groups.\n elseif item_type == \"group\" and\n string.match(url, \"blingee%.com/group/%d+[^%d]*\") and\n not string.match(url, \"blingee%.com/group/\"..item_value..\"/\") then\n return false\n\n -- No need to redo badges as we're already grabbing them.\n elseif string.match(url, \"blingee%.com/images/badges/\") and item_type ~= \"badge\" then\n return false\n\n -- No ads or trackers\n elseif string.match(url, \"https?://partner%.googleadservices%.com\") or\n string.match(url, \"http://.+%.scorecardresearch%.com\") or\n string.match(url, \"http://.+%.quantserve%.com\") then\n return false\n\n -- Ignore static stuff that has no timestamps.\n elseif string.match(url, \"http://blingee%.com/images/web_ui/[^%?]+$\") or\n string.match(url, \"http://blingee%.com/favicon%.gif\") or\n string.match(url, \"http://blingee%.com/images/spaceball%.gif\") then\n return false\n\n -- Site stuff that is already saved elsewhere,\n elseif string.match(url, \"^https?://blingee%.com/$\") or\n string.match(url, \"blingee%.com/about\") or\n string.match(url, \"blingee%.com/partner\") or\n string.match(url, \"blingee%.com/group/%d+/.+page=1$\") or\n string.match(url, \"[%?&]list_type=409[78]\") or\n string.match(url, \"blingee%.com/group/%d+/member/\") or\n string.match(url, \"blingee%.com/group/%d+/blingees\") or\n string.match(url, \"blingee%.com/groups$\") or\n (string.match(url, \"host%d+-static%.blingee%.com\") and item_type == \"group\") or\n string.match(url, \"%?offset=%d+\") then\n return false\n\n -- ... 
requires a login, or makes wget go nuts.\n elseif string.match(url, \"blingee%.com/images/web_ui/default_deleted_avatar%.gif%?1341491498\") or\n string.match(url, \"blingee%.com/images/web_ui/default_avatar%.gif%?1341491498\") or\n string.match(url, \"/choose_blingee$\") or\n string.match(url, \"/choose_spotlight$\") or\n string.match(url, \"/upload_base$\") or\n string.match(url, \"/join$\") or\n string.match(url, \"/signup$\") or\n string.match(url, \"/login$\") or\n string.match(url, \"%?page=%d+%?page=%d+\") or\n string.match(url, \"blingee%.com/gift/\") or\n string.match(url, \"blingee%.com/user_circle/join\") or\n string.match(url, \"blingee%.com/user_circle/block_user\") or\n string.match(url, \"blingee%.com/profile/.+/spotlight\") or\n string.match(url, \"blingee%.com/profile/.+/postcards\") or\n string.match(url, \"blingee%.com/profile/.+/challenges\") or\n string.match(url, \"blingee%.com/goodie_bag\") or\n string.match(url, \"/add_topic\") or\n string.match(url, \"/add_post\") or\n string.match(url, \"blingee%.com/group/tags/\") or\n string.match(url, \"blingee%.com/blingee/tags/\") or\n string.match(url, \"blingee%.com/pictures/\") or\n string.match(url, \"[%?&]lang=\") then\n return false\n end\n return verdict or true\nend\n\n-- Ignore urls that are already saved.\nfor url in string.gmatch(read_file(\"ignorelist.txt\"), \"[^\\n]+\") do\n downloaded[url] = true\nend\n\n\nwget.callbacks.download_child_p = function(urlpos, parent, depth, start_url_parsed, iri, verdict, reason)\n local url = urlpos[\"url\"][\"url\"]\n passed = check(url, parent, verdict)\n if passed then\n todo[url] = true\n end\n return passed\nend\n\n\nwget.callbacks.get_urls = function(file, url, is_css, iri)\n local urls = {}\n local html = read_file(file)\n\n if downloaded[url] ~= true then\n downloaded[url] = true\n end\n\n -- Check url and, if valid and not downloaded, insert into urls.\n insert = function(newurl)\n if newurl ~= nil and check(newurl) and todo[newurl] ~= true\n and 
downloaded[newurl] ~= true then\n table.insert(urls, { url=newurl })\n todo[newurl] = true\n end\n end\n\n -- Check url for possible matches.\n -- If matched, returns newurl. Else, nil\n match_url = function(newurl)\n -- Get extra, possibly new css/js.\n if string.match(newurl, \"%.css[%?%d]*$\") or string.match(newurl, \"%.js[%?%d]*$\") then\n return newurl\n -- I don't think there are any swfs other than for stamps,\n -- but just in case\n elseif string.match(newurl, \"%.swf[%?%d]*$\") then\n return newurl\n else\n return nil\n end\n end\n\n -- Find various common links.\n if not is_resource(url) then\n for newurl in string.gmatch(html, '\"(https?://[^\"]+)\"') do\n insert(match_url(newurl))\n end\n\n for newurl in string.gmatch(html, '(\"/[^\"]+)\"') do\n if string.match(newurl, '\"//') then\n insert(match_url(string.gsub(newurl, '\"//', 'http://')))\n elseif not string.match(newurl, '\"//') then\n insert(match_url(string.match(url, \"(https?://[^/]+)/\")..string.match(newurl, '\"(/.+)')))\n end\n end\n\n for newurl in string.gmatch(html, \"('/[^']+)'\") do\n if string.match(newurl, \"'//\") then\n insert(match_url(string.gsub(newurl, \"'//\", \"http://\")))\n elseif not string.match(newurl, \"'//\") then\n insert(match_url(string.match(url, \"(https?://[^/]+)/\")..string.match(newurl, \"'(/.+)\")))\n end\n end\n end\n\n -- Profiles\n -- First, all the people in their \"circle\"\n if string.match(url, \"blingee%.com/profile/.+/circle\") then\n local partial_url = trim(parse_html(file, [[//div[@class=\\\"pagination\\\"]/a/@href]], -1))\n if partial_url then\n local total_num = string.match(partial_url, \"%d+$\")\n if total_num and string.match(partial_url, \"page=%d+\") then\n for num=2,total_num do\n newurl = url .. \"?page=\" .. 
num\n insert(newurl)\n end\n end\n end\n -- And comments\n elseif string.match(url, \"blingee%.com/profile/.+/comments\") then\n local partial_url = trim(parse_html(file, [[//div[@class=\\\"li2center\\\"]//div//a/@href]], -1))\n if partial_url then\n local total_num = string.match(partial_url, \"%d+$\")\n if total_num and string.match(partial_url, \"page=%d+\") then\n for num=2,total_num do\n newurl = url .. \"?page=\" .. num\n insert(newurl)\n end\n end\n end\n -- Get the avatar\n elseif string.match(url, \"blingee%.com/profile/\") then\n local newurl = trim(parse_html(file, [[//div[@class=\\'bigbox\\']//img/@src]], 0))\n insert(newurl)\n\n -- Blingees\n elseif string.match(url, \"blingee%.com/blingee/view/%d+$\") then\n -- The way Blingee stores images is odd. A lot of the thumbnails\n -- have very similar urls to the actual image.\n -- This selector gets just the main image, which is in the bigbox div.\n local newurl = trim(parse_html(file, [[//div[@class=\\'bigbox\\']//img/@src]], 0))\n insert(newurl)\n\n -- Blingee comments\n elseif string.match(url, \"blingee%.com/blingee/%d+/comments$\") then\n -- The very last url has the total number of comment pages\n local partial_url = trim(parse_html(file, [[//div[@class=\\'li2center\\']//div//a/@href]], -1))\n local total_num = string.match(partial_url, \"%d+$\")\n if total_num and string.match(partial_url, \"page=%d+\") then\n for num=2,total_num do\n newurl = url .. \"?page=\" .. 
num\n insert(newurl)\n end\n end\n\n -- Stamps\n elseif string.match(url, \"blingee%.com/stamp/view/\") then\n local partial_url = trim(parse_html(file, [[//div[@class=\\'bigbox\\']//img/@style]], 0))\n newurl = string.match(partial_url, \"http?://[^%)]+\")\n insert(newurl)\n\n -- Group urls are found via the --recursive wget flag,\n -- but we do have to add the group logo.\n elseif string.match(url, \"blingee%.com/group/%d+$\") then\n newurl = trim(parse_html(file, [[//div[@class=\\'bigbox\\']//img/@src]], 0))\n insert(newurl)\n\n -- Competition rankings\n elseif string.match(url, \"blingee%.com/competition/rankings/%d+$\") then\n local partial_url = trim(parse_html(file, [[//div[@class=\\'content_section\\']//a/@href]], -1))\n local total_num = string.match(partial_url, \"%d+$\")\n if total_num and string.match(partial_url, \"page/%d+\") then\n for num=2,total_num do\n newurl = url .. \"/page/\" .. num\n insert(newurl)\n end\n end\n\n -- Challenge rankings\n elseif string.match(url, \"blingee%.com/challenge/rankings/%d+$\") then\n local partial_url = trim(parse_html(file, [[//div[@class=\\'content_section\\']//a/@href]], -1))\n local total_num = string.match(partial_url, \"%d+$\")\n if total_num and string.match(partial_url, \"page=%d+\") then\n for num=2,total_num do\n newurl = url .. \"?page=\" .. num\n insert(newurl)\n end\n end\n\n -- Badges\n elseif string.match(url, \"blingee%.com/badge/\") then\n -- Get the actual badge\n if string.match(url, \"/view/%d+$\") then\n local description = trim(parse_html(file, [[//div[@class=\\'description\\']//p//a//img/@src]], 0))\n if description then\n insert(\"http:\" .. description)\n end\n -- Winner list\n elseif string.match(url, \"/winner_list/%d+$\") then\n local partial_url = trim(parse_html(file, [[//div[@class=\\'pagination\\']//a/@href]], -1))\n local total_num = string.match(partial_url, \"%d+$\")\n if total_num and string.match(partial_url, \"page=%d+\") then\n for num=2,total_num do\n newurl = url .. 
\"?page=\" .. num\n insert(newurl)\n end\n end\n end\n end\n return urls\nend\n\n\nwget.callbacks.httploop_result = function(url, err, http_stat)\n -- NEW for 2014: Slightly more verbose messages because people keep\n -- complaining that it's not moving or not working\n local status_code = http_stat[\"statcode\"]\n \n url_count = url_count + 1\n io.stdout:write(url_count .. \"=\" .. status_code .. \" \" .. url[\"url\"] .. \". \\n\")\n io.stdout:flush()\n\n -- Save the url shortener.\n if (status_code == 302 or status_code == 301) and\n (item_type == \"blingee\" and string.match(url.url, \"^https?://blingee%.com/b/.+\")) then\n return wget.actions.NOTHING\n\n elseif status_code >= 500 or\n (status_code >= 400 and status_code ~= 404 and status_code ~= 403) then\n\n io.stdout:write(\"\\nServer returned \"..http_stat.statcode..\". Sleeping.\\n\")\n io.stdout:flush()\n\n os.execute(\"sleep 15\")\n\n if tries >= 8 then\n io.stdout:write(\"\\nI give up...\\n\")\n io.stdout:flush()\n tries = 0\n return wget.actions.ABORT\n else\n tries = tries + 1\n return wget.actions.CONTINUE\n end\n\n elseif status_code == 0 then\n io.stdout:write(\"\\nServer returned \"..http_stat.statcode..\". Sleeping.\\n\")\n io.stdout:flush()\n\n os.execute(\"sleep 15\")\n\n if tries >= 5 then\n io.stdout:write(\"\\nI give up...\\n\")\n io.stdout:flush()\n tries = 0\n return wget.actions.ABORT\n else\n tries = tries + 1\n return wget.actions.CONTINUE\n end\n end\n\n tries = 0\n\n local sleep_time = 0.5 * (math.random(75, 125) / 100.0)\n\n if sleep_time >= 0.5 then\n os.execute(\"sleep \" .. sleep_time)\n end\n\n return wget.actions.NOTHING\nend\n" } ]
4
ShaneRich5/lab3-ex1
https://github.com/ShaneRich5/lab3-ex1
30aad3050aa7233aba6767a82c76a54f9078fa02
41a2661fec1780e3601a8073261e06595cd19ca7
cd77aafe9429628bf3324e36ed7a2b888153e62c
refs/heads/master
"2021-01-10T15:40:42.812934"
"2016-02-18T01:56:05"
"2016-02-18T01:56:05"
51,942,179
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7361111044883728, "alphanum_fraction": 0.7638888955116272, "avg_line_length": 23.33333396911621, "blob_id": "8a5fec68541f175e758033c7c28d0e2fff5cae33", "content_id": "15f355e8fac6f576d35e0efb8b08169c3541f9cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 72, "license_type": "no_license", "max_line_length": 52, "num_lines": 3, "path": "/README.md", "repo_name": "ShaneRich5/lab3-ex1", "src_encoding": "UTF-8", "text": "# Lab 3 Exercise 1\n\nThis repo will be removed at the end of the semester" }, { "alpha_fraction": 0.6696628928184509, "alphanum_fraction": 0.6764044761657715, "avg_line_length": 13.866666793823242, "blob_id": "f540d1a57e4609b0edafd26279367e036015ddeb", "content_id": "7cea189aac1b33ea19052a7409fa268306c7d858", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 48, "num_lines": 30, "path": "/sendmail.py", "repo_name": "ShaneRich5/lab3-ex1", "src_encoding": "UTF-8", "text": "import smtplib\n\nfromaddr = 'shane.richards212@gmail'\ntoaddr = '[email protected]'\n\nmessage = \"\"\"From: {} <{}> \nTo: {} <{}>\nSubject: {}\n\n{}\n\"\"\"\n\nmessagetosend = message.format(\n\tfromname,\n\tfromaddr,\n\ttoname,\n\ttoaddr,\n\tsubject,\n\tmsg)\n\n# Credentials\nusername = '[email protected]'\npassword = 'curryishot'\n\n# The actual message\nserver = smtplib.SMTP('smtp.gmail.com:587')\nserver.starttls()\nserver.login(username, password)\nserver.sendmail(fromaddr, toaddr, messagetosend)\nserver.quit()" } ]
2
Basetcan/Rock_Paper_Scissors-AI-Game
https://github.com/Basetcan/Rock_Paper_Scissors-AI-Game
feb6bee1c8a2b3bdb66272f34f4d3f10b8343263
3ae0b031ea41c9b28a5e05e60d24ae52df06aee6
14f08785594a346aeb95ee5e5d4d756d2e4ca45c
refs/heads/main
"2023-03-07T22:07:39.008254"
"2021-02-22T09:51:51"
"2021-02-22T09:51:51"
341,151,063
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7844827771186829, "alphanum_fraction": 0.7844827771186829, "avg_line_length": 57, "blob_id": "a5208a99c7a91f47359186025c1b7d6f872f2918", "content_id": "550f40d3d1ea270a8bab9387c7f08eea0d880ba5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 116, "license_type": "no_license", "max_line_length": 89, "num_lines": 2, "path": "/README.md", "repo_name": "Basetcan/Rock_Paper_Scissors-AI-Game", "src_encoding": "UTF-8", "text": "# Rock_Paper_Scissors AI \n In this project, a virtual RPS game is designed using the Naive Bayes classifier method.\n" }, { "alpha_fraction": 0.5825638175010681, "alphanum_fraction": 0.604426920413971, "avg_line_length": 31.171171188354492, "blob_id": "10f32d395428520a1101ead2ec28f4da5f206ca9", "content_id": "1d7981e3730e03167852868982672c72d020e63c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7364, "license_type": "no_license", "max_line_length": 126, "num_lines": 222, "path": "/rps_ai_game.py", "repo_name": "Basetcan/Rock_Paper_Scissors-AI-Game", "src_encoding": "UTF-8", "text": "# importing the libraries that we use\r\n\r\nimport random\r\nimport numpy as np\r\nimport math\r\nimport itertools\r\nimport time\r\n\r\nchoice = 0\r\n\r\nrock_data = []\r\npaper_data = []\r\nscissors_data = []\r\n\r\nnext_move_rock = []\r\nnext_move_paper = []\r\nnext_move_scissors = []\r\n\r\n## We keep the times of the moves in these lists.\r\nplayer0_rock = []\r\nplayer0_paper = []\r\nplayer0_scissors = []\r\n\r\n##ai move's times\r\nplayer1_rock = []\r\nplayer1_paper = []\r\nplayer1_scissors = []\r\n\r\n## move time\r\ncounter = 1\r\n\r\n## counters for possible states\r\nplayer0_won = 0\r\nplayer1_won = 0\r\ndraw = 0\r\n\r\n## for select mode ai vs user or ai vs comp\r\nmode = int(input(\"which mode (1)user vs ai (2)comp vs ai\"))\r\n\r\n## how many times do you want\r\nn_times = int(input(\"how many times do you want 
to play ?\"))\r\n\r\n\r\n## we use this method for compare the moves and get result.we can learn who is winner with this method.\r\ndef compare(counter, player0_rock, player0_paper, player0_scissors, player1_rock, player1_paper, player1_scissors) :\r\n global draw\r\n global player0_won\r\n global player1_won\r\n\r\n ## this conditions for get the winner side,these return boolean values\r\n rock_result_0 = counter in player0_rock\r\n rock_result_1 = counter in player1_rock\r\n\r\n paper_result_0 = counter in player0_paper\r\n paper_result_1 = counter in player1_paper\r\n\r\n scissors_result_0 = counter in player0_scissors\r\n scissors_result_1 = counter in player1_scissors\r\n\r\n ## we compare the couple result,we get winner side and number of their win,lose and so on.\r\n if rock_result_0 and rock_result_1 :\r\n print(\"DRAW\")\r\n draw += 1\r\n if paper_result_0 and paper_result_1 :\r\n print(\"DRAW\")\r\n draw += 1\r\n if scissors_result_0 and scissors_result_1 :\r\n print(\"DRAW\")\r\n draw += 1\r\n\r\n if rock_result_0 and paper_result_1 :\r\n print(\"PLAYER AI WON\")\r\n player1_won += 1\r\n if rock_result_0 and scissors_result_1 :\r\n print(\"PLAYER COMP/USER WON\")\r\n player0_won += 1\r\n\r\n if paper_result_0 and scissors_result_1 :\r\n print(\"PLAYER AI WON\")\r\n player1_won += 1\r\n if paper_result_0 and rock_result_1 :\r\n print(\"PLAYER COMP/USER WON\")\r\n player0_won += 1\r\n\r\n if scissors_result_0 and rock_result_1 :\r\n print(\"PLAYER AI WON\")\r\n player1_won += 1\r\n if scissors_result_0 and paper_result_1 :\r\n print(\"PLAYER COMP/USER WON\")\r\n player0_won += 1\r\n\r\n return draw, player0_won, player1_won\r\n\r\n\r\ndef player0(counter) :\r\n global player0_rock\r\n global player0_paper\r\n global player0_scissors\r\n global choice\r\n\r\n ## if user play, user need to choice R,P or S\r\n if mode == 1 :\r\n choice_user = str(input(\"enter R,P or S \"))\r\n\r\n if choice_user == 'R' :\r\n choice = 1\r\n if choice_user == \"P\" :\r\n 
choice = 2\r\n if choice_user == \"S\" :\r\n choice = 3\r\n ## if comp play, comp will play randomly.\r\n if mode == 2 :\r\n choice = random.choice([1, 2, 3])\r\n\r\n if choice == 1 :\r\n player0_rock.append(counter)\r\n if choice == 2 :\r\n player0_paper.append(counter)\r\n if choice == 3 :\r\n player0_scissors.append(counter)\r\n ## exception case\r\n if choice < 1 or choice > 3 or choice == \" \" :\r\n print(\"select again\")\r\n player0(counter)\r\n\r\n return choice, player0_rock, player0_paper, player0_scissors\r\n\r\n\r\n## our ai player . ai player moves with using probability of rock,paper and scissors.\r\ndef player1(counter) : ## counter as a paramter that used for time of move.\r\n if counter < 10 : ## ai starts with 10 random moves so there is no enough data for move logical.\r\n choice = random.choice([1, 2, 3])\r\n if choice == 1 :\r\n player1_rock.append(counter)\r\n if choice == 2 :\r\n player1_paper.append(counter)\r\n if choice == 3 :\r\n player1_scissors.append(counter)\r\n print(\"draw,player,ai,#ofgame \\n\",\r\n compare(counter, player0_rock, player0_paper, player0_scissors, player1_rock, player1_paper, player1_scissors),\r\n counter) ## show the result\r\n else :\r\n ## ai get the robability of rock,paper and scissors,and decide the move via a rule base system.\r\n prock = p_rock(player0_rock, counter)\r\n ppaper = p_paper(player0_paper, counter)\r\n pscissors = p_scissors(player0_scissors, counter)\r\n if prock > ppaper and prock > pscissors:\r\n if ppaper > pscissors :\r\n player1_paper.append(counter)\r\n if ppaper < pscissors :\r\n player1_rock.append(counter)\r\n if ppaper > prock and ppaper > pscissors :\r\n if pscissors > prock :\r\n player1_scissors.append(counter)\r\n if pscissors < prock :\r\n player1_paper.append(counter)\r\n if pscissors > ppaper and pscissors > prock :\r\n if prock > ppaper :\r\n player1_rock.append(counter)\r\n if prock < ppaper :\r\n player1_scissors.append(counter)\r\n print(prock, ppaper, pscissors) ## we 
can check the probability of rock,paper and scissors.\r\n ## if you want to show results step by step you can use these several prints.\r\n \"\"\" \r\n print(player0_rock)\r\n print(player0_paper)\r\n print(player0_scissors)\r\n print(player1_rock)\r\n print(player1_paper)\r\n print(player1_scissors)\r\n \"\"\"\r\n\r\n print(\"draw,player,ai,#ofgame \\n\",\r\n compare(counter, player0_rock, player0_paper, player0_scissors, player1_rock, player1_paper, player1_scissors),\r\n counter) ## shot the result\r\n\r\n return player1_rock, player1_paper, player1_scissors\r\n\r\n\r\n\r\n# We used Naive Bayes Classification to calculate probability of the moves according to the our previous datas\r\n# https://en.wikipedia.org/wiki/Naive_Bayes_classifier\r\n\"\"\" \r\nWith this mathematical function,\r\nwe are trying to calculate what the next move will be with the previously played data \r\nby making a naive bayes classification.\r\n\"\"\"\r\n\r\n\r\ndef p_rock(rock_data, counter) :\r\n var_rock = np.var(rock_data, ddof=1)\r\n mean_rock = np.mean(rock_data)\r\n p_rock = abs((1 / math.sqrt(2 * math.pi * var_rock)) * math.exp(- pow((counter - mean_rock), 2) / abs((2 * var_rock))))\r\n return p_rock\r\n\r\n\r\ndef p_paper(paper_data, counter) :\r\n var_paper = np.var(paper_data, ddof=1)\r\n mean_paper = np.mean(paper_data)\r\n p_paper = abs(\r\n (1 / math.sqrt(2 * math.pi * var_paper)) * math.exp(- pow((counter - mean_paper), 2) / abs((2 * var_paper))))\r\n return p_paper\r\n\r\n\r\ndef p_scissors(scissors_data, counter) :\r\n var_scissors = np.var(scissors_data, ddof=1)\r\n mean_scissors = np.mean(scissors_data)\r\n p_scissors = abs(\r\n (1 / math.sqrt(2 * math.pi * var_scissors)) * math.exp(- pow((counter - mean_scissors), 2) / abs((2 * var_scissors))))\r\n return p_scissors\r\n\r\n\r\n## counter must be bigger than zero for play the game and it must be smaller than n_times(how many time you want to play?)\r\nwhile counter > 0 and counter <= n_times :\r\n player0(counter)\r\n 
player1(counter)\r\n counter += 1\r\n\r\nif (player1_won > player0_won):\r\n print(\"* AI WON THE GAME *\")\r\nif (player0_won > player1_won):\r\n print(\"* COMP/USER WON THE GAME *\")\r\n" } ]
2
beichao1314/TREC2016
https://github.com/beichao1314/TREC2016
cd188e16d61a29d9ffceb07bbba55d382aedbda0
3eb92ac1a294c644350b68e33bb4bb4519a7a0e9
494963ee8b829ac93989c11e6ff09ec1ae517f20
refs/heads/master
"2020-04-06T07:02:32.009908"
"2017-09-19T02:20:33"
"2017-09-19T02:20:33"
58,422,363
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6593406796455383, "alphanum_fraction": 0.6788766980171204, "avg_line_length": 21.75, "blob_id": "c26daf3b240661844f5811bb8832ea9017ea72c8", "content_id": "2310ea4482f22e41ed08888c7b63d3af509f741a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": "no_license", "max_line_length": 97, "num_lines": 36, "path": "/run1_crawlerA/process_profile.py", "repo_name": "beichao1314/TREC2016", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 7 16:17:49 2016\n\n@author: xiaobei\n\"\"\"\n\nimport nltk\nfrom nltk.corpus import stopwords\nimport re\n\n\ndef removeStopWords_1(originSegs):\n stops = set(stopwords.words('english'))\n resultStr = [seg.lower() for seg in originSegs if seg.lower() not in stops and seg.isalpha()]\n return resultStr\n\n\ndef filters(content):\n results = re.compile(r'http://[a-zA-Z0-9.?/&=:]*', re.S)\n filter = results.sub(\"\", content)\n return filter\n\n\ndef preprocess_profile(sentence):\n # filterwords = filters(sentence)\n words = nltk.word_tokenize(sentence)\n removestopwords = removeStopWords_1(words)\n result = stemword(removestopwords)\n return result\n\n\ndef stemword(word):\n porter = nltk.PorterStemmer().stem\n result = list(map(porter, word))\n return result\n" }, { "alpha_fraction": 0.371578574180603, "alphanum_fraction": 0.38213780522346497, "avg_line_length": 56.27027130126953, "blob_id": "bb4a82a725889906721cd1140ccdf9771cd47ea8", "content_id": "21fe8a8e45bf3d660806774df70683c0e7961405", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16952, "license_type": "no_license", "max_line_length": 134, "num_lines": 296, "path": "/run2_crawlerA/rewritesummary.py", "repo_name": "beichao1314/TREC2016", "src_encoding": "UTF-8", "text": "import pymysql\nfrom process import preprocess\nimport time as T\nimport nltk\nimport math\nimport operator\n\n\nclass 
PushSummary():\n def __init__(self, lemda, interest_files, time, rest, fa, topicid):\n self.topicid = topicid\n self.L = len(self.topicid)\n self.SumOfLenthOfStream = 0\n self.wordInStream = {}\n self.lemda = lemda\n self.interest_files = interest_files\n self.time = time\n self.day = 1\n self.rest = rest\n self.fa = fa\n self.tfidfthresholdA = []\n self.jsdthresholdA = []\n self.lmsthresholdA = []\n self.tfidfthresholdB = []\n self.jsdthresholdB = []\n self.lmsthresholdB = []\n self.numofdayA = []\n self.numofdayB = []\n self.queries_numOfTweet = []\n self.queries_numOfWord = []\n self.queries_word = []\n self.queries_occur = []\n self.summaryA = []\n self.summaryB = []\n self.qoccur = []\n self.numofq = []\n self.numofqinstream = {}\n for i in range(self.L):\n # self.numofdayA[number] = 0\n # self.numofdayB[number] = 0\n self.numofdayA.append(0)\n self.numofdayB.append(0)\n\n self.queries_word.append({})\n self.queries_occur.append({})\n self.summaryA.append([])\n self.summaryB.append([])\n # self.word_tweet_query.append({})\n self.qoccur.append({})\n self.numofq.append({})\n self.tfidfthresholdA.append(0.7)\n self.jsdthresholdA.append(0.04)\n self.lmsthresholdA.append(0.02)\n self.tfidfthresholdB.append(0.5)\n self.jsdthresholdB.append(0.04)\n self.lmsthresholdB.append(0.02)\n self.queries_numOfTweet.append(0)\n self.queries_numOfWord.append(0)\n\n def pushSummarys(self, tweet):\n if ('delete' not in tweet) and (tweet['lang'] == 'en'):\n if 'retweeted_status' in tweet:\n tem = tweet['retweeted_status']\n tem['timestamp_ms'] = tweet['timestamp_ms']\n tem['created_at'] = tweet['created_at']\n tweet = tem\n delta = self.time.calculatetime(tweet['created_at'])\n if delta >= 1:\n for x in range(self.L):\n stemwords_interest_profile = self.interest_files[x]\n self.numofdayA[x] = 0\n self.numofdayB[x] = 0\n listofsummaryA = [summary[0] for summary in self.summaryA[x] if summary[1] == self.day]\n if len(listofsummaryA) > 0:\n self.tfidfthresholdA[x] = min(summaryA[2] 
for summaryA in listofsummaryA)\n\n listofsummaryB = [summary[0] for summary in self.summaryB[x] if summary[1] == self.day]\n if len(listofsummaryB) > 0:\n self.tfidfthresholdB[x] = min(summaryB[2] for summaryB in listofsummaryB)\n sumoflen = sum(summaryBBBB[5] for summaryBBBB in listofsummaryB)\n ADL = sumoflen / len(listofsummaryB)\n lenofq = len(stemwords_interest_profile)\n result = []\n for summaryBBB in listofsummaryB:\n score = 0\n TF = summaryBBB[4]\n for q in stemwords_interest_profile:\n tf = TF[q]\n avgtf = sum(TF[qq] for qq in stemwords_interest_profile) / len(TF)\n RITF = math.log2(1 + tf) / math.log2(1 + avgtf)\n LRTF = tf * math.log2(1 + ADL / summaryBBB[5])+0.0001\n w = 2 / (1 + math.log2(1 + lenofq))\n TFF = w * RITF / (1 + RITF) + (1 - w) * LRTF / (1 + LRTF)\n IDF = math.log((len(listofsummaryB) + 1) / (self.qoccur[x][q] + 1)) + 0.0001\n AEF = self.numofq[x][q] / (self.qoccur[x][q] + 1)\n TDF = IDF * AEF / (1 + AEF)\n sim = TFF * TDF\n score += sim\n del tf, avgtf, RITF, LRTF, w, TFF, IDF, AEF, TDF, sim\n result.append([score, summaryBBB[1]])\n del listofsummaryB\n result.sort(key=operator.itemgetter(0), reverse=True)\n j = 1\n for i in result:\n if (self.day) > 9:\n d = '201608' + str(self.day)\n else:\n d = '2016080' + str(self.day)\n with open('B.txt', 'a') as ff:\n ff.write(\n '%s %s Q0 %s %s %s CCNUNLPrun2\\n' % (\n d, self.topicid[x], i[1], str(j), i[0]))\n j = j + 1\n self.time.settime()\n self.day = self.day + 1\n content = tweet['text']\n stemwords_tweet = preprocess(content)\n del content\n wordInTweet = {}\n if stemwords_tweet == False:\n pass\n else:\n numOfWordAtweet = len(stemwords_tweet)\n self.SumOfLenthOfStream = numOfWordAtweet + self.SumOfLenthOfStream\n id_str = tweet['id_str']\n for word in stemwords_tweet:\n if word in self.wordInStream:\n self.wordInStream[word] += 1\n else:\n self.wordInStream[word] = 1\n if word in wordInTweet:\n wordInTweet[word] += 1\n else:\n wordInTweet[word] = 1\n for x in range(self.L):\n 
stemwords_interest_profile = self.interest_files[x]\n for q in stemwords_interest_profile:\n if q in self.numofqinstream:\n self.numofqinstream[q] += stemwords_tweet.count(q)\n else:\n self.numofqinstream[q] = stemwords_tweet.count(q)\n for x in range(self.L):\n stemwords_interest_profile = self.interest_files[x]\n count = sum(stemwords_tweet.count(wordsss) for wordsss in stemwords_interest_profile)\n lenofq = len(stemwords_interest_profile)\n if count >= 1:\n qt = {}\n for qqq in stemwords_interest_profile:\n if qqq in qt:\n qt[qqq] += stemwords_tweet.count(qqq)\n else:\n qt[qqq] = stemwords_tweet.count(qqq)\n lms = 0\n samewords = [q for q in stemwords_interest_profile if q in stemwords_tweet]\n for qq in samewords:\n Pq = self.lemda * 1.0 / float(lenofq) + (1 - self.lemda) * float(\n self.numofqinstream[qq]) / float(\n self.SumOfLenthOfStream)\n Pt = self.lemda * qt[qq] / float(numOfWordAtweet) + (1 - self.lemda) * float(\n self.numofqinstream[qq]) / float(self.SumOfLenthOfStream)\n M = 0.5 * (Pq + Pt)\n lms += 0.5 * Pq * math.log(Pq / M) + 0.5 * Pt * math.log(Pt / M)\n if lms <= self.lmsthresholdA[x]:\n sumoftfidf = 0.0\n for word in stemwords_tweet:\n if word in self.queries_word[x]:\n self.queries_word[x][word] += 1\n else:\n self.queries_word[x][word] = 1\n for word in set(stemwords_tweet):\n if word not in self.queries_occur[x]:\n self.queries_occur[x][word] = 1\n else:\n self.queries_occur[x][word] += 1\n\n self.queries_numOfWord[x] += numOfWordAtweet\n self.queries_numOfTweet[x] += 1\n\n for word in stemwords_tweet:\n tf = self.queries_word[x][word] / self.queries_numOfWord[x]\n idf = math.log2((self.queries_numOfTweet[x] + 1) / self.queries_occur[x][word])\n sumoftfidf = sumoftfidf + tf * idf\n if sumoftfidf >= self.tfidfthresholdA[x] and self.numofdayA[x] < 10:\n listofsummaryA = [summary[0] for summary in self.summaryA[x]]\n if len(listofsummaryA) > 0:\n jsd = []\n for summary in listofsummaryA:\n sumofjsd = 0\n tf = {}\n for wordss in summary[0]:\n if 
wordss in tf:\n tf[wordss] += 1\n else:\n tf[wordss] = 1\n sameword = [word for word in stemwords_tweet if\n word in summary[0]]\n if len(sameword) > 0:\n for word in sameword:\n Pti = float(wordInTweet[word]) / float(numOfWordAtweet)\n Psi = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)\n thetaTi = self.lemda * Pti + (1 - self.lemda) * Psi\n Ptj = float(tf[word]) / float(len(summary[0]))\n Psj = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)\n thetaTj = self.lemda * Ptj + (1 - self.lemda) * Psj\n # sumofjsd += thetaTi * math.log(thetaTi / thetaTj)\n M = float((thetaTi + thetaTj) / 2)\n sumofjsd += 0.5 * (thetaTi * math.log(thetaTi / M)) + 0.5 * (\n thetaTj * math.log(thetaTj / M))\n jsd.append(sumofjsd)\n else:\n jsd.append(0.06)\n JSD = min(jsd)\n else:\n JSD = 0.04\n # print('kld:' + str(JSD))\n if JSD >= self.jsdthresholdA[x]:\n #self.rest.Post(self.topicid[x], id_str)\n self.lmsthresholdA[x] = lms\n self.jsdthresholdA[x] = JSD\n self.numofdayA[x] += 1\n a = [stemwords_tweet, id_str, sumoftfidf, JSD]\n self.summaryA[x].append([a, self.day])\n self.fa.write('%s %s tfidf:%s jsd:%s lms:%s\\n' % (self.day, self.topicid[x], sumoftfidf, JSD,lms))\n if lms <= self.lmsthresholdB[x]:\n sumoftfidf = 0.0\n for word in stemwords_tweet:\n if word in self.queries_word[x]:\n self.queries_word[x][word] += 1\n else:\n self.queries_word[x][word] = 1\n for word in set(stemwords_tweet):\n if word not in self.queries_occur[x]:\n self.queries_occur[x][word] = 1\n else:\n self.queries_occur[x][word] += 1\n\n self.queries_numOfWord[x] += numOfWordAtweet\n self.queries_numOfTweet[x] += 1\n\n for word in stemwords_tweet:\n tf = self.queries_word[x][word] / self.queries_numOfWord[x]\n idf = math.log2((self.queries_numOfTweet[x] + 1) / self.queries_occur[x][word])\n sumoftfidf = sumoftfidf + tf * idf\n if sumoftfidf >= self.tfidfthresholdB[x] and self.numofdayB[x] < 100:\n listofsummaryB = [summary[0] for summary in self.summaryB[x]]\n if 
len(listofsummaryB) > 0:\n jsd = []\n for summary in listofsummaryB:\n sumofjsd = 0\n sameword = [word for word in stemwords_tweet if word in summary[0]]\n tf = {}\n for wordss in summary[0]:\n if wordss in tf:\n tf[wordss] += 1\n else:\n tf[wordss] = 1\n if len(sameword) > 0:\n for word in sameword:\n Pti = float(wordInTweet[word]) / float(numOfWordAtweet)\n Psi = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)\n thetaTi = self.lemda * Pti + (1 - self.lemda) * Psi\n Ptj = float(tf[word]) / float(len(summary[0]))\n Psj = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)\n thetaTj = self.lemda * Ptj + (1 - self.lemda) * Psj\n # sumofjsd += thetaTi * math.log(thetaTi / thetaTj)\n M = float((thetaTi + thetaTj) / 2)\n sumofjsd += 0.5 * (thetaTi * math.log(thetaTi / M)) + 0.5 * (\n thetaTj * math.log(thetaTj / M))\n jsd.append(sumofjsd)\n else:\n jsd.append(0.06)\n JSD = min(jsd)\n else:\n JSD = 0.04\n if JSD >= self.jsdthresholdB[x]:\n self.numofdayB[x] += 1\n lenoflistB=len(listofsummaryB)\n self.jsdthresholdB[x]=(self.jsdthresholdB[x]*lenoflistB+JSD)/(lenoflistB+1)\n self.lmsthresholdB[x]=(self.lmsthresholdB[x]*lenoflistB+JSD)/(lenoflistB+1)\n TF = {}\n for q in stemwords_interest_profile:\n TF[q] = stemwords_tweet.count(q)\n if q in stemwords_tweet:\n if q in self.qoccur[x]:\n self.qoccur[x][q] += 1\n else:\n self.qoccur[x][q] = 1\n else:\n self.qoccur[x][q] = 0\n if q in self.numofq[x]:\n self.numofq[x][q] += stemwords_tweet.count(q)\n else:\n self.numofq[x][q] = stemwords_tweet.count(q)\n b = [stemwords_tweet, id_str, sumoftfidf, JSD, TF, numOfWordAtweet]\n self.summaryB[x].append([b, self.day])\n pass\n" }, { "alpha_fraction": 0.5905905961990356, "alphanum_fraction": 0.6256256103515625, "avg_line_length": 30.1875, "blob_id": "539eb5a89edde12c8bae0046a3ef235627214bae", "content_id": "15260036313cd5aa44393a414abb084b33556510", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 999, 
"license_type": "no_license", "max_line_length": 93, "num_lines": 32, "path": "/run2_crawlerA/Rest.py", "repo_name": "beichao1314/TREC2016", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 8 19:37:33 2016\n\n@author: xiaobei\n\"\"\"\nimport pycurl\nimport requests\nimport json\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\nclass REST(object):\n def __init__(self,clientid):\n self.clientid=clientid\n self.c = pycurl.Curl()\n self.c.setopt(pycurl.CUSTOMREQUEST, 'POST')\n self.c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json'])\n\n def GetTopic(self):\n # curl -H 'Content-Type: application/json' hostname.com/topics/abcdefghijk\n url = \"http://54.164.151.19:80/topics/\" + self.clientid\n header = {'content-type': 'application/json'}\n r = requests.get(url, headers=header)\n return json.loads(r.text)\n\n def Post(self, topicid, tweetid):\n url = \"http://54.164.151.19:80/tweet/\" + topicid + \"/\" + tweetid + \"/\" + self.clientid\n self.c.setopt(pycurl.URL, url)\n self.c.perform()\n # r = self.c.getinfo(pycurl.HTTP_CODE)\n return True\n\n" }, { "alpha_fraction": 0.6524389982223511, "alphanum_fraction": 0.6981707215309143, "avg_line_length": 26.33333396911621, "blob_id": "6fc708517c514747d585498aeb14687e22568d87", "content_id": "d7b2ca5ae210b647a056a1343c411ed27b2ed9b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 328, "license_type": "no_license", "max_line_length": 65, "num_lines": 12, "path": "/run2_crawlerA/extension.py", "repo_name": "beichao1314/TREC2016", "src_encoding": "UTF-8", "text": "from py_bing_search import PyBingWebSearch\n\n# s1= \t9uCkTYlAG9x4iPdxAeDuQipYvc2vEn6oUbPKZJnFlVY\n# s2=3L8LwEROeBFVSA1FwUVKLfIO+Ue979rarr+Y4mBZwaE\ns3 = 'E+ok1GP7qpi6xgtE0yfsbrQFZSElgMBK2ZD1kwf/WXA'\ns4 = 'AKvk0/D9XzJuCQA9n/a+TFbqwOFder9xd9Yj/22ivA8'\ns5='r8OUqrE+DW/W4qs8ShfN2ljAU8214AkuksvYy7iMPGk'\n\ndef search(search_term):\n bing_web = 
PyBingWebSearch(s5, search_term,web_only=False) \n first_ten_result = bing_web.search(limit=10, format='json') \n return first_ten_result\n" }, { "alpha_fraction": 0.6795774698257446, "alphanum_fraction": 0.6830986142158508, "avg_line_length": 28.894737243652344, "blob_id": "677911ccbda9258074e871d69d89a300fd14745a", "content_id": "1056e12cdd3c598edf664e7536af540f88fe8ed6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "no_license", "max_line_length": 82, "num_lines": 19, "path": "/run2_crawlerA/estimate_time.py", "repo_name": "beichao1314/TREC2016", "src_encoding": "UTF-8", "text": "# from datetime import datetime\nimport datetime\nimport time as T\nfrom email.utils import parsedate\n\n\nclass Time(object):\n def __init__(self, firsttime):\n self.firsttime = parsedate(firsttime)\n self.firsttime = datetime.datetime.fromtimestamp(T.mktime(self.firsttime))\n\n def calculatetime(self, time):\n time = parsedate(time)\n time = datetime.datetime.fromtimestamp(T.mktime(time))\n t = (time - self.firsttime).days\n return t\n\n def settime(self):\n self.firsttime = self.firsttime + datetime.timedelta(hours=24)\n" }, { "alpha_fraction": 0.5242910981178284, "alphanum_fraction": 0.5377751588821411, "avg_line_length": 30.518749237060547, "blob_id": "9547e594290ac73bb593ca6e0744ec108bd59537", "content_id": "42cc68e8780cbfb46429dfc1d50baeb6786d64c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5043, "license_type": "no_license", "max_line_length": 119, "num_lines": 160, "path": "/run2_crawlerA/crawler.py", "repo_name": "beichao1314/TREC2016", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 7 17:10:58 2016\n\n@author: xiaobei\n\"\"\"\n\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport logging.handlers\nfrom rewritesummary import 
PushSummary\nfrom Rest import REST\nimport json\nfrom estimate_time import Time\nfrom extension import search as sch\nimport logging\nfrom process_profile import preprocess_profile\nimport time\n\nconsumer_key = \"bEyQ2mZRAABCIdZajeaYhpnUe\"\nconsumer_secret = \"kJUa3IHjUFm1znHCoAnaDQY7RUPGzcMqveFcgvsh3i7v4Jta3b\"\n\naccess_token = \"2910563640-Z77URQhoPhDsg393yazywkd0WHjjqWrn1tlV8aH\"\naccess_token_secret = \"gPRcz33gphQL2VTDEQ40Uu8yTqVNoOwXZ1TAMQYSV4MHm\"\nlogging.basicConfig(level=logging.INFO)\n\nclass TweetListener(StreamListener):\n def __init__(self, api=None):\n super(TweetListener, self).__init__(api)\n self.logger = logging.getLogger('tweetlogger')\n\n # print('a')\n statusHandler = logging.handlers.TimedRotatingFileHandler('status.log', when='H', encoding='utf-8', utc=True)\n statusHandler.setLevel(logging.INFO)\n self.logger.addHandler(statusHandler)\n\n warningHandler = logging.handlers.TimedRotatingFileHandler('warning.log', when='H', encoding='utf-8', utc=True)\n warningHandler.setLevel(logging.WARN)\n self.logger.addHandler(warningHandler)\n logging.captureWarnings(True)\n\n consoleHandler = logging.StreamHandler()\n consoleHandler.setLevel(logging.WARN)\n self.logger.addHandler(consoleHandler)\n\n # self.logger.setLevel(logging.INFO)\n self.count = 0\n\n def on_data(self, data):\n data=json.loads(data,encoding='utf-8')\n # print(data)\n pushSummary.pushSummarys(data)\n self.count += 1\n # self.logger.info(data)\n # with open('test.txt','a') as f:\n # f.write(data+'\\n')\n # print(data)\n # tweet=json.load(data)\n # print(type(tweet))\n # pushSummary.pushSummarys(json.loads(data))\n # print(self.count)\n if self.count % 1000 == 0:\n print(\"%d statuses processed %s\" % (self.count, time.strftime('%X', time.localtime(time.time()))))\n return True\n\n def on_error(self, exception):\n self.logger.warning(str(exception))\n\n\nif __name__ == '__main__':\n listener = TweetListener()\n auth = OAuthHandler(consumer_key, consumer_secret)\n 
auth.set_access_token(access_token, access_token_secret)\n stream = Stream(auth, listener)\n # clientid = rest.Getclientid()\n with open('clientidrun2.txt', 'r') as f:\n clientid = f.read()\n rest = REST(clientid)\n # topics = rest.GetTopic()\n # interest_files = {}\n # count = 0\n # x = 0\n # topicid = {}\n # for i in topics:\n # number = i['topid']\n # title = i['title']\n # Desc = i['description']\n # Narr = i['narrative']\n # title = preprocess_profile(title)\n # Desc = preprocess_profile(Desc)\n # Narr = preprocess_profile(Narr)\n # tf = {}\n # for word in title:\n # if word in tf:\n # tf[word] += 1\n # else:\n # tf[word] = 1\n # for word in Desc:\n # if word in tf:\n # tf[word] += 1\n # else:\n # tf[word] = 1\n # for word in Narr:\n # if word in tf:\n # tf[word] += 1\n # else:\n # tf[word] = 1\n # a = sorted(tf.items(), key=lambda d: d[1], reverse=True)\n # b = [d[0] for d in a[0:5]]\n # stemwords_interest_profile = b\n # b = ' '.join(b)\n # s = sch(b)\n # count += 1\n # logging.info(count)\n # search = []\n # stf = {}\n # for i in s:\n # j = preprocess_profile(i.title)\n # for k in j:\n # f = []\n # for l in k:\n # if ord(l) < 127:\n # f.append(l)\n # search.append(''.join(f))\n # for word in search:\n # if word in stf:\n # stf[word] += 1\n # else:\n # stf[word] = 1\n # d = sorted(stf.items(), key=lambda d: d[1], reverse=True)\n # e = 0\n # for n in range(len(d)):\n # if d[n][0] not in stemwords_interest_profile:\n # stemwords_interest_profile.append(d[n][0])\n # e += 1\n # if e >= 5:\n # break\n # interest_files[x] = stemwords_interest_profile\n # topicid[x] = number\n # x += 1\n with open('q_e.txt', 'r') as f:\n c = f.read()\n interest_files = eval(c)\n with open('q_x.txt', 'r') as ff:\n d = ff.read()\n topicid = eval(d)\n # with open('q_e.txt', 'w') as f:\n # f.write(str(interest_files))\n # with open('q_x.txt', 'w') as ff:\n # ff.write(str(topicid))\n times = Time('Tue Aug 02 00:00:00 +0000 2016')\n fa = open('A.txt', 'a', encoding='utf-8')\n pushSummary 
= PushSummary(0.9, interest_files, times, rest, fa, topicid)\n while True:\n try:\n stream.sample()\n except Exception as ex:\n print(str(ex))\n pass\n" } ]
6
frontier96/Covid-data-analysis-and-prediction
https://github.com/frontier96/Covid-data-analysis-and-prediction
73a1b89179e994fe2b256beddba77e164d196ffb
e51644a298b99d04dbead17d57a5e9063dbfcc58
79dce81ff23a91bc6506aea3439f698179e43ed1
refs/heads/main
"2022-12-22T20:22:56.654298"
"2020-09-30T03:11:32"
"2020-09-30T03:11:32"
299,793,851
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6521506905555725, "alphanum_fraction": 0.6743515729904175, "avg_line_length": 29.910959243774414, "blob_id": "96ae51d04b69f829e462d8de80ee40a1ed8ad0a0", "content_id": "48313fa5f213cdfba67fe70c914d4f0c238ab07e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9369, "license_type": "no_license", "max_line_length": 198, "num_lines": 292, "path": "/old/temp.py", "repo_name": "frontier96/Covid-data-analysis-and-prediction", "src_encoding": "UTF-8", "text": "# mortality_rate = data_states1['Mortality_Rate']\n# #operate_data_counties1['mortality_rate'] = operate_data_counties1[operate_data_counties1['State'].isin(mortality_rate.index)]\n\n# plt.figure(figsize = (5,5))\n# sns.distplot(data_states1['Mortality_Rate'])\n# plt.figure(figsize = (5,5))\n# sns.scatterplot(x=data_states1['Province_State'],y=data_states1['Mortality_Rate'])\n\n\n\n\n\n#added 5/10 sunday by Jack\nmortality_rate = data_states1['Mortality_Rate']\n#operate_data_counties1['mortality_rate'] = operate_data_counties1[operate_data_counties1['State'].isin(mortality_rate.index)]\n\nplt.figure(figsize = (10,3))\nsns.distplot(data_states1['Mortality_Rate'])\nplt.figure(figsize = (10,7))\np1 = sns.scatterplot(x=data_states1['People_Tested'],y=data_states1['Mortality_Rate'])\n\nfor line in range(0,data_states1.shape[0]):\n if data_states1['People_Tested'][line] >0 and data_states1['Mortality_Rate'][line] >0:\n p1.text(data_states1['People_Tested'][line]+0.2, data_states1['Mortality_Rate'][line], data_states1['Province_State'][line],\n #data_states1['People_Tested'],data_states1['Mortality_Rate'],data_states1['Province_State'], \n horizontalalignment='left', size='small', color='black') #, weight='semibold')\n\n\n\n\n\n#mortality_rate.value_counts()\n#data_states1[data_states1['Mortality_Rate']>8]\n\n\n\n\n\nstate_abbreviation = data_counties1.groupby(['StateName', 'State']).agg(sum)#['StateName']\nstate_dict_original = 
state_abbreviation.reset_index()[['StateName', 'State']]\nnew_state_df = pd.DataFrame({'StateName': ['AK','VI', 'PR', \"HI\", 'GU',\"AS\",'MP'], \n 'State': ['Alaska', 'Virgin Islands','Puerto Rico','Hawaii', \"Guam\",'American Samoa'\n ,'Northern Marianas']})\nstate_dict_combined = pd.concat([state_dict_original, new_state_df])\nstate_dict = state_dict_combined.set_index('StateName')['State']\nmapped_state = data_counties1['StateName'].map(state_dict)\nmapped_state.isna().sum()\n#state_dict\n\n\n\n\ndata_counties1['State updated'] = mapped_state\nmortality_dict = data_states1[data_states1['Country_Region'] == 'US'][['Province_State', 'Mortality_Rate']].set_index('Province_State')['Mortality_Rate']\nmapped_mortality = data_counties1['State updated'].map(mortality_dict)\nmapped_mortality.isna().sum() #should be American Samoa \ndata_counties1['Mortality Rate'] = mapped_mortality\ndata_counties1.head()\n\n\n\n\n\nfirst_case_count = data_conf1['First_Case'].value_counts()\nplt.figure(figsize=(12, 9))\nplt.bar(first_case_count.index, first_case_count.values)\nplt.title(\"Distribution of First discovered case by county\")\n\n\n\n\nfirst_death_count = data_death1['First_Death'].value_counts()\nplt.figure(figsize=(12, 9))\nplt.bar(first_death_count.index, first_death_count.values)\nplt.show()\n\n\n\n\noperate_data_counties1 = data_counties1[['CountyName', 'State updated', 'stay at home', 'public schools', '>500 gatherings', 'entertainment/gym', 'restaurant dine-in',\n 'Mortality Rate']][data_counties1['State updated'] != 'American Samoa'][data_counties1['State updated'] != 'Northern Marianas']\n#operate_data_counties1[operate_data_counties1['CountyName']=='Washington']\noperate_data_counties1['Mortality Rate'] = operate_data_counties1['Mortality Rate'].fillna(3.413353)\noperate_data_counties1_with_states = operate_data_counties1\noperate_data_counties1_with_states\n\n\n\n\noperate_data_counties1.isnull().sum()\n\n\n\n\noperate_data_counties1_with_states['stay at home'] = 
operate_data_counties1_with_states['stay at home'].fillna(np.mean(operate_data_counties1_with_states['stay at home']))\noperate_data_counties1_with_states['public schools'] = operate_data_counties1_with_states['public schools'].fillna(np.mean(operate_data_counties1_with_states['public schools']))\noperate_data_counties1_with_states['>500 gatherings'] = operate_data_counties1_with_states['>500 gatherings'].fillna(np.mean(operate_data_counties1_with_states['>500 gatherings']))\noperate_data_counties1_with_states.isnull().sum()\n#operate_data_counties1_with_states[operate_data_counties1_with_states['stay at home'].isnull()]\n\n\n\n\n\n#operate_data_counties1_with_states['stay at home'].value_counts()\n#operate_data_counties1_with_states['public schools'].value_counts()\n#operate_data_counties1_with_states['>500 gatherings'].value_counts()\n\n\n\n\noperate_data_counties1_with_states = operate_data_counties1_with_states.merge(data_conf1[[\"\", \"First_Case\", \"First_Hundred_Case\"]], on = )\n\n\n\n\n\n\ndata_counties1_PCA = data_counties1.select_dtypes(['number']).drop(columns=['STATEFP','COUNTYFP'])\n# center our data and normalize the variance\ndf_mean = np.mean(data_counties1_PCA)\ndf_centered = data_counties1_PCA - df_mean\ndf_centered_scaled = df_centered / (np.var(df_centered))**0.5\ndata_counties1_PCA = df_centered_scaled\ndata_counties1_PCA_fillna =data_counties1_PCA.fillna(method = 'ffill') #use the previous valid data to fill NaN,\n #good here since closeby county likely to be in the same State\n\ndata_counties1_PCA_fillna2 = data_counties1_PCA_fillna.fillna(0) #fill NaN with no previous valid data (whole column is NaN)\n#sum(data_counties1_PCA_fillna2.isna().sum())\ndata_counties1_PCA_fillna2\n\n\n\n\n#PCA \nu, s, vt = np.linalg.svd(data_counties1_PCA_fillna2, full_matrices=False)\nP = u @ np.diag(s)\ndf_1st_2_pcs =pd.DataFrame(P[:,0:2], columns=['pc1', 'pc2'])\nfirst_2_pcs = df_1st_2_pcs\n\n#jittered scatter plot (added noise)\nfirst_2_pcs_jittered = 
first_2_pcs + np.random.normal(0, 0.1, size = (len(first_2_pcs), 2))\nsns.scatterplot(data = first_2_pcs_jittered, x = \"pc1\", y = \"pc2\");\n\n#a better looking scatter plot with labels\n#import plotly.express as px\n#px.scatter(data_frame = first_2_pcs_jittered, x = \"pc1\", y = \"pc2\", text = list(df_1972_to_2016.index)).update_traces(textposition = 'top center')\n\n\n\n\n\n#scree plot\nplt.figure(figsize = (10,10))\nx = list(range(1, s.shape[0]+1)) \nplt.plot(x, s**2 / sum(s**2)); \nplt.xticks(x, x);\nplt.xlabel('PC #');\nplt.ylabel('Fraction of Variance Explained');\n\n\n\n\n\n\nfrom sklearn.model_selection import train_test_split\n\ntrain, test = train_test_split(operate_data_counties1_with_states, test_size=0.1, random_state=42)\n\n\n\n\n\n\nplt.figure(figsize = (5,5))\n#sns.regplot(operate_data_counties1_with_states['Mortality_Rate'])\n\n\n\n\noperate_data_counties1_with_states = operate_data_counties1_with_states.merge(data_first_case, how = \"inner\", left_on = ['CountyName', 'State updated'], right_on = ['County_Name', 'Province_State'])\noperate_data_counties1_with_states = operate_data_counties1_with_states.merge(data_first_death, how = \"left\", left_on = ['CountyName', 'State updated'], right_on = ['County_Name', 'Province_State'])\n\noperate_data_counties1_with_states.head()\n\n\n\n\n\noperate_data_counties1_with_states.drop(['Province_State_x', 'Province_State_y', 'County_Name_y', 'County_Name_x'], axis = 1, inplace = True)\noperate_data_counties1_with_states.head()\n\n\n\n\n\n\n\ntime_since_first_case = operate_data_counties1_with_states.copy()\ntime_since_first_case['stay at home'] = time_since_first_case['stay at home'] - time_since_first_case['First_Case']\ntime_since_first_case['public schools'] = time_since_first_case['public schools'] - time_since_first_case['First_Case']\ntime_since_first_case['entertainment/gym'] = time_since_first_case['entertainment/gym'] - time_since_first_case['First_Case']\ntime_since_first_case['>500 gatherings'] = 
time_since_first_case['>500 gatherings'] - time_since_first_case['First_Case']\ntime_since_first_case['restaurant dine-in'] = time_since_first_case['restaurant dine-in'] - time_since_first_case['First_Case']\ntime_since_first_case.head()\n\n\n\n\n\n\n\n\n\nX_train = train.drop(['CountyName', 'State updated','Mortality Rate'], axis=1)\nY_train = train['Mortality Rate']\n\nX_train[:5], Y_train[:5]\n\n\n\n\n\n\n\n\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import metrics\n\n\n\nmodel = LinearRegression(fit_intercept=True) # should fit intercept be true?\nmodel.fit(X_train, Y_train)\n\nY_prediction = model.predict(X_train)\n\n\ntraining_loss = metrics.mean_squared_error(Y_prediction, Y_train)\nprint(\"Training loss: \", training_loss)\n\n\n\n\n\n\n\n\nplt.figure(figsize = (5,5))\nsns.regplot(Y_prediction, Y_train)\n\n\n\n\n\n\n\n\n\nplt.figure(figsize = (5,5))\nsns.regplot(Y_prediction, Y_train-Y_prediction)\n\n\n\n\n\n\n# perform cross validation\nfrom sklearn import model_selection as ms\n\n# finding which features to use using Cross Validation\nerrors = []\nrange_of_num_features = range(1, X_train.shape[1] + 1)\nfor N in range_of_num_features:\n print(f\"Trying first {N} features\")\n model = LinearRegression()\n \n # compute the cross validation error\n error = ms.cross_val_score(model, X_train.iloc[:, 0:N], Y_train).mean()\n \n print(\"\\tScore:\", error)\n errors.append(error)\n\nbest_num_features = np.argmax(errors) + 1\nprint (best_num_features)\nbest_err = min(errors)\n\nprint(f\"Best choice, use the first {best_num_features} 
features\")\n\n\n\n#===================================================================================================================\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.7515528202056885, "alphanum_fraction": 0.7763975262641907, "avg_line_length": 39, "blob_id": "cd934357927b8e5b00e2d23685c111eefd58273f", "content_id": "e063010b8c214705aa105c5ff131487b8c60eab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 161, "license_type": "no_license", "max_line_length": 77, "num_lines": 4, "path": "/README.md", "repo_name": "frontier96/Covid-data-analysis-and-prediction", "src_encoding": "UTF-8", "text": "# Covid data analysis and prediction\n Covid data analysis and prediction project\n\nView \"COVID-19_Project__report v1.9 .pdf\" if jupyter notebook does not load. \n" } ]
2
nurtai00/WebDevProjectBack
https://github.com/nurtai00/WebDevProjectBack
a2fe5968831420356435e8554a99a3aa3d5af09e
f19619640936d51a6c51577e65dc69c6de609ee0
1ac9ff43def77434f5e4773388502747aada4e98
refs/heads/main
"2023-04-22T04:29:52.240443"
"2021-05-08T09:15:33"
"2021-05-08T09:15:33"
365,426,786
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6418337821960449, "alphanum_fraction": 0.6418337821960449, "avg_line_length": 24.846153259277344, "blob_id": "157348790bd8724fba9deb9d231091b9649e3740", "content_id": "a8be24941c6f455fd11db42d2258f87bd7bb9377", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "no_license", "max_line_length": 61, "num_lines": 26, "path": "/projectback/api/serializers.py", "repo_name": "nurtai00/WebDevProjectBack", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\r\nfrom api.models import Category, Product, Cart, User\r\n\r\n\r\nclass CategoryModelSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = Category\r\n fields = ('id', 'name', 'description')\r\n\r\n\r\nclass ProductSerializer(serializers.Serializer):\r\n class Meta:\r\n model = Product\r\n fields = ('name', 'description', 'price', 'category')\r\n\r\n\r\nclass CartSerializer(serializers.Serializer):\r\n class Meta:\r\n model = Cart\r\n fields = ('username', 'address', 'book')\r\n\r\n\r\nclass UserModelSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = User\r\n fields = ('id', 'username', 'password')\r\n" }, { "alpha_fraction": 0.6808874011039734, "alphanum_fraction": 0.69112628698349, "avg_line_length": 37.06666564941406, "blob_id": "cc876a25ca6afb9103111111039eeaab0d0fb359", "content_id": "1ffda28bc24fca6ba0e39706f3346ddd9a07341c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 96, "num_lines": 15, "path": "/projectback/api/urls.py", "repo_name": "nurtai00/WebDevProjectBack", "src_encoding": "UTF-8", "text": "from django.urls import path\r\nfrom api import views\r\n\r\nfrom api.views import product_list, product_detail, category_list, product2_list, category2_list\r\n\r\n\r\nurlpatterns = [\r\n path('api/product', product_list),\r\n 
path('api/product/<int:product_id>/', product_detail),\r\n path('api/category', category_list),\r\n path('api/product2', product2_list),\r\n path('api/category2', category2_list),\r\n path('api/product-list', views.ProductViewSet.as_view(), name='product-list'),\r\n path('product-list/<str:pk>/', views.ProductDetailViewSet.as_view(), name='product-detail')\r\n]\r\n" }, { "alpha_fraction": 0.6410579085350037, "alphanum_fraction": 0.6502938866615295, "avg_line_length": 32.273380279541016, "blob_id": "ace02cc48ae82fac632f3d66aabae1e6027db866", "content_id": "39a93110936bf48eb294b39c10c925932bd5d7cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4764, "license_type": "no_license", "max_line_length": 107, "num_lines": 139, "path": "/projectback/api/views.py", "repo_name": "nurtai00/WebDevProjectBack", "src_encoding": "UTF-8", "text": "from api.models import Product, Category\r\nfrom django.http.response import JsonResponse\r\nfrom api.serializers import CategoryModelSerializer, ProductSerializer, CartSerializer, UserModelSerializer\r\nfrom rest_framework.decorators import api_view, permission_classes\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.views import APIView\r\n\r\nfrom django.contrib.auth import authenticate\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom rest_framework.authtoken.models import Token\r\nfrom rest_framework.decorators import api_view, permission_classes\r\nfrom rest_framework.permissions import AllowAny\r\nfrom rest_framework.status import (\r\n HTTP_400_BAD_REQUEST,\r\n HTTP_404_NOT_FOUND,\r\n HTTP_200_OK\r\n)\r\nfrom rest_framework.response import Response\r\n\r\n\r\ndef product_list(request):\r\n product = Product.objects.all()\r\n product_json = [product.to_json() for product in product]\r\n return JsonResponse(product_json, safe=False)\r\n\r\n\r\ndef product_detail(request, product_id):\r\n try:\r\n product = 
Product.objects.get(id=product_id)\r\n except Product.DoesNotExist as e:\r\n return JsonResponse({'message': str(e)}, status=400)\r\n return JsonResponse(product.to_json())\r\n\r\n\r\ndef category_list(request):\r\n category = Category.objects.all()\r\n category_json = [category.to_json() for category in category]\r\n return JsonResponse(category_json, safe=False)\r\n\r\n\r\n@api_view(['GET', 'POST'])\r\ndef product2_list(request):\r\n if request.method == 'GET':\r\n try:\r\n products = Product.objects.all()\r\n serializer = ProductSerializer(products, many=True)\r\n return JsonResponse(serializer.data, safe=False)\r\n except:\r\n return JsonResponse({\"status\": \"500\"}, safe=False)\r\n if request.method == 'POST':\r\n try:\r\n category = Category.objects.get(name=request.data['category'])\r\n except:\r\n return JsonResponse({\"status\": \"200\"}, safe=False)\r\n Product.objects.create(\r\n category=category,\r\n name=request.data['name'],\r\n description=request.data['description'],\r\n image=request.data['image'],\r\n price=request.data['price']\r\n )\r\n return JsonResponse({\"status\": \"200\"}, safe=False)\r\n\r\n\r\n@api_view(['GET', 'POST'])\r\ndef category2_list(request):\r\n if request.method == 'GET':\r\n try:\r\n categories = Category.objects.all()\r\n serializer = CategoryModelSerializer(categories, many=True)\r\n return JsonResponse(serializer.data, safe=False)\r\n except:\r\n return JsonResponse({\"status\": \"505\"}, safe=False)\r\n if request.method == 'POST':\r\n try:\r\n category = Category.objects.get(name=request.data['category'])\r\n serializer = CategoryModelSerializer(category, many=True)\r\n return JsonResponse(serializer.data, safe=False)\r\n except:\r\n return JsonResponse({'status': '200'}, safe=False)\r\n\r\n\r\nclass ProductViewSet(APIView):\r\n @staticmethod\r\n def get(request):\r\n queryset = Product.objects.all()\r\n serializer = ProductSerializer(queryset, many=True)\r\n return JsonResponse(serializer.data, 
safe=False)\r\n\r\n\r\nclass ProductDetailViewSet(APIView):\r\n @staticmethod\r\n def get(request, pk):\r\n queryset = Product.objects.get(id=pk)\r\n serializer = ProductSerializer(queryset, many=False)\r\n return Response(serializer.data)\r\n\r\n\r\n@csrf_exempt\r\n@api_view([\"POST\"])\r\n@permission_classes((AllowAny,))\r\ndef login(request):\r\n username = request.data.get(\"username\")\r\n password = request.data.get(\"password\")\r\n if username is None or password is None:\r\n return Response({'error': 'Please provide both username and password'},\r\n status=HTTP_400_BAD_REQUEST)\r\n user = authenticate(username=username, password=password)\r\n if not user:\r\n return Response({'error': 'Invalid Credentials'},\r\n status=HTTP_404_NOT_FOUND)\r\n token, _ = Token.objects.get_or_create(user=user)\r\n return Response({'token': token.key},\r\n status=HTTP_200_OK)\r\n\r\n\r\n@csrf_exempt\r\n@api_view([\"GET\"])\r\ndef sample_api(request):\r\n data = {'sample_data': 123}\r\n return Response(data, status=HTTP_200_OK)\r\n# def get(request):\r\n# queryset = Product.objects.all()\r\n# serializer = ProductSerializer(queryset, many=True)\r\n# return Response(serializer.data)\r\n#\r\n#\r\n# class ProductViewSet(APIView):\r\n# pass\r\n#\r\n#\r\n# def get(request, pk):\r\n# queryset = Product.objects.get(id=pk)\r\n# serializer = ProductSerializer(queryset, many=False)\r\n# return Response(serializer.data)\r\n#\r\n#\r\n# class ProductDetailViewSet(APIView):\r\n# pass\r\n" }, { "alpha_fraction": 0.7689075469970703, "alphanum_fraction": 0.7689075469970703, "avg_line_length": 27.75, "blob_id": "3504137fb71dfaf2eacaedd99ba9c51d7b9c2e41", "content_id": "608ea4834340a9d1c9adf40bfb2a499fe9c1c8da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/projectback/api/admin.py", "repo_name": "nurtai00/WebDevProjectBack", "src_encoding": "UTF-8", 
"text": "from django.contrib import admin\r\nfrom api.models import Product, Category, Cart, User\r\n\r\n# Register your models here.\r\nadmin.site.register(Product),\r\nadmin.site.register(Category),\r\nadmin.site.register(Cart),\r\nadmin.site.register(User)\r\n" }, { "alpha_fraction": 0.5957821011543274, "alphanum_fraction": 0.614235520362854, "avg_line_length": 27.179487228393555, "blob_id": "17cb4496db56e3ac987afa51e0a77d5e4e024858", "content_id": "24e985ec1f94c1054fc1dc9fed0abe9d086bfc7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1138, "license_type": "no_license", "max_line_length": 91, "num_lines": 39, "path": "/projectback/api/models.py", "repo_name": "nurtai00/WebDevProjectBack", "src_encoding": "UTF-8", "text": "from django.db import models\r\n\r\n\r\nclass Category(models.Model):\r\n name = models.CharField(max_length=200)\r\n description = models.TextField(max_length=500, default='')\r\n\r\n def to_json(self):\r\n return {\r\n 'id': self.id,\r\n 'name': self.name,\r\n 'description': self.description\r\n }\r\n\r\n\r\nclass Product(models.Model):\r\n name = models.CharField(max_length=200)\r\n description = models.TextField(max_length=500, default='')\r\n price = models.IntegerField(default=0)\r\n category = models.ForeignKey(Category, null=True, on_delete=models.CASCADE, blank=True)\r\n\r\n def to_json(self):\r\n return {\r\n 'id': self.id,\r\n 'name': self.name,\r\n 'description': self.description,\r\n 'price': self.price\r\n }\r\n\r\n\r\nclass Cart(models.Model):\r\n username = models.CharField(max_length=50)\r\n address = models.TextField()\r\n book = models.ForeignKey(Product, null=True, on_delete=models.CASCADE, blank=True)\r\n\r\n\r\nclass User(models.Model):\r\n username = models.CharField(max_length=20)\r\n password = models.CharField(max_length=2222)\r\n" }, { "alpha_fraction": 0.5097940564155579, "alphanum_fraction": 0.5248618721961975, "avg_line_length": 33.55356979370117, 
"blob_id": "823eb1197fa37fd57ea1d6041462c50eef922cb7", "content_id": "4b5e5570bdbc45b67d02d8fd3ac11a9e274ec4b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1991, "license_type": "no_license", "max_line_length": 130, "num_lines": 56, "path": "/projectback/api/migrations/0002_auto_20210508_0140.py", "repo_name": "nurtai00/WebDevProjectBack", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.2 on 2021-05-07 19:40\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('api', '0001_initial'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Category',\r\n fields=[\r\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('name', models.CharField(max_length=99)),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='User',\r\n fields=[\r\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('username', models.CharField(max_length=20)),\r\n ('password', models.CharField(max_length=2222)),\r\n ],\r\n ),\r\n migrations.RemoveField(\r\n model_name='product',\r\n name='address',\r\n ),\r\n migrations.RemoveField(\r\n model_name='product',\r\n name='city',\r\n ),\r\n migrations.AddField(\r\n model_name='product',\r\n name='price',\r\n field=models.IntegerField(default=0),\r\n ),\r\n migrations.CreateModel(\r\n name='Cart',\r\n fields=[\r\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('username', models.CharField(max_length=50)),\r\n ('address', models.TextField()),\r\n ('book', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.product')),\r\n ],\r\n ),\r\n migrations.AddField(\r\n model_name='product',\r\n name='category',\r\n field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.CASCADE, to='api.category'),\r\n ),\r\n ]\r\n" } ]
6
DonJayamanne/vscode-python-samples
https://github.com/DonJayamanne/vscode-python-samples
c45c7cfb762254cd5c753e0af112de4a9318e626
8b78d3048333f5f67484ed9d0716892633cda574
cc17019f1bc261c6629fd890037862cc678e3415
refs/heads/master
"2021-07-16T14:53:31.333374"
"2019-07-30T00:22:35"
"2019-07-30T00:22:35"
121,982,763
75
42
MIT
"2018-02-18T19:23:55"
"2021-05-28T01:32:50"
"2021-06-10T18:57:28"
Python
[ { "alpha_fraction": 0.6711711883544922, "alphanum_fraction": 0.7117117047309875, "avg_line_length": 13.800000190734863, "blob_id": "4547cbbc1be8fddff077ff05c64e212847e6a868", "content_id": "972d9e84dee195ec6b66199b87b321ffab860d2b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 222, "license_type": "permissive", "max_line_length": 41, "num_lines": 15, "path": "/remote-debugging-locally/sample.py", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "import ptvsd\nimport time\nimport os\n\nprint(os.curdir)\nprint(\"Waiting to attach\")\n\naddress = ('0.0.0.0', 3000)\nptvsd.enable_attach('my_secret', address)\nptvsd.wait_for_attach()\n\ntime.sleep(2)\n\nprint(\"attached\")\nprint(\"end\")\n" }, { "alpha_fraction": 0.7162673473358154, "alphanum_fraction": 0.7360235452651978, "avg_line_length": 58.474998474121094, "blob_id": "6ffc31c98acfb2bf5f5828a053d105f84faac1c9", "content_id": "b3ae09d6bd6624e249b1ca66c66cf966307b2d58", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2379, "license_type": "permissive", "max_line_length": 276, "num_lines": 40, "path": "/remote-debugging-locally/README.md", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "# Remote Debugging Sample (on the same Machine)\n\n### Step 1: Configure VS Code to use a Python environment \n* Open a terminal window\n* Type the following command in the terminal window\n`virtualenv --python=python3.6 .env`\n* Reload VS Code using the command `Reload Window` (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n* Open the file `sample.py`\n* Select the command `Python: Select Interpteter` and select the Python environment created above (found in `./.env` directory created above)\n\n### Step 2: Install PTVSD version 3.0.0 \n* Open a terminal using the command 
[Python: Create Terminal](https://code.visualstudio.com/docs/python/environments#_activating-an-environment-in-the-terminal) (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n* Enter the following command in the above terminal\n `python -m pip install ptvsd==3.0.0`\n\n### Step 3: Start program to be debugged\n* Run the file `sample.py` in the Python environment containing PTVSD\n * Option 1:\n * Open the file `sample.py`\n * Right click on editor window and select the menu `Run Python File in Terminal`\n * Option 2:\n * Open a terminal using the command [Python: Create Terminal](https://code.visualstudio.com/docs/python/environments#_activating-an-environment-in-the-terminal) (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n * Enter the following command in the above terminal\n `python sample.py`\n* Now, the above program is running and waiting for a debugger to attach to it\n\n### Step 4: Attaching the debugger\n* Open `sample.py`\n* Add a breakpoint to the line `print(\"attached\")`\n* Go into the debugger menu and select `Python: Attach` and press the green arrow icon \n* Wait for around 2 seconds and the debugger should hit at the breakpoint\n\n### Troubleshooting\n* Have you started the `sample.py`?\n* Check whether the debugger is listening on port 3000 using the commands \n * Use a command line tool such as `netstat` or any other\n * `netstat -an -p tcp | grep 3000` or `netstat -ano | find \"3000\"`\n* Check whether you are able to connect to the above port\n * Use a command line tool such as `telnet` or `nc` or any other\n * `telnet 127.0.0.1 3000` or `nc 127.0.0.1 3000`\n" }, { "alpha_fraction": 0.7396658062934875, "alphanum_fraction": 0.7481676936149597, "avg_line_length": 57.81034469604492, "blob_id": "42b7367ef78e73233cf16cbcb969e34dbf275afa", "content_id": "4e10eaf9b2449c54268d27d7197044ce3dad39fa", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3411, "license_type": "permissive", "max_line_length": 276, "num_lines": 58, "path": "/remote-debugging-flask/README.md", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "# Remote Debugging Sample (on the same Machine)\n\n## Setup your remote environment\n### Step 1: Open the workspace in your remote environment\n* This step will ensure you have setup Flask to be executed in the remote environment.\n\n### Step 2: Configure VS Code to use a Python environment\n* Open a terminal window\n* Type the following command in the terminal window\n`virtualenv --python=python3.6 .env`\n* Reload VS Code using the command `Reload Window` (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n* Open the file `app.py`\n* Select the command `Python: Select Interpteter` and select the Python environment created above (found in `./.env` directory created above)\n\n### Step 3: Install Flask and PTVSD version 3.0.0\n* Open a terminal using the command [Python: Create Terminal](https://code.visualstudio.com/docs/python/environments#_activating-an-environment-in-the-terminal) (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n* Enter the following command in the above terminal\n `python -m pip install flask ptvsd==3.0.0`\n\n### Step 4: Start flask to be debugged\n* Run the Flask application\n * Option 1:\n * Go into the debugger menu and start debugging using the `Python: Flask` debug configuration.\n * Option 2:\n * Open a terminal using the command [Python: Create Terminal](https://code.visualstudio.com/docs/python/environments#_activating-an-environment-in-the-terminal) (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n * Enter the following command in the above terminal\n `export FLASK_APP=app.py`\n 
`python -m flask run`\n* Wait for Flask to start in the terminal window\n* Once started you should see a message similar to the following in the terminal window:\n```shell\n * Serving Flask app \"app\"\n * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)\n ```\n\n## Setup your local environment\n### Step 1: Confirm flask is running\n* Identify the IP Address of your remote environment.\n* Open a browser window pointing to the Url that Flask is listening on (replacing the IP Address in the url)\n* Confirm the output on the browser window is `Hello World!`\n\n### Step 2: Open the workspace in your local environment\n* This step will ensure you have your local environment setup to debug the remote flask application.\n* You do not to setup the Python environment or Flask locally to debug a remote environment.\n\n### Step 2: Update the IP Address in `launch.json` (`\"host\": \"localhost\"`)\n* Identify the IP Address of your remote environment.\n* Open `.vscode/launch.json` and replace the value of the setting `host` from `localhost` to the IP address identified earlier.\n\n### Step 3: Update the remote folder in `launch.json` (`\"remoteRoot\": \"${workspaceFolder}\",`)\n* Identify the full path to the directory containing the file `sample.py` in your remote environment.\n* Open `.vscode/launch.json` and replace the value of the setting `remoteRoot` from `${workspaceFolder}` to the path identified earlier.\n\n### Step 4: Attach the debugger\n* Go into the debugger menu and select `Python: Attach` and press the green arrow icon \n* Open the `app.py` file and add a break point to the line `return \"Hello World!\"`\n* Refresh your browser window.\n* The debugger should hit the breakpoint.\n" }, { "alpha_fraction": 0.7523809671401978, "alphanum_fraction": 0.7650793790817261, "avg_line_length": 41.95454406738281, "blob_id": "2a2ded8be3cfebc0ac4df84f41caf6f908b3ef3c", "content_id": "2ec0d354288e6ec5e43c8c38406a3c0c776367bc", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 945, "license_type": "permissive", "max_line_length": 87, "num_lines": 22, "path": "/remote-debugging-docker/README.md", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "# Remote Debugging Sample (on the same Machine)\n\n## Setup your remote environment\n### Step 1: Build docker image and run it\n* Open a temrinal window\n* Type the following command in the terminal window\n`docker build -t remote-debugging-docker .`\n* Type the following command in the terminal window\n`docker run -it -p 3000:3000 remote-debugging-docker`\n* Confirm the following is displayed in the terminal window\n`Waiting to attach`\n\n## Setup your local environment\n### Step 1: Open the workspace in your local environment\n* This step will ensure you have setup the program to be debugged locally.\n* You do not to setup the Python environment locally to debug a remote environment.\n\n### Step 2: Attach the debugger\n* Open `sample.py`\n* Add a breakpoint to the line `print(\"attached\")`\n* Go into the debugger menu and select `Python: Attach` and press the green arrow icon \n* Wait for around 2 seconds and the debugger should hit at the breakpoint\n" }, { "alpha_fraction": 0.7348674535751343, "alphanum_fraction": 0.7483741641044617, "avg_line_length": 45.488372802734375, "blob_id": "1b10c00c3f97d8e354d13b106cfc43695001f66e", "content_id": "dbd4cd4deea80654ed3a6906d0838d39be57a521", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1999, "license_type": "permissive", "max_line_length": 268, "num_lines": 43, "path": "/sample-flask/README.md", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "# Flask Debugging Sample\n\n### Step 1: Configure VS Code to use a Python environment \n* Open a terminal window\n* Type the following command in the terminal window\n`virtualenv 
--python=python3.6 .env`\n* Reload VS Code using the command `Reload Window` (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n* Open the file `sample.py`\n* Select the command `Python: Select Interpteter` and select the Python environment created above (found in `./.env` directory created above)\n\n### Step 2: Install Flask \n* Open a terminal using the command [Python: Create Terminal](https://code.visualstudio.com/docs/python/environments#_activating-an-environment-in-the-terminal) (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n* Enter the following command in the above terminal\n `python -m pip install flask`\n\n### Step 3: Launch Flask in debug mode\n* Go into the debugger menu and start debugging using the `Python: Flask` debug configuration.\n* Wait for Flask to start in the terminal window\n* Once started you should see something similar to the following in the terminal window:\n```shell\n * Serving Flask app \"app\"\n * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)\n ```\n\n### Step 4: Confirm flask is running\n* Open a browser window pointing to the Url that Flask is listening on\n* Confirm the output on the browser window is `Hello World!`\n\n### Step 5: Debug Flask\n* Open the `app.py` file and add a break point to the line `return \"Hello World!\"`\n* Refresh your browser window.\n* The debugger should hit the breakpoint.\n\n### Troubleshooting\n* Confirm flask is running in the terminal window.\n* Confirm you can see `Hello World!` in the browser widnow.\n* Try running flask manually and testing it as follows (in your terminal window):\n```shell\n$ export FLASK_APP=app.py\n$ python -m flask run\n * Serving Flask app \"app\"\n * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)\n```\n" }, { "alpha_fraction": 0.40740740299224854, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 12.5, "blob_id": 
"5b911e3a65521b97ab66b6e662ef2cb83f93648c", "content_id": "5f01e9a6086cab2225c9b89e7f4cc0bd16094318", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 27, "license_type": "permissive", "max_line_length": 13, "num_lines": 2, "path": "/remote-debugging-docker-django/requirements.txt", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "Django==2.0.2\nptvsd==3.0.0\n" }, { "alpha_fraction": 0.7181870937347412, "alphanum_fraction": 0.7524694800376892, "avg_line_length": 44.28947448730469, "blob_id": "d195ae6e63adb4fc365dadff9268279f95983b5e", "content_id": "364a53c188aaecaf62f6d9ecf15a8563ea8d9dd0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1721, "license_type": "permissive", "max_line_length": 139, "num_lines": 38, "path": "/remote-debugging-docker-django/README.md", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "# Django Debugging Sample\n\n**Note:**\nThis sample uses Python 3.6 and Django 2.0 \nThis is a sample Django application created using [tutorial](https://docs.djangoproject.com/en/2.0/intro/tutorial01/) on the Django website\n\n## Setup your remote environment\n### Step 1: Build docker image and run it\n* Open a temrinal window\n* Type the following command in the terminal window\n`docker build -t remote-debugging-docker-django .`\n* Type the following command in the terminal window\n`docker run -it -p 3000:3000 -p 8000:8000 remote-debugging-docker-django`\n* Wait for Django to start in the container\n* Once started you should see something similar to the following in the terminal window:\n```shell\nStarting development server at http://127.0.0.1:8000/\nQuit the server with CONTROL-C.\n```\n\n### Step 2: Confirm Django is running\n* Open a browser window pointing to the Url that Django is listening on\n* Confirm Django home page appears\n* Next navigate to 
the `polls` page ([http://127.0.0.1:8000/polls](http://127.0.0.1:8000/polls))\n* Confirm the output on the browser window is `Hello, world. You're at the polls index.`\n\n## Setup your local environment\n### Step 1: Open the workspace in your local environment\n* This step will ensure you have setup the program to be debugged locally.\n* You do not to setup the Python environment locally to debug a remote environment.\n\n### Step 2: Attach the debugger\n* Go into the debugger menu and select `Python: Attach` and press the green arrow icon \n\n### Step 3: Debug Django\n* Open the `polls/views.py` file and add a break point to the line `return HttpResponse(\"Hello, world. You're at the polls index.\")` \n* Refresh your browser window.\n* The debugger should hit the breakpoint.\n" }, { "alpha_fraction": 0.7364341020584106, "alphanum_fraction": 0.7582364082336426, "avg_line_length": 53.31578826904297, "blob_id": "7082a01527d32c15dc2da845a32c1fd547e54c9f", "content_id": "5520b2af0a716590a665d307980a76afc9c603a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2064, "license_type": "permissive", "max_line_length": 268, "num_lines": 38, "path": "/sample-django/README.md", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "# Django Debugging Sample\n\n**Note:**\nThis sample uses Python 3.6 and Django 2.0 \nThis is a sample Django application created using [tutorial](https://docs.djangoproject.com/en/2.0/intro/tutorial01/) on the Django website\n\n### Step 1: Configure VS Code to use a Python environment \n* Open a terminal window\n* Type the following command in the terminal window\n`virtualenv --python=python3.6 .env`\n* Reload VS Code using the command `Reload Window` (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n* Open the file `sample.py`\n* Select the command `Python: Select Interpteter` and select the 
Python environment created above (found in `./.env` directory created above)\n\n### Step 2: Install Django\n* Open a terminal using the command [Python: Create Terminal](https://code.visualstudio.com/docs/python/environments#_activating-an-environment-in-the-terminal) (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n* Enter the following command in the above terminal\n `python -m pip install -r requirements.txt`\n\n### Step 3: Launch Django in debug mode\n* Go into the debugger menu and start debugging using the `Python: Django` debug configuration.\n* Wait for Django to start in the terminal window\n* Once started you should see something similar to the following in the terminal window:\n```shell\nStarting development server at http://127.0.0.1:8000/\nQuit the server with CONTROL-C.\n```\n\n### Step 4: Confirm Django is running\n* Open a browser window pointing to the Url that Django is listening on\n* Confirm Django home page appears\n* Next navigate to the `polls` page ([http://127.0.0.1:8000/polls](http://127.0.0.1:8000/polls))\n* Confirm the output on the browser window is `Hello, world. You're at the polls index.`\n\n### Step 5: Debug Django\n* Open the `polls/views.py` file and add a break point to the line `return HttpResponse(\"Hello, world. 
You're at the polls index.\")` \n* Refresh your browser window.\n* The debugger should hit the breakpoint.\n" }, { "alpha_fraction": 0.7327283620834351, "alphanum_fraction": 0.7441452145576477, "avg_line_length": 58.92982482910156, "blob_id": "27c8a8b3b404c93fc2ee967ddebc433f3d41f524", "content_id": "8c5d251dbb906b4666da79a901e2965a1adfa8e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3416, "license_type": "permissive", "max_line_length": 276, "num_lines": 57, "path": "/remote-debugging/README.md", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "# Remote Debugging Sample (on the same Machine)\n\n## Setup your remote environment\n### Step 1: Open the workspace in your remote environment\n* This step will ensure you have setup the program to be executed in the remote environment.\n\n### Step 2: Configure VS Code to use a Python environment\n* Open a terminal window\n* Type the following command in the terminal window\n`virtualenv --python=python3.6 .env`\n* Reload VS Code using the command `Reload Window` (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n* Open the file `sample.py`\n* Select the command `Python: Select Interpteter` and select the Python environment created above (found in `./.env` directory created above)\n\n### Step 3: Install PTVSD version 3.0.0\n* Open a terminal using the command [Python: Create Terminal](https://code.visualstudio.com/docs/python/environments#_activating-an-environment-in-the-terminal) (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n* Enter the following command in the above terminal\n `python -m pip install ptvsd==3.0.0`\n\n### Step 4: Start program to be debugged\n* Run the file `sample.py` in the Python environment containing PTVSD\n * Option 1:\n * Open the file `sample.py`\n * Right click on editor 
window and select the menu `Run Python File in Terminal`\n * Option 2:\n * Open a terminal using the command [Python: Create Terminal](https://code.visualstudio.com/docs/python/environments#_activating-an-environment-in-the-terminal) (from your [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette))\n * Enter the following command in the above terminal\n `python sample.py`\n* Now, the above program is running and waiting for a debugger to attach to it\n\n## Setup your local environment\n### Step 1: Open the workspace in your local environment\n* This step will ensure you have setup the program to be debugged locally.\n* You do not to setup the Python environment locally to debug a remote environment.\n\n### Step 2: Update the IP Address in `launch.json` (`\"host\": \"localhost\"`)\n* Identify the IP Address of your remote environment.\n* Open `.vscode/launch.json` and replace the value of the setting `host` from `localhost` to the IP address identified earlier.\n\n### Step 3: Update the remote folder in `launch.json` (`\"remoteRoot\": \"${workspaceFolder}\",`)\n* Identify the full path to the directory containing the file `sample.py` in your remote environment.\n* Open `.vscode/launch.json` and replace the value of the setting `remoteRoot` from `${workspaceFolder}` to the path identified earlier.\n\n### Step 4: Attach the debugger\n* Open `sample.py`\n* Add a breakpoint to the line `print(\"attached\")`\n* Go into the debugger menu and select `Python: Attach` and press the green arrow icon \n* Wait for around 2 seconds and the debugger should hit at the breakpoint\n\n## Troubleshooting\n* Have you started the `sample.py`?\n* Check whether the debugger is listening on port 3000 using the commands \n * Use a command line tool such as `netstat` or any other\n * `netstat -an -p tcp | grep 3000` or `netstat -ano | find \"3000\"`\n* Check whether you are able to connect to the above port\n * Use a command line tool such as 
`telnet` or `nc` or any other\n * `telnet <Remote IPAddress> 3000` or `nc <Remote IPAddress> 3000`\n" }, { "alpha_fraction": 0.8196721076965332, "alphanum_fraction": 0.8196721076965332, "avg_line_length": 29.5, "blob_id": "5b47e8728f572a18086a34dbfb1fe362c7636185", "content_id": "6e440abb940cd974279456655b6c81782427793a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "permissive", "max_line_length": 36, "num_lines": 2, "path": "/README.md", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "# vscode-python-samples\nSamples for VS Code Python extension\n" }, { "alpha_fraction": 0.6988543272018433, "alphanum_fraction": 0.6988543272018433, "avg_line_length": 29.549999237060547, "blob_id": "cb361d13070ae067c5edb8d0a73f609c79c17cf1", "content_id": "dca1b87b30cc2d4121df289024f185f70f730b97", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 611, "license_type": "permissive", "max_line_length": 117, "num_lines": 20, "path": "/sample-django/home/views.py", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.template import loader\n\n\ndef index(request):\n context = {\n 'value_from_server':'one', \n 'another_value_from_server':'two'\n }\n return render(request, 'index.html', context)\n\n# from django.shortcuts import render\n# from django.shortcuts import render_to_response\n# # Create your views here.\n# from django.http import HttpResponse\n\n\n# def index(request):\n# return render_to_response('index.html', context={'value_from_server':'one', 'another_value_from_server':'two'})\n# #return HttpResponse(\"Hello, world. 
You're at the home index.\")\n" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.6428571343421936, "avg_line_length": 13, "blob_id": "aa2e88dc9798a9f08fc74ca49e782202d650bf50", "content_id": "1682a0c12988a8b85fbd363abc9420001ce55ec1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 14, "license_type": "permissive", "max_line_length": 13, "num_lines": 1, "path": "/sample-django/requirements.txt", "repo_name": "DonJayamanne/vscode-python-samples", "src_encoding": "UTF-8", "text": "Django==2.0.2\n" } ]
12
AEJ-FORMATION-DATA-IA/exercicepython-kmk
https://github.com/AEJ-FORMATION-DATA-IA/exercicepython-kmk
27eb76bbb18f5b38d97fe37f980a717942873cf4
61f21017bdd18ae0ca81fb43d4f84f1fe16c5577
ae79bc38ff41aba5719d44725dc62b5b216e3410
refs/heads/main
"2023-08-17T20:02:04.398657"
"2021-10-11T02:48:27"
"2021-10-11T02:48:27"
415,756,257
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5934320092201233, "alphanum_fraction": 0.6258094310760498, "avg_line_length": 12.339506149291992, "blob_id": "4229c41b1d77bfbb352dc07476473aba98fc7118", "content_id": "4cd50cc768b212a2800ca15876df17b5b0529f9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2170, "license_type": "no_license", "max_line_length": 151, "num_lines": 162, "path": "/exercice IGS.py", "repo_name": "AEJ-FORMATION-DATA-IA/exercicepython-kmk", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[92]:\n\n\nA = 15\nB = 4\nC = A + B\nprint(\"le résultat de \", A, \"+\", B,\" est \",C)\n\n\n# In[93]:\n\n\n#la multiplication des deux variables\nD = A * B\nprint(\"le résultat de la multiplication de \", A,\"*\",B ,\" est \",D)\n#la puissance\nE = A**B\nprint(\"le resultat de la puissance de \", A,\"**\", B ,\"est \",E)\n#la division retournant la partie decimal\nF = A / B\nprint(\"le resultat de cette opération \", A,\"/\",B,\" est \",F)\n#la division retournant la partie entier\nG = A // B\nprint(\"le resultat de cette opération \", A,\"//\",B,\" est \",G)\n#le reste de notre division\nH = A % B\nprint(\"le reste cette opération \", A,\"%\",B,\" est \",H)\n\n\n# In[94]:\n\n\n#creation d'un dictionnaire et remplissage par defaut \ndico_igs = {\n A + B : C,\n A * B : D,\n A**B : E,\n A/B : F,\n A//B : G,\n A%B : H\n}\nprint([A+B])\n\n\n# In[96]:\n\n\n#Ajouter un objet dans dictionnaire\ndico_igs[\"IA\"]=\"Groupe 1\"\nprint(dico_igs)\n\n\n# In[98]:\n\n\n#modifier un objet du dictionaire\n\ndico_igs[\"IA\"]=\"Data IA Groupe 1\"\nprint(dico_igs)\n\n\n# In[99]:\n\n\n#suppression d'un objet dans le dictionnaire\ndico_igs.pop(A+B)\nprint(dico_igs)\n\n\n# In[101]:\n\n\n#afficher la liste des clés d'un dictionnaire\nfor cle in dico_igs.keys():\n print (cle)\n\n\n# In[102]:\n\n\n#afficher la liste des valeurs d'un dictionnaire\nfor valeur in dico_igs.values():\n print(valeur)\n\n\n# 
In[103]:\n\n\n#afficher la liste cle-valeur du dictionnaire\nfor cle,valeur in dico_igs.items():\n print(cle,\":\", valeur)\n\n\n# In[107]:\n\n\n# creation de notre tuple\ntuple_igs = (A,B,C)\nprint(tuple_igs)\n\n\n# In[108]:\n\n\n#ajout de valeur a notre tuple\n#on ne peut pas ajouter de valeur dans un tuples car On utilisera un tuple pour définir des sortes de constantes qui n'ont donc pas vocation à changer.\n\n\n# In[133]:\n\n\n#les listes\n#creation d'une liste\n\nlist_igs=[\"A\",\"B\",\"C\",\"D\"]\nprint(list_igs)\n\n\n# In[135]:\n\n\nliste1 = [\"A\",\"B\",\"C\",\"D\"]\n\n\n# In[134]:\n\n\nliste2 = [A, B, C, D]\nprint(liste2)\n\n\n# In[136]:\n\n\nliste3 = [liste1, liste2]\nprint(liste3)\n\n\n# In[137]:\n\n\n#ajouter E et F a la liste1\nliste1.append(\"E, F\")\nprint(liste3)\n\n\n# In[138]:\n\n\n#Supprimer B de la liste1\nliste1.remove('B')\nprint(liste1)\n\n\n# In[140]:\n\n\nliste1[0]=('G')\nprint(liste1)\n\n" }, { "alpha_fraction": 0.5793871879577637, "alphanum_fraction": 0.5793871879577637, "avg_line_length": 24.714284896850586, "blob_id": "8f72266aa6749ebf4af4759557e32cc51b645d75", "content_id": "9c88bde999bc40c37f2f63d2a4751adc50a47dee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 359, "license_type": "no_license", "max_line_length": 73, "num_lines": 14, "path": "/temp.py", "repo_name": "AEJ-FORMATION-DATA-IA/exercicepython-kmk", "src_encoding": "UTF-8", "text": "A = input(\"entrez un nombre entier: \")\ntry:\n A = int(A)\nexcept:\n A=input(\"\\n Erreur !!! Veuillez entrer un nombre entier: \")\n A=int(A)\nB=input(\"entrez un deuxieme nombre entier: \")\ntry:\n B = int(B)\nexcept:\n B = input(\"\\n Erreur !!!, Votre nombre doit etre un nombre entier: \")\n B = int(B)\nC = A + B\nprint(\"le resultat de \",A,\" + \",B,\" = \",C)" } ]
2
torchioalexis/python_basico
https://github.com/torchioalexis/python_basico
db090917ffb293b10029260db4f07cda8e9959a3
bd95330e14dff65b9b4fa40b1817ead7d4c8f9e5
f946ec340a7ba363083de9f5ab4a1293efdf8a3c
refs/heads/main
"2023-05-07T00:20:51.123331"
"2021-05-30T04:01:48"
"2021-05-30T04:01:48"
371,582,510
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5698924660682678, "alphanum_fraction": 0.5806451439857483, "avg_line_length": 25.64285659790039, "blob_id": "5e32543f2ad54ba96bde3f39bae40a413075d0bf", "content_id": "f166ca35f94109e8b47c8de3ab22f29da81e13b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 376, "license_type": "no_license", "max_line_length": 82, "num_lines": 14, "path": "/exha_enum.py", "repo_name": "torchioalexis/python_basico", "src_encoding": "UTF-8", "text": "def run():\n square_root = int(input(\"Ingrese un número para calcular su raíz cuadrada: \"))\n square = 0\n\n while square**2 < square_root:\n square += 1\n\n if square**2 == square_root:\n print (\"La raíz cuadrada de\", square_root, \"es\", square)\n else:\n print (square_root, \"no tiene raíz cuadrada exacta\")\n\nif __name__ == \"__main__\":\n run()" }, { "alpha_fraction": 0.7801418304443359, "alphanum_fraction": 0.7943262457847595, "avg_line_length": 70, "blob_id": "99d6295e2c91c9750837413af3ceb7b645386465", "content_id": "c4f768d8efedd24e780f23b3de60816ec55f8d17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 141, "license_type": "no_license", "max_line_length": 110, "num_lines": 2, "path": "/README.md", "repo_name": "torchioalexis/python_basico", "src_encoding": "UTF-8", "text": "<h1> PYTHON BASIC COURSE </h1>\nRepository created to upload the exercises I did based on the Python basic course done on the Platzi platform." } ]
2
Sohan-Pramanik/boilermake
https://github.com/Sohan-Pramanik/boilermake
a1bceaeb9df330fe0024b822ee859dc25cfefabb
2b43722430f2d74e815f8eac442c76025b5958ef
e694348bae9775ef723acd9f1e0d64af6fe7a526
refs/heads/main
"2023-02-21T18:21:08.607196"
"2021-01-23T17:50:49"
"2021-01-23T17:50:49"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7666666507720947, "alphanum_fraction": 0.8166666626930237, "avg_line_length": 59, "blob_id": "6b8ac5c171678309c31eb35d48692d7c31ad08ad", "content_id": "835f50eabf3852478ca2f07d02319aa56c5b7613", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "permissive", "max_line_length": 59, "num_lines": 1, "path": "/bin/timeline.py", "repo_name": "Sohan-Pramanik/boilermake", "src_encoding": "UTF-8", "text": "# Developed by matthew-notaro, nalinahuja22, and ClarkChan1\n" }, { "alpha_fraction": 0.5890603065490723, "alphanum_fraction": 0.5960729122161865, "avg_line_length": 22.766666412353516, "blob_id": "e04ab312e9317d77b7919ba848d950897c931a77", "content_id": "fd9669fe4fc36f4a6d8c44b873b14b8f0f6681aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 713, "license_type": "permissive", "max_line_length": 73, "num_lines": 30, "path": "/bin/audio.py", "repo_name": "Sohan-Pramanik/boilermake", "src_encoding": "UTF-8", "text": "# Developed by matthew-notaro, nalinahuja22, and ClarkChan1\n\nimport os\nimport sys\n\nclass Audio:\n def __init__(self, afile):\n # Audio File Path\n self.afile = afile\n\n # Audio Analysis\n self.track = []\n\n def analyze(self):\n # Audio File Duration\n duration = librosa.get_duration(filename = self.afile)\n\n # Iterate Over Audio\n for i in range(int(duration)):\n data, sr = librosa.load(self.afile, offset = i, duration = 1)\n\n onset = librosa.onset.onset_strength(data, sr = sr)\n\n tempo = librosa.beat.tempo(onset_envelope = onset, sr = sr)\n\n print(tempo)\n\nobj = Audio(\"../media/audio/solstice.mp3\")\n\nobj.analyze()\n" }, { "alpha_fraction": 0.7278911471366882, "alphanum_fraction": 0.7278911471366882, "avg_line_length": 14.473684310913086, "blob_id": "484aa4ab1947b72c915ef05204f9f0fabbc0ce34", "content_id": "8c715323810c8170bb0737277a81ec802b23920c", "detected_licenses": [ 
"MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 294, "license_type": "permissive", "max_line_length": 63, "num_lines": 19, "path": "/README.md", "repo_name": "Sohan-Pramanik/boilermake", "src_encoding": "UTF-8", "text": "## Mazeru\n\nSomething big is here...\n\nAnime Lofi Editor: Edits/Directs anime lofi videos for YouTube.\n\nData Structures:\n\n Timeline Object:\n\n Merges music and audio by correlating motion to tempo.\n\n Video Object:\n\n Performs motion analysis.\n\n Music Object:\n\n Performs tempo analysis.\n" } ]
3
jj240396/Medium
https://github.com/jj240396/Medium
284265249592f2f4689e92e16b28399db50b6b9b
c9187e8d26407d50c61e3afe8b71196de4bce352
322d0c74bf9048ba18f5999f345dc9c81f571899
refs/heads/master
"2022-04-19T11:00:53.562135"
"2020-04-19T11:39:30"
"2020-04-19T11:39:30"
256,982,657
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.695142388343811, "alphanum_fraction": 0.697934091091156, "avg_line_length": 32.12963104248047, "blob_id": "ece9ff07a87bb4d6482ec0893de7c6675de878d9", "content_id": "0a94e658276bdee434b3e3ce25bbfcdb2a277edf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1791, "license_type": "no_license", "max_line_length": 95, "num_lines": 54, "path": "/stock_portfolio_risk.py", "repo_name": "jj240396/Medium", "src_encoding": "UTF-8", "text": "\n\n#df - table containing the historical stock prices for the past 10 years.\n#df_weights - dict containing the weightage of each stock in the portfolio\n\n#converting the df_weights dict to dataframe\nweightage = pd.DataFrame.from_dict(df_weights)\nweightage = weightage.transpose()\nweightage.columns = ['weightage']\nnp.sum(weightage['weightage'])\nweightage.reset_index(inplace=True)\nweightage.columns = ['stock','weights']\n\n#calculating the annual return \ndf = df.groupby(['stock','year']).agg(\n{\n 'avg':'mean'\n})\ndf['prev_avg'] = df.groupby(['stock'])['avg'].shift(1)\ndf.reset_index(inplace=True)\ndf.dropna(inplace=True)\ndf['return'] = (df['avg'] - df['prev_avg'])/df['prev_avg']\n\n#calculating the weighted annual return\ndf = df.merge(weightage,on='stock')\ndf['weighted_return'] = df['return']*df['weights']\n\n#pivoting the table to get the covariance matrix and calculate the portfolio standard deviation\ndf_pivot = df.pivot('year', 'stock', 'weighted_return') \ndf_pivot.reset_index(inplace=True)\ncov_matrix = df_pivot.cov()\n\nfor i in range(len(cov_matrix)):\n for j in range(len(cov_matrix.columns)):\n if i != j:\n cov_matrix.iloc[i,j] = 2*cov_matrix.iloc[i,j]\n \nportfolio_std_deviation = np.sqrt(np.sum(cov_matrix.sum(axis=0)))\n\n#calculating the expected portfolio return\ndf_mean = df.groupby(['stock']).agg(\n{\n 'return':'mean'\n})\ndf_mean.columns = ['expected_return']\ndf_std = df.groupby(['stock']).agg(\n{\n 
'return':'std'\n})\ndf_std.columns = ['standard_deviation']\ndf_stats = df_mean.merge(df_std,on='stock')\ndf_stats.reset_index(inplace=True)\ndf_stats = df_stats.merge(weightage,on='stock')\ndf_stats['expected_return_weighted'] = df_stats['expected_return']*df_stats['weights']\n\nexpected_portolio_return = np.sum(df_stats['expected_return_weighted'])\n" } ]
1
Serqati/Flask-Bot-Classes
https://github.com/Serqati/Flask-Bot-Classes
cc15ebea12ae234b1948e4d3a020537fb0eecc6a
f1d1303486e88357eaf08423e866cfab1cd193ed
0388b84f0262b874f2583a083e9607972d38c854
refs/heads/master
"2022-04-09T01:22:36.212846"
"2020-03-14T13:06:26"
"2020-03-14T13:06:26"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7102803587913513, "alphanum_fraction": 0.7102803587913513, "avg_line_length": 14.285714149475098, "blob_id": "42e53131302589c2ca31407e21fdf88ca64e85d0", "content_id": "d01dde6f4a3fe82a44069910fb84423df82ccde0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 107, "license_type": "no_license", "max_line_length": 51, "num_lines": 7, "path": "/README.md", "repo_name": "Serqati/Flask-Bot-Classes", "src_encoding": "UTF-8", "text": "# flask-bot\n## Telegram bot with flask and webhook using heroku\n\n[] Web-Panel\n[X] Bot\n\n# Flask-Bot-Classes\n" }, { "alpha_fraction": 0.691956639289856, "alphanum_fraction": 0.6925271153450012, "avg_line_length": 52.83076858520508, "blob_id": "dcec1f5c555c100c1d9b01013623c81beed85c9a", "content_id": "0fc6b24fc1da2559cf82836526fdb2e35a4b25c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3506, "license_type": "no_license", "max_line_length": 289, "num_lines": 65, "path": "/main.py", "repo_name": "Serqati/Flask-Bot-Classes", "src_encoding": "UTF-8", "text": "from flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\nclass Message:\n\tdef __init__(self, update):\n\t\tupdate =eval(update)\n\t\tself.userid = update['message']['from']['id']\n\t\tself.is_bot = update['message']['from']['is_bot']\n\t\tself.first_name = update['message']['from']['first_name']\n\t\tself.username = update['message']['from']['username']\n\t\tself.language_code = update['message']['from']['language_code']\n\t\tself.username = update['message']['from']['username']\n\t\tself.chatid = update['message']['chat']['id']\n\t\tself.chatname = update['message']['chat']['first_name']\n\t\tself.chatusername = update['message']['chat']['username']\n\t\tself.chattype = update['message']['chat']['type']\n\t\tself.date = update['message']['date']\n\t\tself.text = update['message']['text']\n\t\tself.message_id = 
update['message']['message_id']\n\t\tprint( self.message_id, self.userid, self.is_bot, self.first_name, self.username, self.language_code, self.username, self.chatid, self.chatname, self.chatusername, self.chattype, self.date, self.text)\n\nclass Bot:\n\tdef __init__(self, token):\n\t\tself.url = f\"https://api.telegram.org/bot{token}/\"\n\n\tdef call(method, **kwargs):\n\t\tar = \"\"\n\t\tn = len(kwargs.keys())\n\t\ti = 0\n\t\tfor en,key,val in zip(kwargs.keys(), kwargs.values()):\n\t\t\ti+=1\n\t\t\tif i == n:\n\t\t\t\tar +=f\"{key}={val}\"\n\t\t\telse:\n\t\t\t\tar +=f\"{key}={val}&\"\n\t\tu = f\"{self.url}{method}?{ar}\"\n\t\treturn u \n\n\tdef send_message(self, chat_id, text, parse_mode='markdown', disable_web_page_preview=True, disable_notification=False, reply_to_message_id=None, reply_markup=None,schedule_date=None):\n\t\tu = self.call('sendMessage',chat_id=chat_id, text=text, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, schedule_date=schedule_date ,reply_markup=reply_markup)\n\n\tdef forward_message(self, chat_id, from_chat_id, message_ids, disable_notification=False , as_copy=False);\n\t\tif isinstance(message_ids, list):\n\t\t\tfor msg_id in message_ids:\n\t\t\t\tu = self.call('forwardMessage',chat_id=chat_id, from_chat_id=from_chat_id, message_id=msg_id, disable_notification=disable_notification , as_copy=as_copy)\n\t\telse:\n\t\t\tu = self.call('forwardMessage',chat_id=chat_id, from_chat_id=from_chat_id, message_id=msg_id, disable_notification=disable_notification,as_copy=as_copy)\n\t\n\tdef send_photo(self , chat_id , photo , file_ref=None , caption=None , parse_mode = \"markdown\" , disable_notification=False , reply_to_message_id=None , schedule_time=None , reply_markup=None):\n\t\tu = self.call('sendPhoto' , chat_id=chat_id , photo = photo , file_ref=file_ref , caption=caption , parse_mode=parse_mode , 
disable_notification=disable_notification , reply_to_message_id=reply_to_message_id , reply_markup=reply_markup)\n\t\n\tdef send_document(self , chat_id , document , file_ref=None , thumb=None , caption=None,parse_mode=\"markdown\" , disable_notification=False , reply_to_message_id=None , schedule_date=None,reply_markup=None):\n\t\tu = self.call('sendDocument' , chat_id=chat_id , document=document , file_ref=file_ref , thumb=thumb , caption=caption , parse_mode=parse_mode , disable_notification=disable_notification , reply_to_message_id=reply_to_message_id , reply_markup=reply_markup , schedule_date=schedule_date)\[email protected](\"/\", methods=['GET', 'POST'])\ndef home():\n\n if request.method==\"GET\":\n return \"\"\n\n else:\n \t\n print(request.get_json(force=True))\n Message(str(request.get_json(force=True)))\n return \":)\"\n\n\n\n\n\n\n\n" } ]
2
vanya2143/ITEA-tasks
https://github.com/vanya2143/ITEA-tasks
9227e2fc68c040ab9228e82b3516744fc8c68b25
d6c70da93d29896b89d753620087651af723bce5
653e94e6523d4df25d6fd9aa5bc0145ccf14f757
refs/heads/master
"2023-03-12T01:16:58.010730"
"2021-03-03T13:41:26"
"2021-03-03T13:41:26"
256,027,167
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5614973306655884, "alphanum_fraction": 0.5971479415893555, "avg_line_length": 19.035715103149414, "blob_id": "e3cb91f766c7f44ad517f5d2bdcd8929d8a12f2e", "content_id": "f3f8fa3e4c65267cdca127066017f8ce5cfaf29a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "no_license", "max_line_length": 73, "num_lines": 28, "path": "/hw-2/task_2.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "\"\"\"\n2. Написать декоратор log, который будет выводить на экран все аргументы,\nкоторые передаются вызываемой функции.\n@log\ndef my_sum(*args):\n return sum(*args)\n\nmy_sum(1,2,3,1) - выведет \"Функция была вызвана с - 1, 2, 3, 1\"\nmy_sum(22, 1) - выведет \"Функция была вызвана с - 22, 1\"\n\"\"\"\n\n\ndef log(func):\n def wrapper(*args):\n res = func(*args)\n print(\"Функция была вызвана с - \" + ', '.join(map(str, args)))\n return res\n\n return wrapper\n\n\n@log\ndef my_sum(*args):\n return\n\n\nif __name__ == '__main__':\n my_sum(11, 2, 3, 's', 4)\n" }, { "alpha_fraction": 0.5354645252227783, "alphanum_fraction": 0.5674325823783875, "avg_line_length": 24.024999618530273, "blob_id": "41f4e3f4429feb30c231a438ee5cb1e3a013dfb7", "content_id": "0654583e1228c97a52dfd2e86b2457addad825dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1229, "license_type": "no_license", "max_line_length": 71, "num_lines": 40, "path": "/hw-1/task_3.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "\"\"\"\nРеализовать алгоритм бинарного поиска на python.\nНа вход подается упорядоченный список целых чисел, а так же элемент,\nкоторый необходимо найти и указать его индекс,\nв противном случае – указать что такого элемента нет в заданном списке.\n\"\"\"\n\n\ndef search_item(some_list, find_item):\n some_list.sort()\n list_length = len(some_list)\n start = 0\n end = list_length - 1\n mid = 
list_length // 2\n i = 0\n\n while i < list_length:\n if find_item == some_list[mid]:\n return f'Число {some_list[mid]}, найдено по индексу {mid}'\n\n elif find_item > some_list[mid]:\n start = mid + 1\n mid = start + (end - start) // 2\n\n else:\n end = mid - 1\n mid = (end - start) // 2\n\n i += 1\n\n else:\n return f'Числа {find_item} нету в списке!'\n\n\nif __name__ == '__main__':\n # my_list = list(range(0, 100))\n my_list = [1, 23, 33, 54, 42, 77, 234, 99, 2]\n my_item = 42\n\n print(search_item(my_list, my_item))\n" }, { "alpha_fraction": 0.47804176807403564, "alphanum_fraction": 0.5651547908782959, "avg_line_length": 36.5405387878418, "blob_id": "d90656b52f8bc76052e7bfe50f2277b94394888e", "content_id": "d46e114cc1aff16dfefe2bbb1ebbe2d269773b2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1457, "license_type": "no_license", "max_line_length": 105, "num_lines": 37, "path": "/hw-6/task_2.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "# 2. 
Используя модуль unittests написать тесты: сложения двух матриц, умножения матрицы и метод transpose\n\nimport unittest\nfrom .task_1 import Matrix, MatrixSizeError\n\n\nclass TestMatrix(unittest.TestCase):\n def setUp(self) -> None:\n self.matrix_1 = Matrix([[1, 2, 9], [3, 4, 0], [5, 6, 4]])\n self.matrix_2 = Matrix([[2, 3, 0], [1, 2, 3], [5, 6, 4]])\n\n self.matrix_3 = Matrix([[2, 9], [4, 0], [6, 4]])\n self.matrix_4 = Matrix([[2, 9], [4, 0], [6, 4]])\n\n def test_add_three(self):\n self.assertEqual(self.matrix_1 + self.matrix_2, [[3, 5, 9], [4, 6, 3], [10, 12, 8]])\n\n def test_add_two_size(self):\n self.assertEqual(self.matrix_3 + self.matrix_4, [[4, 18], [8, 0], [12, 8]])\n\n def test_add_error(self):\n with self.assertRaises(MatrixSizeError):\n self.matrix_1 + self.matrix_3\n\n def test_mul_integer(self):\n self.assertEqual(self.matrix_1 * 2, [[2, 4, 18], [6, 8, 0], [10, 12, 8]])\n\n def test_mul_float(self):\n self.assertEqual(self.matrix_1 * 2.5, [[2.5, 5.0, 22.5], [7.5, 10.0, 0.0], [12.5, 15.0, 10.0]])\n\n def test_transpose_and_transpose_over_transposed_instance(self):\n self.assertEqual(self.matrix_1.transpose(), [[1, 3, 5], [2, 4, 6], [9, 0, 4]])\n self.assertEqual(self.matrix_1.transpose(), [[1, 2, 9], [3, 4, 0], [5, 6, 4]])\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6044142842292786, "alphanum_fraction": 0.6230899691581726, "avg_line_length": 24.60869598388672, "blob_id": "81048907a887fa526649a1890787532a7d359cda", "content_id": "95bcaf3a598b9c6bbde61e2dac600fb2d6b8940c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 778, "license_type": "no_license", "max_line_length": 83, "num_lines": 23, "path": "/hw-1/task_1.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "\"\"\"\n1. Определить количество четных и нечетных чисел в заданном списке. \nОформить в виде функции, где на вход будет подаваться список с целыми числами. 
\nРезультат функции должен быть 2 числа, количество четных и нечетных соответственно.\n\"\"\"\n\n\ndef list_check(some_list):\n even_numb = 0\n not_even_numb = 0\n\n for elem in some_list:\n if elem % 2 == 0:\n even_numb += 1\n else:\n not_even_numb += 1\n\n return f\"even: {even_numb}, not even: {not_even_numb}\"\n\n\nif __name__ == '__main__':\n my_list = list(range(1, 20))\n print(list_check(my_list))\n" }, { "alpha_fraction": 0.6393229365348816, "alphanum_fraction": 0.6647135615348816, "avg_line_length": 29.719999313354492, "blob_id": "e67bf07fc498c1a6c3660f0ca020c81998e92fe8", "content_id": "7c12c08eeb958ce6908cfd070b59cae8111b9fef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2044, "license_type": "no_license", "max_line_length": 109, "num_lines": 50, "path": "/hw-3/task_1.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "\"\"\"\nРеализовать некий класс Matrix, у которого:\n1. Есть собственный конструктор, который принимает в качестве аргумента - список списков,\nкопирует его (то есть при изменении списков, значения в экземпляре класса не должны меняться).\nЭлементы списков гарантированно числа, и не пустые.\n\n2. Метод size без аргументов, который возвращает кортеж вида (число строк, число столбцов).\n\n3. Метод transpose, транспонирующий матрицу и возвращающую результат (данный метод модифицирует\nэкземпляр класса Matrix)\n\n4. 
На основе пункта 3 сделать метод класса create_transposed, который будет принимать на вход список списков,\nкак и в пункте 1, но при этом создавать сразу транспонированную матрицу.\n\nhttps://ru.wikipedia.org/wiki/%D0%A2%D1%80%D0%B0%D0%BD%D1%81%D0%BF%D0%BE%D0%BD%D0%B8%D1%80%D0%\n\"\"\"\n\n\nclass Matrix:\n def __init__(self, some_list):\n self.data_list = some_list.copy()\n\n def size(self):\n row = len(self.data_list)\n col = len(self.data_list[0])\n return row, col\n\n def transpose(self):\n t_matrix = [\n [item[i] for item in self.data_list] for i in range(self.size()[1])\n ]\n self.data_list = t_matrix\n return self.data_list\n\n @classmethod\n def create_transposed(cls, int_list):\n obj = cls(int_list)\n obj.transpose()\n return obj\n\n\nif __name__ == '__main__':\n my_list = [[1, 2, 9], [3, 4, 0], [5, 6, 4]]\n\n t = Matrix(my_list)\n t.transpose()\n print(t.data_list)\n\n t2 = Matrix.create_transposed(my_list)\n print(t2.data_list)\n" }, { "alpha_fraction": 0.5772399306297302, "alphanum_fraction": 0.5916580557823181, "avg_line_length": 27.985074996948242, "blob_id": "e3e50b9e61aaa2d34c6274f79dbd5c4741130cc2", "content_id": "26557162200adebaa7151463cd7a4c318dc0db73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2146, "license_type": "no_license", "max_line_length": 110, "num_lines": 67, "path": "/hw-6/task_1.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "\"\"\"\n1. Реализовать подсчёт елементов в классе Matrix с помощью collections.Counter.\nМожно реализовать протоколом итератора и тогда будет такой вызов - Counter(maxtrix).\nЛибо сделать какой-то метод get_counter(), который будет возвращать объект Counter и подсчитывать все элементы\nвнутри матрицы. 
Какой метод - ваш выбор.\n\"\"\"\n\nfrom collections import Counter\n\n\nclass MatrixSizeError(Exception):\n pass\n\n\nclass Matrix:\n def __init__(self, some_list):\n self.data_list = some_list.copy()\n self.counter = Counter\n\n def __add__(self, other):\n if self.size() != other.size():\n raise MatrixSizeError(\n f'Matrixes have different sizes - Matrix{self.size()} and Matrix{other.size()}'\n )\n\n return [\n [self.data_list[row][col] + other.data_list[row][col] for col in range(self.size()[1])]\n for row in range(self.size()[0])\n ]\n\n def __mul__(self, other):\n return [[item * other for item in row] for row in self.data_list]\n\n def __str__(self):\n return ''.join('%s\\n' % '\\t'.join(map(str, x)) for x in self.data_list).rstrip('\\n')\n\n def get_counter(self):\n return self.counter(elem for list_elem in self.data_list for elem in list_elem)\n\n def size(self):\n row = len(self.data_list)\n col = len(self.data_list[0])\n return row, col\n\n def transpose(self):\n t_matrix = [\n [item[i] for item in self.data_list] for i in range(self.size()[1])\n ]\n self.data_list = t_matrix\n return self.data_list\n\n @classmethod\n def create_transposed(cls, int_list):\n obj = cls(int_list)\n obj.transpose()\n return obj\n\n\nif __name__ == '__main__':\n list_1 = [[1, 2, 9], [3, 4, 0], [5, 6, 4]]\n list_2 = [[2, 3], [1, 2], [5, 6]]\n\n matrix1 = Matrix(list_1)\n matrix2 = Matrix(list_2)\n\n print(matrix1.get_counter())\n print(matrix2.get_counter())\n" }, { "alpha_fraction": 0.807692289352417, "alphanum_fraction": 0.807692289352417, "avg_line_length": 25, "blob_id": "5f24b582f5249200ff1d0a22665b11d252fe4faf", "content_id": "2853a35f6c250ad1b9503319254ff55de08c5293", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 26, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/README.md", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "Only study, only practice\n" }, { 
"alpha_fraction": 0.5633535385131836, "alphanum_fraction": 0.5891502499580383, "avg_line_length": 28.288888931274414, "blob_id": "ae1bb5f589622d728c4704420825085bbdc25870", "content_id": "3732937ac5fac106b50f90d1c4b56e5414aa3620", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3155, "license_type": "no_license", "max_line_length": 103, "num_lines": 90, "path": "/hw-4/task_1.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "\"\"\"\nК реализованному классу Matrix в Домашнем задании 3 добавить следующее:\n1. __add__ принимающий второй экземпляр класса Matrix и возвращающий сумму матриц,\nесли передалась на вход матрица другого размера - поднимать исключение MatrixSizeError\n(по желанию реализовать так, чтобы текст ошибки содержал размерность 1 и 2 матриц - пример:\n\"Matrixes have different sizes - Matrix(x1, y1) and Matrix(x2, y2)\")\n\n2. __mul__ принимающий число типа int или float и возвращающий матрицу, умноженную на скаляр\n\n3. 
__str__ переводящий матрицу в строку.\nСтолбцы разделены между собой табуляцией, а строки — переносами строк (символ новой строки).\nПри этом после каждой строки не должно быть символа табуляции и в конце не должно быть переноса строки.\n\"\"\"\n\n\nclass MatrixSizeError(Exception):\n pass\n\n\nclass Matrix:\n def __init__(self, some_list):\n self.data_list = some_list.copy()\n\n def __add__(self, other):\n if self.size() != other.size():\n raise MatrixSizeError(\n f'Matrixes have different sizes - Matrix{self.size()} and Matrix{other.size()}'\n )\n\n return [\n [self.data_list[row][col] + other.data_list[row][col] for col in range(self.size()[1])]\n for row in range(self.size()[0])\n ]\n\n def __mul__(self, other):\n return [[item * other for item in row] for row in self.data_list]\n\n def __str__(self):\n return ''.join('%s\\n' % '\\t'.join(map(str, x)) for x in self.data_list).rstrip('\\n')\n\n def size(self):\n row = len(self.data_list)\n col = len(self.data_list[0])\n return row, col\n\n def transpose(self):\n t_matrix = [\n [item[i] for item in self.data_list] for i in range(self.size()[1])\n ]\n self.data_list = t_matrix\n return self.data_list\n\n @classmethod\n def create_transposed(cls, int_list):\n obj = cls(int_list)\n obj.transpose()\n return obj\n\n\nif __name__ == '__main__':\n list_1 = [[1, 2, 9], [3, 4, 0], [5, 6, 4]]\n list_2 = [[2, 3, 0], [1, 2, 3], [5, 6, 4]]\n list_3 = [[2, 3], [1, 2], [5, 6]]\n\n t1 = Matrix(list_1)\n t1.transpose()\n\n t2 = Matrix.create_transposed(list_2)\n\n t3 = Matrix(list_3)\n\n print(\"t1: \", t1.data_list)\n print(\"t2: \", t2.data_list)\n print(\"t3: \", t3.data_list)\n\n # __add__\n print(\"\\nt1.__add__(t2) : \", t1 + t2)\n\n try:\n print(\"\\nПробую: t1 + t3\")\n print(t1 + t3)\n except MatrixSizeError:\n print('Тут было вызвано исключение MatrixSizeError')\n\n # __mul__\n print(\"\\nt2.__mul__(3): \\n\", t2 * 3)\n\n # __str__\n print('\\nt1.__str__')\n print(t1)\n" }, { "alpha_fraction": 0.6111603379249573, 
"alphanum_fraction": 0.6279893517494202, "avg_line_length": 32.70149230957031, "blob_id": "742b06203ff02ccc3af807f8a839f80a9cc20a36", "content_id": "615773d6b15617514ec3a63e391e28cf36b61515", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2478, "license_type": "no_license", "max_line_length": 107, "num_lines": 67, "path": "/hw-7/task_1.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "\"\"\"\nСделать скрипт, который будет делать GET запросы на следующие ресурсы:\n \"http://docs.python-requests.org/\",\n \"https://httpbin.org/get\",\n \"https://httpbin.org/\",\n \"https://api.github.com/\",\n \"https://example.com/\",\n \"https://www.python.org/\",\n \"https://www.google.com.ua/\",\n \"https://regex101.com/\",\n \"https://docs.python.org/3/this-url-will-404.html\",\n \"https://www.nytimes.com/guides/\",\n \"https://www.mediamatters.org/\",\n \"https://1.1.1.1/\",\n \"https://www.politico.com/tipsheets/morning-money\",\n \"https://www.bloomberg.com/markets/economics\",\n \"https://www.ietf.org/rfc/rfc2616.txt\"\n\nДля каждого запроса должен быть вывод по примеру: \"Resource 'google.com.ua',\nrequest took 0.23 sec, response status - 200.\"\nВ реализации нет ограничений - можно использовать процессы, потоки, асинхронность.\nЛюбые вспомагательные механизмы типа Lock, Semaphore, пулы для тредов и потоков.\n\"\"\"\n\nimport aiohttp\nimport asyncio\nfrom time import time\n\n\nasync def get_response(session, url):\n async with session.get(url) as resp:\n return resp.status\n\n\nasync def request(url):\n async with aiohttp.ClientSession() as session:\n time_start = time()\n status_code = await get_response(session, url)\n print(f\"Resource '{url}', request took {time() - time_start:.3f}, response status - {status_code}\")\n\n\nif __name__ == '__main__':\n urls = [\n \"http://docs.python-requests.org/\",\n \"https://httpbin.org/get\",\n \"https://httpbin.org/\",\n 
\"https://api.github.com/\",\n \"https://example.com/\",\n \"https://www.python.org/\",\n \"https://www.google.com.ua/\",\n \"https://regex101.com/\",\n \"https://docs.python.org/3/this-url-will-404.html\",\n \"https://www.nytimes.com/guides/\",\n \"https://www.mediamatters.org/\",\n \"https://1.1.1.1/\",\n \"https://www.politico.com/tipsheets/morning-money\",\n \"https://www.bloomberg.com/markets/economics\",\n \"https://www.ietf.org/rfc/rfc2616.txt\"\n ]\n\n futures = [request(url) for url in urls]\n\n loop = asyncio.get_event_loop()\n t_start = time()\n loop.run_until_complete(asyncio.wait(futures))\n t_end = time()\n print(f\"Full fetching got {t_end - t_start:.3f} seconds.\")\n" }, { "alpha_fraction": 0.6430517435073853, "alphanum_fraction": 0.6893733143806458, "avg_line_length": 27.230770111083984, "blob_id": "805104f7f451ec7961d842dd27c4e142cfdae5b2", "content_id": "81aa085d6272000c79c589f5ca98885a85b6c121", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "no_license", "max_line_length": 85, "num_lines": 13, "path": "/hw-1/task_2.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "\"\"\"\nНаписать функцию, которая принимает 2 числа.\nФункция должна вернуть сумму всех элементов числового ряда между этими двумя числами.\n(если подать 1 и 5 на вход, то результат должен считаться как 1+2+3+4+5=15)\n\"\"\"\n\n\ndef all_numbers_sum(num1, num2):\n return sum([num for num in range(num1, num2 + 1)])\n\n\nif __name__ == '__main__':\n print(all_numbers_sum(1, 5))\n" }, { "alpha_fraction": 0.572822093963623, "alphanum_fraction": 0.5785402059555054, "avg_line_length": 27.586538314819336, "blob_id": "3c93d54e81f57a1b0662e7870fb6959328dcbad8", "content_id": "d15586fdfcac7f951788357c0f8eaf41c8bee73a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3230, "license_type": "no_license", "max_line_length": 88, 
"num_lines": 104, "path": "/hw-5/task_1.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "# Реализовать пример использования паттерна Singleton\nfrom random import choice\n\n\n# Генератор событий\ndef gen_events(instance, data, count=2):\n for i in range(count):\n event = choice(data)\n instance.add_event(f'Event-{event}-{i}', event)\n\n\n# Singleton на примере списка событий\nclass EventsMeta(type):\n _instance = None\n\n def __call__(cls):\n if cls._instance is None:\n cls._instance = super().__call__()\n return cls._instance\n\n\nclass Events(metaclass=EventsMeta):\n # __metaclass__ = EventsMeta\n\n _events = {\n 'ok': [],\n 'info': [],\n 'warn': [],\n 'error': []\n }\n\n def get_all_events(self):\n \"\"\"\n :return: dict with all events and types\n \"\"\"\n return self._events\n\n def get_events_count(self, key: str = None):\n \"\"\"\n :param key: if need count of specific type\n :return: all events count or specific event count if param key: not None\n :rtype: tuple, int\n \"\"\"\n if key:\n try:\n return len(self._events[key])\n # return key, len(self._events[key])\n except KeyError:\n print('Тип события должен быть ' + ', '.join(self._events.keys()))\n return\n\n return tuple((event, len(self._events[event])) for event in self._events.keys())\n\n def add_event(self, event: str, event_type: str):\n \"\"\"\n :param event: event message\n :param event_type: ok, info, warn, error\n :return: None\n \"\"\"\n try:\n self._events[event_type].append(event)\n except KeyError:\n print('Тип события должен быть ' + ', '.join(self._events.keys()))\n\n def read_event(self, event_type: str):\n \"\"\"\n :param event_type: ok, info, warn, error\n :return: tuple last item of event_type, all count events or None\n \"\"\"\n try:\n return self._events[event_type].pop(), len(self._events[event_type])\n except IndexError:\n print('Событий больше нет')\n return\n except KeyError:\n print('Указан неверный тип события')\n return\n\n @classmethod\n def 
get_events_types(cls):\n return cls._events.keys()\n\n\nif __name__ == '__main__':\n event_instance1 = Events()\n event_instance2 = Events()\n event_instance3 = Events()\n\n print(type(event_instance1), id(event_instance1))\n print(type(event_instance2), id(event_instance2))\n\n # Генерируем события\n gen_events(event_instance3, list(event_instance3.get_events_types()), 50)\n\n # Получаем все события\n print(event_instance2.get_all_events())\n\n # Получаем колличества всех типов событий и обределенного типа\n print(event_instance3.get_events_count())\n print(f\"Error: {event_instance3.get_events_count('error')}\")\n\n # Читаем события\n while event_instance3.get_events_count('ok'):\n print(event_instance3.read_event('ok'))\n" }, { "alpha_fraction": 0.652482271194458, "alphanum_fraction": 0.6903073191642761, "avg_line_length": 29.214284896850586, "blob_id": "d0122ec4895a62ca04f7f07d88b7fb88ede93f4e", "content_id": "9b566e92b090b2cd0cb53dff98ba94bed4054d9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 580, "license_type": "no_license", "max_line_length": 73, "num_lines": 14, "path": "/hw-2/task_1.py", "repo_name": "vanya2143/ITEA-tasks", "src_encoding": "UTF-8", "text": "\"\"\"\n1. Написать функцию, которая будет принимать на вход натуральное число n,\nи возращать сумму его цифр. Реализовать используя рекурсию\n(без циклов, без строк, без контейнерных типов данных).\nПример: get_sum_of_components(123) -> 6 (1+2+3)\n\"\"\"\n\n\ndef get_sum_of_components_two(n):\n return 0 if not n else n % 10 + get_sum_of_components_two(n // 10)\n\n\nif __name__ == '__main__':\n print(get_sum_of_components_two(123))\n" } ]
12
nkgenius/graphene-sqlalchemy-mutation
https://github.com/nkgenius/graphene-sqlalchemy-mutation
c5e3ba6abcecdb37004dfc30130f52ec29d555f4
4fe2233a8a03729d50b9c46dade78063cf184d42
74d615ec1bc36d4b16c8e85c3c6a21ca75351220
refs/heads/master
"2021-08-12T01:45:40.376235"
"2017-11-14T09:06:04"
"2017-11-14T09:06:04"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5463977456092834, "alphanum_fraction": 0.5489188432693481, "avg_line_length": 34.674739837646484, "blob_id": "41ee726b031b2d28e8be94301ec7a00360433e64", "content_id": "20146e3859c301bab606818a9cdfe81767cf91f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10313, "license_type": "no_license", "max_line_length": 139, "num_lines": 289, "path": "/graphene_sqlalchemy_mutation/types.py", "repo_name": "nkgenius/graphene-sqlalchemy-mutation", "src_encoding": "UTF-8", "text": "from graphene import InputObjectType, ObjectType, String, ID, Field, List, Int\nfrom graphene.types.mutation import MutationMeta, Mutation\nfrom graphene.utils import is_base_type\nfrom graphene.utils.is_base_type import is_base_type\nfrom graphene.types.argument import Argument\nfrom graphene.types.dynamic import Dynamic\nfrom graphene_sqlalchemy.converter import convert_sqlalchemy_column\nfrom graphene_sqlalchemy.utils import get_query, is_mapped\nfrom graphene.types.typemap import TypeMap, NonNull, Interface, Scalar, Enum, Union, GraphQLTypeMap\n\n\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom sqlalchemy.inspection import inspect as sqlalchemyinspect\n\nimport re\nimport graphene\nimport base64\n\n\ndef graphene_reducer(self, map, type):\n if isinstance(type, (List, NonNull)):\n return self.reducer(map, type.of_type)\n if type._meta.name in map:\n _type = map[type._meta.name]\n return map\n\n if issubclass(type, ObjectType):\n internal_type = self.construct_objecttype(map, type)\n elif issubclass(type, InputObjectType):\n internal_type = self.construct_inputobjecttype(map, type)\n elif issubclass(type, Interface):\n internal_type = self.construct_interface(map, type)\n elif issubclass(type, Scalar):\n internal_type = self.construct_scalar(map, type)\n elif issubclass(type, Enum):\n internal_type = self.construct_enum(map, type)\n elif issubclass(type, Union):\n internal_type = self.construct_union(map, type)\n 
else:\n raise Exception(\n \"Expected Graphene type, but received: {}.\".format(type))\n\n return GraphQLTypeMap.reducer(map, internal_type)\n\n\nTypeMap.graphene_reducer = graphene_reducer\n\n\ndef camel_to_snake(s):\n s = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', s)\n s = re.sub('(.)([0-9]+)', r'\\1_\\2', s)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s).lower()\n\n\nclass SQLAlchemyMutationMeta(MutationMeta):\n\n def __new__(cls, name, bases, attrs):\n\n if not is_base_type(bases, SQLAlchemyMutationMeta):\n return type.__new__(cls, name, bases, attrs)\n\n input_class = attrs.pop('Meta', None)\n\n if not input_class or not getattr(input_class, 'model', None) or \\\n not getattr(input_class, 'field', None):\n return MutationMeta.__new__(cls, name, bases, attrs)\n\n assert is_mapped(input_class.model), ('You need valid SQLAlchemy Model in {}.Meta, received \"{}\".').format(name, input_class.model)\n\n field_name = camel_to_snake(input_class.model.__name__)\n inspected_model = sqlalchemyinspect(input_class.model)\n\n def mutate(cls, instance, args, context, info):\n session = cls.query\n arg_attrs = {}\n primary_key = {}\n for name, column in inspected_model.columns.items():\n if column.primary_key and name in args:\n try:\n klazz, pk = base64.b64decode(args['id']).split(b\":\")\n except:\n pk = args.get(name, None)\n finally:\n primary_key[name] = int(pk)\n continue\n if name in args:\n arg_attrs[name] = args.get(name, None)\n\n if len(primary_key) > 0:\n session.query(input_class.model).filter_by(**primary_key).update(arg_attrs)\n session.commit()\n field = session.query(input_class.model).filter_by(**primary_key).first()\n else:\n field = input_class.model(**arg_attrs)\n session.add(field)\n\n try:\n session.commit()\n ok = True\n message = \"ok\"\n except SQLAlchemyError as e:\n session.rollback()\n message = e.message\n ok = False\n\n kwargs = {\n 'ok': ok,\n 'message': message,\n field_name: field\n }\n return cls(**kwargs)\n\n input_attrs = {}\n\n for name, 
column in inspected_model.columns.items():\n input_attrs[name] = convert_sqlalchemy_column(column)\n if column.default or column.server_default or column.primary_key:\n input_attrs[name].kwargs['required'] = False\n\n mutation_attrs = {\n 'Input': type('Input', (object,), input_attrs),\n 'ok': graphene.Boolean(),\n 'message': graphene.String(),\n 'mutate': classmethod(mutate),\n field_name: graphene.Field(input_class.field)\n }\n\n cls = MutationMeta.__new__(cls, name, bases, mutation_attrs)\n return cls\n\n\nclass SQLAlchemyMutation(Mutation, metaclass=SQLAlchemyMutationMeta):\n pass\n\n\nclass SQLAlchemyMutationMetaUpdate(SQLAlchemyMutationMeta):\n\n def __new__(cls, name, bases, attrs):\n\n if not is_base_type(bases, SQLAlchemyMutationMeta):\n return type.__new__(cls, name, bases, attrs)\n\n input_class = attrs.pop('Meta', None)\n\n if not input_class or not getattr(input_class, 'model', None) or \\\n not getattr(input_class, 'field', None):\n return MutationMeta.__new__(cls, name, bases, attrs)\n\n assert is_mapped(input_class.model), \\\n ('You need valid SQLAlchemy Model in {}.Meta, received \"{}\".').format(name, input_class.model)\n\n field_name = camel_to_snake(input_class.model.__name__)\n inspected_model = sqlalchemyinspect(input_class.model)\n\n def mutate(cls, instance, args, context, info):\n session = cls.query\n arg_attrs = {}\n primary_key = {}\n for name, column in inspected_model.columns.items():\n if column.primary_key and name in args:\n try:\n klazz, pk = base64.b64decode(args['id']).split(b\":\")\n except:\n pk = args.get(name, None)\n finally:\n primary_key[name] = int(pk)\n continue\n if name in args:\n arg_attrs[name] = args.get(name, None)\n\n if len(primary_key) > 0:\n session.query(input_class.model).filter_by(**primary_key).update(arg_attrs)\n session.commit()\n field = session.query(input_class.model).filter_by(**primary_key).first()\n else:\n field = input_class.model(**arg_attrs)\n session.add(field)\n\n try:\n session.commit()\n ok 
= True\n message = \"ok\"\n except SQLAlchemyError as e:\n session.rollback()\n message = e.message\n ok = False\n\n kwargs = {\n 'ok': ok,\n 'message': message,\n field_name: field\n }\n return cls(**kwargs)\n\n input_attrs = {}\n for name, column in inspected_model.columns.items():\n input_attrs[name] = convert_sqlalchemy_column(column)\n if column.primary_key:\n input_attrs[name].kwargs['required'] = True\n else:\n input_attrs[name].kwargs['required'] = False\n\n mutation_attrs = {\n 'Input': type('Input', (object,), input_attrs),\n 'ok': graphene.Boolean(),\n 'message': graphene.String(),\n 'mutate': classmethod(mutate),\n field_name: graphene.Field(input_class.field)\n }\n\n cls = MutationMeta.__new__(cls, name, bases, mutation_attrs)\n return cls\n\n\nclass SQLAlchemyMutationUpdate(Mutation, metaclass=SQLAlchemyMutationMetaUpdate):\n pass\n\n\nclass SQLAlchemyMutationMetaDelete(SQLAlchemyMutationMeta):\n\n def __new__(cls, name, bases, attrs):\n\n if not is_base_type(bases, SQLAlchemyMutationMeta):\n return type.__new__(cls, name, bases, attrs)\n\n input_class = attrs.pop('Meta', None)\n\n if not input_class or not getattr(input_class, 'model', None) or \\\n not getattr(input_class, 'field', None):\n return MutationMeta.__new__(cls, name, bases, attrs)\n\n assert is_mapped(input_class.model), \\\n ('You need valid SQLAlchemy Model in {}.Meta, received \"{}\".').format(name, input_class.model)\n\n field_name = camel_to_snake(input_class.model.__name__)\n inspected_model = sqlalchemyinspect(input_class.model)\n\n def mutate(cls, instance, args, context, info):\n session = cls.query\n arg_attrs = {}\n primary_key = {}\n for name, column in inspected_model.columns.items():\n if column.primary_key and name in args:\n try:\n klazz, pk = base64.b64decode(args['id']).split(b\":\")\n except:\n pk = args.get(name, None)\n finally:\n primary_key[name] = int(pk)\n break\n\n session.query(input_class.model).filter_by(**primary_key).delete()\n\n try:\n session.commit()\n 
ok = True\n message = \"ok\"\n except SQLAlchemyError as e:\n session.rollback()\n message = e.message\n ok = False\n\n kwargs = {\n 'ok': ok,\n 'message': message,\n }\n return cls(**kwargs)\n\n input_attrs = {}\n for name, column in inspected_model.columns.items():\n if column.primary_key:\n input_attrs[name] = convert_sqlalchemy_column(column)\n input_attrs[name].kwargs['required'] = True\n break\n\n mutation_attrs = {\n 'Input': type('Input', (object, ), input_attrs),\n 'ok': graphene.Boolean(),\n 'message': graphene.String(),\n 'mutate': classmethod(mutate),\n field_name: graphene.Field(input_class.field)\n }\n\n cls = MutationMeta.__new__(cls, name, bases, mutation_attrs)\n return cls\n\n\nclass SQLAlchemyMutationDelete(Mutation, metaclass=SQLAlchemyMutationMetaDelete):\n pass\n\n\n\n" }, { "alpha_fraction": 0.7019400596618652, "alphanum_fraction": 0.7019400596618652, "avg_line_length": 26, "blob_id": "db7408674222536cbcb9bc6411a5782d0029bc31", "content_id": "5a7f5017d437714cf1833d2b73cecc0515f1979c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 567, "license_type": "no_license", "max_line_length": 78, "num_lines": 21, "path": "/README.rst", "repo_name": "nkgenius/graphene-sqlalchemy-mutation", "src_encoding": "UTF-8", "text": "graphene-sqlalchemy-mutation\n============================\n\n.. image:: https://img.shields.io/pypi/v/graphene-sqlalchemy-mutation.svg\n :target: https://pypi.python.org/pypi/graphene-sqlalchemy-mutation\n :alt: Latest PyPI version\n\n.. 
image:: https://travis-ci.org/borntyping/cookiecutter-pypackage-minimal.png\n :target: https://travis-ci.org/borntyping/cookiecutter-pypackage-minimal\n :alt: Latest Travis CI build status\n\nMutation for graphene with sqlAlchemy\n\nUsage\n-----\n\n\nAuthors\n-------\n\n`graphene-sqlalchemy-mutation` was written by `Nikita Kryuchkov <[email protected]>`_.\n" }, { "alpha_fraction": 0.5874125957489014, "alphanum_fraction": 0.6083915829658508, "avg_line_length": 27.600000381469727, "blob_id": "ba98199e80dca8272906fb16e92a627109900451", "content_id": "6d0294389bb23d23e26ffee2d2fa15cc8d66effc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "no_license", "max_line_length": 74, "num_lines": 5, "path": "/graphene_sqlalchemy_mutation/__init__.py", "repo_name": "nkgenius/graphene-sqlalchemy-mutation", "src_encoding": "UTF-8", "text": "\"\"\"graphene-sqlalchemy-mutation - Mutation for graphene with sqlAlchemy\"\"\"\n\n__version__ = '0.1.0'\n__author__ = 'Nikita Kryuchkov <[email protected]>'\n__all__ = []\n" }, { "alpha_fraction": 0.7448979616165161, "alphanum_fraction": 0.8061224222183228, "avg_line_length": 15.333333015441895, "blob_id": "2c98da91a527f10304b95ff703de50c5fd647ed6", "content_id": "a177b94ccfabfdbfa36efbad4015b220aac1db66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 98, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/tox.ini", "repo_name": "nkgenius/graphene-sqlalchemy-mutation", "src_encoding": "UTF-8", "text": "[tox]\nenvlist=py27,py34,py35\n\n[testenv]\ncommands=py.test graphene-sqlalchemy-mutation\ndeps=pytest\n" } ]
4
glorizen/hi10enc
https://github.com/glorizen/hi10enc
7bee9df5eb57d265c54ce5a9c4f4b51dbfe63641
efa0c50f7f4c1e0dfdf6e2c2c690fa871671e876
92d1de4325edd7fd6b40cb0b7889ebb113c2043c
refs/heads/master
"2021-07-07T00:46:03.575319"
"2017-10-03T14:23:17"
"2017-10-03T14:23:17"
104,760,917
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7025291323661804, "alphanum_fraction": 0.7153753638267517, "avg_line_length": 21.432432174682617, "blob_id": "f370aace0252f0c9dfd0540f70cd4dbb5548f6e5", "content_id": "e54353b81937b9944595b08e4ef69f2964d0d7a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2491, "license_type": "no_license", "max_line_length": 86, "num_lines": 111, "path": "/app.py", "repo_name": "glorizen/hi10enc", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask\nfrom flask import request\nfrom flask import jsonify\nfrom flask import render_template\nfrom flask import send_from_directory\nfrom parsers import MediaParser\nfrom parsers import AvsParser\n\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = 'd:/temp'\n\[email protected]('/static/metronic_v5.0.2/metronic_v5.0.2/theme/dist/html/default')\ndef default():\n # return send_from_directory(app.config['UPLOAD_FOLDER'])\n return None\n\[email protected]('/static/metronic_v5.0.2/metronic_v5.0.2/theme/dist/html/demo2')\ndef demo2():\n # return send_from_directory(app.config['UPLOAD_FOLDER'])\n return None\n\[email protected]('/static/styles')\ndef styles():\n return None\n\[email protected]('/static/scripts')\ndef scripts():\n return None\n\[email protected]('/')\ndef index():\n return render_template('site_specific/index.html')\n\n\[email protected]('/encode/video')\ndef video_command():\n return 'Video Command Here.'\n\n\[email protected]('/encode/audio')\ndef audio_command():\n return 'Audio Command Here.'\n\n\[email protected]('/extract/subtitle')\ndef sub_extract_command():\n return 'Sub-Extraction Command Here.'\n\n\[email protected]('/extract/video')\ndef video_extract_command():\n return 'Video-Extraction Command Here.'\n\n\[email protected]('/extract/audio')\ndef audio_extract_commmand():\n return 'Audio-Extraction Command Here.'\n\n\[email protected]('/info/ffmpeg')\ndef ffmpeg_info():\n return 'ffmpeg info Here.'\n\n\[email 
protected]('/info/x264')\ndef x264_info():\n return 'X264 info here.'\n\n\[email protected]('/info/x265')\ndef x265_info():\n return 'X265 info here.'\n\n\[email protected]('/info/libopus')\ndef libopus_info():\n return 'libopus info here.'\n\n\[email protected]('/info/libfdk_aac')\ndef libfdk_info():\n return 'libfdk_aac info here.'\n\n\[email protected]('/merge/mkvmerge')\ndef mkvmerge_command():\n return 'mkvmerge command here.'\n\n\[email protected]('/ajax/metadata', methods=[\"GET\", \"POST\"])\ndef ajax_parse_metadata():\n\n xml_string = request.json['mediainfo']\n avs_string = request.json['avscript']\n\n if not xml_string:\n pass\n\n media_parser = MediaParser(xml_string)\n avs_parser = AvsParser(avs_string)\n\n data = dict()\n data['general_details'] = media_parser.get_general_details(media_parser.mediainfo)\n data['video_details'] = media_parser.get_video_details(media_parser.mediainfo)\n data['audio_details'] = media_parser.get_audio_details(media_parser.mediainfo)\n data['subtitle_details'] = media_parser.get_subtitle_details(media_parser.mediainfo)\n data['menu_details'] = media_parser.get_menu_details(media_parser.mediainfo)\n\n return jsonify(data)\n\n" }, { "alpha_fraction": 0.5041710734367371, "alphanum_fraction": 0.5052328109741211, "avg_line_length": 34.44623565673828, "blob_id": "f09dc904f4e906f9316f108a50931d26a7e80a40", "content_id": "86fe0d64f447fa9c98b6cded8fc4f87719cedb15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6593, "license_type": "no_license", "max_line_length": 96, "num_lines": 186, "path": "/parsers.py", "repo_name": "glorizen/hi10enc", "src_encoding": "UTF-8", "text": "from pymediainfo import MediaInfo\n\nclass MediaParser(object):\n\n def __init__(self, xml_string):\n self.mediainfo = MediaInfo(xml_string)\n self.metadata = self.mediainfo.to_data()\n \n\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n 
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n def get_general_details(self, mediainfo):\n\n general_details = list()\n for track in mediainfo.tracks:\n if 'general' in track.track_type.lower():\n track_details = dict()\n track_details['file_name'] = track.file_name\n track_details['file_extension'] = track.file_extension\n track_details['file_size'] = track.file_size\n track_details['codec'] = track.codec\n track_details['duration'] = float(track.duration)\n track_details['stream_size'] = track.stream_size\n track_details['attachments'] = track.attachments\n \n general_details.append(track_details)\n\n return general_details\n\n\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n def get_video_details(self, mediainfo):\n\n vid_details = list()\n for track in mediainfo.tracks:\n if 'video' in track.track_type.lower():\n track_details = dict()\n track_details['_id'] = track.track_id\n track_details['codec'] = track.codec\n track_details['frame_rate_mode'] = track.frame_rate_mode\n track_details['frame_rate'] = float(track.frame_rate)\n track_details['resolution'] = (track.width, track.height)\n track_details['duration'] = float(track.duration)\n track_details['bit_rate'] = float(track.bit_rate)\n track_details['bit_depth'] = track.bit_depth\n track_details['stream_size'] = track.stream_size\n track_details['display_aspect_ratio'] = float(track.display_aspect_ratio)\n track_details['title'] = track.title\n track_details['language'] = track.language\n track_details['default'] = track.default\n track_details['forced'] = track.forced\n \n vid_details.append(track_details)\n\n return vid_details\n\n\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n def get_audio_details(self, mediainfo):\n\n aud_details = list()\n \n for track in mediainfo.tracks:\n if 'audio' in track.track_type.lower():\n track_details = dict()\n 
track_details['_id'] = track.track_id\n track_details['codec'] = track.codec\n track_details['duration'] = float(track.duration)\n track_details['bit_rate'] = track.bit_rate\n track_details['channels'] = track.channel_s\n track_details['sampling_rate'] = track.sampling_rate\n track_details['stream_size'] = track.stream_size\n track_details['title'] = track.title\n track_details['language'] = track.language\n track_details['default'] = track.default\n track_details['forced'] = track.forced\n\n aud_details.append(track_details)\n\n return aud_details\n\n\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n def get_subtitle_details(self, mediainfo):\n\n aud_details = list()\n \n for track in mediainfo.tracks:\n if 'text' in track.track_type.lower():\n track_details = dict()\n track_details['_id'] = track.track_id\n track_details['codec'] = track.codec\n track_details['duration'] = float(track.duration)\n track_details['bit_rate'] = track.bit_rate\n track_details['stream_size'] = track.stream_size\n track_details['title'] = track.title\n track_details['language'] = track.language\n track_details['default'] = track.default\n track_details['forced'] = track.forced\n\n aud_details.append(track_details)\n\n return aud_details\n\n\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n def get_menu_details(self, mediainfo):\n\n menu_details = list()\n for track in mediainfo.tracks:\n if 'menu' in track.track_type.lower():\n menu_data = track.to_data()\n menu = list()\n for key in menu_data:\n if key.replace('_', str()).isdigit():\n menu.append((key.replace('_', ':'), menu_data[key]))\n\n menu_details.append(menu)\n\n return 
menu_details\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\nclass AvsParser(object):\n\n def __init__(self, avs_string):\n \n self.avs_content = [line for line in avs_string.split('\\n')\n if line and not line.startswith('#') or line.startswith('##>') \n or line.startswith('##!!')]\n\n print(self.avs_content)\n\n\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n def parse_avs_chapters(self, avs_content):\n \n avs_chap_string = ''.join([x.strip('##!!') for x in avs_content \n if x.startswith('##!!') and '>' in x and '<' in x])\n\n if not avs_chap_string:\n return None\n\n filtered_chaps = [x.strip('>').strip('<').strip(' ').strip('\\n') \n for x in avs_chap_string.split(',')] if avs_chap_string else None\n\n avs_chapters = dict()\n avs_chapters['names'] = list(); avs_chapters['frames'] = list()\n\n for chapter in filtered_chaps:\n name = chapter.split('[')[0]\n start = int(chapter.split('[')[1].split(':')[0].strip(' '))\n end = int(chapter.split('[')[1].split(':')[1].split(']')[0].strip(' '))\n avs_chapters['names'].append(name)\n avs_chapters['frames'].append((start, end))\n\n return avs_chapters\n\n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n def get_custom_commands(self, avs_content):\n\n commands_dict = dict()\n avsfile = open(input_file)\n file_content = avsfile.readlines()\n avsfile.close()\n\n commands = ','.join([x.strip('##>') for x in avs_content if x.startswith('##>')]).split(',')\n \n for command in commands:\n if not command or len(command) < 3:\n continue\n \n option, value = command.split('=')\n 
commands_dict[option] = value.strip('\\r').strip('\\n')\n\n avs_chapters = parse_avs_chapters(avs_content)\n return commands_dict\n" } ]
2
sam-grant/LowDCAs
https://github.com/sam-grant/LowDCAs
92b61ff40be574d15afcdb35e84f616428ec8ed8
a71304ac87e94bb86e93fd31d2d577a5ef86124a
9d681eecaf686921e62b2358e7bc0b113d46fe35
refs/heads/master
"2020-09-29T01:42:42.305424"
"2020-01-21T05:24:58"
"2020-01-21T05:24:58"
217,665,913
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4480874240398407, "alphanum_fraction": 0.7158470153808594, "avg_line_length": 34.79999923706055, "blob_id": "6bbd108bc3da3dc2a100220b1de70a5574ae887b", "content_id": "72f9c0380e2d9536312bc671597705c440f835d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 183, "license_type": "no_license", "max_line_length": 86, "num_lines": 5, "path": "/BK/ScriptsBK/MakeFileList.sh", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "path=/pnfs/GM2/mc/run1_gasgun_1062/runs_1567190000/1567190852\n\nfor i in {200..225}; do\n ls $path/gm2ringsim_muon_gasgun_truth_22551${i}_1567190852.1.root > Files/File${i}\ndone \n\n" }, { "alpha_fraction": 0.5017321109771729, "alphanum_fraction": 0.7650115489959717, "avg_line_length": 74.3043441772461, "blob_id": "3653aea066ac85f70e1bad96b29c03343f4573a4", "content_id": "d601fa388f42de943d750c8d1298931d06502a97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1732, "license_type": "no_license", "max_line_length": 92, "num_lines": 23, "path": "/BK/RunSimBK/MakeFileList.sh~", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "path=/pnfs/GM2/mc/run1_gasgun_1062/runs_1567190000/1567190852\n\nfor i in {200..225}; do\n# lr $path/gm2ringsim_muon_gasgun_truth_22551${i}_1567190852.1.root\n ls $path/gm2ringsim_muon_gasgun_truth_22551${i}_1567190852.1.root > Files/File${i}\ndone \n\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{200..225}_1567190852.1.root > Files/FileList0\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{225..250}_1567190852.1.root > Files/FileList1\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{250..275}_1567190852.1.root > Files/FileList2\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{275..300}_1567190852.1.root > Files/FileList3\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{300..325}_1567190852.1.root > Files/FileList4\n# ls 
$path/gm2ringsim_muon_gasgun_truth_22551{325..350}_1567190852.1.root > Files/FileList5\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{350..375}_1567190852.1.root > Files/FileList6\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{375..400}_1567190852.1.root > Files/FileList7\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{400..425}_1567190852.1.root > Files/FileList8\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{425..450}_1567190852.1.root > Files/FileList9\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{450..475}_1567190852.1.root > Files/FileList10\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{475..500}_1567190852.1.root > Files/FileList11\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{500..525}_1567190852.1.root > Files/FileList12\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{525..550}_1567190852.1.root > Files/FileList13\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{550..575}_1567190852.1.root > Files/FileList14\n# ls $path/gm2ringsim_muon_gasgun_truth_22551{575..600}_1567190852.1.root > Files/FileList15\n" }, { "alpha_fraction": 0.7032192945480347, "alphanum_fraction": 0.7183098793029785, "avg_line_length": 21.613636016845703, "blob_id": "9a38de13583bf3ae8ceca3ee609f8796ab7e015f", "content_id": "b29c2a2f5e9d3605090811a5b650edcc2c283a4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 994, "license_type": "no_license", "max_line_length": 82, "num_lines": 44, "path": "/RunSim/RunJob.sh", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "# Define output path\n#path=/gm2/app/users/sgrant/Offline/LowDCAs/JobOutput/\n\n#returnPath=/gm2/app/users/sgrant/Offline/LowDCAs/\n\n# If STOP file is created, pause job\nif [ -f STOP ]; then \n echo \"STOP file found. 
Not processing\"\n return\n exit\nfi\n\n# Label piped input file\ninFile=$1\necho $inFile\n\n# Check if it's output already exists\n#if [ -d $inFile ]; then\n# echo \"$inFile already exists...\"\n# return\n# exit\n#fi\n\n# If not, create a new dir for output, copy input file into it\n# mkdir $inFile\ncd $inFile\ncp ../Files/$inFile .\n\n# Get the list of root files\nfilesToRun=\"\"\nfor line in `cat $inFile`; do\n filesToRun=$filesToRun\" \"$line\ndone\n\n# Run sim\necho $filesToRun\ngm2 -c /gm2/app/users/sgrant/Offline/LowDCAs/fcl/RunSimScan.fcl -s $filesToRun\n#gm2 -c /gm2/app/users/sgrant/Offline/LowDCAs/FCL/RunSimLowDCAs500.fcl -S $inFile\n# -T TrackCaloMatchingSim_${inFile}.root\n# Return to run dir\ncd ../\n\n# Run sim\n# gm2 -c /gm2/app/users/sgrant/Offline/LowDCAs/FCL/RunSimLowDCAs500.fcl -S $inFile" }, { "alpha_fraction": 0.6475095748901367, "alphanum_fraction": 0.6781609058380127, "avg_line_length": 31.75, "blob_id": "4259726a3e9c3659a61ebb2a10ecef1c8c083615", "content_id": "001ff7c69d6cc3419e079c62d10dba3199b76f35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 261, "license_type": "no_license", "max_line_length": 98, "num_lines": 8, "path": "/RunSimLongPlots/RunSimPlots.sh", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "for i in {200..225}; do\n# mkdir Plots${i}\n cd Plots${i}\n gm2 -c ../../fcl/RunPlotsSimLongScan.fcl -s ../../RunLongSim/File${i}/gm2tracker_sim_scan.root\n cd ../\ndone\n\nhadd -f LowDCAs_SimScanLongPlots_HitLevel.root Plots*/LowDCAs_SimLongScanPlots.root" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.6925926208496094, "avg_line_length": 32.875, "blob_id": "f4bd26c8ff516520d32477af3d4867999c1f4a39", "content_id": "cc50656531e0fc81fb023d4f52eeb230be9b1cc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 270, "license_type": "no_license", "max_line_length": 112, "num_lines": 8, 
"path": "/BK/RunSimPlotsNoDrift/RunSimPlots.sh", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "for i in {200..225}; do\n mkdir Plots${i}\n cd Plots${i}\n gm2 -c ../../FCL/RunPlotsSimLowDCAs500.fcl -s ../../RunSimNoDrift/File${i}/gm2tracker_particle_gun_full.root\n cd ../\ndone\n\nhadd -f Plots/LowDCAsSimPlots500All_NoDrift.root Plots*/LowDCAsSimPlots500.root" }, { "alpha_fraction": 0.573913037776947, "alphanum_fraction": 0.6086956262588501, "avg_line_length": 17.91666603088379, "blob_id": "3f54b342da35067b85df8218c08f587ce874c67c", "content_id": "6d8fb6a2d8262fd12e11ef1acc59bb33e263ed13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 230, "license_type": "no_license", "max_line_length": 57, "num_lines": 12, "path": "/BK/RunSimOld/Merge.sh", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "prefix=\"FileList\"\nfile=\"gm2tracker_particle_gun_full.root\"\n\nfor i in {0..15} ; do\n # if test -f \"$prefix$i/$file\"; then\n cd FileList$i\n \t\n\thadd -f \"../gm2tracker_particle_gun_full_200.root\" $file\n\n\tcd ../\n# fi\ndone\n\n\n\n" }, { "alpha_fraction": 0.629482090473175, "alphanum_fraction": 0.6613546013832092, "avg_line_length": 30.5, "blob_id": "ccd3f2a2db92e958b37a124c12a0f3bef3473cee", "content_id": "b9cdaaedd99f1765484dd0eb14082586337ba77b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 251, "license_type": "no_license", "max_line_length": 90, "num_lines": 8, "path": "/RunSimPlots/RunSimPlots.sh", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "for i in {200..225}; do\n# mkdir Plots${i}\n cd Plots${i}\n gm2 -c ../../fcl/RunPlotsSimScan.fcl -s ../../RunSim/File${i}/gm2tracker_sim_scan.root\n cd ../\ndone\n\n#hadd -f LowDCAs_SimScanPlotsFull_Ambiguous.root Plots*/LowDCAs_SimScanPlots.root" }, { "alpha_fraction": 0.6179378628730774, "alphanum_fraction": 0.6733757257461548, 
"avg_line_length": 30.82022476196289, "blob_id": "0213e2055ee82bec2d7354d0adb7a45c9c4416b5", "content_id": "cc3e145d999df5a5fc9b5682abfb7d78373bf33b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2832, "license_type": "no_license", "max_line_length": 153, "num_lines": 89, "path": "/Macros/MainPlotter.py", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "from ROOT import TFile, TCanvas, TH1F, TH1D, TLegend, TAxis, TAttMarker, TGraph, TGraphErrors\nfrom ROOT import gROOT\nfrom array import array\n\n\ndef DrawScat(hists, DCAs, flag, title, fname):\n\n\tc = TCanvas(\"c2\",\"\",800,600)\n\n\t# Normal arrays don't work for whatever reason, must be a ROOT thing\n\tx, y, ex, ey = array('d'), array('d'), array('d'), array('d')\n\tn = len(hists);\n\n\tfor i in range(0,n):\n\n\t\tif (flag == 0): \n\t\t\tprint(str(DCAs[i])+\" * \"+str(hists[i].GetMean()))\n\t\t\tx.append(DCAs[i])\n\t\t\tex.append(0)\n\t\t\ty.append(hists[i].GetMean())\n\t\t\tey.append(hists[i].GetMeanError())\n\t\telse: \n\t\t\tprint(str(DCAs[i])+\" * \"+str(hists[i].GetEntries()))\n\t\t\tx.append(DCAs[i])\n\t\t\tex.append(0)\n\t\t\ty.append(hists[i].GetEntries())\n\t\t\tey.append(0)\n\n\n\n\tscat = TGraphErrors(n,x,y,ex,ey)\n\tscat.SetTitle(title)\n\tscat.GetXaxis().SetTitleSize(.04)\n\tscat.GetYaxis().SetTitleSize(.04)\n\tscat.GetXaxis().SetTitleOffset(1.1)\n\tscat.GetYaxis().SetTitleOffset(1.25)\n\tscat.GetXaxis().CenterTitle(1)\n\tscat.GetYaxis().CenterTitle(1)\n\t# scat.GetYaxis().SetRangeUser(0.086,0.106)\n\tscat.GetXaxis().SetRangeUser(-5,505)\n\tscat.GetYaxis().SetMaxDigits(4)\n\t#scat.SetMarkerSize(3)\n\t#scat.SetLineWidth(3)\n\tscat.SetMarkerStyle(20) # Full circle\n\t#scat.SetMarkerColor(4)\n\t#scat.SetLineColor(4)\n\tscat.Draw(\"AP\")\n\tc.SaveAs(fname)\n\n\nfData = TFile.Open(\"~/Documents/gm2/LowDCAs/ROOT/LowDCAsPlots500-25-11-19.root\")\n# fData = 
TFile.Open(\"~/Documents/gm2/LowDCAs/ROOT/LowDCAsPlots500.root\")\nfSim = TFile.Open(\"~/Documents/gm2/LowDCAs/ROOT/LowDCAsSimPlots500All-25-11-19.root\")\n\nDCAsArray = [0,25,50,75,100,125,150,175,200,\n\t\t\t 225,250,275,300,325,350,375,400,\n\t\t\t 425,450,475,500]\n\nhistType = [\"pValues\",\"ChiSqrDof\",\"Run\"]\nnameType = [\"p-value\",\"#chi^{2}/ndf\",\"Number of tracks\"]\n\nfor itype in range(0,len(histType)):\n\n\t# Put histograms in an array\n\thistsArrayData = []\n\thistsArraySim = []\n\n\tfor ihist in range(0,len(DCAsArray)):\n\t\t# print(\"Tracks\",ihist)\n\t\thistsArrayData.append(fData.Get(\"plots\"+str(ihist)+\"/\"+histType[itype]))\n\t\thistsArraySim.append(fSim.Get(\"plots\"+str(ihist)+\"/\"+histType[itype]))\n\t\n\ttypeFlag = 0\n\tif (itype > 1): typeFlag = 1\n\n\tmean = \"Mean \"\n\tif (typeFlag == 1): mean = \"\" \n\t\n\tprint(\"\\n********** DATA **********\\n\")\n\tprint(\"Threshold [um] * \"+histType[itype])\n\tDrawScat(histsArrayData, DCAsArray, typeFlag, \";Low DCA threshold [#mum];\"+mean+nameType[itype],\"../Plots-25-11-19/\"+histType[itype]+\"Scat500_DATA.pdf\")\n\tprint(\"\\n********** SIM **********\\n\")\n\tprint(\"Threshold [um] * \"+histType[itype])\n\tDrawScat(histsArraySim, DCAsArray, typeFlag, \";Low DCA threshold [#mum];\"+mean+nameType[itype],\"../Plots-25-11-19/\"+histType[itype]+\"Scat500_SIM.pdf\")\n\n\n\n# draw1D(histsArrayCut, DCAsArray, \";p-value;Tracks / 0.005\",\"../Plots/pValues1DExtreme.pdf\")\n# drawScat(histsArrayCut, DCAsArray, \";Low DCA Threshold [#mum];Mean p-value\",\"../Plots/pValuesScatExtreme.pdf\")\n" }, { "alpha_fraction": 0.6633333563804626, "alphanum_fraction": 0.6899999976158142, "avg_line_length": 14.050000190734863, "blob_id": "90ff44696572de5c124acef8303a353b2a98d1f4", "content_id": "469d0392d644993b748451b60cce1f14ddf27cac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 300, "license_type": "no_license", "max_line_length": 62, 
"num_lines": 20, "path": "/RunLongSim/Rename.sh", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "prefix=\"FileList\"\nfile=\"gm2tracker_particle_gun_full.root\"\n\nfor i in {0..15}; do\n\n\techo $prefix$i\n\n\techo $file\n\n\techo gm2tracker_particle_gun_full_${i}.root\n\n\tcd $prefix$i\n\n\tmv $file gm2tracker_particle_gun_full_${i}.root\n\n\t#ls gm2tracker_particle_gun_full_${i}.root > ../FileList4HADD\n\n\tcd ../\n\ndone" }, { "alpha_fraction": 0.6201378703117371, "alphanum_fraction": 0.672575056552887, "avg_line_length": 27.815603256225586, "blob_id": "f23ff1c2ca3404329ae9580c3016df915ee9a873", "content_id": "68babe6ee1858d14424e22b04c2728fe8da7a288", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4062, "license_type": "no_license", "max_line_length": 155, "num_lines": 141, "path": "/Macros/TruthLongPlotter.py", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "# Plotter for truth LR information\n# !!! 
- Fraction of tracks with wrong LR guess !!!\n# - Fraction of tracks with p-values below 5%\n# - Reduced chi^2\n\n\nfrom ROOT import TFile, TCanvas, TH1F, TH1D, TLegend, TAxis, TAttMarker, TGraph, TGraphErrors\nfrom ROOT import gROOT\nfrom array import array\n\n# # Return the number of tracks with a p-value less than 5%\n# def pValFrac(hist):\n\n# \t# Loop over the bins\n# \t# Get bin values\n# \t# Sum them\n# \t# Break when x-value is 5%\n# \t# Divide by total entries\n\n# \tbinVal = 0\n# \tfor bin in range(0, hist.GetNbinsX()):\n# \t\txVal = hist.GetBinCenter(bin+1)\n# \t\t# print(xVal)\n# \t\t# binVal = hist.GetBinContent(bin+1) + binVal\n# \t\tif(xVal < 0.05):\n# \t\t\tbinVal = hist.GetBinContent(bin+1) + binVal\n# \t\t\tfrac = binVal / hist.GetEntries()\n\n# \t\t\treturn frac\n\n# \t\t\tdef wrongHitsFrac(allHist, wrongHist):\n\n# \t\t\t\treturn wrongHist.GetEntries() / allHist.GetEntries()\n\ndef Frac(all_, rightOrWrong_):\n\n\tfrac = []\n\n\tfor i in range(0,len(all_)):\n\n\t\tfrac.append(rightOrWrong_[i].GetEntries() / all_[i].GetEntries())\n\n\t\tprint(str(rightOrWrong_[i].GetEntries())+\" * \"+str(all_[i].GetEntries()))\n\n\treturn frac\n\ndef DrawScat(y_, DCAs_, title, fname):\n\n\tc = TCanvas(\"c2\",\"\",800,600)\n\n\t# Normal arrays don't work for whatever reason, must be a ROOT thing\n\tx, y, ex, ey = array('d'), array('d'), array('d'), array('d')\n\n\tn = len(y_)\n\n\t# if(n != len(wrongHist)):\n\t\t# print(\"*** Error, hist arrays different length ***\")\n\t\t# return\n\n\tfor i in range(0,n):\n\n#\t\tfrac = wrong_[i].GetEntries() / all_[i].GetEntries()\n\t\tx.append(DCAs_[i])\n\t\tex.append(0)\n\t\ty.append(y_[i])\n\t\tey.append(0)\n\n\t\t# print(str(DCAs_[i])+\" * \"+str(y_)+\" * \"+str(wrong_[i].GetEntries())+\" * \"+str(all_[i].GetEntries()))\n\n\tscat = 
TGraphErrors(n,x,y,ex,ey)\n\tscat.SetTitle(title)\n\t\t\t\n\tscat.GetXaxis().SetTitleSize(.04)\n\tscat.GetYaxis().SetTitleSize(.04)\n\tscat.GetXaxis().SetTitleOffset(1.1)\n\tscat.GetYaxis().SetTitleOffset(1.25)\n\tscat.GetXaxis().CenterTitle(1)\n\tscat.GetYaxis().CenterTitle(1)\n\t# scat.GetYaxis().SetRangeUser(0.086,0.106)\n\tscat.GetXaxis().SetRangeUser(-5,2500)\n\tscat.GetYaxis().SetMaxDigits(4)\n\t#scat.SetMarkerSize(3)\n\t#scat.SetLineWidth(3)\n\tscat.SetMarkerStyle(20) # Full circle\n\t#scat.SetMarkerColor(4)\n\t#scat.SetLineColor(4)\n\tscat.Draw(\"AP\")\n\tc.SaveAs(fname)\n\n\treturn\n\n\nfile = TFile.Open(\"~/Documents/gm2/LowDCAs/plots/LowDCAs_SimScanLongPlotsFull.root\")\n\n# Real dumb bit of logic here\n#DCAsArray_ = [0,25,50,75,100,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500]\n#DCAsArray_ = [0,100,200,300,400,500,600,125,150,175,200,225,250,275,300,325,350,375,400,425,450,475,500]\nDCAsArray_ = list(range(0,2400,100))\n# \n# histType = [\"Run\",\"pValues\"]\n# nameType = [\"Fraction of wrong tracks\",\"Fraction of tracks with p-value < 5%\"]\n\n# Loop over histogram types\n\n\n\nallHits_ = []\nwrongHits_ = []\nrightHits_ = []\n# fracHits = []\n\n# Loop over DCA scan\nfor ihist in range(0,len(DCAsArray_)):\n\tprint(\"Tracks\",ihist)\n\n\tallHits_.append(file.Get(\"plots\"+str(ihist)+\"/AllHits/Run\"))\n\twrongHits_.append(file.Get(\"plots\"+str(ihist)+\"/WrongHits/Run\"))\n\trightHits_.append(file.Get(\"plots\"+str(ihist)+\"/RightHits/Run\"))\t\n\n\t\t# print(DCAsArray[ihist],pValFrac(allHits[ihist]))\n\n\t# print(\"len(allHits) \"+str(len(allHits)))\n\t# print(\"allHits[0].GetMean() \"+str(allHits[0].GetMean()))\n\n\n\t#typeFlag = 0\n\t#if (itype > 1): typeFlag = 1\n\n\t#mean = \"Mean \"\n\t#if (typeFlag == 1): mean = \"\" \n\n\t\n\t#print(\"Threshold [um] * \"+histType[itype])\n\t# DrawScat(histsArrayData, DCAsArray, typeFlag, \";Low DCA threshold 
[#mum];\"+mean+nameType[itype],\"../Plots-25-11-19/\"+histType[itype]+\"Scat500_DATA.pdf\")\n\nDrawScat(Frac(allHits_, wrongHits_), DCAsArray_, \";Low DCA threshold [#mum];Fraction of tracks with wrong LR\", \"Plots/FracWrongTracksLong.png\")\nDrawScat(Frac(allHits_, rightHits_), DCAsArray_, \";Low DCA threshold [#mum];Fraction of tracks with right LR\", \"Plots/FracRightTracksLong.png\")\n\n\n# draw1D(histsArrayCut, DCAsArray, \";p-value;Tracks / 0.005\",\"../Plots/pValues1DExtreme.pdf\")\n# drawScat(histsArrayCut, DCAsArray, \";Low DCA Threshold [#mum];Mean p-value\",\"../Plots/pValuesScatExtreme.pdf\")" }, { "alpha_fraction": 0.5149105191230774, "alphanum_fraction": 0.7905898094177246, "avg_line_length": 82.83333587646484, "blob_id": "e90e1fde795900f6cfd89c6ea08a39155a53f91b", "content_id": "816cec681d6899f608f7aba7b022d95c943d365f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1509, "license_type": "no_license", "max_line_length": 90, "num_lines": 18, "path": "/BK/RunSimOld/MakeFileList.sh", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "path=/pnfs/GM2/mc/run1_gasgun_1062/runs_1567190000/1567190852\n\nls $path/gm2ringsim_muon_gasgun_truth_22551{200..225}_1567190852.1.root > Files/FileList0\nls $path/gm2ringsim_muon_gasgun_truth_22551{225..250}_1567190852.1.root > Files/FileList1\nls $path/gm2ringsim_muon_gasgun_truth_22551{250..275}_1567190852.1.root > Files/FileList2\nls $path/gm2ringsim_muon_gasgun_truth_22551{275..300}_1567190852.1.root > Files/FileList3\nls $path/gm2ringsim_muon_gasgun_truth_22551{300..325}_1567190852.1.root > Files/FileList4\nls $path/gm2ringsim_muon_gasgun_truth_22551{325..350}_1567190852.1.root > Files/FileList5\nls $path/gm2ringsim_muon_gasgun_truth_22551{350..375}_1567190852.1.root > Files/FileList6\nls $path/gm2ringsim_muon_gasgun_truth_22551{375..400}_1567190852.1.root > Files/FileList7\nls 
$path/gm2ringsim_muon_gasgun_truth_22551{400..425}_1567190852.1.root > Files/FileList8\nls $path/gm2ringsim_muon_gasgun_truth_22551{425..450}_1567190852.1.root > Files/FileList9\nls $path/gm2ringsim_muon_gasgun_truth_22551{450..475}_1567190852.1.root > Files/FileList10\nls $path/gm2ringsim_muon_gasgun_truth_22551{475..500}_1567190852.1.root > Files/FileList11\nls $path/gm2ringsim_muon_gasgun_truth_22551{500..525}_1567190852.1.root > Files/FileList12\nls $path/gm2ringsim_muon_gasgun_truth_22551{525..550}_1567190852.1.root > Files/FileList13\nls $path/gm2ringsim_muon_gasgun_truth_22551{550..575}_1567190852.1.root > Files/FileList14\nls $path/gm2ringsim_muon_gasgun_truth_22551{575..600}_1567190852.1.root > Files/FileList15\n" }, { "alpha_fraction": 0.6459790468215942, "alphanum_fraction": 0.6704545617103577, "avg_line_length": 26.031496047973633, "blob_id": "6e9f57f30cc6345bd5ae5b3d8dbc0a5fd07a5e4a", "content_id": "05cbd2c8345974de9bc618babb2bea22b5873526", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3432, "license_type": "no_license", "max_line_length": 155, "num_lines": 127, "path": "/Macros/TruthPlotter.py", "repo_name": "sam-grant/LowDCAs", "src_encoding": "UTF-8", "text": "# Plotter for truth LR information\n# !!! 
- Fraction of tracks with wrong LR guess !!!\n# - Fraction of tracks with p-values below 5%\n# - Reduced chi^2\n\n\nfrom ROOT import TFile, TCanvas, TH1F, TH1D, TLegend, TAxis, TAttMarker, TGraph, TGraphErrors\nfrom ROOT import gROOT\nfrom array import array\n\n# # Return the number of tracks with a p-value less than 5%\n# def pValFrac(hist):\n\n# \t# Loop over the bins\n# \t# Get bin values\n# \t# Sum them\n# \t# Break when x-value is 5%\n# \t# Divide by total entries\n\n# \tbinVal = 0\n# \tfor bin in range(0, hist.GetNbinsX()):\n# \t\txVal = hist.GetBinCenter(bin+1)\n# \t\t# print(xVal)\n# \t\t# binVal = hist.GetBinContent(bin+1) + binVal\n# \t\tif(xVal < 0.05):\n# \t\t\tbinVal = hist.GetBinContent(bin+1) + binVal\n# \t\t\tfrac = binVal / hist.GetEntries()\n\n# \t\t\treturn frac\n\n# \t\t\tdef wrongHitsFrac(allHist, wrongHist):\n\n# \t\t\t\treturn wrongHist.GetEntries() / allHist.GetEntries()\n\ndef DrawScat(all_, wrong_, DCAs_, title, fname):\n\n\tc = TCanvas(\"c2\",\"\",800,600)\n\n\t# Normal arrays don't work for whatever reason, must be a ROOT thing\n\tx, y, ex, ey = array('d'), array('d'), array('d'), array('d')\n\n\tn = len(all_)\n\n\t# if(n != len(wrongHist)):\n\t\t# print(\"*** Error, hist arrays different length ***\")\n\t\t# return\n\n\n\n\tfor i in range(0,n):\n\n\t\tfrac = wrong_[i].GetEntries() / all_[i].GetEntries()\n\t\tx.append(DCAs_[i])\n\t\tex.append(0)\n\t\ty.append(frac)\n\t\tey.append(0)\n\n\t\tprint(str(DCAs_[i])+\" * \"+str(frac)+\" * \"+str(wrong_[i].GetEntries())+\" * \"+str(all_[i].GetEntries()))\n\n\tscat = TGraphErrors(n,x,y,ex,ey)\n\tscat.SetTitle(title)\n\t\t\t\n\tscat.GetXaxis().SetTitleSize(.04)\n\tscat.GetYaxis().SetTitleSize(.04)\n\tscat.GetXaxis().SetTitleOffset(1.1)\n\tscat.GetYaxis().SetTitleOffset(1.25)\n\tscat.GetXaxis().CenterTitle(1)\n\tscat.GetYaxis().CenterTitle(1)\n\t# 
scat.GetYaxis().SetRangeUser(0.086,0.106)\n\tscat.GetXaxis().SetRangeUser(-5,505)\n\tscat.GetYaxis().SetMaxDigits(4)\n\t#scat.SetMarkerSize(3)\n\t#scat.SetLineWidth(3)\n\tscat.SetMarkerStyle(20) # Full circle\n\t#scat.SetMarkerColor(4)\n\t#scat.SetLineColor(4)\n\tscat.Draw(\"AP\")\n\tc.SaveAs(fname)\n\n\treturn\n\n\nfile = TFile.Open(\"~/Documents/gm2/LowDCAs/plots/LowDCAs_SimScanPlotsFull.root\")\n\n\nDCAsArray_ = list(range(0,525,25))\n# \n# histType = [\"Run\",\"pValues\"]\n# nameType = [\"Fraction of wrong tracks\",\"Fraction of tracks with p-value < 5%\"]\n\n# Loop over histogram types\n\n\n# Put histograms in an array, or a python list (whatever this is)\nallHits_ = []\nwrongHits_ = []\n# fracHits = []\n\n# Loop over DCA scan\nfor ihist in range(0,len(DCAsArray_)):\n\t# print(\"Tracks\",ihist)\n\n\tallHits_.append(file.Get(\"plots\"+str(ihist)+\"/AllHits/Run\"))\n\twrongHits_.append(file.Get(\"plots\"+str(ihist)+\"/WrongHits/Run\"))\n\n\t\t# print(DCAsArray[ihist],pValFrac(allHits[ihist]))\n\n\t# print(\"len(allHits) \"+str(len(allHits)))\n\t# print(\"allHits[0].GetMean() \"+str(allHits[0].GetMean()))\n\n\n\t#typeFlag = 0\n\t#if (itype > 1): typeFlag = 1\n\n\t#mean = \"Mean \"\n\t#if (typeFlag == 1): mean = \"\" \n\n\t\n\t#print(\"Threshold [um] * \"+histType[itype])\n\t# DrawScat(histsArrayData, DCAsArray, typeFlag, \";Low DCA threshold [#mum];\"+mean+nameType[itype],\"../Plots-25-11-19/\"+histType[itype]+\"Scat500_DATA.pdf\")\n\nDrawScat(allHits_, wrongHits_, DCAsArray_, \";Low DCA threshold [#mum];Fraction of tracks with wrong LR\",\"Plots/FracWrongTracks.png\")\n\n\n\n# draw1D(histsArrayCut, DCAsArray, \";p-value;Tracks / 0.005\",\"../Plots/pValues1DExtreme.pdf\")\n# drawScat(histsArrayCut, DCAsArray, \";Low DCA Threshold [#mum];Mean p-value\",\"../Plots/pValuesScatExtreme.pdf\")" } ]
12
adbedada/aia
https://github.com/adbedada/aia
7aff77a672a43d8208744358ea3f71ab5feb7da4
2d14c720c2cecd952ad0d602aa0cf640d4f78077
028608323dcb8c3c8ce2d993845db6b8a0811f6e
refs/heads/master
"2022-05-27T19:50:52.920996"
"2019-11-12T21:20:59"
"2019-11-12T21:20:59"
220,318,077
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.3882244825363159, "alphanum_fraction": 0.39926403760910034, "avg_line_length": 34.064517974853516, "blob_id": "bd137d3d44782a57c72318f7b5cd75374a682444", "content_id": "6c5a9575b2027af93606e2b4393e1d4136a483c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1087, "license_type": "no_license", "max_line_length": 107, "num_lines": 31, "path": "/sagemaker/README.md", "repo_name": "adbedada/aia", "src_encoding": "UTF-8", "text": "# Yolo on Sagemaker\n\nThe Dockerfile is used to create a YOLO-ECR file that saves the `darknet` github repository and compile it.\n \n* Save Yolo setting files in the `cfg` folder\n \n - your.data \n - your.cfg\n - your.names \n \n* Your Dataset should be stored in the `data` folder in the following structure.\n \n data \n |---> custom\n |---> object\n |---> train\n |--->images\n |---> image_001.jpg\n |---> image_002.jpg\n ...\n |--->labels\n |---> label_001.txt\n |---> label_002.txt\n ...\n |---> test\n ...\n |----> train.txt\n |-----> test.txt\n \nInside the `custom` folder, you should also save the `train.txt` and `test.txt` files\npointing to the image and label files inside the `object` folder.\n" }, { "alpha_fraction": 0.5711110830307007, "alphanum_fraction": 0.5866666436195374, "avg_line_length": 18.565217971801758, "blob_id": "9034ff6813d3fc919d2418774287673fd150bbd2", "content_id": "60c2134f33188b0b9cf9d81365dc259ed78e00f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 450, "license_type": "no_license", "max_line_length": 41, "num_lines": 23, "path": "/sagemaker/Dockerfile", "repo_name": "adbedada/aia", "src_encoding": "UTF-8", "text": "FROM nvcr.io/nvidia/pytorch:19.09-py3\n\nRUN apt-get update && yes|apt-get upgrade\nRUN apt-get install -y wget \\\n curl \\\n bzip2 \\\n nano \\\n sudo \\\n apt-utils \\\n dpkg \\\n gnupg2 \\\n\tnginx \\\n software-properties-common\n\n\nENV 
PATH=\"/opt/ml/code:${PATH}\"\nWORKDIR /opt/ml/code\n\nRUN mkdir /opt/ml/code/yolo\nCOPY yolo/. /opt/ml/code/yolo/.\nRUN cd /opt/ml/code/yolo/darknet && \\\n\tsudo make && \\\n\t./darknet\n" }, { "alpha_fraction": 0.5820642709732056, "alphanum_fraction": 0.5930625796318054, "avg_line_length": 27.16666603088379, "blob_id": "316da979d8e4daab8f1c324f062410284431d0e3", "content_id": "6de7d9df9b3e4ffd50c5350635a8cfaa23faa48d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1182, "license_type": "no_license", "max_line_length": 99, "num_lines": 42, "path": "/sagemaker/yolo/train", "repo_name": "adbedada/aia", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# test yolo\n#based on\n\"\"\"\nhttps://github.com/awslabs/amazon-sagemaker-examples/blob/\nmaster/advanced_functionality/tensorflow_bring_your_own/container/cifar10/train\n\"\"\"\n\nimport os\nimport sys\nimport subprocess\nimport traceback\nimport tqdm\n\ndef _run(cmd):\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ)\n # stdout, stderr = process.communicate()\n # line = process.stdout.readline()\n # print(line)\n return_code = process.poll()\n if return_code:\n error_msg = 'Return Code: {}, CMD: {}, Err: {}'.format(return_code, cmd, stderr)\n raise Exception(error_msg)\n\nif __name__ == '__main__':\n try:\n train_cmd = ['./darknet/darknet',\n 'detector',\n 'train',\n 'cfg/boat.data',\n 'cfg/boat.cfg',\n 'cfg/darknet19_448.conv.23']\n\n _run(train_cmd)\n\n sys.exit(0)\n except Exception as e:\n trc = traceback.format_exc()\n print('Exception during training: ' + str(e) + '\\n' + trc, file=sys.stderr)\n sys.exit(255)\n\n #./darknet/darknet detector test cfg/ships" } ]
3
stunum/python_exercise
https://github.com/stunum/python_exercise
1aed06992d62bcec96d90a060b983ea66afe3d77
c458ceb8dc45607f19ef2a8f7ef3b879bdc26c97
701ab04e84cb5ca43be9ab851efdbd6c28ea3773
refs/heads/master
"2020-03-22T12:53:55.097622"
"2018-07-24T12:23:05"
"2018-07-24T12:23:05"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5248161554336548, "alphanum_fraction": 0.5441176295280457, "avg_line_length": 25.536584854125977, "blob_id": "106125da0637bee543efd85f0fbff37bb131fa7f", "content_id": "68c7b93f2108b5c3a1d07d20c7365e70b033b8ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1620, "license_type": "no_license", "max_line_length": 62, "num_lines": 41, "path": "/QCscore.py", "repo_name": "stunum/python_exercise", "src_encoding": "UTF-8", "text": "'''\n老师想知道从某某同学当中,分数最高的是多少,现在请你编程模拟老师的询问。\n当然,老师有时候需要更新某位同学的成绩.\n\n输入包括多组测试数据。\n每组输入第一行是两个正整数N和M(0 < N <= 30000,0 < M < 5000),分别代表学生的数目和操作的数目。\n学生ID编号从1编到N。\n第二行包含N个整数,代表这N个学生的初始成绩,其中第i个数代表ID为i的学生的成绩\n接下来又M行,每一行有一个字符C(只取‘Q’或‘U’),和两个正整数A,B,当C为'Q'的时候, 表示这是一条询问操作,\n他询问ID从A到B(包括A,B)的学生当中,成绩最高的是多少\n当C为‘U’的时候,表示这是一条更新操作,要求把ID为A的学生的成绩更改为B。\n'''\n\ndef qcscore():\n N, M = input(\"请输入第一组数据:\").split(\" \")\n stuScore = []\n stuScore.append(input(\"请输入学生成绩:\").split(\" \"))\n stuDic = {}\n for i in range(0, int(N)):\n stuDic[i] = int(stuScore[0][i])\n qclist = []\n for n in range(0, int(M)):\n qclist.append(input(\"请输入操作:\").split(' '))\n Anum = int(qclist[n][1])\n Bnum = int(qclist[n][2])\n bigger = stuDic[Anum]\n if qclist[n][0] == \"Q\":\n while True:\n if Anum == Bnum:\n break\n if stuDic[Anum - 1] < stuDic[Anum]:\n bigger = stuDic[Anum]\n Anum += 1\n\n print(bigger)\n else:\n stuDic[Anum - 1]= Bnum\n\n\nif __name__ == \"__main__\":\n qcscore()\n" }, { "alpha_fraction": 0.5448275804519653, "alphanum_fraction": 0.5586206912994385, "avg_line_length": 21.947368621826172, "blob_id": "f3ed4a83353beb42ac0ead5110c695128cede8eb", "content_id": "79809db818c52e742d2a12fe0c3693aaddc78ae8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 551, "license_type": "no_license", "max_line_length": 62, "num_lines": 19, "path": "/countBitDiff.py", "repo_name": "stunum/python_exercise", "src_encoding": "UTF-8", "text": 
"\"\"\"\n题目:世界上有10种人,一种懂二进制,一种不懂。那么你知道两个int32整数m和n的二进制表达,有多少个位(bit)不同么?\n\"\"\"\ndef countBitDiff():\n x = bin(int(input(\"第一个数:\")))\n y = bin(int(input(\"第二个数:\")))\n strx=str(x)\n stry=str(y)\n num=abs(len(strx)-len(stry))\n lennum=len(strx)\n if len(strx)>len(stry):\n lennum =len(stry)\n for i in range(0,lennum):\n if strx[i]!=stry[i]:\n num +=1\n print(num)\n\nif __name__==\"__main__\":\n countBitDiff()" }, { "alpha_fraction": 0.527220606803894, "alphanum_fraction": 0.5816618800163269, "avg_line_length": 37.77777862548828, "blob_id": "627d30ed6a42bd38643eecdbb614dccda8904199", "content_id": "8b381ae349fb79e74e476d328737b74c6a532e50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 63, "num_lines": 9, "path": "/BubbleSort.py", "repo_name": "stunum/python_exercise", "src_encoding": "UTF-8", "text": "testlist = [4, 5, 7, 2, 1, 15, 8, 22, 11, 9, 3]\n\nlistlen = len(testlist) # 提前计算待排list的长度\nprint('排序前:', testlist)\nfor i in range(listlen): # [0,1,2,.....]\n for j in range(i + 1, listlen): # i+1从第二个开始比较\n if testlist[i] > testlist[j]: # switch 小数往前排\n testlist[i], testlist[j] = testlist[j], testlist[i]\nprint('排完后:', testlist)\n" }, { "alpha_fraction": 0.418552041053772, "alphanum_fraction": 0.5067873597145081, "avg_line_length": 23.55555534362793, "blob_id": "7dcec01a412b0eb399f8b0d45a1cf537b60c963d", "content_id": "548fa2cb870bcc1edfbaa7ffd7d795d20d32140a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "no_license", "max_line_length": 60, "num_lines": 18, "path": "/rotateMatrix.py", "repo_name": "stunum/python_exercise", "src_encoding": "UTF-8", "text": "'''\n给定一个NxN的矩阵,和矩阵的阶数N,请返回旋转后的NxN矩阵.\n\n例子:\n 输入:[[1,2,3],[4,5,6],[7,8,9]],3\n 返回:[[7,4,1],[8,5,2],[9,6,3]]\n'''\n\nimport numpy as np\ndef rotateMatrix(matrix, n):\n newmatrix = np.empty_like(matrix)\n for i in range(0, 
n):\n for j in range(0, n):\n newmatrix[i][j] = matrix[-(j + 1)][i]\n print(newmatrix)\nif __name__ == \"__main__\":\n matr = [[1, 2, 3,4], [4, 9, 6,7], [6,7, 8, 9],[2,3,6,4]]\n rotateMatrix(matr, 4)\n" }, { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8235294222831726, "avg_line_length": 15.5, "blob_id": "612e24e8e923d5e27f908270d7a9c17a510e888a", "content_id": "ae94d639dba3b3a06b870dbdd92cfa554694a41f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 50, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/README.md", "repo_name": "stunum/python_exercise", "src_encoding": "UTF-8", "text": "# python_exercise\n日常做的python的练习题\n\n" }, { "alpha_fraction": 0.3726707994937897, "alphanum_fraction": 0.47826087474823, "avg_line_length": 21, "blob_id": "83dfefd399acabb1fb197e17f3c348e144dc1863", "content_id": "cf08933d6461b3389d7f05a65167c89fd8712b0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "no_license", "max_line_length": 58, "num_lines": 22, "path": "/printMatrix.py", "repo_name": "stunum/python_exercise", "src_encoding": "UTF-8", "text": "'''\n对于一个矩阵,请设计一个算法,将元素按“之”字形打印。具体见样例。\n给定一个整数矩阵mat,以及他的维数nxm,请返回一个数组,其中元素依次为打印的数字。\n\n例子:\n输入:[[1,2,3],[4,5,6],[7,8,9],[10,11,12]],4,3\n\n返回:[1,2,3,6,5,4,7,8,9,12,11,10]\n'''\n\ndef printMatrix(matrix, n, m):\n for i in range(n):\n for j in range(m):\n if i % 2 == 0:\n print(matrix[i][j])\n else:\n print(matrix[i][-j])\n\n\nif __name__ == \"__main__\":\n matr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n printMatrix(matr, 4, 3)" }, { "alpha_fraction": 0.26750001311302185, "alphanum_fraction": 0.35749998688697815, "avg_line_length": 18.047618865966797, "blob_id": "eff6d9e3a9d2283b895a3a2824034354c3c4c57a", "content_id": "4f1d0c87174cafc3b95ecdcd30a5b63337c898d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 478, "license_type": "no_license", "max_line_length": 63, "num_lines": 21, "path": "/perfectSquare.py", "repo_name": "stunum/python_exercise", "src_encoding": "UTF-8", "text": "'''\n一个整数,它加上100后是一个完全平方数,再加上168又是一个完全平方数,请问该数是多少?\nx+100=n^2;\nx+100+168=m^2\n\n'''\n\n\ndef perSq():\n for i in range(2, 85):\n if 168 % i == 0:\n j = 168 // i\n if i > j and (i + j) % 2 == 0 and (i - j) % 2 == 0:\n m = (i + j) // 2\n n = (i - j) // 2\n x = n * n - 100\n print(x)\n\n\nif __name__ == \"__main__\":\n perSq()\n" }, { "alpha_fraction": 0.45272207260131836, "alphanum_fraction": 0.469914048910141, "avg_line_length": 22.299999237060547, "blob_id": "57d0c2a47d50c5eddc2d6e45fa13ae2db18f001d", "content_id": "79416b45195f5406c982408a14e950e8d11a475f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 814, "license_type": "no_license", "max_line_length": 97, "num_lines": 30, "path": "/noRepeatNumber.py", "repo_name": "stunum/python_exercise", "src_encoding": "UTF-8", "text": "'''\n给定几个数,能组成多少个互不相同且无重复数字的4位数数?各是多少?\n'''\ndef noRepeatNumber():\n numlist=[]\n while True:\n print('分别输入几个个位数,以“e”表示结束输入。')\n \n inputnum =input(\"请输入数字:\")\n if inputnum == 'e':\n break\n numlist.append(int(inputnum))\n print(numlist)\n \n count = 0\n countlist=[]\n for i in numlist:\n for o in numlist:\n for j in numlist:\n for k in numlist:\n if (i != o) and (i != j) and (i != k) and (o != j) and (o != k) and (j != k):\n count +=1\n countlist.append(i*1000+o*100+j*10+k)\n\n print(count,countlist)\n\n\n\nif __name__==\"__main__\":\n noRepeatNumber()" }, { "alpha_fraction": 0.49066388607025146, "alphanum_fraction": 0.5269709825515747, "avg_line_length": 25.054054260253906, "blob_id": "f3f4a5e370faa6b3990c2c74685c40e94a4a156b", "content_id": "501b69fe6e79d94bd446b261a1efa16bc2c54571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "no_license", 
"max_line_length": 89, "num_lines": 37, "path": "/binarySearch.py", "repo_name": "stunum/python_exercise", "src_encoding": "UTF-8", "text": "'''\n给定一个整数数组A及它的大小n,同时给定要查找的元素val,请返回它在数组中的位置(从0开始),\n若不存在该元素,返回-1。若该元素出现多次,请返回第一次出现的位置。\n输入:[1,3,5,7,9],5,3\n返回:1\n'''\n\n\nclass binarySearch(object):\n def __init__(self, listnum, n, val):\n self.start = 0\n self.mid = n // 2\n self.end = n\n self.listlen = n // 2\n self.val = val\n self.listnum = listnum\n\n def binarySearch(self):\n # print(self.listnum[self.start:self.mid])\n\n for i in range(self.listlen):\n if self.val in self.listnum[self.start:self.mid]:\n numindex = self.listnum[self.start:self.mid].index(self.val) + self.start\n print(numindex)\n return numindex\n else:\n self.start = self.mid\n self.mid = self.end\n continue\n print(-1)\n return -1\n\n\nif __name__ == \"__main__\":\n ls = [1, 4, 5, 7, 9, 44, 3, 6, 3, 22, 11, 55]\n bs = binarySearch(ls, 12, 99)\n bs.binarySearch()\n" } ]
9
HelenMao/dataset_util
https://github.com/HelenMao/dataset_util
47902f4148785bee03fbf2c16c5692a9db4f109d
911678c6d079bc327ce828e868a5ef530823c3b5
a61da1732266b80806683448165247e7608521a7
refs/heads/master
"2018-04-03T17:35:01.072704"
"2017-04-21T12:40:10"
"2017-04-21T12:40:10"
88,814,417
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4373064935207367, "alphanum_fraction": 0.49845200777053833, "avg_line_length": 32.47999954223633, "blob_id": "4d88a6a6b62650163fe59675fb770a4319be94ad", "content_id": "a4f18261a60df941d5c89a8e203f6a67b7da6310", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2584, "license_type": "no_license", "max_line_length": 67, "num_lines": 75, "path": "/cifar.py", "repo_name": "HelenMao/dataset_util", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 10 11:31:29 2017\r\n\r\n@author: DrLC\r\n\"\"\"\r\n\r\nimport pickle\r\nimport numpy\r\nimport random\r\n\r\npath = 'cifar-10-python/'\r\ndata_batch = ['data_batch_1',\r\n 'data_batch_2',\r\n 'data_batch_3',\r\n 'data_batch_4',\r\n 'data_batch_5']\r\ntest_batch = 'test_batch'\r\n\r\ndef load_data():\r\n data = ([[],[]],[[],[]])\r\n for i in range(len(data_batch)):\r\n with open(path+data_batch[i], 'rb') as fo:\r\n tmp = pickle.load(fo, encoding='bytes')\r\n data[0][0].append(tmp[b'data'])\r\n data[0][1].append(tmp[b'labels'])\r\n print (path+data_batch[i]+' loaded!')\r\n data[0][0] = numpy.array(data[0][0], dtype='float32') / 255.\r\n data[0][0] = numpy.transpose(numpy.reshape(data[0][0],\r\n (50000, 3, 32, 32)),\r\n axes=(0,2,3,1))\r\n data[0][1] = numpy.array(data[0][1], dtype='int32')\r\n data[0][1] = numpy.reshape(data[0][1], (50000,))\r\n label = []\r\n for i in range(50000):\r\n tmp = data[0][1][i]\r\n label.append([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n label[-1][tmp] = 1\r\n data[0][1] = numpy.array(label, dtype='float64')\r\n with open(path+test_batch, 'rb') as fo:\r\n tmp = pickle.load(fo, encoding='bytes')\r\n data[1][0] = tmp[b'data']\r\n data[1][1] = tmp[b'labels']\r\n print (path+test_batch+' loaded!')\r\n data[1][0] = numpy.array(data[1][0], dtype='float32') / 255.\r\n data[1][0] = numpy.transpose(numpy.reshape(data[1][0],\r\n (10000, 3, 32, 32)),\r\n axes=(0,2,3,1))\r\n data[1][1] = 
numpy.array(data[1][1], dtype='int32')\r\n label = []\r\n for i in range(10000):\r\n tmp = data[1][1][i]\r\n label.append([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n label[-1][tmp] = 1\r\n data[1][1] = numpy.array(label, dtype='float64')\r\n return data\r\n\r\ndef minibatch(data, label, batch_size):\r\n l = label.shape[0]\r\n candidate = random.sample(range(l), batch_size)\r\n mini_data = []\r\n mini_label = []\r\n for i in range(batch_size):\r\n mini_data.append(data[candidate[i]])\r\n mini_label.append(label[candidate[i]])\r\n mini_data = numpy.array(mini_data, dtype='float32')\r\n mini_label = numpy.array(mini_label, dtype='float32')\r\n return mini_data, mini_label\r\n\r\nif __name__ == '__main__':\r\n train, test = load_data()\r\n print (train[0].shape)\r\n print (train[1].shape)\r\n print (test[0].shape)\r\n print (test[1].shape)" } ]
1
HeyamBasem/Digit-Recognition-
https://github.com/HeyamBasem/Digit-Recognition-
c30e4f48a25a237d59e61d73291d5484f83b4aed
35350a594de17b0bc600d89c5d75a84e9cdcb4f8
5878b586e760e5678cbb377d3eea5831c07c5d7a
refs/heads/main
"2023-01-01T06:51:10.228176"
"2020-10-24T11:18:34"
"2020-10-24T11:18:34"
306,866,779
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6405092477798462, "alphanum_fraction": 0.677006185054779, "avg_line_length": 28.78384780883789, "blob_id": "6dbf3b25b4d6b858c8ba3f46de6c9e526ecf4b5c", "content_id": "33aec78022dad54a0d7558727c193bcd31ac7171", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12960, "license_type": "no_license", "max_line_length": 106, "num_lines": 421, "path": "/BookProject.py", "repo_name": "HeyamBasem/Digit-Recognition-", "src_encoding": "UTF-8", "text": "# sort data because using fetch_openml() return unsorted data\r\nfrom scipy import ndimage\r\n\r\n\r\ndef sort_by_target(mnist):\r\n reorder_train = np.array(sorted([(target, i) for i, target in enumerate(mnist.target[:60000])]))[:, 1]\r\n reorder_test = np.array(sorted([(target, i) for i, target in enumerate(mnist.target[60000:])]))[:, 1]\r\n mnist.data[:60000] = mnist.data[reorder_train]\r\n mnist.target[:60000] = mnist.target[reorder_train]\r\n mnist.data[60000:] = mnist.data[reorder_test + 60000]\r\n mnist.target[60000:] = mnist.target[reorder_test + 60000]\r\n\r\n\r\nimport numpy as np\r\nfrom sklearn.datasets import fetch_openml\r\n\r\nmnist = fetch_openml('mnist_784', version=1, cache=True)\r\nmnist.target = mnist.target.astype(np.int8) # fetch_openml() returns targets as strings\r\nprint(sort_by_target(mnist)) # fetch_openml() returns an unsorted dataset\r\n\r\nX, y = mnist[\"data\"], mnist[\"target\"]\r\nprint(X, y)\r\nprint(X.shape)\r\nprint(y.shape)\r\n\r\n\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\n\r\nsome_digit = X[36000]\r\nsome_digit_image = some_digit.reshape(28, 28)\r\nplt.imshow(some_digit_image, cmap=matplotlib.cm.binary, interpolation=\"nearest\")\r\nplt.axis(\"off\")\r\n\r\n# plt.show()\r\n\r\ndef plot_digit(data):\r\n image = data.reshape(28, 28)\r\n plt.imshow(image, cmap = matplotlib.cm.binary,\r\n interpolation=\"nearest\")\r\n plt.axis(\"off\")\r\n\r\ndef plot_digits(instances, images_per_row=10, 
**options):\r\n size = 28\r\n images_per_row = min(len(instances), images_per_row)\r\n images = [instance.reshape(size,size) for instance in instances]\r\n n_rows = (len(instances) - 1) // images_per_row + 1\r\n row_images = []\r\n n_empty = n_rows * images_per_row - len(instances)\r\n images.append(np.zeros((size, size * n_empty)))\r\n for row in range(n_rows):\r\n rimages = images[row * images_per_row : (row + 1) * images_per_row]\r\n row_images.append(np.concatenate(rimages, axis=1))\r\n image = np.concatenate(row_images, axis=0)\r\n plt.imshow(image, cmap = matplotlib.cm.binary, **options)\r\n plt.axis(\"off\")\r\n\r\nplt.figure(figsize=(9,9))\r\nexample_images = np.r_[X[:12000:600], X[13000:30600:600], X[30600:60000:590]]\r\nplot_digits(example_images, images_per_row=10)\r\n# plt.show()\r\n\r\nprint(y[36000])\r\nX_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]\r\n\r\nimport numpy as np\r\n\r\nshuffle_index = np.random.permutation(60000)\r\nX_train, y_train = X_train[shuffle_index], y_train[shuffle_index]\r\n\r\n# binary classifier\r\ny_train_5 = (y_train == 5)\r\ny_test_5 = (y_test == 5)\r\n\r\nfrom sklearn.linear_model import SGDClassifier\r\n\r\nsgd_clf = SGDClassifier(max_iter=5, tol=-np.infty, random_state=42)\r\nsgd_clf.fit(X_train, y_train_5)\r\n\r\nprint(sgd_clf.predict([some_digit]))\r\n\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nprint(cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring=\"accuracy\"))\r\n\r\n\r\nfrom sklearn.base import BaseEstimator\r\n\r\n\r\nclass Never5Classifier(BaseEstimator):\r\n def fit(self, X, y=None):\r\n pass\r\n\r\n def predict(self, X):\r\n return np.zeros((len(X), 1), dtype=bool)\r\n\r\n\r\nnever_5_clf = Never5Classifier()\r\nprint(cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring=\"accuracy\"), '\\n')\r\n\r\nfrom sklearn.model_selection import cross_val_predict\r\n\r\ny_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)\r\nfrom 
sklearn.metrics import confusion_matrix\r\n\r\nprint(confusion_matrix(y_train_5, y_train_pred), '\\n')\r\n\r\ny_train_perfect_predictions = y_train_5\r\n\r\nprint(confusion_matrix(y_train_5, y_train_perfect_predictions), '\\n')\r\n\r\nfrom sklearn.metrics import precision_score, recall_score\r\n\r\nprint(precision_score(y_train_5, y_train_pred))\r\n# print(4344 / (4344 + 1307), '\\n') #must be the same result\r\n\r\nprint(recall_score(y_train_5, y_train_pred))\r\n# print(4344 / (4344 + 1077), '\\n') #must be the same result\r\n\r\nfrom sklearn.metrics import f1_score\r\n\r\nprint(f1_score(y_train_5, y_train_pred))\r\n# print(4344 / (4344 + (1077 + 1307)/2), '\\n') #must be the same result\r\n\r\ny_scores = sgd_clf.decision_function([some_digit])\r\nprint(y_scores)\r\n\r\nthreshold = 0\r\ny_some_digit_pred = (y_scores > threshold)\r\n\r\nprint(y_some_digit_pred)\r\n\r\nthreshold = 200000\r\ny_some_digit_pred = (y_scores > threshold)\r\nprint(y_some_digit_pred)\r\n\r\ny_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3,\r\n method=\"decision_function\")\r\n\r\nfrom sklearn.metrics import precision_recall_curve\r\n\r\nprecisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)\r\n\r\n# Precision and Recall versus the decision Threshold\r\ndef plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\r\n plt.plot(thresholds, precisions[:-1], \"b--\", label=\"Precision\", linewidth=2)\r\n plt.plot(thresholds, recalls[:-1], \"g-\", label=\"Recall\", linewidth=2)\r\n plt.xlabel(\"Threshold\", fontsize=16)\r\n plt.legend(loc=\"upper left\", fontsize=16)\r\n plt.ylim([0, 1])\r\n\r\n\r\nplt.figure(figsize=(8, 4))\r\nplot_precision_recall_vs_threshold(precisions, recalls, thresholds)\r\nplt.xlim([-700000, 700000])\r\n# plt.show()\r\n\r\n# Precision versus Recall\r\ndef plot_precision_vs_recall(precisions, recalls):\r\n plt.plot(recalls, precisions, \"b-\", linewidth=2)\r\n plt.xlabel(\"Recall\", fontsize=16)\r\n 
plt.ylabel(\"Precision\", fontsize=16)\r\n plt.axis([0, 1, 0, 1])\r\n\r\n\r\nplt.figure(figsize=(8, 6))\r\nplot_precision_vs_recall(precisions, recalls)\r\n# plt.show()\r\n\r\n\r\ny_train_pred_90 = (y_scores > 70000)\r\n\r\nprecision_score(y_train_5, y_train_pred_90)\r\n\r\nrecall_score(y_train_5, y_train_pred_90)\r\n\r\n\r\n# ROC curves\r\nfrom sklearn.metrics import roc_curve\r\n\r\nfpr, tpr, thresholds = roc_curve(y_train_5, y_scores)\r\n\r\n# ROC Curve\r\ndef plot_roc_curve(fpr, tpr, label=None):\r\n plt.plot(fpr, tpr, linewidth=2, label=label)\r\n plt.plot([0, 1], [0, 1], 'k--')\r\n plt.axis([0, 1, 0, 1])\r\n plt.xlabel('False Positive Rate', fontsize=16)\r\n plt.ylabel('True Positive Rate', fontsize=16)\r\n\r\n\r\nplt.figure(figsize=(8, 6))\r\nplot_roc_curve(fpr, tpr)\r\n# plt.show()\r\n\r\nfrom sklearn.metrics import roc_auc_score\r\n\r\nprint(roc_auc_score(y_train_5, y_scores))\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nforest_clf = RandomForestClassifier(n_estimators=10, random_state=42)\r\ny_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3,\r\n method=\"predict_proba\")\r\n\r\ny_scores_forest = y_probas_forest[:, 1] # score = proba of positive class\r\nfpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5, y_scores_forest)\r\n\r\n# Comparing ROC Curves\r\nplt.figure(figsize=(8, 6))\r\nplt.plot(fpr, tpr, \"b:\", linewidth=2, label=\"SGD\")\r\nplot_roc_curve(fpr_forest, tpr_forest, \"Random Forest\")\r\nplt.legend(loc=\"lower right\", fontsize=16)\r\n# plt.show()\r\n\r\n# show the plot of compairson between random forest and SGD\r\n\r\nprint(roc_auc_score(y_train_5, y_scores_forest))\r\n\r\ny_train_pred_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3)\r\nprecision_score(y_train_5, y_train_pred_forest)\r\n\r\nrecall_score(y_train_5, y_train_pred_forest)\r\n\r\n# Multiclass Classification\r\nsgd_clf.fit(X_train, y_train) # y_train, not 
y_train_5\r\nprint(sgd_clf.predict([some_digit]))\r\n\r\nsome_digit_scores = sgd_clf.decision_function([some_digit])\r\nprint(some_digit_scores)\r\n\r\nprint(np.argmax(some_digit_scores))\r\n\r\nprint(sgd_clf.classes_)\r\n\r\nprint(sgd_clf.classes_[5])\r\n\r\nfrom sklearn.multiclass import OneVsOneClassifier\r\n\r\novo_clf = OneVsOneClassifier(SGDClassifier(max_iter=5, tol=-np.infty, random_state=42))\r\novo_clf.fit(X_train, y_train)\r\novo_clf.predict([some_digit])\r\n\r\nprint(len(ovo_clf.estimators_))\r\n\r\nforest_clf.fit(X_train, y_train)\r\nprint(forest_clf.predict([some_digit]))\r\n\r\n\"\"\"\" Difference between predict method and predict_proba : \"\"\"\r\n\r\nprint(forest_clf.predict_proba([some_digit]))\r\n\r\nprint(cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring=\"accuracy\"))\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nscaler = StandardScaler()\r\nX_train_scaled = scaler.fit_transform(X_train.astype(np.float64))\r\nprint(cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring=\"accuracy\"))\r\n\r\n# error analysis\r\ny_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)\r\nconf_mx = confusion_matrix(y_train, y_train_pred)\r\nprint(conf_mx)\r\n\r\n\r\ndef plot_confusion_matrix(matrix):\r\n \"\"\"If you prefer color and a colorbar\"\"\"\r\n fig = plt.figure(figsize=(8, 8))\r\n ax = fig.add_subplot(111)\r\n cax = ax.matshow(matrix)\r\n fig.colorbar(cax)\r\n\r\n\r\nplt.matshow(conf_mx, cmap='magma')\r\n# plt.show()\r\n\r\n\r\nrow_sums = conf_mx.sum(axis=1, keepdims=True)\r\nnorm_conf_mx = conf_mx / row_sums\r\n\r\nnp.fill_diagonal(norm_conf_mx, 0)\r\nplt.matshow(norm_conf_mx, cmap='cividis')\r\nplt.show()\r\n\r\ncl_a, cl_b = 3, 5\r\nX_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)]\r\nX_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)]\r\nX_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)]\r\nX_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)]\r\n\r\nplt.figure(figsize=(8, 
8))\r\nplt.subplot(221);\r\nplot_digits(X_aa[:25], images_per_row=5)\r\nplt.subplot(222);\r\nplot_digits(X_ab[:25], images_per_row=5)\r\nplt.subplot(223);\r\nplot_digits(X_ba[:25], images_per_row=5)\r\nplt.subplot(224);\r\nplot_digits(X_bb[:25], images_per_row=5)\r\nplt.show()\r\n\r\n# Multilabel classification\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\ny_train_large = (y_train >= 7)\r\ny_train_odd = (y_train % 2 == 1)\r\ny_multilabel = np.c_[y_train_large, y_train_odd]\r\n\r\nknn_clf = KNeighborsClassifier()\r\nknn_clf.fit(X_train, y_multilabel)\r\n\r\nprint(knn_clf.predict([some_digit]))\r\n\r\n# # the below code takes very long running time\r\n# y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3, n_jobs=-1)\r\n# print(f1_score(y_multilabel, y_train_knn_pred, average=\"macro\"))\r\n# # y_train not y_multilables , the book didn't se n_jobs=-1 either\r\n# # the answer should be 0.97709078477525\r\n\r\n\r\n# Multioutput classification\r\n# add noise to img of 5 and see if it still class it as 5\r\nnoise = np.random.randint(0, 100, (len(X_train), 784))\r\nX_train_mod = X_train + noise\r\nnoise = np.random.randint(0, 100, (len(X_test), 784))\r\nX_test_mod = X_test + noise\r\ny_train_mod = X_train\r\ny_test_mod = X_test\r\n\r\n# Image of number 5\r\nsome_index = 5500\r\nplt.subplot(121);\r\nplot_digit(X_test_mod[some_index]) # The noisy input image\r\nplt.subplot(122);\r\nplot_digit(y_test_mod[some_index]) # Original image (target)\r\nplt.show()\r\n\r\nknn_clf.fit(X_train_mod, y_train_mod)\r\nclean_digit = knn_clf.predict([X_test_mod[some_index]])\r\nplot_digit(clean_digit) # Clean image after removing the noises\r\n\r\n# Exercises\r\n# Q1\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nknn = KNeighborsClassifier()\r\n\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\ng = GridSearchCV(knn, {'weights': [\"uniform\", \"distance\"], 'n_neighbors': [3, 4, 5]}, cv=5)\r\ng.fit(X_train, 
y_train_5)\r\n\r\nprint(g.best_params_)\r\n\r\nprint(g.best_score_)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\n\r\ny_pred = g.predict(X_test)\r\nprint(accuracy_score(y_test, y_pred))\r\n\r\n\r\n\r\n# Q2\r\nfrom scipy.ndimage.interpolation import shift\r\n\r\n\r\ndef shift_image(image, dx, dy):\r\n image = image.reshape((28, 28))\r\n shifted_image = shift(image, [dy, dx], cval=0, mode=\"constant\")\r\n return shifted_image.reshape([-1])\r\n\r\n\r\nimage = X_train[2645]\r\nshifted_image_down = shift_image(image, 0, 4)\r\nshifted_image_left = shift_image(image, -5, 0)\r\nshifted_image_up = shift_image(image, 0, -7)\r\nshifted_image_right = shift_image(image, 7, 0)\r\n\r\nplt.figure(figsize=(12, 3))\r\nplt.subplot(231)\r\nplt.title(\"Original\", fontsize=14)\r\nplt.imshow(image.reshape(28, 28), interpolation=\"nearest\", cmap=\"Greys\")\r\nplt.subplot(232)\r\nplt.title(\"Shifted down\", fontsize=14)\r\nplt.imshow(shifted_image_down.reshape(28, 28), interpolation=\"nearest\", cmap=\"Greys\")\r\nplt.subplot(233)\r\nplt.title(\"Shifted left\", fontsize=14)\r\nplt.imshow(shifted_image_left.reshape(28, 28), interpolation=\"nearest\", cmap=\"Greys\")\r\nplt.show()\r\nplt.subplot(234)\r\nplt.title(\"Shifted up\", fontsize=14)\r\nplt.imshow(shifted_image_up.reshape(28, 28), interpolation=\"nearest\", cmap=\"Greys\")\r\nplt.show()\r\nplt.subplot(235)\r\nplt.title(\"Shifted right\", fontsize=14)\r\nplt.imshow(shifted_image_right.reshape(28, 28), interpolation=\"nearest\", cmap=\"Greys\")\r\nplt.show()\r\n\r\nX_train_augmented = [image for image in X_train]\r\ny_train_augmented = [label for label in y_train]\r\n\r\nfor dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):\r\n for image, label in zip(X_train, y_train):\r\n X_train_augmented.append(shift_image(image, dx, dy))\r\n y_train_augmented.append(label)\r\n\r\nX_train_augmented = np.array(X_train_augmented)\r\ny_train_augmented = np.array(y_train_augmented)\r\n\r\nshuffle_idx = 
np.random.permutation(len(X_train_augmented))\r\nX_train_augmented = X_train_augmented[shuffle_idx]\r\ny_train_augmented = y_train_augmented[shuffle_idx]\r\n\r\nknn = KNeighborsClassifier(**g.best_params_)\r\nknn.fit(X_train_augmented, y_train_augmented)\r\n\r\ny_pred = knn_clf.predict(X_test)\r\nprint(accuracy_score(y_test, y_pred))\r\n\r\n# End of code\r\n" } ]
1
issyl0/alexa-house-cleaning-rota
https://github.com/issyl0/alexa-house-cleaning-rota
2df874ea7492334d07e9de761dade70e88aca4ac
8b822da15c049e96e9a00e422d038d950b9f2b90
afa46631e0e4af8383ade7204818a3d7afcf1fe0
refs/heads/master
"2021-08-23T05:09:15.980141"
"2017-12-03T14:48:39"
"2017-12-03T14:49:05"
103,042,291
0
0
null
"2017-09-10T15:59:07"
"2017-09-10T15:59:07"
"2017-09-10T16:04:37"
null
[ { "alpha_fraction": 0.8115941882133484, "alphanum_fraction": 0.8115941882133484, "avg_line_length": 68, "blob_id": "2120fa03f20e9f8bf5ed1b9c4317f322177f355f", "content_id": "505993696e5191d8eb57a0c6cb8d627f9fe62ec6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 69, "license_type": "no_license", "max_line_length": 68, "num_lines": 1, "path": "/README.md", "repo_name": "issyl0/alexa-house-cleaning-rota", "src_encoding": "UTF-8", "text": "An Alexa skill that tells you details about our house cleaning rota.\n" }, { "alpha_fraction": 0.5379897952079773, "alphanum_fraction": 0.5393526554107666, "avg_line_length": 26.688678741455078, "blob_id": "210421d9891852b16fe7b415063230109af914fd", "content_id": "966aeccce0c2e17755b1b36d3605023082a8b3db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2935, "license_type": "no_license", "max_line_length": 92, "num_lines": 106, "path": "/handler.py", "repo_name": "issyl0/alexa-house-cleaning-rota", "src_encoding": "UTF-8", "text": "def alexa_handler(event, context):\n request = event['request']\n\n # called when invoked with no values - early exit\n if request['type'] == 'LaunchRequest':\n return get_welcome_response()\n\n if request['type'] == 'IntentRequest':\n intent = request['intent']\n\n if intent['name'] == 'HouseCleaningRota':\n return make_response(\n get_cleaning_rota_status(intent),\n card_title='Lookup'\n )\n elif intent['name'] == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent['name'] in ('AMAZON.StopIntent', 'AMAZON.CancelIntent'):\n return make_response(\n 'Thank you for using House Cleaning Rota',\n card_title='Goodbye',\n )\n\n # default catch all in case nothing else matches\n return make_response(\"Sorry, I didn't understand that request\")\n\ndef get_welcome_response():\n welcome = \"\"\"\n Welcome to the House Cleaning Rota Alexa skill. 
You can\n ask me which week of the rota it is, and find out what\n jobs each person has to do.\n \"\"\"\n\n return make_response(\n welcome,\n card_title='Welcome',\n reprompt_text=welcome,\n should_end_session=False\n )\n\ndef _get_cleaning_rota(intent):\n slots = intent.get('slots')\n speech_output = None\n\n if slots:\n cleaning_rota = slots['HouseCleaningRota'].get('value')\n\n if cleaning_rota:\n week = check_week\n all_jobs = check_all_jobs\n\n speech_output = 'It is week ' + week + '. Jack must: ' +\n all_jobs['jack'] '. Phil must: ' + all_jobs['phil'] + '.\n Isabell must: ' + all_jobs['isabell'] + '.'\n\telse:\n\t speech_output = 'Ask me to check the house cleaning rota.'\n\n return speech_output\n\ndef check_week:\n import requests\n\n week = 'Unknown'\n\n r = requests.get('https://house-cleaning-rota.eu-west-2.elasticbeanstalk.com/week.json')\n week = r.json()['week']\n\n return week\n\ndef check_all_jobs:\n import requests\n\n jobs = ''\n\n r = requests.get('https://house-cleaning-rota.eu-west-2.elasticbeanstalk.com/jobs.json')\n jobs = r.json()\n\n return jobs\n\ndef make_response(text, card_title='Thanks', should_end_session=True,\n reprompt_text=None):\n response = {\n 'version': '1.0',\n 'response': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': text,\n },\n 'card': {\n 'type': 'Simple',\n 'title': card_title,\n 'content': text\n },\n 'shouldEndSession': should_end_session\n }\n }\n\n if reprompt_text:\n response['reprompt'] = {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n }\n\n return response\n" } ]
2
joseruiz1989/teste_python_vsc_github
https://github.com/joseruiz1989/teste_python_vsc_github
b3267b6c0820154f6fce51388adf2adabba325b8
84b3e6d8b718d57fdba6e9319b7c5779ddb91f73
cc01926aa27e03b86a5e9eab116e15f91322f9e8
refs/heads/master
"2022-09-30T16:07:36.645639"
"2020-06-07T23:35:20"
"2020-06-07T23:35:20"
270,455,132
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6991150379180908, "alphanum_fraction": 0.7256637215614319, "avg_line_length": 18, "blob_id": "7485e638baded83c0d221ec1a4fa15b86ea0694d", "content_id": "93957a7cfae896e7b8704f076a470c4ee815872b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 37, "num_lines": 6, "path": "/teste_code.py", "repo_name": "joseruiz1989/teste_python_vsc_github", "src_encoding": "UTF-8", "text": "print(\"teste print file from github\")\nprint(\"hola desde vs\")\n\nprint(\"testesito más 1\")\n\nprint(\"testesito más 12\")" } ]
1
AriniInf/PROGJAR_05111740007003
https://github.com/AriniInf/PROGJAR_05111740007003
41015a301bcc69f0db53847330771746559518a2
e1fc70198cc5a9f13be54f0cd169a0b5323240d3
2f70ecdef35189047971dba4dd8651cebd642abc
refs/heads/master
"2020-12-28T00:11:12.126890"
"2020-05-13T19:48:30"
"2020-05-13T19:48:30"
238,116,301
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5803571343421936, "alphanum_fraction": 0.5803571343421936, "avg_line_length": 21.600000381469727, "blob_id": "2884851899e5200aedf2e2683eed959ebc9402c5", "content_id": "3a67655d31c7e9473d280c766b84916fcb14e487", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/tugas4/client_upload.py", "repo_name": "AriniInf/PROGJAR_05111740007003", "src_encoding": "UTF-8", "text": "from client import *\n\nif __name__=='__main__':\n os.chdir('./client')\n upload('progjar.txt', 'progjar.txt')" }, { "alpha_fraction": 0.759036123752594, "alphanum_fraction": 0.7710843086242676, "avg_line_length": 27, "blob_id": "ed0654fe61cf9155c6ed283f43d2d8edf3ada914", "content_id": "f16b3e812e1277f18b534f08a3af1f9403fe55dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 83, "license_type": "no_license", "max_line_length": 58, "num_lines": 3, "path": "/tugas9/README.md", "repo_name": "AriniInf/PROGJAR_05111740007003", "src_encoding": "UTF-8", "text": "Hasil Tes Performansi :\n\nhttps://docs.google.com/document/d/19gDGpFVT5vS7JR8it4my2kiKCCHCDU8yzXfR37LenhQ/edit?usp=sharing" }, { "alpha_fraction": 0.7916666865348816, "alphanum_fraction": 0.8611111044883728, "avg_line_length": 47.33333206176758, "blob_id": "86b6113ee21ffe9a0ad95873ecd6c75e2bd02561", "content_id": "06c46e5ae5443562de8f1a0f668190060d4d4911", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 144, "license_type": "no_license", "max_line_length": 96, "num_lines": 3, "path": "/tugas7/README.md", "repo_name": "AriniInf/PROGJAR_05111740007003", "src_encoding": "UTF-8", "text": "Link catatan keluaran hasil performance test :\n\nhttps://docs.google.com/document/d/18Xf02fhcAA9BGLVYZnVHqcF51odZvCyW8skgS1vr0Zo/edit?usp=sharing" }, { "alpha_fraction": 0.6449704170227051, 
"alphanum_fraction": 0.668639063835144, "avg_line_length": 29.178571701049805, "blob_id": "cb2840e185ed653a185e4ce01e7a78cf9ae44f06", "content_id": "0d82dd0cbe01d94afb7e68008e9dee7301dd0648", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 845, "license_type": "no_license", "max_line_length": 56, "num_lines": 28, "path": "/tugas1/tugas1b/server/server.py", "repo_name": "AriniInf/PROGJAR_05111740007003", "src_encoding": "UTF-8", "text": "import sys\nimport socket\n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# Bind the socket to the port\nserver_address = ('127.0.0.1', 10000)\nprint(f\"starting up on {server_address}\")\nsock.bind(server_address)\n# Listen for incoming connections\nsock.listen(1)\nwhile True:\n # Wait for a connection\n print(\"waiting for a connection\")\n connection, client_address = sock.accept()\n print(f\"connection from {client_address}\")\n # Receive the data in small chunks and retransmit it\n request = connection.recv(1024)\n file = open(request.decode(),\"rb\")\n print(\"request received\")\n while True:\n data = file.read(1024)\n if not data:\n break\n connection.sendall(data)\n print(\"sending.....\")\n # Clean up the connection\n file.close()\n connection.close()\n" }, { "alpha_fraction": 0.603706955909729, "alphanum_fraction": 0.6169461607933044, "avg_line_length": 25.952381134033203, "blob_id": "1a1b8ec81bc34ff2e84d8fca42524e5928f80d25", "content_id": "d8a10d4678793cb36a6714958e5791533e350993", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1133, "license_type": "no_license", "max_line_length": 101, "num_lines": 42, "path": "/tugas3/client_3.py", "repo_name": "AriniInf/PROGJAR_05111740007003", "src_encoding": "UTF-8", "text": "import threading\nimport logging\nimport requests\nimport datetime\nimport os\n\n\ndef download_gambar(url=None):\n if (url is None):\n return False\n ff = 
requests.get(url)\n tipe = dict()\n tipe['image/png']='png'\n tipe['image/jpg']='jpg'\n tipe['image/jpeg']='jpg'\n\n content_type = ff.headers['Content-Type']\n logging.warning(content_type)\n if (content_type in list(tipe.keys())):\n namafile = os.path.basename(url)\n ekstensi = tipe[content_type]\n logging.warning(f\"writing {namafile}\")\n fp = open(f\"{namafile}\",\"wb\")\n fp.write(ff.content)\n fp.close()\n else:\n return False\n\n\nif __name__=='__main__':\n\n threads = []\n gambar = [ \n 'https://myrepro.files.wordpress.com/2015/10/wpid-wallpaper-pemandangan-pantai-jpg.jpeg', \n 'https://myrepro.files.wordpress.com/2015/10/wpid-wallpaper-pemandangan-air-terjun-jpg.jpeg',\n 'https://upload.wikimedia.org/wikipedia/commons/6/65/Pemandangan_alam.jpg'\n ]\n \n for i in gambar:\n t = threading.Thread(target=download_gambar,args=(i,))\n threads.append(t)\n t.start()\n\n" }, { "alpha_fraction": 0.7012578845024109, "alphanum_fraction": 0.7028301954269409, "avg_line_length": 25.45833396911621, "blob_id": "b01c16eebf66f0acdce1eb214ce9cfc9e6db8ea0", "content_id": "4a05f225a02e820368fdb966d9337cb2d9a174d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 636, "license_type": "no_license", "max_line_length": 66, "num_lines": 24, "path": "/tugas4/README.md", "repo_name": "AriniInf/PROGJAR_05111740007003", "src_encoding": "UTF-8", "text": "# Tugas 4\n\n- melihat list file (list)\n- meletakkan file (upload)\n- mengambil file (download)\n\n- list : untuk list file pada direktori tertentu\n request : list\n parameter : tidak ada\n response : berhasil -> berhasil\n\n- upload : untuk meletakkan file\n request: upload\n parameter : source, destination\n response: berhasil -> berhasil\n gagal -> File sudah tersedia\n \n- download : untuk mengambil file\n request: download\n parameter: filename, nama yang akan disimpan\n response: berhasil -> berhasil\n gagal -> File tidak ditemukan\n\n- jika perintah tidak dikenali akan 
merespon dengan Perintah salah\n\n" }, { "alpha_fraction": 0.5514018535614014, "alphanum_fraction": 0.5514018535614014, "avg_line_length": 17, "blob_id": "9e863b269fdf0777a26ab994e9e7a8188f42bc56", "content_id": "97edc3cccba718431ec0594d04b4915167ef1467", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 34, "num_lines": 6, "path": "/tugas4/client_download.py", "repo_name": "AriniInf/PROGJAR_05111740007003", "src_encoding": "UTF-8", "text": "from client import *\n\n\nif __name__=='__main__':\n os.chdir('./client')\n download('opo.txt', 'abc.txt')" } ]
7
xmlabs-io/xmlabs-python
https://github.com/xmlabs-io/xmlabs-python
8b90fe3e061ea5532c5c7f667762700041fb1aea
605fda524a7e5bc36fc4a9de52fdaeda7937fce7
d943316b682ab66c5796abf9e86d30e7877b49ba
refs/heads/master
"2022-11-30T03:57:18.862415"
"2020-08-14T00:07:48"
"2020-08-14T00:07:48"
287,384,702
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8260869383811951, "alphanum_fraction": 0.8260869383811951, "avg_line_length": 45, "blob_id": "8c2ca9f13b5fc428f3ba5d90c15f69fc69422b0a", "content_id": "0dea6c8e563eddb9bd71d6ebf953edb216994e5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "no_license", "max_line_length": 45, "num_lines": 1, "path": "/xmlabs/__init__.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "from .aws_lambda import xmlabs_lambda_handler\n" }, { "alpha_fraction": 0.5820244550704956, "alphanum_fraction": 0.5820244550704956, "avg_line_length": 29.972972869873047, "blob_id": "cfa0c3e9ea9d9863ad74ea81449e0e7b0af4b26d", "content_id": "2c9dd83e3c5037cdfd4db829c9828965f510887e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1146, "license_type": "no_license", "max_line_length": 74, "num_lines": 37, "path": "/xmlabs/aws_lambda/handler.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "from .config import xmlabs_settings\nfrom .env import get_environment\nfrom functools import wraps\n\n\ndef xmlabs_lambda_handler(fn):\n @wraps(fn)\n def wrapped(*args, **kwargs):\n env, config = None , None\n try:\n env = get_environment(*args, **kwargs)\n if not env:\n raise Exception(\"No Environment detected\")\n except Exception as ex:\n ## TODO: Improve Exception catching here\n ## TODO: Log to cloudwatch that Getting environment failed\n raise \n\n try:\n config = xmlabs_settings(env)\n if not config:\n raise Exception(\"No Configuration found\")\n except Exception as ex:\n ## TODO: Improve Exception catching\n ## TODO: Log to cloudwatch that Retrieving Settings failed\n raise\n\n ## Standard Invoke logging for \n #lambda_invoke_logger(*args, **kwargs)\n\n try:\n return fn(*args, **kwargs, config=config)\n except Exception as ex:\n # Make a standard error log to Cloudwatch for eas of 
capturing\n raise\n\n return wrapped\n" }, { "alpha_fraction": 0.732876718044281, "alphanum_fraction": 0.732876718044281, "avg_line_length": 19.85714340209961, "blob_id": "348fa2af69cc7b53c01448e223dea950076dc911", "content_id": "f0a06cff96cb6491eedbdc8c6cbcb8433c8fddc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 45, "num_lines": 7, "path": "/tests/test_aws_lambda_settings.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom xmlabs.aws_lambda.config import settings\n\ndef test_xmlabs_aws_lambda_config():\n \"\"\"Assert Settings\"\"\"\n assert settings\n" }, { "alpha_fraction": 0.5896980166435242, "alphanum_fraction": 0.6500887870788574, "avg_line_length": 16.030303955078125, "blob_id": "7d014a910e2de65e91d79b59479b8fb394361978", "content_id": "532041efd5881a34de579845e2d7e6b1372f497d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 563, "license_type": "no_license", "max_line_length": 36, "num_lines": 33, "path": "/pyproject.toml", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"xmlabs\"\nversion = \"0.1.0\"\ndescription = \"\"\nauthors = [\"Markus Jonsson <[email protected]>\"]\n\n[tool.poetry.dependencies]\npython = \"^3.5\"\ndynaconf = \"^3.0.0\"\nboto3 = \"^1.14.35\"\nrequests = \"^2.24.0\"\n\n[tool.poetry.dev-dependencies]\npytest = \"^6.0.1\"\n\n[tool.tox]\nlegacy_tox_ini = \"\"\"\n[tox]\nisolated_build = True\n#envlist = py35, py36, py37, py38\nenvlist = py38\n[testenv]\ndeps = pytest\n\nwhitelist_externals = poetry\ncommands =\n poetry install -v\n poetry run pytest tests/\n\"\"\"\n\n[build-system]\nrequires = [\"poetry>=0.12\"]\nbuild-backend = \"poetry.masonry.api\"\n\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 42, "blob_id": 
"6c383b34d2e93e6bb82902ec0570a4514bb774de", "content_id": "4d4f52d6216f0f06ba9a221fc9af15d1d819262b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44, "license_type": "no_license", "max_line_length": 42, "num_lines": 1, "path": "/xmlabs/aws_lambda/__init__.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "from .handler import xmlabs_lambda_handler\n\n" }, { "alpha_fraction": 0.4762096703052521, "alphanum_fraction": 0.47822579741477966, "avg_line_length": 31.207792282104492, "blob_id": "f16df921ae063495ce97b8c6643e12cf8482ff79", "content_id": "0927cb9fab29bcb0035708fee88f0dae93a979be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2480, "license_type": "no_license", "max_line_length": 108, "num_lines": 77, "path": "/xmlabs/aws_lambda/env.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "import os\nimport logging\n\nlogger = logging.getLogger()\n\n\ndef get_environment(event, context=None):\n valid_envs = [\"stage\", \"prod\", \"dev\"]\n env = None\n# default_env = os.getenv(\"DEFAULT_ENV\", \"dev\")\n default_env = os.getenv(\"APP_ENV\", os.getenv(\"DEFAULT_ENV\", \"dev\"))\n override_env = os.getenv(\"ENV\")\n\n if override_env:\n logger.info(\"Overriding Environment with {}\".format(override_env))\n return override_env\n\n ####################################\n ### X-Environment ###\n ### (override) ###\n ####################################\n if event.get('headers'):\n if event['headers'].get(\"X-Environment\"):\n return event['headers']['X-Environment'].lower()\n\n\n ####################################\n ### if lambda function arn ###\n ####################################\n split_arn = None\n try:\n split_arn = context.invoked_function_arn.split(':')\n except Exception as ex:\n split_arn = None\n if split_arn:\n\n ####################################\n ### lambda function arn alias 
###\n ### (preferred) ###\n ####################################\n e = split_arn[len(split_arn) - 1]\n if e in valid_envs:\n env = e\n return env.lower()\n\n\n #######################################\n ### Lambda Function Name Evaluation ###\n #######################################\n split_fn = split_arn[6].split(\"_\")\n if split_fn[-1].lower() in valid_envs:\n return split_fn[-1].lower()\n\n\n ####################################\n ### Stage Variable Evaluation ###\n ####################################\n apiStageVariable = None\n if event.get(\"stageVariables\"):\n apiStageVariable = event[\"stageVariables\"].get(\"env\")\n env = apiStageVariable\n apiStage = None\n if event.get(\"requestContext\"):\n apiStage = event[\"requestContext\"].get(\"stage\")\n if not env:\n env = apiStage\n if apiStage and apiStageVariable and apiStage != apiStageVariable:\n logger.warning(\"Tentrr: Using different api GW stagename and api Stage Variable is not recommended\")\n if env:\n return env.lower()\n\n # If invoked without alias\n if (not split_arn or len(split_arn) == 7) and default_env:\n return default_env\n else:\n raise Exception(\"Environment could not be determined\")\n return None\n" }, { "alpha_fraction": 0.639769434928894, "alphanum_fraction": 0.6685879230499268, "avg_line_length": 20.6875, "blob_id": "be12ce88702b403e10e89bbd258c96708385658d", "content_id": "406fa91e121036b43c33480d5cb3311757ed04b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 347, "license_type": "no_license", "max_line_length": 65, "num_lines": 16, "path": "/example/aws_lambda_fullapp/pyproject.toml", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"aws_lambda_fullapp\"\nversion = \"0.1.0\"\ndescription = \"\"\nauthors = [\"Markus Jonsson <[email protected]>\"]\n\n[tool.poetry.dependencies]\npython = \"^3.8\"\nxmlabs = {git = 
\"https://github.com/xmlabs-io/xmlabs-python.git\"}\n\n[tool.poetry.dev-dependencies]\npytest = \"^5.2\"\n\n[build-system]\nrequires = [\"poetry>=0.12\"]\nbuild-backend = \"poetry.masonry.api\"\n" }, { "alpha_fraction": 0.557323694229126, "alphanum_fraction": 0.5580469965934753, "avg_line_length": 29.72222137451172, "blob_id": "b2f4c55be1230725a14b5bdba82c90be857c5648", "content_id": "16bc6ce8dbfffa5423de080568c8097ed7d3dd54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2765, "license_type": "no_license", "max_line_length": 76, "num_lines": 90, "path": "/xmlabs/dynaconf/aws_ssm_loader.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "import boto3\nimport logging\nimport requests\nfrom functools import lru_cache\nfrom dynaconf.utils.parse_conf import parse_conf_data\n\n\nlogger = logging.getLogger()\n\nIDENTIFIER = 'aws_ssm'\n\n\ndef load(obj, env=None, silent=True, key=None, filename=None):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source\n :param obj: the settings instance\n :param env: settings current env (upper case) default='DEVELOPMENT'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all from `env`\n :param filename: Custom filename to load (useful for tests)\n :return: None\n \"\"\"\n # Load data from your custom data source (file, database, memory etc)\n # use `obj.set(key, value)` or `obj.update(dict)` to load data\n # use `obj.find_file('filename.ext')` to find the file in search tree\n # Return nothing\n prefix = \"\"\n if obj.get(\"AWS_SSM_PREFIX\"):\n prefix = \"/{}\".format(obj.AWS_SSM_PREFIX)\n path = \"{}/{}/\".format(prefix, env.lower())\n if key:\n path = \"{}{}/\".format(path, key)\n data = _read_aws_ssm_parameters(path)\n\n try:\n if data and key:\n value = parse_conf_data(\n data.get(key), tomlfy=True, box_settings=obj)\n if value:\n obj.set(key, value)\n elif data:\n 
obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)\n except Exception as e:\n if silent:\n return False\n raise\n\n\n@lru_cache\ndef _read_aws_ssm_parameters(path):\n logger.debug(\n \"Reading settings AWS SSM Parameter Store (Path = {}).\".format(path)\n )\n print(\n \"Reading settings AWS SSM Parameter Store (Path = {}).\".format(path)\n )\n result = {}\n try:\n ssm = boto3.client(\"ssm\")\n response = ssm.get_parameters_by_path(\n Path=path,\n Recursive=True,\n WithDecryption=True\n )\n while True:\n params = response[\"Parameters\"]\n for param in params:\n name = param[\"Name\"].replace(path, \"\").replace(\"/\", \"_\")\n value = param[\"Value\"]\n result[name] = value\n if \"NextToken\" in response:\n response = ssm.get_parameters_by_path(\n Path=path,\n Recursive=True,\n WithDecryption=True,\n NextToken=response[\"NextToken\"],\n )\n else:\n break\n\n except Exception as ex:\n print(\n \"ERROR: Trying to read aws ssm parameters (for {}): {}!\".format(\n path, str(ex)\n )\n )\n result = {}\n\n logger.debug(\"Read {} parameters.\".format(len(result)))\n return result\n" }, { "alpha_fraction": 0.5522388219833374, "alphanum_fraction": 0.5522388219833374, "avg_line_length": 7.375, "blob_id": "a7a132887c2b14b805b35924e2e94e1352b444c1", "content_id": "0618840a30b75ed6381a3c6cdea3115cbfce5ab3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 67, "license_type": "no_license", "max_line_length": 13, "num_lines": 8, "path": "/example/aws_lambda_fullapp/settings.toml", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "[default]\nname = \"TEST\"\n\n[dev]\nname = \"DEV\"\n\n[prod] \nname = \"PROD\"\n" }, { "alpha_fraction": 0.6027190089225769, "alphanum_fraction": 0.6450151205062866, "avg_line_length": 33.842105865478516, "blob_id": "23c42d3990e0faf228d8694007412aec122a8ef1", "content_id": "acc2f23b53b6c1106e1534b8ebcfe9501133ad67", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 662, "license_type": "no_license", "max_line_length": 82, "num_lines": 19, "path": "/xmlabs/dynaconf/aws_ec2_userdata_loader.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "from .base import ConfigSource\nimport logging\nimport requests\n\nlogger = logging.getLogger()\nclass ConfigSourceAwsEc2UserData(ConfigSource):\n def load(self):\n if self._running_in_ec2():\n #TODO: fetch EC2 USERDATA\n raise Exception(\"ConfigSourceEC2UserData Load Unimplemented\")\n \n def _running_in_ec2(self):\n try:\n # Based on https://gist.github.com/dryan/8271687\n instance_ip_url = \"http://169.254.169.254/latest/meta-data/local-ipv4\"\n requests.get(instance_ip_url, timeout=0.01)\n return True\n except requests.exceptions.RequestException:\n return False\n" }, { "alpha_fraction": 0.5810185074806213, "alphanum_fraction": 0.5810185074806213, "avg_line_length": 32.07692337036133, "blob_id": "df8ad8e88f114a7b75476c69281c1849f91b9e92", "content_id": "a02dccd30a2da1c385ece5e86c54a8c02e457256", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 51, "num_lines": 13, "path": "/example/aws_lambda/app.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "from xmlabs.aws_lambda import lambda_handler\n\n@lambda_handler\ndef main(event, context, config):\n print(config.STRIPE_API_SECRET_KEY)\n pass\n\nif __name__ == \"__main__\":\n main({\"headers\":{\"X-Environment\": \"dev\"}}, {})\n main({\"headers\":{\"X-Environment\": \"prod\"}}, {})\n main({\"headers\":{\"X-Environment\": \"dev\"}}, {})\n main({\"headers\":{\"X-Environment\": \"dev\"}}, {})\n main({\"headers\":{\"X-Environment\": \"prod\"}}, {})\n\n\n" }, { "alpha_fraction": 0.7799999713897705, "alphanum_fraction": 0.7799999713897705, "avg_line_length": 23, "blob_id": "8f1e76ac8787d78a606e5f79241c1c0bf5b034a7", 
"content_id": "a3497923d5c0b34cfefe4599e7d374fcaad2253b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 50, "license_type": "no_license", "max_line_length": 31, "num_lines": 2, "path": "/README.md", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "# xmlabs-python\r\nPython utilities for the XMLABS\r\n" }, { "alpha_fraction": 0.7286432385444641, "alphanum_fraction": 0.7286432385444641, "avg_line_length": 17, "blob_id": "a6a937e26cca6472f16495abcfc80e2e5bca94b2", "content_id": "d79a0a07f03529dc6334e4906263c6a0249ce508", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 199, "license_type": "no_license", "max_line_length": 43, "num_lines": 11, "path": "/tests/test_aws_lambda_integration.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "\nimport pytest\n\nfrom xmlabs import xmlabs_lambda_handler\n\n\n@xmlabs_lambda_handler\ndef lambda_handler(event, context, config):\n assert(config)\n\ndef test_lambda_handler():\n lambda_handler({},{})\n" }, { "alpha_fraction": 0.7156626582145691, "alphanum_fraction": 0.7156626582145691, "avg_line_length": 30.923076629638672, "blob_id": "47e3be9f06e742738de987b153b8708536ea93e5", "content_id": "06fcedeb092348593ff5c5f03aa1d8fdad4419e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 830, "license_type": "no_license", "max_line_length": 110, "num_lines": 26, "path": "/xmlabs/aws_lambda/config.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "from dynaconf import Dynaconf\nfrom dynaconf.constants import DEFAULT_SETTINGS_FILES\n\nLOADERS_FOR_DYNACONF = [\n 'dynaconf.loaders.env_loader', #Inorder to configure AWS_SSM_PREFIX we need to load it from environment\n 'xmlabs.dynaconf.aws_ssm_loader',\n 'dynaconf.loaders.env_loader', #Good to load environment last so that it takes 
precedenceover other config\n]\n\nENVIRONMENTS= ['prod','dev','stage']\n\nsettings = Dynaconf(\n #settings_files=['settings.toml', '.secrets.toml'],\n warn_dynaconf_global_settings = True,\n load_dotenv = True,\n default_settings_paths = DEFAULT_SETTINGS_FILES,\n loaders = LOADERS_FOR_DYNACONF,\n envvar_prefix= \"APP\",\n env_switcher = \"APP_ENV\",\n env='dev',\n environments=ENVIRONMENTS,\n #environments=True,\n)\n\ndef xmlabs_settings(env):\n return settings.from_env(env)\n" }, { "alpha_fraction": 0.7440758347511292, "alphanum_fraction": 0.7440758347511292, "avg_line_length": 20.100000381469727, "blob_id": "bdb6501a448a14d9a54f264336fd3d3928aaf220", "content_id": "e6a17d9882c55b9d3c268afa9d9df44a4dfc6a71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 45, "num_lines": 10, "path": "/tests/test_dynaconf.py", "repo_name": "xmlabs-io/xmlabs-python", "src_encoding": "UTF-8", "text": "from dynaconf import Dynaconf\n\n\ndef test_dynaconf_settingsenv():\n settingsenv = Dynaconf(environments=True)\n assert settingsenv\n\ndef test_dynaconf_settings():\n settings = Dynaconf()\n assert settings\n" } ]
15
b3b0/allyourbase
https://github.com/b3b0/allyourbase
b3e961f47d97c254dc74b6883ae88b12b091924a
9840b2a3fd6d45c59aa6b56cbd71ee3065b2fab8
a3e712ee5ee7fd1d19819de587acf9bd4f5199c2
refs/heads/master
"2021-01-23T21:18:35.882842"
"2018-04-20T18:21:22"
"2018-04-20T18:21:22"
102,893,041
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7441860437393188, "alphanum_fraction": 0.7674418687820435, "avg_line_length": 27.66666603088379, "blob_id": "ad14c8c1945ceec722b2458f0c2335649bbb7e3e", "content_id": "a43b10e3c48b60d02881894ac3267416aeee8d45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 86, "license_type": "no_license", "max_line_length": 67, "num_lines": 3, "path": "/README.md", "repo_name": "b3b0/allyourbase", "src_encoding": "UTF-8", "text": "# AllYourBasePie\n\nRun a simple Python 2.7 script to test oyur WAazuh / OSSEC ruleset.\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.625, "avg_line_length": 16.14285659790039, "blob_id": "e668c7d723bdd6926ee54fc7996ff612c6ee27f2", "content_id": "9337339b21925fb127b478feb30e558056b2df1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "no_license", "max_line_length": 56, "num_lines": 7, "path": "/allyourbase.py", "repo_name": "b3b0/allyourbase", "src_encoding": "UTF-8", "text": "import os\n\ndef wazuh():\n os.system('echo \"ALLYOURBASE\" >> /var/log/auth.log')\n print(\"IT HAS BEEN DONE\")\n\nwazuh()\n" } ]
2
rexapex/tealight-files
https://github.com/rexapex/tealight-files
3b80468cab57b091bd30abe3057b641de52d82bb
ee7cae61e29f9ef96e9118c7534ec374f6d73bc1
35ccc0b76c8df0333357d9d79aec3c919c069dd2
refs/heads/master
"2021-01-16T22:53:45.906965"
"2015-08-21T09:06:05"
"2015-08-21T09:06:05"
40,966,381
0
0
null
"2015-08-18T10:42:05"
"2014-08-18T13:45:21"
"2014-08-20T16:39:30"
null
[ { "alpha_fraction": 0.521142840385437, "alphanum_fraction": 0.5462856888771057, "avg_line_length": 21.205127716064453, "blob_id": "4bd9f1fd8b2a3d07fa2bcb1a076e7468890bb2b5", "content_id": "3456bf0e90a7f709b99eb6e2a3d01e284bd0d18d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 875, "license_type": "no_license", "max_line_length": 93, "num_lines": 39, "path": "/art/explosion.py", "repo_name": "rexapex/tealight-files", "src_encoding": "UTF-8", "text": "from tealight.art import (color, line, spot, circle, box, image, text, background, rectangle)\n\nfrom tealight.art import screen_width, screen_height\n\nfrom math import sin, cos, pi, sqrt\n\n\nclass explosion:\n \n def __init__(self):\n self.time = 50\n self.x = 0\n self.y = 0\n \n def set_pos(self, x, y):\n self.x = x\n self.y = y\n \n def star(self, x, y, c, size, spines):\n color(c)\n \n angle = 0\n \n for i in range(0, spines):\n x0 = x + (size * cos(angle))\n y0 = y + (size * sin(angle))\n \n line(x, y, x0, y0)\n \n angle = angle + (2 * pi / spines)\n \n def draw(self):\n if self.time > 0:\n self.star(self.x, self.y, \"orange\", 50-self.time, 50-self.time)\n self.time -= 1\n return False\n if self.time == 0:\n self.star(self.x, self.y, \"white\", 50-self.time, 50-self.time)\n return True\n \n " }, { "alpha_fraction": 0.6374895572662354, "alphanum_fraction": 0.6711297035217285, "avg_line_length": 25.559999465942383, "blob_id": "7300a79d8bb77cb3ef1b9fe9dff6abd51aac9f3f", "content_id": "3601680892c27d29bd2a7d14427b88452ab19c3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5975, "license_type": "no_license", "max_line_length": 140, "num_lines": 225, "path": "/art/prj_racetrack.py", "repo_name": "rexapex/tealight-files", "src_encoding": "UTF-8", "text": "from tealight.art import (color, line, spot, circle, box, image, text, background, rectangle)\n\nfrom tealight.art import screen_width, 
screen_height\n\nfrom math import sin, cos, pi, sqrt\n\nfrom github.Krimzar.art.racecar import car\nfrom github.rexapex.art.explosion import explosion\n\ncar1 = None #The player using this computer\ncar2 = None\n\nouterWallX = 5\nouterWallY = 5\nouterWallWidth = screen_width-10\nouterWallHeight = screen_height-10\ninnerWallX = 120\ninnerWallY = 250\ninnerWallWidth = screen_width-240\ninnerWallHeight = screen_height-500\n\nwPressed = False\naPressed = False\nsPressed = False\ndPressed = False\nupPressed = False\ndownPressed = False\nrightPressed = False\nleftPressed = False\n\nexplosions = [None] * 1024\nexplosionCount = 0\n\n#def init():\n #background(\"track.png\")\n\ndef start():\n global car1, car2\n \n car1 = car()\n car2 = car()\n \n car1.set_name(\"Foo\")\n car2.set_name(\"Bar\")\n \n car1.change_orientation(1)\n car2.change_orientation(1)\n\ndef handle_frame():\n global car1, car2, leftPressed, rightPressed, upPressed, downPressed, aPressed, sPressed, dPressed, wPressed, explosions, explosionCount\n \n color(\"white\")\n box(0, 0, screen_width, screen_height)\n color(\"red\")\n \n if leftPressed:\n car1.change_orientation(4)\n elif rightPressed:\n car1.change_orientation(-4)\n elif upPressed:\n car1.Acceleration += 0.01\n if car1.Acceleration > 0.05:\n car1.Acceleration = 0.05\n elif downPressed:\n if car1.Acceleration == 0:\n if car1.Acceleration < -0.05:\n car1.Acceleration = -0.05\n else:\n car1.Acceleration -= 0.01\n \n if aPressed:\n car2.change_orientation(4)\n elif dPressed:\n car2.change_orientation(-4)\n elif wPressed:\n car2.Acceleration += 0.01\n if car2.Acceleration > 0.05:\n car2.Acceleration = 0.05\n elif sPressed:\n if car2.Acceleration == 0:\n if car2.Acceleration < -0.05:\n car2.Acceleration = -0.05\n else:\n car2.Acceleration -= 0.01\n \n \n car1.update_speed()\n car2.update_speed()\n \n testCollisions()\n \n car1.draw_car(\"Foo\")\n car2.draw_car(\"Bar\")\n \n for i in range(0, explosionCount):\n if explosions[i] != None:\n if 
explosions[i].draw():\n explosions[i] = None\n \n # for i in range (0, len(otherCars)): #Draw connected players cars\n # otherCars[i].draw()\n \n #Draw the map\n color(\"green\")\n rectangle(outerWallX, outerWallY, outerWallWidth, outerWallHeight)\n #box(innerWallX, innerWallY, innerWallWidth, innerWallHeight)\n #spot(screen_width/2, innerWallY, innerWallWidth/2)\n #spot(screen_width/2, innerWallHeight+innerWallY, innerWallWidth/2)\n\n \ndef testCollisions():\n global car1, car2\n \n #Outer Wall Collision\n if car1.CoordD[\"x\"] <= outerWallX:\n car1.CoordD[\"x\"] = outerWallX\n car1.Acceleration = 0\n #car1.Speed = -car1.Speed\n car1.change_orientation(-car1.TotalOrientation*2)\n elif car1.CoordD[\"x\"] >= outerWallWidth:\n car1.CoordD[\"x\"] = outerWallWidth\n car1.Acceleration = 0\n # car1.Speed = -car1.Speed\n car1.change_orientation(-car1.TotalOrientation*2)\n if car1.CoordD[\"y\"] <= outerWallY:\n car1.CoordD[\"y\"] = outerWallY\n car1.Acceleration = 0\n # car1.Speed = -car1.Speed\n car1.change_orientation(360+car1.TotalOrientation)\n elif car1.CoordD[\"y\"] >= outerWallHeight:\n car1.CoordD[\"y\"] = outerWallHeight\n car1.Acceleration = 0\n # car1.Speed = -car1.Speed\n car1.change_orientation(360+car1.TotalOrientation)\n \n \n \n if car2.CoordD[\"x\"] <= outerWallX:\n car2.CoordD[\"x\"] = outerWallX\n car2.Acceleration = 0\n #car1.Speed = -car1.Speed\n car2.change_orientation(-car2.TotalOrientation*2)\n elif car2.CoordD[\"x\"] >= outerWallWidth:\n car2.CoordD[\"x\"] = outerWallWidth\n car2.Acceleration = 0\n # car1.Speed = -car1.Speed\n car2.change_orientation(-car2.TotalOrientation*2)\n if car2.CoordD[\"y\"] <= outerWallY:\n car2.CoordD[\"y\"] = outerWallY\n car2.Acceleration = 0\n # car1.Speed = -car1.Speed\n car2.change_orientation(360+car2.TotalOrientation)\n elif car2.CoordD[\"y\"] >= outerWallHeight:\n car2.CoordD[\"y\"] = outerWallHeight\n car2.Acceleration = 0\n # car1.Speed = -car1.Speed\n car2.change_orientation(360+car2.TotalOrientation)\n \n 
#Inner Wall Collision\n #if boxCollision(thisCar.x, thisCar.y, innerWallX, innerWallY, innerWallWidth, innerWallHeight):\n # print \"Collided with centre box\"\n \n \n#Returns True if point is inside the box\n#def boxCollision(x, y, boxX, boxY, boxWidth, boxHeight):\n# if x >= boxX and x <= boxWidth and y >= boxY and y <= boxHeight:\n# return True\n# else:\n# return False\n\n \n#Returns True if point is inside the circle\n#def circleCollision():\n \n\ndef handle_keydown(key):\n global car1, car2, leftPressed, rightPressed, upPressed, downPressed, aPressed, sPressed, dPressed, wPressed, explosions, explosionCount \n \n if key == \"left\":\n leftPressed = True\n elif key == \"right\":\n rightPressed = True\n elif key == \"up\":\n upPressed = True\n elif key == \"down\":\n downPressed = True\n elif key == \"a\":\n aPressed = True\n elif key == \"d\":\n dPressed = True\n elif key == \"w\":\n wPressed = True\n elif key == \"s\":\n sPressed = True\n elif key == \"space\":\n explosions[explosionCount] = explosion()\n explosions[explosionCount].set_pos(car2.CoordD[\"x\"], car2.CoordD[\"y\"])\n explosionCount += 1\n elif key == \"ctrl\":\n explosions[explosionCount] = explosion()\n explosions[explosionCount].set_pos(car1.CoordD[\"x\"], car1.CoordD[\"y\"])\n explosionCount += 1 \n \ndef handle_keyup(key):\n global car1, car2, leftPressed, rightPressed, upPressed, downPressed, aPressed, sPressed, dPressed, wPressed\n \n if key == \"left\":\n leftPressed = False\n elif key == \"right\":\n rightPressed = False\n elif key == \"up\":\n upPressed = False\n elif key == \"down\":\n downPressed = False\n elif key == \"a\":\n aPressed = False\n elif key == \"d\":\n dPressed = False\n elif key == \"w\":\n wPressed = False\n elif key == \"s\":\n sPressed = False\n\n#init()\nstart()" }, { "alpha_fraction": 0.5080214142799377, "alphanum_fraction": 0.5347593426704407, "avg_line_length": 12.371428489685059, "blob_id": "ede760ecfd6d164454b1de24c310c70851874921", "content_id": 
"6cd9f797b4c000a387bbb0ed3d04655613e79efa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 935, "license_type": "no_license", "max_line_length": 82, "num_lines": 70, "path": "/art/racetrack.py", "repo_name": "rexapex/tealight-files", "src_encoding": "UTF-8", "text": "from tealight.art import (color, line, spot, circle, box, image, text, background)\n\nfrom tealight.art import screen_width, screen_height\n\nfrom math import sin, cos, pi\n\nrunning = False\ncar1 = None\n\ndef handle_keydown(key):\n global ax, ay\n\n if key == \"left\" or key == \"right\":\n car1.ax = 1\n elif key == \"up\" or key == \"down\":\n car1.ay = 1\n\ndef start():\n global car1\n \n background(\"track.png\")\n car1 = car()\n car1.init()\n car1.draw()\n \n running = True\n update()\n \ndef update():\n global running\n \n while True:\n print(\"running\")\n car1.update()\n draw()\n \ndef draw():\n car1.draw()\n\nclass car:\n x = 0\n y = 0\n vx = 0\n vy = 0\n ax = 0\n ay = 0\n \n def init(self):\n x = 0\n y = 0\n vx = 0\n vy = 0\n ax = 0\n ay = 0\n \n def update(self):\n vx = vx + ax\n vy = vy + ay\n \n x = x + vx\n y = y + vy\n \n def draw(self):\n color(\"red\")\n spot(self.x, self.y, 25)\n \n \n\n\nstart()" }, { "alpha_fraction": 0.6429906487464905, "alphanum_fraction": 0.6579439043998718, "avg_line_length": 20.200000762939453, "blob_id": "c39c4ee750079a59942124437d12e97d2e165b67", "content_id": "40dbf2eb1995668fc7902e2ac6de7e7a50faa1db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 535, "license_type": "no_license", "max_line_length": 82, "num_lines": 25, "path": "/art/prj_car.py", "repo_name": "rexapex/tealight-files", "src_encoding": "UTF-8", "text": "from tealight.art import (color, line, spot, circle, box, image, text, background)\n\nfrom tealight.art import screen_width, screen_height\n\nfrom math import sin, cos, pi\n\nclass car:\n x = 0\n y = 0\n orientation = 0\n 
acceleration = 0\n \n power = 0.3\n \n def update(self):\n self.x += self.acceleration\n \n def draw(self):\n spot(self.x, self.y, 25)\n \n def editOrientation(self, dOri):\n self.orientation = self.orientation + dOri\n \n def editAcceleration(self, da):\n self.acceleration = self.acceleration + da\n \n" }, { "alpha_fraction": 0.359649121761322, "alphanum_fraction": 0.3662280738353729, "avg_line_length": 17.280000686645508, "blob_id": "d744795cb12a0ef10524c315915353640371acbc", "content_id": "e63d5974f32e664caa85792125b6a16642f52963", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 456, "license_type": "no_license", "max_line_length": 39, "num_lines": 25, "path": "/robot/mine.py", "repo_name": "rexapex/tealight-files", "src_encoding": "UTF-8", "text": "from tealight.robot import (move, \n turn, \n look, \n touch, \n smell, \n left_side, \n right_side)\n\n# Add your code here\ndef moveBy(spaces):\n for i in range(0, spaces):\n move()\n \n\n\ndef go():\n moveBy(3) \n turn(-1)\n while True:\n if right_side() or left_side():\n move()\n \n \n \ngo()" }, { "alpha_fraction": 0.5105740427970886, "alphanum_fraction": 0.5709969997406006, "avg_line_length": 14.809523582458496, "blob_id": "b89ed6f4f2c8034e8191b5d03029d2245e15c807", "content_id": "27cd3db01becbe281df8b7deef9ff93a4304b713", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 36, "num_lines": 21, "path": "/logo/chess.py", "repo_name": "rexapex/tealight-files", "src_encoding": "UTF-8", "text": "from tealight.logo import move, turn\n\ndef square(side):\n for i in range(0,4):\n move(side)\n turn(90)\n\ndef chessboard():\n sqSize = 8\n for i in range(0, 8):\n for j in range(0, 8):\n square(sqSize)\n move(sqSize)\n turn(180)\n move(8 * sqSize)\n turn(-90)\n move(8)\n turn(-90)\n \nturn(-90)\nchessboard()" }, { "alpha_fraction": 0.5568976402282715, 
"alphanum_fraction": 0.5835981965065002, "avg_line_length": 16.41111183166504, "blob_id": "453728c669eebde7704be9a5d4ba32fd01b1f548", "content_id": "b4605eca7737fb0ad531578ab6372db42657b5c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1573, "license_type": "no_license", "max_line_length": 82, "num_lines": 90, "path": "/art/orbits.py", "repo_name": "rexapex/tealight-files", "src_encoding": "UTF-8", "text": "from tealight.art import (color, line, spot, circle, box, image, text, background)\n\nfrom tealight.art import screen_width, screen_height\n\nfrom math import sin, cos, pi\n\nx = screen_width / 2\ny = screen_height / 2\nvx = 0\nvy = 0\nax = 0\nay = 0\n\ngravity = 0.2\ndrag = 0\n\n\npower = 0.3\n\nexplosionX = 0\nexplosionY = 0\nexplosionTime = 0\n\ndef star(x, y, c, size, spines):\n \n color(c)\n \n angle = 0\n \n for i in range(0, spines):\n x0 = x + (size * cos(angle))\n y0 = y + (size * sin(angle))\n \n line(x, y, x0, y0)\n \n angle = angle + (2 * pi / spines)\n\ndef handle_keydown(key):\n global ax, ay, explosionTime, explosionX, explosionY\n \n if key == \"left\":\n ax = -power\n elif key == \"right\":\n ax = power\n elif key == \"up\":\n ay = -power\n elif key == \"down\":\n ay = power \n elif key ==\"space\":\n explosionX = x\n explosionY = y\n explosionTime = 50\n\ndef handle_keyup(key):\n global ax, ay\n\n if key == \"left\" or key == \"right\":\n ax = 0\n elif key == \"up\" or key == \"down\":\n ay = 0\n\ndef do_explosion():\n global explosionTime, explosionX, explosionY\n \n if explosionTime > 0:\n star(explosionX, explosionY, \"orange\", 50-explosionTime, 50-explosionTime)\n explosionTime -= 1\n if explosionTime == 0:\n star(explosionX, explosionY, \"white\", 50-explosionTime, 50-explosionTime)\n \ndef handle_frame():\n global x,y,vx,vy,ax,ay\n \n color(\"white\")\n \n spot(x,y,8)\n vx = vx + ax\n vy = vy + ay + gravity\n \n drag = - vy * 0.005\n vy += drag\n \n x = x + vx\n y = y + vy\n 
\n do_explosion()\n \n color(\"blue\")\n \n spot(x,y,8)\n \n \n" } ]
7
kapilkalra04/face-off-demo-python-flask
https://github.com/kapilkalra04/face-off-demo-python-flask
81f5e61fc42bcad5b63f34f7ed30f75b0c1fa0ac
b67a962d44dfa41cd7250498a53322e3647dcd75
88b815a1f932febc8f324935e1a1d346466a0371
refs/heads/master
"2020-03-27T10:53:55.955101"
"2018-09-19T04:35:30"
"2018-09-19T04:35:30"
146,452,363
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6726008057594299, "alphanum_fraction": 0.697564959526062, "avg_line_length": 32.02027130126953, "blob_id": "9dcb7412f973e7ac7e74d442454105fa28f45983", "content_id": "2ddd37dd16333e7c90be0d787dd421787eee2ed1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4887, "license_type": "permissive", "max_line_length": 113, "num_lines": 148, "path": "/src/alignment.py", "repo_name": "kapilkalra04/face-off-demo-python-flask", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nimport keras\nimport pandas as pd\n \ndef rotate(face,left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y):\n\tlx = left_eye_center_x\n\tly = left_eye_center_y\n\trx = right_eye_center_x\n\try = right_eye_center_y\n\n\t# carry out angle calculations through arctan\n\tdY = ry - ly \n\tdX = rx - lx\n\n\tangle = np.degrees(np.arctan2(dY, dX))\t\t\t\t# angle should be in degrees\n\t\n\tscale = 1\t\t\t\t\t\t\t\t\t\t\n\n\tcx = (rx+lx)/2.0\n\tcy = (ry+ly)/2.0\n\tcenter = (cx,cy)\t\t\t\t\t\t\t\t\t# rotation will take place around the eye center\n\t\n\treturn center, angle, scale\n\ndef detectEyeCenters(face):\n\tcnn = load_model('src/CNN_21_1000.h5')\n\t\n\t# find the scaling ratios \n\tfaceHeight = np.float32(face.shape[0])\n\tfaceWidth = np.float32(face.shape[1])\n\theightScaling = 96.0/faceHeight\n\twidthScaling = 96.0/faceWidth\n\t\n\tface2 = face\n\t# resize the image to the size on which the CNN was trained\n\tfaceResized = cv2.resize(face2,(96,96))\n\t\n\t# prepare Input for CNN\n\tfaceResized = np.expand_dims(faceResized,axis=0)\n\tfaceResized = np.expand_dims(faceResized,axis=3)\n\tfaceResized = np.float32(faceResized)\n\tfaceResized = faceResized/255.0\n\t\n\t# obtain output\n\toutputVector = cnn.predict(faceResized)\n\toutputVector = (outputVector*48) + 48\n\n\t# scale up the eye centers 
obtained\n\tref_left_eye_center_x = outputVector[0,2]/widthScaling\n\tref_left_eye_center_y = outputVector[0,3]/heightScaling\n\tref_right_eye_center_x = outputVector[0,0]/widthScaling\n\tref_right_eye_center_y = outputVector [0,1]/heightScaling\n\tprint (ref_left_eye_center_x,ref_left_eye_center_y,ref_right_eye_center_x,ref_right_eye_center_y)\n\t\n\tkeras.backend.clear_session()\n\n\n\t# load haar cascade classifiers\n\teye_cascade = cv2.CascadeClassifier('src/haarcascade_eye.xml')\n\teye_cascade_2 = cv2.CascadeClassifier('src/haarcascade_eye_2.xml')\n\n\teyes = eye_cascade.detectMultiScale(face)\n\t\n\tif(len(eyes)<2):\n\t\teyes = eye_cascade_2.detectMultiScale(face)\n\n\tprint (eyes)\n\t\n\tboundaryX = face.shape[1]/2.0\t\t\t# separate them into Left and Right\n\tboundaryY = face.shape[0]/2.0\t\t\t# remove bottom half false candidates\n\n\teyeCenterLeftX = []\t\t\t\t\t\t\n\teyeCenterLeftY = []\n\teyeCenterLeftArea = []\n\t\n\teyeCenterRightX = []\n\teyeCenterRightY = []\n\teyeCenterRightArea = []\n\t\n\t# separate out all possible eye centers candidate into LHS and RHS candidates\n\tfor i in range(0,len(eyes)):\n\t\tif(eyes[i][0] + (eyes[i][2]/2.0) <= boundaryX - (boundaryX/16) and eyes[i][1] + (eyes[i][3]/2.0) <= boundaryY):\n\t\t\teyeCenterLeftX.append(eyes[i][0] + (eyes[i][2]/2.0))\n\t\t\teyeCenterLeftY.append(eyes[i][1] + (eyes[i][3]/2.0))\n\t\t\teyeCenterLeftArea.append(eyes[i][2] * eyes[i][3])\n\t\tif(eyes[i][0] + (eyes[i][2]/2.0) > boundaryX + (boundaryX/16) and eyes[i][1] + (eyes[i][3]/2.0) <= boundaryY):\n\t\t\teyeCenterRightX.append(eyes[i][0] + (eyes[i][2]/2.0))\n\t\t\teyeCenterRightY.append(eyes[i][1] + (eyes[i][3]/2.0))\n\t\t\teyeCenterRightArea.append(eyes[i][2] * eyes[i][3])\n\t\n\tindexL = 0\n\tindexR = 0\n\t\t\n\tif(len(eyeCenterLeftX) > 0 ):\n\t\t# obtain main left-eye-center through the largest eye-box area criteria\n\t\tminimumL = eyeCenterLeftArea[0]\n\t\tfor i in range(0,len(eyeCenterLeftArea)):\n\t\t\tif eyeCenterLeftArea[i] >= 
minimumL:\n\t\t\t\tindexL = i\n\t\t\t\tminimumL = eyeCenterLeftArea[i]\n\n\t\t# compare obtained haar cordinates to CNN coordinates\n\t\tif(abs(eyeCenterLeftX[indexL] - ref_left_eye_center_x) < 2.5/widthScaling):\n\t\t\t left_eye_center_x = eyeCenterLeftX[indexL]\n\t\telse:\n\t\t\tleft_eye_center_x = ref_left_eye_center_x\n\t\t\t\n\t\tif(abs(eyeCenterLeftY[indexL] - ref_left_eye_center_y) < 2.5/heightScaling):\n\t\t\t left_eye_center_y = eyeCenterLeftY[indexL]\n\t\telse:\n\t\t\tleft_eye_center_y = ref_left_eye_center_y\n\t\t\n\telse:\n\t\tleft_eye_center_x = ref_left_eye_center_x\n\t\tleft_eye_center_y = ref_right_eye_center_y\n\t\t\n\n\n\tif(len(eyeCenterRightX) > 0):\n\t\t# obtain main right-eye-center through the largest eye-box area criteria\n\t\tminimumR = eyeCenterRightArea[0]\n\t\tfor i in range(0,len(eyeCenterRightArea)):\n\t\t\tif eyeCenterRightArea[i] >= minimumR:\n\t\t\t\tindexR = i\n\t\t\t\tminimumR = eyeCenterRightArea[i]\n\n\t\t# compare obtained haar cordinates to CNN coordinates\n\t\tif(abs(eyeCenterRightX[indexR] - ref_right_eye_center_x) < 2.5/widthScaling):\n\t\t\t right_eye_center_x = eyeCenterRightX[indexR]\n\t\telse:\n\t\t\tright_eye_center_x = ref_right_eye_center_x\n\t\t\t\n\t\tif(abs(eyeCenterRightY[indexR] - ref_right_eye_center_y) < 2.5/heightScaling):\n\t\t\t right_eye_center_y = eyeCenterRightY[indexR]\n\t\telse:\n\t\t\tright_eye_center_y = ref_right_eye_center_y\n\t\t\n\telse:\n\t\tright_eye_center_x = ref_right_eye_center_x\n\t\tright_eye_center_y = ref_right_eye_center_y\n\t\n\n\t# print ref_left_eye_center_x,ref_left_eye_center_y,ref_right_eye_center_x,ref_right_eye_center_y\n\tprint (left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y)\t\n\treturn left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y\n" }, { "alpha_fraction": 0.6797698736190796, "alphanum_fraction": 0.7066155076026917, "avg_line_length": 23.85714340209961, "blob_id": "f49b632632e9ef61e79d7595be8b2b9d0a743918", "content_id": 
"ec9972c8cd951a719b42f57a439bb698d2b477f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1043, "license_type": "permissive", "max_line_length": 72, "num_lines": 42, "path": "/src/app.py", "repo_name": "kapilkalra04/face-off-demo-python-flask", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask import request\nimport base64\nimport siameseTrain as ST1\nimport siameseTest as ST2\nimport siameseRecognizer as SR\nimport json\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef hello():\n return \"Connection Successful\"\n\[email protected](\"/upload\", methods=['POST'])\ndef upload():\n\tbase64Data = request.form.get('imageData')\n\tempCount = request.form.get('empCount')\n\twith open(\"data/library/train2/\"+ str(empCount) + \".jpeg\", \"wb\") as fh:\n\t\tfh.write(base64.b64decode(base64Data))\n\t\n\treturn \"Data Received\"\n\t\[email protected](\"/train\",methods=['GET'])\ndef train():\n\tST1.calculateTrainEmbeddings();\n\treturn \"Repository Embeddings Generated\"\n\[email protected](\"/verify\", methods=['POST'])\ndef verify():\n\tbase64Data = request.form.get('imageData')\n\twith open(\"data/library/test2/\" + \"test.jpeg\", \"wb\") as fh:\n\t\tfh.write(base64.b64decode(base64Data))\n\tST2.calculateTestEmbeddings();\n\tresponse = SR.calculateNorm();\n\tresponse = json.dumps(response)\n\n\treturn str(response)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',debug=True)" }, { "alpha_fraction": 0.6318727135658264, "alphanum_fraction": 0.6600614190101624, "avg_line_length": 29.632478713989258, "blob_id": "39e300052f9771624427acf84b3d22ccc1f83342", "content_id": "9544902477f97c1b432efa14c1855f237977b11c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3583, "license_type": "permissive", "max_line_length": 111, "num_lines": 117, "path": "/src/detection.py", "repo_name": 
"kapilkalra04/face-off-demo-python-flask", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef convertToRGB(img): \n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\ndef convertToGRAY(img): \n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ndef detect(model,weights,image,isPath):\n\n\t# specify locations of the \tmodel and its weights\n\targs = {}\n\targs[\"model\"] = model\t\t\t\t\t# model-definition\n\targs[\"weights\"] = weights\t\t\t\t# pre-trained weights\n\targs[\"image\"] = image\t\t\t\t\t# images are loaded as 3D matrix - (h x w x c)\n\targs[\"confidence\"] = 0.75\t\t\t\t# when confidence>value then it is a face\n\n\t# load the caffe model \n\tprint (\"[INFO] Loading model\")\n\t# net = cnn used to detect faces\n\tnet = cv2.dnn.readNetFromCaffe(args[\"model\"], args[\"weights\"])\n\t \n\t# load the input image\n\tif(isPath==True): \n\t\timage = cv2.imread(args[\"image\"])\n\telse:\n\t\timage = image\n\n\t# print len(image)\t\t\t\t\t\t\t# height of the image\n\t# print len(image[0])\t\t\t\t\t\t# width of the image\n\t# print len(image[0][0])\t\t\t\t\t# no of color-channels \n\t# print image.shape\t\t\t\t\t\t\t# stores h,w,c values\n\t(h, w) = image.shape[:2]\n\n\t# construct an input blob for the image\n\t# by resizing to a fixed 300x300 pixels and then normalizing it \n\t# along with doing a mean subtraction\n\tblob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)),\n\t 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n\tprint (\"[INFO] Computing face detections...\")\n\tnet.setInput(blob)\n\tdetections = net.forward()\n\n\tcount = 0\t\t\t\t\t\t\t\t\t# count of no of faces detected\n\tfaces = {}\t\t\t\t\t\t\t\t\t# stores the faces rectangles co-ordinates\n\tfor i in range(0, detections.shape[2]):\n\t\t# extract the confidence (i.e., probability) associated with the\n\t\t# prediction\n\t\tconfidence = detections[0, 0, i, 2]\n\t\t\n\t\t# filter out weak detections by ensuring the `confidence` is\n\t\t# greater 
than the minimum confidence\n\t\tif confidence > args[\"confidence\"]:\n\t\t\t# compute the (x, y)-coordinates of the bounding box for the\n\t\t\t# face\n\t\t\tfaces[i] = []\n\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\t# extracting integral values\n\t\t\t# adding area details along with co-ordinate values\n\t\t\tif(startX < 0):\n\t\t\t\tstartX = 0\n\t\t\tif(startY < 0):\n\t\t\t\tstartY = 0\n\t\t\tif(endX < 0):\n\t\t\t\tendX = 0\n\t\t\tif(endY < 0):\n\t\t\t\tendY = 0\n\t\t\tif(startX > w):\n\t\t\t\tstartX = w\n\t\t\tif(startY > h):\n\t\t\t\tstartY = h\n\t\t\tif(endX > w):\n\t\t\t\tendX = w\n\t\t\tif(endY > h):\n\t\t\t\tendY = h\n\t\t\tfaces[i].extend([startX,endX,startY,endY,((endX-startX)*(endY-startY))])\n\n\t\t\t# plotting the face rectangle\t\n\t\t\tx = []\n\t\t\ty = []\n\n\t\t\t# plot the box\n\t\t\tx.extend([startX,endX,endX,startX,startX])\n\t\t\ty.extend([startY,startY,endY,endY,startY])\n\t\t\tplt.plot(x,y)\n\t\t\tcount = count + 1\n\n\tprint (\"Faces Detected = \" + str(count))\n\t\n\tlargestFaceIndex = -1\n\tlargestAreaYet = 0\n\t\n\tfor i in range(0,len(faces)):\n\t\tif(faces[i][4]>largestAreaYet):\n\t\t\tlargestFaceIndex = i\n\t\t\tlargestAreaYet = faces[i][4]\n\tif isPath == True:\t\t\n\t\treturn convertToRGB(image),convertToGRAY(image),faces[largestFaceIndex]\n\telse:\n\t\treturn image,convertToGRAY(image),faces[largestFaceIndex]\n\nif __name__ == '__main__':\n\tmodel = \"src/deploy.prototxt.txt\"\t\t\t\t\t\t\t# model-definition\n\tweights = \"src/res10_300x300_ssd_iter_140000.caffemodel\"\t# pre-trained weights\n\timage = \"data/library/test2/test.jpeg\"\t\t\t\t\t\t\t\t# image name reqd. 
images are loaded as 3D matrix - (h x w x c)\t\n\tisPath = True\n\tprint (\"Hello\")\n\tplt.subplot(2,1,1)\n\tcolorImage, grayImage, mainFaceBox = detect(model,weights,image,isPath)\n\tplt.imshow(colorImage)\n\tplt.subplot(2,1,2)\n\tmainFaceGray = grayImage[mainFaceBox[2]:mainFaceBox[3], mainFaceBox[0]:mainFaceBox[1]]\n\tplt.imshow(mainFaceGray)\n\tplt.show()" }, { "alpha_fraction": 0.7323905825614929, "alphanum_fraction": 0.7553361654281616, "avg_line_length": 37.24489974975586, "blob_id": "92317cac6dda029fc43b199051d7d187b8bb84bc", "content_id": "74d9b91c6c0f2cd6589d67d53d409f5feea3b9bb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3748, "license_type": "permissive", "max_line_length": 128, "num_lines": 98, "path": "/src/pre_processing2.py", "repo_name": "kapilkalra04/face-off-demo-python-flask", "src_encoding": "UTF-8", "text": "import detection\nimport matplotlib.pyplot as plt\nimport cv2\nimport alignment\n\ndef detectMainFace(imageName,isPath):\t\n\tmodel = \"src/deploy.prototxt.txt\"\t\t\t\t\t\t\t# model-definition\n\tweights = \"src/res10_300x300_ssd_iter_140000.caffemodel\"\t# pre-trained weights\n\timage = imageName\t\t\t\t\t\t\t\t\t\t\t# image name reqd. 
images are loaded as 3D matrix - (h x w x c)\t\n\n\t# send for face detection\n\tcolorImage, grayImage, mainFaceBox = detection.detect(model,weights,image,isPath)\n\t\n\t# crop the misaligned face from the whole image\n\tmainFaceGray = grayImage[mainFaceBox[2]:mainFaceBox[3], mainFaceBox[0]:mainFaceBox[1]]\n\tmainFaceColor = colorImage[mainFaceBox[2]:mainFaceBox[3], mainFaceBox[0]:mainFaceBox[1]]\n\n\treturn colorImage, mainFaceColor, mainFaceGray, mainFaceBox\n\ndef alignImage(colorImage,mainFaceGray,mainFaceBox):\n\t# obtain eye centers\n\tleft_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y = alignment.detectEyeCenters(mainFaceGray)\n\t\n\t# obtain affine transformation values\n\tcenter, angle, scale = alignment.rotate(mainFaceGray,left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y)\n\t\n\t# update co-ordinates according to colorImage the orignal iage\n\tleft_eye_center_x = left_eye_center_x + mainFaceBox[0]\n\tright_eye_center_x = right_eye_center_x + mainFaceBox[0]\n\tleft_eye_center_y = left_eye_center_y + mainFaceBox[2]\n\tright_eye_center_y = right_eye_center_y + mainFaceBox[2]\n\tcenter = (center[0]+mainFaceBox[0],center[1]+mainFaceBox[2])\n\n\t# perform affine transformation\n\tM = cv2.getRotationMatrix2D(center, angle, scale)\n\talignedImage = cv2.warpAffine(colorImage,M,(colorImage.shape[1],colorImage.shape[0]),flags=cv2.INTER_CUBIC)\n\t\n\treturn alignedImage, left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y\n\n# return the face in gray scale\ndef getFaceGray(imagePath):\n\t# detect the misaligned largest face in gray\n\tcolorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace(imagePath,True)\n\t\n\t# straighten the actual image\n\talignedImage, e1x, e1y, e2x, e2y = alignImage(colorImage,mainFaceGray,mainFaceBox)\n\t\n\t# detect the aligned largest face in gray\n\tcolorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace(alignedImage,False)\n\t\n\t# apply 
denoising\n\tmainFaceGray = cv2.fastNlMeansDenoising(mainFaceGray)\t\t\t\t\t\t\t\t\t\t# denoising\n\t\t\n\treturn mainFaceGray\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# returns a grayscaled,aligned,(256,256) face\n\n# return the face in RGB\ndef getFaceColor(imagePath):\n\t# detect the misaligned largest face in gray\n\tcolorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace(imagePath,True)\n\t\n\t# straighten the actual image\n\talignedImage, e1x, e1y, e2x, e2y = alignImage(colorImage,mainFaceGray,mainFaceBox)\n\t\n\t# detect the aligned largest face in gray\n\tcolorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace(alignedImage,False)\n\t\n\t# apply denoising\n\tmainFaceColor = cv2.fastNlMeansDenoisingColored(mainFaceColor)\t\t\t\t\t\t\t\t# denoising\n\t\n\treturn mainFaceColor\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# returns a grayscaled,aligned,(256,256) face\n\nif __name__ == '__main__':\n\t\n\tplt.subplot(2,2,1)\n\tcolorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace('data/library/test2/test.jpeg',True)\n\tplt.imshow(colorImage)\n\n\tplt.subplot(2,2,2)\n\tplt.imshow(mainFaceColor)\n\t\n\talignedImage, e1x, e1y, e2x, e2y = alignImage(colorImage,mainFaceGray,mainFaceBox)\n\tX = [e1x,e2x]\n\tY = [e1y,e2y]\n\tplt.subplot(2,2,3)\n\tplt.imshow(alignedImage)\n\tplt.plot(X,Y,'-D',markersize=3)\n\n\tplt.subplot(2,2,4)\n\t# plt.imshow(alignedImage,cmap='gray')\n\t# plt.show()\n\t\n\tcolorImage, mainFaceColor, mainFaceGray, mainFaceBox = detectMainFace(alignedImage,False)\n\tplt.imshow(mainFaceColor)\n\tplt.show()\n\t\n\n\t# plt.imshow(getFace('data/library/train/IMG_0007.JPG'),cmap='gray')\n\t# plt.show()\n" }, { "alpha_fraction": 0.6178650856018066, "alphanum_fraction": 0.6578947305679321, "avg_line_length": 35.9315071105957, "blob_id": "7d98b93066d863b2698d5008ed6b069b472dcf3a", "content_id": "57bd99895d93c59e402aeb1cfea286007999cd47", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 2698, "license_type": "permissive", "max_line_length": 98, "num_lines": 73, "path": "/src/siameseTest.py", "repo_name": "kapilkalra04/face-off-demo-python-flask", "src_encoding": "UTF-8", "text": "# The pre-trained model was provided by https://github.com/iwantooxxoox/Keras-OpenFace #\n\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport glob\nimport pre_processing2 as pre\nimport matplotlib.pyplot as plt\n\ndef load_graph(frozen_graph_filename):\n with tf.gfile.GFile(frozen_graph_filename, \"rb\") as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(\n graph_def, \n input_map=None, \n return_elements=None, \n producer_op_list=None\n )\n \n return graph\n\n\ndef calculateTestEmbeddings():\n graph = load_graph('src/20180402-114759/20180402-114759.pb')\n \n faceList = []\n for imagePath in glob.glob('data/library/test2/*'):\n # loading cropped,RGBscale,aligned (160,160)sized faces as reqd by FaceNet\n faceList.append(np.expand_dims(cv2.resize(pre.getFaceColor(imagePath),(160,160)), axis=0))\n \n with tf.Session(graph=graph) as sess:\n images_placeholder = graph.get_tensor_by_name(\"import/input:0\")\n embeddings = graph.get_tensor_by_name(\"import/embeddings:0\")\n phase_train_placeholder = graph.get_tensor_by_name(\"import/phase_train:0\")\n \n faceListInput = np.concatenate(faceList, axis=0)\n #normalizing the input\n faceListInput = np.float32(faceListInput)/255.0\n \n feedDict = {phase_train_placeholder: False, images_placeholder: faceListInput}\n values = sess.run(embeddings,feedDict)\n\n # save embedding values \n np.save('src/cstmrEmbeddings',values)\n\n tf.reset_default_graph(); \n\nif __name__ == '__main__':\n\n graph = load_graph('src/20180402-114759/20180402-114759.pb')\n faceList = []\n for imagePath in glob.glob('data/library/test2/*'):\n # loading cropped,RGBscale,aligned (160,160)sized faces as reqd by FaceNet\n 
faceList.append(np.expand_dims(cv2.resize(pre.getFaceColor(imagePath),(160,160)), axis=0))\n \n with tf.Session(graph=graph) as sess:\n images_placeholder = graph.get_tensor_by_name(\"import/input:0\")\n embeddings = graph.get_tensor_by_name(\"import/embeddings:0\")\n phase_train_placeholder = graph.get_tensor_by_name(\"import/phase_train:0\")\n \n faceListInput = np.concatenate(faceList, axis=0)\n #normalizing the input\n faceListInput = np.float32(faceListInput)/255.0\n print (faceListInput.shape)\n\n feedDict = {phase_train_placeholder: False, images_placeholder: faceListInput}\n values = sess.run(embeddings,feedDict)\n\n # save embedding values \n np.save('src/cstmrEmbeddings',values)\n\n\n" }, { "alpha_fraction": 0.6773518919944763, "alphanum_fraction": 0.7031359076499939, "avg_line_length": 22.933332443237305, "blob_id": "4c30365d6cfd4fa9ecafb2a0576e6f31200ca007", "content_id": "acc1ff6ae758f2464ded819192ac626ed918e21b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1435, "license_type": "permissive", "max_line_length": 81, "num_lines": 60, "path": "/src/siameseRecognizer.py", "repo_name": "kapilkalra04/face-off-demo-python-flask", "src_encoding": "UTF-8", "text": "import numpy as np\nimport glob\nimport cv2\nimport pre_processing2 as pre\nimport matplotlib.pyplot as plt\n\ndef calculateNorm():\n\n\tempEmbeddings = np.load('src/empEmbeddings.npy')\n\t\n\tcstmrEmbeddings = np.load('src/cstmrEmbeddings.npy')\n\t\n\tfaceListTrain = []\n\tfaceListTest = []\n\n\tanswer = {}\n\tnorm = []\n\tfor i in range(0,len(empEmbeddings)):\n\t\tfor j in range(0,len(cstmrEmbeddings)):\n\t\t\tnorm.append(np.float64(np.linalg.norm(empEmbeddings[i] - cstmrEmbeddings[j])))\n\tflag = \"NO\"\n\tfor e in norm:\n\t\tif(e<0.9):\n\t\t\tflag = 'YES'\n\t\t\tbreak\n\n\tanswer['norm'] = norm\n\tanswer['result'] = flag;\n\treturn answer\n\nif __name__ == '__main__':\n\tempEmbeddings = np.load('src/empEmbeddings.npy')\n\tprint 
(empEmbeddings.shape)\n\n\tcstmrEmbeddings = np.load('src/cstmrEmbeddings.npy')\n\tprint (cstmrEmbeddings.shape)\n\n\tfaceListTrain = []\n\tfaceListTest = []\n\n\tfor imagePath in glob.glob('data/library/train2/*'):\n\t\tfaceListTrain.append(cv2.resize(pre.getFaceColor(imagePath),(160,160)))\n\n\tfor imagePath in glob.glob('data/library/test2/*'):\n\t\tfaceListTest.append(cv2.resize(pre.getFaceColor(imagePath),(160,160)))\n\n\tplt.subplot2grid((1,4),(0,0))\n\tplt.imshow(faceListTrain[0])\n\n\tfor i in range(0,len(empEmbeddings)):\n\t\tfor j in range(0,len(cstmrEmbeddings)):\n\t\t\tplt.subplot2grid((1,4),(0,j+1))\n\t\t\tplt.imshow(faceListTest[j])\n\t\t\tplt.title(np.linalg.norm(empEmbeddings[i] - cstmrEmbeddings[j]))\n\n\n\n\tplt.tight_layout()\n\tplt.suptitle('ONE SHOT LEARNING TEST')\n\tplt.show()" }, { "alpha_fraction": 0.6519736647605896, "alphanum_fraction": 0.6980262994766235, "avg_line_length": 24.33333396911621, "blob_id": "2d663b62c4523d19a96bb81cfbf0b7c4d9ab11c0", "content_id": "aef33d594aecdb8203f9c309c71b0d0f9aa9eeb2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1520, "license_type": "permissive", "max_line_length": 65, "num_lines": 60, "path": "/src/recognition.py", "repo_name": "kapilkalra04/face-off-demo-python-flask", "src_encoding": "UTF-8", "text": "import glob\nimport numpy as np\nimport pre_processing2 as pre\nimport cv2\nimport matplotlib.pyplot as plt\n\nimages = []\nfor imagePath in glob.glob('data/library/train2/*'):\n\timages.append(imagePath)\n\nfaceList = []\n# labelList = [0,0,0,0,0,0,0,0,0,0]\nlabelList = [0]\n\nindex = 0\n\nfor path in images:\n\ttemp = pre.getFaceGray(path)\n\ttemp = cv2.resize(temp,(369,512))\n\tfaceList.append(temp)\t\t\t\n\tprint (\"[INFO] Image Loaded: \" + str(index+1))\n\tprint (faceList[-1].shape)\n\t# plt.subplot2grid((5,3),(index%5,index/5))\n\tplt.subplot2grid((1,4),(0,0))\n\tplt.imshow(faceList[-1])\n\tindex = index + 1\n\n\nprint 
(labelList)\nfaceRecognizer = cv2.face.LBPHFaceRecognizer_create(1,8,8,8,123)\nfaceRecognizer.train(faceList,np.array(labelList))\n\nimagesTest = []\nfor imagePath in glob.glob('data/library/test2/*'):\n\timagesTest.append(imagePath)\n\nprint (\"[INFO] ========TESTING=======\")\nfaceListTest = []\nprediction = {}\nindex = 0\nfor path in imagesTest:\n\ttestSample = pre.getFaceGray(path)\t\t\t#np.array.shape = (256,256)\n\ttestSample = cv2.resize(testSample,(369,512))\n\tprint (\"[INFO] Test Image Loaded: \" + str(index+1))\n\tprediction[index] = []\n\tpredictedLabel, confidence = faceRecognizer.predict(testSample)\n\t\n\t# plt.subplot2grid((5,3),(index,2))\n\tplt.subplot2grid((1,4),(0,index+1))\n\tplt.imshow(testSample,cmap='gray')\n\tplt.title(str(predictedLabel) + \" : \" + str(confidence))\n\t\n\tprediction[index].extend([predictedLabel,confidence])\t\n\tindex = index + 1\n\t\n\nplt.tight_layout()\nplt.suptitle('ONE SHOT LEARNING TEST')\nplt.show()\nprint (prediction)\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 28, "blob_id": "0f24e8c692a9277aafed1e3030f4ca0cbcfc5718", "content_id": "589a24ba4e8ac8e7a0857da92d72c02e3159a4df", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "permissive", "max_line_length": 28, "num_lines": 1, "path": "/README.md", "repo_name": "kapilkalra04/face-off-demo-python-flask", "src_encoding": "UTF-8", "text": "# face-off-demo-python-flask" } ]
8
acheng6845/PuzzleSolver
https://github.com/acheng6845/PuzzleSolver
9406b951137f213cb9eb13ceb7035f96e10d6903
abebea81e49351daeda98d446ceb060c0beb4e0f
901a13b996ddf22cea8ce0af687bf464d3707e0b
refs/heads/master
"2021-09-02T07:43:00.326951"
"2017-12-31T15:44:04"
"2017-12-31T15:44:04"
115,871,564
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6023392081260681, "alphanum_fraction": 0.6065163016319275, "avg_line_length": 31.37837791442871, "blob_id": "f23ddb3f4d248b7a9aa0acf8ea13c01829de652d", "content_id": "b45f8e92eb724762c2e937e70e4cc52c7fbb92d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1197, "license_type": "no_license", "max_line_length": 88, "num_lines": 37, "path": "/PADCompleter.py", "repo_name": "acheng6845/PuzzleSolver", "src_encoding": "UTF-8", "text": "__author__ = 'Aaron'\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nfrom PyQt5 import QtWidgets, QtCore, QtGui\n\nclass PADCompleter(QCompleter):\n def __init__(self):\n super().__init__()\n self.prefix = ''\n self.model = None\n\n def _set_model_(self, model):\n self.model = model\n super().setModel(self.model)\n\n def _update_model_(self):\n prefix = self.prefix\n\n class InnerProxyModel(QSortFilterProxyModel):\n def filterAcceptsRow(self, row, parent):\n index = self.sourceModel().index(row, 0, parent)\n search_string = prefix.lower()\n model_string = self.sourceModel().data(index, Qt.DisplayRole).lower()\n #print(search_string, 'in', model_string, search_string in model_string)\n return search_string in model_string\n\n proxy_model = InnerProxyModel()\n proxy_model.setSourceModel(self.model)\n self.setModel(proxy_model)\n #print('match :', proxy_model.rowCount())\n\n def splitPath(self, path):\n self.prefix = str(path)\n self._update_model_()\n return self.sourceModel().data()" }, { "alpha_fraction": 0.5897377133369446, "alphanum_fraction": 0.6005200147628784, "avg_line_length": 47.794776916503906, "blob_id": "4283c2ec4d18c0253a11c17bd1819684e438b1ee", "content_id": "a11c6e27b2f2b57a6090ef588443c984606a8d12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26154, "license_type": "no_license", "max_line_length": 117, "num_lines": 536, "path": 
"/Calculator_Screen.py", "repo_name": "acheng6845/PuzzleSolver", "src_encoding": "UTF-8", "text": "__author__ = 'Aaron'\n# Class Description:\n# Create framework for the split screens used in PAD_GUI\n# import necessary files\nimport os\nimport json\nfrom functools import partial\nfrom PyQt5.QtWidgets import (QLabel, QWidget, QHBoxLayout,\n QFrame, QSplitter, QStyleFactory,\n QGridLayout, QLineEdit, QPushButton,\n QVBoxLayout, QCompleter, QComboBox,\n QScrollArea, QToolTip)\nfrom PyQt5.QtGui import QPixmap, QColor, QFont\nfrom PyQt5.QtCore import Qt, QStringListModel\nfrom PAD_Monster import PADMonster\nfrom PAD_Team import PADTeam\n\nclass CalculatorScreen(QHBoxLayout):\n def __init__(self, gui):\n super().__init__()\n\n # 0 = lead1, 1 = sub1,..., 5 = lead2\n self.team = [PADMonster() for x in range(6)]\n self.pad_team = PADTeam(self.team)\n # keeps old team stats before modification from leader multipliers\n self.team_base = [PADMonster() for x in range(6)]\n\n # open monsters.txt and load it into a python object using json\n # self.json_file = requests.get('https://padherder.com/api/monsters')\n self.json_file = open(os.path.join('.\\monsters.txt'), 'r')\n self.json_monsters = json.loads(self.json_file.read())\n # print(self.json_monsters[0][\"name\"])\n\n self.completer_string_list_model = QStringListModel()\n array_of_monster_names = []\n for x in range(len(self.json_monsters)):\n array_of_monster_names.append(self.json_monsters[x][\"name\"])\n self.completer_string_list_model.setStringList(array_of_monster_names)\n\n # checks if the modified button has been pressed so other functions can know which stat to display\n self.is_pressed = False\n\n QToolTip.setFont(QFont('SansSerif', 10))\n\n self.init_screen(gui)\n\n def init_screen(self, gui):\n\n # add things to top of the screen here (Monitor section)!\n\n # Create an overarching top widget/layout\n\n supreme_top_box = QWidget()\n supreme_top_box_layout = QVBoxLayout()\n 
supreme_top_box.setLayout(supreme_top_box_layout)\n\n # Monitor section will have labels inside of a grid layout\n top_box = QWidget()\n grid = QGridLayout()\n top_box.setLayout(grid)\n supreme_top_box_layout.addWidget(top_box)\n\n # Creates lists of labels, initially having only static labels and having\n # the tangible labels substituted with ''\n static_labels = ['', '', '', '', '', '', '', '',\n '', 'Lead 1', 'Sub 1 ', 'Sub 2 ', 'Sub 3 ', 'Sub 4 ', 'Lead 2', 'Team Totals',\n 'Type:', '', '', '', '', '', '', '',\n 'HP:', 0, 0, 0, 0, 0, 0, 0,\n 'Atk:', 0, 0, 0, 0, 0, 0, 0,\n 'Pronged Atk:', 0, 0, 0, 0, 0, 0, 0,\n 'RCV:', 0, 0, 0, 0, 0, 0, 0,\n 'Awakenings:', '', '', '', '', '', '', '']\n\n self.display_labels = [QLabel(gui) for x in range(len(static_labels))]\n\n for s_label, d_label in zip(static_labels, self.display_labels):\n\n if s_label == '':\n continue\n d_label.setText(str(s_label))\n\n positions = [(i, j) for i in range(8) for j in range(8)]\n\n for position, d_label in zip(positions, self.display_labels):\n # why *position? 
because the array is [(i,j), (i,j),...,(i,j)]\n grid.addWidget(d_label, *position)\n grid.setAlignment(d_label, Qt.AlignHCenter)\n\n self.leader_skills_labels = [QLabel(gui) for x in range(2)]\n for x in range(2):\n self.leader_skills_labels[x].setText('Leader Skill '+str(x+1)+': ')\n supreme_top_box_layout.addWidget(self.leader_skills_labels[x])\n\n # Create another row of labels for Awoken Skills Image Lists\n\n # Create another row of labels to show the Leader Skill Multipliers\n\n ########################################################################\n\n # add things to bottom of the screen here (Input section)!\n\n # Input section will be split in two: have LineEdits in a grid layout and then PushButtons in a separate grid\n # layout\n bottom_box = QWidget()\n grid2 = QGridLayout()\n bottom_box.setLayout(grid2)\n\n bottom_labels_text = ['Leader 1', 'Sub 1', 'Sub 2', 'Sub 3', 'Sub 4', 'Leader 2']\n bottom_labels = [QLabel(gui) for x in range(6)]\n instruction_labels_text = ['Please enter the name here:', 'Enter level here:', 'Enter pluses here:']\n instruction_labels = [QLabel(gui) for x in range(3)]\n self.line_edits = [QLineEdit(gui) for x in range(6)]\n line_edit_completer = QCompleter()\n line_edit_completer.setCaseSensitivity(Qt.CaseInsensitive)\n line_edit_completer.setFilterMode(Qt.MatchContains)\n line_edit_completer.setModel(self.completer_string_list_model)\n\n # Combo Boxes for Levels and Pluses\n level_boxes = [QComboBox(gui) for x in range(6)]\n self.plus_boxes_types = [QComboBox(gui) for x in range(6)]\n self.plus_boxes_values = [QComboBox(gui) for x in range(6)]\n for x in range(6):\n for n in range(0,100):\n if n != 0 and n <= self.team[x].max_level:\n level_boxes[x].addItem(str(n))\n self.plus_boxes_values[x].addItem(str(n))\n self.plus_boxes_types[x].addItem('hp')\n self.plus_boxes_types[x].addItem('atk')\n self.plus_boxes_types[x].addItem('rcv')\n self.plus_boxes_values[x].hide()\n\n # add the labels and line_edits to the bottom grid\n for 
x in range(6):\n bottom_labels[x].setText(bottom_labels_text[x])\n bottom_labels[x].adjustSize()\n grid2.addWidget(bottom_labels[x], *(x+1, 0))\n grid2.addWidget(self.line_edits[x], *(x+1, 1))\n grid2.addWidget(level_boxes[x], *(x+1, 2))\n grid2.addWidget(self.plus_boxes_types[x], *(x+1, 3))\n grid2.addWidget(self.plus_boxes_values[x], *(x+1, 3))\n self.line_edits[x].textChanged[str].connect(partial(self._on_changed_, x))\n self.line_edits[x].setCompleter(line_edit_completer)\n self.line_edits[x].setMaxLength(50)\n level_boxes[x].activated[str].connect(partial(self._on_level_activated_, x))\n self.plus_boxes_types[x].activated[str].connect(partial(self._on_plus_type_activated_, x))\n for x in range(3):\n instruction_labels[x].setText(instruction_labels_text[x])\n instruction_labels[x].adjustSize()\n grid2.addWidget(instruction_labels[x], *(0, x+1))\n\n ###########################################################################\n\n # create the button widgets in a separate widget below bottom_box\n below_bottom_box = QWidget()\n grid3 = QGridLayout()\n below_bottom_box.setLayout(grid3)\n\n # create a set of buttons below the line_edits:\n # White(Base) Red Blue Green Yellow Purple\n buttons = []\n button_labels = ['Fire', 'Water', 'Wood', 'Light', 'Dark', 'Base']\n button_colors = ['red', 'lightskyblue', 'green', 'goldenrod', 'mediumpurple', 'white']\n for x in range(6):\n buttons.append(QPushButton(button_labels[x], gui))\n buttons[x].clicked.connect(partial(self._handle_button_, x))\n buttons[x].setStyleSheet('QPushButton { background-color : %s }' % button_colors[x])\n grid3.addWidget(buttons[x], *(0, x))\n\n # create a QHBoxLayout widget that holds the page turners and toggle\n page_turner = QWidget()\n page_turner_layout = QHBoxLayout()\n page_turner.setLayout(page_turner_layout)\n\n # create the page turner and toggle widgets\n page_turner_layout.addStretch()\n self.toggle_button = QPushButton('Toggle On Modified Stats', gui)\n 
self.toggle_button.setCheckable(True)\n self.toggle_button.clicked[bool].connect(self._handle_toggle_button_)\n page_turner_layout.addWidget(self.toggle_button)\n page_turner_layout.addStretch()\n\n # Create overarching bottom widget\n supreme_bottom_box = QWidget()\n supreme_bottom_box_layout = QVBoxLayout()\n supreme_bottom_box.setLayout(supreme_bottom_box_layout)\n button_label = QLabel('Select from below the attribute you would like to display.')\n supreme_bottom_box_layout.setAlignment(button_label, Qt.AlignHCenter)\n supreme_bottom_box_layout.addWidget(bottom_box)\n supreme_bottom_box_layout.addWidget(button_label)\n supreme_bottom_box_layout.addWidget(below_bottom_box)\n supreme_bottom_box_layout.addWidget(page_turner)\n\n # Add the two screens into a split screen\n splitter = QSplitter(Qt.Vertical)\n splitter.addWidget(supreme_top_box)\n splitter.addWidget(supreme_bottom_box)\n\n # Add the split screen to our main screen\n self.addWidget(splitter)\n\n def _create_monster_(self, index, dict_index, name):\n\n \"\"\"\n When a valid name has been entered into the line edits, create a PADMonster Class\n using the values stored in the json dictionary and save the PADMonster to the appropriate\n index in the team array and PADTeam Class subsequently.\n :param index: 0 = lead 1, 1 = sub 1, 2 = sub 2, 3 = sub 3, 4 = sub 4, 5 = lead 2\n :param dict_index: the index in the json dictionary containing the monster\n :param name: the monster's name\n \"\"\"\n self.team[index] = PADMonster()\n self.team_base[index] = PADMonster()\n\n hp_max = self.json_monsters[dict_index][\"hp_max\"]\n atk_max = self.json_monsters[dict_index][\"atk_max\"]\n rcv_max = self.json_monsters[dict_index][\"rcv_max\"]\n attr1 = self.json_monsters[dict_index][\"element\"]\n attr2 = self.json_monsters[dict_index][\"element2\"]\n type1 = self.json_monsters[dict_index][\"type\"]\n type2 = self.json_monsters[dict_index][\"type2\"]\n image60_size = self.json_monsters[dict_index][\"image60_size\"]\n 
image60_href = self.json_monsters[dict_index][\"image60_href\"]\n awakenings = self.json_monsters[dict_index][\"awoken_skills\"]\n leader_skill_name = self.json_monsters[dict_index][\"leader_skill\"]\n max_level = self.json_monsters[dict_index][\"max_level\"]\n hp_min = self.json_monsters[dict_index][\"hp_min\"]\n atk_min = self.json_monsters[dict_index][\"atk_min\"]\n rcv_min = self.json_monsters[dict_index][\"rcv_min\"]\n hp_scale = self.json_monsters[dict_index][\"hp_scale\"]\n atk_scale = self.json_monsters[dict_index][\"atk_scale\"]\n rcv_scale = self.json_monsters[dict_index][\"rcv_scale\"]\n # use PAD_Monster's function to set our monster's stats\n self.team[index].set_base_stats(name, hp_max, atk_max, rcv_max, attr1, attr2, type1,\n type2, image60_size, image60_href, awakenings,\n leader_skill_name, max_level, hp_min, hp_scale,\n atk_min, atk_scale, rcv_min, rcv_scale)\n # create a PADTeam Class according to our team of Six PADMonster Classes\n self.pad_team = PADTeam(self.team)\n # set our labels according to our monsters\n self._set_labels_(self.team[index], index)\n\n # save our team for future modifications:\n self.team_base[index].set_base_stats(name, hp_max, atk_max, rcv_max, attr1, attr2, type1,\n type2, image60_size, image60_href, awakenings,\n leader_skill_name, max_level, hp_min, hp_scale,\n atk_min, atk_scale, rcv_min, rcv_scale)\n\n def _set_labels_(self, monster, index):\n \"\"\"\n Set the labels according to the values in the indexed PADMonster Class\n :param monster: the PADMonster associated with the index\n :param index: the index associated with the PADMonster [0-5]\n \"\"\"\n # extract and display image\n self.display_labels[index + 1].setPixmap(QPixmap(os.path.join('images') + '/' + monster.name + '.png'))\n # display name\n font = QFont()\n font.setPointSize(5)\n type_text = monster.type_main_name+'/'+monster.type_sub_name\n self.display_labels[index + 17].setText(type_text)\n self.display_labels[index + 17].setFont(font)\n 
self.display_labels[index + 17].adjustSize()\n self.display_labels[index + 17].setToolTip(type_text)\n # display hp\n hp = monster.hp\n # if modified by leader skills button has been pressed, multiply monster's stat by its\n # respective index in the stats modified variable of the PADTeam Class\n if self.is_pressed:\n hp *= self.pad_team.stats_modified_by[index][0]\n # if plus values have been set, display how many\n if monster.hp_plus > 0:\n self.display_labels[index + 25].setText(str(round(hp)) + ' (+' + str(monster.hp_plus) + ')')\n else:\n self.display_labels[index + 25].setText(str(round(hp)))\n self.display_labels[index + 25].adjustSize()\n # display attack and pronged attack of main element\n self._set_attack_labels_(index, 5, monster.atk[monster.attr_main], monster.pronged_atk[monster.attr_main],\n monster.base_atk_plus)\n # display rcv\n rcv = monster.rcv\n # if modified by leader skills button has been pressed, multiply monster's stat by its\n # respective index in the stats modified variable of the PADTeam Class\n if self.is_pressed:\n rcv *= self.pad_team.stats_modified_by[index][2]\n # if plus values have been set, display how many\n if monster.rcv_plus > 0:\n self.display_labels[index + 49].setText(str(round(rcv)) + ' (+' + str(monster.rcv_plus) + ')')\n else:\n self.display_labels[index + 49].setText(str(round(rcv)))\n self.display_labels[index + 49].adjustSize()\n # display awakenings\n awakenings_text = ''\n awakenings_font = QFont()\n awakenings_font.setPointSize(6)\n for x in range(len(monster.awakenings)):\n if monster.awakenings[x][2] > 0:\n awakenings_text += monster.awakenings[x][0]+': '+str(monster.awakenings[x][2])+'\\n'\n # set awakenings string to a tooltip since it can't fit into the grid\n self.display_labels[index + 57].setText('Hover Me!')\n self.display_labels[index + 57].setFont(awakenings_font)\n self.display_labels[index + 57].adjustSize()\n self.display_labels[index + 57].setToolTip(awakenings_text)\n # calculate and change our 
display labels for team total values with each change in monster\n self._set_team_labels_()\n # if the monster is in the first or last index, it's considered the leader and its leader skill name\n # and effect are displayed accordingly.\n if index == 0:\n text = 'Leader Skill 1: '+self.team[0].leader_skill_name+' > '+self.team[0].leader_skill_desc\n # if the string is too long, splice it up\n if len(text) > 50:\n divider = len(text)//2\n # separate the string at a part that is a whitespace\n while text[divider] != ' ':\n divider += 1\n final_text = text[:divider]+'\\n'+text[divider:]\n else:\n final_text = text\n self.leader_skills_labels[0].setText(final_text)\n\n elif index == 5:\n text = 'Leader Skill 1: '+self.team[5].leader_skill_name+' > '+self.team[5].leader_skill_desc\n # if the string is too long, splice it up\n if len(text) > 50:\n divider = len(text)//2\n # separate the string at a part that is a whitespace\n while text[divider] != ' ':\n divider += 1\n final_text = text[:divider]+'\\n'+text[divider:]\n else:\n final_text = text\n self.leader_skills_labels[1].setText(final_text)\n\n def _set_attack_labels_(self, index, color_num, atk_value, pronged_atk_value, plus_value = 0):\n \"\"\"\n Set the attack labels according to the values given.\n :param index: the index of the PADMonster [0-5] and 6 = the team total\n :param color_num: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base\n :param atk_value: the value to be displayed in the attack label\n :param pronged_atk_value: the value to be displayed in the pronged attack label\n :param plus_value: the amount of pluses is set to 0 initially\n \"\"\"\n # an array holding the colors associated with each value of color_num\n colors = ['red', 'blue', 'green', 'goldenrod', 'purple', 'black']\n\n # if modified by leader skills button has been pressed, multiply monster's stat by its\n # respective index in the stats modified variable of the PADTeam Class\n if self.is_pressed and index != 6:\n atk_value 
*= self.pad_team.stats_modified_by[index][1]\n pronged_atk_value *= self.pad_team.stats_modified_by[index][1]\n # display attack of main element\n if plus_value > 0:\n self.display_labels[index + 33].setText(str(round(atk_value)) + ' (+' + str(plus_value) + ')')\n else:\n self.display_labels[index + 33].setText(str(round(atk_value)))\n self.display_labels[index + 33].setStyleSheet(\"QLabel { color : %s }\" % colors[color_num])\n self.display_labels[index + 33].adjustSize()\n # display pronged attack of main element\n self.display_labels[index + 41].setText(str(round(pronged_atk_value)))\n self.display_labels[index + 41].setStyleSheet(\"QLabel {color : %s }\" % colors[color_num])\n self.display_labels[index + 41].adjustSize()\n\n def _set_team_labels_(self):\n \"\"\"\n Access the PADTeam Class to extract the values to be displayed in the Team Totals Labels\n \"\"\"\n # initialize objects to store the total values\n hp_total = self.pad_team.hp\n atk_total = self.pad_team.base_atk\n pronged_atk_total = self.pad_team.base_pronged_atk\n rcv_total = self.pad_team.rcv\n total_awakenings = self.pad_team.awakenings\n\n # if the modified by leader skills button is pressed, use the team's modified stats instead\n if self.is_pressed:\n hp_total = self.pad_team.hp_modified\n atk_total = self.pad_team.base_atk_modified\n pronged_atk_total = self.pad_team.base_pronged_atk_modified\n rcv_total = self.pad_team.rcv_modified\n\n # display our total value objects on our labels\n self.display_labels[31].setText(str(round(hp_total)))\n self.display_labels[31].adjustSize()\n self._set_attack_labels_(6, 5, atk_total, pronged_atk_total)\n self.display_labels[55].setText(str(round(rcv_total)))\n self.display_labels[55].adjustSize()\n\n # set the label containing the team's total awakenings to a tooltip since it won't fit\n awakenings_font = QFont()\n awakenings_font.setPointSize(6)\n self.display_labels[63].setText('Hover Me!')\n self.display_labels[63].setFont(awakenings_font)\n 
self.display_labels[63].adjustSize()\n self.display_labels[63].setToolTip(total_awakenings)\n\n def _get_total_attr_attack_(self, attr):\n \"\"\"\n Returns the values stored in PADTeam for the Team's Total Attacks and Pronged Attacks\n for the specified element or the sum of all the element's attacks (BASE)\n :param attr: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base\n :return:\n \"\"\"\n # if we're not looking for the base values a.k.a. sum of all the values\n if attr != 5:\n if not self.is_pressed:\n atk_total = self.pad_team.atk[attr]\n pronged_atk_total = self.pad_team.pronged_atk[attr]\n else:\n atk_total = self.pad_team.atk_modified[attr]\n pronged_atk_total = self.pad_team.pronged_atk_modified[attr]\n # if we're looking for the base values\n else:\n if not self.is_pressed:\n atk_total = self.pad_team.base_atk\n pronged_atk_total = self.pad_team.base_pronged_atk\n else:\n atk_total = self.pad_team.base_atk_modified\n pronged_atk_total = self.pad_team.base_pronged_atk_modified\n\n return atk_total, pronged_atk_total\n\n # when line_edits are altered, activate this line code according to the text in the line\n def _on_changed_(self, index, text):\n \"\"\"\n When a line edit is altered, check the text entered to see if it matches with any of\n the names in the json dictionary and create a PADMonster at the appropriate index in\n the team array if the name is found.\n :param index: the index of the line edit corresponding to the index of the PADMonster\n in the team array.\n :param text: the text currently inside the line edit\n \"\"\"\n for x in range(len(self.json_monsters)):\n if text == self.json_monsters[x][\"name\"]:\n self._create_monster_(index, x, text)\n elif text.title() == self.json_monsters[x][\"name\"]:\n self._create_monster_(index, x, text.title())\n\n def _handle_button_(self, color_num, pressed):\n \"\"\"\n Only show the Attack and Pronged Attack values of the appropriate element or sum of the\n elements if BASE is chosen.\n 
:param color_num: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base\n :param pressed: useless event input\n \"\"\"\n for index in range(6):\n if color_num == 5:\n self._set_attack_labels_(index, color_num, self.team[index].atk[self.team[index].attr_main],\n self.team[index].pronged_atk[self.team[index].attr_main])\n else:\n self._set_attack_labels_(index, color_num, self.team[index].atk[color_num],\n self.team[index].pronged_atk[color_num])\n\n atk_total, pronged_atk_total = self._get_total_attr_attack_(color_num)\n\n self._set_attack_labels_(6, color_num, atk_total, pronged_atk_total)\n\n def _handle_toggle_button_(self, pressed):\n \"\"\"\n If the modify stats by leader skills button is pressed, modify the button's text, set\n the Class Variable is_pressed to True/False accordingly, and reset the labels now that\n is_pressed has been changed.\n :param pressed: Useless event input.\n \"\"\"\n if pressed:\n self.is_pressed = True\n self.toggle_button.setText('Toggle Off Modified Stats')\n else:\n self.is_pressed = False\n self.toggle_button.setText('Toggle On Modified Stats')\n\n for monster in range(6):\n self._set_labels_(self.team[monster], monster)\n\n def _on_level_activated_(self, index, level):\n \"\"\"\n If a level for the PADMonster has been selected, change the monster's base stats\n according to that level, reset pad_team according to these new values and reset\n labels accordingly.\n :param index: PADMonster's index in the team array. 
[0-5]\n :param level: the level the PADMonster will be set to\n \"\"\"\n self.team[index]._set_stats_at_level_(int(level))\n self.team_base[index]._set_stats_at_level_(int(level))\n self.pad_team = PADTeam(self.team)\n\n for monster in range(6):\n self._set_labels_(self.team[monster], monster)\n\n def _on_plus_type_activated_(self, index, text):\n \"\"\"\n If hp, atk, or rcv has been selected in the drop down menu, hide the menu asking for the\n type and show the menu asking for the value of pluses between 0-99.\n :param index: PADMonster's index in the team array. [0-5]\n :param text: 'hp', 'atk', or 'rcv'\n \"\"\"\n self.plus_boxes_types[index].hide()\n self.plus_boxes_values[index].show()\n try: self.plus_boxes_values[index].activated[str].disconnect()\n except Exception: pass\n self.plus_boxes_values[index].activated[str].connect(partial(self._on_plus_value_activated_, index, text))\n self.plus_boxes_types[index].disconnect()\n def _on_plus_value_activated_(self, index, type, value):\n \"\"\"\n If the value pertaining to the specified type has been selected, modify the appropriate\n stat of the indexed PADMonster according the specified amount of pluses, reset the\n pad_team according to the modified stats, and redisplay the new values\n :param index: PADMonster's index in the team array. 
[0-5]\n :param type: 'hp', 'atk', or 'rcv'\n :param value: the value, 0-99, of pluses the PADMonster has for the specified type\n \"\"\"\n self.plus_boxes_types[index].show()\n self.plus_boxes_types[index].activated[str].connect(partial(self._on_plus_type_activated_, index))\n self.plus_boxes_values[index].hide()\n self.team[index]._set_stats_with_pluses_(type, int(value))\n self.team_base[index]._set_stats_with_pluses_(type, int(value))\n self.pad_team = PADTeam(self.team)\n\n for monster in range(6):\n self._set_labels_(self.team[monster], monster)\n\n# class mouselistener(QLabel):\n# def __init__(self):\n# super().__init__()\n#\n# self.setMouseTracking(True)\n# self.widget_location = self.rect()\n#\n# def mouseMoveEvent(self, event):\n# posMouse = event.pos()\n# font = QFont()\n# if self.widget_location.contains(posMouse):\n# font.setPointSize(8)\n#\n# QToolTip.setFont(font)\n# self.setToolTip(self.text())\n#\n# return super().mouseReleaseEvent(event)\n" }, { "alpha_fraction": 0.6404055953025818, "alphanum_fraction": 0.6622464656829834, "avg_line_length": 22.759260177612305, "blob_id": "a35d7083b4173955d03d5da94df20008ebf20851", "content_id": "d4adddfa8d97c819a4e6e272b6540239419e7698", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1282, "license_type": "no_license", "max_line_length": 64, "num_lines": 54, "path": "/PAD_GUI.py", "repo_name": "acheng6845/PuzzleSolver", "src_encoding": "UTF-8", "text": "__author__ = 'Aaron'\n\n# import necessary files\nfrom PyQt5 import PyQt5\nimport sys\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QHBoxLayout,\n QFrame, QSplitter, QStyleFactory,\n QMainWindow, QStackedWidget)\nfrom PyQt5.QtCore import Qt\n\nfrom PADScreen import PADScreen\n\nclass GUIMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n widget = PADScreen(self)\n self.setCentralWidget(widget)\n self.setGeometry(300, 300, 300, 200)\n self.setWindowTitle('PAD Damage Calculator')\n 
self.show()\n\n\nclass PADGUI(QStackedWidget):\n\n def __init__(self, main_window):\n super().__init__()\n\n self.init_UI(main_window)\n\n def init_UI(self, main_window):\n #The initial screen that we'll be working on\n screen = PADScreen(self, main_window)\n screen_widget = QWidget(main_window)\n\n #Make the main screen our layout\n screen_widget.setLayout(screen)\n\n self.addWidget(screen_widget)\n\n #Add simulation screen here:\n\n #Set the window dimensions, title and show it off!\n self.setGeometry(300, 300, 300, 200)\n self.setWindowTitle('PAD Damage Calculator')\n self.show()\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n\n gui = GUIMainWindow()\n\n sys.exit(app.exec_())" }, { "alpha_fraction": 0.5389026999473572, "alphanum_fraction": 0.5580758452415466, "avg_line_length": 41.952205657958984, "blob_id": "09cb429e45f56bde03059a6125ab4cabbe1df87b", "content_id": "9e954a7ac12a48e85f574a40b553242442b4a75e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11683, "license_type": "no_license", "max_line_length": 120, "num_lines": 272, "path": "/PAD_Monster.py", "repo_name": "acheng6845/PuzzleSolver", "src_encoding": "UTF-8", "text": "__author__ = 'Aaron'\n\n# Class Description:\n# Our Monster Class where we hold all of the Monster's stats and calculate the values needed with those stats\n\nimport os\nimport json\n\n\nclass PADMonster:\n def __init__(self):\n\n # initialize the Class's stats\n # _max, _min, and _scale are used for when the monster's level is set to something other than its max level\n # _bonus used for when awakenings add value to the base stat\n self.name = ''\n self.hp = 0\n self.hp_max = 0\n self.hp_min = 0\n self.hp_scale = 0\n self.hp_plus = 0\n self.hp_bonus = 0\n self.hp_base = 0\n self.rcv_base = 0\n self.rcv = 0\n self.rcv_max = 0\n self.rcv_min = 0\n self.rcv_scale = 0\n self.rcv_plus = 0\n self.rcv_bonus = 0\n self.base_base_atk = 0\n self.base_atk = 0\n 
self.base_atk_max = 0\n self.base_atk_min = 0\n self.base_atk_scale = 0\n self.base_atk_plus = 0\n self.base_atk_bonus = 0\n # Array of Attack: atk[attribute]\n self.atk = [0, 0, 0, 0, 0]\n # Array of Pronged Attack: [attribute][0 = Main, 1 = Sub]\n self.pronged_atk = [0, 0, 0, 0, 0]\n\n self.max_level = 99\n self.current_level = 99\n\n # 'fire' = 0, 'water' = 1, 'wood' = 2, 'light' = 3, 'dark' = 4\n self.attr_main = 0\n self.attr_sub = 0\n # check if main attribute = sub attribute\n self.is_same_attr = False\n # save list of attribute types\n self.attributes = ['fire', 'water', 'wood', 'light', 'dark']\n\n # see list of types for corresponding index number\n self.type_main = 0\n self.type_sub = 0\n self.type_main_name = ''\n self.type_sub_name = ''\n # save list of types\n self.types = ['Evo Material', 'Balanced', 'Physical', 'Healer', 'Dragon', 'God', 'Attacker',\n 'Devil', '', '', '', '', 'Awoken Skill Material', 'Protected', 'Enhance Material']\n # save leader skill multipliers; leader_skill[0 = hp, 1 = atk, 2 = rcv]\n self.leader_skill = [0, 0, 0]\n\n # store image 60x60 size and file location on padherder.com\n self.image60_size = 0\n self.image60_href = ''\n\n # save amount of each awoken skill\n # id: 1 -> Enhanced HP, 2 -> Enhanced Attack, 3 -> Enhanced Heal, 4 -> Reduce Fire Damage,\n # 5 -> Reduce Water Damage,\n # 6 -> Reduce Wood Damage, 7 -> Reduce Light Damage, 8 -> Reduce Dark Damage, 9 -> Auto-Recover,\n # 10 -> Resistance-Bind, 11 -> Resistance-Dark, 12 -> Resistance-Jammers, 13 -> Resistance-Poison,\n # 14 -> Enhanced Fire Orbs, 15 -> Enhanced Water Orbs, 16 -> Enhanced Wood Orbs, 17 -> Enhanced Light Orbs,\n # 18 -> Enhanced Dark Orbs, 19 -> Extend Time, 20 -> Recover Bind, 21 -> Skill Boost, 22 -> Enhanced Fire Att.,\n # 23 -> Enhanced Water Att., 24 -> Enhanced Wood Att., 25 -> Enhanced Light Att., 26 -> Enhanced Dark Att.,\n # 27 -> Two-Pronged Attack, 28 -> Resistance-Skill Lock\n self.awakenings = [['', '', 0] for x in range(28)]\n 
self.awakenings_names = ['Enhanced HP', 'Enhanced Attack', 'Enhanced Heal', 'Reduce Fire Damage',\n 'Reduce Water Damage', 'Reduce Wood Damage', 'Reduce Light Damage',\n 'Reduce Dark Damage', 'Auto-Recover', 'Resistance-Bind', 'Resistance-Dark',\n 'Resistance-Jammers', 'Resistance-Poison', 'Enhanced Fire Orbs', 'Enhanced Water Orbs',\n 'Enahnced Wood Orbs', 'Enhanced Light Orbs', 'Enhanced Dark Orbs', 'Extend Time',\n 'Recover Bind', 'Skill Boost', 'Enhanced Fire Att.', 'Enhanced Water Att.',\n 'Enhanced Wood Att.', 'Enhanced Light Att.', 'Enhanced Dark Att.',\n 'Two-Pronged Attack', 'Resistance-Skill Lock']\n\n # open awakenings.txt and load it into a python object using json\n self.json_file = open(os.path.join('awakenings.txt'), 'r')\n self.json_awakenings = json.loads(self.json_file.read())\n\n # iterate through self.json_awakenings and extract the necessary information into self.awakenings\n # awakenings[id-1][name, desc, count]\n for awakening in self.json_awakenings:\n self.awakenings[awakening['id'] - 1] = [awakening['name'], awakening['desc'], 0]\n\n # leader skill\n self.leader_skill_name = ''\n self.leader_skill_desc = ''\n # [xhp, xatk, xrcv, ['elem/type?', which elem/type?]]\n self.leader_skill_effect = [1, 1, 1]\n\n self.json_file = open(os.path.join('leader skills.txt'), 'r')\n self.json_leader_skills = json.loads(self.json_file.read())\n\n def set_base_stats(self, name, hp, atk, rcv, attr1, attr2, type1, type2, size, href, awakenings, leader_skill,\n level, hp_min, hp_scale, atk_min, atk_scale, rcv_min, rcv_scale):\n\n self.name = name\n self.hp = hp\n self.hp_base = hp\n self.hp_max = hp\n self.hp_min = hp_min\n self.hp_scale = hp_scale\n self.base_atk = atk\n self.base_base_atk = atk\n self.base_atk_max = atk\n self.base_atk_min = atk_min\n self.base_atk_scale = atk_scale\n self.rcv = rcv\n self.rcv_base = rcv\n self.rcv_max = rcv\n self.rcv_min = rcv_min\n self.rcv_scale = rcv_scale\n self.max_level = level\n self.current_level = level\n 
self.attr_main = attr1\n self.attr_sub = attr2\n self.type_main = type1\n self.type_main_name = self.types[type1]\n self.type_sub = type2\n if type2:\n self.type_sub_name = self.types[type2]\n self.image60_size = size\n self.image60_href = href\n self.leader_skill_name = leader_skill\n\n for awakening in awakenings:\n self.awakenings[awakening - 1][2] += 1\n\n # sets _bonus stats if awakenings[0-2][2] a.k.a. the stat bonus awakenings are greater than 1\n for x in range(3):\n if self.awakenings[x][2] > 0:\n if x == 0:\n self.hp_bonus = self.awakenings[x][2] * 200\n self.hp += self.hp_bonus\n self.hp_base = self.hp\n if x == 1:\n self.base_atk_bonus = self.awakenings[x][2] * 100\n self.base_atk += self.base_atk_bonus\n self.base_base_atk = self.base_atk\n if x == 2:\n self.rcv_bonus = self.awakenings[x][2] * 50\n self.rcv += self.rcv_bonus\n self.rcv_base = self.rcv\n # find the leader skills' effects and description in the json library according to the name\n for x in range(len(self.json_leader_skills)):\n if leader_skill == self.json_leader_skills[x]['name']:\n self.leader_skill_desc = self.json_leader_skills[x]['effect']\n if 'data' in self.json_leader_skills[x].keys():\n self.leader_skill_effect = self.json_leader_skills[x]['data']\n\n self._set_atk_(self.attr_main, self.attr_sub)\n self._set_pronged_atk_(self.attr_main, self.attr_sub)\n\n def _set_attr_main_(self, attr):\n \"\"\"\n If the attribute name is valid, set the Class's attr_main value to the value corresponding\n to the attr\n :param attr: attribute name\n \"\"\"\n if attr.lower() in self.attributes:\n self.attr_main = self.attributes.index(attr.lower())\n\n # if attribute is changed, check if main and sub attributes are the same\n if self.attr_main == self.attr_sub:\n self.is_same_attr = True\n else:\n self.is_same_attr = False\n\n def _set_attr_sub_(self, attr):\n \"\"\"\n If the attribute name is valid, set the Class's attr_sub value to the value corresponding\n to the attr\n :param attr: attribute 
name\n \"\"\"\n if attr.lower() in self.attributes:\n self.attr_sub = self.attributes.index(attr.lower())\n\n # if attribute is changed, check if main and sub attributes are the same\n if self.attr_main == self.attr_sub:\n self.is_same_attr = True\n else:\n self.is_same_attr = False\n\n def _set_atk_(self, attr1, attr2):\n \"\"\"\n Calculate and set atk for each attribute\n :param attr1: value corresponding to main attribute\n :param attr2: value corresponding to sub attribute\n \"\"\"\n if attr1 in [0, 1, 2, 3, 4]:\n if attr1 != attr2:\n self.atk[attr1] = self.base_atk\n else:\n self.atk[attr1] = self.base_atk * 1.1\n\n if attr2 in [0, 1, 2, 3, 4]:\n if attr1 != attr2:\n self.atk[attr2] = self.base_atk * (1/3)\n\n def _set_pronged_atk_(self, attr1, attr2):\n \"\"\"\n Calculate and set pronged atk for each attribute\n :param attr1: value corresponding to main attribute\n :param attr2: value corresponding to sub attribute\n \"\"\"\n if attr1 in [0, 1, 2, 3, 4]:\n self.pronged_atk[attr1] = self.atk[attr1] * 1.5 ** self.awakenings[26][2]\n\n if attr2 in [0, 1, 2, 3, 4] and attr1 != attr2:\n self.pronged_atk[attr2] = self.atk[attr2] * 1.5 ** self.awakenings[26][2]\n\n def _set_stats_at_level_(self, level):\n \"\"\"\n Modify all stats according to level.\n :param level: Level the monster will be set to.\n \"\"\"\n self.current_level = level\n self.hp = self._use_growth_formula(self.hp_min, self.hp_max, self.hp_scale)\n self.hp += self.hp_bonus\n self.hp_base = self.hp\n self._set_stats_with_pluses_('hp', self.hp_plus)\n self.base_atk = self._use_growth_formula(self.base_atk_min, self.base_atk_max, self.base_atk_scale)\n self.base_atk += self.base_atk_bonus\n self.base_base_atk = self.base_atk\n self._set_stats_with_pluses_('atk', self.base_atk_plus)\n self.rcv = self._use_growth_formula(self.rcv_min, self.rcv_max, self.rcv_scale)\n self.rcv += self.rcv_bonus\n self.rcv_base = self.rcv\n self._set_stats_with_pluses_('rcv', self.rcv_plus)\n\n def _use_growth_formula(self, 
min_value, max_value, scale):\n \"\"\"\n Applies the growth formula to get the values of the specified stat at the current level.\n :param min_value: the minimum value of the stat\n :param max_value: the maximum value of the stat\n :param scale: the scaling rate of the stat\n :return: the value of the stat at the current level\n \"\"\"\n value = ((self.current_level - 1) / (self.max_level - 1)) ** scale\n value *= (max_value - min_value)\n value += min_value\n return value\n\n def _set_stats_with_pluses_(self, type, num):\n \"\"\"\n Modify the specified stat according to the specified amount of pluses\n :param type: 'hp', 'atk', or 'rcv'\n :param num: 0-99, the number of pluses for the specified stat\n \"\"\"\n if type == 'hp':\n self.hp_plus = num\n self.hp = self.hp_base + self.hp_plus * 10\n elif type == 'atk':\n self.base_atk_plus = num\n self.base_atk = self.base_base_atk + self.base_atk_plus * 5\n self._set_atk_(self.attr_main, self.attr_sub)\n self._set_pronged_atk_(self.attr_main, self.attr_sub)\n elif type == 'rcv':\n self.rcv_plus = num\n self.rcv = self.rcv_base + self.rcv_plus * 3\n" }, { "alpha_fraction": 0.6188371181488037, "alphanum_fraction": 0.6204709410667419, "avg_line_length": 47.62616729736328, "blob_id": "883550415957088e6f3c6101518e0627df5a00e2", "content_id": "a29cb515c3f596ae88b755511d4704cf11dcd980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10405, "license_type": "no_license", "max_line_length": 104, "num_lines": 214, "path": "/PADScreen.py", "repo_name": "acheng6845/PuzzleSolver", "src_encoding": "UTF-8", "text": "__author__ = 'Aaron'\n\nfrom Calculator_Screen import CalculatorScreen\nfrom Board_Screen import BoardScreen\nfrom PAD_Monster import PADMonster\nfrom PAD_Team import PADTeam\nfrom PyQt5.QtWidgets import (QVBoxLayout, QHBoxLayout, QWidget, QPushButton, QSplitter, QAction,\n QFileDialog, QMainWindow, QStackedWidget, QSplitter)\nfrom PyQt5.QtCore import Qt\nimport 
os\nimport json\nfrom functools import partial\n\n\nclass PADScreen(QStackedWidget):\n\n def __init__(self, main_window):\n\n \"\"\"\n Initialize the PADScreen Class\n :param gui: the main interface which will hold all of our widgets\n :param main_window: the main window widget which will hold our menu bar\n \"\"\"\n super().__init__()\n\n # create an open file and save file action for our menu bar and connects them to their\n # respective functions\n open_file = QAction('Load Team...', main_window)\n open_file.setShortcut('Ctrl+O')\n open_file.triggered.connect(partial(self._show_dialog_box_, 'Open', main_window))\n save_file = QAction('Save Team...', main_window)\n save_file.setShortcut('Ctrl+S')\n save_file.triggered.connect(partial(self._show_dialog_box_, 'Save', main_window))\n\n clear_team = QAction('New Team', main_window)\n clear_team.setShortcut('Ctrl+N')\n clear_team.triggered.connect(self.__clear__team__)\n\n # create our menu bar, attach it to our main window and add to it our open and save actions\n menubar = main_window.menuBar()\n file_menu = menubar.addMenu('&File')\n file_menu.addAction(open_file)\n file_menu.addAction(save_file)\n file_menu.addAction(clear_team)\n\n # create the widget containing the first page of the GUI, the calculator page\n self.calculator_screen = QWidget(self)\n # use custom calculator layout for the widget's layout\n self.calculator_screen_layout = CalculatorScreen(self)\n self.calculator_screen.setLayout(self.calculator_screen_layout)\n\n # initialize a variable to hold the PADTeam\n self.pad_team = self.calculator_screen_layout.pad_team\n self.team = self.calculator_screen_layout.team\n\n # create the widget containing the second page of the GUI, the board page\n self.board_screen = QWidget(self)\n # use custom board layout for the widget's layout\n self.board_screen_layout = BoardScreen(self, self.team, self.pad_team)\n self.board_screen.setLayout(self.board_screen_layout)\n # initially hide this page until the next page 
button is pressed\n #self.board_screen.hide()\n\n # create the bottom widget for the GUI which will contain the page turning buttons\n self.page_turner = QWidget(main_window)\n page_turner_layout = QHBoxLayout(main_window)\n self.page_turner.setLayout(page_turner_layout)\n self.turn_left = QPushButton('<', main_window)\n page_turner_layout.addWidget(self.turn_left)\n page_turner_layout.addStretch()\n page_turner_layout.addStretch()\n self.turn_right = QPushButton('>', main_window)\n page_turner_layout.addWidget(self.turn_right)\n # initially hide the button to turn left as the GUI initializes on page 1\n self.turn_left.hide()\n\n self.page_one_splitter = QSplitter(Qt.Vertical)\n self.page_one_splitter.addWidget(self.calculator_screen)\n self.page_one_splitter.addWidget(self.page_turner)\n self.addWidget(self.page_one_splitter)\n #self.setCurrentWidget(self.page_one_splitter)\n\n self.page_two_splitter = QSplitter(Qt.Vertical)\n self.page_two_splitter.addWidget(self.board_screen)\n #self.page_two_splitter.addWidget(page_turner)\n self.addWidget(self.page_two_splitter)\n #self.setCurrentWidget(self.page_two_splitter)\n\n self._init_screen_()\n\n def _init_screen_(self):\n\n \"\"\"\n Set right click button to connect to the second page\n :param gui: the main interface all the widgets will be attached to\n \"\"\"\n self.turn_right.clicked.connect(self._go_to_board_screen_)\n \n def _go_to_board_screen_(self, clicked):\n\n \"\"\"\n Set the active screen to the second page and hide the first page when the respective\n button is clicked. 
Also hide the right button, show the left button and connect the\n left button to the first page.\n :param gui: same.\n :param clicked: the clicking event, useless.\n \"\"\"\n self.board_screen_layout.team = self.calculator_screen_layout.team\n self.board_screen_layout.team_totals = self.calculator_screen_layout.pad_team\n self.board_screen_layout.set__team(self.board_screen_layout.team)\n self.setCurrentWidget(self.page_two_splitter)\n self.page_two_splitter.addWidget(self.page_turner)\n #self.board_screen.show()\n #self.calculator_screen.hide()\n\n self.turn_right.hide()\n self.turn_left.show()\n self.turn_left.clicked.connect(self._go_to_calculator_screen_)\n\n def _go_to_calculator_screen_(self, clicked):\n\n \"\"\"\n Set the active screen to the first page and hide the second page when the respective\n button is clicked. Also hide the left button, show the right button and connect the\n right button to the second page.\n :param gui: same.\n :param clicked: useless clicking event.\n \"\"\"\n self._init_screen_()\n self.turn_left.hide()\n self.turn_right.show()\n self.turn_right.clicked.connect(self._go_to_board_screen_)\n\n self.page_one_splitter.addWidget(self.page_turner)\n self.setCurrentWidget(self.page_one_splitter)\n #self.board_screen.hide()\n #self.calculator_screen.show()\n\n def _show_dialog_box_(self, stringname, gui):\n\n \"\"\"\n If the stringname is 'Open', open a dialog where the user can select a team to load\n into the line edits.\n If the stringname is 'Save', open a dialog where the user can save the names of the\n team members into a txt file.\n :param stringname: 'Open' or 'Save', the corresponding menu action will contain the\n key stringname.\n :param gui: same.\n \"\"\"\n if stringname == 'Open':\n filename = QFileDialog.getOpenFileName(gui, 'Load Team...', os.path.join('saved teams'),\n 'Text files (*.txt)')\n # if not empty string and has the appropriate subscript\n if filename[0] and filename[0].endswith('txt'):\n with 
open(os.path.realpath(filename[0]), 'r') as file:\n json_content = json.loads(file.read())\n # decode the names in case of unicode strings like the infinity sign\n #content_decoded = content.decode('utf-8')\n #monster_names = content_decoded.splitlines()\n for monster in range(6):\n # decode the name in case of unicode strings like the infinity sign\n # name = json_content[monster]['name'].decode('utf-8')\n name = json_content[monster]['name']\n hp_plus = json_content[monster]['hp plus']\n atk_plus = json_content[monster]['atk plus']\n rcv_plus = json_content[monster]['rcv plus']\n level = json_content[monster]['level']\n # enter the names into the line edits\n self.calculator_screen_layout.line_edits[monster].setText(name)\n self.calculator_screen_layout._on_plus_value_activated_(monster, 'hp', hp_plus)\n self.calculator_screen_layout._on_plus_value_activated_(monster, 'atk', atk_plus)\n self.calculator_screen_layout._on_plus_value_activated_(monster, 'rcv', rcv_plus)\n self.calculator_screen_layout._on_level_activated_(monster, level)\n\n\n if stringname == 'Save':\n filename = QFileDialog.getSaveFileName(gui, 'Save Team...', os.path.join('saved teams'),\n 'Text files (*.txt')\n # if not empty string\n if filename[0]:\n # create json file\n json_file = [{} for monster in range(6)]\n #monster_names = ''\n for monster in range(6):\n # copy the team member's name to a variable\n monster_name = self.calculator_screen_layout.team[monster].name\n # copy the team member's pluses to variables\n hp_plus = self.calculator_screen_layout.team[monster].hp_plus\n atk_plus = self.calculator_screen_layout.team[monster].base_atk_plus\n rcv_plus = self.calculator_screen_layout.team[monster].rcv_plus\n # copy the team member's current level to a variable\n current_level = self.calculator_screen_layout.team[monster].current_level\n #monster_names += monster_name+'\\n'\n # encode the string to be saved for symbols like the infinity sign\n #monster_name_encoded = 
monster_name.encode('utf8', 'replace')\n json_file[monster]['name'] = monster_name\n json_file[monster]['hp plus'] = hp_plus\n json_file[monster]['atk plus'] = atk_plus\n json_file[monster]['rcv plus'] = rcv_plus\n json_file[monster]['level'] = current_level\n with open(os.path.realpath(filename[0]+'.txt'), 'w') as file:\n json.dump(json_file, file)\n\n def __clear__team__(self):\n for index in range(6):\n self.calculator_screen_layout.line_edits[index].clear()\n self.calculator_screen_layout.team = [PADMonster() for monster in range(6)]\n self.calculator_screen_layout.pad_team = PADTeam(self.calculator_screen_layout.team)\n for index in range(6):\n self.calculator_screen_layout._set_labels_(self.calculator_screen_layout.team[index], index)\n # self.calculator_screen = QWidget(gui)\n # self.calculator_screen_layout = CalculatorScreen(gui)\n # self.calculator_screen.setLayout(self.calculator_screen_layout)\n # self.active_screen = self.calculator_screen" }, { "alpha_fraction": 0.5482920408248901, "alphanum_fraction": 0.5649029016494751, "avg_line_length": 49.100669860839844, "blob_id": "ee28bffc8456f44f39db17b6cfe225a62e48abab", "content_id": "14f93f976e9e4dbc1afceff1d9b233a3b5d5d74a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7465, "license_type": "no_license", "max_line_length": 113, "num_lines": 149, "path": "/PAD_Team.py", "repo_name": "acheng6845/PuzzleSolver", "src_encoding": "UTF-8", "text": "__author__ = 'Aaron'\nimport os\nfrom PAD_Monster import PADMonster\n\nclass PADTeam:\n\n def __init__(self, team):\n \"\"\"\n Initializes the PADTeam Class.\n :param team: an array containing 6 PADMonster Classes\n \"\"\"\n # self.team = [PADMonster() for monster in range(6)] -> how the team should look\n self.team = team\n # below we initialize the variables that will be containing the team stats.\n self.hp = 0\n # for all atk arrays: [fire atk, water atk, wood atk, light atk, dark atk]\n self.atk = [0, 0, 
0, 0, 0]\n # for all base atks, it's the sum of each value in the array\n self.base_atk = 0\n self.pronged_atk = [0, 0, 0, 0, 0]\n self.base_pronged_atk = 0\n self.rcv = 0\n\n # below we initialize the modified stats, the team's total stats after being\n # multiplied by the effects of the two leader skills\n self.hp_modified = 0\n self.atk_modified = [0, 0, 0, 0, 0]\n self.base_atk_modified = 0\n self.pronged_atk_modified = [0, 0, 0, 0, 0]\n self.base_pronged_atk_modified = 0\n self.rcv_modified = 0\n\n # a string that will contain all our the teams' awakenings\n self.awakenings = ''\n\n # the leader skills effects: [hp multiplied by, atk multiplied by, rcv multiplied by]\n self.leader1_effects = [1, 1, 1]\n self.leader2_effects = [1, 1, 1]\n # store how each monster's stats will be modified as in if the monster satisfies the\n # leader skill's conditions\n self.stats_modified_by = [[1, 1, 1] for monster in range(6)]\n\n # set all the variables according to the team input\n self.__set__team__hp()\n self.__set__team__rcv()\n self.__set__team__atk()\n self.__set__team__base__atk()\n self.__set__team__awakenings()\n self.__set__modified__stats__()\n\n def __set__team__hp(self):\n self.hp = 0\n for monster in range(6):\n self.hp += self.team[monster].hp\n def __set__team__rcv(self):\n self.rcv = 0\n for monster in range(6):\n self.rcv += self.team[monster].rcv\n def __set__team__awakenings(self):\n self.awakenings = ''\n for awakening in range(len(self.team[0].awakenings)):\n # count stores how many instances of a specific awakening are contained in the team\n count = 0\n for monster in range(6):\n if self.team[monster].awakenings[awakening][2] > 0:\n count += self.team[monster].awakenings[awakening][2]\n if count > 0:\n # if the team has an awakening, save it to the string and add the count number\n self.awakenings += self.team[0].awakenings[awakening][0]+': '+str(count)+'\\n'\n def __set__team__atk(self):\n self.atk = [0, 0, 0, 0, 0]\n self.pronged_atk = [0, 0, 0, 0, 
0]\n for attr in range(5):\n for monster in self.team:\n self.atk[attr] += monster.atk[attr]\n self.pronged_atk[attr] += monster.pronged_atk[attr]\n def __set__team__base__atk(self):\n self.base_atk = 0\n self.base_pronged_atk = 0\n for monster in self.team:\n self.base_atk += monster.atk[monster.attr_main]\n self.base_pronged_atk += monster.pronged_atk[monster.attr_main]\n def __set__modified__stats__(self):\n\n self.stats_modified_by = [[1, 1, 1] for monster in range(6)]\n\n # the first and last team members of the team are considered the leaders and we use\n # their respective leader skills.\n for index in [0, 5]:\n # if the leader skill isn't \"\"\n if self.team[index].leader_skill_name:\n # the skill effect will look [hp modified by, atk modified by, rcv modified by]\n # an additional 4th index exists if there's a conditional which will look like:\n # [hp * by, atk * by, rcv * by, ['elem' or 'type', # associated with elem or type]]\n if len(self.team[index].leader_skill_effect) > 3:\n # if fourth array exists, save whether the conditional asks for an element\n # or type in attribute variable\n # and save the # associated in the num variable\n attribute = self.team[index].leader_skill_effect[3][0]\n num = self.team[index].leader_skill_effect[3][1]\n\n # check if each monster in the team satisfies the elem or type conditional\n # if true, the stats modified index for that monster will be multiplied appropriately\n if attribute == \"elem\":\n for monster in range(6):\n if self.team[monster].attr_main == num or self.team[monster].attr_sub == num:\n self.stats_modified_by[monster][0] *= self.team[index].leader_skill_effect[0]\n self.stats_modified_by[monster][1] *= self.team[index].leader_skill_effect[1]\n self.stats_modified_by[monster][2] *= self.team[index].leader_skill_effect[2]\n\n elif attribute == \"type\":\n for monster in range(6):\n if self.team[monster].type_main == num or self.team[monster].type_sub == num:\n self.stats_modified_by[monster][0] *= 
self.team[index].leader_skill_effect[0]\n self.stats_modified_by[monster][1] *= self.team[index].leader_skill_effect[1]\n self.stats_modified_by[monster][2] *= self.team[index].leader_skill_effect[2]\n\n # if there isn't a 4th index conditional, just multiply all of the stats modified indexes\n # by the appropriate skill effect amounts\n else:\n for monster in range(6):\n self.stats_modified_by[monster][0] *= self.team[index].leader_skill_effect[0]\n self.stats_modified_by[monster][1] *= self.team[index].leader_skill_effect[1]\n self.stats_modified_by[monster][2] *= self.team[index].leader_skill_effect[2]\n\n hp = 0\n base_atk = 0\n atk = [0, 0, 0, 0, 0]\n base_pronged_attack = 0\n pronged_atk = [0, 0, 0, 0, 0]\n rcv = 0\n\n # modify each team stat according to the leader skills' effects and save them to their respective\n # variables.\n for monster in range(6):\n hp += self.team[monster].hp * self.stats_modified_by[monster][0]\n rcv += self.team[monster].rcv * self.stats_modified_by[monster][2]\n main_attr = self.team[monster].attr_main\n base_atk += self.team[monster].atk[main_attr] * self.stats_modified_by[monster][1]\n base_pronged_attack += self.team[monster].pronged_atk[main_attr] * self.stats_modified_by[monster][1]\n for attr in range(5):\n atk[attr] += self.team[monster].atk[attr] * self.stats_modified_by[monster][1]\n pronged_atk[attr] += self.team[monster].pronged_atk[attr] * self.stats_modified_by[monster][1]\n self.hp_modified = hp\n self.atk_modified = atk\n self.base_atk_modified = base_atk\n self.pronged_atk_modified = pronged_atk\n self.base_pronged_atk_modified = base_pronged_attack\n self.rcv_modified = rcv\n" }, { "alpha_fraction": 0.5503060817718506, "alphanum_fraction": 0.5613718628883362, "avg_line_length": 43.24652862548828, "blob_id": "a98fc0d249fc1199af8b1d0a8c1c36ddd9f9a168", "content_id": "f3302eefc928c1ad1dfedf1b3255b2841da0b2a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
12742, "license_type": "no_license", "max_line_length": 118, "num_lines": 288, "path": "/Board_Screen.py", "repo_name": "acheng6845/PuzzleSolver", "src_encoding": "UTF-8", "text": "__author__ = 'Aaron'\nfrom PyQt5.QtWidgets import (QVBoxLayout, QWidget, QLabel, QGridLayout, QSplitter,\n QPushButton, QHBoxLayout)\nfrom PyQt5.QtCore import Qt, QMimeData\nfrom PyQt5.QtGui import QPixmap, QDrag\nimport os\nfrom PAD_Monster import PADMonster\nfrom PAD_Team import PADTeam\nfrom functools import partial\nclass BoardScreen(QVBoxLayout):\n default_team = [PADMonster() for monster in range(6)]\n default_team_totals = PADTeam(default_team)\n def __init__(self, gui, team=default_team, team_totals=default_team_totals):\n super().__init__()\n\n self.team = team\n self.team_totals = team_totals\n self.damage_array = [[{'main attribute': 0, 'sub attribute': 0} for col in range(2)] for row in range(6)]\n\n self.__init__screen__(gui, self.team, self.team_totals)\n\n def __init__screen__(self, gui, team, team_totals):\n # DAMAGE SCREEN\n damage_screen = QWidget()\n damage_screen_layout = QGridLayout()\n damage_screen.setLayout(damage_screen_layout)\n self.addWidget(damage_screen)\n self.damage_labels = [[QLabel(gui) for column in range(2)] for row in range(6)]\n for row in range(6):\n for column in range(2):\n damage_screen_layout.addWidget(self.damage_labels[row][column], row, column)\n # RECOVERY LABEL\n self.hp_recovered = QLabel(gui)\n self.addWidget(self.hp_recovered)\n # BOARD\n board = QWidget()\n board_layout = QGridLayout()\n board.setLayout(board_layout)\n self.addWidget(board)\n # TEAM IMAGES\n self.team_labels = []\n for index in range(6):\n label = QLabel(gui)\n self.team_labels.append(label)\n board_layout.addWidget(label, 0, index)\n board_layout.setAlignment(label, Qt.AlignHCenter)\n self.set__team(team)\n # BOARD\n self.board_labels = [[PADLabel(gui) for column in range(8)] for row in range(8)]\n # positions = [(i+1, j) for i in range(8) for j in range(8)]\n 
light_brown = 'rgb(120, 73, 4)'\n dark_brown = 'rgb(54, 35, 7)'\n color = dark_brown\n for row in self.board_labels:\n for column in row:\n row_index = self.board_labels.index(row)\n col_index = row.index(column)\n column.setStyleSheet(\"QLabel { background-color: %s }\" % color)\n if color == dark_brown and (col_index+1) % 8 != 0:\n color = light_brown\n elif color == light_brown and (col_index+1) % 8 != 0:\n color = dark_brown\n board_layout.addWidget(column, row_index+1, col_index)\n #for position, label in zip(positions, self.board_labels):\n # board_layout.addWidget(label, *position)\n for row in range(9):\n board_layout.setRowStretch(row, 1)\n for column in range(8):\n board_layout.setColumnStretch(column, 1)\n\n self.board_array = []\n self.__create__board___(5, 6)\n\n # CALCULATE DAMAGE BUTTON\n calculate_damage_button = QPushButton('Calculate Damage', gui)\n calculate_damage_button.clicked.connect(partial(self.calculate_damage, team, team_totals))\n self.addWidget(calculate_damage_button)\n # ORBS\n # orb_wrapper = QWidget(gui)\n # orb_wrapper_layout = QHBoxLayout()\n # orb_wrapper.setLayout(orb_wrapper_layout)\n\n # elements = ['fire', 'water', 'wood', 'light', 'dark']\n # for element in elements:\n # orb = PADIcon(gui)\n # orb.setPixmap(QPixmap(os.path.join('icons')+'\\\\'+element+'.png'))\n # orb_wrapper_layout.addWidget(orb)\n #\n # self.addWidget(orb_wrapper)\n\n def __create__board___(self, row, column):\n self.board_array = [['' for column in range(column)] for row in range(row)]\n\n for row_index in self.board_labels:\n for col_label in row_index:\n col_label.hide()\n for x in range(row):\n for y in range(column):\n self.board_labels[x][y].show()\n\n def calculate_damage(self, team=default_team, team_totals=default_team_totals):\n for row in range(len(self.board_array)):\n for column in range(len(self.board_array[0])):\n self.board_array[row][column] = self.board_labels[row][column].element\n all_positions = set()\n # 0 = fire, 1 = water, 2 = wood, 
3 = light, 4 = dark, 5 = heart\n elemental_damage = [{'fire': 0, 'water': 0, 'wood': 0, 'light': 0, 'dark': 0}\n for monster in range(6)]\n total_hp_recovered = 0\n combo_count = 0\n colors = ['red', 'blue', 'green', 'goldenrod', 'purple', 'pink']\n attribute_translator = ['fire', 'water', 'wood', 'light', 'dark', 'heart']\n for row in range(len(self.board_array)):\n for column in range(len(self.board_array[0])):\n combo_length, positions = self.__find__combos__recursively__(self.board_array, row, column)\n if combo_length >= 3 and not next(iter(positions)) in all_positions and self.board_array[row][column]:\n print(str(self.board_array[row][column])+\":\",combo_length,'orb combo.')\n\n attribute = attribute_translator.index(self.board_array[row][column])\n if attribute != 5:\n for monster in range(6):\n if combo_length == 4:\n damage = team[monster].pronged_atk[attribute] * 1.25\n else:\n damage = team[monster].atk[attribute] * (1+0.25*(combo_length-3))\n elemental_damage[monster][self.board_array[row][column]] += damage\n else:\n total_rcv = 0\n for monster in range(6):\n total_rcv += team[monster].rcv\n total_hp_recovered += total_rcv * (1+0.25*(combo_length-3))\n print(total_hp_recovered)\n print(total_rcv)\n all_positions |= positions\n combo_count += 1\n combo_multiplier = 1+0.25*(combo_count-1)\n for monster in range(6):\n main_attribute = attribute_translator[team[monster].attr_main]\n sub_attribute = ''\n if team[monster].attr_sub or team[monster].attr_sub == 0:\n sub_attribute = attribute_translator[team[monster].attr_sub]\n if sub_attribute:\n if main_attribute != sub_attribute:\n main_damage = elemental_damage[monster][main_attribute] * combo_multiplier\n sub_damage = elemental_damage[monster][sub_attribute] * combo_multiplier\n else:\n main_damage = elemental_damage[monster][main_attribute] * combo_multiplier * (10/11)\n sub_damage = elemental_damage[monster][sub_attribute] * combo_multiplier * (1/11)\n else:\n main_damage = 
elemental_damage[monster][main_attribute] * combo_multiplier\n sub_damage = 0\n self.damage_labels[monster][0].setText(str(main_damage))\n self.damage_labels[monster][0].setStyleSheet(\"QLabel { color : %s }\" % colors[team[monster].attr_main])\n self.damage_labels[monster][1].setText(str(sub_damage))\n if team[monster].attr_sub or team[monster].attr_sub == 0:\n self.damage_labels[monster][1].setStyleSheet(\"QLabel { color : %s }\" % colors[team[monster].attr_sub])\n\n total_hp_recovered *= combo_multiplier\n self.hp_recovered.setText(str(total_hp_recovered))\n self.hp_recovered.setStyleSheet(\"QLabel { color : %s }\" % colors[5])\n\n def set__team(self, team):\n for label, member in zip(self.team_labels, team):\n try:\n image = QPixmap(os.path.join('images')+'/'+member.name+'.png')\n image.scaled(75, 75)\n label.setPixmap(image)\n except Exception: pass\n\n def __find__combos__recursively__(self, array, row, column):\n combo_length = 0\n positions = set()\n row_length = self.checkIndexInRow(array, row, column)\n if row_length >= 3:\n more_length, more_positions = self.__find__combos__recursively__(array, row, column+row_length-1)\n combo_length += row_length + more_length - 1\n positions |= more_positions\n for col_index in range(row_length):\n positions.add((row, column+col_index))\n column_length = self.checkIndexInColumn(array, row, column)\n if column_length >= 3:\n more_length, more_positions = self.__find__combos__recursively__(array, row+column_length-1, column)\n combo_length += column_length + more_length - 1\n positions |= more_positions\n for row_index in range(column_length):\n positions.add((row+row_index, column))\n if row_length >= 3 and column_length >= 3:\n return combo_length - 1, positions\n elif row_length < 3 and column_length < 3:\n return 1, positions\n return combo_length, positions\n\n def checkIndexInRow(self, array, row, col_index):\n combo_length = 0\n if array[row].count(array[row][col_index]) >= 3:\n if col_index > 0:\n if 
array[row][col_index - 1] != array[row][col_index]:\n combo_length += self.recurseThroughRow(array, row, col_index)\n else:\n combo_length += self.recurseThroughRow(array, row, col_index)\n return combo_length\n\n def recurseThroughRow(self, array, row, col_index, count=1):\n if array[row][col_index + count] == array[row][col_index]:\n count += 1\n if col_index + count < len(array[row]):\n return self.recurseThroughRow(array, row, col_index, count)\n else:\n return count\n else:\n return count\n\n def checkIndexInColumn(self, array, row_index, col):\n elements_in_column = []\n combo_length = 0\n for index in range(row_index, len(array)):\n elements_in_column.append(array[index][col])\n if elements_in_column.count(array[row_index][col]) >= 3:\n if row_index > 0:\n if array[row_index][col] != array[row_index - 1][col]:\n combo_length += self.recurseThroughCol(array, row_index, col)\n else:\n combo_length += self.recurseThroughCol(array, row_index, col)\n return combo_length\n\n def recurseThroughCol(self, array, row_index, col, count=1):\n if array[row_index + count][col] == array[row_index][col]:\n count += 1\n if row_index + count < len(array):\n return self.recurseThroughCol(array, row_index, col, count)\n else:\n return count\n else:\n return count\n\nclass PADLabel(QLabel):\n\n def __init__(self, gui):\n super().__init__(gui)\n self.setAcceptDrops(True)\n self.setMouseTracking(True)\n self.setScaledContents(True)\n\n self.color_counter = -1\n self.colors = ['fire', 'water', 'wood', 'light', 'dark', 'heart']\n self.element = ''\n\n self.setFixedSize(75, 75)\n\n def mousePressEvent(self, click):\n if click.button() == Qt.LeftButton and self.rect().contains(click.pos()):\n if self.color_counter != 5:\n self.color_counter += 1\n else:\n self.color_counter = 0\n self.element = self.colors[self.color_counter]\n icon = QPixmap(os.path.join('icons')+'/'+self.element+'.png')\n icon.scaled(75, 75)\n self.setPixmap(icon)\n def dragEnterEvent(self, event):\n if 
event.mimeData().hasImage():\n event.accept()\n else:\n event.ignore()\n def dropEvent(self, event):\n image = event.mimeData().imageData().value<QImage>()\n self.setPixmap(image)\n\nclass PADIcon(QLabel):\n def __init__(self, gui):\n super().__init__()\n\n self.gui = gui\n\n self.setMouseTracking(True)\n self.location = self.rect()\n\n def mousePressEvent(self, click):\n if click.button() == Qt.LeftButton and self.rect().contains(click.pos()):\n print('On it!')\n drag = QDrag(self.gui)\n mimeData = QMimeData()\n\n mimeData.setImageData(self.pixmap().toImage())\n drag.setMimeData(mimeData)\n drag.setPixmap(self.pixmap())\n\n dropAction = drag.exec()" }, { "alpha_fraction": 0.5177692770957947, "alphanum_fraction": 0.5221432447433472, "avg_line_length": 30.016948699951172, "blob_id": "70a5cf5d5cb76dbc3968e5d2bc21df81fa2c3ebf", "content_id": "bd98ac4b50f704edf76c34e1c5d0552abef17ad9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1829, "license_type": "no_license", "max_line_length": 90, "num_lines": 59, "path": "/image_updater.py", "repo_name": "acheng6845/PuzzleSolver", "src_encoding": "UTF-8", "text": "__author__ = 'Aaron'\n\n# Class Description:\n# Update our monsters.txt file and our images folder\n\nfrom urllib3 import urllib3\nimport shutil\nimport os\nimport json\n\nclass image_updater():\n\n def __init__(self):\n\n # update monsters.txt here:\n\n self.json_file = open(os.path.realpath('./monsters.txt'), 'r')\n self.json_object = json.loads(self.json_file.read())\n\n path = os.path.realpath('images')\n\n team = ['Sparkling Goddess of Secrets, Kali', 'Holy Night Kirin Princess, Sakuya',\n 'Soaring Dragon General, Sun Quan', 'divine law goddess, valkyrie rose']\n\n for x in range(len(self.json_object)):\n\n #for x in range(1):\n\n url = 'https://padherder.com'+self.json_object[x][\"image60_href\"]\n #print(url)\n name = self.json_object[x][\"name\"]\n\n if name in team:\n #if name.islower():\n # name += 
'chibi'\n\n request = urllib3.PoolManager().request('GET', url)\n\n #print(os.path.realpath('images2'))\n\n #is_accessible = os.access(path, os.F_OK)\n #print(is_accessible)\n\n # if the directory doesn't exist, create the directory - too risky\n #if is_accessible == False:\n # os.makedirs(os.path.realpath('images2'))\n\n os.chdir(path)\n #print(path)\n #print(path+'\\\\'+name+'.png')\n if os.access(path+'/'+name+'.png', os.F_OK) == False:\n with open(os.path.join(path+'/'+name+'.png'), 'wb') as file:\n file.write(request.data)\n request.release_conn()\n else:\n print(name+'.png already exists.')\n\nif __name__ == '__main__':\n updater = image_updater()" } ]
8
lonce/dcn_soundclass
https://github.com/lonce/dcn_soundclass
aed32872cebd70ca5e4ba312bb0d844665ecd3fc
99c16fb49648f3aa5d4d7738f409809383bc3c73
24fef27a0434e6b8933e2fbc4ffe11bfd0d76099
refs/heads/master
"2021-01-18T23:10:21.849858"
"2017-06-15T14:49:26"
"2017-06-15T14:49:26"
87,092,004
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.646484375, "alphanum_fraction": 0.71875, "avg_line_length": 35.57143020629883, "blob_id": "264237c8f1efdb442a416a939301ef3cfd341590", "content_id": "8ef54636e8698978150b3c26162c1e9f40ac5f4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 512, "license_type": "no_license", "max_line_length": 193, "num_lines": 14, "path": "/runstyletest.sh", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "#!/bin/bash \n# nohup ./runstyle.sh >>styleout/2017.04.29/multilog.txt 2>&1 & \n# Individual logs will also still get stored in their respective directories \nsource activate tflow2\n\nstatefile=logs.2017.05.14/mtl_16.or_height.epsilon_1.0/state.pickle\niter=200\n\nnoise=.2\nrand=0\ncontent=BeingRural5.0\nstyle=agf5.0\n\npython style_transfer.py --weightDecay 0 --content ${content} --style ${style} --noise ${noise} --outdir testout --stateFile ${statefile} --iter $iter --alpha 10 --beta 10 --randomize ${rand}\n" }, { "alpha_fraction": 0.6026028990745544, "alphanum_fraction": 0.6791767477989197, "avg_line_length": 31.382352828979492, "blob_id": "7eab60065aafcf2c19ce6434687d95a2c7674b63", "content_id": "868381a66f8a55697990fa2e0d6874146baca1e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3304, "license_type": "no_license", "max_line_length": 105, "num_lines": 102, "path": "/testPickledModel.py", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "\"\"\"\neg \npython testPickledModel.py logs.2017.04.28/mtl_2.or_channels.epsilon_1.0/state.pickle \n\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport pickledModel\n\nfrom PIL import TiffImagePlugin\nfrom PIL import Image\n\n# get args from command line\nimport argparse\nFLAGS = None\n\n\nVERBOSE=False\n# ------------------------------------------------------\n# get any args provided on the command line\nparser = 
argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('pickleFile', type=str, help='stored graph' ) \nFLAGS, unparsed = parser.parse_known_args()\n\nk_freqbins=257\nk_width=856\n\nstyg = pickledModel.load(FLAGS.pickleFile)\n\nprint(' here we go ........')\n\n\ndef soundfileBatch(slist) :\t\n\treturn ([pickledModel.loadImage(name) for name in slist ])\n\n#just test the validation set \n#Flipping and scaling seem to have almost no effect on the clasification accuracy\nrimages=soundfileBatch(['data2/validate/205 - Chirping birds/5-242490-A._11_.tif',\n\t'data2/validate/205 - Chirping birds/5-242491-A._12_.tif',\n\t'data2/validate/205 - Chirping birds/5-243448-A._14_.tif',\n\t'data2/validate/205 - Chirping birds/5-243449-A._15_.tif',\n\t'data2/validate/205 - Chirping birds/5-243450-A._15_.tif',\n\t'data2/validate/205 - Chirping birds/5-243459-A._13_.tif',\n\t'data2/validate/205 - Chirping birds/5-243459-B._13_.tif',\n\t'data2/validate/205 - Chirping birds/5-257839-A._10_.tif',\n\t'data2/validate/101 - Dog/5-203128-A._4_.tif',\n\t'data2/validate/101 - Dog/5-203128-B._5_.tif',\n\t'data2/validate/101 - Dog/5-208030-A._9_.tif',\n\t'data2/validate/101 - Dog/5-212454-A._4_.tif',\n\t'data2/validate/101 - Dog/5-213855-A._4_.tif',\n\t'data2/validate/101 - Dog/5-217158-A._2_.tif',\n\t'data2/validate/101 - Dog/5-231762-A._1_.tif',\n\t'data2/validate/101 - Dog/5-9032-A._12_.tif',\n\t])\n\nim=np.empty([1,1,k_width,k_freqbins ])\n\nnp.set_printoptions(precision=2)\nnp.set_printoptions(suppress=True)\n\nwith tf.Session() as sess:\n\n\tpredictions=[]\n\tsess.run ( tf.global_variables_initializer ())\n\n\t#print('ok, all initialized')\n\tif 0 :\n\t\tprint ('...GLOBAL_VARIABLES :') #probalby have to restore from checkpoint first\n\t\tall_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n\t\tfor v in all_vars:\n\t\t\tv_ = sess.run(v)\n\t\t\tprint(v_)\n\n\tif 0 :\n\t\tfor v in [\"s_w1:0\", \"s_b1:0\", \"s_w2:0\", \"s_b2:0\", 
\"s_W_fc1:0\", \"s_b_fc1:0\", \"s_W_fc2:0\", \"s_b_fc2:0\"] :\n\t\t\tprint(tf.get_default_graph().get_tensor_by_name(v))\n\t\t\tprint(sess.run(tf.get_default_graph().get_tensor_by_name(v)))\n\n\n\tif 1 :\n\t\tfor v in [\"s_h1:0\"] :\n\t\t\t#im = np.reshape(np.transpose(rimages[6]), [1,k_width*k_freqbins ])\n\t\t\tim=rimages[6]\n\t\t\tprint('assigning input variable an image with shape ' + str(im.shape))\n\t\t\tsess.run(styg[\"X\"].assign(im)) #transpose to make freqbins channels\n\t\t\tprint(tf.get_default_graph().get_tensor_by_name(v))\n\t\t\tprint(sess.run(tf.get_default_graph().get_tensor_by_name(v)))\n\n\n\n\tprint('predictions are : ')\n\tfor im_ in rimages :\n\t\t#im = np.reshape(np.transpose(im_), [1,k_width*k_freqbins ])\n\t\tim=im_\n\t\tsess.run(styg[\"X\"].assign(im)) #transpose to make freqbins channels\n\t\tprediction = sess.run(styg[\"softmax_preds\"])\n\t\tprint(str(prediction[0])) \n\t\t#predictions.extend(prediction[0])\n\n\n\t#pickledModel.save_image(np.transpose(im, [0,3,2,1])[0,:,:,0],'fooimage.tif')\n\tpickledModel.save_image(im[0,:,:,:],'fooimage.tif')\n\n" }, { "alpha_fraction": 0.5710201263427734, "alphanum_fraction": 0.5795976519584656, "avg_line_length": 44.75233459472656, "blob_id": "8f11dc8b32a6a19b7e64417e63acc65d1b60ed4f", "content_id": "7c82c4f3be323e4936e651a5b7217341095ee83e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9793, "license_type": "no_license", "max_line_length": 158, "num_lines": 214, "path": "/utils/ESC50_Convert.py", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n# https://github.com/librosa/librosa\nimport librosa\nimport librosa.display\n\nimport scipy\n\nfrom PIL import TiffImagePlugin\nfrom PIL import Image\nimport tiffspect\n\n# Set some project parameters\nK_SR = 22050\nK_FFTSIZE = 512 # also used for window length where that parameter is called for\nK_HOP = 
128\nK_DUR = 5.0 # make all files this duration\nK_FRAMEMULTIPLEOF = 4 # some programs like to have convinent dimensions for conv and decimation\n # the last columns of a matrix are removed if necessary to satisfy\n # 1 means any number of frames will work\n\n# location of subdirectories of ogg files organized by category\nK_OGGDIR = '/home/lonce/tflow/DATA-SETS/ESC-50'\n# location to write the wav files (converted from ogg)\nK_WAVEDIR = '/home/lonce/tflow/DATA-SETS/ESC-50-wave'\n# location to write the spectrogram files (converted from wave files)\nK_SPECTDIR = '/home/lonce/tflow/DATA-SETS/ESC-50-spect'\n\n#===============================================\n\ndef get_subdirs(a_dir):\n \"\"\" Returns a list of sub directory names in a_dir \"\"\" \n return [name for name in os.listdir(a_dir)\n if (os.path.isdir(os.path.join(a_dir, name)) and not (name.startswith('.')))]\n\ndef listDirectory(directory, fileExtList): \n \"\"\"Returns list of file info objects in directory that extension in the list fileExtList - include the . in your extension string\"\"\"\n fnameList = [os.path.normcase(f)\n for f in os.listdir(directory)\n if (not(f.startswith('.')))] \n fileList = [os.path.join(directory, f) \n for f in fnameList\n if os.path.splitext(f)[1] in fileExtList] \n return fileList , fnameList\n\ndef dirs2labelfile(parentdir, labelfile):\n \"\"\"takes subdirectories of parentdir and writes them, one per line, to labelfile\"\"\"\n namelist = get_subdirs(parentdir)\n #with open(labelfile, mode='wt', encoding='utf-8') as myfile:\n with open(labelfile, mode='wt') as myfile:\n myfile.write('\\n'.join(namelist))\n\n# ===============================================\n\ndef stereo2mono(data) :\n \"\"\" Combine 2D array into a single array, averaging channels \"\"\" \n \"\"\" Deprecated, since we use librosa for this now. 
\"\"\" \n print('converting stereo data of shape ' + str(data.shape))\n outdata=np.ndarray(shape=(data.shape[0]), dtype=np.float32)\n if data.ndim != 2 :\n print('You are calling stero2mono on a non-2D array')\n else : \n print(' converting stereo to mono, with outdata shape = ' + str(outdata.shape))\n for idx in range(data.shape[0]) :\n outdata[idx] = (data[idx,0]+data[idx,1])/2\n return outdata\n\n# ===============================================\n\ndef esc50Ogg2Wav (topdir, outdir, dur, srate) :\n \"\"\" \n Creates regularlized wave files for the ogg files in the ESC-50 dataset. \n Creates class folders for the wav files in outdir with the same structure found in topdir.\n \n Parameters\n topdir - the ESC-50 dir containing class folders. \n outdir - the top level directory to write wave files to (written in to class subfolders)\n dur - (in seconds) all files will be truncated or zeropadded to have this duration given the srate\n srate - input files will be resampled to srate as they are read in before being saved as wav files\n \"\"\" \n sample_length = int(dur * srate)\n try:\n os.stat(outdir) # test for existence\n except:\n os.mkdir(outdir) # create if necessary\n \n subdirs = get_subdirs(topdir)\n for subdir in subdirs :\n try:\n os.stat(outdir + '/' + subdir) # test for existence\n except:\n os.mkdir(outdir + '/' + subdir) # create if necessary\n print('creating ' + outdir + '/' + subdir)\n \n fullpaths, _ = listDirectory(topdir + '/' + subdir, '.ogg') \n for idx in range(len(fullpaths)) : \n fname = os.path.basename(fullpaths[idx])\n # librosa.load resamples to sr, clips to duration, combines channels. \n audiodata, samplerate = librosa.load(fullpaths[idx], sr=srate, mono=True, duration=dur) # resamples if necessary (some esc-50 files are in 48K)\n # just checking ..... 
\n if (samplerate != srate) :\n print('You got a sound file ' + subdir + '/' + fname + ' with sample rate ' + str(samplerate) + '!')\n print(' ********* BAD SAMPLE RATE ******** ')\n if (audiodata.ndim != 1) :\n print('You got a sound file ' + subdir + '/' + fname + ' with ' + str(audiodata.ndim) + ' channels!')\n audiodata = stereo2mono(audiodata)\n if (len(audiodata) > sample_length) :\n print('You got a long sound file ' + subdir + '/' + fname + ' with shape ' + str(audiodata.shape) + '!')\n audiodata = np.resize(audiodata, sample_length)\n # print(' ..... and len(audiodata) = ' + str(len(audiodata)) + ', while sample_length is sposed to be ' + str(sample_length))\n print('trimming data to shape ' + str(audiodata.shape))\n if (len(audiodata) < sample_length) :\n print('You got a short sound file ' + subdir + '/' + fname + ' with shape ' + str(audiodata.shape) + '!')\n audiodata = np.concatenate([audiodata, np.zeros((sample_length-len(audiodata)))])\n print(' zero padding data to shape ' + str(audiodata.shape))\n # write the file out as a wave file\n librosa.output.write_wav(outdir + '/' + subdir + '/' + os.path.splitext(fname)[0] + '.wav', audiodata, samplerate)\n\n# ===============================================\n\n\n\ndef wav2spect(fname, srate, fftSize, fftHop, dur=None, showplt=False, dcbin=True, framesmulitpleof=1) :\n try:\n audiodata, samplerate = librosa.load(fname, sr=srate, mono=True, duration=dur) \n except:\n print('can not read ' + fname)\n return\n \n S = np.abs(librosa.stft(audiodata, n_fft=fftSize, hop_length=fftHop, win_length=fftSize, center=False))\n\n if (dcbin == False) :\n S = np.delete(S, (0), axis=0) # delete freq 0 row\n #note: a pure DC input signal bleeds into bin 1, too.\n \n #trim the non-mulitple fat if necessary\n nr, nc = S.shape \n fat = nc%framesmulitpleof\n for num in range(0,fat):\n S = np.delete(S, (nc-1-num), axis=1)\n \n \n D = librosa.amplitude_to_db(S, ref=np.max)\n \n if showplt : # Dangerous for long runs - it opens a 
new figure for each file!\n librosa.display.specshow(D, y_axis='linear', x_axis='time', sr=srate, hop_length=fftHop)\n plt.colorbar(format='%+2.0f dB')\n plt.title(showplt)\n plt.show(block=True)\n \n return D\n# ===============================================\n\ndef esc50Wav2Spect(topdir, outdir, dur, srate, fftSize, fftHop, showplt=False, dcbin=True) :\n \"\"\" \n Creates spectrograms for subfolder-labeled wavfiles. \n Creates class folders for the spectrogram files in outdir with the same structure found in topdir.\n \n Parameters\n topdir - the dir containing class folders containing wav files. \n outdir - the top level directory to write wave files to (written in to class subfolders)\n dur - (in seconds) all files will be truncated or zeropadded to have this duration given the srate\n srate - input files will be resampled to srate as they are read in before being saved as wav files\n \"\"\" \n \n try:\n os.stat(outdir) # test for existence\n except:\n os.mkdir(outdir) # create if necessary\n \n subdirs = get_subdirs(topdir)\n count = 0\n for subdir in subdirs :\n try:\n os.stat(outdir + '/' + subdir) # test for existence\n except:\n os.mkdir(outdir + '/' + subdir) # create if necessary\n print('creating ' + outdir + '/' + subdir)\n \n fullpaths, _ = listDirectory(topdir + '/' + subdir, '.wav') \n \n for idx in range(len(fullpaths)) : \n fname = os.path.basename(fullpaths[idx])\n # librosa.load resamples to sr, clips to duration, combines channels. \n #\n #try:\n # audiodata, samplerate = librosa.load(fullpaths[idx], sr=srate, mono=True, duration=dur) \n #except:\n # print('can not read ' + fname)\n # \n #S = np.abs(librosa.stft(audiodata, n_fft=fftSize, hop_length=fftHop, win_length=fftSize, center=False))\n #\n #if (! 
dcbin) :\n # S = np.delete(S, (0), axis=0) # delete freq 0 row\n ##print('esc50Wav2Spect\" Sfoo max is ' + str(np.max(Sfoo)) + ', and Sfoo sum is ' + str(np.sum(Sfoo)) + ', and Sfoo min is ' + str(np.min(Sfoo)))\n #\n #\n #D = librosa.amplitude_to_db(S, ref=np.max)\n D = wav2spect(fullpaths[idx], srate, fftSize, fftHop, dur=dur, dcbin=True, showplt=False, framesmulitpleof=K_FRAMEMULTIPLEOF)\n \n #plt.title(str(count) + ': ' + subdir + '/' + os.path.splitext(fname)[0]) \n \n tiffspect.logSpect2Tiff(D, outdir + '/' + subdir + '/' + os.path.splitext(fname)[0] + '.tif')\n \n print(str(count) + ': ' + subdir + '/' + os.path.splitext(fname)[0])\n count +=1\n \n# ===============================================\n\n# DO IT\n#esc50Ogg2Wav(K_OGGDIR, K_WAVEDIR, K_DUR, K_SR)\n#esc50Wav2Spect(K_WAVEDIR, K_SPECTDIR, K_DUR, K_SR, K_FFTSIZE, K_HOP, dcbin=True) \ndirs2labelfile(K_SPECTDIR, K_SPECTDIR + '/labels.text')\n\n\n" }, { "alpha_fraction": 0.3589743673801422, "alphanum_fraction": 0.38461539149284363, "avg_line_length": 25, "blob_id": "4c35293baddac035b599cda25c235d3b2290865a", "content_id": "6cbf9ed79e4058365a752049cbbbc097903065ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 390, "license_type": "no_license", "max_line_length": 174, "num_lines": 15, "path": "/foo1.sh", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "#!/bin/bash \n\norientationArray=(height)\n\nfor orientation in ${orientationArray[@]}\ndo\n if [ \"$orientation\" == \"channels\" ]\n then\n\tl1channels=2048\n else\n\tl1channels=32\n fi\n echo \"l1 channels is $l1channels\"\n\ndone\n" }, { "alpha_fraction": 0.6952767968177795, "alphanum_fraction": 0.7125444412231445, "avg_line_length": 30.70967674255371, "blob_id": "8dc6795242f40aa17a5211af75dce7311fb0f9bd", "content_id": "b5ebd83fe40a8fe0b5f562e84650f91d7530e75c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1969, 
"license_type": "no_license", "max_line_length": 102, "num_lines": 62, "path": "/trainedModel.py", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "#\n#\n#Morgans great example code:\n#https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc\n#\n# GitHub utility for freezing graphs:\n#https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py\n#\n#https://www.tensorflow.org/api_docs/python/tf/graph_util/convert_variables_to_constants\n\n\nimport tensorflow as tf\nimport numpy as np\n\n#global variables \ng_st_saver=None\ng_chkptdir=None\ng_trainedgraph=None\n\nVERBOSE=1\n\n\n#-------------------------------------------------------------\n\ndef load(meta_model_file, restore_chkptDir) :\n\n\tglobal g_st_saver\n\tglobal g_chkptdir\n\tglobal g_trainedgraph\n\n\tg_st_saver = tf.train.import_meta_graph(meta_model_file)\n\t# Access the graph\n\tg_trainedgraph = tf.get_default_graph()\n\n\twith tf.Session() as sess:\n\t\tg_chkptdir=restore_chkptDir # save in global for use during initialize\n\t\t#g_st_saver.restore(sess, tf.train.latest_checkpoint(restore_chkptDir))\n\n\n\n\treturn g_trainedgraph, g_st_saver\n\ndef initialize_variables(sess) :\n\tg_st_saver.restore(sess, tf.train.latest_checkpoint(g_chkptdir))\n\n\ttf.GraphKeys.USEFUL = 'useful'\n\tvar_list = tf.get_collection(tf.GraphKeys.USEFUL)\n\n\t#print('var_list[3] is ' + str(var_list[3]))\n\t\n\n\t#JUST WANTED TO TEST THIS TO COMPARE TO STYLE MODEL CODE\n\t# Now get the values of the trained graph in to the new style 
graph\n\t#sess.run((g_trainedgraph.get_tensor_by_name(\"w1:0\")).assign(var_list[3]))\n\t#sess.run(g_trainedgraph.get_tensor_by_name(\"b1:0\").assign(var_list[4]))\n\t#sess.run(g_trainedgraph.get_tensor_by_name(\"w2:0\").assign(var_list[5]))\n\t#sess.run(g_trainedgraph.get_tensor_by_name(\"b2:0\").assign(var_list[6]))\n\n\t#sess.run(g_trainedgraph.get_tensor_by_name(\"W_fc1:0\").assign(var_list[7]))\n\t#sess.run(g_trainedgraph.get_tensor_by_name(\"b_fc1:0\").assign(var_list[8]))\n\t#sess.run(g_trainedgraph.get_tensor_by_name(\"W_fc2:0\").assign(var_list[9]))\n\t#sess.run(g_trainedgraph.get_tensor_by_name(\"b_fc2:0\").assign(var_list[10]))\n\n\t\n" }, { "alpha_fraction": 0.6037735939025879, "alphanum_fraction": 0.6037735939025879, "avg_line_length": 16.33333396911621, "blob_id": "e19f79854bac34fd7be9a1eadbeb334063799027", "content_id": "3874fa0b1b04f33120d31e691c39671e2537d765", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 53, "license_type": "no_license", "max_line_length": 19, "num_lines": 3, "path": "/clean.sh", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "#!/bin/bash \nrm -r checkpoints/*\nrm -r log_graph/*\n\n" }, { "alpha_fraction": 0.3914027214050293, "alphanum_fraction": 0.418552041053772, "avg_line_length": 26.5625, "blob_id": "003f8e42dff7991e14d80d1a9251cf21bddcc9f5", "content_id": "9e782cc5dd1fdd6a5d4b74513fc1a29c82b5f0b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 442, "license_type": "no_license", "max_line_length": 174, "num_lines": 16, "path": "/foo.sh", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "#!/bin/bash \n\ncontentArray=(BeingRural5.0 agf5.0 Nancarrow5.0)\nstyleArray=(BeingRural5.0 agf5.0 Nancarrow5.0)\n\nfor content in ${contentArray[@]}\ndo\n for style in ${styleArray[@]}\n do\n\tif [ \"$style\" == \"$content\" ]\n\tthen\n\t continue\n\tfi\n\techo $content $style\n 
done\ndone\n\n" }, { "alpha_fraction": 0.41358935832977295, "alphanum_fraction": 0.43279173970222473, "avg_line_length": 44.106666564941406, "blob_id": "16e9f85cc7283bce97dd2a5ea5ecbeca1d51d9d5", "content_id": "f3d9015f0b8ddf7d9b3f2c2bfe27167e1a128aed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3385, "license_type": "no_license", "max_line_length": 182, "num_lines": 75, "path": "/run50.sh", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "#!/bin/bash \n# To store logs and see both stderr and stdout on the screen: \n# nohup ./run50.sh logs >>logs/multilog.txt 2>&1 & \n# Individual logs will also still get stored in their respective directories \n \nsource activate tflow2a\nDATE=`date +%Y.%m.%d`\necho $DATE\n#maindir=logs.$DATE\n#mkdir $maindir\n\nif [ $# -eq 0 ]\n then\n echo \"please supply output directory as a command line argument\"\n exit\nfi\n\nmaindir=$1\nmkdir $maindir\n\nepsilon=1.0\noptimizer=adam\nlearningrate=.01\norientationArray=(height)\nlayers=2\nmtl=0\n\nindir=data50Q\n\nl1channels=0 # SET CONDITIONALLY BELOW\nl2channelsArray=(64)\nfcsize=32\nbnArray=(0) \n\nfor orientation in ${orientationArray[@]}\ndo\n if [ \"$orientation\" == \"channels\" ]\n then\n\tl1channels=2048\n else\n\tl1channels=32\n fi\n echo \"l1 channels is $l1channels\"\n\n\n for l2channels in ${l2channelsArray[@]}\n do\n for bn in ${bnArray[@]}\n do\n #make output dir for paramter settings \n echo \" ------- new batch run --------\"\n OUTDIR=\"$maindir/l1r_${l1channels}.l2_${l2channels}.fc_${fcsize}.or_${orientation}.bn_${bn}\"\n mkdir $OUTDIR\n echo \"outdir is \" $OUTDIR\n\n #keep a copy of this run file \n me=`basename \"$0\"`\n cp $me $OUTDIR\n\n #make subdirs for logging and checkpoints \n mkdir \"$OUTDIR/log_graph\"\n mkdir \"$OUTDIR/checkpoints\"\n mkdir \"$OUTDIR/stderr\"\n # wrap python call in a string so we can do our fancy redirecting below \n runcmd='python DCNSoundClass.py --outdir 
$OUTDIR --checkpointing 1 --checkpointPeriod 500 --indir ${indir} '\n runcmd+=' --freqbins 513 --numFrames 424 --convRows 9 '\n runcmd+=' --numClasses 50 --batchsize 20 --n_epochs 100 --learning_rate ${learningrate} --batchnorm ${bn}'\n runcmd+=' --keepProb .5 --l1channels ${l1channels} --l2channels ${l2channels} --fcsize ${fcsize} --freqorientation ${orientation} '\n runcmd+=' --numconvlayers ${layers} --adamepsilon ${epsilon} --optimizer ${optimizer} --mtlnumclasses ${mtl}'\n # direct stdout and sterr from each run into their proper directories, but tww so we can still watch \n echo \"---------- now run!!!\"\n eval $runcmd > >(tee $OUTDIR/log.txt) 2> >(tee $OUTDIR/stderr/stderr.log >&2)\n done\n done\ndone\n\n\n" }, { "alpha_fraction": 0.45731425285339355, "alphanum_fraction": 0.4946931302547455, "avg_line_length": 42.34000015258789, "blob_id": "e427c3c363d2b92568f586d1c9776ec30cd9c52b", "content_id": "71217bb1e978d5aa3a9abed76edcb1e2f58738eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2167, "license_type": "no_license", "max_line_length": 163, "num_lines": 50, "path": "/runstyle.sh", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "#!/bin/bash \n# nohup ./runstyle.sh >>styleout/2017.05.02/multilog.txt 2>&1 & \n# Individual logs will also still get stored in their respective directories \nsource activate tflow2\nDATE=`date +%Y.%m.%d`\nmaindir=styleout/$DATE\nmkdir $maindir\n\nstatefile=testmodel/state.pickle\niter=3000\nalpha=10\nbetaArray=(2 10)\nnoiseArray=(.2 .7)\nrand=0\n#contentArray=(BeingRural5.0 agf5.0 Superstylin5.0 roosters5.0 Nancarrow5.0 Toys5.0 inc5.0 sheepfarm5.0)\n#styleArray=(BeingRural5.0 agf5.0 Superstylin5.0 roosters5.0 Nancarrow5.0 Toys5.0 inc5.0 sheepfarm5.0)\ncontentArray=(Superstylin5.0 agf5.0 wavenetbabble5.0 Toys5.0 inc5.0 Nancarrow5.0)\nstyleArray=(Superstylin5.0 agf5.0 wavenetbabble5.0 Toys5.0 inc5.0 Nancarrow5.0)\n\nfor noise in ${noiseArray[@]}\ndo\n 
for beta in ${betaArray[@]}\n do\n for content in ${contentArray[@]}\n do\n\t for style in ${styleArray[@]}\n\t do\n\n\t if [ \"$style\" == \"$content\" ]\n\t then\n\t\t continue\n\t fi\n\t #make output dir for paramter settings \n\t echo \" ------- new batch run --------\"\n\t OUTDIR=\"$maindir/content_${content}.style_${style}.beta_${beta}.noise_${noise}\"\n\t mkdir $OUTDIR\n\t echo \"outdir is \" $OUTDIR\n\n\t #make subdirs for logging and checkpoints \n\n\t mkdir \"$OUTDIR/log_graph\"\n\t mkdir \"$OUTDIR/checkpoints\"\n\n\t runcmd='python style_transfer.py --content ${content} --style ${style} --noise ${noise} --outdir $OUTDIR '\n\t runcmd+='--stateFile ${statefile} --iter $iter --alpha ${alpha} --beta ${beta} --randomize ${rand}'\n\t eval $runcmd > >(tee $OUTDIR/log.txt) 2> >(tee $OUTDIR.stderr.log >&2)\n\t done\n done\n done\ndone\n" }, { "alpha_fraction": 0.6348379850387573, "alphanum_fraction": 0.6892361044883728, "avg_line_length": 33.5533332824707, "blob_id": "5539ba28ed83d5dc833829d6a8cc70bb2a572e62", "content_id": "506540a295dd8469da776d64bbafd1f0d33d2ee5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5184, "license_type": "no_license", "max_line_length": 170, "num_lines": 150, "path": "/testTrainedModel.py", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "\"\"\"\neg \npython testModel.py logs.2017.04.28/mtl_2.or_channels.epsilon_1.0/my-model.meta logs.2017.04.28/mtl_2.or_channels.epsilon_1.0/checkpoints/\n\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport trainedModel\n\nfrom PIL import TiffImagePlugin\nfrom PIL import Image\n\n# get args from command line\nimport argparse\nFLAGS = None\n\nVERBOSE=False\n# ------------------------------------------------------\n# get any args provided on the command line\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('metamodel', type=str, help='stored graph' ) 
\nparser.add_argument('checkptDir', type=str, help='the checkpoint directory from where the latest checkpoint will be read to restore values for variables in the graph' ) \nFLAGS, unparsed = parser.parse_known_args()\n\nk_freqbins=257\nk_width=856\n\ng, savior = trainedModel.load(FLAGS.metamodel, FLAGS.checkptDir)\n\n\n#vnamelist =[n.name for n in tf.global_variables()]\nif VERBOSE : \n\tvnamelist =[n.name for n in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]\n\tprint('TRAINABLE vars:')\n\tfor n in vnamelist :\n\t\tprint(n)\n\n\n#opslist = [n.name for n in g.get_operations()] \n#print('----Operatios in graph are : ' + str(opslist))\ntf.GraphKeys.USEFUL = 'useful'\n\nif VERBOSE : \n\tprint ('...and useful :') #probalby have to restore from checkpoint first\n\tall_vars = tf.get_collection(tf.GraphKeys.USEFUL)\n\tfor v in all_vars:\n\t\tprint(v)\n\n#\n#print(' here we go ........')\n\n\nvar_list = tf.get_collection(tf.GraphKeys.USEFUL)\n\n####tf.add_to_collection(tf.GraphKeys.USEFUL, X) #input place holder\n####tf.add_to_collection(tf.GraphKeys.USEFUL, keepProb) #place holder\n####tf.add_to_collection(tf.GraphKeys.USEFUL, softmax_preds)\n####tf.add_to_collection(tf.GraphKeys.USEFUL, h1)\n####tf.add_to_collection(tf.GraphKeys.USEFUL, h2)\n\n#X = g.get_tensor_by_name('X/Adam:0')# placeholder for input\n#X = tf.placeholder(tf.float32, [None,k_freqbins*k_width], name= \"X\")\nX=var_list[0]\n#print('X is ' + str(X))\n\n#keepProb = g.get_tensor_by_name('keepProb')\n#keepProb=tf.placeholder(tf.float32, (), name= \"keepProb\")\nkeepProb=var_list[1]\n#print('keepProb is ' + str(keepProb))\n\n\nsoftmax_preds=var_list[2]\nassert softmax_preds.graph is tf.get_default_graph()\n\ndef soundfileBatch(slist) :\n\t# The training network scales to 255 and then flattens before stuffing into batches\n\treturn [np.array(Image.open(name).point(lambda i: i*255)).flatten() for name in slist ]\n\n\n#just test the validation set \n#Flipping and scaling seem to have almost no effect on 
the clasification accuracy\nrimages=soundfileBatch(['data2/validate/205 - Chirping birds/5-242490-A._11_.tif',\n\t'data2/validate/205 - Chirping birds/5-242491-A._12_.tif',\n\t'data2/validate/205 - Chirping birds/5-243448-A._14_.tif',\n\t'data2/validate/205 - Chirping birds/5-243449-A._15_.tif',\n\t'data2/validate/205 - Chirping birds/5-243450-A._15_.tif',\n\t'data2/validate/205 - Chirping birds/5-243459-A._13_.tif',\n\t'data2/validate/205 - Chirping birds/5-243459-B._13_.tif',\n\t'data2/validate/205 - Chirping birds/5-257839-A._10_.tif',\n\t'data2/validate/101 - Dog/5-203128-A._4_.tif',\n\t'data2/validate/101 - Dog/5-203128-B._5_.tif',\n\t'data2/validate/101 - Dog/5-208030-A._9_.tif',\n\t'data2/validate/101 - Dog/5-212454-A._4_.tif',\n\t'data2/validate/101 - Dog/5-213855-A._4_.tif',\n\t'data2/validate/101 - Dog/5-217158-A._2_.tif',\n\t'data2/validate/101 - Dog/5-231762-A._1_.tif',\n\t'data2/validate/101 - Dog/5-9032-A._12_.tif',\n\t])\n\n#rimages=np.random.uniform(0.,1., (3,k_freqbins*k_width))\n\n\n#print('got my image, ready to run!')\n\n#Z = tf.placeholder(tf.float32, [k_freqbins*k_width], name= \"Z\")\n#Y=tf.Variable(tf.truncated_normal([k_freqbins*k_width], stddev=0.1), name=\"Y\")\n#Y=tf.assign(Y,Z)\n\n#with tf.Session() as sess:\n#\tsess.run ( tf.global_variables_initializer ())\n#\tfoo = sess.run(Y, feed_dict={Z: rimage})\nprint(' here we go ........')\n\nnp.set_printoptions(precision=2)\nnp.set_printoptions(suppress=True)\n\nwith tf.Session() as sess:\n\t#sess.run ( tf.global_variables_initializer ())\n\t#savior.restore(sess, tf.train.latest_checkpoint(FLAGS.checkptDir))\n\ttrainedModel.initialize_variables(sess)\n\tif 0 :\n\t\tprint ('...GLOBAL_VARIABLES :') #probalby have to restore from checkpoint first\n\t\tall_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n\t\tfor v in all_vars:\n\t\t\tv_ = sess.run(v)\n\t\t\tprint(v_)\n\n\tif 0 :\n\t\tfor v in [\"w1:0\", \"b1:0\", \"w2:0\", \"b2:0\", \"W_fc1:0\", \"b_fc1:0\", \"W_fc2:0\", \"b_fc2:0\"] 
:\n\t\t\tprint(tf.get_default_graph().get_tensor_by_name(v))\n\t\t\tprint(sess.run(tf.get_default_graph().get_tensor_by_name(v)))\n\n\tif 1 :\n\t\tfor v in [\"h1:0\"] :\n\t\t\tim = np.reshape(rimages[6], [1,k_width*k_freqbins ])\n\t\t\tprint(tf.get_default_graph().get_tensor_by_name(v))\n\t\t\tprint(sess.run(tf.get_default_graph().get_tensor_by_name(v), feed_dict ={ X : im, keepProb : 1.0 }))\n\n\n\tprint('predictions are : ')\n\tfor im_ in rimages :\n\t\tim = np.reshape(im_, [1,k_width*k_freqbins ])\n\t\tprediction = sess.run(softmax_preds, feed_dict ={ X : im, keepProb : 1.0 })\n\t\tprint(str(prediction[0]))\n\n\n\t# Run the standard way .... in batches\n\t#predictions = sess.run(softmax_preds, feed_dict ={ X : rimages , keepProb : 1.0 })\n\t#print('predictions are : ')\n\t#print(str(predictions))\n\n" }, { "alpha_fraction": 0.6049220561981201, "alphanum_fraction": 0.6128043532371521, "avg_line_length": 37.18394470214844, "blob_id": "9e338785fece87a8b04bb6afec298d518132c47d", "content_id": "abfe2b961a5aa7bc2ead693995297121bd8f572c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11418, "license_type": "no_license", "max_line_length": 144, "num_lines": 299, "path": "/style_transfer.py", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "\n\"\"\" An implementation of the paper \"A Neural Algorithm of Artistic Style\"\nby Gatys et al. 
in TensorFlow.\n\nAuthor: Chip Huyen ([email protected])\nPrepared for the class CS 20SI: \"TensorFlow for Deep Learning Research\"\nFor more details, please read the assignment handout:\nhttp://web.stanford.edu/class/cs20si/assignments/a2.pdf\n\"\"\"\nfrom __future__ import print_function\nimport sys \n\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport pickledModel\n\n# get args from command line\nimport argparse\nFLAGS = []\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--content', type=str, help='name of file in content dir, w/o .ext' ) \nparser.add_argument('--style', type=str, help='name of file in style dir, w/o .ext' ) \nparser.add_argument('--noise', type=float, help='in range [0,1]', default=.5 ) \nparser.add_argument('--iter', type=int, help='number of iterations (on cpu, runtime is less than 1 sec/iter)', default=600 ) \nparser.add_argument('--alpha', type=float, help='amount to weight conent', default=10 ) \nparser.add_argument('--beta', type=float, help='amount to weight style', default=200 ) \nparser.add_argument('--randomize', type=int, help='0: use trained weights, 1: randomize model weights', choices=[0,1], default=0 ) \nparser.add_argument('--weightDecay', type=float, help='factor for L2 loss to keep vals in [0,255]', default=.01 ) \n\nparser.add_argument('--outdir', type=str, help='for output images', default=\".\" ) \nparser.add_argument('--stateFile', type=str, help='stored graph', default=None ) \n\nFLAGS, unparsed = parser.parse_known_args()\nprint('\\n FLAGS parsed : {0}'.format(FLAGS))\n\nif any(v is None for v in vars(FLAGS).values()) :\n print('All args are required with their flags. 
For help: python style_transfer --help')\n sys.exit()\n\n\nCHECKPOINTING=False\n\nFILETYPE = \".tif\"\n# parameters to manage experiments\nSTYLE = FLAGS.style\nCONTENT = FLAGS.content\nSTYLE_IMAGE = 'content/' + STYLE + FILETYPE\nCONTENT_IMAGE = 'content/' + CONTENT + FILETYPE\n\n # This seems to be the paramter that really controls the balance between content and style\n # The more noise, the less content\nNOISE_RATIO = FLAGS.noise # percentage of weight of the noise for intermixing with the content image\n\n# Layers used for style features. You can change this.\nSTYLE_LAYERS = ['h1', 'h2']\nW = [1.0, 2.0] # give more weights to deeper layers.\n\n# Layer used for content features. You can change this.\nCONTENT_LAYER = 'h2'\n\n#Relationship a/b is 1/20\nALPHA = FLAGS.alpha #content\nBETA = FLAGS.beta #style\n\nLOGDIR = FLAGS.outdir + '/log_graph'\t\t\t#create folder manually\nCHKPTDIR = FLAGS.outdir + '/checkpoints'\t\t# create folder manually\nOUTPUTDIR = FLAGS.outdir\n\nITERS = FLAGS.iter\nLR = 2.0\n\nWEIGHT_DECAY=FLAGS.weightDecay\n\ndef _create_range_loss(im) : \n over = tf.maximum(im-255, 0)\n under = tf.minimum(im, 0)\n out = tf.add(over, under)\n rangeloss = WEIGHT_DECAY*tf.nn.l2_loss(out)\n return rangeloss\n\n\ndef _create_content_loss(p, f):\n \"\"\" Calculate the loss between the feature representation of the\n content image and the generated image.\n \n Inputs: \n p, f are just P, F in the paper \n (read the assignment handout if you're confused)\n Note: we won't use the coefficient 0.5 as defined in the paper\n but the coefficient as defined in the assignment handout.\n Output:\n the content loss\n\n \"\"\"\n pdims=p.shape\n #print('p has dims : ' + str(pdims)) \n coef = np.multiply.reduce(pdims) # Hmmmm... 
maybe don't want to include the first dimension\n #this makes the loss 0!!!\n #return (1/4*coef)*tf.reduce_sum(tf.square(f-p))\n return tf.reduce_sum((f-p)**2)/(4*coef)\n\n\ndef _gram_matrix(F, N, M):\n \"\"\" Create and return the gram matrix for tensor F\n Hint: you'll first have to reshape F\n\n inputs: F: the tensor of all feature channels in a given layer\n N: number of features (channels) in the layer\n M: the total number of filters in each filter (length * height)\n\n F comes in as numchannels*length*height, and \n \"\"\"\n # We want to reshape F to be number of feaures (N) by the values in the feature array ( now represented in one long vector of length M) \n\n Fshaped = tf.reshape(F, (M, N))\n return tf.matmul(tf.transpose(Fshaped), Fshaped) # return G of size #channels x #channels\n\n\ndef _single_style_loss(a, g):\n \"\"\" Calculate the style loss at a certain layer\n Inputs:\n a is the feature representation of the real image\n g is the feature representation of the generated image\n Output:\n the style loss at a certain layer (which is E_l in the paper)\n\n Hint: 1. you'll have to use the function _gram_matrix()\n 2. we'll use the same coefficient for style loss as in the paper\n 3. 
a and g are feature representation, not gram matrices\n \"\"\"\n horizdim = 1 # recall that first dimension of tensor is minibatch size\n vertdim = 2\n featuredim = 3\n\n\n\n # N - number of features\n N = a.shape[featuredim] #a & g are the same shape\n # M - product of first two dimensions of feature map\n M = a.shape[horizdim]*a.shape[vertdim]\n\n #print(' N is ' + str(N) + ', and M is ' + str(M))\n \n # This is 'E' from the paper and the homework handout.\n # It is a scalar for a single layer\n diff = _gram_matrix(a, N, M)-_gram_matrix(g, N, M)\n sq = tf.square(diff)\n s=tf.reduce_sum(sq)\n return (s/(4*N*N*M*M))\n \n\ndef _create_style_loss(A, model):\n \"\"\" Return the total style loss\n \"\"\"\n n_layers = len(STYLE_LAYERS)\n # E has one dimension with length equal to the number of layers\n E = [_single_style_loss(A[i], model[STYLE_LAYERS[i]]) for i in range(n_layers)]\n\n ###############################\n ## TO DO: return total style loss\n return np.dot(W, E)\n ###############################\n\ndef _create_losses(model, input_image, content_image, style_image):\n print('_create_losses')\n with tf.variable_scope('loss') as scope:\n with tf.Session() as sess:\n sess.run(input_image.assign(content_image)) # assign content image to the input variable\n # model[CONTENT_LAYER] is a relu op\n p = sess.run(model[CONTENT_LAYER])\n\n content_loss = _create_content_loss(p, model[CONTENT_LAYER])\n\n with tf.Session() as sess:\n sess.run(input_image.assign(style_image))\n A = sess.run([model[layer_name] for layer_name in STYLE_LAYERS]) \n style_loss = _create_style_loss(A, model)\n\n reg_loss = _create_range_loss(model['X'])\n\n ##########################################\n ## TO DO: create total loss. 
\n ## Hint: don't forget the content loss and style loss weights\n total_loss = ALPHA*content_loss + BETA*style_loss + reg_loss\n ##########################################\n\n return content_loss, style_loss, total_loss\n\ndef _create_summary(model):\n \"\"\" Create summary ops necessary\n Hint: don't forget to merge them\n \"\"\"\n with tf.name_scope ( \"summaries\" ):\n tf.summary.scalar ( \"content loss\" , model['content_loss'])\n tf.summary.scalar ( \"style_loss\" , model['style_loss'])\n tf.summary.scalar ( \"total_loss\" , model['total_loss'])\n # because you have several summaries, we should merge them all\n # into one op to make it easier to manage\n return tf.summary.merge_all()\n\n\ndef train(model, generated_image, initial_image):\n \"\"\" Train your model.\n Don't forget to create folders for checkpoints and outputs.\n \"\"\"\n skip_step = 1\n with tf.Session() as sess:\n saver = tf.train.Saver()\n sess.run ( tf.global_variables_initializer ())\n print('initialize .....')\n writer = tf.summary.FileWriter(LOGDIR, sess.graph)\n ###############################\n print('Do initial run to assign image')\n sess.run(generated_image.assign(initial_image))\n if CHECKPOINTING :\n ckpt = tf.train.get_checkpoint_state(os.path.dirname(CHKPTDIR + '/checkpoint'))\n else :\n ckpt = False\n\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n initial_step = model['global_step'].eval()\n \n start_time = time.time()\n step_time=start_time\n for index in range(initial_step, ITERS):\n if index >= 5 and index < 20:\n skip_step = 10\n elif index >= 20:\n skip_step = 100\n \n sess.run(model['optimizer'])\n if (index + 1) % skip_step == 0:\n ###############################\n ## TO DO: obtain generated image and loss\n # following the optimazaiton step, calculate loss\n gen_image, total_loss, summary = sess.run([generated_image, model['total_loss'], \n model['summary_op']])\n \n ###############################\n #gen_image = gen_image + 
MEAN_PIXELS\n writer.add_summary(summary, global_step=index)\n print('Step {}\\n Sum: {:5.1f}'.format(index + 1, np.sum(gen_image)))\n print(' Loss: {:5.1f}'.format(sess.run(model['total_loss']))) #???????\n print(' Time: {}'.format(time.time() - step_time))\n step_time = time.time()\n\n filename = OUTPUTDIR + '/%d.tif' % (index)\n #pickledModel.save_image(np.transpose(gen_image[0][0]), filename)\n print('style_transfer: about to save image with shape ' + str(gen_image.shape))\n pickledModel.save_image(gen_image[0], filename)\n\n if (index + 1) % 20 == 0:\n saver.save(sess, CHKPTDIR + '/style_transfer', index)\n\n print(' TOTAL Time: {}'.format(time.time() - start_time))\n writer.close()\n\n#-----------------------------------\n\nprint('RUN MAIN')\n\nmodel=pickledModel.load(FLAGS.stateFile, FLAGS.randomize)\n\nprint('MODEL LOADED')\n\nmodel['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')\n\ncontent_image = pickledModel.loadImage(CONTENT_IMAGE)\nprint('content_image shape is ' + str(content_image.shape))\nprint('content_image max is ' + str(np.amax(content_image) ))\nprint('content_image min is ' + str(np.amin(content_image) ))\n\n#content_image = content_image - MEAN_PIXELS\nstyle_image = pickledModel.loadImage(STYLE_IMAGE)\nprint('style_image max is ' + str(np.amax(style_image) ))\nprint('style_image min is ' + str(np.amin(style_image) ))\n#style_image = style_image - MEAN_PIXELS\n\nprint(' NEXT, create losses')\nmodel['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model, \n model[\"X\"], content_image, style_image)\n###############################\n## TO DO: create optimizer\n## model['optimizer'] = ...\nmodel['optimizer'] = tf.train.AdamOptimizer(LR).minimize(model['total_loss'], var_list=[model[\"X\"]])\n###############################\nmodel['summary_op'] = _create_summary(model)\n\ninitial_image = pickledModel.generate_noise_image(content_image, NOISE_RATIO)\n#def train(model, generated_image, 
initial_image):\ntrain(model, model[\"X\"], initial_image)\n\n#if __name__ == '__main__':\n# main()\n" }, { "alpha_fraction": 0.6252787113189697, "alphanum_fraction": 0.6549609899520874, "avg_line_length": 36.76315689086914, "blob_id": "381a9a9f6a19e2a8902cc6bee1ed87cdd96c5a0a", "content_id": "1a90ac3d960dd658201bf9d31e0ebf51ddb2329c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7176, "license_type": "no_license", "max_line_length": 197, "num_lines": 190, "path": "/pickledModel.py", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "#\n#\n#Morgans great example code:\n#https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc\n#\n# GitHub utility for freezing graphs:\n#https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py\n#\n#https://www.tensorflow.org/api_docs/python/tf/graph_util/convert_variables_to_constants\n\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom PIL import TiffImagePlugin, ImageOps\nfrom PIL import Image\n\n\nimport pickle\n\ng_graph=None\n\n#k_freqbins=257\n#k_width=856\n\nVERBOSE=0\n\n#------------------------------------------------------------\n\n#global\n# gleaned from the parmeters in the pickle file; used to load images\nheight=0\nwidth=0\ndepth=0\n\n#-------------------------------------------------------------\n\ndef getShape(g, name) :\n\treturn g.get_tensor_by_name(name + \":0\").get_shape()\n\ndef loadImage(fname) :\n\t#transform into 1D width with frequbins in channel dimension (we do this in the graph in the training net, but not with this reconstructed net)\n\tif (height==1) : \n\t\treturn np.transpose(np.reshape(np.array(Image.open(fname).point(lambda i: i*255)), [1,depth,width,1]), [0,3,2,1]) \n\telse :\n\t\treturn np.reshape(np.array(Image.open(fname).point(lambda i: i*255)), [1,height,width,1])\n\n\ndef generate_noise_image(content_image, 
noise_ratio=0.6):\n\tprint('generate_noise_image with height=' + str(height) + ', width =' + str(width) + ', and depth =' + str(depth))\n\tnoise_image = np.random.uniform(-1, 1, (1, height, width, depth)).astype(np.float32)\n\tprint('noise_image shape is ' + str(noise_image.shape))\n\treturn noise_image * noise_ratio + content_image * (1. - noise_ratio)\n\n# Assumes caller puts image into the correct orientation\ndef save_image(image, fname, scaleinfo=None):\n\tprint('save_image: shape is ' + str(image.shape))\n\tif (height==1) : # orientation is freq bins in channels\n\t\tprint('saving image in channel orientation')\n\t\timage = np.transpose(image, [2,1,0])[:,:,0]\n\telse :\n\t\tprint('saving image in image orientation')\n\t\timage = image[:,:,0]\n\t\n\tprint('AFTER reshaping, save_image: shape is ' + str(image.shape))\n\n\n\t\n\tprint('image max is ' + str(np.amax(image) ))\n\tprint('image min is ' + str(np.amin(image) ))\n\t# Output should add back the mean pixels we subtracted at the beginning\n\n\t# [0,80db] -> [0, 255]\n\t# after style transfer, images range outside of [0,255].\n\t# To preserve scale, and mask low values, we shift by (255-max), then clip at 0 and then have all bins in the top 80dB.\n\timage = np.clip(image-np.amax(image)+255, 0, 255).astype('uint8')\n\n\tinfo = TiffImagePlugin.ImageFileDirectory()\n \n\tif (scaleinfo == None) :\n\t info[270] = '80, 0'\n\telse :\n\t info[270] = scaleinfo\n\n\t#scipy.misc.imsave(path, image)\n\n\tbwarray=np.asarray(image)/255.\n\n\tsavimg = Image.fromarray(np.float64(bwarray)) #==============================\n\tsavimg.save(fname, tiffinfo=info)\n\t#print('RGB2TiffGray : tiffinfo is ' + str(info))\n\treturn info[270] # just in case you want it for some reason\n \n\ndef constructSTModel(state, params) :\n\tglobal g_graph\n\tg_graph = {} \n\n\n\t#This is the variable that we will \"train\" to match style and content images.\n\t##g_graph[\"X\"] = tf.Variable(np.zeros([1,k_width*k_freqbins]), dtype=tf.float32, 
name=\"s_x_image\")\n\t##g_graph[\"x_image\"] = tf.reshape(g_graph[\"X\"], [1,k_height,k_width,k_inputChannels])\n\n\tg_graph[\"X\"] = tf.Variable(np.zeros([1,params['k_height'], params['k_width'], params['k_inputChannels']]), dtype=tf.float32, name=\"s_X\")\n\t\n\tg_graph[\"w1\"]=tf.constant(state[\"w1:0\"], name=\"s_w1\")\n\tg_graph[\"b1\"]=tf.constant(state[\"b1:0\"], name=\"s_b1\")\n\t#g_graph[\"w1\"]=tf.Variable(tf.truncated_normal(getShape( tg, \"w1\"), stddev=0.1), name=\"w1\")\n\t#g_graph[\"b1\"]=tf.Variable(tf.constant(0.1, shape=getShape( tg, \"b1\")), name=\"b1\")\n\t\n\t# tf.nn.relu(tf.nn.conv2d(x_image, w1, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') + b1, name=\"h1\")\n\tg_graph[\"h1\"]=tf.nn.relu(tf.nn.conv2d(g_graph[\"X\"], g_graph[\"w1\"], strides=[1, params['k_ConvStrideRows'], params['k_ConvStrideCols'], 1], padding='SAME') + g_graph[\"b1\"], name=\"s_h1\")\n\t# 2x2 max pooling\n\tg_graph[\"h1pooled\"] = tf.nn.max_pool(g_graph[\"h1\"], ksize=[1, params['k_poolRows'], 2, 1], strides=[1, params['k_poolStride'], 2, 1], padding='SAME', name=\"s_h1_pooled\")\n\n\tg_graph[\"w2\"]=tf.constant(state[\"w2:0\"], name=\"s_w2\")\n\tg_graph[\"b2\"]=tf.constant(state[\"b2:0\"], name=\"s_b2\")\n\t#g_graph[\"w2\"]=tf.Variable(tf.truncated_normal(getShape( tg, \"w2\"), stddev=0.1), name=\"w2\")\n\t#g_graph[\"b2\"]=tf.Variable(tf.constant(0.1, shape=getShape( tg, \"b2\")), name=\"b2\")\n\n\tg_graph[\"h2\"]=tf.nn.relu(tf.nn.conv2d(g_graph[\"h1pooled\"], g_graph[\"w2\"], strides=[1, params['k_ConvStrideRows'], params['k_ConvStrideCols'], 1], padding='SAME') + g_graph[\"b2\"], name=\"s_h2\")\n\n\tg_graph[\"h2pooled\"] = tf.nn.max_pool(g_graph[\"h2\"], ksize=[1, params['k_poolRows'], 2, 1], strides=[1, params['k_poolStride'], 2, 1], padding='SAME', name='s_h2_pooled')\n\tg_graph[\"convlayers_output\"] = tf.reshape(g_graph[\"h2pooled\"], [-1, params['k_downsampledWidth'] * params['k_downsampledHeight']*params['L2_CHANNELS']]) # to prepare it for 
multiplication by W_fc1\n\n#\n\tg_graph[\"W_fc1\"] = tf.constant(state[\"W_fc1:0\"], name=\"s_W_fc1\")\n\tg_graph[\"b_fc1\"] = tf.constant(state[\"b_fc1:0\"], name=\"s_b_fc1\")\n\n\t#g_graph[\"keepProb\"]=tf.placeholder(tf.float32, (), name= \"keepProb\")\n\t#g_graph[\"h_fc1\"] = tf.nn.relu(tf.matmul(tf.nn.dropout(g_graph[\"convlayers_output\"], g_graph[\"keepProb\"]), g_graph[\"W_fc1\"]) + g_graph[\"b_fc1\"], name=\"h_fc1\")\n\tg_graph[\"h_fc1\"] = tf.nn.relu(tf.matmul(g_graph[\"convlayers_output\"], g_graph[\"W_fc1\"]) + g_graph[\"b_fc1\"], name=\"s_h_fc1\")\n\n\n\t#Read out layer\n\tg_graph[\"W_fc2\"] = tf.constant(state[\"W_fc2:0\"], name=\"s_W_fc2\")\n\tg_graph[\"b_fc2\"] = tf.constant(state[\"b_fc2:0\"], name=\"s_b_fc2\")\n\n\n\tg_graph[\"logits_\"] = tf.matmul(g_graph[\"h_fc1\"], g_graph[\"W_fc2\"])\n\tg_graph[\"logits\"] = tf.add(g_graph[\"logits_\"] , g_graph[\"b_fc2\"] , name=\"s_logits\")\n\n\n\tg_graph[\"softmax_preds\"] = tf.nn.softmax(logits=g_graph[\"logits\"], name=\"s_softmax_preds\")\n\n\n\treturn g_graph\n\n# Create and save the picke file of paramters \ndef saveState(sess, vlist, parameters, fname) :\n\t# create object to stash tensorflow variables in\n\tstate={}\n\tfor v in vlist :\n\t\tstate[v.name] = sess.run(v)\n\n\t# combine state and parameters into a single object for serialization\n\tnetObject={\n\t\t'state' : state,\n\t\t'parameters' : parameters\n\t}\n\tpickle.dump(netObject, open( fname, \"wb\" ))\n\n\n# Load the pickle file of parameters\ndef load(pickleFile, randomize=0) :\n\tprint(' will read state from ' + pickleFile)\n\tnetObject=pickle.load( open( pickleFile, \"rb\" ) )\n\tstate = netObject['state']\n\tparameters = netObject['parameters']\n\n\tif randomize ==1 :\n\t\tprint('randomizing weights')\n\t\tfor n in state.keys():\n\t\t\tprint('shape of state[' + n + '] is ' + str(state[n].shape))\n\t\t\tstate[n] = .2* np.random.random_sample(state[n].shape).astype(np.float32) -.1\n\n\tfor p in parameters.keys() :\n\t\tprint('param[' + 
p + '] = ' + str(parameters[p]))\n\n\n\tglobal height\n\theight = parameters['k_height']\n\n\tglobal width \n\twidth = parameters['k_width']\n\n\tglobal depth\n\tdepth = parameters['k_inputChannels']\n\n\treturn constructSTModel(state, parameters)\n\n" }, { "alpha_fraction": 0.6744379997253418, "alphanum_fraction": 0.6881128549575806, "avg_line_length": 42.769344329833984, "blob_id": "b44ca46d9cea640a054b0f87246c641424dfd8e9", "content_id": "81b2ff3268f1e6916e2a8ec3373aea71c7affba5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29982, "license_type": "no_license", "max_line_length": 239, "num_lines": 685, "path": "/DCNSoundClass.py", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "\"\"\"\n\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport spectreader\nimport os\nimport time\nimport math\n\nimport pickledModel\n\n# get args from command line\nimport argparse\nFLAGS = None\n# ------------------------------------------------------\n# get any args provided on the command line\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--indir', type=str, help='directory holding TFRecords of data', default='.') \nparser.add_argument('--outdir', type=str, help='output directory for logging', default='.') \nparser.add_argument('--numClasses', type=int, help='number of classes in data', choices=[2,50], default=2) #default for testing\nparser.add_argument('--checkpointing', type=int, help='0/1 - used for both saving and starting from checkpoints', choices=[0,1], default=0)\nparser.add_argument('--checkpointPeriod', type=int, help='checkpoint every n batches', default=8) \n\nparser.add_argument('--freqbins', type=int, help='number of frequency bins in the spectrogram input', default=513) \nparser.add_argument('--numFrames', type=int, help='number of frames in the spectrogram input (must be divisible by 4)', default=424) 
\n\nparser.add_argument('--learning_rate', type=float, help='learning rate', default=.001) \nparser.add_argument('--batchsize', type=int, help='number of data records per training batch', default=8) #default for testing\nparser.add_argument('--n_epochs', type=int, help='number of epochs to use for training', default=2) #default for testing\nparser.add_argument('--keepProb', type=float, help='keep probablity for dropout before 1st fully connected layer during training', default=1.0) #default for testing\nparser.add_argument('--batchnorm', type=int, help='0/1 - to batchnorm or not to batchnorm', choices=[0,1], default=1)\n\n\nparser.add_argument('--freqorientation', type=str, help='freq as height or as channels', choices=[\"height\",\"channels\"], default=\"channels\") #default for testing\n\nparser.add_argument('--numconvlayers', type=int, help='number of convolutional layers', choices=[1,2], default=2) #default for testing\n\nparser.add_argument('--l1channels', type=int, help='Number of channels in the first convolutional layer', default=32) #default for testing\nparser.add_argument('--l2channels', type=int, help='Number of channels in the second convolutional layer (ignored if numconvlayers is 1)', default=64) #default for testing\nparser.add_argument('--fcsize', type=int, help='Dimension of the final fully-connected layer', default=32) #default for testing\n\nparser.add_argument('--convRows', type=int, help='size of conv kernernel in freq dimension if orientation is height (otherwise ignored)', default=5) #default for testing\nparser.add_argument('--convColumns', type=int, help='size of conv kernernel in temporal dimension ', default=5) #default for testing\n\nparser.add_argument('--optimizer', type=str, help='optimizer', choices=[\"adam\",\"gd\"], default=\"gd\") #default for testing\nparser.add_argument('--adamepsilon', type=float, help='epsilon param for adam optimizer', default=.1) \n\nparser.add_argument('--learnCondition', type=str, help='when to learn', 
choices=[\"always\",\"whenWrong\"], default=\"always\") #default for testing\n\nparser.add_argument('--mtlnumclasses', type=int, help='if nonzero, train using secondary classes (which must be stored in TFRecord files', default=0)\n\n\nFLAGS, unparsed = parser.parse_known_args()\nprint('\\n FLAGS parsed : {0}'.format(FLAGS))\n\n\n\n#HARD-CODED data-dependant parameters ------------------\n#dimensions of image (pixels)\nk_freqbins=FLAGS.freqbins\n\nk_height=1\t\t\t\t\t\t# default for freqs as channels\nk_inputChannels=k_freqbins\t\t# default for freqs as channels\n\nif FLAGS.freqorientation == \"height\" :\n\tk_height=k_freqbins\n\tk_inputChannels=1\n\nk_numFrames=FLAGS.numFrames\n\n#number of samples for training and validation\nk_numClasses=FLAGS.numClasses #determines wether to read mini data set in data2 or full dataset in data50\nvalidationSamples=8*k_numClasses\ntrainingSamples=32*k_numClasses\n\n\nk_mtlnumclasses=FLAGS.mtlnumclasses #only matters if K_MTK is not 0\n\n# ------------------------------------------------------\n# Define paramaters for the training\nlearning_rate = FLAGS.learning_rate\nk_batchsize = FLAGS.batchsize \nn_epochs = FLAGS.n_epochs #6 #NOTE: we can load from checkpoint, but new run will last for n_epochs anyway\n\n# ------------------------------------------------------\n# Define paramaters for the model \nK_NUMCONVLAYERS = FLAGS.numconvlayers\n\nL1_CHANNELS=FLAGS.l1channels\nL2_CHANNELS=FLAGS.l2channels\nFC_SIZE = FLAGS.fcsize\n\nk_downsampledHeight = 1\t\t\t# default for freqs as channels\nif FLAGS.freqorientation == \"height\" :\n\t# see https://www.tensorflow.org/api_guides/python/nn#convolution for calculating size from strides and padding\n\tk_downsampledHeight = int(math.ceil(math.ceil(k_height/2.)/2.))# k_height/4 #in case were using freqs as y dim, and conv layers = 2\n\tprint(':::::: k_downsampledHeight is ' + str(k_downsampledHeight))\n\nk_downsampledWidth = k_numFrames/4 # no matter what the orientation - freqs as channels or 
as y dim\nk_convLayerOutputChannels = L2_CHANNELS\nif (K_NUMCONVLAYERS == 1) :\n\tk_downsampledWidth = k_numFrames/2\n\tk_convLayerOutputChannels = L1_CHANNELS\n\tif FLAGS.freqorientation == \"height\" :\n\t\tk_downsampledHeight = int(math.ceil(k_height/2.)) # k_height/2 #in case were using freqs as y dim, and conv layers = 1\n\nprint(':::::: k_downsampledHeight is ' + str(k_downsampledHeight))\nprint(':::::: k_downsampledWidth is ' + str(k_downsampledWidth))\n\nK_ConvRows=1 # default for freqs as channels\nif FLAGS.freqorientation == \"height\" :\n\tK_ConvRows=FLAGS.convRows\n\t\nK_ConvCols=FLAGS.convColumns\nk_ConvStrideRows=1\nk_ConvStrideCols=1\n\nk_poolRows = 1 # default for freqs as channels\nk_poolStrideRows = 1 # default for freqs as channels\nif FLAGS.freqorientation == \"height\" :\n\tk_poolRows = 2\n\tk_poolStrideRows = 2 \n\n\n\nk_keepProb=FLAGS.keepProb\n\nk_OPTIMIZER=FLAGS.optimizer\nk_adamepsilon = FLAGS.adamepsilon\n\nLEARNCONDITION = FLAGS.learnCondition\n\n# ------------------------------------------------------\n# Derived parameters for convenience (do not change these)\nk_vbatchsize = min(validationSamples, k_batchsize)\nk_numVBatches = validationSamples/k_vbatchsize\nprint(' ------- For validation, will run ' + str(k_numVBatches) + ' batches of ' + str(k_vbatchsize) + ' datasamples')\n\n#ESC-50 dataset has 50 classes of 40 sounds each\nk_batches_per_epoch = k_numClasses*40/k_batchsize\nk_batchesPerLossReport= k_batches_per_epoch #writes loss to the console every n batches\nprint(' ----------will write out report every ' + str(k_batchesPerLossReport) + ' batches')\n#k_batchesPerLossReport=1 #k_batches_per_epoch\n\n# Create list of paramters for serializing so that network can be properly reconstructed, and for documentation purposes\nparameters={\n\t'k_height' : k_height, \n\t'k_numFrames' : k_numFrames, \n\t'k_inputChannels' : k_inputChannels, \n\t'K_NUMCONVLAYERS' : K_NUMCONVLAYERS, \n\t'L1_CHANNELS' : L1_CHANNELS, \n\t'L2_CHANNELS' : 
L2_CHANNELS, \n\t'FC_SIZE' : FC_SIZE, \n\t'K_ConvRows' : K_ConvRows, \n\t'K_ConvCols' : K_ConvCols, \n\t'k_ConvStrideRows' : k_ConvStrideRows, \n\t'k_ConvStrideCols' : k_ConvStrideCols, \n\t'k_poolRows' : k_poolRows, \n\t'k_poolStrideRows' : k_poolStrideRows, \n\t'k_downsampledHeight' : k_downsampledHeight, \n\t'k_downsampledWidth' : k_downsampledWidth,\n\t'freqorientation' : FLAGS.freqorientation\n}\n# ------------------------------------------------------\n#Other non-data, non-model params\nCHECKPOINTING=FLAGS.checkpointing\nk_checkpointPeriod = FLAGS.checkpointPeriod # in units of batches\n\nINDIR = FLAGS.indir\nOUTDIR = FLAGS.outdir\n\nCHKPOINTDIR = OUTDIR + '/checkpoints' # create folder manually\nCHKPTBASE = CHKPOINTDIR + '/model.ckpt'\t# base name used for checkpoints\nLOGDIR = OUTDIR + '/log_graph'\t\t\t#create folder manually\n#OUTPUTDIR = i_outdir\n\nNUM_THREADS = 4 #used for enqueueing TFRecord data \n#=============================================\n\ndef getImage(fnames, nepochs=None, mtlclasses=0) :\n \"\"\" Reads data from the prepaired *list* files in fnames of TFRecords, does some preprocessing \n params:\n fnames - list of filenames to read data from\n nepochs - An integer (optional). Just fed to tf.string_input_producer(). Reads through all data num_epochs times before generating an OutOfRange error. None means read forever.\n \"\"\"\n if mtlclasses : \n \tlabel, image, mtlabel = spectreader.getImage(fnames, nepochs, mtlclasses)\n else : \n \tlabel, image = spectreader.getImage(fnames, nepochs)\n\n #same as np.flatten\n # I can't seem to make shuffle batch work on images in their native shapes.\n image=tf.reshape(image,[k_freqbins*k_numFrames])\n\n # re-define label as a \"one-hot\" vector \n # it will be [0,1] or [1,0] here. 
\n # This approach can easily be extended to more classes.\n label=tf.stack(tf.one_hot(label-1, k_numClasses))\n\n if mtlclasses :\n \tmtlabel=tf.stack(tf.one_hot(mtlabel-1, mtlclasses))\n \treturn label, image, mtlabel\n else :\n \treturn label, image\n\ndef get_datafiles(a_dir, startswith):\n \"\"\" Returns a list of files in a_dir that start with the string startswith.\n e.g. e.g. get_datafiles('data', 'train-') \n \"\"\" \n return [a_dir + '/' + name for name in os.listdir(a_dir)\n if name.startswith(startswith)]\n\ndef batch_norm(x, is_trainingP, scope):\n\twith tf.variable_scope(scope):\n\t\treturn tf.layers.batch_normalization(x,\n\t\t\t\taxis=3, # is this right? - our conv2D returns NHWC ordering? \n\t\t\t\tcenter=True, \n\t\t\t\tscale=True, \n\t\t\t\ttraining=is_trainingP,\n\t\t\t\tname=scope+\"_bn\")\n\n\n#=============================================\n# Step 1: Read in data\n\n# getImage reads data for enqueueing shufflebatch, shufflebatch manages it's own dequeing \n# ---- First set up the graph for the TRAINING DATA\nif k_mtlnumclasses : \n\ttarget, data, mtltargets = getImage(get_datafiles(INDIR, 'train-'), nepochs=n_epochs, mtlclasses=k_mtlnumclasses)\n\timageBatch, labelBatch, mtltargetBatch = tf.train.shuffle_batch(\n\t [data, target, mtltargets], batch_size=k_batchsize,\n\t num_threads=NUM_THREADS,\n\t allow_smaller_final_batch=True, #want to finish an eposh even if datasize doesn't divide by batchsize\n\t enqueue_many=False, #IMPORTANT to get right, default=False - \n\t capacity=1000, #1000,\n\t min_after_dequeue=500) #500\nelse :\n\ttarget, data = getImage(get_datafiles(INDIR, 'train-'), n_epochs)\n\timageBatch, labelBatch = tf.train.shuffle_batch(\n\t [data, target], batch_size=k_batchsize,\n\t num_threads=NUM_THREADS,\n\t allow_smaller_final_batch=True, #want to finish an eposh even if datasize doesn't divide by batchsize\n\t enqueue_many=False, #IMPORTANT to get right, default=False - \n\t capacity=1000, #1000,\n\t min_after_dequeue=500) 
#500\n\n\n# ---- same for the VALIDATION DATA\n# no need for mtl labels for validation\nvtarget, vdata = getImage(get_datafiles(INDIR, 'validation-')) # one \"epoch\" for validation\n\n#vimageBatch, vlabelBatch = tf.train.shuffle_batch(\n# [vdata, vtarget], batch_size=k_vbatchsize,\n# num_threads=NUM_THREADS,\n# allow_smaller_final_batch=True, #want to finish an eposh even if datasize doesn't divide by batchsize\n# enqueue_many=False, #IMPORTANT to get right, default=False - \n# capacity=1000, #1000,\n# min_after_dequeue=500) #500\n\nvimageBatch, vlabelBatch = tf.train.batch(\n [vdata, vtarget], batch_size=k_vbatchsize,\n num_threads=NUM_THREADS,\n allow_smaller_final_batch=False, #want to finish an eposh even if datasize doesn't divide by batchsize\n enqueue_many=False, #IMPORTANT to get right, default=False - \n capacity=1000)\n\n# Step 2: create placeholders for features (X) and labels (Y)\n# each lable is one hot vector.\n# 'None' here allows us to fill the placeholders with different size batches (which we do with training and validation batches)\n#X = tf.placeholder(tf.float32, [None,k_freqbins*k_numFrames], name= \"X\")\nX = tf.placeholder(tf.float32, [None,k_freqbins*k_numFrames], name= \"X\")\n\nif FLAGS.freqorientation == \"height\" :\n\tx_image = tf.reshape(X, [-1,k_height,k_numFrames,k_inputChannels]) \nelse :\n\tprint('set up reshaping for freqbins as channels')\n\tfoo1 = tf.reshape(X, [-1,k_freqbins,k_numFrames,1]) #unflatten (could skip this step if it wasn't flattenned in the first place!)\n\tx_image = tf.transpose(foo1, perm=[0,3,2,1]) #moves freqbins from height to channel dimension\n\nY = tf.placeholder(tf.float32, [None,k_numClasses], name= \"Y\") #labeled classes, one-hot\nMTLY = tf.placeholder(tf.float32, [None,k_mtlnumclasses], name= \"MTLY\") #labeled classes, one-hot \n\n# Step 3: create weights and bias\ntrainable=[]\n\n#Layer 1\n# 1 input channel, L1_CHANNELS output channels\nisTraining=tf.placeholder(tf.bool, (), name= \"isTraining\") 
#passed in feeddict to sess.runs\n\nw1=tf.Variable(tf.truncated_normal([K_ConvRows, K_ConvCols, k_inputChannels, L1_CHANNELS], stddev=0.1), name=\"w1\")\ntrainable.extend([w1])\n\nif (FLAGS.batchnorm==1) : \n\t#convolve Wx (w/o adding bias) then relu \n\tl1preactivation=tf.nn.conv2d(x_image, w1, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') \n\tbn1=batch_norm(l1preactivation, isTraining, \"batch_norm_1\")\n\th1=tf.nn.relu(bn1, name=\"h1\")\n\t# 2x2 max pooling\nelse : \n\t# convolve and add bias Wx+b\n\tb1=tf.Variable(tf.constant(0.1, shape=[L1_CHANNELS]), name=\"b1\")\n\ttrainable.extend([b1])\n\tl1preactivation=tf.nn.conv2d(x_image, w1, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') + b1\n\th1=tf.nn.relu(l1preactivation, name=\"h1\")\n\nh1pooled = tf.nn.max_pool(h1, ksize=[1, k_poolRows, 2, 1], strides=[1, k_poolStrideRows, 2, 1], padding='SAME')\n\n\nif K_NUMCONVLAYERS == 2 :\n\t#Layer 2\n\t#L1_CHANNELS input channels, L2_CHANNELS output channels\n\tw2=tf.Variable(tf.truncated_normal([K_ConvRows, K_ConvCols, L1_CHANNELS, L2_CHANNELS], stddev=0.1), name=\"w2\")\n\ttrainable.extend([w2])\n\n\tif (FLAGS.batchnorm==1) : \n\t\t#convolve (w/o adding bias) then norm \n\t\tl2preactivation= tf.nn.conv2d(h1pooled, w2, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') \n\t\tbn2=batch_norm(l2preactivation, isTraining, \"batch_norm_2\")\n\t\th2=tf.nn.relu(bn2, name=\"h2\")\n\telse :\n\t\tb2=tf.Variable(tf.constant(0.1, shape=[L2_CHANNELS]), name=\"b2\")\n\t\ttrainable.extend([b2])\n\t\tl2preactivation= tf.nn.conv2d(h1pooled, w2, strides=[1, k_ConvStrideRows, k_ConvStrideCols, 1], padding='SAME') + b2\n\t\th2=tf.nn.relu(l2preactivation, name=\"h2\")\n\n\n\twith tf.name_scope ( \"Conv_layers_out\" ):\n\t\th2pooled = tf.nn.max_pool(h2, ksize=[1, k_poolRows, 2, 1], strides=[1, k_poolStrideRows, 2, 1], padding='SAME', name='h2_pooled')\n\t\tprint('k_downsampledWidth = ' + str(k_downsampledWidth) + ', k_downsampledHeight 
= ' + str(k_downsampledHeight) + ', L2_CHANNELS = ' + str(L2_CHANNELS))\n\t\tprint('requesting a reshape of size ' + str(k_downsampledWidth * k_downsampledHeight*L2_CHANNELS))\n\t\tconvlayers_output = tf.reshape(h2pooled, [-1, k_downsampledWidth * k_downsampledHeight*L2_CHANNELS]) # to prepare it for multiplication by W_fc1\n\n\t#h2pooled is number of pixels / 2 / 2 (halved in size at each layer due to pooling)\n\t# check our dimensions are a multiple of 4\n\tif (k_numFrames%4) : # or ((FLAGS.freqorientation == \"height\") and k_height%4 )):\n\t\tprint ('Error: width and height must be a multiple of 4')\n\t\tsys.exit(1)\nelse :\n\tconvlayers_output = tf.reshape(h1pooled, [-1, k_downsampledWidth * k_downsampledHeight*L1_CHANNELS])\n\n#now do a fully connected layer: every output connected to every input pixel of each channel\nW_fc1 = tf.Variable(tf.truncated_normal([k_downsampledWidth * k_downsampledHeight * k_convLayerOutputChannels, FC_SIZE], stddev=0.1), name=\"W_fc1\")\nb_fc1 = tf.Variable(tf.constant(0.1, shape=[FC_SIZE]) , name=\"b_fc1\")\n\nkeepProb=tf.placeholder(tf.float32, (), name= \"keepProb\")\nfc1preactivation = tf.matmul(tf.nn.dropout(convlayers_output, keepProb), W_fc1) + b_fc1\nh_fc1 = tf.nn.relu(fc1preactivation, name=\"h_fc1\")\n\n#Read out layer\nW_fc2 = tf.Variable(tf.truncated_normal([FC_SIZE, k_numClasses], stddev=0.1), name=\"W_fc2\")\nb_fc2 = tf.Variable(tf.constant(0.1, shape=[k_numClasses]), name=\"b_fc2\")\n\ntrainable.extend([W_fc1, b_fc1, W_fc2, b_fc2])\n\nif k_mtlnumclasses : \n\t#MTL Read out layer - This is the only part of the net that is different for the secondary classes\n\tmtlW_fc2 = tf.Variable(tf.truncated_normal([FC_SIZE, k_mtlnumclasses], stddev=0.1), name=\"mtlW_fc2\")\n\tmtlb_fc2 = tf.Variable(tf.constant(0.1, shape=[k_mtlnumclasses]), name=\"mtlb_fc2\")\n\n\ttrainable.extend([mtlW_fc2, mtlb_fc2])\n\n# Step 4: build model\n# the model that returns the logits.\n# this logits will be later passed through softmax layer\n# to 
get the probability distribution of possible label of the image\n# DO NOT DO SOFTMAX HERE\n#could do a dropout here on h\nlogits_ = tf.matmul(h_fc1, W_fc2)\nlogits = tf.add(logits_ , b_fc2, name=\"logits\")\n\n\nif k_mtlnumclasses : \n\tmtllogits = tf.matmul(h_fc1, mtlW_fc2) + mtlb_fc2\n\n# Step 5: define loss function\n# use cross entropy loss of the real labels with the softmax of logits\n# returns a 1D tensor of length batchsize\nif LEARNCONDITION==\"whenWrong\" :\n\tsummaryloss_primary_raw = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)\n\n\tsmpreds = tf.nn.softmax(logits=logits, name=\"softmax_preds\")\n\t# argmax returns a batchsize tensor of type int64, batchsize tensor of booleans\n\t# equal returns a batchsize tensor of type boolean\n\twrong_preds = tf.not_equal(tf.argmax(smpreds, 1), tf.argmax(Y, 1))\n\t# ones where labe != max of softmax, tensor of length batchsize\n\twrongMask = tf.cast(wrong_preds, tf.float32) # need numpy.count_nonzero(boolarr) :(\n\tsummaryloss_primary = tf.multiply(summaryloss_primary_raw, wrongMask, name=\"wrongloss\")\nelse :\n\tsummaryloss_primary = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)\n\nmeanloss_primary = tf.reduce_mean(summaryloss_primary)\n\n\nif k_mtlnumclasses : \n\tsummaryloss_mtl = tf.nn.softmax_cross_entropy_with_logits(logits=mtllogits, labels=MTLY) \n\tmeanloss_mtl = tf.reduce_mean(summaryloss_mtl)\n\tmeanloss=meanloss_primary+meanloss_mtl\nelse : \n\tmeanloss=meanloss_primary\n\n\n\n#if k_mtlnumclasses :\n#\tmeanloss = tf.assign(meanloss, meanloss_primary + meanloss_mtl) #training thus depends on MTLYY in the feeddict if k_mtlnumclasses != 0\n#else :\n#\tmeanloss = tf.assign(meanloss, meanloss_primary)\n\n\n# Step 6: define training op\n# NOTE: Must save global step here if you are doing checkpointing and expect to start from step where you left off.\nglobal_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')\noptimizer=None\nif (k_OPTIMIZER == 
\"adam\") :\n\toptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=k_adamepsilon ).minimize(meanloss, var_list=trainable, global_step=global_step)\nif (k_OPTIMIZER == \"gd\") :\n\toptimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(meanloss, var_list=trainable, global_step=global_step)\nassert(optimizer)\n\n#Get the beta and gamma ops used for batchn ormalization since we have to update them explicitly during training\nextra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\nprint('extra update ops are ' + str(extra_update_ops))\n\n#---------------------------------------------------------------\n# VALIDATE\n#--------------------------------------------------------------\n# The nodes are used for running the validation data and getting accuracy scores from the logits\nwith tf.name_scope(\"VALIDATION\"):\n\tsoftmax_preds = tf.nn.softmax(logits=logits, name=\"softmax_preds\")\n\t# argmax returns a batchsize tensor of type int64, batchsize tensor of booleans\n\t# equal returns a batchsize tensor of type boolean\n\tcorrect_preds = tf.equal(tf.argmax(softmax_preds, 1), tf.argmax(Y, 1))\n\tbatchNumCorrect = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) # need numpy.count_nonzero(boolarr) :(\n\n\t# All this, just to feed a friggin float computed over several batches into a tensor we want to use for a summary\n\tvalidationtensor = tf.Variable(0.0, trainable=False, name=\"validationtensor\")\n\twtf = tf.placeholder(tf.float32, ())\n\tsummary_validation = tf.assign(validationtensor, wtf)\n\n#-----------------------------------------------------------------------------------\n# These will be available to other programs that want to use this trained net.\ntf.GraphKeys.USEFUL = 'useful'\ntf.add_to_collection(tf.GraphKeys.USEFUL, X) #input place holder\ntf.add_to_collection(tf.GraphKeys.USEFUL, keepProb) #place holder\ntf.add_to_collection(tf.GraphKeys.USEFUL, softmax_preds)\ntf.add_to_collection(tf.GraphKeys.USEFUL, 
w1)\nif (FLAGS.batchnorm==0) :\n\ttf.add_to_collection(tf.GraphKeys.USEFUL, b1)\ntf.add_to_collection(tf.GraphKeys.USEFUL, w2)\n\nif (FLAGS.batchnorm==0) :\n\ttf.add_to_collection(tf.GraphKeys.USEFUL, b2)\ntf.add_to_collection(tf.GraphKeys.USEFUL, W_fc1)\ntf.add_to_collection(tf.GraphKeys.USEFUL, b_fc1)\ntf.add_to_collection(tf.GraphKeys.USEFUL, W_fc2)\ntf.add_to_collection(tf.GraphKeys.USEFUL, b_fc2)\n\n\n\n#-----------------------------------------------------------------------------------\n\n\n# Run the validation set through the model and compute statistics to report as summaries\ndef validate(sess, printout=False) : \n\twith tf.name_scope ( \"summaries\" ):\n\t\t# test the model\n\t\ttotal_correct_preds = 0\n\n\t\ttry:\n\t\t\tfor i in range(k_numVBatches):\n\t\t\t\t\n\t\t\t\tX_batch, Y_batch = sess.run([vimageBatch, vlabelBatch])\n\t\t\t\tbatch_correct, predictions = sess.run([batchNumCorrect, softmax_preds], feed_dict ={ X : X_batch , Y : Y_batch, keepProb : 1., isTraining : False}) \n\t\t\t\t\n\t\t\t\ttotal_correct_preds += batch_correct\n\t\t\t\t#print (' >>>> Batch \" + str(i) + ' with batch_correct = ' + str(batch_correct) + ', and total_correct is ' + str(total_correct_preds))\n\n\t\t\t\tif printout:\n\t\t\t\t\tprint(' labels for batch:')\n\t\t\t\t\tprint(Y_batch)\n\t\t\t\t\tprint(' predictions for batch')\n\t\t\t\t\tprint(predictions)\n\t\t\t\t\t# print num correct for each batch\n\t\t\t\t\tprint(u'(Validation batch) num correct for batchsize of {0} is {1}'.format(k_vbatchsize , batch_correct))\n\n\n\t\t\tprint (u'(Validation EPOCH) num correct for EPOCH size of {0} ({1} batches) is {2}'.format(validationSamples , i+1 , total_correct_preds))\n\t\t\tprint('so the percent correction for validation set = ' + str(total_correct_preds/validationSamples))\n\n\t\t\tmsummary = sess.run(mergedvalidation, feed_dict ={ X : X_batch , Y : Y_batch, wtf : total_correct_preds/validationSamples, keepProb : 1., isTraining : False}) #using last batch to computer loss for 
summary\n\t\t\t\n\n\t\texcept Exception, e:\n\t\t\tprint e\n\n\t\treturn msummary\n\n\n#--------------------------------------------------------------\n# Visualize with Tensorboard\n# -------------------------------------------------------------\n\ndef create_train_summaries ():\n\t\twith tf.name_scope ( \"train_summaries\" ):\n\t\t\ttf.summary.scalar ( \"mean_loss\" , meanloss_primary)\n\t\t\ttf.summary.histogram (\"w_1\", w1)\n\t\t\ttf.summary.histogram (\"l1preactivation\", l1preactivation)\n\t\t\ttf.summary.histogram (\"h_1\", h1)\n\t\t\ttf.summary.histogram (\"w_2\", w2)\n\t\t\ttf.summary.histogram (\"l2preactivation\", l2preactivation)\n\t\t\ttf.summary.histogram (\"h_2\", h2)\n\t\t\ttf.summary.histogram (\"w_fc1\", W_fc1)\n\t\t\ttf.summary.histogram (\"fc1preactivation\", fc1preactivation)\n\t\t\ttf.summary.histogram (\"h_fc1\", h_fc1)\n\t\t\ttf.summary.histogram (\"w_fc2\", W_fc2)\n\n\t\t\treturn tf.summary.merge_all ()\n\nmergedtrain = create_train_summaries()\n\ndef create_validation_summaries ():\n\t\twith tf.name_scope ( \"validation_summaries\" ):\n\t\t\t#tf.summary.scalar ( \"validation_correct\" , batchNumCorrect)\n\t\t\ttf.summary.scalar ( \"summary_validation\", summary_validation)\n\t\t\treturn tf.summary.merge_all ()\n\nmergedvalidation = create_validation_summaries()\n\n# --------------------------------------------------------------\n# TRAIN\n#---------------------------------------------------------------\ndef trainModel():\n\n\twith tf.Session() as sess:\n\t\twriter = tf.summary.FileWriter(LOGDIR) # for logging\n\t\tsaver = tf.train.Saver() # for checkpointing\n\n\t\t#### Must run local initializer if nepochs arg to getImage is other than None!\n\t\t#sess.run(tf.local_variables_initializer())\n\t\tsess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))\n\n\t\t#not doing it here, but global_step could have been initialized by a checkpoint\n\t\tif CHECKPOINTING :\n\t\t\tckpt = 
tf.train.get_checkpoint_state(os.path.dirname(CHKPTBASE))\n\t\telse :\n\t\t\tckpt = False\n\t\tif ckpt and ckpt.model_checkpoint_path:\n\t\t\tprint('Checkpointing restoring from path ' + ckpt.model_checkpoint_path)\n\t\t\tsaver.restore(sess, ckpt.model_checkpoint_path)\n\t\telse:\n\t\t\t#only save graph if we are not starting run from a checkpoint\n\t\t\twriter.add_graph(sess.graph)\n\n \n\t\tinitial_step = global_step.eval()\n\t\tprint('initial step will be ' + str(initial_step)) # non-zero if check pointing\n\t\tbatchcount=initial_step\n\t\tstart_time = time.time()\n\t\t\n\t\t# Create a coordinator, launch the queue runner threads.\n\t\tcoord = tf.train.Coordinator()\n\t\tenqueue_threads = tf.train.start_queue_runners(sess=sess,coord=coord)\n\t\t\n\t\ttry:\n\t\t\tbatchcountloss = 0 #for reporting purposes\n\t\t\twhile True: # for each batch, until data runs out\n\t\t\t\tif coord.should_stop():\n\t\t\t\t\tbreak\n\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif k_mtlnumclasses :\n\t\t\t\t\tX_batch, Y_batch, MTLY_batch = sess.run([imageBatch, labelBatch, mtltargetBatch])\n\t\t\t\t\t_, loss_batch, _nada = sess.run([optimizer, meanloss, extra_update_ops], feed_dict ={ X : X_batch , Y : Y_batch, keepProb : k_keepProb, MTLY : MTLY_batch, isTraining : True}) #DO WE NEED meanloss HERE? Doesn't optimer depend on it? \n\t\t\t\telse :\n\t\t\t\t\tX_batch, Y_batch = sess.run([imageBatch, labelBatch])\n\t\t\t\t\t_, loss_batch, _nada = sess.run([optimizer, meanloss, extra_update_ops], feed_dict ={ X : X_batch , Y : Y_batch, keepProb : k_keepProb, isTraining : True}) #DO WE NEED meanloss HERE? 
Doesn't optimer depend on it?\n\n\t\t\t\tbatchcountloss += loss_batch\n\n\n\t\t\t\tbatchcount += 1\n\t\t\t\tif (not batchcount%k_batchesPerLossReport) :\n\t\t\t\t\tprint('batchcount = ' + str(batchcount))\n\t\t\t\t\tavgBatchLoss=batchcountloss/k_batchesPerLossReport\n\t\t\t\t\tprint(u'Average loss per batch {0}: {1}'.format(batchcount, avgBatchLoss))\n\t\t\t\t\tbatchcountloss=0\n\n\t\t\t\t\ttsummary = sess.run(mergedtrain, feed_dict ={ X : X_batch , Y : Y_batch, keepProb : 1.0, isTraining : False }) #?? keep prob ??\n\t\t\t\t\twriter.add_summary(tsummary, global_step=batchcount)\n\n\t\t\t\t\tvsummary=validate(sess)\n\t\t\t\t\twriter.add_summary(vsummary, global_step=batchcount)\n\n\n\t\t\t\tif not (batchcount % k_checkpointPeriod) :\n\t\t\t\t\tsaver.save(sess, CHKPTBASE, global_step=batchcount)\n\n\t\texcept tf.errors.OutOfRangeError, e: #done with training epochs. Validate once more before closing threads\n\t\t\t# So how, finally?\n\t\t\tprint('ok, let\\'s validate now that we\\'ve run ' + str(batchcount) + 'batches ------------------------------')\n\n\t\t\tvsummary=validate(sess, False)\n\t\t\twriter.add_summary(vsummary, global_step=batchcount+1)\n\n\n\t\t\tcoord.request_stop(e)\n\n\t\texcept Exception, e:\t\n\t\t\tprint('train: WTF')\n\t\t\tprint e\n\n\t\tfinally :\n\t\t\tcoord.request_stop()\n\t\t\tcoord.join(enqueue_threads)\n\t\t\twriter.close()\n\t\t\n\t\t# grab the total training time\n\t\ttotalruntime = time.time() - start_time\n\t\tprint 'Total training time: {0} seconds'.format(totalruntime)\n\t\tprint(' Finished!') # should be around 0.35 after 25 epochs\n\n\t\tprint(' now save meta model')\n\t\tmeta_graph_def = tf.train.export_meta_graph(filename=OUTDIR + '/my-model.meta')\n\t\tpickledModel.saveState(sess, trainable, parameters, OUTDIR + '/state.pickle') \n\n\t\tprint(' ===============================================================') \n\n#=============================================================================================\nprint(' ---- 
Actual parameters for this run ----')\nprint('INDIR : ' + INDIR)\nprint('k_freqbins : ' + str(k_freqbins) \n\t+ ' ' + 'k_numFrames: ' + str(k_numFrames) )\n#FLAGS.freqorientation, k_height, k_numFrames, k_inputChannels\nprint('FLAGS.freqorientation: ' + str(FLAGS.freqorientation) \n\t+ ', ' + 'k_height: ' + str(k_height) \n\t+ ', ' + 'k_numFrames: ' + str(k_numFrames) \n\t+ ', ' + 'k_inputChannels: ' + str(k_inputChannels))\n#k_numClasses, validationSamples, trainingSamples\nprint('k_numClasses: ' + str(k_numClasses)\n\t+ ', ' + 'validationSamples: ' + str(validationSamples)\n\t+ ', ' + 'trainingSamples: ' + str(trainingSamples))\n#learning_rate, k_keepProb, k_batchsize, n_epochs \nprint('learning_rate: ' + str(learning_rate)\n\t+ ', ' + 'k_keepProb: ' + str(k_keepProb)\n\t+ ', ' + 'k_batchsize: ' + str(k_batchsize)\n\t+ ', ' + 'n_epochs: ' + str(n_epochs))\n#K_NUMCONVLAYERS, L1_CHANNELS, L2_CHANNELS, FC_SIZE \nprint('K_NUMCONVLAYERS: ' + str(K_NUMCONVLAYERS)\n\t+ ', ' + 'L1_CHANNELS: ' + str(L1_CHANNELS)\n\t+ ', ' + 'L2_CHANNELS: ' + str(L2_CHANNELS)\n\t+ ', ' + 'FC_SIZE: ' + str(FC_SIZE))\n#k_downsampledHeight, k_downsampledWidth , k_convLayerOutputChannels \nprint('k_downsampledHeight: ' + str(k_downsampledHeight)\n\t+ ', ' + 'k_downsampledWidth: ' + str(k_downsampledWidth)\n\t+ ', ' + 'k_convLayerOutputChannels: ' + str(k_convLayerOutputChannels))\n#K_ConvRows, K_ConvCols, k_ConvStrideRows, k_ConvStrideCols, k_poolRows, k_poolStrideRows \nprint('K_ConvRows: ' + str(K_ConvRows)\n\t+ ', ' + 'K_ConvCols: ' + str(K_ConvCols)\n\t+ ', ' + 'k_ConvStrideRows: ' + str(k_ConvStrideRows)\n\t+ ', ' + 'k_ConvStrideCols: ' + str(k_ConvStrideCols)\n\t+ ', ' + 'k_poolRows: ' + str(k_poolRows)\n\t+ ', ' + 'k_poolStrideRows : ' + str(k_poolStrideRows ))\nif (k_OPTIMIZER == \"adam\") : \n\tprint('k_OPTIMIZER: ' + str(k_OPTIMIZER)\n\t+ ', ' + 'k_adamepsilon: ' + str(k_adamepsilon))\nelse :\n\tprint('k_OPTIMIZER: ' + str(k_OPTIMIZER))\n\nprint('LEARNCONDITION: ' + 
LEARNCONDITION)\nprint('batchnorm: ' + str(FLAGS.batchnorm))\nprint('k_mtlnumclasses: ' + str(k_mtlnumclasses))\n\n#OUTDIR\nprint('OUTDIR: ' + str(OUTDIR))\nprint('CHECKPOINTING: ' + str(CHECKPOINTING))\nprint(' vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ')\nfor x in trainable :\n\tprint(x.name + ' : ' + str(x.get_shape()))\nprint('TOTAL number of parameters in the model is ' + str(np.sum([np.product([xi.value for xi in x.get_shape()]) for x in trainable])))\nprint(' vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ')\n\n#=============================================================================================\n# Do it\ntrainModel()\n" }, { "alpha_fraction": 0.4702901840209961, "alphanum_fraction": 0.48410871624946594, "avg_line_length": 49.46511459350586, "blob_id": "d606667c17b8a1368be9e81c90872be2cfd6fb44", "content_id": "eb131eb7dabfdc477a170afae5dfed90165009ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2171, "license_type": "no_license", "max_line_length": 184, "num_lines": 43, "path": "/run2_ST.sh", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "#!/bin/bash \n# To store logs and see both stderr and stdout on the screen: \n# nohup ./run2.sh >>logs/multilog.txt 2>&1 & \n# Individual logs will also still get stored in their respective directories \nsource activate tflow2\nDATE=`date +%Y.%m.%d`\nmaindir=logs.$DATE\nmkdir $maindir\n\nnumconvlayers=2\nlearningrate=.01\noptimizer=adam\n\norientationArray=(channels)\nepsilonArray=(1.0)\n\nmtlArray=(16)\n\nfor mtl in ${mtlArray[@]}\ndo\n for orientation in ${orientationArray[@]}\n do\n for epsilon in ${epsilonArray[@]}\n do\n #make output dir for paramter settings \n echo \" ------- new batch run --------\"\n OUTDIR=\"$maindir/mtl_${mtl}.or_${orientation}.epsilon_${epsilon}\"\n mkdir $OUTDIR\n echo \"outdir is \" $OUTDIR\n\n #make subdirs for logging and checkpoints \n mkdir \"$OUTDIR/log_graph\"\n mkdir \"$OUTDIR/checkpoints\"\n # wrap 
python call in a string so we can do our fancy redirecting below\n runcmd='python DCNSoundClass.py --outdir $OUTDIR --checkpointing 1 --checkpointPeriod 10 '\n runcmd+='--numClasses 2 --batchsize 20 --n_epochs 10 --learning_rate ${learningrate} --keepProb .5 '\n runcmd+='--l1channels 64 --l2channels 32 --fcsize 32 --freqorientation ${orientation} '\n runcmd+='--adamepsilon ${epsilon} --optimizer ${optimizer} --numconvlayers ${numconvlayers} --mtlnumclasses ${mtl}'\n\t\t\t# direct stdout and sterr from each run into their proper directories, but tww so we can still watch\n \teval $runcmd > >(tee $OUTDIR/log.txt) 2> >(tee $OUTDIR.stderr.log >&2)\n done\n done\ndone\n\n" }, { "alpha_fraction": 0.6250640749931335, "alphanum_fraction": 0.6337775588035583, "avg_line_length": 38.0099983215332, "blob_id": "ece6573431328e33e4e33005d62c3e1216b9c7f8", "content_id": "ae625379366c70254d5b45a7baca1b33a5935141", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3902, "license_type": "no_license", "max_line_length": 135, "num_lines": 100, "path": "/utils/Centroid2ndaryClassMaker.py", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "import os \nimport re\n\nimport numpy as np\nimport math\nimport tiffspect\n\nimport librosa\nimport librosa.display\n\nimport matplotlib.pyplot as plt\n\nK_SPECTDIR = '/home/lonce/tflow/DATA-SETS/ESC-50-spect'\nk_soundsPerClass=125 # must divide the total number of sounds evenly!\n\n#============================================\n\ndef weightedCentroid(spect) :\n \"\"\"\n param: spect - a magnitude spectrum\n Returns the spectral centroid averaged over frames, and weighted by the rms of each frame\n \"\"\"\n cent = librosa.feature.spectral_centroid(S=spect)\n rms = librosa.feature.rmse(S=spect)\n avg = np.sum(np.multiply(cent, rms))/np.sum(rms)\n return avg\n\ndef log2mag(S) : \n \"\"\" Get your log magnitude spectrum back to magnitude\"\"\"\n return np.power(10, 
np.divide(S,20.))\n\ndef spectFile2Centroid(fname) :\n \"\"\" Our spect files are in log magnitude, and in tiff format\"\"\"\n D1, _ = tiffspect.Tiff2LogSpect(fname)\n D2 = log2mag(D1)\n return weightedCentroid(D2)\n#============================================\n\n# Next, some utilities for managing files\n#----------------------------------------\n\ndef fullpathfilenames(directory): \n '''Returns the full path to all files living in directory (the leaves in the directory tree)\n '''\n fnames = [os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(directory)) for f in fn]\n return fnames\n\ndef esc50files(directory, regexString) :\n filenames = fullpathfilenames(directory)\n return [fname for fname in filenames if re.match(regexString, fname)]\n\ndef addClass2Filename(fname, cname, action=\"move\") : \n newname = re.sub('.tif', '._'+ str(cname) + '_.tif', fname)\n if (action == \"move\") :\n os.rename(fname, newname)\n else :\n print(newname)\n \ndef filestats (filenames, func) :\n stats = [[fname, func(fname)] for fname in filenames]\n return stats\n\n#============================================\n\n\ndef createBalancedClassesWithFunc(topDirectory, regexString, func, numPerClass, action=\"move\") :\n \"\"\"\n Groups files in topDirectory matching regexString by the single number returned by func.\n Each group will have numPerClass files in it (the total number of files must be divisible by numPerClass)\n Renames them using their group index, gidx: origFilename.tif -> origFilename._gidx_.tif\n if action=\"move, files are renames. 
Otherwise, the new names are just printed to console.\n \"\"\"\n wholelist=esc50files(topDirectory, regexString)\n stats = filestats(wholelist, func)\n stats_ordered = sorted(stats, key=lambda a_entry: a_entry[1])\n classes=np.array(stats_ordered)[:,0].reshape(-1, numPerClass)\n for i in range(len(classes)) :\n for j in range(len(classes[i])) :\n addClass2Filename(classes[i,j],i, action)\n\n return stats, stats_ordered #returns stuff just for viewing \n\n#--------------------------------------------------------------------------------\n#if you got yourself in trouble, and need to remove all the secondary classnames:\ndef removeAllSecondaryClassNames(directory) :\n \"\"\"Revomve ALL the 2ndary class names (of the form ._cname_) from ALL files in the directory restoring them to their original\"\"\"\n for fname in fullpathfilenames(directory) :\n m = re.match('.*?(\\._.*?_)\\.tif$', fname) #grabs the string of all secondary classes if there is a seq of them\n if (m) :\n newname = re.sub(m.group(1), '', fname)\n print('Will move ' + fname + '\\n to ' + newname)\n os.rename(fname, newname)\n else :\n print('do nothing with ' + fname)\n\n#============================================\n\n# DO IT\nstats, stats_ordered = createBalancedClassesWithFunc(K_SPECTDIR, '.*/([1-5]).*', spectFile2Centroid, k_soundsPerClass, action=\"print\")\nstats, stats_ordered = createBalancedClassesWithFunc(K_SPECTDIR, '.*/([1-5]).*', spectFile2Centroid, k_soundsPerClass, action=\"move\")\n\n" }, { "alpha_fraction": 0.7234811186790466, "alphanum_fraction": 0.738916277885437, "avg_line_length": 51.5, "blob_id": "61fc7384fc3c4fd54d08474ab32cb900b2d53da9", "content_id": "678389a70c18ec791b54fc0af1277ca589f237ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3045, "license_type": "no_license", "max_line_length": 188, "num_lines": 58, "path": "/README.MD", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "\nDeveloped 
with Python 2.7 and Tensorflow 1.0\n--------------------------------------------\n\nNote: This code uses spectrogram files stored in the .tif format. Spectrograms are stored as log magnitude, normalized to [0, 1] so that -80dB corresponds to 0, and 0dB corresponds to 1. \n\n\nTo run this model, \n\n1) first get the original raw data.\nIt is messy, uneven lengths, ogg format, some mono, some stero. \n\nIn some origigData directory, \npro mpt> git clone https://github.com/karoldvl/ESC-50.git\n\n2) Next we'll massage the data to make it uniform, and convert the audio files to \nspectrogram files in tiff format. \n\nNavigate to the utils directory, and open the Jupyter (python) notebook,\nESC50_Convert.ipynb\n\nSet the parameters in the first cell, making sure that the directories you want to use exist. \nRun each cell in sequence to create wave files from the ogg files, and then tiff spectrogram files from wav files. \nThis notebook also includes code for generating a label file (text file with list of class lables)\n\n\n2.5) If you want to add a secondary class to the files to be used for Multi Task Learning (MTL - a kind of regularization), use the Centroid2ndaryClassMaker python notebook (in utils).\n\tSecondary class ids are tacked on to the file names (foo.tif -> foo._2ndaryID_.tif), and the rest of the pipeline knows how to deal with that. \n \n\n3) Divide your data into two folders, train and validate \n\ta) Go to the data directory for the project.\n\t Create two new subdirectories there, train and validate\n\t Your home directory for this project should now have a directory stucture:\n\t\t\t./data/esc50spect/[subdirectories of sound classes]\n\t\t\t./data/train\n\t\t\t./data/validate\n\n\tb) using either cp --parents (or rsync -R on OSX), copy the data you want from esc50spect to either the train folder or the validate folder. 
\n\t\tEach sound class is divided into five folds, with file names begining with [1-5]\n\t\tSo for example, to put 20% of the files in train, and 20% in validate, I do this on my Mac:\n\t\t\tgo to ./data/esc50spect/\n\t\t\tprompt> rsync -R */[1-4]*.tif ../train\n\t\t\tprompt> rsync -R */5*.tif ../validate\n\n4) The final step is to turn these data into TFRecord files to be read by the tensorflow training code.\n\ta) move the label file from the ./data/esc50spect directory to the ./data directory\n\tb) Go to ./data and execute runcmd.txt (or runcmd.2label.txt to include secondary labels for MTL)\n\t\n\n5) Now you are ready to back up to the main project directory and train using the TFRecord files for data.\n\n+++++++++++++++\n\nNote: You can test and explore this model with a tiny subset of the ESC-50 sounds provided with the git repository. \n./data2 contains dog and vird sounds, already converted from sounds -> to spectorgrams -> separated into train/ and test/ folders. All you need to do is \na) navigate to the data2 directory and run the runcmd to convert the data to TFRecords (or runcmd.2label.txt to include secondary labels for MTL), and then \nb) go to the main project folder and run \n prompt> python DCNSoundClass.py" }, { "alpha_fraction": 0.3859885334968567, "alphanum_fraction": 0.4021556079387665, "avg_line_length": 57.17647171020508, "blob_id": "40b39cf863aacb2fcecda4885eb088b28e0b9581", "content_id": "57f3d07d374b8d8d349e4e31f86159a779e3b066", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2969, "license_type": "no_license", "max_line_length": 177, "num_lines": 51, "path": "/testmodel/run50.sh", "repo_name": "lonce/dcn_soundclass", "src_encoding": "UTF-8", "text": "#!/bin/bash \n# To store logs and see both stderr and stdout on the screen: \n# nohup ./run50.sh >>logs/multilog.txt 2>&1 & \n# Individual logs will also still get stored in their respective directories \nsource activate 
tflow2\nDATE=`date +%Y.%m.%d`\nmaindir=logs.$DATE\nmkdir $maindir\n\nepsilon=1.0\noptimizer=adam\nlearningrate=.01\norientationArray=(channels)\norientation=channels\nlayers=2\nmtl=16\n\nl1channelsArray=(2048)\nl2channelsArray=(64)\nfcsizeArray=(32)\n\nfor l1channels in ${l1channelsArray[@]}\ndo\n for l2channels in ${l2channelsArray[@]}\n do\n for fcsize in ${fcsizeArray[@]}\n do\n #make output dir for paramter settings \n echo \" ------- new batch run --------\"\n OUTDIR=\"$maindir/l1r_${l1channels}.l2_${l2channels}.fc_${fcsize}\"\n mkdir $OUTDIR\n echo \"outdir is \" $OUTDIR\n\n #keep a copy of this run file \n me=`basename \"$0\"`\n cp $me $OUTDIR\n\n #make subdirs for logging and checkpoints \n mkdir \"$OUTDIR/log_graph\"\n mkdir \"$OUTDIR/checkpoints\"\n mkdir \"$OUTDIR/stderr\"\n # wrap python call in a string so we can do our fancy redirecting below \n runcmd='python DCNSoundClass.py --outdir $OUTDIR --checkpointing 1 --checkpointPeriod 500 '\n runcmd+='--numClasses 50 --batchsize 20 --n_epochs 50 --learning_rate ${learningrate} '\n runcmd+='--keepProb .5 --l1channels ${l1channels} --l2channels ${l2channels} --fcsize ${fcsize} --freqorientation ${orientation} '\n runcmd+='--numconvlayers ${layers} --adamepsilon ${epsilon} --optimizer ${optimizer} --mtlnumclasses ${mtl}'\n # direct stdout and sterr from each run into their proper directories, but tww so we can still watch \n eval $runcmd > >(tee $OUTDIR/log.txt) 2> >(tee $OUTDIR/stderr/stderr.log >&2)\n done\n done\ndone\n\n\n" }, { "alpha_fraction": 0.46150144934654236, "alphanum_fraction": 0.47497594356536865, "avg_line_length": 55.135135650634766, "blob_id": "cf7b7ff97bdba2f2188041ffcd5c58c17ed8afa8", "content_id": "e1a8eab465768a462d4ef8560a1147c04c9a31e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2078, "license_type": "no_license", "max_line_length": 184, "num_lines": 37, "path": "/scripts/run2.sh", "repo_name": "lonce/dcn_soundclass", 
"src_encoding": "UTF-8", "text": "#!/bin/bash \n# To store logs and see both stderr and stdout on the screen: \n# nohup ./run2.sh >>logs/multilog.txt 2>&1 & \n# Individual logs will also still get stored in their respective directories \nsource activate tflow2\nDATE=`date +%Y.%m.%d`\nmaindir=logs.$DATE\nmkdir $maindir\n\nlearningrateArray=(.01)\norientationArray=(channels height)\nlayersArray=(1 2)\nfor learningrate in ${learningrateArray[@]}\ndo\n for orientation in ${orientationArray[@]}\n do\n for layers in ${layersArray[@]}\n do\n #make output dir for paramter settings \n echo \" ------- new batch run --------\"\n OUTDIR=\"$maindir/lr_${learningrate}.o_${orientation}.layers_${layers}\"\n mkdir $OUTDIR\n echo \"outdir is \" $OUTDIR\n\n #make subdirs for logging and checkpoints \n mkdir \"$OUTDIR/log_graph\"\n mkdir \"$OUTDIR/checkpoints\"\n # wrap python call in a string so we can do our fancy redirecting below\n runcmd='python DCNSoundClass.py --outdir $OUTDIR --checkpointing 0 --checkpointPeriod 1000 '\n runcmd+='--numClasses 2 --batchsize 20 --n_epochs 2 --learning_rate ${learningrate} --keepProb .5 '\n runcmd+='--l1channels 64 --l2channels 32 --fcsize 32 --freqorientation ${orientation} '\n runcmd+='--numconvlayers ${layers}'\n\t\t\t# direct stdout and sterr from each run into their proper directories, but tww so we can still watch\n \teval $runcmd > >(tee $OUTDIR/log.txt) 2> >(tee $OUTDIR.stderr.log >&2)\n done\n done\ndone\n\n" } ]
18
twise2/CM2Vec
https://github.com/twise2/CM2Vec
a004ff8ec2e84d5bc3c78f0f20bd3d418c839002
c7e71d6e832a312feb0ebce93a02a8ea801db9b4
778e9c0020bdbda83dbb948068c7b7db9f46fbbd
refs/heads/master
"2017-12-01T20:41:27.496667"
"2016-06-14T20:01:11"
"2016-06-14T20:01:11"
61,139,005
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6754490733146667, "alphanum_fraction": 0.6754490733146667, "avg_line_length": 51.1875, "blob_id": "83ae625fad414e339fbf5c5f5aeea2470b5c3f8f", "content_id": "7d4c9fee40d6cbe7bf2bfc835f5d3a01a58f1e39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 835, "license_type": "no_license", "max_line_length": 150, "num_lines": 16, "path": "/storeCMData.py", "repo_name": "twise2/CM2Vec", "src_encoding": "UTF-8", "text": "import json\n#converts json data coming in format council_member_id, short_bio into\n#files named council_member_id.txt, with content of short_bio to be\n#utilized with makingCM's.py\ninput_file = 'Full_CM_Data.json' #name of json file to get input from\nlabel_name = 'council_member_id'\ndescription_name = 'short_bio'\noutput_directory_name = 'entire_cm_data/'\n\nwith open(input_file) as f: #open the json file to input from\n for line in f:\n cm = json.loads(line)\n if cm[information_name] is not None: #remove members with no bio\n memberID = outputFolder_name + '/' + str(cm[council_member_id]) +'.txt' #create new file in data directory named council_member_id.txt\n with open(memberID, 'wb') as g:\n json.dump((cm[short_bio]),g) #add the bio to each council member's file\n" }, { "alpha_fraction": 0.679257333278656, "alphanum_fraction": 0.689500629901886, "avg_line_length": 29.627450942993164, "blob_id": "0e3edddfef4b2b60ca25e1d5c6d986f66416c789", "content_id": "401d77b434926023836ff85cafbff57074a8437e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1562, "license_type": "no_license", "max_line_length": 127, "num_lines": 51, "path": "/convertCM2vec.py", "repo_name": "twise2/CM2Vec", "src_encoding": "UTF-8", "text": "#imports the names of the files and puts ID's into doclabels\nfrom os import listdir\nfrom os.path import isfile, join\nimport gensim\n\ndata_location = \"entire_cm_data/\" #location of data where filename is 
the wanted label, and the file contains the training data\noutput_file = 'CM_vectors/vector_data' #location to save the vector\n\n\ndocLabels = []\n\n#takes names of all the files in the directory that end in txt\ndocLabels = [f for f in listdir(data_location) if f.endswith('.txt')]\n\n#adds the data for each file into data\ndata = []\nfor doc in docLabels:\n #data.append(open(\"data/\" + doc, 'r')) #if file needs to stay open\n temp = open(data_location + doc, 'r')\n data.append(str(temp.read()))\n temp.close()\n\n\nclass LabeledSentence(object):\n import gensim\n #make iterator able to supply data and list of labels\n def __init__(self, doc_list, labels_list):\n self.labels_list = labels_list\n self.doc_list = doc_list\n #loop through all the data and put document filename as the label for the data\n def __iter__(self):\n for ed, doc in enumerate(self.doc_list):\n words = doc.split()\n labels=[self.labels_list[ed]]\n yield gensim.models.doc2vec.LabeledSentence(words,labels)\n\n#create the itr object\nit = LabeledSentence(data, docLabels)\n\n\nmodel = gensim.models.Doc2Vec(alpha=0.025, min_alpha=0.025)\nprint it\nmodel.build_vocab(it)\nfor epoch in range(10):\n model.alpha -= 0.002\n model.min_alpha = model.alpha\n model.train(it)\n print 'working'\n\n#save the data\nmodel.save(output_file)\n" } ]
2
scissorhands/pynal
https://github.com/scissorhands/pynal
d48cddc2ad451c0ac7be001dd8474653c8483f7f
2a4f89da405696d6d53a88b437de065978235838
801e0d87ce54e35f307aa3342324013911f387ec
refs/heads/master
"2021-01-01T19:18:42.851067"
"2017-08-22T21:30:04"
"2017-08-22T21:30:04"
98,564,230
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.695067286491394, "alphanum_fraction": 0.695067286491394, "avg_line_length": 19.363636016845703, "blob_id": "f22c797c8cd5dcfca94ac39de04960ace51c7f70", "content_id": "e702e39eeff70bae3e8318f756925d13b07d2fd5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "permissive", "max_line_length": 37, "num_lines": 11, "path": "/index.py", "repo_name": "scissorhands/pynal", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport json\nfrom etl import Etl\n\ndef lambda_connect(event, context):\n\tetl = Etl()\n\tetl.retrieve_all_stats()\n\treturn 'pickle rick'\n\nif __name__ == '__main__':\n\tlambda_connect(None, None)" }, { "alpha_fraction": 0.5500922203063965, "alphanum_fraction": 0.552550733089447, "avg_line_length": 28.071428298950195, "blob_id": "8da5b9bcb237f5d02e1d4bca90d59795a463cc70", "content_id": "b1cd32cb7fbebd7b0fab29a08faf77a6b5011a30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1627, "license_type": "permissive", "max_line_length": 78, "num_lines": 56, "path": "/requester.py", "repo_name": "scissorhands/pynal", "src_encoding": "UTF-8", "text": "import analytics as service\n\nclass Requester:\n\tdef __init__(self):\n\t\tself.analytics = service.initialize_analyticsreporting()\n\t\tself.general_stats_metrics = [\n\t\t\t{'expression': 'ga:sessions'},\n\t\t\t{'expression': 'ga:pageViews'},\n\t\t\t{'expression': 'ga:avgTimeOnPage'},\n\t\t\t{'expression': 'ga:exits'},\n\t\t\t{'expression': 'ga:organicSearches'}\n \t]\n\n\tdef get_hostname_stats(self, from_date = '7daysAgo', to_date = 'yesterday' ):\n\t\treturn service.generic_request(self.analytics,\n\t\t\tself.general_stats_metrics,\n\t \t[\n\t \t\t{'name' : 'ga:hostname'}, \n\t \t\t# {'name' : 'ga:pagePath'},\n\t \t\t{'name' : 'ga:date'}\n\t \t],\n\t \tfrom_date, to_date\n\t )\n\n\tdef get_city_stats(self, from_date 
= '7daysAgo', to_date = 'yesterday'):\n\t\treturn service.generic_request(self.analytics,\n\t\t\tself.general_stats_metrics,\n\t \t[\n\t \t\t{'name' : 'ga:hostname'},\n\t \t\t{'name' : 'ga:city'}, \n\t \t\t{'name' : 'ga:date'}\n\t \t],\n\t \tfrom_date, to_date\n\t )\n\n\tdef get_region_stats(self, from_date = '7daysAgo', to_date = 'yesterday'):\n\t\treturn service.generic_request(self.analytics,\n\t\t\tself.general_stats_metrics,\n\t \t[\n\t \t\t{'name' : 'ga:hostname'},\n\t \t\t{'name' : 'ga:region'}, \n\t \t\t{'name' : 'ga:date'}\n\t \t],\n\t \tfrom_date, to_date\n\t )\n\n\tdef get_devices_stats(self, from_date = '7daysAgo', to_date = 'yesterday'):\n\t\treturn service.generic_request(self.analytics,\n\t\t\tself.general_stats_metrics,\n\t \t[\n\t \t\t{'name' : 'ga:hostname'},\n\t \t\t{'name' : 'ga:deviceCategory'}, \n\t \t\t{'name' : 'ga:date'}\n\t \t],\n\t \tfrom_date, to_date\n\t )" }, { "alpha_fraction": 0.6012658476829529, "alphanum_fraction": 0.6208285093307495, "avg_line_length": 24.202898025512695, "blob_id": "641935dc7e7eb5faf37465ba6249e97cf5707f78", "content_id": "99d7450beb81574d20e11892eba9a915af5f45a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1738, "license_type": "permissive", "max_line_length": 88, "num_lines": 69, "path": "/dbconnector.py", "repo_name": "scissorhands/pynal", "src_encoding": "UTF-8", "text": "import dbconfig\nimport mysql.connector as _connector\nfrom mysql.connector import errorcode as dberror\n\nclass Connector:\n\tdef __init__(self):\n\t\tself.cnx = self.cur = None\n\t\ttry:\n\t\t\tself.cnx = _connector.connect(**dbconfig.config)\n\t\texcept _connector.Error as e:\n\t\t\tif(e.errno == dberror.ER_ACCESS_DENIED_ERROR):\n\t\t\t\tprint('Invalid credentials')\n\t\t\telif(e.errno == dberror.ER_BAD_DB_ERROR):\n\t\t\t\tprint('Invalid database')\n\t\t\telse:\n\t\t\t\tprint(e)\n\t\telse:\n\t\t\tself.cur = self.cnx.cursor()\n\n\tdef 
test_select(self):\n\t\tself.cur.execute(\"SELECT * FROM users AS U LIMIT 10\")\n\t\tprint()\n\t\tprint('{0:3} {1:25} {2}'.format('ID:', 'EMAIL:', 'LANG:'))\n\t\tfor row in self.cur.fetchall():\n\t\t\tprint('{0:3} {1:25} {2}'.format(row[0], row[2], row[4]))\n\t\tprint()\n\n\tdef insert_ignore(self, table, data_dictionary):\n\t\tinsert_id = None\n\t\tkeys = \"(\"+\", \".join( \"`\"+key+\"`\" for key in data_dictionary.keys() )+\")\"\n\t\tvalues = \"(\"+\", \".join( \"%(\"+str(value)+\")s\" for value in data_dictionary.keys() )+\")\"\n\t\tquery = (\"INSERT IGNORE INTO {0}\\n\"\n\t\t\t\"{1}\\n\"\n\t\t\t\"VALUES {2}\".format(table, keys, values) )\n\t\ttry:\n\t\t\tself.cur.execute(query, data_dictionary)\n\t\t\tself.cnx.commit()\n\t\t\tinsert_id = self.cur.lastrowid\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\treturn insert_id\n\n\tdef serv_destory(self):\n\t\tif self.cur:\n\t\t\tself.cur.close()\n\t\tif self.cnx:\n\t\t\tself.cnx.close()\n\t\tprint(\"Connection destroyed\")\n\n\n\tdef main(self):\n\t\tid = self.insert_ignore('analytics_hostname_stats', {\n\t\t\t'hostname': 'hostname',\n\t\t\t'sessions': 1,\n\t\t\t'page_views': 1,\n\t\t\t'avg_time_on_page': 2.1,\n\t\t\t'exits': 3,\n\t\t\t'organic_searches': 5,\n\t\t\t'date': '2017-07-31',\n\t\t})\n\t\tif self.cur:\n\t\t\tself.cur.close()\n\t\tif self.cnx:\n\t\t\tself.cnx.close()\n\n\nif __name__ == '__main__':\n\tconnector = Connector()\n\tconnector.main()" }, { "alpha_fraction": 0.6813489198684692, "alphanum_fraction": 0.6923606395721436, "avg_line_length": 31.299999237060547, "blob_id": "6026e49739dad8393b2b1a777779001cb4d4498c", "content_id": "bd84f5027141742857ab3198db2e4fd10a71c646", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2906, "license_type": "permissive", "max_line_length": 89, "num_lines": 90, "path": "/etl.py", "repo_name": "scissorhands/pynal", "src_encoding": "UTF-8", "text": "from requester import Requester\nfrom dbconnector import 
Connector\nimport json\nimport datetime as dt\n\n\nclass Etl:\n\tdef __init__(self):\n\t\tself.req = Requester()\n\t\tself.connector = Connector()\n\n\tdef get_report_dictionary(self, report):\n\t\tcolumnHeader = report.get('columnHeader', {})\n\t\treturn {\n\t\t\t'columnHeader': columnHeader,\n\t\t\t'dimensionHeaders': columnHeader.get('dimensions', []),\n\t\t\t'metricHeaders': columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []),\n\t\t\t'rows': report.get('data', {}).get('rows', [])\n\t\t}\n\n\tdef formatted_output(self, input):\n\t\tstats = []\n\t\tfor report in input.get('reports', []):\n\t\t\trdictionary = self.get_report_dictionary(report)\n\t\t\tfor row in rdictionary['rows']:\n\t\t\t\tstat = {}\n\t\t\t\tdimensions = row.get('dimensions', [])\n\t\t\t\tdateRangeValues = row.get('metrics', [])\n\t\t\t\tfor header, dimension in zip(rdictionary['dimensionHeaders'], dimensions):\n\t\t\t\t\thd = header.replace('ga:', '')\n\t\t\t\t\tif(hd == 'date'):\n\t\t\t\t\t\tdimension = dt.datetime.strptime(dimension, '%Y%m%d').strftime('%Y-%m-%d')\n\t\t\t\t\tstat[hd] = dimension\n\t\t\t\tfor i, values in enumerate(dateRangeValues):\n\t\t\t\t\tfor metricHeader, value in zip(rdictionary['metricHeaders'], values.get('values') ):\n\t\t\t\t\t\tstat[metricHeader.get('name').replace('ga:', '')] = value\n\t\t\t\tstats.append(stat) \n\t\treturn stats\n\n\tdef retrieve_all_stats(self, destroy_after=True):\n\t\tself.retrieve_hostname_stats(False)\n\t\tself.retrieve_city_stats(False)\n\t\tself.retrieve_region_stats(False)\n\t\tself.retrieve_devices_stats(False)\n\t\tif (destroy_after):\n\t\t\tself.connector.serv_destory()\n\n\n\tdef retrieve_hostname_stats(self, destroy_after=True):\n\t\tprint('getting hostname stats')\n\t\treport = self.req.get_hostname_stats( '2017-01-01' )\n\t\tstats = self.formatted_output(report)\n\t\tfor row in stats:\n\t\t\tself.connector.insert_ignore(\"analytics_hostname_stats\",row)\n\t\tif 
(destroy_after):\n\t\t\tself.connector.serv_destory()\n\n\tdef retrieve_city_stats(self, destroy_after=True):\n\t\tprint('getting city stats')\n\t\treport = self.req.get_city_stats( '2017-01-01' )\n\t\tstats = self.formatted_output(report)\n\t\tfor row in stats:\n\t\t\tself.connector.insert_ignore(\"analytics_city_stats\",row)\n\t\tif (destroy_after):\n\t\t\tself.connector.serv_destory()\n\n\tdef retrieve_region_stats(self, destroy_after=True):\n\t\tprint('getting region stats')\n\t\treport = self.req.get_region_stats( '2017-01-01' )\n\t\tstats = self.formatted_output(report)\n\t\tfor row in stats:\n\t\t\tself.connector.insert_ignore(\"analytics_region_stats\",row)\n\t\tif (destroy_after):\n\t\t\tself.connector.serv_destory()\n\n\tdef retrieve_devices_stats(self, destroy_after=True):\n\t\tprint('getting devices stats')\n\t\treport = self.req.get_devices_stats( '2017-01-01' )\n\t\tstats = self.formatted_output(report)\n\t\tfor row in stats:\n\t\t\tself.connector.insert_ignore(\"analytics_device_stats\",row)\n\t\tif (destroy_after):\n\t\t\tself.connector.serv_destory()\n\ndef main():\n\tetl = Etl()\n\tetl.retrieve_all_stats()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5431235432624817, "alphanum_fraction": 0.5477855205535889, "avg_line_length": 24.254901885986328, "blob_id": "bcc347017eb61609b6767fee58298e6dc5f6e026", "content_id": "1c0da63f031c9b1d82cf4ec23f48d35b46abfd80", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1287, "license_type": "permissive", "max_line_length": 84, "num_lines": 51, "path": "/index_microservice.py", "repo_name": "scissorhands/pynal", "src_encoding": "UTF-8", "text": "from requester import Requester\nimport json\nreq = Requester()\nlocalTest = False\n\nprint('Loading function')\n\n\ndef respond(err, res=None):\n return {\n 'statusCode': '400' if err else '200',\n 'body': err.message if err else json.dumps(res),\n 'headers': {\n 'Content-Type': 
'application/json',\n },\n }\n\n\ndef lambda_handler(event, context):\n operations = {\n 'GET'\n }\n\n operation = event['httpMethod']\n if operation in operations:\n method = event['queryStringParameters']['method']\n api_methods = {\n 'get_hostname_stats',\n 'get_city_stats',\n 'get_region_stats',\n 'get_devices_stats'\n }\n if method in api_methods:\n stats = getattr(req, method)()\n if(localTest):\n print(stats)\n return respond(None, stats)\n else:\n return respond(ValueError(\"Unsupported method '{}'\".format(method)))\n else:\n return respond(ValueError('Unsupported http method \"{}\"'.format(operation)))\n\nif __name__ == '__main__':\n localTest = True\n event = {\n 'httpMethod': 'GET',\n 'queryStringParameters': {\n 'method': 'get_hostname_stats'\n }\n }\n lambda_handler(event, None)" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 12, "blob_id": "40a2ec6ee1d77d4a427eaa5a9e7079a53e4baeb5", "content_id": "d226e11b86f5b5c2cd15ec223d2e30e3321e601d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 27, "license_type": "permissive", "max_line_length": 17, "num_lines": 2, "path": "/README.md", "repo_name": "scissorhands/pynal", "src_encoding": "UTF-8", "text": "# pynal\nPython GAnalytics \n" } ]
6
jesbarlow/CP1404_practicals
https://github.com/jesbarlow/CP1404_practicals
8fff7bcdfbfe18dcd88e11ac4f0c7ed4ae503079
c02d81b463341d149646ed36b4bd5b4b207aa394
683651a16239285624167d116e439f739b672644
refs/heads/master
"2020-03-06T16:33:56.833889"
"2018-04-12T06:15:18"
"2018-04-12T06:15:18"
126,975,370
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5350610017776489, "alphanum_fraction": 0.6021341681480408, "avg_line_length": 40, "blob_id": "f3465123d002e2aa80e5c006c83f32a5e5823dc4", "content_id": "609ed86f39dc481c1b17e9bfad4838038ad9a569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 656, "license_type": "no_license", "max_line_length": 95, "num_lines": 16, "path": "/prac_5/colour_codes.py", "repo_name": "jesbarlow/CP1404_practicals", "src_encoding": "UTF-8", "text": "\nCOLOUR_CODES = {\"CadetBlue2\": \"#8ee5ee\", \"CornflowerBlue\": \"#6495ed\", \"Chartreuse4\": \"#458600\",\n \"DarkOliveGreen3\": \"#a2cd5a\", \"DarkTurquoise\": \"#00ced1\", \"Gold1\": \"#ffd700\",\n \"IndianRed2\": \"#eeb363\", \"PaleVioletRed2\": \"#ee799f\", \"RosyBrown4\": \"#8b6969\",\n \"Snow2\": \"#eee9e9\"}\n\ncolour = input(\"Enter the colour name:\")\nif colour in COLOUR_CODES:\n\n print(\"Colour: {} Hex Code: {}\\n\".format(colour, COLOUR_CODES[colour]))\nelse:\n print(\"Invalid Colour\")\n colour = input(\"Enter the colour name:\")\n\n\nfor key, value in COLOUR_CODES.items():\n print(\"Colour Name: {:<15} Hex Code: {:<7}\".format(key, value))" }, { "alpha_fraction": 0.6584699749946594, "alphanum_fraction": 0.6584699749946594, "avg_line_length": 21.9375, "blob_id": "ff56ff8a3c4ab6a76e674c8c9087205b03dd615f", "content_id": "7cdf5f3cbafa825431aea4aa120e4c67b15e0fc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 36, "num_lines": 16, "path": "/prac_2/files.py", "repo_name": "jesbarlow/CP1404_practicals", "src_encoding": "UTF-8", "text": "name_file = open('name.txt', 'w')\nname = input(\"What is your name?: \")\nname_file.write(name)\nname_file.close()\n\nopen_file = open('name.txt', 'r')\nopen_file.read().strip()\nprint (\"Your name is\",name)\nopen_file.close()\n\nout_file = open('numbers.txt', 'r')\nnum_one = 
int(out_file.readline())\nnum_two = int(out_file.readline())\n\nprint(num_one + num_two)\nout_file.close()" }, { "alpha_fraction": 0.600671112537384, "alphanum_fraction": 0.6073825359344482, "avg_line_length": 20.35714340209961, "blob_id": "a4241bed471950877aa01ce4c0ede3956d5638ae", "content_id": "24a664574bfd35f376fb3756e138b74c2a473bcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "no_license", "max_line_length": 38, "num_lines": 14, "path": "/prac_5/word_count.py", "repo_name": "jesbarlow/CP1404_practicals", "src_encoding": "UTF-8", "text": "sentence = input(\"Enter a sentence:\")\nwords = sentence.split()\ncounting = {}\n\nfor word in words:\n if word in counting:\n counting[word] += 1\n else:\n counting[word] = 1\n\nprint(\"Text: {}\".format(sentence))\n\nfor key, value in counting.items():\n print(\"{} : {}\".format(key,value))" }, { "alpha_fraction": 0.60317462682724, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 22.4375, "blob_id": "393e54847b8081554175f31209811572496c94d5", "content_id": "c693002d388d1bbb6a565c740a9b47dee016183b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 64, "num_lines": 16, "path": "/prac_4/quickpick_lottery_generator.py", "repo_name": "jesbarlow/CP1404_practicals", "src_encoding": "UTF-8", "text": "import random\n\ndef main():\n\n quick_picks = int(input(\"How many quick picks? 
\"))\n print_quickpicks(quick_picks)\n\n\ndef print_quickpicks(quick_picks):\n for num in range(quick_picks):\n NUMBERS = [random.randrange(1, 46) for i in range(0, 6)]\n NUMBERS.sort()\n number_line = ['%.2d' % number for number in NUMBERS]\n print(*number_line)\n\nmain()\n\n\n\n" }, { "alpha_fraction": 0.7102908492088318, "alphanum_fraction": 0.7248322367668152, "avg_line_length": 37.91304397583008, "blob_id": "da60d8a920012cd3b85e0f368d30b541319fce12", "content_id": "6a9fa28b407edd79ae6531d670548104e71fbe14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 894, "license_type": "no_license", "max_line_length": 120, "num_lines": 23, "path": "/prac_2/exceptions.py", "repo_name": "jesbarlow/CP1404_practicals", "src_encoding": "UTF-8", "text": "\"\"\"\nCP1404/CP5632 - Practical\nAnswer the following questions:\n1. When will a ValueError occur?\n - value errors occur when the input os anything other than a number(including negative numbers),for example - the\n letter a\n2. When will a ZeroDivisionError occur?\n - this will occur whenever the user inputs 0\n3. 
Could you change the code to avoid the possibility of a ZeroDivisionError?\n - yes, with input validation and a while loop that will just continue asking the user to\n re enter a number until that number is no longer 0\n\"\"\"\n\ntry:\n numerator = int(input(\"Enter the numerator: \"))\n denominator = int(input(\"Enter the denominator: \"))\n fraction = numerator / denominator\n print(fraction)\nexcept ValueError:\n print(\"Numerator and denominator must be valid numbers!\")\nexcept ZeroDivisionError:\n print(\"Cannot divide by zero!\")\nprint(\"Finished.\")" }, { "alpha_fraction": 0.5901639461517334, "alphanum_fraction": 0.6489071249961853, "avg_line_length": 20.558822631835938, "blob_id": "00d995b1290a77419305deda6ed8dcbf48c2f01a", "content_id": "9dec1f1c88925c9ffbb03ab313767353547dc37e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 732, "license_type": "no_license", "max_line_length": 77, "num_lines": 34, "path": "/prac_4/warm_up.py", "repo_name": "jesbarlow/CP1404_practicals", "src_encoding": "UTF-8", "text": "numbers = [3, 1, 4, 1, 5, 9, 2]\n\n#numbers[0] - the value would be 3\n#numbers[-1] -\n#numbers[3] - the value would be 1\n#numbers[:-1] -\n#numbers[3:4] -\n#5 in numbers - the value would be true\n#7 in numbers - the value would be false\n#\"3\" in numbers - the value would be false\n#numbers + [6, 5, 3] - will print the list adding the new numbers to the end.\n\n\nprint(numbers[0])\nprint(numbers[-1])\nprint(numbers[3])\nprint(numbers[:-1])\nprint(numbers[3:4])\nprint(5 in numbers)\nprint(7 in numbers)\nprint(\"3\" in numbers)\nprint(numbers + [6, 5, 3])\n\nnumbers[0] = '10'\nprint(numbers[0])\nnumbers[-1] = '1'\nprint(numbers[-1])\nprint(numbers[2:])\ncheck_number = '9'\nif 9 in numbers:\n print(\"It's in the list\")\n\nelse:\n print('Not in the list')" }, { "alpha_fraction": 0.5951941013336182, "alphanum_fraction": 0.6062846779823303, "avg_line_length": 24.190475463867188, "blob_id": 
"f8f154f45783745b7a144130ae5f24ececfbd57e", "content_id": "ffc019d8a5e4e79bf1900d9a29b66badd64daa08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": "no_license", "max_line_length": 78, "num_lines": 21, "path": "/Prac_1/shop_calculator.py", "repo_name": "jesbarlow/CP1404_practicals", "src_encoding": "UTF-8", "text": "items = int(input(\"Please enter the number of items:\"))\nif items <= 0:\n print(\"Invalid number of items\")\n items = input(\"Please enter the number of items:\")\n\nprices = []\ncount = 0\n\nfor i in range(items):\n count = count + 1\n item_cost = float(input(\"What is the price of item {}?: $\".format(count)))\n prices.append(item_cost)\n\nnum = 0\nfor elem in prices:\n num = num + 1\n print(\"The cost of item {} is:\".format(num))\n print (\"${:,.2f}\".format(elem))\n\n\nprint(\"The total cost of all items is: $\",sum(prices))\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5157232880592346, "alphanum_fraction": 0.5188679099082947, "avg_line_length": 13.5, "blob_id": "53b17b9e7e9572f56f7e864bf03f552cd6082799", "content_id": "6eb2f4428b8f11dc7b224a849b65d2b4b7392e2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "no_license", "max_line_length": 53, "num_lines": 22, "path": "/Prac_3/print_second_letter_name.py", "repo_name": "jesbarlow/CP1404_practicals", "src_encoding": "UTF-8", "text": "def main():\n\n name = get_name()\n\n print_name(name)\n\n\ndef print_name(name):\n print(name[::2])\n\n\ndef get_name():\n while True:\n name = input(\"What is your name?: \")\n if name.isalpha():\n break\n else:\n print(\"Sorry, i didn't understand that.\")\n return name\n\n\nmain()" } ]
8
rlebras/pytorch-pretrained-BERT
https://github.com/rlebras/pytorch-pretrained-BERT
f00e35de768b30f97ac302146bcfb851331ba029
002e0f88e7533846a6d3b80df1268f270524ff9d
6e86ead66062d5842b1f155b0f4d188f66312f61
refs/heads/master
"2020-04-16T17:44:43.789657"
"2019-01-07T20:56:20"
"2019-01-07T20:56:20"
165,786,637
3
1
null
"2019-01-15T04:43:19"
"2019-01-07T20:56:30"
"2019-01-07T20:56:28"
null
[ { "alpha_fraction": 0.5472529530525208, "alphanum_fraction": 0.5526634454727173, "avg_line_length": 39.125732421875, "blob_id": "a55f4779a1ce032f1fd9e8824b47e9c7fa99c28d", "content_id": "eb681579f4af5119757d9a35978c54f1fa27a1f9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47870, "license_type": "permissive", "max_line_length": 117, "num_lines": 1193, "path": "/examples/run_classifier.py", "repo_name": "rlebras/pytorch-pretrained-BERT", "src_encoding": "UTF-8", "text": "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport csv\nimport json\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom examples.run_squad import _compute_softmax\nfrom pytorch_pretrained_bert import BertForSequenceClassification\nfrom pytorch_pretrained_bert.file_utils import read_jsonl_lines, write_items, TsvIO\nfrom pytorch_pretrained_bert.modeling import BertForMultipleChoice\nfrom pytorch_pretrained_bert.optimization import 
BertAdam\nfrom pytorch_pretrained_bert.tokenization import printable_text, convert_to_unicode, BertTokenizer\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputExampleWithList(object):\n \"\"\"A single training/test example for simple multiple choice classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: list. A list containing untokenized text\n text_b: list. containing untokenized text associated of the same size as text_A\n Only must be specified for multiple choice options.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n assert isinstance(text_a, list)\n assert isinstance(text_b, list)\n assert len(text_a) == len(text_b)\n\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n self.text_c = None\n self.text_d = None\n\n\nclass InputExampleWithListFourFields(object):\n \"\"\"A single training/test example for simple multiple choice classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b, text_c, text_d, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: list. A list containing untokenized text\n text_b: list. containing untokenized text associated of the same size as text_A\n text_c: list. containing untokenized text associated of the same size as text_A\n text_d: list. containing untokenized text associated of the same size as text_A\n Only must be specified for multiple choice options.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n assert isinstance(text_a, list)\n assert isinstance(text_b, list)\n assert text_c is None or isinstance(text_c, list)\n assert text_d is None or isinstance(text_d, list)\n assert len(text_a) == len(text_b)\n if text_c is not None:\n assert len(text_c) == len(text_a)\n if text_d is not None:\n assert len(text_d) == len(text_a)\n\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.text_c = text_c\n self.text_d = text_d\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\n @classmethod\n def _read_jsonl(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n records = []\n with open(input_file, \"r\") as f:\n for line in f:\n obj = json.loads(line)\n records.append(obj)\n return records\n\n\nclass AnliProcessor(DataProcessor):\n \"\"\"Processor for the ANLI data set.\"\"\"\n\n def 
get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"train.jsonl\")))\n return self._create_examples(\n self._read_jsonl(os.path.join(data_dir, \"train.jsonl\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_jsonl(os.path.join(data_dir, \"valid.jsonl\")), \"dev\")\n\n def get_examples_from_file(self, input_file):\n return self._create_examples(\n self._read_jsonl(input_file), \"to-pred\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, records, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, record) in enumerate(records):\n guid = \"%s-%s-%s\" % (set_type, record['InputStoryid'], \"1\")\n\n beginning = record['InputSentence1']\n ending = record['InputSentence5']\n\n option1 = record['RandomMiddleSentenceQuiz1']\n option2 = record['RandomMiddleSentenceQuiz2']\n\n answer = int(record['AnswerRightEnding']) - 1\n\n option1_context = convert_to_unicode(' '.join([beginning, option1]))\n option2_context = convert_to_unicode(' '.join([beginning, option2]))\n\n label = convert_to_unicode(str(answer))\n examples.append(\n InputExampleWithListFourFields(guid=guid,\n text_a=[option1_context, option2_context],\n text_b=[ending, ending],\n text_c=None,\n text_d=None,\n label=label\n )\n )\n return examples\n\n def label_field(self):\n return \"AnswerRightEnding\"\n\n\nclass AnliProcessor3Option(DataProcessor):\n \"\"\"Processor for the ANLI data set.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"train.jsonl\")))\n return self._create_examples(\n self._read_jsonl(os.path.join(data_dir, \"train.jsonl\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return 
self._create_examples(\n self._read_jsonl(os.path.join(data_dir, \"valid.jsonl\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_jsonl(os.path.join(data_dir, \"test.jsonl\")), \"test\")\n\n def get_examples_from_file(self, input_file):\n return self._create_examples(\n self._read_jsonl(input_file, \"to-pred\")\n )\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\", \"2\"]\n\n def _create_examples(self, records, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, record) in enumerate(records):\n guid = \"%s-%s-%s\" % (set_type, record['InputStoryid'], record['ending'])\n\n beginning = record['InputSentence1']\n ending = record['InputSentence5']\n\n option1 = record['RandomMiddleSentenceQuiz1']\n option2 = record['RandomMiddleSentenceQuiz2']\n option3 = record['RandomMiddleSentenceQuiz3']\n\n answer = int(record['AnswerRightEnding']) - 1\n\n option1_context = convert_to_unicode(' '.join([beginning, option1]))\n option2_context = convert_to_unicode(' '.join([beginning, option2]))\n option3_context = convert_to_unicode(' '.join([beginning, option3]))\n\n label = convert_to_unicode(str(answer))\n\n text_a = [option1_context, option2_context, option3_context]\n text_b = [ending, ending, ending]\n\n examples.append(\n InputExampleWithList(guid=guid,\n text_a=text_a,\n text_b=text_b,\n label=label\n )\n )\n return examples\n\n def label_field(self):\n return \"AnswerRightEnding\"\n\n\nclass AnliWithCSKProcessor(DataProcessor):\n \"\"\"Processor for the ANLI data set.\"\"\"\n\n def __init__(self):\n self._labels = []\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"train.jsonl\")))\n return self._create_examples(\n self._read_jsonl(os.path.join(data_dir, \"train.jsonl\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n 
\"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_jsonl(os.path.join(data_dir, \"valid.jsonl\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_jsonl(os.path.join(data_dir, \"test.jsonl\")), \"test\")\n\n def get_examples_from_file(self, input_file):\n return self._create_examples(\n self._read_jsonl(input_file, \"to-pred\")\n )\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [str(idx) for idx in range(16)]\n\n def _create_examples(self, records, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n num_fields = len(\n [x for x in list(records[0].keys()) if x.startswith('RandomMiddleSentenceQuiz')])\n self._labels = [str(idx) for idx in range(1, num_fields + 1)]\n for (i, record) in enumerate(records):\n guid = \"%s-%s-%s\" % (set_type, record['InputStoryid'], record['ending'])\n\n beginning = record['InputSentence1']\n ending = record['InputSentence5']\n\n text_a = []\n text_b = []\n for idx in range(1, num_fields + 1):\n text_a.append(\n beginning + \" \" + record[\"RandomMiddleSentenceQuiz\" + str(idx)]\n )\n text_b.append(\n ending + \" Because , \" + record['CSK' + str(idx)]\n )\n\n answer = int(record['AnswerRightEnding']) - 1\n label = convert_to_unicode(str(answer))\n\n examples.append(\n InputExampleWithListFourFields(guid=guid,\n text_a=text_a,\n text_b=text_b,\n text_c=None,\n text_d=None,\n label=label\n )\n )\n return examples\n\n def label_field(self):\n return \"AnswerRightEnding\"\n\nclass WSCProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"train.tsv\")))\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base 
class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"1\", \"2\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n sentence = convert_to_unicode(line[1]).replace(\"\\\"\", \"\")\n conj = convert_to_unicode(line[2])\n\n idx = sentence.index(conj)\n\n context = sentence[:idx + len(conj)]\n option_str = sentence[idx + len(conj):].strip()\n\n name1 = convert_to_unicode(line[3])\n name2 = convert_to_unicode(line[4])\n\n option1 = option_str.replace(\"_\", name1)\n option2 = option_str.replace(\"_\", name2)\n\n text_a = [context, context]\n text_b = [option1, option2]\n\n label = convert_to_unicode(line[5])\n\n examples.append(\n InputExampleWithList(\n guid=guid,\n text_a=text_a,\n text_b=text_b,\n label=label\n )\n )\n return examples\n\n def get_examples_from_file(self, input_file):\n return self._create_examples(\n self._read_tsv(input_file), \"to-pred\")\n\n def label_field(self):\n return \"answer\"\n\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"train.tsv\")))\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n 
continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = convert_to_unicode(line[3])\n text_b = convert_to_unicode(line[4])\n label = convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n \"dev_matched\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, convert_to_unicode(line[0]))\n text_a = convert_to_unicode(line[8])\n text_b = convert_to_unicode(line[9])\n label = convert_to_unicode(line[-1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in 
enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = convert_to_unicode(line[3])\n label = convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\nclass BinaryAnli(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_jsonl(os.path.join(data_dir, \"train-binary.jsonl\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_jsonl(os.path.join(data_dir, \"valid-binary.jsonl\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, records, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, record) in enumerate(records):\n\n guid = \"%s-%s\" % (set_type, i)\n\n beginning = record['InputSentence1']\n ending = record['InputSentence5']\n middle = record['RandomMiddleSentenceQuiz1']\n label = str(record['AnswerRightEnding'])\n\n text_a = convert_to_unicode(beginning)\n text_b = convert_to_unicode(middle + \" \" + ending)\n\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\ndef convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n 
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [printable_text(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features\n\n\ndef convert_examples_to_features_mc(examples, label_list, max_seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in tqdm(enumerate(examples), desc=\"Converting examples\"):\n inputs = []\n\n tokens_a = [tokenizer.tokenize(t) for t in example.text_a]\n inputs.append(tokens_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = [tokenizer.tokenize(t) for t in example.text_b]\n inputs.append(tokens_b)\n\n tokens_c = None\n if example.text_c:\n tokens_c = [tokenizer.tokenize(t) for t in example.text_c]\n inputs.append(tokens_c)\n\n tokens_d = None\n if example.text_d:\n tokens_d = [tokenizer.tokenize(t) for t in example.text_d]\n inputs.append(tokens_d)\n\n if len(inputs) > 1:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # 
Account for [CLS], [SEP], [SEP] with \"- 3\"\n adjusted_len = max_seq_length - len(inputs) - 1\n _truncate_sequences(adjusted_len, inputs)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n for idx, ta in enumerate(tokens_a):\n tokens_a[idx] = tokens_a[idx][0:(max_seq_length - 2)]\n\n all_tokens = []\n all_token_ids = []\n all_segments = []\n all_masks = []\n for zipped_tokens in zip(*inputs):\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n\n for idx, field in enumerate(zipped_tokens):\n for token in field:\n tokens.append(token)\n segment_ids.append(idx)\n tokens.append(\"[SEP]\")\n segment_ids.append(idx)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n if len(input_ids) != max_seq_length:\n print(\"FOUND\")\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n all_tokens.append(tokens)\n all_token_ids.append(input_ids)\n all_segments.append(segment_ids)\n all_masks.append(input_mask)\n\n label_id = label_map[example.label]\n if ex_index < 5:\n logger.info(\"\\n\\n\")\n logger.info(\"*** Example {} ***\\n\".format(ex_index))\n logger.info(\"guid: %s\" % (example.guid))\n _ts = all_tokens\n _ids = all_token_ids\n _masks = all_masks\n _segs = all_segments\n\n logger.info(\"\\n\")\n\n for idx, (_t, _id, _mask, _seg) in enumerate(zip(_ts, _ids, _masks, _segs)):\n logger.info(\"\\tOption {}\".format(idx))\n logger.info(\"\\ttokens: %s\" % \" \".join(\n [printable_text(x) for x in _t]))\n logger.info(\"\\tinput_ids: %s\" % \" \".join([str(x) for x in _id]))\n logger.info(\"\\tinput_mask: %s\" % \" \".join([str(x) for x in _mask]))\n logger.info(\n 
\"\\tsegment_ids: %s\" % \" \".join([str(x) for x in _seg]))\n\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=all_token_ids,\n input_mask=all_masks,\n segment_ids=all_segments,\n label_id=label_id))\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef _truncate_sequences(max_length, inputs):\n idx = 0\n for ta, tb in zip(inputs[0], inputs[1]):\n _truncate_seq_pair(ta, tb, max_length)\n\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\n\ndef copy_optimizer_params_to_model(named_params_model, named_params_optimizer):\n \"\"\" Utility function for optimize_on_cpu and 16-bits training.\n Copy the parameters optimized on CPU/RAM back to the model on GPU\n \"\"\"\n for (name_opti, param_opti), (name_model, param_model) in zip(named_params_optimizer,\n named_params_model):\n if name_opti != name_model:\n logger.error(\"name_opti != name_model: {} {}\".format(name_opti, name_model))\n raise ValueError\n param_model.data.copy_(param_opti.data)\n\n\ndef set_optimizer_params_grad(named_params_optimizer, named_params_model, test_nan=False):\n \"\"\" Utility function for optimize_on_cpu and 16-bits training.\n Copy the gradient of the GPU parameters to the CPU/RAMM copy of the model\n \"\"\"\n is_nan = False\n for (name_opti, param_opti), (name_model, param_model) in 
zip(named_params_optimizer,\n named_params_model):\n if name_opti != name_model:\n logger.error(\"name_opti != name_model: {} {}\".format(name_opti, name_model))\n raise ValueError\n if param_model.grad is not None:\n if test_nan and torch.isnan(param_model.grad).sum() > 0:\n is_nan = True\n if param_opti.grad is None:\n param_opti.grad = torch.nn.Parameter(\n param_opti.data.new().resize_(*param_opti.data.size()))\n param_opti.grad.data.copy_(param_model.grad.data)\n else:\n param_opti.grad = None\n return is_nan\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.\")\n parser.add_argument(\"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train.\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_train\",\n default=False,\n action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\",\n default=False,\n action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_predict\",\n default=False,\n action='store_true',\n help=\"Whether to run prediction on a given dataset.\")\n parser.add_argument(\"--input_file_for_pred\",\n default=None,\n type=str,\n help=\"File to run prediction on.\")\n parser.add_argument(\"--output_file_for_pred\",\n default=None,\n type=str,\n help=\"File to output predictions into.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument(\"--eval_batch_size\",\n default=8,\n type=int,\n help=\"Total batch size for eval.\")\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--num_train_epochs\",\n default=3.0,\n type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--no_cuda\",\n default=False,\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumualte before performing a backward/update pass.\")\n parser.add_argument('--optimize_on_cpu',\n default=False,\n action='store_true',\n help=\"Whether to perform optimization and keep the optimizer averages on CPU\")\n parser.add_argument('--fp16',\n default=False,\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type=float, default=128,\n help='Loss scaling, positive power of 2 values can improve fp16 convergence.')\n\n args = parser.parse_args()\n\n processors = {\n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mrpc\": MrpcProcessor,\n \"anli\": AnliProcessor,\n \"anli3\": AnliProcessor3Option,\n 'anli_csk': AnliWithCSKProcessor,\n 'bin_anli': BinaryAnli,\n 'wsc': WSCProcessor\n }\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n if args.fp16:\n logger.info(\"16-bits training currently not supported in distributed training\")\n args.fp16 = False # (see https://github.com/pytorch/pytorch/pull/13496)\n logger.info(\"device %s n_gpu %d distributed training %r\", device, n_gpu,\n bool(args.local_rank != -1))\n\n if 
args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if not args.do_train and not args.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n os.makedirs(args.output_dir, exist_ok=True)\n\n task_name = args.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n label_list = processor.get_labels()\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_model)\n\n train_examples = None\n num_train_steps = None\n if args.do_train:\n train_examples = processor.get_train_examples(args.data_dir)\n num_train_steps = int(\n len(\n train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)\n\n # Prepare model\n if task_name == 'bin_anli':\n model = BertForSequenceClassification.from_pretrained(args.bert_model, len(label_list))\n else:\n model = BertForMultipleChoice.from_pretrained(args.bert_model,\n len(label_list),\n len(label_list)\n )\n if args.fp16:\n model.half()\n model.to(device)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Prepare optimizer\n if args.fp16:\n param_optimizer = [(n, param.clone().detach().to('cpu').float().requires_grad_()) \\\n for n, param in model.named_parameters()]\n 
elif args.optimize_on_cpu:\n param_optimizer = [(n, param.clone().detach().to('cpu').requires_grad_()) \\\n for n, param in model.named_parameters()]\n else:\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'gamma', 'beta']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if n not in no_decay], 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if n in no_decay], 'weight_decay_rate': 0.0}\n ]\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_steps)\n\n global_step = 0\n\n model_save_path = os.path.join(args.output_dir, \"bert-finetuned.model\")\n tr_loss = None\n if args.do_train:\n if task_name.lower().startswith(\"anli\") or task_name.lower().startswith(\"wsc\"):\n train_features = convert_examples_to_features_mc(\n train_examples, label_list, args.max_seq_length, tokenizer)\n else:\n train_features = convert_examples_to_features(\n train_examples, label_list, args.max_seq_length, tokenizer)\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_steps)\n all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = DistributedSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler,\n batch_size=args.train_batch_size)\n\n model.train()\n for _ in 
trange(int(args.num_train_epochs), desc=\"Epoch\"):\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n status_tqdm = tqdm(train_dataloader, desc=\"Iteration\")\n for step, batch in enumerate(status_tqdm):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, label_ids = batch\n loss, _ = model(input_ids, segment_ids, input_mask, label_ids)\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.fp16 and args.loss_scale != 1.0:\n # rescale loss for fp16 training\n # see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html\n loss = loss * args.loss_scale\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n loss.backward()\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16 or args.optimize_on_cpu:\n if args.fp16 and args.loss_scale != 1.0:\n # scale down gradients for fp16 training\n for param in model.parameters():\n param.grad.data = param.grad.data / args.loss_scale\n is_nan = set_optimizer_params_grad(param_optimizer,\n model.named_parameters(), test_nan=True)\n if is_nan:\n logger.info(\"FP16 TRAINING: Nan in gradients, reducing loss scaling\")\n args.loss_scale = args.loss_scale / 2\n model.zero_grad()\n continue\n optimizer.step()\n copy_optimizer_params_to_model(model.named_parameters(), param_optimizer)\n else:\n optimizer.step()\n model.zero_grad()\n global_step += 1\n status_tqdm.set_description_str(\"Iteration / Training Loss: {}\".format((tr_loss /\n nb_tr_examples)))\n\n torch.save(model, model_save_path)\n\n if args.do_eval:\n if args.do_predict and args.input_file_for_pred is not None:\n eval_examples = processor.get_examples_from_file(args.input_file_for_pred)\n else:\n eval_examples = processor.get_dev_examples(args.data_dir)\n if task_name.lower().startswith(\"anli\") or task_name.lower().startswith(\"wsc\"):\n eval_features = 
convert_examples_to_features_mc(\n eval_examples, label_list, args.max_seq_length, tokenizer)\n else:\n eval_features = convert_examples_to_features(\n eval_examples, label_list, args.max_seq_length, tokenizer)\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n if args.local_rank == -1:\n eval_sampler = SequentialSampler(eval_data)\n else:\n eval_sampler = DistributedSampler(eval_data)\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler,\n batch_size=args.eval_batch_size)\n\n logger.info(\"***** Loading model from: {} *****\".format(model_save_path))\n model = torch.load(model_save_path)\n\n model.eval()\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps, nb_eval_examples = 0, 0\n\n eval_predictions = []\n eval_pred_probs = []\n\n logger.info(\"***** Predicting ... 
*****\".format(model_save_path))\n\n for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n label_ids = label_ids.to(device)\n\n with torch.no_grad():\n tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, label_ids)\n\n logits = logits.detach().cpu().numpy()\n label_ids = label_ids.to('cpu').numpy()\n tmp_eval_accuracy = accuracy(logits, label_ids)\n\n eval_predictions.extend(np.argmax(logits, axis=1).tolist())\n\n eval_pred_probs.extend([_compute_softmax(list(l)) for l in logits])\n\n eval_loss += tmp_eval_loss.mean().item()\n eval_accuracy += tmp_eval_accuracy\n\n nb_eval_examples += input_ids.size(0)\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n eval_accuracy = eval_accuracy / nb_eval_examples\n\n result = {'eval_loss': eval_loss,\n 'eval_accuracy': eval_accuracy,\n 'global_step': global_step,\n 'loss': tr_loss / nb_tr_steps if tr_loss is not None else 0.0\n }\n\n output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n if task_name == \"wsc\":\n pred_examples = list(TsvIO.read(args.input_file_for_pred))\n\n else:\n pred_examples = read_jsonl_lines(args.input_file_for_pred)\n\n logger.info(\"***** Eval predictions *****\")\n for record, pred, probs in zip(pred_examples, eval_predictions, eval_pred_probs):\n record['bert_prediction'] = pred\n record['bert_correct'] = pred == (int(record[processor.label_field()]) - 1)\n record['bert_pred_probs'] = probs\n\n write_items([json.dumps(r) for r in pred_examples], args.output_file_for_pred)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5964077115058899, "alphanum_fraction": 
0.6022451519966125, "avg_line_length": 33.37036895751953, "blob_id": "5445fda14f657f2ee72c16206e7a9fa39d1d6375", "content_id": "75c949e19fdf778abfb15cdfa1fbbbbb35f7df83", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11135, "license_type": "permissive", "max_line_length": 100, "num_lines": 324, "path": "/pytorch_pretrained_bert/file_utils.py", "repo_name": "rlebras/pytorch-pretrained-BERT", "src_encoding": "UTF-8", "text": "\"\"\"\nUtilities for working with the local dataset cache.\nThis file is adapted from the AllenNLP library at https://github.com/allenai/allennlp\nCopyright by the AllenNLP authors.\n\"\"\"\nimport gzip\nimport csv\nimport os\nimport logging\nimport shutil\nimport tempfile\nimport json\nfrom urllib.parse import urlparse\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union, IO, Callable, Set, List\nfrom hashlib import sha256\nfrom functools import wraps\n\nfrom tqdm import tqdm\n\nimport boto3\nfrom botocore.exceptions import ClientError\nimport requests\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\nPYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',\n Path.home() / '.pytorch_pretrained_bert'))\n\n\ndef url_to_filename(url: str, etag: str = None) -> str:\n \"\"\"\n Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.\n \"\"\"\n url_bytes = url.encode('utf-8')\n url_hash = sha256(url_bytes)\n filename = url_hash.hexdigest()\n\n if etag:\n etag_bytes = etag.encode('utf-8')\n etag_hash = sha256(etag_bytes)\n filename += '.' 
+ etag_hash.hexdigest()\n\n return filename\n\n\ndef filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:\n \"\"\"\n Return the url and etag (which may be ``None``) stored for `filename`.\n Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.\n \"\"\"\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n\n cache_path = os.path.join(cache_dir, filename)\n if not os.path.exists(cache_path):\n raise FileNotFoundError(\"file {} not found\".format(cache_path))\n\n meta_path = cache_path + '.json'\n if not os.path.exists(meta_path):\n raise FileNotFoundError(\"file {} not found\".format(meta_path))\n\n with open(meta_path) as meta_file:\n metadata = json.load(meta_file)\n url = metadata['url']\n etag = metadata['etag']\n\n return url, etag\n\n\ndef cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine which. If it's a URL, download the file and cache it, and\n return the path to the cached file. 
If it's already a local path,\n make sure the file exists and then return the path.\n \"\"\"\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in ('http', 'https', 's3'):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == '':\n # File, but it doesn't exist.\n raise FileNotFoundError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))\n\n\ndef split_s3_path(url: str) -> Tuple[str, str]:\n \"\"\"Split a full s3 path into the bucket name and path.\"\"\"\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path\n\n\ndef s3_request(func: Callable):\n \"\"\"\n Wrapper function for s3 requests in order to create more helpful error\n messages.\n \"\"\"\n\n @wraps(func)\n def wrapper(url: str, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except ClientError as exc:\n if int(exc.response[\"Error\"][\"Code\"]) == 404:\n raise FileNotFoundError(\"file {} not found\".format(url))\n else:\n raise\n\n return wrapper\n\n\n@s3_request\ndef s3_etag(url: str) -> Optional[str]:\n \"\"\"Check ETag on S3 object.\"\"\"\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag\n\n\n@s3_request\ndef s3_get(url: str, temp_file: IO) -> None:\n \"\"\"Pull a file directly from S3.\"\"\"\n 
s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)\n\n\ndef http_get(url: str, temp_file: IO) -> None:\n req = requests.get(url, stream=True)\n content_length = req.headers.get('Content-Length')\n total = int(content_length) if content_length is not None else None\n progress = tqdm(unit=\"B\", total=total)\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n progress.update(len(chunk))\n temp_file.write(chunk)\n progress.close()\n\n\ndef get_from_cache(url: str, cache_dir: str = None) -> str:\n \"\"\"\n Given a URL, look for the corresponding dataset in the local cache.\n If it's not there, download it. Then return the path to the cached file.\n \"\"\"\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n\n os.makedirs(cache_dir, exist_ok=True)\n\n # Get eTag to add to filename, if it exists.\n if url.startswith(\"s3://\"):\n etag = s3_etag(url)\n else:\n response = requests.head(url, allow_redirects=True)\n if response.status_code != 200:\n raise IOError(\"HEAD request failed for url {} with status code {}\"\n .format(url, response.status_code))\n etag = response.headers.get(\"ETag\")\n\n filename = url_to_filename(url, etag)\n\n # get cache path to put the file\n cache_path = os.path.join(cache_dir, filename)\n\n if not os.path.exists(cache_path):\n # Download to temporary file, then copy to cache dir once finished.\n # Otherwise you get corrupt cache entries if the download gets interrupted.\n with tempfile.NamedTemporaryFile() as temp_file:\n logger.info(\"%s not found in cache, downloading to %s\", url, temp_file.name)\n\n # GET file object\n if url.startswith(\"s3://\"):\n s3_get(url, temp_file)\n else:\n http_get(url, temp_file)\n\n # we are copying the file before closing it, so flush to avoid truncation\n temp_file.flush()\n # shutil.copyfileobj() starts at the current position, so go to the 
start\n temp_file.seek(0)\n\n logger.info(\"copying %s to cache at %s\", temp_file.name, cache_path)\n with open(cache_path, 'wb') as cache_file:\n shutil.copyfileobj(temp_file, cache_file)\n\n logger.info(\"creating metadata file for %s\", cache_path)\n meta = {'url': url, 'etag': etag}\n meta_path = cache_path + '.json'\n with open(meta_path, 'w') as meta_file:\n json.dump(meta, meta_file)\n\n logger.info(\"removing temp file %s\", temp_file.name)\n\n return cache_path\n\n\ndef read_set_from_file(filename: str) -> Set[str]:\n '''\n Extract a de-duped collection (set) of text from a file.\n Expected file format is one item per line.\n '''\n collection = set()\n with open(filename, 'r') as file_:\n for line in file_:\n collection.add(line.rstrip())\n return collection\n\n\ndef get_file_extension(path: str, dot=True, lower: bool = True):\n ext = os.path.splitext(path)[1]\n ext = ext if dot else ext[1:]\n return ext.lower() if lower else ext\n\n\ndef read_jsonl_lines(input_file: str) -> List[dict]:\n with open(input_file) as f:\n lines = f.readlines()\n return [json.loads(l.strip()) for l in lines]\n\n\ndef write_items(items: List[str], output_file):\n with open(output_file, 'w') as f:\n for concept in items:\n f.write(concept + \"\\n\")\n f.close()\n\n\nclass TsvIO(object):\n @staticmethod\n def read(filename, known_schema=None, sep=\"\\t\", gzipped=False, source=None):\n \"\"\"\n Read a TSV file with schema in the first line.\n :param filename: TSV formatted file\n :param first_line_schema: True if the first line is known to contain the schema of the\n tsv file. False by default.\n :param sep: Separator used in the file. Default is '\\t`\n :return: A list of data records where each record is a dict. 
The keys of the dict\n correspond to the column name defined in the schema.\n \"\"\"\n first = True\n\n if gzipped:\n fn = gzip.open\n else:\n fn = open\n\n line_num = 0\n\n with fn(filename, 'rt') as f:\n for line in f:\n if first and known_schema is None:\n first = False\n known_schema = line.split(sep)\n known_schema = [s.strip() for s in known_schema]\n else:\n line_num += 1\n data_fields = line.split(sep)\n data = {k.strip(): v.strip() for k, v in zip(known_schema, data_fields)}\n data['source'] = filename if source is None else source\n data['line_num'] = line_num\n yield data\n f.close()\n\n @staticmethod\n def make_str(item, sub_sep=\"\\t\"):\n if isinstance(item, list):\n return sub_sep.join([TsvIO.make_str(i) for i in item])\n else:\n return str(item)\n\n @staticmethod\n def write(records: List[dict], filename, schema=None, sep='\\t', append=False, sub_sep=';'):\n \"\"\"\n Write a TSV formatted file with the provided schema\n :param records: List of records to be written to the file\n populated\n :param filename: Output filename\n :param schema: Order in which fields from the Sentence object will be written\n :param sep: Separator used in the file. 
Default is '\\t`\n :param append: Whether to use append mode or write a new file\n :param sub_sep: If a field contains a list of items in JSON, this seperator will be used\n to separate values in the list\n :return:\n \"\"\"\n mode = 'a' if append else 'w'\n\n if sep == \"\\t\":\n with open(filename, mode) as f:\n if schema is not None and not append:\n f.write(sep.join(schema) + \"\\n\")\n for record in records:\n f.write(sep.join([TsvIO.make_str(record.__getitem__(field), sub_sep=sub_sep) for\n field in schema]))\n f.write('\\n')\n f.close()\n elif sep == \",\":\n with open(filename, mode) as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=schema)\n writer.writeheader()\n for record in records:\n writer.writerow(record)\n csvfile.close()" }, { "alpha_fraction": 0.7831775546073914, "alphanum_fraction": 0.790654182434082, "avg_line_length": 30.52941131591797, "blob_id": "4f28393b23debee874422e5e6244766e19872c36", "content_id": "34bddf68e9e666a87fb5866f3d6ae234957df1ef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 535, "license_type": "permissive", "max_line_length": 91, "num_lines": 17, "path": "/examples/test_data_processor.py", "repo_name": "rlebras/pytorch-pretrained-BERT", "src_encoding": "UTF-8", "text": "from examples.run_classifier import AnliWithCSKProcessor, convert_examples_to_features_mc\nfrom pytorch_pretrained_bert import BertTokenizer\n\ndir = \"../../abductive-nli/data/abductive_nli/one2one-correspondence/anli_with_csk/\"\n\nprocessor = AnliWithCSKProcessor()\n\nexamples = processor.get_train_examples(dir)\n\ntokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n\nlabel_list = processor.get_labels()\nmax_seq_length = 128\nfeatures = convert_examples_to_features_mc(examples, label_list, max_seq_length, tokenizer)\n\n\nprint(\"OK\")" } ]
3
MistyW/learngit
https://github.com/MistyW/learngit
2187ea7f2adc0d20f647722e3f14396c35119aeb
3fc8e700014b7a45f6827e8103b900e8f775fce2
acab80462d9eefe6d2c8d21a525a3a01ba3c240f
refs/heads/master
"2020-04-12T03:48:27.989203"
"2018-12-19T01:50:46"
"2018-12-19T01:50:46"
162,278,014
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7666666507720947, "alphanum_fraction": 0.7666666507720947, "avg_line_length": 39, "blob_id": "6b6afd7e4c033f34eccbcedca6aa9ac40e209474", "content_id": "c49fcf144f09417a0e484e0075625bb2066d36cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 120, "license_type": "no_license", "max_line_length": 58, "num_lines": 3, "path": "/1/readme.txt", "repo_name": "MistyW/learngit", "src_encoding": "UTF-8", "text": "if you want to exit this windows,please press Z two times.\nThis is the first time to use the github.\nI am very excited!\n" }, { "alpha_fraction": 0.4229249060153961, "alphanum_fraction": 0.4901185631752014, "avg_line_length": 22.090909957885742, "blob_id": "2e76c4b87ac15f95e29d59cbffe7128da8491c48", "content_id": "7402094596ef3436f1387b3aaad6ac3ea28f327c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 303, "license_type": "no_license", "max_line_length": 39, "num_lines": 11, "path": "/alien_invasion_game/Settings.py", "repo_name": "MistyW/learngit", "src_encoding": "UTF-8", "text": "# _*_ coding: utf-8 _*_\n# __author__ = wmm\n\nclass Settings():\n \"\"\"存储《外星人入侵》的所有类\"\"\"\n def __init__(self):\n \"\"\"初始化游戏的设置\"\"\"\n # 屏幕设置\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (230, 230, 230)" } ]
2
AllenMkandla/oop_person
https://github.com/AllenMkandla/oop_person
c01c6f1aa30da1880ec5f1fdf15369c9d6663b29
99cfe95bad45ac215843f384bc2af90d4333b1b4
6963d2f7dbfe5f2304023aed031150349cb4bfdd
refs/heads/master
"2021-01-08T19:33:57.653969"
"2020-02-21T11:31:58"
"2020-02-21T11:31:58"
242,122,823
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5424460172653198, "alphanum_fraction": 0.5467625856399536, "avg_line_length": 32.14285659790039, "blob_id": "009524b7c03aa11313dd2dd0a03d979f0f5f125c", "content_id": "8e29e155adec550f6fe81fcadbab383b8396bc6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 695, "license_type": "no_license", "max_line_length": 106, "num_lines": 21, "path": "/oop.py", "repo_name": "AllenMkandla/oop_person", "src_encoding": "UTF-8", "text": "class Person:\n pass\n\n def __init__(self, name, age, gender, interests):\n\n self.name = name\n self.age = age\n self.gender = gender\n self.interests = interests\n\n \n def hello(self):\n interests_str = 'My interests are '\n for pos in range(len(self.interests)):\n if pos == len(self.interests) - 1:\n interests_str += 'and ' + self.interests[pos] + '.'\n else:\n interests_str += self.interests[pos] + ', ' \n return 'Hello, my name is {} and I am {} years old. {}'.format(self.name, self.age, interests_str)\n\nperson = Person('Ryan', 30, \"male\",['being a hardarse', \"agile\", \"ssd hard drives\"])" } ]
1
763483718/service-outsource
https://github.com/763483718/service-outsource
82f26902c7731d7c6bfecd2ab791cd29c2def14d
ce5d35384d1a24cae8c28ddde45bbaaa37ca2732
c4769e32e65c9be29fc311630d044725e9517f23
refs/heads/master
"2020-04-28T14:17:26.850580"
"2019-04-17T03:00:58"
"2019-04-17T03:00:58"
175,334,032
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6343019008636475, "alphanum_fraction": 0.650602400302887, "avg_line_length": 28.39583396911621, "blob_id": "1669cf1ddf7ab58d20d6f0b60a471f9d31fb94ac", "content_id": "43783fbe3b923044be2fcceff9120689adc0065a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1467, "license_type": "no_license", "max_line_length": 112, "num_lines": 48, "path": "/attitude/NetTool.py", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\n\n\ndef create_weights(shape, stddev=0.05):\n return tf.Variable(tf.truncated_normal(shape, stddev=stddev))\n\n\ndef create_biases(size, value=0.01): # value 初值\n return tf.Variable(tf.constant(value, shape=[size]))\n\n\n# 创建一个卷积层 input:传入的矩阵\ndef create_convolution_layer(input, filter_size, filter_num, use_MaxPool=True, stride_f=1, stride_m=2, ksize=2):\n weights = create_weights(\n shape=[filter_size, filter_size, (int)(input.shape[3]), filter_num])\n\n biases = create_biases(filter_num)\n\n layer = tf.nn.conv2d(input, weights, strides=[\n 1, stride_f, stride_f, 1], padding='SAME')\n layer += biases\n layer = tf.nn.relu(layer)\n\n if use_MaxPool:\n layer = tf.nn.max_pool(layer, ksize=[1, 2, 2, 1], strides=[\n 1, stride_m, stride_m, 1], padding='SAME')\n\n return layer\n\n\ndef create_flatten_layer(input): # 将图片伸展开\n layer_shape = input.get_shape()\n feature_num = layer_shape[1:4].num_elements()\n\n layer = tf.reshape(input, shape=[-1, feature_num])\n return layer\n\n\ndef create_fc_layer(input, weights_shape, keep_prob, use_relu=True): # 创建一个全链接层\n weights = create_weights(weights_shape)\n biases = create_biases(weights_shape[1])\n layer = tf.matmul(input, weights) + biases\n layer = tf.nn.dropout(layer, keep_prob)\n\n if use_relu:\n layer = tf.nn.relu(layer)\n return layer\n" }, { "alpha_fraction": 0.44714587926864624, "alphanum_fraction": 0.44714587926864624, "avg_line_length": 29.53333282470703, 
"blob_id": "d00a056b7f8a44dff23c3ab356821047576dba98", "content_id": "458d47b10b9c16cdc41f0280f6a9c12fea1566d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1538, "license_type": "no_license", "max_line_length": 116, "num_lines": 30, "path": "/FaceCompare/FaceCompare/ReadMe.txt", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "========================================================================\r\n 控制台应用程序:FaceCompare 项目概述\r\n========================================================================\r\n\r\n应用程序向导已为您创建了此 FaceCompare 应用程序。\r\n\r\n本文件概要介绍组成 FaceCompare 应用程序的每个文件的内容。\r\n\r\n\r\nFaceCompare.vcxproj\r\n 这是使用应用程序向导生成的 VC++ 项目的主项目文件,其中包含生成该文件的 Visual C++ 的版本信息,以及有关使用应用程序向导选择的平台、配置和项目功能的信息。\r\n\r\nFaceCompare.vcxproj.filters\r\n 这是使用“应用程序向导”生成的 VC++ 项目筛选器文件。它包含有关项目文件与筛选器之间的关联信息。在 IDE 中,通过这种关联,在特定节点下以分组形式显示具有相似扩展名的文件。例如,“.cpp”文件与“源文件”筛选器关联。\r\n\r\nFaceCompare.cpp\r\n 这是主应用程序源文件。\r\n\r\n/////////////////////////////////////////////////////////////////////////////\r\n其他标准文件:\r\n\r\nStdAfx.h, StdAfx.cpp\r\n 这些文件用于生成名为 FaceCompare.pch 的预编译头 (PCH) 文件和名为 StdAfx.obj 的预编译类型文件。\r\n\r\n/////////////////////////////////////////////////////////////////////////////\r\n其他注释:\r\n\r\n应用程序向导使用“TODO:”注释来指示应添加或自定义的源代码部分。\r\n\r\n/////////////////////////////////////////////////////////////////////////////\r\n" }, { "alpha_fraction": 0.707064151763916, "alphanum_fraction": 0.7232663631439209, "avg_line_length": 24.39316177368164, "blob_id": "f9e1f3aa269fba483430221bd99ebeef1a42d0c1", "content_id": "ede7a25301ba9b653ee15db7ca2898b2e6522fce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3136, "license_type": "no_license", "max_line_length": 146, "num_lines": 117, "path": "/FaceCompare/FaceCompare/FaceEngine.cpp", "repo_name": "763483718/service-outsource", "src_encoding": "GB18030", "text": "#pragma once\r\n#include 
\"stdafx.h\"\r\n#include \"config.h\"\r\n#include \"FaceEngine.h\"\r\n\r\nFaceEngine::FaceEngine()\r\n{\r\n\r\n}\r\n\r\nFaceEngine::~FaceEngine()\r\n{\r\n\r\n}\r\nint FaceEngine::InitEngine()\r\n{\r\n\t//激活接口\r\n\tMRESULT res = ASFActivation(APPID, SDKKEY);\r\n\t//ALActivation fail\r\n\tif (MOK != res && 90114 != res)\r\n\t\treturn res;\r\n\r\n\t//初始化接口\r\n\thandle = NULL;\r\n\tMInt32 mask = ASF_FACE_DETECT | ASF_FACERECOGNITION | ASF_AGE | ASF_GENDER | ASF_FACE3DANGLE;\r\n\tres = ASFInitEngine(ASF_DETECT_MODE_IMAGE, ASF_OP_0_ONLY, 32, 30, mask, &handle);\r\n\treturn res;\r\n}\r\n\r\nint FaceEngine::FaceDetection(ASF_MultiFaceInfo &detectedFaces, IplImage *img)\r\n{\r\n\tint res = ASFDetectFaces(handle, img->width, img->height, ASVL_PAF_RGB24_B8G8R8, (MUInt8*)img->imageData, &detectedFaces);\r\n\r\n\treturn res;\r\n}\r\n\r\nint FaceEngine::ExtractSingleFRFeature(ASF_SingleFaceInfo SingleDetectedFaces, ASF_FaceFeature & feature, IplImage * img)\r\n{\r\n\tint res = ASFFaceFeatureExtract(handle, img->width, img->height, ASVL_PAF_RGB24_B8G8R8, (MUInt8*)img->imageData, &SingleDetectedFaces, &feature);\r\n\t\r\n\treturn 0;\r\n}\r\n\r\nint FaceEngine::ExtractFRFeature(ASF_MultiFaceInfo detectedFaces, ASF_FaceFeature &feature, IplImage *img, int i)\r\n{\r\n\tASF_SingleFaceInfo SingleDetectedFaces = { 0 };\r\n\r\n\tSingleDetectedFaces.faceRect.left = detectedFaces.faceRect[i].left;\r\n\tSingleDetectedFaces.faceRect.top = detectedFaces.faceRect[i].top;\r\n\tSingleDetectedFaces.faceRect.right = detectedFaces.faceRect[i].right;\r\n\tSingleDetectedFaces.faceRect.bottom = detectedFaces.faceRect[i].bottom;\r\n\tSingleDetectedFaces.faceOrient = detectedFaces.faceOrient[i];\r\n\r\n\tint res = ASFFaceFeatureExtract(handle, img->width, img->height, ASVL_PAF_RGB24_B8G8R8, (MUInt8*)img->imageData, &SingleDetectedFaces, &feature);\r\n\r\n\treturn res;\r\n}\r\n\r\nint FaceEngine::FacePairMatching(MFloat &confidenceLevel, ASF_FaceFeature feature1, ASF_FaceFeature feature2)\r\n{\r\n\tint 
res = ASFFaceFeatureCompare(handle, &feature1, &feature2, &confidenceLevel);\r\n\t\r\n\treturn res;\r\n}\r\n\r\n\r\nint FaceEngine::FaceASFProcess(ASF_MultiFaceInfo detectedFaces, IplImage *img)\r\n{\r\n\tMInt32 lastMask = ASF_AGE | ASF_GENDER | ASF_FACE3DANGLE;\r\n\tint res = ASFProcess(handle, img->width, img->height, ASVL_PAF_RGB24_B8G8R8, (MUInt8*)img->imageData, &detectedFaces, lastMask);\r\n\treturn res;\r\n}\r\n\r\nint FaceEngine::AgeEstimation(ASF_MultiFaceInfo detectedFaces, IplImage *img, ASF_AgeInfo &ageInfo)\r\n{\r\n\t// 获取年龄\r\n\tint res = ASFGetAge(handle, &ageInfo);\r\n\r\n\treturn res;\r\n}\r\n\r\nint FaceEngine::GenderEstimation(ASF_MultiFaceInfo detectedFaces, IplImage *img, ASF_GenderInfo &genderInfo)\r\n{\r\n\r\n\t// 获取性别\r\n\tint res = ASFGetGender(handle, &genderInfo);\r\n\r\n\treturn res;\r\n}\r\n\r\nint FaceEngine::Face3DAngle(ASF_MultiFaceInfo detectedFaces, IplImage *img, ASF_Face3DAngle &angleInfo)\r\n{\r\n\r\n\t// 获取3D角度\r\n\tint res = ASFGetFace3DAngle(handle, &angleInfo);\r\n\r\n\treturn res;\r\n}\r\n\r\n\r\n\r\nconst ASF_VERSION* FaceEngine::GetVersion()\r\n{\r\n\tconst ASF_VERSION* pVersionInfo = ASFGetVersion(handle);\r\n\treturn pVersionInfo;\r\n}\r\n\r\n\r\nint FaceEngine::UnInitEngine()\r\n{\r\n\t//反初始化\r\n\tint res = ASFUninitEngine(handle);\r\n\r\n\t//getchar();\r\n\r\n\treturn res;\r\n}" }, { "alpha_fraction": 0.5517503619194031, "alphanum_fraction": 0.5662100315093994, "avg_line_length": 33.1298713684082, "blob_id": "2b6d6841643e8d2516e5c9e64831e68fcc9bf70d", "content_id": "4a33bb7ad367416d266aaf5c5f3f93c37faad34e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2678, "license_type": "no_license", "max_line_length": 110, "num_lines": 77, "path": "/attitude/predict.py", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport numpy as np\nimport cv2 as cv\nimport glob\n\nimageSize = 64\nchannels = 3\n\n\nclass 
Predict():\n def __init__(self, path_session=None, imageSize=64, channels=3, classes=None):\n self._path_session = path_session\n self._imageSize = imageSize\n self._channels = channels\n self._cls = classes\n # self._images = []\n\n def set_config(self, path_session=None, imageSize=None, channels=None, classes=None):\n if path_session != None:\n self._path_session = path_session\n if imageSize != None:\n self._imageSize = imageSize\n if channels != None:\n self._channels = channels\n if classes != None:\n self._cls = classes\n\n def predict(self, paths=None, images=None):\n if paths == None and images == None:\n return\n imgs = []\n if images == None:\n for path in paths:\n print(path)\n img = cv.imread(path)\n img = cv.resize(\n img, (self._imageSize, self._imageSize), 0, 0, cv.INTER_LINEAR)\n img = img.astype(np.float32)\n img = np.multiply(img, 1.0 / 255.0)\n imgs.append(img)\n imgs = np.array(imgs)\n\n with tf.Session() as sess:\n # Step-1: Recreate the network graph. At this step only graph is created.\n saver = tf.train.import_meta_graph(\n './dogs-cats-model/dog-cat.ckpt-10000.meta')\n # Step-2: Now let's load the weights saved using the restore method.\n saver.restore(sess, './dogs-cats-model/dog-cat.ckpt-10000')\n\n # Accessing the default graph which we have restored\n graph = tf.get_default_graph()\n\n # Now, let's get hold of the op that we can be processed to get the output.\n # In the original network y_pred is the tensor that is the prediction of the network\n pred_Y = graph.get_tensor_by_name(\"pred_Y:0\")\n\n # Let's feed the images to the input placeholders\n X = graph.get_tensor_by_name(\"X:0\")\n Y = graph.get_tensor_by_name(\"Y:0\")\n y = np.zeros(shape=[len(imgs), len(self._cls)])\n\n pred = sess.run(pred_Y, feed_dict={X: imgs, Y: y})\n \n for i in pred:\n print(self._cls[i.argmax()])\n\n\ndef main():\n\n filePath = '/Mycomputer/pythonCode/tensorflow/深度学习框架Tensorflow案例实战视频课程【195107】Tensorflow简介与安装/猫狗识别/**.jpg'\n files = 
glob.glob(filePath)\n\n predict = Predict(classes=['cats', 'dogs'])\n predict.predict(paths=files)\n\n\nmain()\n" }, { "alpha_fraction": 0.6812114715576172, "alphanum_fraction": 0.7004687190055847, "avg_line_length": 37.41389465332031, "blob_id": "4d7d9a8407570cdea1ae1630202e8bfcd6f4cf33", "content_id": "b5667a64c425f7ddb87b1dcff0c6a1a954228018", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39318, "license_type": "no_license", "max_line_length": 176, "num_lines": 1022, "path": "/face++/test.py", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "test = {'a': 'b', 'c': 'd'}\nfor i in test:\n print(i)\n\n\n{\n \"server\":\"144.34.249.39\", # ss 服务器 IP\n \"server_port\":8989, # 端口\n \"local_address\": \"127.0.0.1\", # 本地 IP\n \"local_port\":1080, # 本地端口\n \"password\":\"chenchai123.\",# 连接 ss 密码\n \"timeout\":300, # 等待超时\n \"method\":\"aes-256-cfb\", # 加密方式\n \"fast_open\": false, # true 或 false。\n \"workers\": 1 # 工作线程数\n}\n\n### VERSION INFO\nset(OpenPose_VERSION_MAJOR 1)\nset(OpenPose_VERSION_MINOR 4)\nset(OpenPose_VERSION_PATCH 0)\nset(OpenPose_VERSION ${OpenPose_VERSION_MAJOR}.${OpenPose_VERSION_MINOR}.${OpenPose_VERSION_PATCH})\n\n\n### OS-DEPENDENT FLAGS\nset(CMAKE_MACOSX_RPATH 1)\n\n\n### CMAKE HEADERS\n# Ubuntu 18 default. 
After 3.8, no need for find_CUDA\n# https://cmake.org/cmake/help/v3.10/module/FindCUDA.html\n# https://cmake.org/cmake/help/v3.10/command/project.html\n# https://devblogs.nvidia.com/building-cuda-applications-cmake/\nif (${CMAKE_VERSION} VERSION_GREATER 3.9.0)\n cmake_policy(SET CMP0048 NEW)\n project(OpenPose VERSION ${OpenPose_VERSION})\n # # Not tested\n # cmake_policy(SET CMP0048 NEW)\n # set(CUDACXX /usr/local/cuda/bin/nvcc)\n # project(OpenPose VERSION ${OpenPose_VERSION} LANGUAGES CXX CUDA)\n # set(AUTO_FOUND_CUDA TRUE)\n # # else\n # set(AUTO_FOUND_CUDA FALSE)\n# Ubuntu 16 default\nelseif (${CMAKE_VERSION} VERSION_GREATER 3.0.0)\n cmake_policy(SET CMP0048 NEW)\n project(OpenPose VERSION ${OpenPose_VERSION})\nelse (${CMAKE_VERSION} VERSION_GREATER 3.9.0)\n project(OpenPose)\nendif (${CMAKE_VERSION} VERSION_GREATER 3.9.0)\ncmake_minimum_required(VERSION 2.8.7 FATAL_ERROR) # min. cmake version recommended by Caffe\n\n\n### Extra functionality\ninclude(cmake/Utils.cmake)\nif (NOT WIN32 AND NOT UNIX AND NOT APPLE)\n message(FATAL_ERROR \"Unsupported operating system. 
Only Windows, Mac and Unix systems supported.\")\nendif (NOT WIN32 AND NOT UNIX AND NOT APPLE)\n\n\n### CMAKE_BUILD_TYPE\n# Default: Release\n# Bug fixed: By default, it uses something different to Release, that provokes OpenPose to be about 15% slower than\n# it should be.\n# Is CMAKE_BUILD_TYPE \"Debug\" or \"MinSizeRel\" or \"RelWithDebInfo\"?\nset(CMAKE_BUILD_TYPE_KNOWN FALSE)\nif (${CMAKE_BUILD_TYPE} MATCHES \"Debug\")\n set(CMAKE_BUILD_TYPE_KNOWN TRUE)\nendif (${CMAKE_BUILD_TYPE} MATCHES \"Debug\")\nif (${CMAKE_BUILD_TYPE} MATCHES \"MinSizeRel\")\n set(CMAKE_BUILD_TYPE_KNOWN TRUE)\nendif (${CMAKE_BUILD_TYPE} MATCHES \"MinSizeRel\")\nif (${CMAKE_BUILD_TYPE} MATCHES \"RelWithDebInfo\")\n set(CMAKE_BUILD_TYPE_KNOWN TRUE)\nendif (${CMAKE_BUILD_TYPE} MATCHES \"RelWithDebInfo\")\n# Assign proper CMAKE_BUILD_TYPE\nif (${CMAKE_BUILD_TYPE_KNOWN})\n set(CMAKE_BUILD_TYPE \"Release\" CACHE STRING \"Choose the type of build.\")\nelse (${CMAKE_BUILD_TYPE_KNOWN})\n set(CMAKE_BUILD_TYPE \"Release\" CACHE STRING \"Choose the type of build.\" FORCE)\nendif (${CMAKE_BUILD_TYPE_KNOWN})\nset_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS \"Release\" \"Debug\" \"MinSizeRel\" \"RelWithDebInfo\")\n\n\n### FLAGS\nif (WIN32)\n # TODO -- Makeshift solution -- This prevents rerunning build again\n # https://gitlab.kitware.com/cmake/cmake/issues/16783\n set(CMAKE_SUPPRESS_REGENERATION ON)\n\n string (REPLACE \"/D_WINDOWS\" \"\" CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\")\n string (REPLACE \"/DWIN32\" \"\" CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\")\n\n # /GL option causes the code to crash -- fix this\n # sdl flags causes error -- error : unknown attribute \\\"guard\\\"\n\n set(CMAKE_CONFIGURATION_TYPES Release Debug CACHE TYPE INTERNAL FORCE)\n\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} /MP\")\n string(REPLACE \"/W3\" \"/W4\" CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\")\n string(REPLACE \"/GR\" \"\" CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\")\n\n string(REPLACE \"/O2\" \"/Ox\" CMAKE_CXX_FLAGS_RELEASE 
\"${CMAKE_CXX_FLAGS_RELEASE}\")\n string(REPLACE \"/Ob2\" \"\" CMAKE_CXX_FLAGS_RELEASE \"${CMAKE_CXX_FLAGS_RELEASE}\")\n set(CMAKE_CXX_FLAGS_RELEASE \"${CMAKE_CXX_FLAGS_RELEASE} /Ot /Oi /Gy /Z7\")\n\n set(CMAKE_SHARED_LINKER_FLAGS_RELEASE \"${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG:incremental /OPT:REF /OPT:ICF\")\n\n string(REPLACE \"/MDd\" \"/MD\" CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG}\")\n string(REPLACE \"/Zi\" \"/Z7\" CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG}\")\n string(REPLACE \"/RTC1\" \"\" CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG}\")\nelseif (UNIX)\n # Turn on C++11\n add_definitions(-std=c++11)\n set(CMAKE_CXX_FLAGS_RELEASE \"-O3\")\nelseif (APPLE)\n # Turn on C++11\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -std=c++11\")\n set(CMAKE_CXX_FLAGS_RELEASE \"-O3\")\nendif (WIN32)\n\n# C++ additional flags\nif (CMAKE_COMPILER_IS_GNUCXX)\n\n # Select the Enhanced Instruction Set\n set(INSTRUCTION_SET NONE CACHE STRING \"Enable Enhanced Instruction Set\")\n set_property(CACHE INSTRUCTION_SET PROPERTY STRINGS NONE SSE4 AVX)\n\n if (${INSTRUCTION_SET} MATCHES \"SSE4\")\n add_definitions(\"-DWITH_SSE4\")\n set(SIMD_FLAGS \"${SIMD_FLAGS} -msse4.1\")\n endif (${INSTRUCTION_SET} MATCHES \"SSE4\")\n\n if (${INSTRUCTION_SET} MATCHES \"AVX\")\n add_definitions(\"-DWITH_AVX\")\n set(SIMD_FLAGS \"${SIMD_FLAGS} -mavx\")\n endif (${INSTRUCTION_SET} MATCHES \"AVX\")\n\n message(STATUS \"GCC detected, adding compile flags\")\n set(OP_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${SIMD_FLAGS} -fopenmp -Wpedantic -Wall -Wextra -Wfatal-errors\")\nendif (CMAKE_COMPILER_IS_GNUCXX)\n\n\n### PROJECT OPTIONS\n# Select the DL Framework\nset(DL_FRAMEWORK CAFFE CACHE STRING \"Select Deep Learning Framework.\")\nset_property(CACHE DL_FRAMEWORK PROPERTY STRINGS CAFFE)\n# set_property(CACHE DL_FRAMEWORK PROPERTY STRINGS CAFFE CAFFE2 TENSORFLOW)\n\n# Suboptions for Caffe DL Framework\ninclude(CMakeDependentOption)\nif (${DL_FRAMEWORK} MATCHES \"CAFFE\")\n 
CMAKE_DEPENDENT_OPTION(BUILD_CAFFE \"Build Caffe as part of OpenPose.\" ON\n \"DL_FRAMEWORK\" ON)\n # OpenPose flags\n add_definitions(-DUSE_CAFFE)\nendif (${DL_FRAMEWORK} MATCHES \"CAFFE\")\n\nif (WIN32)\n # Select the Enhanced Instruction Set\n set(INSTRUCTION_SET NONE CACHE STRING \"Enable Enhanced Instruction Set\")\n set_property(CACHE INSTRUCTION_SET PROPERTY STRINGS NONE SSE SSE2 AVX AVX2 IA32)\n\n # Suboptions for Enhanced Instruction Set\n if (${INSTRUCTION_SET} MATCHES \"SSE\")\n add_definitions(\"/arch:SSE\")\n elseif (${INSTRUCTION_SET} MATCHES \"SSE2\")\n add_definitions(\"/arch:SSE2\")\n elseif (${INSTRUCTION_SET} MATCHES \"AVX\")\n add_definitions(\"/arch:AVX\")\n elseif (${INSTRUCTION_SET} MATCHES \"AVX2\")\n add_definitions(\"/arch:AVX2\")\n elseif (${INSTRUCTION_SET} MATCHES \"IA32\")\n add_definitions(\"/arch:IA32\")\n endif (${INSTRUCTION_SET} MATCHES \"SSE\")\nendif (WIN32)\n\n# Set the acceleration library\nif (APPLE)\n set(GPU_MODE CPU_ONLY CACHE STRING \"Select the acceleration GPU library or CPU otherwise.\")\nelse (APPLE)\n #set(GPU_MODE CUDA CACHE STRING \"Select the acceleration GPU library or CPU otherwise.\")\n set(GPU_MODE CPU_ONLY CACHE STRING \"No GPU, CPU ONLY\")\nendif (APPLE)\n# Display proper options to user\n# if (CUDA_FOUND AND OpenCL_FOUND)\n# set_property(CACHE GPU_MODE PROPERTY STRINGS CUDA OPENCL CPU_ONLY)\n# elseif (CUDA_FOUND)\n# set_property(CACHE GPU_MODE PROPERTY STRINGS CUDA CPU_ONLY)\n# elseif (OpenCL_FOUND)\n# set_property(CACHE GPU_MODE PROPERTY STRINGS OPENCL CPU_ONLY)\n# else ()\n# set_property(CACHE GPU_MODE PROPERTY STRINGS CPU_ONLY)\n# endif (CUDA_FOUND AND OpenCL_FOUND)\nset_property(CACHE GPU_MODE PROPERTY STRINGS CUDA OPENCL CPU_ONLY)\n\n# Look for CUDA\nset(CUDA_FOUND FALSE)\nif (${GPU_MODE} MATCHES \"CUDA\")\n find_package(CUDA)\nendif (${GPU_MODE} MATCHES \"CUDA\")\n# Look for OpenCL\nset(OpenCL_FOUND FALSE)\nset(CUDA_VERSION_MAJOR 0)\nif (${GPU_MODE} MATCHES \"OPENCL\")\n find_package(OpenCL)\nendif 
(${GPU_MODE} MATCHES \"OPENCL\")\n\n# Code to avoid crash at compiling time if OpenCL is not found\nif (NOT OpenCL_FOUND)\n set(OpenCL_LIBRARIES \"\")\nendif (NOT OpenCL_FOUND)\n# Required for OpenCL in Nvidia graphic cards\nif (CUDA_FOUND AND OpenCL_FOUND AND ${CUDA_VERSION_MAJOR} LESS 9)\n add_definitions(-DLOWER_CL_VERSION)\nendif (CUDA_FOUND AND OpenCL_FOUND AND ${CUDA_VERSION_MAJOR} LESS 9)\n# Handle desired GPU mode option\nif (${GPU_MODE} MATCHES \"CUDA\")\n # OpenPose flags\n add_definitions(-DUSE_CUDA)\n message(STATUS \"Building with CUDA.\")\nelseif (${GPU_MODE} MATCHES \"CPU_ONLY\")\n add_definitions(-DUSE_CPU_ONLY)\n message(STATUS \"Building CPU Only.\")\n # OpenPose flag for Caffe\n add_definitions(-DCPU_ONLY)\nelseif (${GPU_MODE} MATCHES \"OPENCL\")\n # OpenPose flag for Caffe\n add_definitions(-DUSE_OPENCL)\n add_definitions(-DUSE_GREENTEA)\n message(STATUS \"Building with OpenCL.\")\nendif (${GPU_MODE} MATCHES \"CUDA\")\n\n# Intel branch with MKL Support\nif (${GPU_MODE} MATCHES \"CPU_ONLY\")\n if (UNIX AND NOT APPLE)\n OPTION(USE_MKL \"Use MKL Intel Branch.\" ON)\n endif (UNIX AND NOT APPLE)\nendif (${GPU_MODE} MATCHES \"CPU_ONLY\")\n\nif (${USE_MKL})\n # OpenPose flags\n add_definitions(-DUSE_MKL)\n message(STATUS \"Building with MKL support.\")\nendif (${USE_MKL})\n\n# Set/disable profiler\nif (PROFILER_ENABLED)\n add_definitions(-DPROFILER_ENABLED)\nendif (PROFILER_ENABLED)\n\n# Suboptions for GPU architectures\nif (${GPU_MODE} MATCHES \"CUDA\")\n set(CUDA_ARCH Auto CACHE STRING \"Select target NVIDIA GPU achitecture.\")\n set_property(CACHE CUDA_ARCH PROPERTY STRINGS Auto All Manual)\nendif (${GPU_MODE} MATCHES \"CUDA\")\n\n# Suboptions for acceleration library\nif (${GPU_MODE} MATCHES \"CUDA\")\n option(USE_CUDNN \"Build OpenPose with cuDNN library support.\" ON)\nendif (${GPU_MODE} MATCHES \"CUDA\")\n\n# Suboptions for OpenPose 3D Reconstruction module and demo\noption(WITH_3D_RENDERER \"Add OpenPose 3D renderer module (it requires FreeGLUT 
library).\" OFF)\nif (UNIX AND NOT APPLE)\n option(WITH_CERES \"Add Ceres support for advanced 3-D reconstruction.\" OFF)\nendif (UNIX AND NOT APPLE)\noption(WITH_FLIR_CAMERA \"Add FLIR (formerly Point Grey) camera code (requires Spinnaker SDK already installed).\" OFF)\n# option(WITH_3D_ADAM_MODEL \"Add 3-D Adam model (requires OpenGL, Ceres, Eigen, OpenMP, FreeImage, GLEW, and IGL already installed).\" OFF)\n\n# Faster GUI rendering\n# Note: It seems to work by default in Windows and Ubuntu, but not in Mac nor Android.\n# More info: https://stackoverflow.com/questions/21129683/does-opengl-display-image-faster-than-opencv?answertab=active#tab-top\noption(WITH_OPENCV_WITH_OPENGL \"Much faster GUI display, but you must also enable OpenGL support in OpenCV by configuring OpenCV using CMake with WITH_OPENGL=ON flag.\" OFF)\n\n# Set the acceleration library\nif (WIN32 OR APPLE)\n set(WITH_EIGEN NONE CACHE STRING \"Select the Eigen mode (NONE if not required or DOWNLOAD to let OpenPose download it).\")\n set_property(CACHE WITH_EIGEN PROPERTY STRINGS NONE BUILD)\nelseif (UNIX AND NOT APPLE)\n set(WITH_EIGEN NONE CACHE STRING \"Select the Eigen mode (NONE if not required, APT_GET if you used `sudo apt-get install libeigen3-dev`, BUILD to let OpenPose download it).\")\n set_property(CACHE WITH_EIGEN PROPERTY STRINGS NONE APT_GET BUILD)\nendif (WIN32 OR APPLE)\n\n# # Suboptions for OpenPose tracking\n# if (UNIX AND NOT APPLE)\n# option(WITH_TRACKING \"Add OpenPose 3D tracking module (it requires OpenCV with CUDA support).\" OFF)\n# endif (UNIX AND NOT APPLE)\n\n# Download the models\noption(DOWNLOAD_BODY_25_MODEL \"Download body 25-keypoint (body COCO and 6-keypoint foot) model.\" ON)\noption(DOWNLOAD_BODY_COCO_MODEL \"Download body 18-keypoint COCO model.\" OFF)\noption(DOWNLOAD_BODY_MPI_MODEL \"Download body 15-keypoint MPI model.\" OFF)\noption(DOWNLOAD_FACE_MODEL \"Download face model.\" ON)\noption(DOWNLOAD_HAND_MODEL \"Download hand model.\" ON)\n\n# Asio\n# 
option(USE_ASIO \"Include Asio header-only library.\" OFF)\n\n# More options\noption(BUILD_EXAMPLES \"Build OpenPose examples.\" ON)\noption(BUILD_DOCS \"Build OpenPose documentation.\" OFF)\noption(BUILD_PYTHON \"Build OpenPose python.\" OFF)\nif (WIN32)\n option(BUILD_BIN_FOLDER \"Copy all required 3rd-party DLL files into {build_directory}/bin. Disable to save some memory.\" ON)\nendif (WIN32)\n\n# Unity\noption(BUILD_UNITY_SUPPORT \"Build OpenPose as a Unity plugin.\" OFF)\n\n# Build as shared library\noption(BUILD_SHARED_LIBS \"Build as shared lib.\" ON)\n\n# Speed profiler\noption(PROFILER_ENABLED \"If enabled, OpenPose will be able to print out speed information at runtime.\" OFF)\n\n# Threads - Pthread\nif (${GPU_MODE} MATCHES \"OPENCL\" OR (UNIX OR APPLE))\n unset(CMAKE_THREAD_LIBS_INIT CACHE)\n find_package(Threads)\nendif (${GPU_MODE} MATCHES \"OPENCL\" OR (UNIX OR APPLE))\n\n# Caffe OpenCL Boost Issue\nif (APPLE)\n if (${GPU_MODE} MATCHES \"OPENCL\" OR BUILD_PYTHON)\n unset(Boost_SYSTEM_LIBRARY CACHE)\n find_package(Boost COMPONENTS system REQUIRED)\n else ()\n set(Boost_SYSTEM_LIBRARY \"\")\n endif ()\nendif (APPLE)\n\n### FIND REQUIRED PACKAGES\nlist(APPEND CMAKE_MODULE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules\")\n\nif (${GPU_MODE} MATCHES \"CUDA\")\n find_package(CUDA)\nendif (${GPU_MODE} MATCHES \"CUDA\")\n\n# Adding 3D\nif (WITH_OPENCV_WITH_OPENGL)\n # OpenPose flags\n add_definitions(-DUSE_OPENCV_WITH_OPENGL)\nendif (WITH_OPENCV_WITH_OPENGL)\nif (WITH_3D_RENDERER)\n # OpenPose flags\n add_definitions(-DUSE_3D_RENDERER)\nendif (WITH_3D_RENDERER)\nif (WITH_CERES)\n add_definitions(-DUSE_CERES)\nendif (WITH_CERES)\nif (WITH_FLIR_CAMERA)\n # OpenPose flags\n add_definitions(-DUSE_FLIR_CAMERA)\nendif (WITH_FLIR_CAMERA)\nif (WITH_3D_ADAM_MODEL)\n # OpenPose flags\n add_definitions(-DUSE_3D_ADAM_MODEL)\nendif (WITH_3D_ADAM_MODEL)\n\n# Adding tracking\nif (WITH_TRACKING)\n # OpenPose flags\n add_definitions(-DUSE_TRACKING)\nendif 
(WITH_TRACKING)\n\n# Unity\nif (BUILD_UNITY_SUPPORT)\n # OpenPose flags\n add_definitions(-DUSE_UNITY_SUPPORT)\nendif (BUILD_UNITY_SUPPORT)\n\n# Calibration\n# No Eigen\nif (${WITH_EIGEN} MATCHES \"NONE\")\n if (WITH_CERES)\n message(FATAL_ERROR \"Eigen is required (enable `WITH_EIGEN`) if CERES is enabled.\")\n endif (WITH_CERES)\n# If Eigen used\nelse (${WITH_EIGEN} MATCHES \"NONE\")\n # OpenPose flags\n add_definitions(-DUSE_EIGEN)\n # OpenPose download/builds Eigen\n if (${WITH_EIGEN} MATCHES \"BUILD\")\n # Download it\n set(OP_URL \"http://posefs1.perception.cs.cmu.edu/OpenPose/3rdparty/\")\n set(FIND_LIB_PREFIX ${CMAKE_SOURCE_DIR}/3rdparty/)\n download_zip(\"eigen_2018_05_23.zip\" ${OP_URL} ${FIND_LIB_PREFIX} 29B9B2FD4679D587BB67467F09EE8365)\n # Set path\n set(EIGEN3_INCLUDE_DIRS \"3rdparty/eigen/include/\")\n # Alread installed with apt-get\n elseif (${WITH_EIGEN} MATCHES \"APT_GET\")\n # Eigen\n find_package(PkgConfig)\n pkg_check_modules(EIGEN3 REQUIRED eigen3)\n endif (${WITH_EIGEN} MATCHES \"BUILD\")\nendif (${WITH_EIGEN} MATCHES \"NONE\")\n\nif (UNIX OR APPLE)\n if (${GPU_MODE} MATCHES \"CUDA\")\n include(cmake/Cuda.cmake)\n find_package(CuDNN)\n endif (${GPU_MODE} MATCHES \"CUDA\")\n find_package(GFlags) # For Caffe and OpenPose\n find_package(Glog) # For Caffe\n find_package(Protobuf REQUIRED) # For Caffe\n\n if (OpenCV_CONFIG_FILE)\n include (${OpenCV_CONFIG_FILE})\n # Allow explicitly setting the OpenCV includes and libs\n elseif (OpenCV_INCLUDE_DIRS AND OpenCV_LIBS)\n set(OpenCV_FOUND 1)\n elseif (OpenCV_INCLUDE_DIRS AND OpenCV_LIBS_DIR)\n file(GLOB_RECURSE OpenCV_LIBS \"${OpenCV_LIBS_DIR}*.so\")\n set(OpenCV_FOUND 1)\n else (OpenCV_CONFIG_FILE)\n find_package(OpenCV)\n endif (OpenCV_CONFIG_FILE)\n\n # 3D\n if (WITH_3D_RENDERER)\n # GLUT\n find_package(GLUT REQUIRED)\n # OpenGL\n find_package(OpenGL REQUIRED)\n endif (WITH_3D_RENDERER)\n if (WITH_CERES)\n # Eigen + Ceres\n find_package(Ceres REQUIRED COMPONENTS SuiteSparse)\n endif (WITH_CERES)\n 
if (WITH_FLIR_CAMERA)\n # Spinnaker\n find_package(Spinnaker)\n if (NOT SPINNAKER_FOUND)\n message(FATAL_ERROR \"Spinnaker not found. Either turn off the `WITH_FLIR_CAMERA` option or specify the path to\n the Spinnaker includes and libs.\")\n endif (NOT SPINNAKER_FOUND)\n endif (WITH_FLIR_CAMERA)\n if (WITH_3D_ADAM_MODEL)\n if (NOT WITH_3D_RENDERER)\n message(FATAL_ERROR \"WITH_3D_RENDERER is required if WITH_3D_ADAM_MODEL is enabled.\")\n endif (NOT WITH_3D_RENDERER)\n find_package(PkgConfig)\n pkg_check_modules(EIGEN3 REQUIRED eigen3)\n # Others: sudo apt-get install libglm-dev\n # http://ceres-solver.org\n find_package(Ceres REQUIRED COMPONENTS SuiteSparse)\n # sudo apt-get install libglew-dev\n find_package(GLEW REQUIRED)\n # find_package(GLUT REQUIRED) # TODO: Duplicated of WITH_3D_RENDERER, clean somehow (like Eigen)\n # git clone --recursive https://github.com/libigl/libigl.git\n # No installation, it's header only\n # TODO: It's header only (as Eigen), do BUILD option too to download it\n find_package(LIBIGL REQUIRED)\n find_package(OpenMP REQUIRED)\n # Only adam/renderer::Renderer::IdleSaveImage() uses it. 
Make dependency optional in hand_model\n # FIND_LIBRARY(FREE_IMAGE_LIBRARY NAMES libfreeimage.so)\n set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}\")\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}\")\n set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}\")\n endif (WITH_3D_ADAM_MODEL)\n\n # OpenMP\n if (${GPU_MODE} MATCHES \"CPU_ONLY\")\n find_package(OpenMP)\n if (OPENMP_FOUND)\n set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}\")\n set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}\")\n set(CMAKE_EXE_LINKER_FLAGS \"${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}\")\n endif (OPENMP_FOUND)\n endif (${GPU_MODE} MATCHES \"CPU_ONLY\")\n\n if (${GPU_MODE} MATCHES \"CUDA\")\n # Set CUDA Flags\n set(CUDA_NVCC_FLAGS \"${CUDA_NVCC_FLAGS} -std=c++11\")\n\n if (NOT CUDA_FOUND)\n message(STATUS \"CUDA not found.\")\n execute_process(COMMAND cat install_cuda.sh WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/scripts/ubuntu)\n message(FATAL_ERROR \"Install CUDA using the above commands.\")\n endif (NOT CUDA_FOUND)\n\n if (USE_CUDNN AND NOT CUDNN_FOUND)\n message(STATUS \"cuDNN not found.\")\n execute_process(COMMAND cat install_cudnn.sh WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/scripts/ubuntu)\n message(FATAL_ERROR \"Install cuDNN using the above commands. or turn off cuDNN by setting USE_CUDNN to OFF.\")\n endif (USE_CUDNN AND NOT CUDNN_FOUND)\n endif (${GPU_MODE} MATCHES \"CUDA\")\n\n if (NOT GLOG_FOUND)\n message(FATAL_ERROR \"Glog not found. Install Glog from the command line using the command(s) -\\\n sudo apt-get install libgoogle-glog-dev\")\n endif (NOT GLOG_FOUND)\n\n if (NOT GFLAGS_FOUND)\n message(FATAL_ERROR \"GFlags not found. Install GFlags from the command line using the command(s) --\\\n sudo apt-get install libgflags-dev\")\n endif (NOT GFLAGS_FOUND)\n\n if (NOT OpenCV_FOUND)\n message(FATAL_ERROR \"OpenCV not found. 
Install OpenCV from the command line using the command(s) --\\\n sudo apt-get install libopencv-dev\")\n endif (NOT OpenCV_FOUND)\n\nendif (UNIX OR APPLE)\n\nif (WIN32)\n\n if (\"${CMAKE_VERSION}\" VERSION_GREATER 3.6.3)\n set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT OpenPoseDemo)\n endif (\"${CMAKE_VERSION}\" VERSION_GREATER 3.6.3)\n\n set_property(GLOBAL PROPERTY USE_FOLDERS ON)\n set(FIND_LIB_PREFIX ${CMAKE_SOURCE_DIR}/3rdparty/windows)\n\n # Download Windows 3rd party\n message(STATUS \"Downloading windows dependencies...\")\n set(OP_WIN_URL \"http://posefs1.perception.cs.cmu.edu/OpenPose/3rdparty/windows\")\n set(OP_WIN_DIR \"${CMAKE_SOURCE_DIR}/3rdparty/windows\")\n\n # Download required zip files\n download_zip(\"opencv_310.zip\" ${OP_WIN_URL} ${FIND_LIB_PREFIX} 1e5240a64b814b3c0b822f136be78ad7)\n download_zip(\"caffe3rdparty_2018_06_26.zip\" ${OP_WIN_URL} ${FIND_LIB_PREFIX} 892A39C0CFBAA11CA8648B125852E01F)\n if (${GPU_MODE} MATCHES \"OPENCL\")\n download_zip(\"caffe_opencl_2018_02_13.zip\" ${OP_WIN_URL} ${FIND_LIB_PREFIX} 3ac3e1acf5ee6a4e57920be73053067a)\n elseif (${GPU_MODE} MATCHES \"CPU_ONLY\")\n download_zip(\"caffe_cpu_2018_05_27.zip\" ${OP_WIN_URL} ${FIND_LIB_PREFIX} 87E8401B6DFBAC5B8E909DD20E3B3390)\n else (${GPU_MODE} MATCHES \"OPENCL\")\n download_zip(\"caffe_2018_01_18.zip\" ${OP_WIN_URL} ${FIND_LIB_PREFIX} 4b8e548cc7ea20abea472950dd5301bd)\n endif (${GPU_MODE} MATCHES \"OPENCL\")\n if (WITH_3D_RENDERER)\n download_zip(\"freeglut_2018_01_14.zip\" ${OP_WIN_URL} ${FIND_LIB_PREFIX} BB182187285E06880F0EDE3A39530091)\n endif (WITH_3D_RENDERER)\n message(STATUS \"Windows dependencies downloaded.\")\n\n find_library(OpenCV_LIBS opencv_world310 HINTS ${FIND_LIB_PREFIX}/opencv/x64/vc14/lib)\n find_library(GFLAGS_LIBRARY_RELEASE gflags HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)\n find_library(GFLAGS_LIBRARY_DEBUG gflagsd HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)\n find_library(GLOG_LIBRARY_RELEASE glog HINTS 
${FIND_LIB_PREFIX}/caffe3rdparty/lib)\n find_library(GLOG_LIBRARY_DEBUG glogd HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)\n find_library(OpenCV_LIBS opencv_world310 HINTS ${FIND_LIB_PREFIX}/opencv/x64/vc14/lib)\n\n # If OpenPose builds it\n if (BUILD_CAFFE)\n unset(Caffe_INCLUDE_DIRS CACHE)\n unset(Caffe_LIB CACHE)\n unset(Caffe_Proto_LIB CACHE)\n endif (BUILD_CAFFE)\n # OpenCL\n if (${GPU_MODE} MATCHES \"OPENCL\")\n set(VCXPROJ_FILE_GPU_MODE \"_CL\")\n find_library(Caffe_LIB caffe HINTS ${FIND_LIB_PREFIX}/caffe_opencl/lib)\n find_library(Caffe_Proto_LIB caffeproto HINTS ${FIND_LIB_PREFIX}/caffe_opencl/lib)\n # CPU & CUDA\n else (${GPU_MODE} MATCHES \"OPENCL\")\n # CPU\n if (${GPU_MODE} MATCHES \"CPU_ONLY\")\n set(VCXPROJ_FILE_GPU_MODE \"_CPU\")\n find_library(Caffe_LIB caffe HINTS ${FIND_LIB_PREFIX}/caffe_cpu/lib)\n find_library(Caffe_Proto_LIB caffeproto HINTS ${FIND_LIB_PREFIX}/caffe_cpu/lib)\n # CUDA\n else (${GPU_MODE} MATCHES \"CPU_ONLY\")\n set(VCXPROJ_FILE_GPU_MODE \"\")\n find_library(Caffe_LIB caffe HINTS ${FIND_LIB_PREFIX}/caffe/lib)\n find_library(Caffe_Proto_LIB caffeproto HINTS ${FIND_LIB_PREFIX}/caffe/lib)\n endif (${GPU_MODE} MATCHES \"CPU_ONLY\")\n endif (${GPU_MODE} MATCHES \"OPENCL\")\n # Boost DepCopy over required DLL F\n if (${GPU_MODE} MATCHES \"CPU_ONLY\" OR ${GPU_MODE} MATCHES \"OPENCL\" OR BUILD_PYTHON)\n find_library(BOOST_SYSTEM_LIB_RELEASE libboost_system-vc140-mt-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)\n find_library(BOOST_SYSTEM_LIB_DEBUG libboost_system-vc140-mt-gd-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)\n find_library(BOOST_FILESYSTEM_LIB_RELEASE libboost_filesystem-vc140-mt-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)\n find_library(BOOST_FILESYSTEM_LIB_DEBUG libboost_filesystem-vc140-mt-gd-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)\n else ()\n set(BOOST_SYSTEM_LIB_RELEASE \"\")\n set(BOOST_SYSTEM_LIB_DEBUG \"\")\n set(BOOST_FILESYSTEM_LIB_RELEASE \"\")\n set(BOOST_FILESYSTEM_LIB_DEBUG \"\")\n endif ()\n 
if (WITH_3D_RENDERER)\n find_library(GLUT_LIBRARY freeglut HINTS ${FIND_LIB_PREFIX}/freeglut/lib)\n message(STATUS \"\\${GLUT_LIBRARY} = ${GLUT_LIBRARY}\")\n endif (WITH_3D_RENDERER)\n if (WITH_FLIR_CAMERA)\n find_library(SPINNAKER_LIB spinnaker_v140 HINTS ${FIND_LIB_PREFIX}/spinnaker/lib)\n endif (WITH_FLIR_CAMERA)\n set(Caffe_LIBS ${Caffe_LIB};${Caffe_Proto_LIB})\n set(OpenCV_INCLUDE_DIRS \"3rdparty/windows/opencv/include\")\n # OpenCL\n if (${GPU_MODE} MATCHES \"OPENCL\")\n unset(Caffe_INCLUDE_DIRS CACHE)\n set(Caffe_INCLUDE_DIRS \"3rdparty/windows/caffe_opencl/include;3rdparty/windows/caffe_opencl/include2\" CACHE FILEPATH \"Caffe_INCLUDE_DIRS\")\n # CUDA and CPU\n else (${GPU_MODE} MATCHES \"OPENCL\")\n # CPU\n if (${GPU_MODE} MATCHES \"CPU_ONLY\")\n set(Caffe_INCLUDE_DIRS \"3rdparty/windows/caffe_cpu/include;3rdparty/windows/caffe_cpu/include2\" CACHE FILEPATH \"Caffe_INCLUDE_DIRS\")\n # CUDA\n else (${GPU_MODE} MATCHES \"CPU_ONLY\")\n set(Caffe_INCLUDE_DIRS \"3rdparty/windows/caffe/include;3rdparty/windows/caffe/include2\" CACHE FILEPATH \"Caffe_INCLUDE_DIRS\")\n endif (${GPU_MODE} MATCHES \"CPU_ONLY\")\n endif (${GPU_MODE} MATCHES \"OPENCL\")\n set(Boost_INCLUDE_DIRS \"3rdparty/windows/caffe3rdparty/include/boost-1_61\")\n set(WINDOWS_INCLUDE_DIRS \"3rdparty/windows/caffe3rdparty/include\")\n if (WITH_3D_RENDERER)\n set(GLUT_INCLUDE_DIRS \"3rdparty/windows/freeglut/include\")\n endif (WITH_3D_RENDERER)\n if (WITH_FLIR_CAMERA)\n set(SPINNAKER_INCLUDE_DIRS \"3rdparty/windows/spinnaker/include\")\n endif (WITH_FLIR_CAMERA)\n set(Caffe_FOUND 1)\n\n # Build DLL Must be on if Build Python is on\n if (BUILD_PYTHON)\n if (NOT BUILD_BIN_FOLDER)\n message(FATAL_ERROR \"BUILD_BIN_FOLDER must be turned on to as well to build python library\")\n endif (NOT BUILD_BIN_FOLDER)\n endif (BUILD_PYTHON)\n\n # Auto copy DLLs\n if (BUILD_BIN_FOLDER)\n # Locate DLLs\n # Caffe DLLs\n if (${GPU_MODE} MATCHES \"CUDA\")\n file(GLOB CAFFE_DLL 
\"${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe/bin/*.dll\")\n elseif (${GPU_MODE} MATCHES \"OPENCL\")\n file(GLOB CAFFE_DLL \"${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe_opencl/bin/*.dll\")\n elseif (${GPU_MODE} MATCHES \"CPU_ONLY\")\n file(GLOB CAFFE_DLL \"${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe_cpu/bin/*.dll\")\n endif ()\n # Caffe 3rd-party DLLs\n file(GLOB CAFFE_3RD_PARTY_DLL \"${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe3rdparty/lib/*.dll\")\n # OpenCV DLLs\n file(GLOB OPENCV_DLL \"${CMAKE_SOURCE_DIR}/3rdparty/windows/opencv/x64/vc14/bin/*.dll\")\n # GLUT DLLs\n file(GLOB GLUT_DLL \"${CMAKE_SOURCE_DIR}/3rdparty/windows/freeglut/bin/*.dll\")\n # Spinnaker DLLs and other files\n file(GLOB SPINNAKER_DLL \"${CMAKE_SOURCE_DIR}/3rdparty/windows/spinnaker/bin/*\")\n # Copy DLLs into same folder\n set(BIN_FOLDER ${CMAKE_BINARY_DIR}/bin)\n file(MAKE_DIRECTORY ${BIN_FOLDER})\n file(COPY ${CAFFE_DLL} DESTINATION ${BIN_FOLDER})\n file(COPY ${OPENCV_DLL} DESTINATION ${BIN_FOLDER})\n file(COPY ${CAFFE_3RD_PARTY_DLL} DESTINATION ${BIN_FOLDER})\n file(COPY ${GLUT_DLL} DESTINATION ${BIN_FOLDER})\n file(COPY ${SPINNAKER_DLL} DESTINATION ${BIN_FOLDER})\n endif (BUILD_BIN_FOLDER)\n\nendif (WIN32)\n\n\n### CAFFE\nif (UNIX OR APPLE)\n\n if (${DL_FRAMEWORK} MATCHES \"CAFFE\")\n\n # Check if the user specified caffe paths\n if (Caffe_INCLUDE_DIRS AND Caffe_LIBS AND NOT BUILD_CAFFE)\n message(STATUS \"\\${Caffe_INCLUDE_DIRS} set by the user to \" ${Caffe_INCLUDE_DIRS})\n message(STATUS \"\\${Caffe_LIBS} set by the user to \" ${Caffe_LIBS})\n set(Caffe_FOUND 1)\n endif (Caffe_INCLUDE_DIRS AND Caffe_LIBS AND NOT BUILD_CAFFE)\n\n # Else build from scratch\n if (BUILD_CAFFE)\n\n # Download Caffe\n message(STATUS \"Caffe will be downloaded from source now. 
NOTE: This process might take several minutes depending\n on your internet connection.\")\n\n # Check if pulled\n file(GLOB CAFFE_DIR_VALID ${CMAKE_SOURCE_DIR}/3rdparty/caffe/*)\n list(LENGTH CAFFE_DIR_VALID CAFFE_DIR_VALID_LENGTH)\n if (CAFFE_DIR_VALID_LENGTH EQUAL 0)\n execute_process(COMMAND git submodule update --init ${CMAKE_SOURCE_DIR}/3rdparty/caffe)\n # execute_process(COMMAND git submodule update --init --recursive --remote) # This would initialize them all\n else (CAFFE_DIR_VALID_LENGTH EQUAL 0)\n message(STATUS \"Caffe has already been downloaded.\")\n endif (CAFFE_DIR_VALID_LENGTH EQUAL 0)\n\n # Build Process\n set(CAFFE_CPU_ONLY OFF)\n if (${GPU_MODE} MATCHES \"CUDA\")\n # execute_process(COMMAND git checkout master WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/caffe)\n execute_process(COMMAND git checkout b5ede48 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/caffe)\n elseif (${GPU_MODE} MATCHES \"CPU_ONLY\")\n if (USE_MKL)\n #execute_process(COMMAND git checkout intel WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/caffe)\n execute_process(COMMAND git checkout b6712ce WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/caffe)\n execute_process(COMMAND sh prepare_mkl.sh WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/caffe/external/mkl\n OUTPUT_VARIABLE rv)\n set( MLIST ${rv} )\n separate_arguments(MLIST)\n list(GET MLIST 0 MKL_PATH)\n message(STATUS ${MKL_PATH})\n file(GLOB MKL_SO\n \"${MKL_PATH}lib/*\"\n )\n file(COPY ${MKL_SO} DESTINATION ${CMAKE_BINARY_DIR}/caffe)\n\n # New MLSL Lib\n #execute_process(COMMAND sh prepare_mlsl.sh WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/caffe/external/mlsl\n # OUTPUT_VARIABLE rv)\n #set( MLIST ${rv} )\n #separate_arguments(MLIST)\n #list(GET MLIST 0 MLSL_PATH)\n #message(STATUS ${MLSL_PATH})\n #file(GLOB MLSL_SO\n # \"${MLSL_PATH}/intel64/lib/*\"\n # )\n #file(COPY ${MLSL_SO} DESTINATION ${CMAKE_BINARY_DIR}/caffe)\n\n set(MKL_LIBS\n #\"${CMAKE_BINARY_DIR}/caffe/libmlsl.so\"\n 
\"${CMAKE_BINARY_DIR}/caffe/libiomp5.so\"\n \"${CMAKE_BINARY_DIR}/caffe/libmklml_intel.so\"\n \"${CMAKE_BINARY_DIR}/caffe/lib/libmkldnn.so\"\n )\n else (USE_MKL)\n # execute_process(COMMAND git checkout master WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/caffe)\n execute_process(COMMAND git checkout b5ede48 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/caffe)\n endif (USE_MKL)\n set(CAFFE_CPU_ONLY ON)\n set(USE_CUDNN OFF)\n elseif (${GPU_MODE} MATCHES \"OPENCL\")\n execute_process(COMMAND git checkout fe2a1102 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/3rdparty/caffe)\n set(USE_CUDNN OFF)\n endif (${GPU_MODE} MATCHES \"CUDA\")\n\n # Build Caffe\n message(STATUS \"Caffe will be built from source now.\")\n find_package(Caffe)\n include(ExternalProject)\n set(CAFFE_PREFIX caffe)\n set(CAFFE_URL ${CMAKE_SOURCE_DIR}/3rdparty/caffe)\n\n # One for Intel Branch and one for Master\n if (USE_MKL)\n ExternalProject_Add(openpose_lib\n SOURCE_DIR ${CAFFE_URL}\n PREFIX ${CAFFE_PREFIX}\n CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>\n -DMKLDNN_INSTALL_DIR:PATH=<INSTALL_DIR>\n -DUSE_MKL2017_AS_DEFAULT_ENGINE=${CAFFE_CPU_ONLY}\n -DUSE_CUDNN=${USE_CUDNN}\n -DCUDA_ARCH_NAME=${CUDA_ARCH}\n -DCUDA_ARCH_BIN=${CUDA_ARCH_BIN}\n -DCUDA_ARCH_PTX=${CUDA_ARCH_PTX}\n -DCPU_ONLY=${CAFFE_CPU_ONLY}\n -DCMAKE_BUILD_TYPE=Release\n -DBUILD_docs=OFF\n -DBUILD_python=OFF\n -DBUILD_python_layer=OFF\n -DUSE_LEVELDB=OFF\n -DUSE_LMDB=OFF\n -DUSE_OPENCV=OFF)\n # -DOpenCV_DIR=${OpenCV_DIR})\n else (USE_MKL)\n ExternalProject_Add(openpose_lib\n SOURCE_DIR ${CAFFE_URL}\n PREFIX ${CAFFE_PREFIX}\n CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>\n -DUSE_CUDNN=${USE_CUDNN}\n -DCUDA_ARCH_NAME=${CUDA_ARCH}\n -DCUDA_ARCH_BIN=${CUDA_ARCH_BIN}\n -DCUDA_ARCH_PTX=${CUDA_ARCH_PTX}\n -DCPU_ONLY=${CAFFE_CPU_ONLY}\n -DCMAKE_BUILD_TYPE=Release\n -DBUILD_docs=OFF\n -DBUILD_python=OFF\n -DBUILD_python_layer=OFF\n -DUSE_LEVELDB=OFF\n -DUSE_LMDB=OFF\n -DUSE_OPENCV=OFF)\n # -DOpenCV_DIR=${OpenCV_DIR})\n endif 
(USE_MKL)\n\n ExternalProject_Get_Property(openpose_lib install_dir)\n\n if (NOT Caffe_FOUND)\n add_custom_command(TARGET openpose_lib\n POST_BUILD\n COMMAND ${CMAKE_COMMAND} ${CMAKE_SOURCE_DIR}\n COMMAND $(MAKE)\n WORKING_DIRECTORY ${CMAKE_BINARY_DIR}\n COMMENT \"Rerunning cmake after building Caffe submodule\")\n endif (NOT Caffe_FOUND)\n\n endif (BUILD_CAFFE)\n\n if (NOT Caffe_FOUND AND NOT BUILD_CAFFE)\n message(FATAL_ERROR \"Caffe not found. Either turn on the BUILD_CAFFE option or specify the path of Caffe includes\n and libs using -DCaffe_INCLUDE_DIRS and -DCaffe_LIBS.\")\n endif (NOT Caffe_FOUND AND NOT BUILD_CAFFE)\n\n endif (${DL_FRAMEWORK} MATCHES \"CAFFE\")\n\nendif (UNIX OR APPLE)\n\n### PROJECT INCLUDES\n# Specify the include directories\ninclude_directories(\n include\n ${Protobuf_INCLUDE_DIRS}\n ${GFLAGS_INCLUDE_DIR}\n ${GLOG_INCLUDE_DIR}\n ${OpenCV_INCLUDE_DIRS})\n\nif (USE_ASIO)\n include_directories(${CMAKE_SOURCE_DIR}/3rdparty/asio/include/)\n # OpenPose flags\n add_definitions(-DUSE_ASIO)\n # Tell Asio it is not using Boost\n add_definitions(-DASIO_STANDALONE)\nendif (USE_ASIO)\n\n# Calibration\nif (NOT ${WITH_EIGEN} MATCHES \"NONE\")\n include_directories(\n ${EIGEN3_INCLUDE_DIRS})\nendif (NOT ${WITH_EIGEN} MATCHES \"NONE\")\n\nif (APPLE)\n include_directories(\n \"/usr/local/opt/openblas/include\")\nendif (APPLE)\n\nif (USE_MKL)\n include_directories(\n \"${MKL_PATH}/include/\")\nendif (USE_MKL)\n\nif (Caffe_FOUND)\n include_directories(\n ${Caffe_INCLUDE_DIRS})\nendif (Caffe_FOUND)\n\nif (${GPU_MODE} MATCHES \"CUDA\")\n include_directories(\n ${CUDA_INCLUDE_DIRS})\nelseif (${GPU_MODE} MATCHES \"OPENCL\")\n include_directories(\n ${OpenCL_INCLUDE_DIRS})\nendif (${GPU_MODE} MATCHES \"CUDA\")\n# 3D\nif (WITH_3D_RENDERER)\n include_directories(${GLUT_INCLUDE_DIRS})\nendif (WITH_3D_RENDERER)\nif (WITH_CERES)\n include_directories(${CERES_INCLUDE_DIRS})\nendif (WITH_CERES)\nif (WITH_FLIR_CAMERA)\n include_directories(SYSTEM 
${SPINNAKER_INCLUDE_DIRS}) # To remove its warnings, equiv. to -isystem\nendif (WITH_FLIR_CAMERA)\nif (WITH_3D_ADAM_MODEL)\n include_directories(include/adam) # TODO: TEMPORARY - TO BE REMOVED IN THE FUTURE\n include_directories(${CERES_INCLUDE_DIRS})\n include_directories(${EIGEN3_INCLUDE_DIRS})\n include_directories(${IGL_INCLUDE_DIRS})\n include_directories(${LIBIGL_INCLUDE_DIRS})\n include_directories(${GLUT_INCLUDE_DIRS} ${GLEW_INCLUDE_DIRS} ${OPENGL_INCLUDE_DIR})\nendif (WITH_3D_ADAM_MODEL)\n# Windows includes\nif (WIN32)\n include_directories(\n ${Boost_INCLUDE_DIRS}\n ${WINDOWS_INCLUDE_DIRS})\nendif (WIN32)\n\n\n### COLLECT ALL 3RD-PARTY LIBRARIES TO BE LINKED AGAINST\nset(OpenPose_3rdparty_libraries ${OpenCV_LIBS} ${GLOG_LIBRARY})\nif (UNIX OR APPLE)\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries} ${GLOG_LIBRARY})\nelseif (WIN32)\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}\n debug ${GFLAGS_LIBRARY_DEBUG} optimized ${GFLAGS_LIBRARY_RELEASE}\n debug ${GLOG_LIBRARY_DEBUG} optimized ${GLOG_LIBRARY_RELEASE})\nendif (UNIX OR APPLE)\n# Deep net Framework\nif (${DL_FRAMEWORK} MATCHES \"CAFFE\")\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries} ${Caffe_LIBS} ${GFLAGS_LIBRARY})\nendif (${DL_FRAMEWORK} MATCHES \"CAFFE\")\n# CPU vs. 
GPU\nif (USE_MKL)\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries} ${MKL_LIBS})\nendif (USE_MKL)\nif (${GPU_MODE} MATCHES \"OPENCL\")\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries} ${CMAKE_THREAD_LIBS_INIT} ${OpenCL_LIBRARIES})\nendif (${GPU_MODE} MATCHES \"OPENCL\")\n# Boost\nif (WIN32)\n if (${GPU_MODE} MATCHES \"CPU_ONLY\" OR ${GPU_MODE} MATCHES \"OPENCL\" OR BUILD_PYTHON)\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}\n debug ${BOOST_SYSTEM_LIB_RELEASE} optimized ${BOOST_SYSTEM_LIB_RELEASE})\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}\n debug ${BOOST_FILESYSTEM_LIB_RELEASE} optimized ${BOOST_FILESYSTEM_LIB_RELEASE})\n endif ()\nendif (WIN32)\n# 3-D\nif (WITH_3D_ADAM_MODEL)\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}\n ${OPENGL_LIBRARIES} ${GLUT_LIBRARY} ${GLEW_LIBRARY} ${FREE_IMAGE_LIBRARY})\nendif (WITH_3D_ADAM_MODEL)\nif (WITH_3D_RENDERER)\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries} ${GLUT_LIBRARY} ${OPENGL_LIBRARIES})\nendif (WITH_3D_RENDERER)\nif (WITH_CERES)\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries} ${CERES_LIBRARIES})\nendif (WITH_CERES)\nif (WITH_FLIR_CAMERA)\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries} ${SPINNAKER_LIB})\nendif (WITH_FLIR_CAMERA)\n# Pthread\nif (UNIX OR APPLE)\n set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries} pthread)\nendif (UNIX OR APPLE)\n\nset(examples_3rdparty_libraries ${OpenPose_3rdparty_libraries} ${GFLAGS_LIBRARY})\n\n\n### ADD SUBDIRECTORIES\nif (Caffe_FOUND)\n add_subdirectory(src)\n if (BUILD_EXAMPLES)\n add_subdirectory(examples)\n endif (BUILD_EXAMPLES)\nendif (Caffe_FOUND)\n\n\n### DOWNLOAD MODELS\n# Download the models if flag is set\nmessage(STATUS \"Download the models.\")\n\n# URL to the models\nset(OPENPOSE_URL \"http://posefs1.perception.cs.cmu.edu/OpenPose/models/\")\n\ndownload_model(\"BODY_25\" ${DOWNLOAD_BODY_25_MODEL} 
pose/body_25/pose_iter_584000.caffemodel\n 78287B57CF85FA89C03F1393D368E5B7) # Body (BODY_25)\ndownload_model(\"body (COCO)\" ${DOWNLOAD_BODY_COCO_MODEL} pose/coco/pose_iter_440000.caffemodel\n 5156d31f670511fce9b4e28b403f2939) # Body (COCO)\ndownload_model(\"body (MPI)\" ${DOWNLOAD_BODY_MPI_MODEL} pose/mpi/pose_iter_160000.caffemodel\n 2ca0990c7562bd7ae03f3f54afa96e00) # Body (MPI)\ndownload_model(\"face\" ${DOWNLOAD_FACE_MODEL} face/pose_iter_116000.caffemodel\n e747180d728fa4e4418c465828384333) # Face\ndownload_model(\"hand\" ${DOWNLOAD_HAND_MODEL} hand/pose_iter_102000.caffemodel\n a82cfc3fea7c62f159e11bd3674c1531) # Hand\n\nmessage(STATUS \"Models Downloaded.\")\n\n\n### PYTHON\nif (BUILD_PYTHON)\n if (WIN32)\n execute_process(COMMAND cmd /c cd ${CMAKE_SOURCE_DIR} & git submodule update --init 3rdparty/pybind11/)\n add_subdirectory(3rdparty/pybind11)\n add_subdirectory(python)\n elseif (UNIX OR APPLE)\n if (Caffe_FOUND)\n execute_process(COMMAND git submodule update --init ${CMAKE_SOURCE_DIR}/3rdparty/pybind11/)\n # execute_process(COMMAND git submodule update --init --recursive --remote) # This would initialize them all\n add_subdirectory(3rdparty/pybind11)\n add_subdirectory(python)\n endif (Caffe_FOUND)\n else (WIN32)\n message(FATAL_ERROR \"Unknown OS.\")\n endif (WIN32)\nendif (BUILD_PYTHON)\n\n\n### GENERATE DOCUMENTATION\nif (UNIX OR APPLE)\n\n if (BUILD_DOCS)\n find_package(Doxygen)\n if (DOXYGEN_FOUND)\n # Set input and output files\n set(DOXYGEN_FILE ${CMAKE_SOURCE_DIR}/doc/doc_autogeneration.doxygen)\n\n # Custom target to build the documentation\n add_custom_target(doc_doxygen ALL\n COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_FILE}\n WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/doc\n COMMENT \"Generating API documentation with Doxygen\"\n VERBATIM)\n else (DOXYGEN_FOUND)\n message(FATAL_ERROR \"Doxygen needs to be installed to generate the doxygen documentation.\")\n endif (DOXYGEN_FOUND)\n endif (BUILD_DOCS)\n\nendif (UNIX OR APPLE)\n\n\n### INSTALL\nif 
(UNIX OR APPLE)\n if (Caffe_FOUND)\n # Install the headers\n install(DIRECTORY ${CMAKE_SOURCE_DIR}/include/openpose DESTINATION include)\n install(EXPORT OpenPose DESTINATION lib/OpenPose)\n if (BUILD_CAFFE)\n install(DIRECTORY ${CMAKE_BINARY_DIR}/caffe/include/caffe DESTINATION include)\n install(DIRECTORY ${CMAKE_BINARY_DIR}/caffe/lib/ DESTINATION lib)\n endif (BUILD_CAFFE)\n\n # Compute installation prefix relative to this file\n configure_file(\n ${CMAKE_SOURCE_DIR}/cmake/OpenPoseConfig.cmake.in\n ${CMAKE_BINARY_DIR}/cmake/OpenPoseConfig.cmake @ONLY)\n\n install(FILES ${CMAKE_BINARY_DIR}/cmake/OpenPoseConfig.cmake\n DESTINATION lib/OpenPose)\n\n # Uninstall target\n configure_file(\n \"${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in\"\n \"${CMAKE_BINARY_DIR}/cmake_uninstall.cmake\"\n IMMEDIATE @ONLY)\n\n add_custom_target(uninstall\n COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)\n endif (Caffe_FOUND)\nendif (UNIX OR APPLE)" }, { "alpha_fraction": 0.5263484120368958, "alphanum_fraction": 0.5368877649307251, "avg_line_length": 32.25773239135742, "blob_id": "2ec041acc89f14cf0983eb97fe19687da236b38b", "content_id": "8ab7022563e04ae840bd5dde239f9471bceaf47a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3278, "license_type": "no_license", "max_line_length": 102, "num_lines": 97, "path": "/dataSet/face++人脸检测.py", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "from sklearn.utils import shuffle\nimport requests\nimport cv2 as cv\nimport numpy as np\nimport base64\nimport glob\nimport json\n\n\ndef cutImage(img, x, y, w, h):\n return img[y:y+h, x:x+w]\n\n\nclass API():\n def __init__(self):\n\n self._data = {\"api_key\": \"-em_2KoIyvcsANQ_Lb3lx6XLK1TzYMh8\",\n \"api_secret\": \"Am6aeBkZ4k6xMGLbS9XK7nC07LZvSjPu\"}\n\n self._urls = {'HumanBody_Skeleton': 'https://api-cn.faceplusplus.com/humanbodypp/v1/skeleton',\n 'HumanBody_Detect': 
'https://api-cn.faceplusplus.com/humanbodypp/v1/detect',\n 'HumanBody_Segment': 'https://api-cn.faceplusplus.com/humanbodypp/v2/segment',\n 'Face_Detect': 'https://api-cn.faceplusplus.com/facepp/v3/detect',\n 'Face_Compare': 'https://api-cn.faceplusplus.com/facepp/v3/compare'}\n\n # moudle [in] 功能 more_return [in] 增加请求可选项,2纬数组,分别表示key,value\n def request(self, moudle, image=None, filePath=None, more_return=None):\n if np.all(image == None) and filePath == None:\n return\n if np.all(image == None) == None:\n image = cv.imread(filePath)\n if more_return != None:\n self._data[more_return[0]] = more_return[1]\n\n buffer = cv.imencode('.jpg', image)\n files = {\"image_file\": buffer[1]}\n url = self._urls[moudle]\n\n # 发送post请求\n print('send post\\n')\n response = requests.post(url, self._data, files=files)\n print('get response\\n')\n req_con = response.content.decode('utf-8')\n print(req_con)\n\n if moudle == 'Face_Detect':\n return self.Face_Detect(req_con)\n\n def Face_Detect(self, req_con):\n rects = []\n req_json = json.loads(req_con)\n faces = req_json['faces']\n for face in faces:\n rect = {}\n face_rectangle = face['face_rectangle']\n rect['width'] = face_rectangle['width']\n rect['top'] = face_rectangle['top']\n rect['left'] = face_rectangle['left']\n rect['height'] = face_rectangle['height']\n rects.append(rect)\n return rects\n\n\ndef main():\n paths = '/Volumes/Seagate Backup Plus Drive/义乌拍摄/3/**.jpg'\n paths = glob.glob(paths)\n api = API()\n for path in paths:\n img = cv.imread(path)\n shape = img.shape\n img = cv.resize(img, ((int)(shape[1]/2), (int)\n (shape[0]/2)), interpolation=cv.INTER_LINEAR)\n rects = api.request('Face_Detect', image=img)\n p1 = path.rfind('/')\n p2 = path.rfind('.')\n num = path[p1+1:p2]\n facepath = './set/study/face/' + num + '.jpg'\n for i in range(len(rects)):\n rect = rects[i]\n\n cut_img = cutImage(\n img, rect['left'], rect['top'], rect['width'], rect['height'])\n cv.imshow('cut', cut_img)\n\n imgCopy = 
np.zeros(shape=img.shape, dtype=np.uint8)\n imgCopy = img.copy()\n cv.rectangle(imgCopy, (rect['left'], rect['top']), (\n rect['left']+rect['width'], rect['top']+rect['height']), [255, 100, 100], 1)\n\n cv.imshow('face', imgCopy)\n\n cv.imwrite(facepath, cut_img)\n cv.waitKey()\n del imgCopy\n\n\nmain()\n" }, { "alpha_fraction": 0.5626319646835327, "alphanum_fraction": 0.6111893057823181, "avg_line_length": 44.54166793823242, "blob_id": "2115adf14009cdc9886ef16c51aa5d313ee06be3", "content_id": "3062b328d4f165bf2f24fe915559cca5e5197192", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14374, "license_type": "no_license", "max_line_length": 88, "num_lines": 312, "path": "/attitude/train_Inception.py", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "\n'''\n 由于训练模型的大小超过了组委会对提交材料的要求,所以没有办法提交模型。\n\n 所以提交模型构建代码\n'''\nimport tensorflow as tf\nimport numpy as np\nimport dataset\nimport NetTool\nimport random\nimport parameter\n\nfilePath = ['/Volumes/Seagate Backup Plus Drive/服务外包/picture/tset/1']\ntxtPath = ['/Volumes/Seagate Backup Plus Drive/服务外包/picture/tset/body']\n\nclasses = ['right_sleep', 'right_play_telephone', 'right_study',\n 'left_sleep', 'left_play_telephone', 'left_study',\n 'center_sleep', 'center_play_telephone', 'center_study']\nclass_num = 3\n\nimgSize = parameter.imgSize\nX = tf.placeholder('float', shape=[None, imgSize, imgSize, 3], name='X')\nY = tf.placeholder('float', shape=[None, class_num], name='Y')\n\nfilter1_size = 7\nfilter1_num = 64\n\nfilter2_size = 1\nfilter2_num = 64\n\nfilter3_size = 3\nfilter3_num = 192\n\nfilter1x1_size = 1\n\n\nconv_layer1 = NetTool.create_convolution_layer( # 7x7 64 2 3x3 64 2\n X, filter1_size, filter1_num, stride_f=2, ksize=3, stride_m=2)\n\nconv_layer2 = NetTool.create_convolution_layer( # 1x1 64 1\n conv_layer1, filter2_size, filter2_num, use_MaxPool=False)\n\nconv_layer3 = NetTool.create_convolution_layer( # 3x3 192 1 3x3 192 
1\n conv_layer2, filter3_size, filter3_num, stride_f=1, stride_m=2, ksize=3)\n\n# inception1\n########################################################################\ninception1_conv_a = NetTool.create_convolution_layer(\n conv_layer3, filter1x1_size, 64, use_MaxPool=False, stride_f=1)\n\ninception1_conv_b = NetTool.create_convolution_layer(\n conv_layer3, filter1x1_size, 96, use_MaxPool=False, stride_f=1)\ninception1_conv_b = NetTool.create_convolution_layer(\n inception1_conv_b, 3, 128, use_MaxPool=False, stride_f=1)\n\ninception1_conv_c = NetTool.create_convolution_layer(\n conv_layer3, filter1x1_size, 16, use_MaxPool=False, stride_f=1)\ninception1_conv_c = NetTool.create_convolution_layer(\n inception1_conv_c, 5, 32, use_MaxPool=False, stride_f=1)\n\ninception1_conv_d = tf.nn.max_pool(conv_layer3, ksize=[1, 3, 3, 1], strides=[\n 1, 1, 1, 1], padding='SAME')\ninception1_conv_d = NetTool.create_convolution_layer(\n inception1_conv_d, 1, 32, stride_f=1, use_MaxPool=False)\n\ninception1_concat = tf.concat([inception1_conv_a, inception1_conv_b,\n inception1_conv_c, inception1_conv_d], 3, name='concat1')\n#######################################################################\n\n# inception2\n#######################################################################\ninception2_conv_a = NetTool.create_convolution_layer(\n inception1_concat, filter1x1_size, 128, use_MaxPool=False, stride_f=1)\n\ninception2_conv_b = NetTool.create_convolution_layer(\n inception1_concat, filter1x1_size, 128, use_MaxPool=False, stride_f=1)\ninception2_conv_b = NetTool.create_convolution_layer(\n inception2_conv_b, 3, 192, use_MaxPool=False, stride_f=1)\n\ninception2_conv_c = NetTool.create_convolution_layer(\n inception1_concat, filter1x1_size, 32, use_MaxPool=False, stride_f=1)\ninception2_conv_c = NetTool.create_convolution_layer(\n inception2_conv_c, 5, 96, use_MaxPool=False, stride_f=1)\n\ninception2_conv_d = tf.nn.max_pool(inception1_concat, ksize=[1, 3, 3, 1], strides=[\n 1, 1, 1, 
1], padding='SAME')\ninception2_conv_d = NetTool.create_convolution_layer(\n inception2_conv_d, 1, 64, stride_f=1, use_MaxPool=False)\n\ninception2_concat = tf.concat([inception2_conv_a, inception2_conv_b,\n inception2_conv_c, inception2_conv_d], 3, name='concat2')\n\ninception2_concat = tf.nn.max_pool(inception2_concat, ksize=[\n 1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n#######################################################################\n\n# inception3\n#######################################################################\ninception3_conv_a = NetTool.create_convolution_layer(\n inception2_concat, filter1x1_size, 192, use_MaxPool=False, stride_f=1)\n\ninception3_conv_b = NetTool.create_convolution_layer(\n inception2_concat, filter1x1_size, 96, use_MaxPool=False, stride_f=1)\ninception3_conv_b = NetTool.create_convolution_layer(\n inception3_conv_b, 3, 208, use_MaxPool=False, stride_f=1)\n\ninception3_conv_c = NetTool.create_convolution_layer(\n inception2_concat, filter1x1_size, 16, use_MaxPool=False, stride_f=1)\ninception3_conv_c = NetTool.create_convolution_layer(\n inception3_conv_c, 5, 48, use_MaxPool=False, stride_f=1)\n\ninception3_conv_d = tf.nn.max_pool(inception2_concat, ksize=[1, 3, 3, 1], strides=[\n 1, 1, 1, 1], padding='SAME')\ninception3_conv_d = NetTool.create_convolution_layer(\n inception3_conv_d, 1, 64, stride_f=1, use_MaxPool=False)\n\ninception3_concat = tf.concat([inception3_conv_a, inception3_conv_b,\n inception3_conv_c, inception3_conv_d], 3, name='concat3')\n#######################################################################\n\n# inception4\n#######################################################################\ninception4_conv_a = NetTool.create_convolution_layer(\n inception3_concat, filter1x1_size, 160, use_MaxPool=False, stride_f=1)\n\ninception4_conv_b = NetTool.create_convolution_layer(\n inception3_concat, filter1x1_size, 112, use_MaxPool=False, stride_f=1)\ninception4_conv_b = 
NetTool.create_convolution_layer(\n inception4_conv_b, 3, 224, use_MaxPool=False, stride_f=1)\n\ninception4_conv_c = NetTool.create_convolution_layer(\n inception3_concat, filter1x1_size, 24, use_MaxPool=False, stride_f=1)\ninception4_conv_c = NetTool.create_convolution_layer(\n inception4_conv_c, 5, 64, use_MaxPool=False, stride_f=1)\n\ninception4_conv_d = tf.nn.max_pool(inception3_concat, ksize=[1, 3, 3, 1], strides=[\n 1, 1, 1, 1], padding='SAME')\ninception4_conv_d = NetTool.create_convolution_layer(\n inception4_conv_d, 1, 64, stride_f=1, use_MaxPool=False)\n\ninception4_concat = tf.concat([inception4_conv_a, inception4_conv_b,\n inception4_conv_c, inception4_conv_d], 3, name='concat4')\n#######################################################################\n\n# inception5\n#######################################################################\ninception5_conv_a = NetTool.create_convolution_layer(\n inception4_concat, filter1x1_size, 128, use_MaxPool=False, stride_f=1)\n\ninception5_conv_b = NetTool.create_convolution_layer(\n inception4_concat, filter1x1_size, 128, use_MaxPool=False, stride_f=1)\ninception5_conv_b = NetTool.create_convolution_layer(\n inception5_conv_b, 3, 256, use_MaxPool=False, stride_f=1)\n\ninception5_conv_c = NetTool.create_convolution_layer(\n inception4_concat, filter1x1_size, 24, use_MaxPool=False, stride_f=1)\ninception5_conv_c = NetTool.create_convolution_layer(\n inception5_conv_c, 5, 64, use_MaxPool=False, stride_f=1)\n\ninception5_conv_d = tf.nn.max_pool(inception4_concat, ksize=[1, 3, 3, 1], strides=[\n 1, 1, 1, 1], padding='SAME')\ninception5_conv_d = NetTool.create_convolution_layer(\n inception5_conv_d, 1, 64, stride_f=1, use_MaxPool=False)\n\ninception5_concat = tf.concat([inception5_conv_a, inception5_conv_b,\n inception5_conv_c, inception5_conv_d], 3, name='concat5')\n#######################################################################\n\n# 
inception6\n#######################################################################\ninception6_conv_a = NetTool.create_convolution_layer(\n inception5_concat, filter1x1_size, 112, use_MaxPool=False, stride_f=1)\n\ninception6_conv_b = NetTool.create_convolution_layer(\n inception5_concat, filter1x1_size, 144, use_MaxPool=False, stride_f=1)\ninception6_conv_b = NetTool.create_convolution_layer(\n inception6_conv_b, 3, 288, use_MaxPool=False, stride_f=1)\n\ninception6_conv_c = NetTool.create_convolution_layer(\n inception5_concat, filter1x1_size, 32, use_MaxPool=False, stride_f=1)\ninception6_conv_c = NetTool.create_convolution_layer(\n inception6_conv_c, 5, 64, use_MaxPool=False, stride_f=1)\n\ninception6_conv_d = tf.nn.max_pool(inception5_concat, ksize=[1, 3, 3, 1], strides=[\n 1, 1, 1, 1], padding='SAME')\ninception6_conv_d = NetTool.create_convolution_layer(\n inception6_conv_d, 1, 64, stride_f=1, use_MaxPool=False)\n\ninception6_concat = tf.concat([inception6_conv_a, inception6_conv_b,\n inception6_conv_c, inception6_conv_d], 3, name='concat6')\n#######################################################################\n\n# inception7\n#######################################################################\ninception7_conv_a = NetTool.create_convolution_layer(\n inception6_concat, filter1x1_size, 256, use_MaxPool=False, stride_f=1)\n\ninception7_conv_b = NetTool.create_convolution_layer(\n inception6_concat, filter1x1_size, 160, use_MaxPool=False, stride_f=1)\ninception7_conv_b = NetTool.create_convolution_layer(\n inception7_conv_b, 3, 320, use_MaxPool=False, stride_f=1)\n\ninception7_conv_c = NetTool.create_convolution_layer(\n inception6_concat, filter1x1_size, 32, use_MaxPool=False, stride_f=1)\ninception7_conv_c = NetTool.create_convolution_layer(\n inception7_conv_c, 5, 128, use_MaxPool=False, stride_f=1)\n\ninception7_conv_d = tf.nn.max_pool(inception6_concat, ksize=[1, 3, 3, 1], strides=[\n 1, 1, 1, 1], padding='SAME')\ninception7_conv_d = 
NetTool.create_convolution_layer(\n inception7_conv_d, 1, 128, stride_f=1, use_MaxPool=False)\n\ninception7_concat = tf.concat([inception7_conv_a, inception7_conv_b,\n inception7_conv_c, inception7_conv_d], 3, name='concat7')\n\ninception7_concat = tf.nn.max_pool(inception7_concat, ksize=[\n 1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n#######################################################################\n\n# inception8\n#######################################################################\ninception8_conv_a = NetTool.create_convolution_layer(\n inception7_concat, filter1x1_size, 256, use_MaxPool=False, stride_f=1)\n\ninception8_conv_b = NetTool.create_convolution_layer(\n inception7_concat, filter1x1_size, 160, use_MaxPool=False, stride_f=1)\ninception8_conv_b = NetTool.create_convolution_layer(\n inception8_conv_b, 3, 320, use_MaxPool=False, stride_f=1)\n\ninception8_conv_c = NetTool.create_convolution_layer(\n inception7_concat, filter1x1_size, 32, use_MaxPool=False, stride_f=1)\ninception8_conv_c = NetTool.create_convolution_layer(\n inception8_conv_c, 5, 128, use_MaxPool=False, stride_f=1)\n\ninception8_conv_d = tf.nn.max_pool(inception7_concat, ksize=[1, 3, 3, 1], strides=[\n 1, 1, 1, 1], padding='SAME')\ninception8_conv_d = NetTool.create_convolution_layer(\n inception8_conv_d, 1, 128, stride_f=1, use_MaxPool=False)\n\ninception8_concat = tf.concat([inception8_conv_a, inception8_conv_b,\n inception8_conv_c, inception8_conv_d], 3, name='concat8')\n#######################################################################\n\n# inception9\n#######################################################################\ninception9_conv_a = NetTool.create_convolution_layer(\n inception8_concat, filter1x1_size, 384, use_MaxPool=False, stride_f=1)\n\ninception9_conv_b = NetTool.create_convolution_layer(\n inception8_concat, filter1x1_size, 192, use_MaxPool=False, stride_f=1)\ninception9_conv_b = NetTool.create_convolution_layer(\n inception9_conv_b, 3, 384, 
use_MaxPool=False, stride_f=1)\n\ninception9_conv_c = NetTool.create_convolution_layer(\n inception8_concat, filter1x1_size, 48, use_MaxPool=False, stride_f=1)\ninception9_conv_c = NetTool.create_convolution_layer(\n inception9_conv_c, 5, 128, use_MaxPool=False, stride_f=1)\n\ninception9_conv_d = tf.nn.max_pool(inception8_concat, ksize=[1, 3, 3, 1], strides=[\n 1, 1, 1, 1], padding='SAME')\ninception9_conv_d = NetTool.create_convolution_layer(\n inception9_conv_d, 1, 128, stride_f=1, use_MaxPool=False)\n\ninception9_concat = tf.concat([inception9_conv_a, inception9_conv_b,\n inception9_conv_c, inception9_conv_d], 3, name='concat9')\n\ninception9_concat = tf.nn.avg_pool(inception9_concat, ksize=[\n 1, 7, 7, 1], strides=[1, 1, 1, 1], padding='SAME')\n#######################################################################\n\n# Fully Connected\n#######################################################################\nflatten_layer = NetTool.create_flatten_layer(inception9_concat)\nfc_input_size = flatten_layer.get_shape()[1:4].num_elements()\nfc_layer = NetTool.create_fc_layer(flatten_layer, [fc_input_size, 1000], 0.4)\nout_layer = NetTool.create_fc_layer(fc_layer, [1000, 3], 0.4, use_relu=False)\n\npred_Y = tf.nn.softmax(out_layer, name='pred_Y')\n\nloss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=pred_Y))\noptimizer = tf.train.AdamOptimizer().minimize(loss) # learning_rate=0.0001\n\ntemp = tf.equal(tf.arg_max(pred_Y, 1), tf.arg_max(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(temp, tf.float32))\n\n\nprint('开始加载训练数据集')\ntrainSet = dataset.dataSet(filePath, classes, way='txt', txtPath=txtPath)\nprint('开始加载测试数据集')\ntxtFilePath = '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/body1'\ntestSet = dataset.dataSet(txtFilePath, classes, way='image')\nprint('数据集加载完成')\n\nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for i in range(10001):\n batchX, batchY, _ = 
trainSet.next_batch(32)\n sess.run(optimizer, feed_dict={X: batchX, Y: batchY})\n if i % 25 == 0:\n _, train_ac = sess.run([optimizer, accuracy], feed_dict={\n X: batchX, Y: batchY})\n batchX, batchY, _ = testSet.next_batch(5)\n a, _ = sess.run([accuracy, optimizer],\n feed_dict={X: batchX, Y: batchY})\n print(i, '\\ttrain_accuracy:\\t',\n train_ac, '\\ttest_accuracy:\\t', a)\n if i % 1000 == 0 and i != 0:\n saver.save(sess, './model/body.ckpt',\n global_step=i)\n" }, { "alpha_fraction": 0.6151220202445984, "alphanum_fraction": 0.6789554357528687, "avg_line_length": 32.86705017089844, "blob_id": "84adcb55d59f1be54e8b0526a73dd97f32e18a86", "content_id": "235e9d4ec0169a14d7245db321e514d6ed6d58d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6085, "license_type": "no_license", "max_line_length": 89, "num_lines": 173, "path": "/attitude/train.py", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "import dataset\nimport tensorflow as tf\nimport numpy as np\nimport cv2 as cv\nimport NetTool\nimport parameter\n\nfilePath = ['/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/1',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/3',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-17/1',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-17/2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/修改间隔后/4',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/修改间隔后/5'\n ]\n\ntxtPath = ['/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/body2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/body1',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/body2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/body3',\n # '/Volumes/Seagate 
Backup Plus Drive/服务外包/picture/2019-03-17/body1',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-17/body2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/修改间隔后/body4',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/修改间隔后/body5'\n ]\n\n\nclasses = ['right_sleep', 'right_play_telephone', 'right_study',\n 'left_sleep', 'left_play_telephone', 'left_study',\n 'center_sleep', 'center_play_telephone', 'center_study']\n\n\nimgSize = parameter.imgSize\nclass_num = 3\n\nfilter1_size = 3\nfilter1_num = 64\n\nfilter2_size = 3\nfilter2_num = 64\n\nfilter3_size = 3\nfilter3_num = 128\n\nfilter4_size = 3\nfilter4_num = 128\n\nfilter5_size = 3\nfilter5_num = 256\n\nfilter6_size = 3\nfilter6_num = 256\n\nfilter7_size = 1\nfilter7_num = 256\n\nfilter8_size = 3\nfilter8_num = 512\n\nfilter9_size = 3\nfilter9_num = 512\n\nfilter10_size = 1\nfilter10_num = 512\n\nfilter11_size = 3\nfilter11_num = 512\n\nfilter12_size = 3\nfilter12_num = 512\n\nfilter13_size = 1\nfilter13_num = 512\n\nfc1_size = 4096\nfc2_size = 4096\nfc3_size = 1000\n\nkeep_prob = 0.7\nbatchSize = 64\n\nX = tf.placeholder('float', shape=[None, imgSize, imgSize, 3], name='X')\nY = tf.placeholder('float', shape=[\n None, class_num], name='Y')\n\n\nconvolution_layer1 = NetTool.create_convolution_layer(\n X, filter1_size, filter1_num)\nconvolution_layer2 = NetTool.create_convolution_layer(\n convolution_layer1, filter2_size, filter2_num, True)\n\n\nconvolution_layer3 = NetTool.create_convolution_layer(\n convolution_layer2, filter3_size, filter3_num)\nconvolution_layer4 = NetTool.create_convolution_layer(\n convolution_layer3, filter4_size, filter4_num, True)\n\n\nconvolution_layer5 = NetTool.create_convolution_layer(\n convolution_layer4, filter5_size, filter5_num)\nconvolution_layer6 = NetTool.create_convolution_layer(\n convolution_layer5, filter6_size, filter6_num)\nconvolution_layer7 = NetTool.create_convolution_layer(\n convolution_layer6, filter7_size, filter7_num, 
True)\n\n\nconvolution_layer8 = NetTool.create_convolution_layer(\n convolution_layer7, filter8_size, filter8_num)\nconvolution_layer9 = NetTool.create_convolution_layer(\n convolution_layer8, filter9_size, filter9_num)\nconvolution_layer10 = NetTool.create_convolution_layer(\n convolution_layer9, filter10_size, filter10_num, True)\n\n\nconvolution_layer11 = NetTool.create_convolution_layer(\n convolution_layer10, filter11_size, filter11_num)\nconvolution_layer12 = NetTool.create_convolution_layer(\n convolution_layer11, filter12_size, filter12_num)\nconvolution_layer13 = NetTool.create_convolution_layer(\n convolution_layer12, filter13_size, filter13_num, True)\n\n\nflatten_layer = NetTool.create_flatten_layer(convolution_layer13)\n\nfc1_input_size = flatten_layer.get_shape()[1:4].num_elements()\nfc1_layer = NetTool.create_fc_layer(\n flatten_layer, [fc1_input_size, fc1_size], keep_prob)\n\nfc2_input_size = fc1_layer.get_shape()[1:4].num_elements()\nfc2_layer = NetTool.create_fc_layer(\n fc1_layer, [fc2_input_size, fc2_size], keep_prob)\n\nfc3_input_size = fc2_layer.get_shape()[1:4].num_elements()\nfc3_layer = NetTool.create_fc_layer(\n fc2_layer, [fc3_input_size, fc3_size], keep_prob)\n\nout_layer = NetTool.create_fc_layer(\n fc3_layer, [fc3_size, class_num], keep_prob, use_relu=False)\n\npred_Y = tf.nn.softmax(out_layer, name='pred_Y')\n\nloss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=pred_Y))\noptimizer = tf.train.AdamOptimizer().minimize(loss) # learning_rate=0.0001\n\ntemp = tf.equal(tf.arg_max(pred_Y, 1), tf.arg_max(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(temp, tf.float32))\nprint('开始加载训练数据集')\ntrainSet = dataset.dataSet(filePath, classes, way='txt', txtPath=txtPath)\nprint('开始加载测试数据集')\ntxtFilePath = '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/body1'\ntestSet = dataset.dataSet(txtFilePath, classes, way='image')\nprint('数据集加载完成')\nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n 
sess.run(tf.global_variables_initializer())\n\n for i in range(10001):\n batchX, batchY, _ = trainSet.next_batch(batchSize)\n # print(type(batchX))\n sess.run([optimizer], feed_dict={X: batchX, Y: batchY})\n if i % 25 == 0:\n _, train_ac = sess.run([optimizer, accuracy], feed_dict={\n X: batchX, Y: batchY})\n batchX, batchY, _ = testSet.next_batch(batchSize)\n a, _ = sess.run([accuracy, optimizer],\n feed_dict={X: batchX, Y: batchY})\n print(i, '\\ttrain_accuracy:\\t',\n train_ac, '\\ttest_accuracy:\\t', a)\n if i % 1000 == 0 and i != 0:\n saver.save(sess, './model/body.ckpt',\n global_step=i)\n" }, { "alpha_fraction": 0.5333774089813232, "alphanum_fraction": 0.5611368417739868, "avg_line_length": 28.66666603088379, "blob_id": "f844ed1939fc07d731bde586355dca8a12578e1d", "content_id": "eccef192e580d90e6b4aefa7a5e7171c6fbc2e5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1715, "license_type": "no_license", "max_line_length": 120, "num_lines": 51, "path": "/dataSet/opencv人脸检测.py", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "import cv2 as cv\nimport sys\nfrom PIL import Image\n\nfrom sklearn.utils import shuffle\nimport requests\nimport numpy as np\nimport base64\nimport glob\nimport json\n\ndef CatchUsbVideo(window_name):\n cv.namedWindow(window_name)\n\n #告诉OpenCV使用人脸识别分类器\n classfier = cv.CascadeClassifier(\"/usr/local/lib/python3.7/site-packages/cv2/data/haarcascade_frontalface_alt2.xml\")\n \n #识别出人脸后要画的边框的颜色,RGB格式\n color = (255, 255, 0)\n \n paths = '/Volumes/Seagate Backup Plus Drive/义乌拍摄/1/**.jpg'\n paths = glob.glob(paths)\n shuffle(paths)\n \n for path in paths:\n img = cv.imread(path)\n \n #将当前帧转换成灰度图像\n grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY) \n \n #人脸检测,1.2和2分别为图片缩放比例和需要检测的有效点数\n faceRects = classfier.detectMultiScale(grey, scaleFactor = 1.2, minNeighbors = 3, minSize = (32, 32))\n if len(faceRects) > 0: #大于0则检测到人脸 \n for faceRect in faceRects: #单独框出每一张人脸\n 
x, y, w, h = faceRect \n cv.rectangle(img, (x - 10, y - 10), (x + w + 10, y + h + 10), color, 2)\n \n #显示图像\n shape = img.shape\n img = cv.resize(img,((int)(shape[1]/2),(int)(shape[0]/2)),interpolation=cv.INTER_NEAREST)\n cv.imshow(window_name, img) \n c = cv.waitKey()\n if c & 0xFF == ord('q'):\n break \n \n #销毁所有窗口\n cv.destroyAllWindows() \n \ndef main():\n CatchUsbVideo(\"识别人脸区域\")\nmain()\n" }, { "alpha_fraction": 0.7132089138031006, "alphanum_fraction": 0.7167983055114746, "avg_line_length": 23.309091567993164, "blob_id": "4907c84b6d9c5ec34ad3b02de5999d745198d91d", "content_id": "04599d3668a2c46a188348d0b92f968c5ebdf9c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2792, "license_type": "no_license", "max_line_length": 90, "num_lines": 110, "path": "/FaceCompare/FaceCompare/RollCall.h", "repo_name": "763483718/service-outsource", "src_encoding": "WINDOWS-1252", "text": "#pragma once\r\n#include \"tool.h\" \r\n#include <stdlib.h>\r\n#include <stdio.h>\r\n#include <windows.h>\r\n#include <string>\r\n#include <opencv2/highgui/highgui.hpp> \r\n#include <opencv2/imgproc/imgproc.hpp> \r\n#include <opencv2/core/core.hpp>\r\n#include \"cv.h\"\r\n#include \"config.h\"\r\n#include \"FaceEngine.h\"\r\n#include \"merror.h\"\r\n#include \"FileUtils.h\"\r\n#include \"inc\\Arc\\arcsoft_face_sdk.h\"\r\n#include <fstream>\r\n#include <list>\r\n#include <iostream>\r\n\r\nusing namespace std;\r\n\r\nclass Student\r\n{\r\npublic:\r\n\tstring name;\r\n\tASF_FaceFeature faceFeature;\r\n\r\npublic:\r\n\tStudent(string name, ASF_FaceFeature faceFeature)\r\n\t{\r\n\t\tthis->name = name;\r\n\t\tthis->faceFeature.featureSize = faceFeature.featureSize;\r\n\t\tthis->faceFeature.feature = (MByte *)malloc(faceFeature.featureSize);\r\n\t\tmemset(this->faceFeature.feature, 0, faceFeature.featureSize);\r\n\t\tmemcpy(this->faceFeature.feature, faceFeature.feature, faceFeature.featureSize);\r\n\t}\r\n\tStudent(string name) { this->name 
= name; }\r\n\r\n\tstring getName() { return name; }\r\n\tASF_FaceFeature* getFaceFeature() { return &faceFeature; }\r\n\r\n\tvoid setName(string name) { this->name = name; }\r\n\tvoid setFaceFeature(char* buff,int size) \r\n\t{\t\r\n\t\tfaceFeature.featureSize = size;\r\n\t\tfaceFeature.feature = (MByte *)malloc(size);\r\n\t\tmemset(faceFeature.feature, 0, size);\r\n\t\tmemcpy(faceFeature.feature, buff, size);\r\n\t}\r\n\r\n\r\n\tbool operator ==(const Student &student)\r\n\t{\r\n\t\treturn name == student.name;\r\n\t}\r\n};\r\n\r\n\r\nDWORD WINAPI ImageThread(LPVOID p);\r\nDWORD WINAPI DeteThread(LPVOID p);\r\nDWORD WINAPI FacePairMatchingThread(LPVOID p);\r\n\r\n\r\nclass RollCall\r\n{\r\nprivate:\r\n\tlist<Student> m_listOfStudent;\r\n\tlist<Student> m_listOfNotArraveStudent;\r\n\tlist<Student> m_listOfArraveStudent;\r\n\tlist<IplImage*> m_listOfImage;\r\n\tlist<ASF_FaceFeature> m_listOfFaceFeature;\r\n\tlist<char*> test;\r\n\r\n\tbool m_run = 0;\r\n\tlist<HANDLE> m_listOfImageHandle;\r\n\tlist<HANDLE> m_listOfDeteHandle;\r\n\tlist<HANDLE> m_listOfMatchHandle;\r\n\r\n\tHANDLE m_Image_Empty;\r\n\tHANDLE m_Image_Full;\r\n\tHANDLE m_FaceFeature_Full;\r\n\tHANDLE m_FaceFeature_Empty;\r\n\r\n\tHANDLE m_Image_Mutex; //»¥³âÁ¿\r\n\tHANDLE m_FaceInfo_Mutex;\r\n\r\n\tint ImageThreadImpl();\r\n\tint DeteThreadImpl();\r\n\tint FacePairMatchingThreadImpl();\r\n\r\npublic:\r\n\tRollCall(char* pathOfFeature = \"feature.txt\");\r\n\tint ReadFaceFeature(char* pathOfFeature);\r\n\tint Start(int numOfImageThread = 1, int numOfDeteThread = 1, int numOfCompareThread = 2);\r\n\tint AddFaceFeature(char* pathOfImage);\r\n\tint Terminate();\r\n\r\n\tfriend DWORD WINAPI ImageThread(LPVOID p);\r\n\tfriend DWORD WINAPI DeteThread(LPVOID p);\r\n\tfriend DWORD WINAPI FacePairMatchingThread(LPVOID p);\r\n};\r\n\r\nclass RollCallAdapter\r\n{\r\nprivate:\r\n\tRollCall* rollCall;\r\npublic:\r\n\tRollCallAdapter(RollCall* r) :rollCall(r) {};\r\n\tint Start(int numOfImageThread, int 
numOfDeteThread, int numOfCompareThread);\r\n};\r\n\r\n" }, { "alpha_fraction": 0.763302743434906, "alphanum_fraction": 0.7697247862815857, "avg_line_length": 38.44444274902344, "blob_id": "13127d97ec4ec61a123a3dde66f2d943aa9892ae", "content_id": "c69f9dffaccfbc7cf6769be942d094990c7d0eee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1142, "license_type": "no_license", "max_line_length": 114, "num_lines": 27, "path": "/FaceCompare/FaceCompare/FaceEngine.h", "repo_name": "763483718/service-outsource", "src_encoding": "GB18030", "text": "#pragma once\r\n\r\n#include \"stdafx.h\"\r\n#include \"merror.h\"\r\n#include \"arcsoft_face_sdk.h\"\r\n#include <opencv2\\opencv.hpp>\r\n\r\nclass FaceEngine\r\n{\r\npublic:\r\n\tFaceEngine();\r\n\t~FaceEngine();\r\n\tint InitEngine();//初始化\r\n\tint UnInitEngine();//反初始化\r\n\tint FaceDetection(ASF_MultiFaceInfo &detectedFaces, IplImage *img);//人脸检测\r\n\tint ExtractSingleFRFeature(ASF_SingleFaceInfo SingleDetectedFaces, ASF_FaceFeature &feature, IplImage *img);\r\n\tint ExtractFRFeature(ASF_MultiFaceInfo detectedFaces, ASF_FaceFeature &feature, IplImage *img, int i = 0);//提取特征值\r\n\tint FacePairMatching(MFloat &confidenceLevel, ASF_FaceFeature feature1,ASF_FaceFeature feature2);//人脸对比\r\n\r\n\tint FaceASFProcess(ASF_MultiFaceInfo detectedFaces, IplImage *img);//Process\r\n\tint AgeEstimation(ASF_MultiFaceInfo detectedFaces, IplImage *img,ASF_AgeInfo &ageInfo);//年龄\r\n\tint GenderEstimation(ASF_MultiFaceInfo detectedFaces, IplImage *img,ASF_GenderInfo &genderInfo);//性别\r\n\tint Face3DAngle(ASF_MultiFaceInfo detectedFaces, IplImage *img,ASF_Face3DAngle &angleInfo);//3D角度\r\n\tconst ASF_VERSION* GetVersion();\r\nprivate:\r\n\tMHandle handle;\r\n};" }, { "alpha_fraction": 0.5507614016532898, "alphanum_fraction": 0.5761421322822571, "avg_line_length": 17.549999237060547, "blob_id": "8697527dccf0b51fae3b0af3f3dca85295987f25", "content_id": 
"3e5525c284fdb61578633ba3d66b36693843bbae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 394, "license_type": "no_license", "max_line_length": 66, "num_lines": 20, "path": "/FaceCompare/FaceCompare/config.h", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "#pragma once\r\n\r\n#define sizeOfFeature 1032\r\n#define sizeOfName 1024\r\n\r\n#define SafeFree(p) { if ((p)) free(p); (p) = NULL; }\r\n#define SafeArrayDelete(p) { if ((p)) delete [] (p); (p) = NULL; }\r\n#define SafeDelete(p) { if ((p)) delete (p); (p) = NULL; }\r\n\r\n\r\n\r\n#ifdef _WIN32\r\n#define APPID \"F7vkKXYJv6H4Bouwm54nJbZy5M9EoxF9PMoSSGx817Yv\"\r\n#define SDKKEY \"HWNECEj34eZVbYAt7NVjmspAoTrYABUamezb47rMCCgE\"\r\n\r\n#else\r\n#define APPID \"F7vkKXYJv6H4Bouwm54nJbZy5M9EoxF9PMoSSGx817Yv\"\r\n#define SDKKEY \"HWNECEj34eZVbYAt7NVjmspAoTrYABUamezb47rMCCgE\"\r\n\r\n#endif // \r\n\r\n" }, { "alpha_fraction": 0.6243228316307068, "alphanum_fraction": 0.6398519277572632, "avg_line_length": 23.2346248626709, "blob_id": "59b9ed3d801d1a270be7620293ec05715f4fbc08", "content_id": "9344dcacb8875fe6babe1a92e56fa50b0638f673", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 11084, "license_type": "no_license", "max_line_length": 130, "num_lines": 439, "path": "/FaceCompare/FaceCompare/RollCall.cpp", "repo_name": "763483718/service-outsource", "src_encoding": "WINDOWS-1252", "text": "#include \"stdafx.h\"\r\n#include \"RollCall.h\"\r\n\r\n#define IMAGE_N 10\r\n#define FACE_INFO_N 10\r\n\r\n\r\n\r\nvoid myCutOut(IplImage* src, IplImage* dst, int x, int y);\r\nint Judge(list<float> listOfLevel, bool &judge);\r\n\r\nDWORD WINAPI ImageThread(LPVOID p)\r\n{\r\n\tRollCall * pdlg = (RollCall *)p;\r\n\tpdlg->ImageThreadImpl();\r\n\treturn 0;\r\n}\r\n\r\nDWORD WINAPI DeteThread(LPVOID p)\r\n{\r\n\tRollCall * pdlg = (RollCall *)p;\r\n\tpdlg->DeteThreadImpl();\r\n\treturn 
0;\r\n}\r\n\r\nDWORD WINAPI FacePairMatchingThread(LPVOID p)\r\n{\r\n\tRollCall * pdlg = (RollCall *)p;\r\n\tpdlg->FacePairMatchingThreadImpl();\r\n\treturn 0;\r\n}\r\n\r\n\r\nRollCall::RollCall(char * pathOfFeature)\r\n{\r\n\tm_Image_Empty = CreateSemaphore(NULL, IMAGE_N, IMAGE_N, NULL);\r\n\tm_Image_Full = CreateSemaphore(NULL, 0, IMAGE_N, NULL);\r\n\r\n\tm_FaceFeature_Empty = CreateSemaphore(NULL, FACE_INFO_N, FACE_INFO_N, NULL);\r\n\tm_FaceFeature_Full = CreateSemaphore(NULL, 0, FACE_INFO_N, NULL);\r\n\r\n\tm_Image_Mutex = CreateMutex(NULL, FALSE, NULL);\r\n\tm_FaceInfo_Mutex = CreateMutex(NULL, FALSE, NULL);\r\n\r\n\tReadFaceFeature(pathOfFeature);\r\n\r\n}\r\n\r\nint RollCall::ReadFaceFeature(char * pathOfFeature)\r\n{\r\n\tchar name[sizeOfName];\r\n\tstring preName;\r\n\tchar *buff = (char*)malloc(sizeOfFeature);\r\n\tstd::ifstream in(pathOfFeature, ios::binary | ios::in);\r\n\tif (!in.is_open()) { return -1; }\r\n\twhile (!in.eof())\r\n\t{\r\n\t\tin.read(name, sizeOfName);\r\n\t\tin.read(buff, sizeOfFeature);\r\n\t\tif (name == preName)\r\n\t\t\tcontinue;\r\n\t\tprintf(\"%s\\n\", name);\r\n\t\tStudent temp(name);\r\n\t\ttemp.setFaceFeature(buff, sizeOfFeature);\r\n\t\tm_listOfStudent.push_back(temp);\r\n\t\tpreName = name;\r\n\t}\r\n\tSafeFree(buff);\r\n\t//for (list<Student>::iterator i = m_listOfStudent.begin(); i != m_listOfStudent.end(); i++)\r\n\t//{\r\n\t//\tprintf(\"lalalal %s\\n\", i->name.c_str());\r\n\t//}\r\n\tprintf(\"\\n\\n\\n\");\r\n\treturn 0;\r\n}\r\n\r\nint RollCall::Start(int numOfImageThread, int numOfDeteThread, int numOfCompareThread)\r\n{\r\n\tm_run = 1;\r\n\t//list<HANDLE>::iterator ImageIter = m_listOfImageHandle.begin();\r\n\t//list<HANDLE>::iterator DeteIter = m_listOfDeteHandle.begin();\r\n\t//list<HANDLE>::iterator MatchIter = m_listOfMatchHandle.begin();\r\n\tHANDLE ImageTest=NULL;\r\n\tHANDLE DeteTest;\r\n\tHANDLE MatchTest;\r\n\tfor (int i = 0; i < numOfImageThread; i++)\r\n\t{\r\n\t\tImageTest = CreateThread(NULL, 0, 
ImageThread, this, 0, NULL);\r\n\t\tm_listOfImageHandle.push_back(ImageTest);\r\n\t\t//m_listOfImageHandle.push_back(CreateThread(NULL, 0, ImageThread, 0, 0, NULL));\r\n\t\tSleep(500);\r\n\t}\r\n\tfor (int i = 0; i < numOfDeteThread; i++)\r\n\t{\r\n\t\tDeteTest = CreateThread(NULL, 0, DeteThread, this, 0, NULL);\r\n\t\tm_listOfDeteHandle.push_back(DeteTest);\r\n\t\t//m_listOfDeteHandle.push_back(CreateThread(NULL, 0, DeteThread, 0, 0, NULL));\r\n\t\tSleep(500);\r\n\t}\r\n\tfor (int i = 0; i < numOfCompareThread; i++)\r\n\t{\r\n\t\tMatchTest = CreateThread(NULL, 0, FacePairMatchingThread, this, 0, NULL);\r\n\t\tm_listOfMatchHandle.push_back(MatchTest);\r\n\t\t//m_listOfMatchHandle.push_back(CreateThread(NULL, 0, FacePairMatchingThread, 0, 0, NULL));\r\n\t\tSleep(500);\r\n\t}\r\n\r\n\t//printf(\"working...\\n\");\r\n\r\n\tfor (int i = 0; i < numOfImageThread; i++)\r\n\t{\r\n\t\tif (WaitForSingleObject(ImageTest, INFINITE) == WAIT_OBJECT_0)\r\n\t\t{\r\n\t\t\tCloseHandle(m_listOfImageHandle.front());\r\n\t\t\tm_listOfImageHandle.pop_front();\r\n\t\t}\r\n\t}\r\n\r\n\tfor (int i = 0; i < numOfDeteThread; i++)\r\n\t{\r\n\t\tif (WaitForSingleObject(m_listOfDeteHandle.front(), INFINITE) == WAIT_OBJECT_0)\r\n\t\t{\r\n\t\t\tCloseHandle(m_listOfDeteHandle.front());\r\n\t\t\tm_listOfDeteHandle.pop_front();\r\n\t\t}\r\n\t}\r\n\r\n\tfor (int i = 0; i < numOfCompareThread; i++)\r\n\t{\r\n\t\tif (WaitForSingleObject(m_listOfMatchHandle.front(), INFINITE) == WAIT_OBJECT_0)\r\n\t\t{\r\n\t\t\tCloseHandle(m_listOfMatchHandle.front());\r\n\t\t\tm_listOfMatchHandle.pop_front();\r\n\t\t}\r\n\t}\r\n\tCloseHandle(m_Image_Empty);\r\n\tCloseHandle(m_Image_Full);\r\n\tCloseHandle(m_FaceFeature_Full);\r\n\tCloseHandle(m_FaceFeature_Empty);\r\n\tCloseHandle(m_Image_Mutex);\r\n\tCloseHandle(m_FaceInfo_Mutex);\r\n\r\n\tprintf(\"over\\n\");\r\n\r\n\treturn 0;\r\n}\r\n\r\nint RollCall::ImageThreadImpl()\r\n{\r\n\tFileUtils fileUtils;\r\n\tvector<std::string> 
imagePath;\r\n\tfileUtils.getFile(\"C:\\\\picture\\\\2018-12-20\\\\diandao\", imagePath, \"jpg\");\r\n\t//char file[1024] = \"F:\\\\·þÎñÍâ°ü\\\\picture\\\\2018-12-27\\\\diandao\\\\\";\r\n\t//char path[1024];\r\n\tvector<std::string>::iterator iter = imagePath.begin();\r\n\tIplImage* img = nullptr;\r\n\tfor (;iter!=imagePath.end();iter++)\r\n\t{\r\n\t\t//sprintf(path, \"%s%d.jpg\", file, i);\r\n\t\t\r\n\t\tIplImage* imgBefore = cvLoadImage(iter->c_str());\r\n\t\tif (!imgBefore)\r\n\t\t{\r\n\t\t\tcvReleaseImage(&imgBefore);\r\n\t\t\tcontinue;\r\n\t\t}\r\n\t\timg = cvCreateImage(cvSize(imgBefore->width - imgBefore->width % 4, imgBefore->height), imgBefore->depth, imgBefore->nChannels);\r\n\t\tmyCutOut(imgBefore, img, 0, 0);\r\n\t\tcvReleaseImage(&imgBefore);\r\n\t\tif (WaitForSingleObject(m_Image_Empty, INFINITE) == WAIT_OBJECT_0)\r\n\t\t{\r\n\t\t\tif (WaitForSingleObject(m_Image_Mutex, INFINITE) == WAIT_OBJECT_0)\r\n\t\t\t{\r\n\t\t\t\tm_listOfImage.push_back(img);\r\n\t\t\t}\r\n\t\t}\r\n\t\tReleaseMutex(m_Image_Mutex);\r\n\t\tReleaseSemaphore(m_Image_Full, 1, NULL);\r\n\r\n\t\tSleep(300);\r\n\t}\r\n\tprintf(\"have done reading image\\n\");\r\n\tint a;\r\n\tscanf(\"%d\", &a);\r\n\tm_run = 0;\r\n\treturn 0;\r\n}\r\n\r\nint RollCall::DeteThreadImpl()\r\n{\r\n\tint res = 0;\r\n\tFaceEngine faceHandle;\r\n\tres = faceHandle.InitEngine();\r\n\tif (res != MOK)\r\n\t{\r\n\t\tprintf(\"There is an error when InitEngine at DeteThreadImpl:%d\\n\", res);\r\n\t\treturn res;\r\n\t}\r\n\tASF_MultiFaceInfo faceInfo = { 0 };\r\n\tIplImage* img = nullptr;\r\n\twhile (m_run)\r\n\t{\r\n\t\tif (WaitForSingleObject(m_Image_Full, 500) == WAIT_OBJECT_0)\r\n\t\t{\r\n\t\t\tif (WaitForSingleObject(m_Image_Mutex, 500) == WAIT_OBJECT_0)\r\n\t\t\t{\r\n\t\t\t\timg = m_listOfImage.front();\r\n\t\t\t\tm_listOfImage.pop_front();\r\n\t\t\t}\r\n\t\t\telse continue;\r\n\t\t}\r\n\t\telse continue;\r\n\t\tReleaseMutex(m_Image_Mutex);\r\n\t\tReleaseSemaphore(m_Image_Empty, 1, NULL);\r\n\r\n\t\tres = 
faceHandle.FaceDetection(faceInfo, img);\r\n\t\tif (res != MOK)\r\n\t\t{\r\n\t\t\tprintf(\"There is an error when faceDetection at DeteThreadImpl%d\\n\", res);\r\n\t\t\tcvReleaseImage(&img);\r\n\t\t\tcontinue;\r\n\t\t}\r\n\t\tif (faceInfo.faceNum == 0)\r\n\t\t{\r\n\t\t\tcvReleaseImage(&img);\r\n\t\t\tcontinue;\r\n\t\t}\r\n\t\tASF_FaceFeature faceFeature = { 0 };\r\n\t\tASF_FaceFeature copyFeature = { 0 };\r\n\t\tfor (int i = 0; i < faceInfo.faceNum; i++)\r\n\t\t{\r\n\t\t\tres = faceHandle.ExtractFRFeature(faceInfo, faceFeature, img, i);\r\n\t\t\tif (res != MOK)\r\n\t\t\t{\r\n\t\t\t\t//printf(\"There is an error when ExtractFRFeature at DeteThreadImpl%d\\n\", res);\r\n\t\t\t\tcontinue;\r\n\t\t\t}\r\n\t\t\tcopyFeature.featureSize = faceFeature.featureSize;\r\n\t\t\tcopyFeature.feature = (MByte*)malloc(faceFeature.featureSize);\r\n\t\t\tmemset(copyFeature.feature, 0, faceFeature.featureSize);\r\n\t\t\tmemcpy(copyFeature.feature, faceFeature.feature, faceFeature.featureSize);\r\n\t\t\tif (WaitForSingleObject(m_FaceFeature_Empty, INFINITE) == WAIT_OBJECT_0)\r\n\t\t\t{\r\n\t\t\t\tif (WaitForSingleObject(m_FaceInfo_Mutex, INFINITE) == WAIT_OBJECT_0)\r\n\t\t\t\t{\r\n\t\t\t\t\tm_listOfFaceFeature.push_back(copyFeature);\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\tReleaseMutex(m_FaceInfo_Mutex);\r\n\t\t\tReleaseSemaphore(m_FaceFeature_Full, 1, NULL);\r\n\t\t}\r\n\t\tcvReleaseImage(&img);\r\n\t}\r\n\t\r\n\tfaceHandle.UnInitEngine();\r\n\tif (m_listOfImage.size() != 0)\r\n\t{\r\n\t\tfor (list<IplImage*>::iterator iter = m_listOfImage.begin(); iter != m_listOfImage.end();)\r\n\t\t{\r\n\t\t\tcvReleaseImage(&*iter);\r\n\t\t\tm_listOfImage.erase(iter++);\r\n\t\t}\r\n\t}\r\n\treturn 0;\r\n}\r\n\r\nint RollCall::FacePairMatchingThreadImpl()\r\n{\r\n\tFaceEngine faceHandle;\r\n\tfaceHandle.InitEngine();\r\n\tlist<float> listOflevel; float level = -1;\r\n\tASF_FaceFeature faceFeature = { 0 };\r\n\twhile (m_run)\r\n\t{\r\n\t\tif (WaitForSingleObject(m_FaceFeature_Full, 500) == 
WAIT_OBJECT_0)\r\n\t\t{\r\n\t\t\tif (WaitForSingleObject(m_FaceInfo_Mutex, 500) == WAIT_OBJECT_0)\r\n\t\t\t{\r\n\t\t\t\tfaceFeature = m_listOfFaceFeature.front();\r\n\t\t\t\tm_listOfFaceFeature.pop_front();\r\n\t\t\t}\r\n\t\t\telse continue;\r\n\t\t}\r\n\t\telse continue;\r\n\t\tReleaseMutex(m_FaceInfo_Mutex);\r\n\t\tReleaseSemaphore(m_FaceFeature_Empty, 1, NULL);\r\n\t\tint max = 0; float maxLevel = -1; int count = 0;\r\n\t\tlist<Student>::iterator iter = m_listOfStudent.begin();\r\n\t\tfor (; iter != m_listOfStudent.end(); iter++)\r\n\t\t{\r\n\t\t\tint res = faceHandle.FacePairMatching(level, faceFeature, iter->faceFeature);\r\n\t\t\tif (res != MOK)\r\n\t\t\t{\r\n\t\t\t\tprintf(\"There is an error when FacePairMatching\\n\");\r\n\t\t\t\tcontinue;\r\n\t\t\t}\r\n\t\t\tif (maxLevel < level)\r\n\t\t\t{\r\n\t\t\t\tmax = count;\r\n\t\t\t\tmaxLevel = level;\r\n\t\t\t}\r\n\t\t\tlistOflevel.push_back(level);\r\n\t\t\tcount++;\r\n\t\t}\r\n\t\tbool judge = 0;\r\n\t\tif (listOflevel.size() == 0)\r\n\t\t{\r\n\t\t\tlistOflevel.clear();\r\n\t\t\tSafeFree(faceFeature.feature);\r\n\t\t\tcontinue;\r\n\t\t}\r\n\t\tint res = Judge(listOflevel, judge);\r\n\t\tif (judge)\r\n\t\t{\r\n\t\t\titer = m_listOfStudent.begin();\r\n\t\t\tint i = 0;\r\n\t\t\tfor (; i <= max; i++)\r\n\t\t\t{\r\n\t\t\t\tif (i != max) iter++;\r\n\t\t\t}\r\n\t\t\tprintf(\"%s\\n\", iter->getName().c_str());\r\n\t\t\tm_listOfArraveStudent.push_back(*iter);\r\n\t\t\tm_listOfStudent.erase(iter);\r\n\t\t}\r\n\t\tlistOflevel.clear();\r\n\t\tSafeFree(faceFeature.feature);\r\n\t\tif (m_listOfStudent.size() == 0)\r\n\t\t{\r\n\t\t\tprintf(\"finish diandao\\n\");\r\n\t\t\t\r\n\t\t\tm_run = 0;\r\n\t\t}\r\n\t}\r\n\tif (m_listOfFaceFeature.size() != 0)\r\n\t{\r\n\t\tfor (list<ASF_FaceFeature>::iterator iter = m_listOfFaceFeature.begin(); iter != m_listOfFaceFeature.end(); )\r\n\t\t{\r\n\t\t\tSafeFree(iter->feature);\r\n\t\t\tm_listOfFaceFeature.erase(iter++);\r\n\t\t}\r\n\t}\r\n\treturn 0;\r\n}\r\n\r\nint 
RollCall::AddFaceFeature(char * pathOfImage)\r\n{\r\n\tchar name[sizeOfName];\r\n\tchar path[1024];\r\n\tFaceEngine faceHandle;\r\n\tfaceHandle.InitEngine();\r\n\tstd::ofstream out(\"feature_1.txt\", std::ios::binary | std::ios::app);\r\n\twhile (true)\r\n\t{\r\n\t\tprintf(\"please enter the name of picture\\n\");\r\n\t\tscanf(\"%s\", name);\r\n\t\tif (name[0] == 'q'&&name[1] == '\\0')\r\n\t\t{\r\n\t\t\tbreak;\r\n\t\t}\r\n\t\tsprintf(path, \"%s.jpg\", name);\r\n\t\tIplImage* img22 = cvLoadImage(path);\r\n\t\tif (img22 == nullptr)\r\n\t\t{\r\n\t\t\tprintf(\"wrong name\\n\");\r\n\t\t\tcvReleaseImage(&img22);\r\n\t\t\tcontinue;\r\n\t\t}\r\n\t\tIplImage* img = cvCreateImage(cvSize(img22->width - img22->width % 4, img22->height), IPL_DEPTH_8U, img22->nChannels);\r\n\t\tmyCutOut(img22, img, 0, 0);\r\n\t\tcvReleaseImage(&img22);\r\n\r\n\t\tASF_MultiFaceInfo faceInfo = { 0 };\r\n\t\tfaceHandle.FaceDetection(faceInfo, img);\r\n\t\tif (faceInfo.faceNum == 0)\r\n\t\t{\r\n\t\t\tprintf(\"no face\\n\");\r\n\t\t}\r\n\t\tASF_FaceFeature feature = { 0 };\r\n\t\tint res = faceHandle.ExtractFRFeature(faceInfo, feature, img);\r\n\t\tfloat level;\r\n\t\tres = faceHandle.FacePairMatching(level, feature, feature);\r\n\t\tout.write(name, sizeOfName);\r\n\t\tout.write((const char*)feature.feature, sizeOfFeature);\r\n\t\tcvReleaseImage(&img);\r\n\t}\r\n\tout.close();\r\n\tfaceHandle.UnInitEngine();\r\n\treturn 0;\r\n}\r\n\r\nint RollCall::Terminate()\r\n{\r\n\treturn 0;\r\n}\r\n\r\n\r\n\r\n\r\nvoid myCutOut(IplImage* src, IplImage* dst, int x, int y)\r\n{\r\n\tCvSize size = cvSize(dst->width, dst->height);\r\n\tcvSetImageROI(src, cvRect(x, y, size.width, size.height));\r\n\tcvCopy(src, dst);\r\n\tcvResetImageROI(src);\r\n}\r\n\r\n\r\nbool cmp(float a, float b) {\r\n\treturn a > b;\r\n}\r\n\r\nint Judge(list<float> listOfLevel, bool &judge)\r\n{\r\n\tint res = 0;\r\n\tlistOfLevel.sort(cmp);\r\n\tlist<float>::iterator iter = listOfLevel.begin();\r\n\tfloat max = *iter;\r\n\tif 
(listOfLevel.size() < 2)\r\n\t{\r\n\t\tif(max>0.7)\r\n\t\t\tjudge = 1;\r\n\t\treturn res;\r\n\t}\r\n\titer++;\r\n\tfloat secend = *iter;\r\n\tif (max > 0.7)\r\n\t{\r\n\t\tjudge = 1;\r\n\t\treturn res;\r\n\t}\r\n\tif (max > 0.55&&max / secend > 5)\r\n\t{\r\n\t\tjudge = 1;\r\n\t\treturn res;\r\n\t}\r\n\tjudge = 0;\r\n\r\n\treturn res;\r\n}" }, { "alpha_fraction": 0.5173775553703308, "alphanum_fraction": 0.5608214735984802, "avg_line_length": 27.155555725097656, "blob_id": "279b73b3412f400638e4d0bbdc9a4a8d9fc9ac95", "content_id": "fbdae6d00f27e2b6d757f29962cded037bc0d607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1392, "license_type": "no_license", "max_line_length": 78, "num_lines": 45, "path": "/dataSet/video-picture.py", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "import cv2\n \ndef video_picture(videoPath, savePath, f):\n \n #获得视频的格式\n videoCapture = cv2.VideoCapture(videoPath)\n \n #获得码率及尺寸\n # fps = videoCapture.get(cv2.cv.CV_CAP_PROP_FPS)\n \n #读帧\n success, frame = videoCapture.read()\n\n count = 0\n num = 1\n while success :\n # cv2.imshow(\"cut_picture\", frame) #显示\n #cv2.waitKey(1000/int(fps)) #延迟\n count += 1\n if count % f == 1:\n path = savePath + str(num) + '.jpg'\n # 获取图片尺寸并计算图片中心点\n #(h, w) = frame.shape[:2]\n #center = (w/2, h/2)\n\n # 将图像旋转180度\n #M = cv2.getRotationMatrix2D(center, 180, 1.0)\n #rotated = cv2.warpAffine(frame, M, (w, h))\n #cv2.imwrite(path, rotated)\n\n cv2.imwrite(path, frame)\n num += 1\n #del rotated\n del frame\n \n success, frame = videoCapture.read() #获取下一帧\n\ndef main():\n videoPath = '/Volumes/Seagate Backup Plus Drive/服务外包/视频/2018-12-27/1.mp4'\n savePath = '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/1/'\n # videoPath = '/Volumes/Seagate Backup Plus Drive/义乌拍摄/3.MP4'\n # savePath = '/Volumes/Seagate Backup Plus Drive/义乌拍摄/3/'\n video_picture(videoPath, savePath, 200)\n\nmain()" }, { "alpha_fraction": 
0.49126824736595154, "alphanum_fraction": 0.5224735140800476, "avg_line_length": 38.100746154785156, "blob_id": "8ec5e5921ed1713d3206cd080b6331987d770bf9", "content_id": "16b76112f6039aee77968a0488fca8aa0a6c1b88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10737, "license_type": "no_license", "max_line_length": 123, "num_lines": 268, "path": "/attitude/dataset.py", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "import cv2 as cv\nimport glob\n# from sklearn.utils import shuffle\nimport random\nimport numpy as np\nimport parameter\n\n\ndef cutImage(img, x, y, w, h):\n return img[y:y+h, x:x+w]\n\n\nclass dataSet(object):\n def __init__(self, filePath, classess, way, imgSize=parameter.imgSize, txtPath=None, bodyPos=None): # bodyPos指左中右\n self._filePath = filePath\n self._imgSize = imgSize\n self._classess = classess\n self._txtPath = txtPath\n self._bodyPos = bodyPos\n\n self._images = []\n self._labels = []\n self._cls = []\n self._pointer = 0\n\n # self.loadImage()\n if way == 'txt':\n self.loadImageByTXT()\n elif way == 'image':\n self.loadImage()\n\n temp = list(zip(self._images, self._labels))\n random.shuffle(temp)\n self._images[:], self._labels[:] = zip(*temp)\n self._images = np.array(self._images)\n self._labels = np.array(self._labels)\n shape = self._images.shape\n self._dataSetSize = self._images.shape[0]\n print(self._dataSetSize)\n\n def Expansion(self, img, label, size=None, pan=10, expansion=1): # expansion 是否扩展\n if size == None:\n size = self._imgSize\n black_img = np.zeros(\n size*size*3, dtype=np.uint8).reshape(size, size, 3)\n shape = img.shape\n bigger = max(shape[0], shape[1])\n if bigger > 200:\n ratio = 200/bigger\n img = cv.resize(\n img, ((int)(shape[1]*ratio), (int)(shape[0]*ratio)), interpolation=cv.INTER_LINEAR)\n shape = img.shape\n panH = 200-shape[0]\n panH = (int)(panH/2)\n panW = 200-shape[1]\n panW = (int)(panW/2)\n\n for h in 
range(shape[0]):\n for w in range(shape[1]):\n black_img[h+panH][w+panW] = img[h][w]\n\n black_img = black_img.astype(np.float32)\n black_img = np.multiply(black_img, 1.0 / 255.0)\n self._images.append(black_img)\n self._labels.append(label)\n if expansion == 0:\n return\n # cv.imshow('black', black_img)\n pan = (int)(panW/2)\n left_img = cv.warpAffine(black_img, np.float32(\n [[1, 0, -pan], [0, 1, 0]]), (size, size))\n\n left_img = left_img.astype(np.float32)\n left_img = np.multiply(left_img, 1.0 / 255.0)\n self._images.append(left_img)\n self._labels.append(label)\n # cv.imshow('left', left_img)\n\n right_img = cv.warpAffine(black_img, np.float32(\n [[1, 0, pan], [0, 1, 0]]), (size, size))\n right_img = right_img.astype(np.float32)\n right_img = np.multiply(right_img, 1.0 / 255.0)\n self._images.append(right_img)\n self._labels.append(label)\n # cv.imshow('right', right_img)\n\n pan = (int)(panH/2)\n top_img = cv.warpAffine(black_img, np.float32(\n [[1, 0, 0], [0, 1, -pan]]), (size, size))\n top_img = top_img.astype(np.float32)\n top_img = np.multiply(top_img, 1.0 / 255.0)\n self._images.append(top_img)\n self._labels.append(label)\n # cv.imshow('top', top_img)\n\n bottom_img = cv.warpAffine(black_img, np.float32(\n [[1, 0, pan], [0, 1, 0]]), (size, size))\n bottom_img = bottom_img.astype(np.float32)\n bottom_img = np.multiply(bottom_img, 1.0 / 255.0)\n self._images.append(bottom_img)\n self._labels.append(label)\n # cv.imshow('bottom', bottom_img)\n\n # cv.waitKey()\n\n def loadImageByTXT(self):\n for i in range(len(self._txtPath)):\n path = self._txtPath[i] + '/**.txt'\n paths = glob.glob(path)\n for txtPath in paths:\n pos1 = txtPath.rfind('/')\n pos2 = txtPath.rfind('.')\n num = txtPath[pos1+1:pos2]\n imgPath = self._filePath[i] + '/' + str(num) + '.jpg'\n f = open(txtPath, 'r')\n img = cv.imread(imgPath)\n shape = img.shape\n img = cv.resize(img, ((int)(shape[1]/2), (int)\n (shape[0]/2)), interpolation=cv.INTER_LINEAR)\n lines = f.readlines()\n # 
cv.imshow('img', img)\n for line in lines:\n line_dict = eval(line)\n if self._bodyPos != None:\n if line_dict['status'][0] != self._bodyPos:\n continue\n cut_img = cutImage(\n img, line_dict['left'], line_dict['top'], line_dict['width'], line_dict['height'])\n\n label = np.zeros(3)\n index = self._classess.index(line_dict['status'])\n if index == 3 or index == 6:\n index = 0\n elif index == 4 or index == 7:\n index = 1\n elif index == 5 or index == 8:\n index = 2\n label[index] = 1.0\n\n if line_dict['status'][-5:] != 'study':\n pass\n # self.Expansion(cut_img, label)\n else:\n self.Expansion(cut_img, label, expansion=0)\n del img\n self._images = np.array(self._images)\n self._labels = np.array(self._labels)\n\n def loadImage(self, filePath=None):\n if filePath != None:\n self._filePath = filePath\n for file in self._classess:\n path = self._filePath + '/' + file + '/**.jpg'\n files = glob.glob(path)\n index = self._classess.index(file)\n\n if index == 3 or index == 6:\n index = 0\n elif index == 4 or index == 7:\n index = 1\n elif index == 5 or index == 8:\n index = 2\n\n label = np.zeros(3)\n label[index] = 1.0\n\n for imgPath in files:\n img = cv.imread(imgPath)\n img = cv.resize(\n img, (self._imgSize, self._imgSize), 0, 0, cv.INTER_LINEAR)\n img = img.astype(np.float32)\n img = np.multiply(img, 1.0 / 255.0)\n\n self._images.append(img)\n self._labels.append(label)\n self._cls.append(file)\n self._images = np.array(self._images)\n self._labels = np.array(self._labels)\n # self._cls = np.array(self._cls)\n # self._images, self._labels, self._cls = shuffle(\n # self._images, self._labels, self._cls)\n\n # for i in range(1000):\n # img = self._images[i]\n # lab = self._labels[i]\n # c = self._cls[i]\n # # img = list(img)\n # cv.imshow(str(c), img)\n # cv.waitKey()\n def saveImage(self, filePath):\n count_sleep = 0\n count_telephone = 0\n count_study = 0\n\n for i in range(len(self._labels)):\n if self._labels[i] == 0:\n cv.imwrite(filePath+'\\\\sleep\\\\' +\n 
str(count_sleep), self._images[i])\n count_sleep += 1\n elif self._labels[i] == 1:\n cv.imwrite(filePath+\"\\\\telephone\\\\\" +\n str(count_telephone), self._images[i])\n count_telephone += 1\n else:\n cv.imwrite(filePath+\"\\\\study\\\\\" +\n str(count_study), self._images[i])\n count_study += 1\n \n\n def next_batch(self, batchSize):\n end = self._pointer + batchSize\n if end > self._dataSetSize:\n assert batchSize <= self._dataSetSize\n end %= self._dataSetSize\n start = self._pointer\n imgA = self._images[start:]\n imgB = self._images[0:end]\n labelA = self._labels[start:]\n labelB = self._labels[0:end]\n clsA = self._cls[start:]\n clsB = self._cls[0:end]\n self._pointer = end\n # print(clsA.shape, clsB.shape, '\\n\\n\\n')\n classes = np.hstack((clsA, clsB))\n labels = np.vstack((labelA, labelB))\n images = np.vstack((imgA, imgB))\n\n return images, labels, classes\n start = self._pointer\n self._pointer = end\n return self._images[start:end], self._labels[start:end], self._cls[start:end]\n\n\n# filePath = '/Mycomputer/pythonCode/tensorflow/深度学习框架Tensorflow案例实战视频课程【195107】Tensorflow简介与安装/猫狗识别/training_data'\n\n# filePath = ['/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/1',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/3',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-17/1',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-17/2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/修改间隔后/4',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/修改间隔后/5'\n # ]\n# txtPath = ['/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/body2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/body1',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/body2',\n # '/Volumes/Seagate Backup Plus 
Drive/服务外包/picture/2018-12-27/body3',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-17/body1',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-17/body2',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/修改间隔后/body4',\n # '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/修改间隔后/body5'\n # ]\n# filePath = ['/Volumes/Seagate Backup Plus Drive/服务外包/picture/tset/1']\n# txtPath = ['/Volumes/Seagate Backup Plus Drive/服务外包/picture/tset/body']\n\n\n# classes = ['right_sleep', 'right_play_telephone', 'right_study',\n# 'left_sleep', 'left_play_telephone', 'left_study',\n# 'center_sleep', 'center_play_telephone', 'center_study']\n\n# dataSetT = dataSet(filePath, classes, 'txt', txtPath=txtPath)\n# dataSetT = dataSet('/Volumes/Seagate Backup Plus Drive/服务外包/picture/2019-03-05/body1', classes, 'image', txtPath=txtPath)\n# batchX, batchY, _ = dataSetT.next_batch(64)\n# shape = batchX.shape\n# print(shape)\n\n# imgs, labels, classess = dataSetT.next_batch(48)\n" }, { "alpha_fraction": 0.5018602609634399, "alphanum_fraction": 0.5527077317237854, "avg_line_length": 21.259614944458008, "blob_id": "39f70eb0a126a5e900ce79248682979cbc591db7", "content_id": "31ed6253c3520a3edd4ae50b0d5dde30fdd1f1cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2431, "license_type": "no_license", "max_line_length": 122, "num_lines": 104, "path": "/FaceCompare/FaceCompare/FaceCompare.cpp", "repo_name": "763483718/service-outsource", "src_encoding": "GB18030", "text": "#pragma once\r\n#include \"stdafx.h\"\r\n//#include \"tool.h\"\r\n#include \"RollCall.h\"\r\n\r\n\r\nint main(int argc, char** argv)\r\n{\r\n\tRollCall rollCall(\"feature.txt\");\r\n\t//string name;\r\n\t//cin >> name;\r\n\t//rollCall.AddFaceFeature(\"d\");\r\n\t//rollCall.ReadFaceFeature(\"feature_1.txt\");\r\n\trollCall.Start(1,5,1);\r\n\tSleep(5000000);\r\n\r\n\t//system(\"pause\");\r\n\treturn 0;\r\n}\r\n\r\n//int compara(char* 
shibiezhao = \"C:\\\\picture\\\\2018-12-20\\\\diandao\\\\6.jpg\", char* zhengjianzhao = \"zhangchenyu.jpg\")\r\n//{\r\n//\tIplImage* img = cvLoadImage(shibiezhao);\r\n//\tIplImage* img22 = cvLoadImage(zhengjianzhao);\r\n//\tif (!img || !img22)\r\n//\t{\r\n//\t\tprintf(\"please enter right path or name\\n\");\r\n//\t\tcvReleaseImage(&img);\r\n//\t\tcvReleaseImage(&img22);\r\n//\t\treturn -1;\r\n//\t}\r\n//\tIplImage* img2 = cvCreateImage(cvSize(img22->width - img22->width % 4, img22->height), IPL_DEPTH_8U, img22->nChannels);\r\n//\tmyCutOut(img22, img2, 0, 0);\r\n//\r\n//\tFaceEngine faceHandle;\r\n//\tfaceHandle.InitEngine();\r\n//\tSinglePictureCompare(img2, img, faceHandle);\r\n//\r\n//\tfaceHandle.UnInitEngine();\r\n//\tcvReleaseImage(&img);\r\n//\tcvReleaseImage(&img22);\r\n//\tcvReleaseImage(&img2);\r\n//\r\n//\treturn 0;\r\n//}\r\n//\r\n//int main(int argc, char** argv)\r\n//{\r\n//\r\n//\t//VideoToPicture(\"F:\\\\服务外包\\\\视频\\\\2018-12-20\\\\1.mp4\", \"C:\\\\picture\\\\2018-12-20\\\\1\\\\\");\r\n//\r\n//\ttime_t start, stop;\r\n//\tstart = time(NULL);\r\n//\r\n//\t//DeteFace(\"C:\\\\picture\\\\2018-12-20\\\\diandao\", \"C:\\\\picture\\\\2018-12-20\\\\face\");\r\n//\tint a;\r\n//\tscanf(\"%d\", &a);\r\n//\r\n//\tswitch (a)\r\n//\t{\r\n//\tcase 1:\r\n//\t{\r\n//\t\tint num=-1;\r\n//\t\tchar path[1024];\r\n//\t\tchar name[256];\r\n//\t\t//while (true)\r\n//\t\t//{\r\n//\t\t//\tnum++;\r\n//\t\t//\tif (num == 50)\r\n//\t\t//\t{\r\n//\t\t//\t\tbreak;\r\n//\t\t//\t}\r\n//\t\t//\tsprintf(path, \"C:\\\\picture\\\\2018-12-20\\\\diandao\\\\%d.jpg\", num);\r\n//\t\t//\tcompara(path, \"xujingting.jpg\");\r\n//\t\t//}\r\n//\t\twhile (true) \r\n//\t\t{\r\n//\t\t\tprintf(\"please enter picture number of shibie\\n\");\r\n//\t\t\tscanf(\"%d\", &num);\r\n//\t\t\tif (num == -1)\r\n//\t\t\t{\r\n//\t\t\t\tbreak;\r\n//\t\t\t}\r\n//\t\t\tprintf(\"please enter picture name of zhuce\\n\");\r\n//\t\t\tscanf(\"%s\", &name);\r\n//\t\t\tsprintf(name, \"%s.jpg\", 
name);\r\n//\t\t\tsprintf(path, \"C:\\\\picture\\\\2018-12-20\\\\diandao\\\\%d.jpg\", num);\r\n//\t\t\tcompara(path, name);\r\n//\t\t}\r\n//\t\tbreak;\r\n//\t}\r\n//\tcase 2:saveFeature(); break;\r\n//\tcase 3:Compara(\"C:\\\\picture\\\\2018-12-20\\\\diandao\"); break;\r\n//\tdefault:\r\n//\t\tbreak;\r\n//\t}\r\n//\r\n//\tstop = time(NULL);\r\n//\tprintf(\"Use Time:%ld\\n\", (stop - start));\r\n//\t\r\n//\tsystem(\"pause\");\r\n//\treturn 0;\r\n//}\r\n//\r\n" }, { "alpha_fraction": 0.5525854229927063, "alphanum_fraction": 0.5726555585861206, "avg_line_length": 24.788732528686523, "blob_id": "5bc9dcb8df471fbe686f6e58345dbf90f57b1c92", "content_id": "189c0b40f7db842f0c4e6499ce220fc61cde34d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 11430, "license_type": "no_license", "max_line_length": 185, "num_lines": 426, "path": "/FaceCompare/FaceCompare/tool.h", "repo_name": "763483718/service-outsource", "src_encoding": "WINDOWS-1252", "text": "//#pragma once\r\n//#include <stdlib.h>\r\n//#include <stdio.h>\r\n//#include <windows.h>\r\n//#include <string>\r\n//#include <opencv2/highgui/highgui.hpp> \r\n//#include <opencv2/imgproc/imgproc.hpp> \r\n//#include <opencv2/core/core.hpp>\r\n//#include \"cv.h\"\r\n//#include \"config.h\"\r\n//#include \"FaceEngine.h\"\r\n//#include \"merror.h\"\r\n//#include \"inc\\Arc\\arcsoft_face_sdk.h\"\r\n//#include <fstream>\r\n//#include <list>\r\n//\r\n//using namespace std;\r\n//using namespace cv;\r\n//\r\n//\r\n//\r\n//struct student {\r\n//\tstring name;\r\n//\tASF_FaceFeature feature;\r\n//\tbool operator ==(const student &d)\r\n//\t{\r\n//\t\treturn name == d.name;\r\n//\t}\r\n//};\r\n//\r\n//void myCutOut(IplImage* src, IplImage* dst, int x, int y)\r\n//{\r\n//\tCvSize size = cvSize(dst->width, dst->height);\r\n//\tcvSetImageROI(src, cvRect(x, y, size.width, size.height));\r\n//\tcvCopy(src, dst); \r\n//\tcvResetImageROI(src);\r\n//}\r\n//\r\n//bool cmp(float a, float b) 
{\r\n//\treturn a > b;\r\n//}\r\n//\r\n//int Judge(list<float> listOfLevel,bool &judge)\r\n//\t{\r\n//\tint res = 0;\r\n//\tlistOfLevel.sort(cmp);\r\n//\tlist<float>::iterator iter = listOfLevel.begin();\r\n//\tfloat max = *iter;\r\n//\titer++;\r\n//\tfloat secend = *iter;\r\n//\tif (max > 0.7)\r\n//\t{\r\n//\t\tjudge = 1;\r\n//\t\treturn res;\r\n//\t}\r\n//\tif (max > 0.55&&max / secend > 5)\r\n//\t{\r\n//\t\tjudge = 1;\r\n//\t\treturn res;\r\n//\t}\r\n//\tjudge = 0;\r\n//\r\n//\treturn res;\r\n//}\r\n//\r\n//int VideoToPicture(char* videoPath, char* picturePath)\r\n//{\r\n//\tint res = 0;\r\n//\r\n//\tchar imagePath[1024];\r\n//\r\n//\tCvCapture* capture = cvCreateFileCapture(videoPath);\r\n//\tIplImage* img;\r\n//\tint rate = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);\r\n//\tint countFrame = 1;\r\n//\tint count = 1;\r\n//\tprintf(\"ÕýÔÚÖ´ÐУ¬ÇëÉÔºò\\n\");\r\n//\twhile (true)\r\n//\t{\r\n//\r\n//\t\timg = cvQueryFrame(capture);\r\n//\r\n//\t\tif (img == nullptr) break;\r\n//\t\tcountFrame++;\r\n//\t\tif (countFrame%rate != 0)\r\n//\t\t\tcontinue;\r\n//\r\n//\t\tsprintf(imagePath, \"%s%d.jpg\", picturePath, count);\r\n//\t\tcvSaveImage(imagePath, img);\r\n//\t\tcount++;\r\n//\t}\r\n//\r\n//\treturn res;\r\n//}\r\n//\r\n//int SingleDeteFace(char* picturePath, char* dstPath, FaceEngine faceHandle)//by file\r\n//{\r\n//\tint res = 0;\r\n//\r\n//\tif (res != MOK)\r\n//\t{\r\n//\t\tprintf(\"InitEngine error\\terrorCode:%d\\n\", res);\r\n//\t\treturn res;\r\n//\t}\r\n//\tIplImage* img = cvLoadImage(picturePath);\r\n//\tif (img == nullptr) return -1;\r\n//\tASF_MultiFaceInfo faceInfo = { 0 };\r\n//\tfaceHandle.FaceDetection(faceInfo, img);\r\n//\tif (faceInfo.faceNum == 0)\r\n//\t{\r\n//\t\tremove(picturePath);\r\n//\t\treturn res;\r\n//\t}\r\n//\tfor (int i = 0; i < faceInfo.faceNum; i++)\r\n//\t{\r\n//\t\tcvRectangle(img, cvPoint(faceInfo.faceRect[i].left, faceInfo.faceRect[i].top), cvPoint(faceInfo.faceRect[i].right, faceInfo.faceRect[i].bottom), 
cvScalar(255, 100, 100));\r\n//\t}\r\n//\tcvSaveImage(dstPath, img);\r\n//\r\n//\r\n//\tcvReleaseImage(&img);\r\n//\r\n//\treturn res;\r\n//}\r\n//\r\n//int DeteFace(char* pictureFile, char* dstFile)\r\n//{\r\n//\tint res; int i = 0;\r\n//\tchar path[1024];\r\n//\tchar dstPath[1024];\r\n//\tFaceEngine faceHandle;\r\n//\tfaceHandle.InitEngine();\r\n//\tint count = 0;\r\n//\twhile (true)\r\n//\t{\r\n//\t\tsprintf(path, \"%s\\\\%d.jpg\", pictureFile, i);\r\n//\t\tsprintf(dstPath, \"%s\\\\%d.jpg\", dstFile, i);\r\n//\t\tres = SingleDeteFace(path, dstPath, faceHandle);\r\n//\t\tif (res != -1)\r\n//\t\t{\r\n//\t\t\tcount++;\r\n//\t\t\tif (count == 270)\r\n//\t\t\t\tbreak;\r\n//\t\t}\r\n//\t\ti++;\r\n//\t}\r\n//\tfaceHandle.UnInitEngine();\r\n//\treturn res;\r\n//}\r\n//\r\n//\r\n//int SinglePictureCompare(IplImage* img1, IplImage* img2, FaceEngine faceHandle)\r\n//{\r\n//\tint res = 0;\r\n//\tASF_MultiFaceInfo faceInfo = { 0 };\r\n//\tres = faceHandle.FaceDetection(faceInfo, img1);\r\n//\tif (res != MOK)\r\n//\t{\r\n//\t\tprintf(\"Error:%d\\n\", res);\r\n//\t\treturn res;\r\n//\t}\r\n//\tif (faceInfo.faceNum == 0)\r\n//\t{\r\n//\t\tprintf(\"no face\\n\");\r\n//\t}\r\n//\tASF_FaceFeature feature1 = { 0 };\r\n//\tASF_FaceFeature copyfeature1 = { 0 };\r\n//\tres = faceHandle.ExtractFRFeature(faceInfo, feature1, img1);\r\n//\t//¿½±´feature\r\n//\tcopyfeature1.featureSize = feature1.featureSize;\r\n//\tcopyfeature1.feature = (MByte *)malloc(feature1.featureSize);\r\n//\tmemset(copyfeature1.feature, 0, feature1.featureSize);\r\n//\tmemcpy(copyfeature1.feature, feature1.feature, feature1.featureSize);\r\n//\r\n//\r\n//\r\n//\t//char *buff= (char*)malloc(feature1.featureSize);\r\n//\r\n//\t//std::ifstream i(\"feature.txt\", std::ios::in | std::ios::binary);\r\n//\t//char name[sizeOfName];\r\n//\t//i.read(name, sizeOfName);\r\n//\t//int pos = i.tellg();\r\n//\t//\r\n//\t//i.read(buff, feature1.featureSize);\r\n//\t//pos = i.tellg();\r\n//\t//memcpy(copyfeature1.feature, buff, 
feature1.featureSize);\r\n//\r\n//\r\n//\tres = faceHandle.FaceDetection(faceInfo, img2);\r\n//\tif (res != MOK)\r\n//\t{\r\n//\t\tprintf(\"Error:%d\\n\", res);\r\n//\t\treturn res;\r\n//\t}\r\n//\tif (faceInfo.faceNum == 0)\r\n//\t{\r\n//\t\tprintf(\"no face\\n\");\r\n//\t}\r\n//\r\n//\tASF_SingleFaceInfo SingleDetectedFaces1 = { 0 };\r\n//\tint max = 0; float maxLevel = 0;\r\n//\tfor (int i = 0; i < faceInfo.faceNum; i++)\r\n//\t{\r\n//\t\tres = faceHandle.ExtractFRFeature(faceInfo, feature1, img2, i);\r\n//\t\tif (res != MOK)\r\n//\t\t{\r\n//\t\t\tcontinue;\r\n//\t\t}\r\n//\t\tMFloat level = 0;\r\n//\t\tres = faceHandle.FacePairMatching(level, feature1, copyfeature1);\r\n//\t\tprintf(\"%d level is :%f\\n\", i, level);\r\n//\t\tif (maxLevel < level)\r\n//\t\t{\r\n//\t\t\tmax = i;\r\n//\t\t\tmaxLevel = level;\r\n//\t\t}\r\n//\t}\r\n//\tcvRectangle(img2, cvPoint(faceInfo.faceRect[max].left, faceInfo.faceRect[max].top), cvPoint(faceInfo.faceRect[max].right, faceInfo.faceRect[max].bottom), cvScalar(100, 100, 255));\r\n//\tcvSaveImage(\"temp.jpg\", img2);\r\n//\r\n//\tSafeFree(copyfeature1.feature);\r\n//\treturn res;\r\n//}\r\n//\r\n//\r\n//int saveFeature()\r\n//{\r\n//\tchar name[sizeOfName];\r\n//\tchar path[1024];\r\n//\tFaceEngine faceHandle;\r\n//\tfaceHandle.InitEngine();\r\n//\tstd::ofstream out(\"feature.txt\", std::ios::binary | std::ios::app);\r\n//\twhile (true)\r\n//\t{\r\n//\t\tprintf(\"please enter the name of picture\\n\");\r\n//\t\tscanf(\"%s\", name);\r\n//\t\tif (name[0] == 'q'&&name[1]=='\\0')\r\n//\t\t{\r\n//\t\t\tbreak;\r\n//\t\t}\r\n//\t\tsprintf(path, \"%s.jpg\", name);\r\n//\t\tIplImage* img22 = cvLoadImage(path);\r\n//\t\tif (img22 == nullptr)\r\n//\t\t{\r\n//\t\t\tprintf(\"wrong name\\n\");\r\n//\t\t\tcvReleaseImage(&img22);\r\n//\t\t\tcontinue;\r\n//\t\t}\r\n//\t\tIplImage* img = cvCreateImage(cvSize(img22->width - img22->width % 4, img22->height), IPL_DEPTH_8U, img22->nChannels);\r\n//\t\tmyCutOut(img22, img, 0, 
0);\r\n//\t\tcvReleaseImage(&img22);\r\n//\r\n//\r\n//\t\tASF_MultiFaceInfo faceInfo = { 0 };\r\n//\t\tfaceHandle.FaceDetection(faceInfo, img);\r\n//\t\tif (faceInfo.faceNum == 0)\r\n//\t\t{\r\n//\t\t\tprintf(\"no face\\n\");\r\n//\t\t}\r\n//\t\tASF_FaceFeature feature = { 0 };\r\n//\t\tint res = faceHandle.ExtractFRFeature(faceInfo, feature, img);\r\n//\t\tfloat level;\r\n//\t\tres = faceHandle.FacePairMatching(level, feature, feature);\r\n//\t\tout.write(name, sizeOfName);\r\n//\t\tout.write((const char*)feature.feature, sizeOfFeature);\r\n//\t\tcvReleaseImage(&img);\r\n//\t}\r\n//\tout.close();\r\n//\tfaceHandle.UnInitEngine();\r\n//\treturn 0;\r\n//}\r\n//\r\n//\r\n//int Compara(char* filePath)\r\n//{\r\n//\tint res = 0;\r\n//\t//****************************************************** read feature into list\r\n//\tlist<student> listOfFeature;\r\n//\tchar name[sizeOfName];\r\n//\tstring preName;\r\n//\tchar *buff = (char*)malloc(sizeOfFeature);\r\n//\tstd::ifstream in(\"feature.txt\", ios::binary | ios::in);\r\n//\t\r\n//\twhile (!in.eof())\r\n//\t{\r\n//\t\tin.read(name, sizeOfName);\r\n//\t\tint p = in.tellg();\r\n//\t\tin.read(buff, sizeOfFeature);\r\n//\t\tp = in.tellg();\r\n//\t\tif (name == preName)\r\n//\t\t\tcontinue;\r\n//\t\tstudent temp;\r\n//\t\ttemp.name = name;\r\n//\t\ttemp.feature.featureSize = sizeOfFeature;\r\n//\r\n//\t\ttemp.feature.feature = (MByte *)malloc(sizeOfFeature);\r\n//\t\tmemset(temp.feature.feature, 0, sizeOfFeature);\r\n//\t\tmemcpy(temp.feature.feature, buff, sizeOfFeature);\r\n//\r\n//\t\tlistOfFeature.push_back(temp);\r\n//\t\tpreName = name;\r\n//\r\n//\t\t//SafeFree(temp.feature.feature);\r\n//\t}\r\n//\tSafeFree(buff);\r\n//\tprintf(\"finish read feature\\nCompara ing...\\n\");\r\n//\r\n//\tint size = listOfFeature.size();\r\n//\t//for (int i = 0; i < size; i++)\r\n//\t//{\r\n//\t//\tprintf(\"%s\\n\", 
listOfFeature.front().name.c_str());\r\n//\t//\tlistOfFeature.pop_front();\r\n//\t//}\r\n//\r\n//\t//****************************************************** get shibie picture and dete\r\n//\tFaceEngine faceHandle;\r\n//\tfaceHandle.InitEngine();\r\n//\tchar path[1024];\r\n//\tfor (int i = 0; i < 50; i++)\r\n//\t{\r\n//\t\tsprintf(path, \"%s\\\\%d.jpg\", filePath, i);\r\n//\t\tIplImage* img22 = cvLoadImage(path);\r\n//\t\tif (img22 == nullptr)\r\n//\t\t{\r\n//\t\t\tcvReleaseImage(&img22);\r\n//\t\t\tcontinue;\r\n//\t\t}\r\n//\t\tIplImage* img = cvCreateImage(cvSize(img22->width - img22->width % 4, img22->height), IPL_DEPTH_8U, img22->nChannels);\r\n//\t\tmyCutOut(img22, img, 0, 0);\r\n//\t\tcvReleaseImage(&img22);\r\n//\r\n//\t\tASF_MultiFaceInfo faceInfo = { 0 };\r\n//\t\tfaceHandle.FaceDetection(faceInfo, img);\r\n//\t\tif (faceInfo.faceNum == 0)\r\n//\t\t{\r\n//\t\t\tprintf(\"no face\\n\");\r\n//\t\t}\r\n//\t\tASF_FaceFeature feature = { 0 };\r\n//\t\tfor (int k = 0; k < faceInfo.faceNum; k++)\r\n//\t\t{\r\n//\t\t\tres = faceHandle.ExtractFRFeature(faceInfo, feature, img, k);\r\n//\t\t\tif (res != MOK)\r\n//\t\t\t{\r\n//\t\t\t\tcontinue;\r\n//\t\t\t}\r\n//\t\t\tfloat level, maxLevel = 0;\r\n//\t\t\tint max = 0;\r\n//\t\t\tstudent maxStudent;\r\n//\t\t\tlist<float> listOfLevel;\r\n//\t\t\tlist<student>::iterator iter = listOfFeature.begin();\r\n//\t\t\tfor (int c=0; iter != listOfFeature.end(); c++)\r\n//\t\t\t{\r\n//\t\t\t\tres = faceHandle.FacePairMatching(level, feature, iter->feature);\r\n//\r\n//\t\t\t\tif (res != MOK)\r\n//\t\t\t\t{\r\n//\t\t\t\t\tprintf(\"There is an error when FacePairMatching\\n\");\r\n//\t\t\t\t\titer++;\r\n//\t\t\t\t\tcontinue;\r\n//\t\t\t\t}\r\n//\t\t\t\tlistOfLevel.push_back(level);\r\n//\t\t\t\tif (level > maxLevel)\r\n//\t\t\t\t{\r\n//\t\t\t\t\tmaxLevel = level;\r\n//\t\t\t\t\tmax = c;\r\n//\t\t\t\t\tmaxStudent = *iter;\r\n//\t\t\t\t}\r\n//\r\n//\t\t\t\t//if (level > 0.5)\r\n//\t\t\t\t//{\r\n//\t\t\t\t//\tIplImage* faceImage = 
cvCloneImage(img);\r\n//\t\t\t\t//\tcvRectangle(faceImage, cvPoint(faceInfo.faceRect[k].left, faceInfo.faceRect[k].top), cvPoint(faceInfo.faceRect[k].right, faceInfo.faceRect[k].bottom), cvScalar(100, 100, 255));\r\n//\t\t\t\t//\tchar temp[1024];\r\n//\t\t\t\t//\tsprintf(temp, \"%s\\\\%d_%s.jpg\", \"C:\\\\picture\\\\2018-12-20\\\\compara\", i, iter->name.c_str());\r\n//\t\t\t\t//\tcvSaveImage(temp, faceImage);\r\n//\t\t\t\t//\tcvReleaseImage(&faceImage);\r\n//\t\t\t\t//\tprintf(\"%s\\t%d\\n\", iter->name.c_str(), listOfFeature.size());\r\n//\t\t\t\t//\tlistOfFeature.erase(iter++);\r\n//\t\t\t\t//\tcontinue;\r\n//\t\t\t\t//}\r\n//\t\t\t\titer++;\r\n//\t\t\t}\r\n//\t\t\tbool judge = 0;\r\n//\t\t\tJudge(listOfLevel, judge);\r\n//\t\t\tif (judge)\r\n//\t\t\t{\r\n//\t\t\t\tIplImage* faceImage = cvCloneImage(img);\r\n//\t\t\t\tcvRectangle(faceImage, cvPoint(faceInfo.faceRect[k].left, faceInfo.faceRect[k].top), cvPoint(faceInfo.faceRect[k].right, faceInfo.faceRect[k].bottom), cvScalar(100, 100, 255));\r\n//\t\t\t\tchar temp[1024];\r\n//\t\t\t\tsprintf(temp, \"%s\\\\%d_%s.jpg\", \"C:\\\\picture\\\\2018-12-20\\\\compara\", i, maxStudent.name.c_str());\r\n//\t\t\t\tcvSaveImage(temp, faceImage);\r\n//\t\t\t\tcvReleaseImage(&faceImage);\r\n//\t\t\t\tprintf(\"%s\\t%d\\n\", maxStudent.name.c_str(), listOfFeature.size());\r\n//\t\t\t\t//listOfFeature.remove(maxStudent);\r\n//\t\t\t\titer = listOfFeature.begin();\r\n//\t\t\t\tfor (int m = 0; m <= max; m++)\r\n//\t\t\t\t\tif (m != max)iter++;\r\n//\t\t\t\tSafeFree(iter->feature.feature);\r\n//\t\t\t\tlistOfFeature.erase(iter);\r\n//\t\t\t}\r\n//\t\t}\r\n//\t\tcvReleaseImage(&img);\r\n//\t}\r\n//\r\n//\tfaceHandle.UnInitEngine();\r\n//\r\n//\treturn 0;\r\n//}\r\n//\r\n//\r\n////int test()\r\n////{\r\n////\tIplImage* img = cvLoadImage(\"heyu.jpg\");\r\n////\tint count = 0;\r\n////\twhile (true)\r\n////\t{\r\n////\t\tIplImage* imgTemp = cvCreateImage(cvSize(img->width - img->width % 4, img->height), IPL_DEPTH_8U, 
img->nChannels);\r\n////\t\tmyCutOut(img, imgTemp, 0, 0);\r\n////\t\tcvReleaseImage(&imgTemp);\r\n////\t\tcount++;\r\n////\t\tif (count == 1000)\r\n////\t\t\tbreak;\r\n////\t}\r\n////\tprintf(\"over\\n\");\r\n////\treturn 0;\r\n////}" }, { "alpha_fraction": 0.6661911606788635, "alphanum_fraction": 0.6661911606788635, "avg_line_length": 18.676469802856445, "blob_id": "ee6255c18bbb0394fb4d013754e25156ed0c622d", "content_id": "64e3a41ca3b8ae55dc431bcb48e0fd9a8521f5a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 701, "license_type": "no_license", "max_line_length": 93, "num_lines": 34, "path": "/FaceCompare/FaceCompare/FileUtils.h", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "#pragma once\r\n#include \"stdafx.h\"\r\n#include <stdio.h>\r\n//#include \"merror.h\"\r\n#include <malloc.h>\r\n#include <memory.h>\r\n#include <stdlib.h>\r\n#include <io.h>\r\n#include <direct.h>\r\n#include <vector>\r\n#include <string>\r\n#include <windows.h>\r\n#include <TCHAR.h>\r\nusing std::vector;\r\n\r\nclass FileUtils\r\n{\r\npublic:\r\n\tFileUtils()\r\n\t{\r\n\t}\r\n\r\n\tvoid getFile(const std::string & path, vector<std::string> &vecFileLists, std::string type);\r\n\tbool isEmptyFolder(const std::string & path);\r\n\tvoid getFolders(const std::string & path, vector<std::string> &folderLists);\r\n\tbool haveFile(const std::string & path);\r\n\tvoid MakeDir(std::string download_path, std::string path);\r\n\t~FileUtils()\r\n\t{\r\n\t}\r\n\r\nprivate:\t\r\n\t\r\n};" }, { "alpha_fraction": 0.4790404736995697, "alphanum_fraction": 0.4904288947582245, "avg_line_length": 34.42489242553711, "blob_id": "ddd187d9d052f1e1e87ef892b1c8c726d25665f2", "content_id": "7321561731f1eadb3fda6286eefd192d261ff6e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8326, "license_type": "no_license", "max_line_length": 147, "num_lines": 233, "path": 
"/dataSet/状态分类.py", "repo_name": "763483718/service-outsource", "src_encoding": "UTF-8", "text": "from sklearn.utils import shuffle\nimport requests\nimport cv2 as cv\nimport numpy as np\nimport base64\nimport glob\nimport json\n\n\ndef cutImage(img, x, y, w, h):\n return img[y:y+h, x:x+w]\n\n\nclass API():\n def __init__(self):\n\n self._data = {\"api_key\": \"-em_2KoIyvcsANQ_Lb3lx6XLK1TzYMh8\",\n \"api_secret\": \"Am6aeBkZ4k6xMGLbS9XK7nC07LZvSjPu\"}\n\n self._urls = {'HumanBody_Skeleton': 'https://api-cn.faceplusplus.com/humanbodypp/v1/skeleton',\n 'HumanBody_Detect': 'https://api-cn.faceplusplus.com/humanbodypp/v1/detect',\n 'HumanBody_Segment': 'https://api-cn.faceplusplus.com/humanbodypp/v2/segment',\n 'Face_Detect': 'https://api-cn.faceplusplus.com/facepp/v3/detect',\n 'Face_Compare': 'https://api-cn.faceplusplus.com/facepp/v3/compare'}\n\n # moudle [in] 功能 more_return [in] 增加请求可选项,2纬数组,分别表示key,value\n def request(self, moudle, image=None, filePath=None, more_return=None):\n if np.all(image == None) and filePath == None:\n return\n if np.all(image == None):\n image = cv.imread(filePath)\n if more_return != None:\n self._data[more_return[0]] = more_return[1]\n\n buffer = cv.imencode('.jpg', image)\n files = {\"image_file\": buffer[1]}\n url = self._urls[moudle]\n\n # 发送post请求\n print('send post\\n')\n response = requests.post(url, self._data, files=files)\n\n print('get response\\n')\n req_con = response.content.decode('utf-8')\n print(req_con)\n if moudle == 'HumanBody_Skeleton':\n return self.HumanBody_Skeleton(req_con)\n if moudle == 'HumanBody_Segment':\n return self.HumanBody_Segment(req_con)\n\n def HumanBody_Segment(self, req_con):\n req_json = json.loads(req_con)\n img_b64decode = base64.b64decode(req_json['result']) # base64解码\n nparr = np.fromstring(img_b64decode, np.uint8)\n img = cv.imdecode(nparr, cv.COLOR_BAYER_BG2RGB)\n cv.imshow('img', img)\n cv.waitKey()\n\n def HumanBody_Skeleton(self, req_con):\n rects = []\n points = []\n lines = []\n 
req_json = json.loads(req_con)\n skeletons = {}\n try:\n skeletons = req_json['skeletons']\n except KeyError:\n return rects, points, lines\n\n for body in skeletons:\n rect = {}\n point = []\n body_rectangle = body['body_rectangle']\n rect['width'] = body_rectangle['width']\n rect['top'] = body_rectangle['top']\n rect['left'] = body_rectangle['left']\n rect['height'] = body_rectangle['height']\n rects.append(rect)\n landmark = body['landmark']\n\n # 连线数据\n line = []\n line.append([landmark['head'], landmark['neck']])\n line.append([landmark['neck'], landmark['left_shoulder']])\n line.append([landmark['neck'], landmark['right_shoulder']])\n line.append([landmark['left_shoulder'], landmark['left_elbow']])\n line.append([landmark['right_shoulder'], landmark['right_elbow']])\n line.append([landmark['left_elbow'], landmark['left_hand']])\n line.append([landmark['right_elbow'], landmark['right_hand']])\n line.append([landmark['neck'], landmark['left_buttocks']])\n line.append([landmark['neck'], landmark['right_buttocks']])\n lines.append(line)\n\n for i in landmark:\n if i == 'left_knee' or i == 'right_knee' or i == 'left_foot' or i == 'right_foot' or i == 'left_buttocks' or i == 'right_buttocks':\n continue\n temp = landmark[i]\n x = temp['x']\n y = temp['y']\n point.append([x, y])\n points.append(point)\n return rects, points, lines\n\n\ndef main():\n # count = 0\n # print(count, '\\n\\n\\n')\n\n paths = '/Volumes/Seagate Backup Plus Drive/服务外包/picture/2018-12-27/3/**.jpg'\n paths = glob.glob(paths)\n shuffle(paths)\n api = API()\n\n reStart = 0\n lastNum = 1\n for path in paths:\n p1 = path.rfind('/')\n p2 = path.rfind('.')\n num = path[p1+1:p2]\n\n if reStart == 0:\n if num == str(lastNum):\n reStart = 1\n else:\n continue\n\n img = cv.imread(path)\n shape = img.shape\n img = cv.resize(img, ((int)(shape[1]/2), (int)\n (shape[0]/2)), interpolation=cv.INTER_LINEAR)\n rects, points, lines = api.request('HumanBody_Skeleton', image=img)\n\n savePath = '/Volumes/Seagate 
Backup Plus Drive/服务外包/picture/2018-12-27/body3/'\n savePath = savePath + str(num) + '.txt'\n \n if len(rects) == 0:\n continue\n f = open(savePath, 'a')\n for i in range(len(rects)):\n rect = rects[i]\n\n # count += 1\n\n cut_img = cutImage(\n img, rect['left'], rect['top'], rect['width'], rect['height'])\n # rect_str = str(rect['left'])+str(rect['top']) + \\\n # str(rect['width'])+str(rect['height'])\n cv.imshow('cut', cut_img)\n\n imgCopy = np.zeros(shape=img.shape, dtype=np.uint8)\n imgCopy = img.copy()\n\n point = points[i]\n cv.rectangle(imgCopy, (rect['left'], rect['top']), (\n rect['left']+rect['width'], rect['top']+rect['height']), [255, 100, 100], 1)\n for p in point:\n cv.circle(imgCopy, (p[0]+rect['left'],\n p[1]+rect['top']), 2, [100, 255, 100], 1)\n c = 50\n for l in lines[i]:\n cv.line(imgCopy, (l[0]['x']+rect['left'], l[0]['y']+rect['top']),\n (l[1]['x']+rect['left'], l[1]['y']+rect['top']), [c, c, 200])\n c += 30\n\n cv.imshow(num, imgCopy)\n\n test = cv.waitKey()\n\n if test == ord('a'):\n # CApath = savePath + 'center/sleep/' + str(count) + '.jpg'\n # cv.imwrite(CApath, cut_img)\n rect['status'] = 'center_sleep'\n del imgCopy\n\n elif test == ord('s'):\n # CBpath = savePath + 'center/play telephone/' + str(count) + '.jpg'\n # cv.imwrite(CBpath, cut_img)\n rect['status'] = 'center_play_telephone'\n del imgCopy\n\n elif test == ord('d'):\n # CCpath = savePath + 'center/study/' + str(count) + '.jpg'\n # cv.imwrite(CCpath, cut_img)\n rect['status'] = 'center_study'\n del imgCopy\n\n elif test == ord('q'):\n # LApath = savePath + 'left/sleep/' + str(count) + '.jpg'\n # cv.imwrite(LApath, cut_img)\n rect['status'] = 'left_sleep'\n del imgCopy\n\n elif test == ord('w'):\n # LBpath = savePath + 'left/play telephone/' + str(count) + '.jpg'\n # cv.imwrite(LBpath, cut_img)\n rect['status'] = 'left_play_telephone'\n del imgCopy\n\n elif test == ord('e'):\n # LCpath = savePath + 'left/study/' + str(count) + '.jpg'\n # cv.imwrite(LCpath, cut_img)\n 
rect['status'] = 'left_study'\n del imgCopy\n\n elif test == ord('z'):\n # RApath = savePath + 'right/sleep/' + str(count) + '.jpg'\n # cv.imwrite(RApath, cut_img)\n rect['status'] = 'right_sleep'\n del imgCopy\n\n elif test == ord('x'):\n # RBpath = savePath + 'right/play telephone/' + str(count) + '.jpg'\n # cv.imwrite(RBpath, cut_img)\n rect['status'] = 'right_play_telephone'\n del imgCopy\n\n elif test == ord('c'):\n # RCpath = savePath + 'right/study/' + str(count) + '.jpg'\n # cv.imwrite(RCpath, cut_img)\n rect['status'] = 'right_study'\n del imgCopy\n\n else:\n continue\n \n f.write(str(rect))\n f.write('\\n')\n f.close()\n\n cv.destroyWindow(num)\n\n\nmain()\n" }, { "alpha_fraction": 0.5607927441596985, "alphanum_fraction": 0.5693626403808594, "avg_line_length": 23.414966583251953, "blob_id": "9a16f5931eaaaf5b6f6d4cbaea9160db7148281c", "content_id": "2f19df3f132f8796e02dfda53b86cde2f1233309", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3792, "license_type": "no_license", "max_line_length": 116, "num_lines": 147, "path": "/FaceCompare/FaceCompare/FileUtils.cpp", "repo_name": "763483718/service-outsource", "src_encoding": "GB18030", "text": "#pragma once\r\n#include \"stdafx.h\"\r\n#include \"FileUtils.h\"\r\n\r\nint Str2CStr(WCHAR src[], int len, char dst[])\r\n{\r\n\treturn WideCharToMultiByte(CP_ACP, NULL, src, -1, dst, len + 1, NULL, NULL);\r\n}\r\n\r\nvoid FileUtils::getFile(const std::string & path, vector<std::string> &vecFileLists, std::string type)\r\n{\r\n\t//文件句柄 \r\n\tlong hFile = 0;\r\n\t//文件信息,_finddata_t需要io.h头文件 \r\n\tstruct _finddata_t fileinfo;\r\n\tstd::string strPath;\r\n\tstd::string p;\r\n\tif ((hFile = _findfirst(p.assign(path).append(\"\\\\*\").c_str(), &fileinfo)) != -1)\r\n\t{\r\n\t\tdo\r\n\t\t{\r\n\t\t\tif ((fileinfo.attrib & _A_SUBDIR))\r\n\t\t\t{\r\n\t\t\t\tif (strcmp(fileinfo.name, \".\") != 0 && strcmp(fileinfo.name, \"..\") != 
0)\r\n\t\t\t\t\tgetFile(p.assign(path).append(\"\\\\\").append(fileinfo.name), vecFileLists, type);\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tstd::string namess = fileinfo.name;\r\n\t\t\t\tstd::string str = namess.substr(namess.length() - 3); //截取文件名后3位\r\n\t\t\t\tif (str == type)\r\n\t\t vecFileLists.push_back(p.assign(path).append(\"\\\\\").append(fileinfo.name).c_str());\r\n\t\t\t}\r\n\t\t} while (_findnext(hFile, &fileinfo) == 0);\r\n\t\t_findclose(hFile);\r\n\t}\r\n}\r\n\r\nbool FileUtils::haveFile(const std::string & path)\r\n{\r\n\t//文件句柄 \r\n\tlong hFile = 0;\r\n\tstruct _finddata_t fileinfo;\r\n\tstd::string p;\r\n\tbool bHaveFile = false;\r\n\tif ((hFile = _findfirst(p.assign(path).append(\"\\\\*\").c_str(), &fileinfo)) != -1)\r\n\t{\r\n\t\tdo\r\n\t\t{\r\n\t\t\tif ((fileinfo.attrib & _A_SUBDIR))\r\n\t\t\t{\r\n\t\t\t\t//if (strcmp(fileinfo.name, \".\") != 0 && strcmp(fileinfo.name, \"..\") != 0)\r\n\t\t\t\t//{\r\n\t\t\t\t//\t//folderLists.push_back(p.assign(path).append(\"\\\\\").append(fileinfo.name));\r\n\t\t\t\t////\tgetFolders(p.assign(path).append(\"\\\\\").append(fileinfo.name), folderLists);\r\n\t\t\t\t//}\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tbHaveFile = true;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\t} while (_findnext(hFile, &fileinfo) == 0);\r\n\t\t_findclose(hFile);\r\n\t}\r\n\treturn bHaveFile;\r\n}\r\n\r\nvoid FileUtils::getFolders(const std::string & path, vector<std::string> &folderLists)\r\n{\r\n\t//文件句柄 \r\n\tlong hFile = 0;\r\n\tstruct _finddata_t fileinfo;\r\n\tstd::string p;\r\n\tif ((hFile = _findfirst(p.assign(path).append(\"\\\\*\").c_str(), &fileinfo)) != -1)\r\n\t{\r\n\t\tdo\r\n\t\t{\r\n\t\t\tif ((fileinfo.attrib & _A_SUBDIR))\r\n\t\t\t{\r\n\t\t\t\tif (strcmp(fileinfo.name, \".\") != 0 && strcmp(fileinfo.name, \"..\") != 0)\r\n\t\t\t\t{\r\n\r\n\t\t\t\t\tfolderLists.push_back(p.assign(path).append(\"\\\\\").append(fileinfo.name));\r\n\t\t\t\t\tgetFolders(p.assign(path).append(\"\\\\\").append(fileinfo.name), 
folderLists);\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\t} while (_findnext(hFile, &fileinfo) == 0);\r\n\t\t_findclose(hFile);\r\n\t}\r\n}\r\n\r\nbool FileUtils::isEmptyFolder(const std::string & path)\r\n{\r\n\tlong hFile = 0;\r\n\tstruct _finddata_t fileinfo;\r\n\tstd::string p;\r\n\tbool bEmpty = true;\r\n\tif ((hFile = _findfirst(p.assign(path).append(\"\\\\*\").c_str(), &fileinfo)) != -1)\r\n\t{\r\n\t\tdo\r\n\t\t{\r\n\t\t\tif ((fileinfo.attrib & _A_SUBDIR))\r\n\t\t\t{\r\n\t\t\t\tif (strcmp(fileinfo.name, \"noFace\") != 0 && strcmp(fileinfo.name, \".\") != 0 && strcmp(fileinfo.name, \"..\") != 0)\r\n\t\t\t\t{\r\n\t\t\t\t\tbEmpty = false;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\telse\r\n\t\t\t{\r\n\t\t\t\tbEmpty = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\t} while (_findnext(hFile, &fileinfo) == 0);\r\n\t\t_findclose(hFile);\r\n\t}\r\n\treturn bEmpty;\r\n}\r\n\r\nvoid FileUtils::MakeDir(std::string download_path, std::string path)\r\n{\r\n\tstd::string temp = path.substr(download_path.length(), path.length() - download_path.length());\r\n\tstd::string temp_path = download_path;\r\n\tstd::string folder_name;\r\n\tint npos;// = temp.find(\"\\\\\");\r\n\twhile (true)\r\n\t{\r\n\t\tnpos = temp.find(\"\\\\\");\r\n\t\tif (npos == -1)\r\n\t\t\tbreak;\r\n\t\t//npos = temp_path.length() + npos;\r\n\t\tfolder_name = temp.substr(0, npos);\r\n\t\ttemp = temp.substr(npos + 1, temp.length() +1 - npos);\r\n\t\ttemp_path = temp_path + folder_name + \"\\\\\";\r\n\t\tif (_access(temp_path.c_str(), 0) == -1)\r\n\t\t{\r\n\t\t\t_mkdir(temp_path.c_str());\r\n\t\t}\r\n\t\t//\tbreak;\r\n\t}\r\n}" } ]
20
tartaruz/Stein-saks-papir
https://github.com/tartaruz/Stein-saks-papir
241f5c4d44866e7419c489b220d6f219b8e83a53
95d481d665c398e5ce2d5775024ddf6c19612031
4235b6a34cdbb37c9e95afb6c5eaeb8031d316e0
refs/heads/master
"2021-08-14T20:13:47.628986"
"2017-11-16T17:39:41"
"2017-11-16T17:39:41"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.45158851146698, "alphanum_fraction": 0.4689863920211792, "avg_line_length": 23, "blob_id": "54076d22e221d2231a5cb9daed347f9b28bda47e", "content_id": "3aadd37a73f14f339e8c21caff73b4cfcca4fe7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1324, "license_type": "no_license", "max_line_length": 60, "num_lines": 55, "path": "/stein-saks-papir.py", "repo_name": "tartaruz/Stein-saks-papir", "src_encoding": "UTF-8", "text": "import funk\nfrom time import sleep\nimport os\nclear = lambda: os.system('cls')\nvalg = 0\n\nwhile (valg!=\"avslutt\"):\n sleep(1)\n print()\n funk.velkommen()\n funk.meny()\n print()\n valg = funk.valg()\n clear()\n if valg==\"1\":\n print(\"--------------Spiller 1's tur--------------\")\n pvalg=funk.choose()\n p1=funk.konv(pvalg)\n print(\"Takk! Nå er det spiller2's tur\")\n sleep(2)\n clear()\n print(\"--------------Spiller 2's tur--------------\")\n pvalg=funk.choose()\n p2=funk.konv(pvalg)\n funk.game(p1,p2,1)\n time(5)\n clear()\n elif valg==\"2\":\n print(\"--------------Spiller 1's tur--------------\")\n pvalg=funk.choose()\n p=funk.konv(pvalg)\n print(\"Du valgte\",p,\"! 
Nå er det maskinens tur\")\n sleep(3)\n clear()\n print(\"--------------Terminator's tur-------------\")\n com=funk.comp()\n funk.revmaskinvalg(com)\n cp=funk.konv(com)\n print()\n print(\"TERMINATOR VALGTE:\",cp.upper())\n funk.game(p,cp,2) #Type 2\n sleep(5)\n clear()\n elif valg==3:\n print(\"3\")\n elif valg==\"help\":\n print(\"help\")\n c=funk.comp()\n print(c)\n else:\n print(\"Wrong, try again \")\n\nclear()\nprint(\"Farvel!\")\ntime.sleep(10)\n\n\n" }, { "alpha_fraction": 0.3482733368873596, "alphanum_fraction": 0.3614988923072815, "avg_line_length": 23.258928298950195, "blob_id": "d8bd876911d98336408f3544fdc8b92559b10371", "content_id": "c006ccbf681b539b857efeb3495013c5de2ec76f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2728, "license_type": "no_license", "max_line_length": 74, "num_lines": 112, "path": "/funk.py", "repo_name": "tartaruz/Stein-saks-papir", "src_encoding": "UTF-8", "text": "import random\nfrom time import sleep#for stein saks papir\n\ndef velkommen():\n print(\"§-----------------------------------------------------------§\")\n print(\"§-----| VELKOMMEN TIL STEIN/SAKS/PAPIR! 
|-----§\")\n print(\"§-----------------------------------------------------------§\")\n print()\n\ndef valg(): #velg\n valget=str(input(\"Kommando: \"))\n return valget\n\n\n\ndef valgmeny():\n print(\" _______\")\n print(\"---' ____)\")\n print(\" (_____)\")\n print(\" [1] (_____)\")\n print(\" STEIN(____)\")\n print(\"---.__(___)\")\n\n print(\" _______\")\n print(\"---' ____)____\")\n print(\" ______)\")\n print(\" [2] __________)\")\n print(\" SAKS(____)\")\n print(\"---.__(___)\")\n\n print(\" _______\")\n print(\"---' ____)____\")\n print(\" ______)\")\n print(\" [3] _______)\")\n print(\" PAPIR _______)\")\n print(\"---.__________)\")\n\ndef revmaskinvalg(hvilken):\n if hvilken==1:\n print(\" _______ \")\n print(\" ( ____ '---\")\n print(\"(_____) \")\n print(\"(_____) \")\n print(\"(____) \")\n print(\" (___)__.---- \")\n elif hvilken==2:\n print(\" _______ \")\n print(\" ____(____ '----\")\n print(\"(______ \")\n print(\"(__________ \")\n print(\" (____) \")\n print(\" (___)__.---\")\n else: \n print(\" _______ \")\n print(\" ____(____ '---\")\n print(\" (______ \")\n print(\"(_______ \")\n print(\"(_______ \")\n print(\" (__________.--- \")\n \n\n\n\ndef choose():\n valgmeny()\n valg=eval(input(\"Velg[1-2-3]: \"))\n return valg\n\ndef meny(): \n print(\"1: 1vs1\")\n print(\"2: 1vsCom\")\n print(\"3: Help\")\n print(\"4: Avslutt\")\n\ndef comp():\n ran=random.randint(1,3)\n return ran\n\ndef konv(valg):\n if valg==1:\n res=\"stein\"\n elif valg==2:\n res=\"saks\"\n else:\n res=\"papir\"\n return res\n \n\ndef game(valg1,valg2,Gtype): #Gtype viser funksjon hva slags game det er\n if Gtype==1:\n spiller=\"spiller 2\"\n else:\n spiller=\"maskinen\"\n if valg1==valg2:\n print(\"DRAW! 
Ingen vinnere!\")\n\n elif valg1==\"stein\":\n if valg2==\"saks\":\n print(\"Spiller 1 vant mot\",spiller)\n else:\n print(\"Spiller 1 tapte mot\",spiller)\n \n elif valg1==\"saks\":\n if valg2==\"papir\":\n print(\"Spiller 1 vant mot\",spiller)\n else:\n print(\"Spiller 1 tapte mot\",spiller)\n else: #papir\n if valg2==\"stein\":\n print(\"Spiller 1 vant mot\",spiller)\n else:\n print(\"Spiller 1 tapte mot\",spiller)\n \n" } ]
2
gyubraj/final_project
https://github.com/gyubraj/final_project
25c55c8e0ca03249a1deb8ca94a6eb230e9a896f
ddb5d077af4044b0fefdb78e66cb5c57d12c559d
e35109462ea6d56bc896c7ad3e96dc1fe13be680
refs/heads/master
"2020-09-28T11:44:49.866502"
"2019-12-09T02:58:25"
"2019-12-09T02:58:25"
226,772,005
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5457209944725037, "alphanum_fraction": 0.5633059740066528, "avg_line_length": 42.74359130859375, "blob_id": "c23e0af1af65f755134bf753967939afbcb620d7", "content_id": "bebd5e52a7f8b6c89c384fb42f6a95496e606499", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1706, "license_type": "no_license", "max_line_length": 114, "num_lines": 39, "path": "/addBook/migrations/0001_initial.py", "repo_name": "gyubraj/final_project", "src_encoding": "UTF-8", "text": "# Generated by Django 2.2.7 on 2019-12-08 16:34\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Book',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('slug', models.SlugField(blank=True, unique=True)),\n ('bookowner', models.IntegerField()),\n ('bname', models.CharField(max_length=100)),\n ('category', models.CharField(max_length=100)),\n ('writer', models.CharField(max_length=100)),\n ('description', models.TextField()),\n ('image', models.ImageField(upload_to='bphoto')),\n ('condition', models.CharField(max_length=100)),\n ('actual_price', models.IntegerField()),\n ('selling_price', models.IntegerField()),\n ('publication', models.CharField(max_length=100)),\n ('available', models.BooleanField(default=True)),\n ('display_selling_price', models.IntegerField(blank=True, null=True)),\n ('discount', models.BooleanField(default=False)),\n ('discount_percent', models.IntegerField(blank=True, null=True)),\n ('discounted_selling_price', models.IntegerField(blank=True, null=True)),\n ('uploaddate', models.DateField(auto_now_add=True)),\n ('bookbuyer', models.IntegerField(blank=True, null=True)),\n ('solddate', models.DateField(blank=True, null=True)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.7219626307487488, "alphanum_fraction": 
0.7242990732192993, "avg_line_length": 34, "blob_id": "261c08e7b56c3272acf01c4162ac2aec2e5ab005", "content_id": "468783b0327bee5d65ef10b6dfdcd881392bfa12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2140, "license_type": "no_license", "max_line_length": 258, "num_lines": 61, "path": "/addBook/views.py", "repo_name": "gyubraj/final_project", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect\nfrom . models import Book\nfrom django.contrib.auth import login\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.models import User\nfrom django.template.loader import render_to_string\n# Create your views here.\n@login_required\ndef addBook(request):\n return render(request,\"list.html\")\n\ndef book(request):\n return render(request,\"addBookButton.html\")\n\n\n@login_required \ndef sell(request):\n return render(request,\"book_registration.html\")\n# def donate(request):\n# return HttpResponseRedirect(\"/addbook\")\n@login_required\ndef donate(request):\n return render(request,\"donate_book_register.html\")\n\n\n\n\n@login_required\ndef added(request):\n bookowner=request.user.id\n book_name=request.POST['book_name']\n category=request.POST['category']\n writer=request.POST['writer']\n desc=request.POST['description']\n image=request.FILES['book_photo']\n condition=request.POST['condition']\n actualPrice=int(request.POST['actual_price'])\n sellingPrice=int(request.POST['selling_price'])\n publication=request.POST['publication']\n user=request.user.username\n\n displaySellingPrice=sellingPrice+(10*sellingPrice/100)\n \n\n book=Book(bookowner=bookowner,bname=book_name, category=category, writer=writer, description=desc, image=image, condition=condition, actual_price=actualPrice, selling_price=sellingPrice, publication=publication, 
display_selling_price=displaySellingPrice)\n book.save()\n subject = 'Book Uploaded'\n message = render_to_string('bookaddedmail.html', {\n 'user': user,\n 'Book_Name' : book_name,\n 'Author' : writer,\n 'Price' : sellingPrice,\n 'Publication' : publication,\n })\n from_email=[settings.EMAIL_HOST_USER]\n to_email=[request.user.email]\n send_mail(subject=subject,from_email=from_email,recipient_list=to_email,message=message,fail_silently=False)\n return render(request,\"book_registration.html\")\n \n" } ]
2
guanjz20/MM21_FME_solution
https://github.com/guanjz20/MM21_FME_solution
b314814f4b49e18e9a6cf3cccc1063bb7e8d9f42
7919833a2112c6387a5a49fdcf0c43e75394a0bb
70ea2dc70e9c0aea1fe9af0db8ff362bdfa50676
refs/heads/master
"2023-06-16T05:00:55.101198"
"2021-07-11T07:28:07"
"2021-07-11T07:28:07"
384,887,886
4
0
null
null
null
null
null
[ { "alpha_fraction": 0.6847826242446899, "alphanum_fraction": 0.7228260636329651, "avg_line_length": 40, "blob_id": "a5e65967647d1b4d1d52e5c8e9e783a367222ece", "content_id": "5e09ae5404fe794490f5b17a7445e670a39e79be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 368, "license_type": "no_license", "max_line_length": 101, "num_lines": 9, "path": "/dataset/params.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "SAMM_ROOT = '/data/gjz_mm21/SAMM'\nCASME_2_LABEL_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/labels'\n\n# kernel path\nGAUSS_KERNEL_PATH = {\n 'sm_kernel': '/home/gjz/lry_kernels/gauss2D-smooth.npy',\n 'dr1_kernel': '/home/gjz/lry_kernels/gauss1D-derivative1.npy',\n 'dr2_kernel': '/home/gjz/lry_kernels/gauss1D-derivative2.npy'\n}" }, { "alpha_fraction": 0.49173471331596375, "alphanum_fraction": 0.5069209337234497, "avg_line_length": 32.474998474121094, "blob_id": "153f242cc2110fe1c61b323a674bc95a491e6ebc", "content_id": "4b318241e379e977ea51fdb3b9aca99f5e3fd9eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20084, "license_type": "no_license", "max_line_length": 98, "num_lines": 600, "path": "/utils.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport cv2\nfrom timm.utils import reduce_tensor\nimport torch\nimport shutil\nimport numpy as np\nimport os.path as osp\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport torch.distributed as dist\nfrom torch.nn.modules import loss\nfrom datetime import datetime\n\nimport paths\nimport dataset.utils as dataset_utils\n\nsys.setrecursionlimit(10000)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n 
self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass Focal_Loss(torch.nn.Module):\n def __init__(self, alpha=[], gamma=2, num_class=2, epsilon=1e-7):\n super().__init__()\n if alpha == []:\n self.alpha = torch.ones(num_class)\n else:\n self.alpha = torch.tensor(alpha, dtype=torch.float32)\n self.gamma = gamma\n self.epsilon = epsilon\n\n def forward(self, pred, target):\n assert len(pred.shape) == 2, 'pred shape should be N, num_class'\n assert len(target.shape) == 1, 'target shape should be N'\n pred = torch.softmax(pred, dim=-1)\n target_pred = -F.nll_loss(pred, target, reduction='none')\n loss = -torch.log(target_pred + self.epsilon)\n class_alpha = torch.tensor([self.alpha[c.item()] for c in target],\n dtype=torch.float32).to(loss.device)\n weights = ((1 - target_pred)**self.gamma) * class_alpha\n loss = (weights * loss).mean()\n return loss\n\n\nclass My_loss(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.focal_loss = Focal_Loss(num_class=3)\n\n def forward(self, out, anno_y, label_y):\n anno_x = out[..., 0]\n label_x = out[..., 1:]\n\n if len(anno_x.shape) == 2:\n anno_x = anno_x.reshape(-1)\n anno_y = anno_y.reshape(-1)\n # loss_ccc = -ccc(anno_x, anno_y)[0]\n # loss_mse = F.mse_loss(anno_x, anno_y)\n loss_l1 = F.l1_loss(anno_x, anno_y)\n\n # logits = F.log_softmax(label_x, dim=-1)\n # loss_ce = F.nll_loss(logits, label_y)\n if len(label_x.shape) == 3:\n label_x = label_x.reshape(-1, label_x.shape[-1])\n label_y = label_y.reshape(-1)\n\n # loss_ce = F.cross_entropy(label_x, label_y, reduce='mean')\n # loss_focal = self.focal_loss(label_x, label_y)\n\n # loss = loss_ccc + loss_ce\n # loss = loss_mse + loss_ce\n # loss = loss_mse + loss_focal\n # loss = loss_mse * 100\n # loss = loss_l1 * 100 + loss_focal\n loss = loss_l1 * 1000\n return loss\n\n\ndef ccc(y_pred, y_true, epsilon=1e-7):\n assert len(y_pred.shape) == 1\n true_mean = 
y_true.mean()\n pred_mean = y_pred.mean()\n v_true = y_true - true_mean\n v_pred = y_pred - pred_mean\n\n rho = (v_pred * v_true).sum() / (torch.sqrt(\n (v_pred**2).sum()) * torch.sqrt((v_true**2).sum()) + epsilon)\n std_predictions = torch.std(y_pred)\n std_gt = torch.std(y_true)\n\n ccc = 2 * rho * std_gt * std_predictions / (\n (std_predictions**2 + std_gt**2 +\n (pred_mean - true_mean)**2) + epsilon)\n return ccc, rho\n\n\ndef img_dirs_filter(img_dirs, dataset):\n '''\n some clips are not labeled...\n '''\n _img_dirs = []\n if dataset == 'SAMM':\n anno_dict = np.load(osp.join(paths.SAMM_LABEL_DIR, 'anno_dict.npy'),\n allow_pickle=True).item()\n elif dataset == 'CASME_2':\n anno_dict = np.load(osp.join(paths.CASME_2_LABEL_DIR, 'anno_dict.npy'),\n allow_pickle=True).item()\n else:\n raise NotImplementedError\n for img_dir in img_dirs:\n if img_dir in anno_dict:\n _img_dirs.append(img_dir)\n else:\n print('clip: {} is not labeled or labeled incorrectly.'.format(\n img_dir))\n return _img_dirs\n\n\ndef get_img_dirs(dataset):\n if dataset == 'SAMM':\n img_dirs = [\n osp.join(paths.SAMM_VIDEO_DIR, name)\n for name in os.listdir(paths.SAMM_VIDEO_DIR)\n ]\n elif dataset == 'CASME_2':\n _img_dirs = [[\n osp.join(paths.CASME_2_VIDEO_DIR, name1, name2)\n for name2 in os.listdir(osp.join(paths.CASME_2_VIDEO_DIR, name1))\n ] for name1 in os.listdir(paths.CASME_2_VIDEO_DIR)]\n img_dirs = []\n for dirs in _img_dirs:\n img_dirs.extend(dirs)\n else:\n raise NotImplementedError\n img_dirs = img_dirs_filter(img_dirs, dataset)\n return img_dirs\n\n\ndef leave_one_out(img_dirs, dataset):\n img_dirs_dict = {}\n img_dirs = sorted(img_dirs)\n if dataset == 'SAMM':\n keys = []\n for img_dir in img_dirs:\n keys.append(osp.basename(img_dir).split('_')[0]) # 006, 007...\n keys = sorted(list(set(keys)))\n for key in keys:\n train_set = []\n val_set = []\n for img_dir in img_dirs:\n if key in img_dir:\n val_set.append(img_dir)\n else:\n train_set.append(img_dir)\n img_dirs_dict[key] = 
[train_set, val_set]\n elif dataset == 'CASME_2':\n keys = []\n for img_dir in img_dirs:\n keys.append(img_dir.split('/')[-2]) # s15, s16...\n keys = sorted(list(set(keys)))\n for key in keys:\n train_set = []\n val_set = []\n for img_dir in img_dirs:\n if img_dir.split('/')[-2] == key:\n val_set.append(img_dir)\n else:\n train_set.append(img_dir)\n img_dirs_dict[key] = [train_set, val_set]\n else:\n raise NotImplementedError\n return img_dirs_dict\n\n\ndef adjust_learning_rate(optimizer, epoch, lr_strat, wd, lr_steps, factor=0.1):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every N epochs\"\"\"\n decay = factor**(sum(epoch >= np.asarray(lr_steps)))\n lr = lr_strat * decay\n decay = wd\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n param_group['weight_decay'] = decay\n\n\ndef save_checkpoint(state, is_best, save_root, root_model, filename='val'):\n torch.save(\n state,\n '%s/%s/%s_checkpoint.pth.tar' % (save_root, root_model, filename))\n if is_best:\n shutil.copyfile(\n '%s/%s/%s_checkpoint.pth.tar' % (save_root, root_model, filename),\n '%s/%s/%s_best_loss.pth.tar' % (save_root, root_model, filename))\n # print(\"checkpoint saved to\",\n # '%s/%s/%s_best_loss.pth.tar' % (save_root, root_model, filename))\n\n\ndef check_rootfolders(args):\n \"\"\"Create log and model folder\"\"\"\n folders_util = [\n args.root_log, args.root_model, args.root_output, args.root_runs\n ]\n folders_util = [\n \"%s/\" % (args.save_root) + folder for folder in folders_util\n ]\n for folder in folders_util:\n if not os.path.exists(folder):\n print('creating folder ' + folder)\n os.makedirs(folder)\n\n\ndef evaluate(pred_anno_dict,\n pred_label_dict,\n dataset,\n threshold=0.9,\n val_id='all',\n epoch=-1,\n args=None):\n if dataset == 'SAMM':\n pred_gt = np.load(osp.join(paths.SAMM_ROOT, 'pred_gt.npy'),\n allow_pickle=True).item()\n anno_dict = np.load(osp.join(paths.SAMM_ROOT, 'anno_dict.npy'),\n allow_pickle=True).item()\n fps = 200\n elif 
dataset == 'CASME_2':\n pred_gt = np.load(osp.join(paths.CASME_2_LABEL_DIR, 'pred_gt.npy'),\n allow_pickle=True).item()\n anno_dict = np.load(osp.join(paths.CASME_2_LABEL_DIR, 'anno_dict.npy'),\n allow_pickle=True).item()\n fps = 30\n else:\n raise NotImplementedError\n\n result_dict = {}\n for img_dir, pred_annos in pred_anno_dict.items():\n pred_labels = pred_label_dict[img_dir]\n gt_list = pred_gt[img_dir]\n pred_list = []\n\n # scan all possible peak point\n for peak_idx in range(0, len(pred_annos), fps):\n is_peak = True\n front = peak_idx\n tail = peak_idx\n # label_sum = pred_labels[peak_idx]\n cumsum = pred_annos[peak_idx]\n while is_peak and cumsum < threshold and (\n front > 0 or tail < len(pred_annos) - 1):\n if front - 1 >= 0:\n front -= 1\n cumsum += pred_annos[front]\n # label_sum += pred_labels[front]\n if tail + 1 < len(pred_annos):\n tail += 1\n cumsum += pred_annos[tail]\n # label_sum += pred_labels[tail]\n is_peak = pred_annos[peak_idx] >= pred_annos[\n front] and pred_annos[peak_idx] >= pred_annos[tail]\n if is_peak and cumsum >= threshold:\n # TODO: label func\n pred_list.append([front, tail, -1])\n\n M = len(gt_list)\n N = len(pred_list)\n A = 0\n for [onset, offset, label_gt] in gt_list:\n for [\n front, tail, _\n ] in pred_list: # TODO: if one pred could match more than one gt?\n if front < onset:\n b1 = [front, tail]\n b2 = [onset, offset]\n else:\n b2 = [front, tail]\n b1 = [onset, offset]\n\n # 1\n if b1[1] >= b2[0] and b2[1] >= b1[1]:\n overlap = b1[1] - b2[0] + 1\n union = b2[1] - b1[0] + 1\n elif b1[1] >= b2[1]:\n overlap = b2[1] - b2[0] + 1\n union = b1[1] - b1[0] + 1\n else:\n # no overlap\n overlap = 0\n union = 1\n if overlap / union >= 0.5:\n A += 1\n break\n result_dict[img_dir] = [M, N, A]\n\n ret_info = []\n M = 0\n N = 0\n A = 0\n for key, (m, n, a) in result_dict.items():\n # p = a / n\n # r = a / m\n # f = 2 * r * p / (p + r)\n # ret_info.append('[{}] P: {.4f}, R: {:.4f}, F1: {:.4f}'.format(\n # key, p, r, f))\n M += m\n N 
+= n\n A += a\n\n if M == 0 or N == 0 or A == 0:\n precision = -1.0\n recall = -1.0\n f_score = -1.0\n else:\n precision = A / N\n recall = A / M\n f_score = 2 * recall * precision / (recall + precision)\n ret_info.append('[over all] P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(\n precision, recall, f_score))\n\n # save fig\n column = 3\n fig = plt.figure(figsize=(10,\n ((len(pred_anno_dict) - 1) // column + 1) * 2))\n for i, (img_dir, pred_annos) in enumerate(pred_anno_dict.items()):\n fig.add_subplot((len(pred_anno_dict) - 1) // column + 1, column, i + 1)\n plt.plot(pred_annos, 'b-', alpha=0.5)\n plt.plot(anno_dict[img_dir], 'r-', alpha=0.5)\n fig.tight_layout()\n plt.savefig(\n osp.join(args.save_root, args.root_output,\n '{}_anno_{}.pdf'.format(val_id, epoch)))\n plt.close('all')\n\n return ret_info, f_score, (M, N, A)\n\n\ndef evaluate_bi_labels(pred_and_gt, val_id, epoch, args):\n keys = sorted(list(pred_and_gt.keys()))\n imgs_dirs = sorted(list(set([osp.dirname(img_p) for img_p in keys])))\n result_dict = {}\n for imgs_dir in imgs_dirs:\n result_dict[imgs_dir] = []\n img_ps = dataset_utils.scan_jpg_from_img_dir(imgs_dir)\n for img_p in img_ps:\n result_dict[imgs_dir].append(pred_and_gt.get(\n img_p, [0, 0])) # [pred, target]\n result_dict[imgs_dir] = np.asarray(result_dict[imgs_dir])\n\n precision, recall, f_score, MNA, result_dict, match_regions_record = evaluate_pred_and_gt(\n result_dict, args)\n\n # visulization\n if args.local_rank == 0:\n column = 3\n fig = plt.figure(figsize=(10,\n ((len(imgs_dirs) - 1) // column + 1) * 2))\n for i, imgs_dir in enumerate(imgs_dirs):\n fig.add_subplot((len(imgs_dirs) - 1) // column + 1, column, i + 1)\n data = result_dict[imgs_dir]\n pred = data[:, 0]\n target = data[:, 1]\n plt.plot(pred, 'b-', alpha=0.5)\n plt.plot(target, 'r-', alpha=0.5) # gt\n plt.title(osp.basename(imgs_dir))\n fig.tight_layout()\n out_dir = osp.join(args.save_root, args.root_output, val_id)\n os.makedirs(out_dir, exist_ok=True)\n 
plt.savefig(osp.join(out_dir, 'bi_label_{}.pdf'.format(epoch)))\n plt.close('all')\n\n return precision, recall, f_score, MNA, match_regions_record\n\n\ndef extend_front(front, pred, patience):\n assert pred[front] > 0\n d = patience\n while d > 0:\n if front + d < len(pred) and pred[front + d] > 0:\n return extend_front(front + d, pred, patience)\n d -= 1\n return front\n\n\ndef evaluate_pred_and_gt(result_dict, args):\n if args.dataset == 'SAMM':\n # patience = 25\n pred_gt = np.load(osp.join(paths.SAMM_ROOT, 'pred_gt.npy'),\n allow_pickle=True).item()\n elif args.dataset == 'CASME_2':\n pred_gt = np.load(osp.join(paths.CASME_2_LABEL_DIR, 'pred_gt.npy'),\n allow_pickle=True).item()\n # patience = 10\n else:\n raise NotImplementedError\n\n M = 0\n N = 0\n A = 0\n match_regions_record = {}\n for imgs_dir, data in result_dict.items():\n pred = data[:, 0]\n target = data[:, 1]\n\n found_regions = []\n match_regions = [\n ] # gt_onset, gt_offset, pred_onset, pred_offset, TP/FP\n front = 0\n while front < len(pred):\n tail = front\n if pred[front] > 0:\n tail = extend_front(front, pred, args.patience)\n if front < tail: # find one region\n found_regions.append([front, tail])\n front = tail + args.patience\n\n # modify result_dict\n pred = np.zeros_like(pred)\n for front, tail in found_regions:\n pred[front:tail] = 1\n data[:, 0] = pred\n result_dict[imgs_dir] = data\n\n # eval precision, recall, f_score\n gt_list = pred_gt[imgs_dir]\n m = len(gt_list)\n n = len(found_regions)\n a = 0\n # TODO: determine whether one predicted region is macro or micro-expression\n gt_regions_mark = np.zeros(m)\n found_regions_mark = np.zeros(n)\n for mg, [onset, offset, label_gt] in enumerate(gt_list):\n # label_gt: 1->macro, 2->micro\n for mf, [front, tail] in enumerate(\n found_regions\n ): # TODO: if one found region can match more than one gt region\n if front < onset:\n b1 = [front, tail]\n b2 = [onset, offset]\n else:\n b1 = [onset, offset]\n b2 = [front, tail]\n\n # 1\n if b1[1] 
>= b2[0] and b2[1] >= b1[1]:\n overlap = b1[1] - b2[0] + 1\n union = b2[1] - b1[0] + 1\n elif b1[1] >= b2[1]:\n overlap = b2[1] - b2[0] + 1\n union = b1[1] - b1[0] + 1\n else: # no overlap\n overlap = 0\n union = 1\n if overlap / union >= 0.5:\n a += 1\n found_regions_mark[mf] = 1\n gt_regions_mark[mg] = 1\n match_regions.append([onset, offset, front, tail, 'TP'])\n break\n for mg in range(m):\n if gt_regions_mark[mg] == 0:\n onset, offset, _ = gt_list[mg]\n match_regions.append([onset, offset, '-', '-', 'FN'])\n for mf in range(n):\n if found_regions_mark[mf] == 0:\n front, tail = found_regions[mf]\n match_regions.append(['-', '-', front, tail, 'FP'])\n match_regions_record[imgs_dir] = match_regions\n M += m\n N += n\n A += a\n # NOTE: if one found region can match more than one gt region, TP+FP may be greater than n\n\n # result of the participant\n if A == 0 or N == 0:\n precision = -1.0\n recall = -1.0\n f_score = -1.0\n else:\n precision = A / N\n recall = A / M\n f_score = 2 * precision * recall / (precision + recall)\n return precision, recall, f_score, (M, N,\n A), result_dict, match_regions_record\n\n\ndef calculate_metric_from_dict_MNA(MNA_all):\n M = 0\n N = 0\n A = 0\n for k, mna in MNA_all.items():\n m, n, a = mna\n M += m\n N += n\n A += a\n try:\n precision = A / N\n recall = A / M\n f_score = 2 * precision * recall / (precision + recall)\n except:\n precision = -1.0\n recall = -1.0\n f_score = -1.0\n return precision, recall, f_score\n\n\ndef synchronize():\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n world_size = dist.get_world_size()\n if world_size == 1:\n return\n dist.barrier()\n\n\ndef reduce_loss(loss, args):\n if args.distributed:\n loss = reduce_tensor(loss.data, float(args.world_size))\n return loss\n\n\ndef synchronize_pred_and_gt(pred_and_gt, epoch, args, remove=True):\n if args.distributed:\n out_dir = osp.join(args.save_root, args.root_runs,\n 'temp_{}'.format(epoch))\n if args.local_rank == 0:\n 
os.makedirs(out_dir, exist_ok=True)\n synchronize() # make dir done\n np.save(\n osp.join(out_dir,\n 'temp_pred_and_gt_{}.npy'.format(args.local_rank)),\n pred_and_gt)\n synchronize() # save done\n if args.local_rank == 0:\n pred_and_gt = {}\n for name in os.listdir(out_dir):\n data = np.load(osp.join(out_dir, name),\n allow_pickle=True).item()\n pred_and_gt.update(data)\n np.save(osp.join(out_dir, 'temp_pred_and_gt_merge.npy'),\n pred_and_gt)\n synchronize() # merge done\n else:\n synchronize() # start read\n pred_and_gt = np.load(osp.join(out_dir,\n 'temp_pred_and_gt_merge.npy'),\n allow_pickle=True).item()\n synchronize() # read done\n if remove and args.local_rank == 0:\n shutil.rmtree(out_dir)\n return pred_and_gt\n\n\ndef synchronize_f_score(f_score, args):\n assert isinstance(f_score, float)\n if args.distributed:\n f_score = torch.tensor(f_score).cuda()\n assert f_score.dtype == torch.float32\n synchronize() # wait tensor allocation\n dist.broadcast(f_score, src=0)\n f_score = f_score.item()\n return f_score\n\n\ndef synchronize_list(list_obj, args):\n assert isinstance(list_obj, (list, tuple))\n if args.distributed:\n list_obj = torch.tensor(list_obj, dtype=torch.int32).cuda()\n synchronize() # wait tensor allocation\n dist.broadcast(list_obj, src=0)\n list_obj = list_obj.cpu().numpy().tolist()\n return list_obj\n\n\ndef delete_records(total_MNA, match_regions_record_all, val_id):\n # keys1 = list(total_MNA.keys())\n keys2 = list(match_regions_record_all.keys())\n rm_key = val_id\n\n del total_MNA[rm_key]\n for k in keys2:\n if k.split('/')[-2] == rm_key or osp.basename(k).split(\n '_')[0] == rm_key:\n del match_regions_record_all[k]\n return total_MNA, match_regions_record_all" }, { "alpha_fraction": 0.7414966225624084, "alphanum_fraction": 0.7868480682373047, "avg_line_length": 48.11111068725586, "blob_id": "906ef3af26662cb2d3760f31e771f69b11c77b8d", "content_id": "c0ba4fd9a9587e7f3ebddcede52cb70292f0f03b", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "no_license", "max_line_length": 115, "num_lines": 9, "path": "/paths.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "# SAMM\nSAMM_ROOT = '/data/gjz_mm21/SAMM'\nSAMM_LABEL_DIR = SAMM_ROOT\nSAMM_VIDEO_DIR = '/data/gjz_mm21/SAMM/SAMM_longvideos'\n\n# CASME_2\nCASME_2_ROOT = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped'\nCASME_2_LABEL_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/labels'\nCASME_2_VIDEO_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/longVideoFaceCropped'" }, { "alpha_fraction": 0.49591565132141113, "alphanum_fraction": 0.5101633667945862, "avg_line_length": 36.0704231262207, "blob_id": "942beebcda0fc8eb5fa61afaaa343aef38a68a60", "content_id": "2d23a48c75557013c20a131697faa2d9d54440c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10528, "license_type": "no_license", "max_line_length": 81, "num_lines": 284, "path": "/dataset/me_dataset.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "from unicodedata import name\nimport cv2\nimport os\nimport pdb\nimport torch\nimport time\nimport pywt\nimport glob\nimport numpy as np\nimport os.path as osp\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset\nfrom torch import nn as nn\n\nfrom . import params\nfrom . 
import utils\n\nWT_CHANNEL = 4\nsm_kernel = np.load(params.GAUSS_KERNEL_PATH['sm_kernel'])\ndr1_kernel = np.load(params.GAUSS_KERNEL_PATH['dr1_kernel'])\ndr2_kernel = np.load(params.GAUSS_KERNEL_PATH['dr2_kernel'])\ndr1_kernel = dr1_kernel[:, None, None]\ndr2_kernel = dr2_kernel[:, None, None]\n\n\nclass SAMMDataset(Dataset):\n def __init__(self,\n mode,\n img_dirs,\n seq_len=64,\n step=32,\n time_len=12,\n input_size=256,\n data_aug=False,\n data_option=None,\n dataset_name='SAMM'):\n super().__init__()\n self.dataset_name = dataset_name\n self.mode = mode\n self.seq_len = seq_len\n self.step = step\n assert mode == 'train' or (mode == 'test'\n and self.seq_len <= self.step)\n\n self.time_len = time_len # observate time_len//2 frames before and after\n self.size = input_size if data_option == 'diff' else input_size * 2\n self.img_dirs = img_dirs # imgs files dirs\n if not isinstance(self.img_dirs, list):\n self.img_dirs = [self.img_dirs]\n self.img_ps_dict = self._get_img_ps_dict()\n self.seq_list = self._get_seq_list()\n self.label_dict = np.load(osp.join(params.SAMM_ROOT, 'label_dict.npy'),\n allow_pickle=True).item()\n self.anno_dict = np.load(osp.join(params.SAMM_ROOT, 'anno_dict.npy'),\n allow_pickle=True).item()\n # print('Load {} clips, {} frames from {}'.format(\n # len(self.seq_list),\n # len(self.seq_list) * self.seq_len, dataset_name))\n\n self.transform = utils.get_group_transform(\n mode) if data_aug else utils.Identity()\n self.data_option = data_option\n\n def _get_img_ps_dict(self):\n ret_dict = {}\n for img_dir in self.img_dirs:\n img_ps = utils.scan_jpg_from_img_dir(img_dir)\n ret_dict[img_dir] = tuple(img_ps)\n return ret_dict\n\n def _get_seq_list(self):\n ret_list = []\n for img_dir, img_ps in self.img_ps_dict.items():\n front = 0\n tail = front + self.seq_len # [front, tail), tail not include\n while tail <= len(img_ps):\n ret_list.append([img_dir, front,\n tail]) # (img dir, front_idx, tail_idx)\n front += self.step\n tail = front + 
self.seq_len\n return ret_list\n\n def __len__(self):\n return len(self.seq_list)\n\n def __getitem__(self, index):\n img_dir, front, tail = self.seq_list[\n index] # [front, tail), tail not include\n seq_info = (img_dir, front, tail)\n\n # insert and append extra imgs for temporal conv\n _old_len = len(self.img_ps_dict[img_dir])\n img_ps = list(self.img_ps_dict[img_dir][front:tail])\n for i in range(1, self.time_len // 2 + 1):\n img_ps.insert(0, self.img_ps_dict[img_dir][max(0, front - i)])\n img_ps.append(self.img_ps_dict[img_dir][min(\n _old_len - 1, tail - 1 + i)])\n _cur_len = len(self.img_ps_dict[img_dir])\n assert _old_len == _cur_len # make sure the dict has not been changed\n\n # read seqence features, annos and labels\n img_features = np.stack([\n np.load(p.replace('.jpg', '.npy'))\n for p in img_ps[self.time_len // 2:-self.time_len // 2]\n ], 0)\n annos = self.anno_dict[img_dir][front:tail]\n labels = self.label_dict[img_dir][front:tail]\n assert img_features.shape == (self.seq_len, 2048) # resnet50 features\n\n # read sequence imgs\n flat_imgs = np.empty(\n (self.seq_len + (self.time_len // 2) * 2, self.size, self.size),\n dtype=np.float32)\n for i, p in enumerate(img_ps):\n img = cv2.imread(p, cv2.IMREAD_GRAYSCALE)\n if not img.shape[0] == img.shape[1]:\n # crop to square\n h, w = img.shape\n wide = abs(h - w) // 2\n if h > w:\n img = img[wide:wide + w, :]\n else:\n img = img[:, wide:wide + h]\n try:\n assert img.shape[0] == img.shape[1]\n except:\n print('Error in cropping image {}'.format(p))\n img = cv2.resize(img, (self.size, self.size))\n flat_imgs[i] = img\n\n # transform\n flat_imgs = self.transform(flat_imgs)\n if self.data_option is not None and 'wt' in self.data_option:\n flat_wts = np.stack([dwt2(img) for img in flat_imgs], 0)\n\n # expand falt imgs\n i = 0\n front = 0\n tail = front + self.time_len # [front, tail], tail include\n if self.data_option is not None and 'wt' in self.data_option:\n seq_wts = np.empty((self.seq_len, self.time_len 
+ 1, WT_CHANNEL,\n self.size // 2, self.size // 2),\n dtype=np.float32)\n elif self.data_option == 'diff':\n seq_imgs = np.empty(\n (self.seq_len, self.time_len + 1, self.size, self.size),\n dtype=np.float32)\n while tail < len(flat_imgs):\n if self.data_option is not None and 'wt' in self.data_option:\n seq_wts[i] = flat_wts[front:tail + 1].copy()\n elif self.data_option == 'diff':\n seq_imgs[i] = flat_imgs[front:tail + 1].copy()\n i += 1\n front += 1\n tail += 1\n assert i == self.seq_len\n\n # data options\n if self.data_option == 'diff':\n ret_coefs = np.stack([get_diff(imgs) for imgs in seq_imgs], 0)\n elif self.data_option == 'wt_diff':\n ret_coefs = np.stack([get_diff(coefs) for coefs in seq_wts],\n 0).reshape(self.seq_len,\n self.time_len * WT_CHANNEL,\n self.size // 2, self.size // 2)\n elif self.data_option == 'wt_dr':\n ret_coefs = seq_wts.transpose(0, 2, 1, 3, 4)\n ret_coefs = np.asarray([[\n get_smoothing_and_dr_coefs(coefs_dim2)\n for coefs_dim2 in coefs_dim1\n ] for coefs_dim1 in ret_coefs])\n assert ret_coefs.shape[:3] == (self.seq_len, WT_CHANNEL, 3 * 2)\n ret_coefs = ret_coefs.transpose(0, 2, 1, 3, 4)\n ret_coefs = ret_coefs.reshape(self.seq_len, -1, self.size // 2,\n self.size // 2)\n elif self.data_option is None:\n print('Require data option...')\n exit()\n else:\n raise NotImplementedError\n\n ret_coefs = torch.FloatTensor(ret_coefs)\n img_features = torch.FloatTensor(img_features)\n annos = torch.FloatTensor(annos)\n labels = torch.LongTensor(labels)\n return ret_coefs, img_features, annos, labels, seq_info\n\n\nclass CASME_2Dataset(SAMMDataset):\n def __init__(self,\n mode,\n img_dirs,\n seq_len=64,\n step=32,\n time_len=12,\n input_size=256,\n data_aug=False,\n data_option=None,\n dataset_name='CASME_2'):\n super().__init__(mode,\n img_dirs,\n seq_len=seq_len,\n step=step,\n time_len=time_len,\n input_size=input_size,\n data_aug=data_aug,\n data_option=data_option,\n dataset_name=dataset_name)\n self.label_dict = 
np.load(osp.join(params.CASME_2_LABEL_DIR,\n 'label_dict.npy'),\n allow_pickle=True).item()\n self.anno_dict = np.load(osp.join(params.CASME_2_LABEL_DIR,\n 'anno_dict.npy'),\n allow_pickle=True).item()\n\n\nclass SAMMImageDataset(Dataset):\n def __init__(self, img_ps):\n super().__init__()\n self.img_ps = img_ps\n self.bi_label = np.load(\n osp.join(params.SAMM_ROOT, 'bi_label.npy'),\n allow_pickle=True).item() # imgs_dir -> [<target img_p> ... ]\n\n def __len__(self):\n return len(self.img_ps)\n\n def __getitem__(self, index):\n img_p = self.img_ps[index]\n npy_p = img_p.replace('.jpg', '.npy')\n feature = np.load(npy_p)\n feature = torch.tensor(feature, dtype=torch.float32)\n\n imgs_dir = osp.dirname(img_p)\n label = 1 if img_p in self.bi_label[\n imgs_dir] else 0 # 1 for spotting region\n label = torch.tensor(label, dtype=torch.long)\n\n return feature, label, img_p\n\n\nclass CASME_2ImageDataset(SAMMImageDataset):\n def __init__(self, img_ps):\n super().__init__(img_ps)\n self.bi_label = np.load(\n osp.join(params.CASME_2_LABEL_DIR, 'bi_label.npy'),\n allow_pickle=True).item() # imgs_dir -> [<target img_p> ... 
]\n\n\ndef get_diff(imgs):\n if len(imgs.shape) == 3:\n assert imgs.shape[1] == imgs.shape[2] # imgs\n elif len(imgs.shape) == 4:\n assert imgs.shape[2] == imgs.shape[\n 3] and imgs.shape[1] == WT_CHANNEL # wt_coefs\n imgs1 = imgs[:-1]\n imgs2 = imgs[1:]\n return imgs2 - imgs1\n\n\ndef dwt2(img, wave_name='haar'):\n assert isinstance(img, np.ndarray)\n coefs = pywt.dwt2(img, wave_name)\n coefs = np.array([coefs[0], *coefs[1]])\n return coefs # (4, w//2, h//2)\n\n\ndef get_smoothing_and_dr_coefs(imgs):\n '''\n GAUSS_KERNEL_PATH\n '''\n\n global sm_kernel, dr1_kernel, dr2_kernel\n sm_imgs = np.array([cv2.filter2D(img, -1, sm_kernel) for img in imgs])\n dr_ks = dr1_kernel.shape[0]\n\n dr1_res = []\n dr2_res = []\n for i in range(len(imgs) - dr_ks + 1):\n _imgs = sm_imgs[i:i + dr_ks]\n dr1_res.append((_imgs * dr1_kernel).sum(axis=0))\n dr2_res.append((_imgs * dr2_kernel).sum(axis=0))\n res = np.stack((*dr1_res, *dr2_res), 0)\n return res\n" }, { "alpha_fraction": 0.47280335426330566, "alphanum_fraction": 0.49435147643089294, "avg_line_length": 35.212120056152344, "blob_id": "ce876d402883b35fb8be0aede679e305c19eb460", "content_id": "49b4df8442a0f2e9795e6988405b86eab54ceec5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9560, "license_type": "no_license", "max_line_length": 79, "num_lines": 264, "path": "/model/network.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport glob\nimport os\nimport os.path as osp\n\nfrom torch.serialization import load\n\n\nclass MLP(nn.Module):\n def __init__(self, hidden_units, dropout=0.3):\n super(MLP, self).__init__()\n input_feature_dim = hidden_units[0]\n num_layers = len(hidden_units) - 1\n assert num_layers > 0\n assert hidden_units[-1] == 256\n fc_list = []\n for hidden_dim in hidden_units[1:]:\n fc_list += [\n nn.Dropout(dropout),\n 
nn.Linear(input_feature_dim, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(inplace=True)\n ]\n input_feature_dim = hidden_dim\n self.mlp = nn.Sequential(*fc_list)\n\n def forward(self, input_tensor):\n bs, num_frames, feature_dim = input_tensor.size()\n input_tensor = input_tensor.reshape(bs * num_frames, feature_dim)\n out = self.mlp(input_tensor)\n return out.reshape(bs, num_frames, -1)\n\n\nclass Temporal_Net(nn.Module):\n def __init__(self, input_size, num_channels, hidden_units, dropout,\n feature):\n super().__init__()\n assert input_size in [112, 128, 224, 256]\n self.feature = feature # return feature before classification\n\n # 4 layers conv net\n self.conv_net = []\n self.conv_net.append(\n self._make_conv_layer(num_channels, 2**6, stride=2))\n for i in range(7, 10):\n self.conv_net.append(\n self._make_conv_layer(2**(i - 1), 2**i, stride=2))\n self.conv_net = nn.Sequential(*self.conv_net)\n\n last_conv_width = input_size // (2**4)\n last_conv_dim = 2**9\n self.dropout = nn.Dropout2d(p=0.2)\n # self.avgpool = nn.AvgPool2d(\n # kernel_size=[last_conv_width, last_conv_width])\n fc_list = []\n fc_list += [\n nn.Linear(last_conv_dim, hidden_units[0]),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(hidden_units[0]),\n nn.Dropout(dropout)\n ]\n for i in range(0, len(hidden_units) - 2):\n fc_list += [\n nn.Linear(hidden_units[i], hidden_units[i + 1]),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(hidden_units[i + 1]),\n nn.Dropout(dropout)\n ]\n self.fc = nn.Sequential(*fc_list)\n\n # not used\n final_norm = nn.BatchNorm1d(1, eps=1e-6, momentum=0.1)\n self.classifier = nn.Sequential(\n nn.Linear(hidden_units[-2], hidden_units[-1]), final_norm)\n\n def _make_conv_layer(self, in_c, out_c, kernel_size=3, stride=2):\n ks = kernel_size\n conv_layer = nn.Sequential(\n nn.Conv2d(in_c, out_c, kernel_size=(ks, ks), padding=ks // 2),\n nn.BatchNorm2d(out_c,\n eps=1e-05,\n momentum=0.1,\n affine=True,\n track_running_stats=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_c,\n 
out_c,\n kernel_size=(ks, ks),\n padding=ks // 2,\n stride=stride),\n nn.BatchNorm2d(out_c,\n eps=1e-05,\n momentum=0.1,\n affine=True,\n track_running_stats=True),\n nn.ReLU(inplace=True),\n )\n return conv_layer\n\n def forward(self, wt_data):\n bs, num_frames, num_channel, W0, H0 = wt_data.size()\n wt_data = wt_data.reshape(bs * num_frames, num_channel, W0, H0)\n conv_out = self.conv_net(wt_data)\n avgpool = F.adaptive_avg_pool2d(conv_out, (1, 1))\n # avgpool = self.avgpool(conv_out)\n avgpool = avgpool.reshape(bs * num_frames, -1)\n out = self.fc(avgpool)\n if self.feature:\n return out\n else:\n out = self.classifier(out)\n return out\n\n\nclass Two_Stream_RNN(nn.Module):\n def __init__(self,\n mlp_hidden_units=[2048, 256, 256],\n dropout=0.3,\n inchannel=12,\n size=256,\n outchannel=4):\n super().__init__()\n self.mlp = MLP(mlp_hidden_units)\n self.temporal_net = Temporal_Net(size,\n inchannel,\n hidden_units=[256, 256, 1],\n dropout=0.3,\n feature=True)\n\n self.transform = nn.Sequential(nn.Linear(512, 256),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(256),\n nn.Dropout(dropout))\n self.rnns = nn.GRU(256,\n 128,\n bidirectional=True,\n num_layers=2,\n dropout=0.3,\n batch_first=True)\n self.classifier = nn.Sequential(nn.Dropout(dropout),\n nn.Linear(256, outchannel),\n nn.BatchNorm1d(outchannel), nn.ReLU())\n _init_weights(self)\n\n def forward(self, temp_data, rgb_data, return_feature=False):\n bs, num_frames = rgb_data.size(0), rgb_data.size(1)\n\n # spatial features\n features_cnn = self.mlp(rgb_data)\n features_spatial = features_cnn.reshape(bs, num_frames, -1)\n\n # temporal features\n features_temporal = self.temporal_net(temp_data)\n features_temporal = features_temporal.reshape(bs, num_frames, -1)\n features = torch.cat([features_spatial, features_temporal], dim=-1)\n features = self.transform(features.reshape(bs * num_frames, -1))\n features = features.reshape(bs, num_frames, -1)\n\n # rnn combination\n outputs_rnns, _ = self.rnns(features)\n 
outputs_rnns = outputs_rnns.reshape(bs * num_frames, -1)\n out = self.classifier(outputs_rnns)\n out = out.reshape(bs, num_frames, -1)\n\n if return_feature:\n return out\n\n # anno transforms\n out[..., 0] = torch.log(out[..., 0] + 1)\n return out\n\n\nclass Two_Stream_RNN_Cls(Two_Stream_RNN):\n def __init__(self,\n mlp_hidden_units=[2048, 256, 256],\n dropout=0.3,\n inchannel=12,\n size=256,\n outchannel=2):\n super().__init__(mlp_hidden_units=mlp_hidden_units,\n dropout=dropout,\n inchannel=inchannel,\n size=size,\n outchannel=outchannel)\n\n self.classifier = nn.Sequential(nn.Dropout(dropout),\n nn.Linear(256, outchannel))\n _init_weights(self)\n\n def forward(self, temp_data, rgb_data):\n out = super().forward(temp_data, rgb_data, return_feature=True)\n return out\n\n\nclass ResNet50_Cls(nn.Module):\n def __init__(self, num_class=2):\n super().__init__()\n self.fc = nn.Sequential(nn.Linear(2048, 512), nn.Dropout(0.5),\n nn.Linear(512, num_class))\n\n def forward(self, x):\n assert x.shape[-1] == 2048\n x = self.fc(x)\n return x\n\n\ndef _init_weights(model):\n for k, m in model.named_modules():\n if isinstance(m, (nn.Conv3d, nn.Conv2d, nn.Conv1d)):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_out',\n nonlinearity='relu')\n # nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, (nn.BatchNorm3d, nn.BatchNorm2d, nn.BatchNorm1d)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, (nn.Linear)):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n\ndef load_pretrained_model(model, path, load_bn):\n model_dict = model.state_dict()\n state_dict = torch.load(path, map_location='cpu')['state_dict']\n state_dict = {\n k.replace('wt_net', 'temporal_net', 1): v\n for k, v in state_dict.items()\n }\n\n # bn filter\n if not load_bn:\n bn_keys = []\n for k in state_dict.keys():\n if 'running_mean' in k:\n bn_name = 
'.'.join(k.split('.')[:-1])\n for name in [\n 'weight', 'bias', 'running_mean', 'running_var',\n 'num_batches_tracked'\n ]:\n bn_keys.append(bn_name + '.' + name)\n state_dict = {k: v for k, v in state_dict.items() if k not in bn_keys}\n\n # # module name rank adjust\n # for k, v in state_dict.items():\n # if 'mlp.mlp.5' in k:\n # state_dict[k.replace('mlp.mlp.5', 'mlp.mlp.4')] = v\n # del state_dict[k]\n # if 'temporal_net.fc.4' in k:\n # state_dict[k.replace('temporal_net.fc.4',\n # 'temporal_net.fc.3')] = v\n # del state_dict[k]\n\n # classifier filter\n state_dict = {k: v for k, v in state_dict.items() if 'classifier' not in k}\n\n model_dict.update(state_dict)\n model.load_state_dict(model_dict)\n return model\n" }, { "alpha_fraction": 0.4660499393939972, "alphanum_fraction": 0.4773358106613159, "avg_line_length": 39.94696807861328, "blob_id": "021ef3e1bf9fd389ed0418bfc8683ca3225d2639", "content_id": "b68ada59d4ad5f1423afe4bc32340edd18a67ec1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5405, "license_type": "no_license", "max_line_length": 77, "num_lines": 132, "path": "/config.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "import argparse\n\nparser = argparse.ArgumentParser(description=\"x\")\nparser.add_argument('--store_name', type=str, default=\"\")\nparser.add_argument('--save_root', type=str, default=\"\")\nparser.add_argument('--tag', type=str, default=\"\")\nparser.add_argument('--snap', type=str, default=\"\")\nparser.add_argument('--dataset',\n type=str,\n default=\"\",\n choices=['SAMM', 'CASME_2'])\nparser.add_argument('--data_aug', action='store_true')\nparser.add_argument('--distributed', action='store_true')\nparser.add_argument('--amp', action='store_true')\nparser.add_argument(\"--local_rank\", default=0, type=int)\nparser.add_argument(\"--seed\", default=111, type=int)\nparser.add_argument('--finetune_list',\n default=[],\n type=str,\n 
nargs=\"+\",\n help='finetune subjects')\nparser.add_argument(\"--patience\",\n default=15,\n type=int,\n help='front extend patience')\n\n# ========================= Model Configs ==========================\nparser.add_argument('--hidden_units',\n default=[2048, 256, 256],\n type=int,\n nargs=\"+\",\n help='hidden units set up')\nparser.add_argument('--length', type=int, default=64)\nparser.add_argument('--step', type=int, default=64)\nparser.add_argument('-L',\n type=int,\n default=12,\n help='the number of input difference images')\nparser.add_argument('--input_size', type=int, default=112)\nparser.add_argument('--data_option',\n type=str,\n choices=['diff', 'wt_diff', 'wt_dr'])\n\n# ========================= Learning Configs ==========================\nparser.add_argument('--epochs',\n default=25,\n type=int,\n metavar='N',\n help='number of total epochs to run')\nparser.add_argument(\n '--early_stop', type=int,\n default=3) # if validation loss didn't improve over 3 epochs, stop\nparser.add_argument('-b',\n '--batch_size',\n default=16,\n type=int,\n metavar='N',\n help='mini-batch size (default: 16)')\nparser.add_argument('--lr', default=1e-2, type=float)\nparser.add_argument('--lr_decay_factor', default=0.1, type=float)\nparser.add_argument('--lr_steps',\n default=[2, 5],\n type=float,\n nargs=\"+\",\n metavar='LRSteps',\n help='epochs to decay learning rate by factor')\nparser.add_argument('--optim', default='SGD', type=str)\nparser.add_argument('--momentum',\n default=0.9,\n type=float,\n metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay',\n '--wd',\n default=5e-4,\n type=float,\n metavar='W',\n help='weight decay (default: 5e-4)')\nparser.add_argument('--clip-gradient',\n '--gd',\n default=20,\n type=float,\n metavar='W',\n help='gradient norm clipping (default: 20)')\nparser.add_argument('--focal_alpha', default=[1., 1.], type=float, nargs=\"+\")\n\n# ========================= Monitor Configs 
==========================\nparser.add_argument('--print-freq',\n '-p',\n default=50,\n type=int,\n metavar='N',\n help='print frequency (default: 50) iteration')\nparser.add_argument('--eval-freq',\n '-ef',\n default=1,\n type=int,\n metavar='N',\n help='evaluation frequency (default: 1) epochs')\n\n# ========================= Runtime Configs ==========================\nparser.add_argument('-j',\n '--workers',\n default=0,\n type=int,\n metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--resume',\n default='',\n type=str,\n metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--delete_last',\n action='store_true',\n help='delete the last recorded subject')\nparser.add_argument('-t',\n '--test',\n dest='test',\n action='store_true',\n help='evaluate model on test set')\nparser.add_argument('--start-epoch',\n default=0,\n type=int,\n metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--gpus', type=str, default=None)\nparser.add_argument('--root_log', type=str, default='log')\nparser.add_argument('--root_model', type=str, default='model')\nparser.add_argument('--root_output', type=str, default='output')\nparser.add_argument('--root_runs', type=str, default='runs')\nparser.add_argument('--load_pretrained', type=str, default='')\nparser.add_argument('--load_bn', action='store_true')\n" }, { "alpha_fraction": 0.7419999837875366, "alphanum_fraction": 0.7860000133514404, "avg_line_length": 44.45454406738281, "blob_id": "0dceef23ee03d0fb3bd5773c3e30c419bd225782", "content_id": "d34feb6577b2484bd302fe0c48f2f2fd639c1296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 500, "license_type": "no_license", "max_line_length": 115, "num_lines": 11, "path": "/preprocess/params.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "# CASME_2\nCASME_2_ROOT = 
'/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped'\nCASME_2_LABEL_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/labels'\nCASME_2_VIDEO_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/longVideoFaceCropped'\n\n# SAMM\nSAMM_ROOT = '/data/gjz_mm21/SAMM'\nSAMM_VIDEO_DIR = '/data/gjz_mm21/SAMM/SAMM_longvideos'\n\n# resnet50 features\nMODEL_DIR = '/home/gjz/fmr_backbone/pytorch-benchmarks/ferplus'\n" }, { "alpha_fraction": 0.5232155323028564, "alphanum_fraction": 0.5440055727958679, "avg_line_length": 29.70212745666504, "blob_id": "f89b24b736d5b1dd26f14c8f820913f20935da00", "content_id": "29eea28b828c88e310657a82358a39e0fa51907e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1443, "license_type": "no_license", "max_line_length": 75, "num_lines": 47, "path": "/submit.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport os.path as osp\n\ndataset = 'CASME_2'\n# dataset = 'SAMM'\nsubmit_name = 'submit_{}.csv'.format(dataset)\nresult_dir_name = 'results'\nsubmit_npy_name = 'match_regions_record_all.npy'\nsubmit_id = 'done_exp_cls_ca_20210708-215035'\n\n\ndef convert_key(k, dataset):\n if dataset == 'CASME_2':\n k = osp.basename(k)[:7]\n elif dataset == 'SAMM':\n k = osp.basename(k)\n else:\n raise NotImplementedError\n return k\n\n\ndata = np.load(osp.join('.', result_dir_name, submit_id, 'output',\n submit_npy_name),\n allow_pickle=True).item()\n\nmetric = {'TP': 0, 'FN': 0, 'FP': 0}\nwith open(submit_name, 'w') as f:\n if dataset == 'CASME_2':\n f.write('2\\r\\n')\n elif dataset == 'SAMM':\n f.write('1\\r\\n')\n else:\n raise NotImplementedError\n for k, v in data.items():\n k = convert_key(k, dataset)\n assert isinstance(v[0], list)\n for line in v:\n f.write(','.join([k, *[str(x) for x in line]]) + '\\r\\n')\n metric[line[-1]] += 1\n\nprecision = 
metric['TP'] / (metric['TP'] + metric['FP'])\nrecall = metric['TP'] / (metric['TP'] + metric['FN'])\nf_score = 2 * precision * recall / (precision + recall)\nprint('TP: {}, FP: {}, FN: {}'.format(metric['TP'], metric['FP'],\n metric['FN']))\nprint('P: {:.4f}, R: {:.4f}, F: {:.4f}'.format(precision, recall, f_score))\n" }, { "alpha_fraction": 0.4896477162837982, "alphanum_fraction": 0.49830037355422974, "avg_line_length": 36.410404205322266, "blob_id": "75a3a6c78e1d47d1b8631d8258d259cdb7a80e59", "content_id": "c0566db0c3ea90660b7736157b7ad231118264e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6472, "license_type": "no_license", "max_line_length": 78, "num_lines": 173, "path": "/trainer_cls.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "import time\nfrom matplotlib.pyplot import winter\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\n\nimport utils\nimport dataset.utils as dataset_utils\nimport dataset.params as DATASET_PARAMS\n\n\ndef train(dataloader, model, criterion, optimizer, epoch, logger, args,\n amp_autocast, loss_scaler):\n batch_time = utils.AverageMeter()\n data_time = utils.AverageMeter()\n losses = utils.AverageMeter()\n end = time.time()\n model.train()\n for i, data_batch in enumerate(dataloader):\n data_time.update(time.time() - end)\n temp_data, img_features, annos, labels, _ = data_batch\n batch_size = temp_data.shape[0]\n\n # # TODO: skip all zero samples\n # if (labels == 0).all() and np.random.rand() <= 0.7:\n # end = time.time()\n # # print('skip all zeros batch...')\n # continue\n # keep_ids = []\n # for bi in range(batch_size):\n # if not ((labels[bi] == 0).all() and np.random.rand() <= 0.5):\n # keep_ids.append(bi)\n # # print('skip {} samples...'.format(batch_size - len(keep_ids)))\n # batch_size = len(keep_ids) # m batch_size\n # if batch_size == 0:\n # end = time.time()\n # # print('skip all zeros batch...')\n # 
continue\n # keep_ids = np.asarray(keep_ids)\n # temp_data = temp_data[keep_ids]\n # img_features = img_features[keep_ids]\n # annos = annos[keep_ids]\n # labels = labels[keep_ids]\n\n # label preprocess\n labels[labels > 0] = 1 # 1, 2 -> 1\n\n temp_data = temp_data.cuda()\n img_features = img_features.cuda()\n # annos = annos.cuda()\n labels = labels.cuda()\n\n with amp_autocast():\n out = model(temp_data, img_features)\n # flat labels\n out = out.reshape(batch_size * args.length, -1)\n labels = labels.reshape(-1)\n loss = criterion(out, labels)\n\n # backward + step\n optimizer.zero_grad()\n if loss_scaler is None:\n loss.backward()\n optimizer.step()\n else:\n loss_scaler(loss, optimizer)\n\n # distirbuted reduce\n utils.reduce_loss(loss, args)\n losses.update(loss.item(), temp_data.size(0))\n batch_time.update(time.time() - end)\n\n if args.local_rank == 0 and (i % args.print_freq == 0\n or i == len(dataloader) - 1):\n output = ('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(\n epoch,\n i + 1,\n len(dataloader),\n batch_time=batch_time,\n data_time=data_time,\n loss=losses,\n lr=optimizer.param_groups[-1]['lr']))\n logger.info(output)\n torch.cuda.synchronize()\n end = time.time()\n\n\ndef validate(dataloader, model, criterion, logger, args, amp_autocast):\n batch_time = utils.AverageMeter()\n losses = utils.AverageMeter()\n model.eval()\n end = time.time()\n # outs = []\n # annos = []\n # labels = []\n # pred_anno_dict = {} # imgs_dir -> anno values\n # pred_label_dict = {} # imgs_dir -> labels\n # anno_dict = {}\n # label_dict = {}\n pred_and_gt = {} # img_p -> [pred, target]\n\n for i, data_batch in enumerate(dataloader):\n temp_data, img_features, annos, labels, seq_info = data_batch\n\n # label preprocess\n labels[labels > 0] = 1 # 1, 2 -> 1\n\n batch_size = labels.shape[0]\n temp_data = 
temp_data.cuda()\n img_features = img_features.cuda()\n # annos = annos.cuda()\n labels = labels.cuda()\n\n with torch.no_grad():\n with amp_autocast():\n out = model(temp_data, img_features)\n loss = criterion(out.reshape(batch_size * args.length, -1),\n labels.reshape(-1)).float()\n\n if not torch.isnan(loss).any():\n # distirbuted reduce\n utils.reduce_loss(loss, args)\n\n losses.update(loss.item(), temp_data.size(0))\n batch_time.update(time.time() - end)\n if args.local_rank == 0 and (i % args.print_freq == 0\n or i == len(dataloader) - 1):\n output = ('Val: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(\n i + 1,\n len(dataloader),\n batch_time=batch_time,\n loss=losses))\n logger.info(output)\n torch.cuda.synchronize()\n\n # record\n img_dirs, fronts, tails = seq_info\n for batch_idx in range(batch_size):\n img_dir = img_dirs[batch_idx]\n front = fronts[batch_idx].item()\n tail = tails[batch_idx].item()\n # assert batch_size == 1, 'batch size should be 1'\n\n img_dir_ps = dataset_utils.scan_jpg_from_img_dir(img_dir)\n # if not img_dir in pred_label_dict:\n # pred_anno_dict[img_dir] = np.zeros(len(img_dir_ps))\n # pred_label_dict[img_dir] = np.zeros(len(img_dir_ps))\n # anno_dict = [img_dir] = np.zeros(len(img_dir_ps))\n # label_dict = [img_dir] = np.zeros(len(img_dir_ps))\n\n pred_label = torch.argmax(out[batch_idx], dim=-1).reshape(-1)\n label = labels[batch_idx].reshape(-1)\n\n for j in range(front, tail):\n img_p = img_dir_ps[j]\n pred_and_gt[img_p] = [\n pred_label[j - front].item(), label[j - front].item()\n ]\n\n # pred_anno_dict[img_dir][front:tail] += pred_annos\n # assert (pred_label_dict[img_dir][front:tail] == 0\n # ).all(), 'should be no overlap'\n # pred_label_dict[img_dir][front:tail] += pred_labels\n # anno_dict[img_dir][front:tail] += annos\n # label_dict[img_dir][front:tail] += labels\n end = time.time()\n\n return losses.avg, pred_and_gt\n" }, { "alpha_fraction": 
0.5714861750602722, "alphanum_fraction": 0.5970962047576904, "avg_line_length": 32.060001373291016, "blob_id": "a9bea4f62ce230bf07f8e7a876469f1935090fdd", "content_id": "bc02dcfaf0f457798decd7af168e034b1391e1cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4959, "license_type": "no_license", "max_line_length": 78, "num_lines": 150, "path": "/preprocess/casme_2_label_generation.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "'''\ngenerate the emotion intensity of each frame\n'''\n\n# %%\nimport pdb\nimport os\nimport os.path as osp\nfrom numpy.core.numeric import ones, ones_like\nfrom numpy.lib.function_base import percentile\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport params\n\n# %% ID2NAME and NAME2ID\n\n# CASME_2_PID2NAME/NAME2PID\ndf = pd.read_csv(osp.join(params.CASME_2_LABEL_DIR, 'naming_rule1.csv'))\ndata = df.values\nCASME_2_PID2NAME = {str(line[-1]): str(line[1]) for line in data}\nCASME_2_NAME2PID = {str(line[1]): str(line[-1]) for line in data}\ndel df\ndel data\n\n# CASME_2_VID2NAME\ndf = pd.read_csv(osp.join(params.CASME_2_LABEL_DIR, 'naming_rule2.csv'))\ndata = df.values\nCASME_2_VID2NAME = {'{:04d}'.format(line[0]): str(line[1]) for line in data}\nCASME_2_NAME2VID = {str(line[1]): '{:04d}'.format(line[0]) for line in data}\ndel df\ndel data\n\nsave_dict_dir = osp.join(params.CASME_2_ROOT, 'ID2NAME2ID')\nos.makedirs(save_dict_dir, exist_ok=True)\nfor p, d in zip(\n ['pid2name', 'name2pid', 'vid2name', 'name2vid'],\n [CASME_2_PID2NAME, CASME_2_NAME2PID, CASME_2_VID2NAME, CASME_2_NAME2VID]):\n np.save(osp.join(save_dict_dir, p + '.npy'), d)\n\n# %% main\nanno_dict = {}\nlabel_dict = {} # 0: none, 1: macro, 2: micro\npred_gt = {} # [[onset, offset, label],...]\nbi_label_dict = {} # store all the img_ps fall into the spotting interval\ndf = pd.read_csv(osp.join(params.CASME_2_LABEL_DIR, 'CASFEcode_final.csv'))\ndata = 
df.values\nfor row in data:\n # construct imgs dir for current row data\n pid = str(row[0])\n vname = row[1].split('_')[0]\n pname = CASME_2_PID2NAME[pid]\n vid = CASME_2_NAME2VID[vname]\n name_code = pname[1:]\n imgs_file_head = name_code + '_' + vid\n for file_name in os.listdir(osp.join(params.CASME_2_VIDEO_DIR, pname)):\n if file_name.startswith(imgs_file_head):\n imgs_dir = osp.join(params.CASME_2_VIDEO_DIR, pname, file_name)\n break\n\n # update emotion intensity and label\n imgs_name = [\n name\n for name in sorted(os.listdir(imgs_dir),\n key=lambda x: int(x.split('.')[0].split('_')[-1]))\n if '.jpg' in name\n ] # first img name: img_1.jpg\n\n onset, apex, offset = row[2:2 + 3]\n onset, apex, offset = int(onset), int(apex), int(offset)\n if onset > 0 and apex > 0 and offset > 0:\n pass\n elif onset > 0 and apex > 0 and offset == 0:\n offset = min(len(imgs_name), apex + (apex - onset))\n elif onset > 0 and apex == 0 and offset > 0:\n apex = (onset + offset) // 2\n else:\n raise Exception\n\n try:\n assert onset < apex and apex < offset\n except:\n print('[Error][{}] onset: {}, apex: {}, offset: {}, '.format(\n imgs_dir, onset, apex, offset))\n continue # skip this row\n\n if not imgs_dir in anno_dict:\n anno_dict[imgs_dir] = np.zeros(len(imgs_name))\n label_dict[imgs_dir] = np.zeros(len(imgs_name))\n pred_gt[imgs_dir] = []\n bi_label_dict[imgs_dir] = []\n\n # convert start index from 1 to 0\n onset -= 1\n apex -= 1\n offset -= 1\n\n # intensity\n sigma = min(offset - apex, apex - onset) // 2\n mu = apex\n func = lambda x: np.exp(-(x - mu)**2 / 2 / sigma / sigma\n ) / sigma / np.sqrt(2 * np.pi)\n # func = lambda x: (x - onset) / (apex - onset) if x >= apex else (\n # offset - x) / (offset - apex)\n\n cumsum = 0\n for i in range(onset, offset + 1):\n anno_dict[imgs_dir][i] += func(i)\n cumsum += anno_dict[imgs_dir][i]\n if cumsum < 0:\n pdb.set_trace()\n # print('onset2offset cumsum: {:.2f}'.format(cumsum))\n # label\n label_dict[imgs_dir][onset:offset +\n 1] = 
1 if 'macro' in str(row[-2]).lower() else 2\n # pred_gt\n pred_gt[imgs_dir].append(\n [onset, offset + 1, 1 if 'macro' in str(row[-2]).lower() else 2])\n # bi_label\n bi_label_dict[imgs_dir].extend(\n [osp.join(imgs_dir, name) for name in imgs_name[onset:offset + 1]])\n\nnp.save(osp.join(params.CASME_2_LABEL_DIR, 'anno_dict.npy'), anno_dict)\nnp.save(osp.join(params.CASME_2_LABEL_DIR, 'label_dict.npy'), label_dict)\nnp.save(osp.join(params.CASME_2_LABEL_DIR, 'pred_gt.npy'), pred_gt)\nnp.save(osp.join(params.CASME_2_LABEL_DIR, 'bi_label.npy'), bi_label_dict)\n\n# %% visulization\n# fig = plt.figure(figsize=(30, 50))\n# for i, (k, v) in enumerate(anno_dict.items()):\n# fig.add_subplot((len(anno_dict) - 1) // 5 + 1, 5, i + 1)\n# plt.plot(v)\n# fig.tight_layout()\n# plt.savefig('./CASME_2_annos.pdf')\n# plt.show()\n\ncolumn = 5\nfig = plt.figure(figsize=(30, ((len(label_dict) - 1) // column + 1) * 2))\nfor i, (k, v) in enumerate(label_dict.items()):\n v[v > 0] = 1 # 1,2 -> 1\n fig.add_subplot((len(label_dict) - 1) // column + 1, column, i + 1)\n plt.plot(v, 'r-')\n plt.title(osp.basename(k))\nfig.tight_layout()\nout_dir = './preprocess'\nplt.savefig(osp.join(out_dir, 'ca_bi_label.pdf'))\nplt.close('all')\n\n# %%\n" }, { "alpha_fraction": 0.561196506023407, "alphanum_fraction": 0.5809549689292908, "avg_line_length": 30.413793563842773, "blob_id": "d58707eafc77be447ca132ff1c032806c2ac5921", "content_id": "a18e67da9496e5b4d29c1c12d161454422062674", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3644, "license_type": "no_license", "max_line_length": 77, "num_lines": 116, "path": "/preprocess/samm_2_label_generation.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "'''\ngenerate the emotion intensity of each frame\n'''\n\n# %%\nimport os\nimport pdb\nimport os.path as osp\nfrom numpy.core.numeric import ones\nfrom numpy.lib.function_base import percentile\nimport pandas as pd\nimport 
numpy as np\nimport matplotlib.pyplot as plt\n\nimport params\n\n# %% main\nanno_dict = {} # intensity\nlabel_dict = {} # 0: none, 1: macro, 2: micro\npred_gt = {} # [[onset, offset, label],...]\nbi_label_dict = {} # store all the img_ps fall into the spotting interval\ndf = pd.read_csv(osp.join(params.SAMM_ROOT, 'SAMM_labels.csv'))\ndata = df.values\nfor row in data:\n # construct imgs dir for current row data\n file_name = row[1][:5]\n imgs_dir = osp.join(params.SAMM_VIDEO_DIR, file_name)\n assert osp.exists(imgs_dir)\n\n # update emotion intensity and label\n imgs_name = [\n name\n for name in sorted(os.listdir(imgs_dir),\n key=lambda x: int(x.split('.')[0].split('_')[-1]))\n if '.jpg' in name\n ] # first img name: xxx_x_0001.jpg\n\n onset, apex, offset = row[3:3 + 3]\n onset, apex, offset = int(onset), int(apex), int(offset)\n if onset > 0 and apex > 0 and offset > 0:\n pass\n elif onset > 0 and apex > 0 and offset == -1:\n offset = min(len(imgs_name), apex + (apex - onset))\n elif onset > 0 and apex == -1 and offset > 0:\n apex = (onset + offset) // 2\n else:\n raise Exception\n\n try:\n assert onset < apex and apex < offset\n except:\n print('[Error][{}] onset: {}, apex: {}, offset: {}, '.format(\n imgs_dir, onset, apex, offset))\n continue # skip this row\n\n if not imgs_dir in anno_dict:\n anno_dict[imgs_dir] = np.zeros(len(imgs_name))\n label_dict[imgs_dir] = np.zeros(len(imgs_name))\n pred_gt[imgs_dir] = []\n bi_label_dict[imgs_dir] = []\n\n # convert start index from 1 to 0\n onset -= 1\n apex -= 1\n offset -= 1\n\n # intensity\n sigma = min(offset - apex, apex - onset) // 2 + 1e-7\n if sigma <= 0:\n pdb.set_trace()\n\n mu = apex\n func = lambda x: np.exp(-(x - mu)**2 / 2 / sigma / sigma\n ) / sigma / np.sqrt(2 * np.pi)\n cumsum = 0\n for i in range(onset, offset + 1):\n anno_dict[imgs_dir][i] += func(i)\n cumsum += anno_dict[imgs_dir][i]\n # print('onset2offset cumsum: {:.2f}'.format(cumsum))\n # label\n label_dict[imgs_dir][onset:offset +\n 1] = 1 if 
'macro' in str(row[-2]).lower() else 2\n # pred_gt\n pred_gt[imgs_dir].append(\n [onset, offset + 1, 1 if 'macro' in str(row[-2]).lower() else 2])\n # bi_label\n bi_label_dict[imgs_dir].extend(\n [osp.join(imgs_dir, name) for name in imgs_name[onset:offset + 1]])\n\nnp.save(osp.join(params.SAMM_ROOT, 'anno_dict.npy'), anno_dict)\nnp.save(osp.join(params.SAMM_ROOT, 'label_dict.npy'), label_dict)\nnp.save(osp.join(params.SAMM_ROOT, 'pred_gt.npy'), pred_gt)\nnp.save(osp.join(params.SAMM_ROOT, 'bi_label.npy'), bi_label_dict)\n\n# %% visulization\n# fig = plt.figure(figsize=(30, 50))\n# for i, (k, v) in enumerate(anno_dict.items()):\n# fig.add_subplot((len(anno_dict) - 1) // 5 + 1, 5, i + 1)\n# plt.plot(v)\n# fig.tight_layout()\n# plt.savefig('./SAMM_annos.pdf')\n# plt.show()\n\ncolumn = 5\nfig = plt.figure(figsize=(30, ((len(label_dict) - 1) // column + 1) * 2))\nfor i, (k, v) in enumerate(label_dict.items()):\n v[v > 0] = 1 # 1,2 -> 1\n fig.add_subplot((len(label_dict) - 1) // column + 1, column, i + 1)\n plt.plot(v, 'r-')\n plt.title(osp.basename(k))\nfig.tight_layout()\nout_dir = './preprocess'\nplt.savefig(osp.join(out_dir, 'sa_bi_label.pdf'))\nplt.close('all')\n\n# %%\n" }, { "alpha_fraction": 0.5780442953109741, "alphanum_fraction": 0.5917009711265564, "avg_line_length": 32.49853515625, "blob_id": "22a49f60e6a8c17228e38dc8e1418faeb2c3df17", "content_id": "54adfda2012f108b99af958167450218e51e3c01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11423, "license_type": "no_license", "max_line_length": 80, "num_lines": 341, "path": "/preprocess/CNN_feature_extraction.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "from __future__ import division\nfrom typing import Iterable\nimport cv2\nimport os\nimport time\nimport six\nimport sys\nfrom tqdm import tqdm\nimport argparse\nimport pickle\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport pandas as pd\nimport 
torch.utils.data\nimport os.path as osp\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as transforms\nfrom glob import glob\nimport numbers\nfrom PIL import Image, ImageOps\nimport random\nimport params\n# for torch lower version\nimport torch._utils\nfrom torch.nn import functional as F\ntry:\n torch._utils._rebuild_tensor_v2\nexcept AttributeError:\n\n def _rebuild_tensor_v2(storage, storage_offset, size, stride,\n requires_grad, backward_hooks):\n tensor = torch._utils._rebuild_tensor(storage, storage_offset, size,\n stride)\n tensor.requires_grad = requires_grad\n tensor._backward_hooks = backward_hooks\n return tensor\n\n torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2\nglobal parsed\nimport torch.utils.data as data\n\n# multi thread setting\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\nos.environ[\"NUMEXPR_NUM_THREADS\"] = \"1\"\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\ncv2.ocl.setUseOpenCL(False)\ncv2.setNumThreads(0)\n\n\nclass SAMMDataset(data.Dataset):\n def __init__(self, data_root, transform=None):\n super().__init__()\n self.img_ps = glob(osp.join(data_root, '*/*.jpg'))\n self.transform = transform\n\n def __len__(self):\n return len(self.img_ps)\n\n def __getitem__(self, index):\n img = Image.open(self.img_ps[index]).convert('RGB')\n img = self.transform(img) if self.transform is not None else img\n return img, self.img_ps[index]\n\n\nclass CASME_2Dataset(SAMMDataset):\n def __init__(self, data_root, transform=None):\n super().__init__(data_root, transform)\n self.img_ps = glob(osp.join(data_root, '*/*/*.jpg'))\n\n\ndef load_module_2or3(model_name, model_def_path):\n \"\"\"Load model definition module in a manner that is compatible with\n both Python2 and Python3\n\n Args:\n model_name: The name of the model to be loaded\n model_def_path: The filepath of the module containing the definition\n\n Return:\n The loaded python module.\"\"\"\n if six.PY3:\n import importlib.util\n spec = 
importlib.util.spec_from_file_location(model_name,\n model_def_path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n else:\n import importlib\n dirname = os.path.dirname(model_def_path)\n sys.path.insert(0, dirname)\n module_name = os.path.splitext(os.path.basename(model_def_path))[0]\n mod = importlib.import_module(module_name)\n return mod\n\n\ndef load_model(model_name, MODEL_DIR):\n \"\"\"Load imoprted PyTorch model by name\n\n Args:\n model_name (str): the name of the model to be loaded\n\n Return:\n nn.Module: the loaded network\n \"\"\"\n model_def_path = osp.join(MODEL_DIR, model_name + '.py')\n weights_path = osp.join(MODEL_DIR, model_name + '.pth')\n mod = load_module_2or3(model_name, model_def_path)\n func = getattr(mod, model_name)\n net = func(weights_path=weights_path)\n return net\n\n\ndef compose_transforms(meta,\n resize=256,\n center_crop=True,\n override_meta_imsize=False):\n \"\"\"Compose preprocessing transforms for model\n\n The imported models use a range of different preprocessing options,\n depending on how they were originally trained. 
Models trained in MatConvNet\n typically require input images that have been scaled to [0,255], rather\n than the [0,1] range favoured by PyTorch.\n\n Args:\n meta (dict): model preprocessing requirements\n resize (int) [256]: resize the input image to this size\n center_crop (bool) [True]: whether to center crop the image\n override_meta_imsize (bool) [False]: if true, use the value of `resize`\n to select the image input size, rather than the properties contained\n in meta (this option only applies when center cropping is not used.\n\n Return:\n (transforms.Compose): Composition of preprocessing transforms\n \"\"\"\n normalize = transforms.Normalize(mean=meta['mean'], std=meta['std'])\n im_size = meta['imageSize']\n assert im_size[0] == im_size[1], 'expected square image size'\n if center_crop:\n transform_list = [\n transforms.Resize(resize),\n transforms.CenterCrop(size=(im_size[0], im_size[1]))\n ]\n else:\n if override_meta_imsize:\n im_size = (resize, resize)\n transform_list = [transforms.Resize(size=(im_size[0], im_size[1]))]\n\n transform_list += [transforms.ToTensor()]\n if meta['std'] == [1, 1, 1]: # common amongst mcn models\n transform_list += [lambda x: x * 255.0]\n transform_list.append(normalize)\n return transforms.Compose(transform_list)\n\n\ndef augment_transforms(meta,\n resize=256,\n random_crop=True,\n override_meta_imsize=False):\n normalize = transforms.Normalize(mean=meta['mean'], std=meta['std'])\n im_size = meta['imageSize']\n assert im_size[0] == im_size[1], 'expected square image size'\n if random_crop:\n v = random.random()\n transform_list = [\n transforms.Resize(resize),\n RandomCrop(im_size[0], v),\n RandomHorizontalFlip(v)\n ]\n else:\n if override_meta_imsize:\n im_size = (resize, resize)\n transform_list = [transforms.Resize(size=(im_size[0], im_size[1]))]\n transform_list += [transforms.ToTensor()]\n if meta['std'] == [1, 1, 1]: # common amongst mcn models\n transform_list += [lambda x: x * 255.0]\n 
transform_list.append(normalize)\n return transforms.Compose(transform_list)\n\n\nclass RandomCrop(object):\n def __init__(self, size, v):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n self.v = v\n\n def __call__(self, img):\n\n w, h = img.size\n th, tw = self.size\n x1 = int((w - tw) * self.v)\n y1 = int((h - th) * self.v)\n #print(\"print x, y:\", x1, y1)\n assert (img.size[0] == w and img.size[1] == h)\n if w == tw and h == th:\n out_image = img\n else:\n out_image = img.crop(\n (x1, y1, x1 + tw, y1 +\n th)) #same cropping method for all images in the same group\n return out_image\n\n\nclass RandomHorizontalFlip(object):\n \"\"\"Randomly horizontally flips the given PIL.Image with a probability of 0.5\n \"\"\"\n def __init__(self, v):\n self.v = v\n return\n\n def __call__(self, img):\n if self.v < 0.5:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n #print (\"horiontal flip: \",self.v)\n return img\n\n\ndef get_vec(model, layer_name, image):\n bs = image.size(0)\n if parsed.layer_name == 'pool5_full':\n layer_name = 'pool5'\n layer = model._modules.get(layer_name)\n if parsed.layer_name == 'fc7':\n layer_output_size = 4096\n my_embedding = torch.zeros(bs, layer_output_size)\n elif parsed.layer_name == 'fc8':\n my_embedding = torch.zeros(bs, 7)\n elif parsed.layer_name == 'pool5' or parsed.layer_name == 'pool5_full':\n my_embedding = torch.zeros([bs, 512, 7, 7])\n elif parsed.layer_name == 'pool4':\n my_embedding = torch.zeros([bs, 512, 14, 14])\n elif parsed.layer_name == 'pool3':\n my_embedding = torch.zeros([bs, 256, 28, 28])\n elif parsed.layer_name == 'pool5_7x7_s1':\n my_embedding = torch.zeros([bs, 2048, 1, 1])\n elif parsed.layer_name == 'conv5_3_3x3_relu':\n my_embedding = torch.zeros([bs, 512, 7, 7])\n\n def copy_data(m, i, o):\n my_embedding.copy_(o.data)\n\n h = layer.register_forward_hook(copy_data)\n h_x = model(image)\n h.remove()\n if parsed.layer_name == 'pool5' or parsed.layer_name == 
'conv5_3_3x3_relu':\n GAP_layer = nn.AvgPool2d(kernel_size=[7, 7], stride=(1, 1))\n my_embedding = GAP_layer(my_embedding)\n return F.relu(my_embedding.squeeze())\n\n\ndef get_frame_index(frame_path):\n frame_name = frame_path.split('/')[-1]\n frame_num = int(frame_name.split('.')[0].split('_')[-1])\n return frame_num\n\n\ndef predict(data_loader, layer_name, model, des_dir):\n with torch.no_grad():\n for ims, img_path in tqdm(data_loader):\n ims = ims.cuda()\n output = get_vec(model, layer_name, ims)\n if not len(output.shape) == 2:\n output = [\n output,\n ]\n img_path = [\n img_path,\n ]\n for feature, path in zip(output, img_path):\n basename = osp.basename(path)\n des_basename = basename.split('.')[0] + '.npy'\n des_path = path.replace(basename, des_basename)\n np.save(des_path, feature)\n\n\ndef feature_extraction(model, loader, des_dir):\n model.eval()\n predict(loader, parsed.layer_name, model, des_dir)\n\n\ndef main():\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\n MODEL_DIR = params.MODEL_DIR\n model_name = 'resnet50_ferplus_dag'\n model = load_model(model_name, MODEL_DIR)\n model = model.cuda()\n\n meta = model.meta\n preproc_transforms = compose_transforms(\n meta, center_crop=False) if not parsed.augment else augment_transforms(\n meta, random_crop=True)\n\n if parsed.dataset == 'SAMM':\n dataset = SAMMDataset(params.SAMM_VIDEO_DIR, preproc_transforms)\n # parsed.save_root = params.SAMM_FEATURE_DIR\n elif parsed.dataset == 'CASME_2':\n dataset = CASME_2Dataset(params.CASME_2_VIDEO_DIR, preproc_transforms)\n # parsed.save_root = params.CASME_2_FEATURE_DIR\n else:\n raise NotImplementedError\n\n data_loader = torch.utils.data.DataLoader(dataset,\n batch_size=4,\n num_workers=0,\n pin_memory=False)\n\n des_dir = None\n # des_dir = osp.join(\n # parsed.save_root, '_'.join([\n # '{}_features'.format(model_name), 'fps=' + str(parsed.fps),\n # parsed.layer_name\n # ]))\n # os.makedirs(des_dir, exist_ok=True)\n 
feature_extraction(model, data_loader, des_dir)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Run.')\n parser.add_argument('--refresh',\n dest='refresh',\n action='store_true',\n help='refresh feature cache')\n parser.add_argument('--fps',\n type=int,\n default=0,\n help='frames per second to extract')\n parser.add_argument('--layer_name', type=str, default='pool5_7x7_s1')\n parser.add_argument(\n '--augment',\n action=\"store_true\",\n help='whether to extract augmented features for train set only ')\n\n parser.add_argument('--dataset', type=str, default='')\n parsed = parser.parse_args()\n\n parsed.dataset = 'SAMM'\n main()\n" }, { "alpha_fraction": 0.6612529158592224, "alphanum_fraction": 0.6670533418655396, "avg_line_length": 26.80645179748535, "blob_id": "1fa558cc612f6c471d7a63b38e6052da15b3b949", "content_id": "18fa21c97a0a0aa5fd2d9f6e919bb4fca26c3743", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 862, "license_type": "no_license", "max_line_length": 78, "num_lines": 31, "path": "/preprocess/openface/face_crop_align.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "import os\nimport os.path as osp\nfrom tqdm import tqdm\nfrom glob import glob\n\nfrom video_processor import Video_Processor\nimport params\n\n# OpenFace parameters\nsave_size = 224\nOpenFace_exe = params.OpenFace_exe\nquiet = True\nnomask = True\ngrey = False\ntracked_vid = False\nnoface_save = False\n\n# dataset\nvideo_root = params.video_root\n\n# main\nvideo_processor = Video_Processor(save_size, nomask, grey, quiet, tracked_vid,\n noface_save, OpenFace_exe)\nvideo_ps = list(glob(osp.join(video_root, '*/*mp4')))\nvideo_ps.extend(list(glob(osp.join(video_root, '*/*avi'))))\n\nfor video_p in tqdm(video_ps):\n video_name = os.path.basename(video_p).split('.')[0]\n opface_output_dir = os.path.join(os.path.dirname(video_p),\n video_name + \"_opface\")\n 
video_processor.process(video_p, opface_output_dir)\n" }, { "alpha_fraction": 0.5098814368247986, "alphanum_fraction": 0.5204216241836548, "avg_line_length": 41.16666793823242, "blob_id": "cbfd761519634ec30ef3510fc125d4bb1ca9bf95", "content_id": "1999d247d1ffacd7635ca71b142e3fe071870378", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 759, "license_type": "no_license", "max_line_length": 76, "num_lines": 18, "path": "/model/utils.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "import torch.nn as nn\n\ndef init_weights(model):\n for k, m in model.named_modules():\n if isinstance(m, nn.Conv3d) or isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_out',\n nonlinearity='relu')\n # nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n" }, { "alpha_fraction": 0.534990131855011, "alphanum_fraction": 0.5427176356315613, "avg_line_length": 36.01601791381836, "blob_id": "cac3526b274c70a2aac8b446caaa0e96ec4ca392", "content_id": "b414051a8d4a13db9d12b83b48e9063d571b7edf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16176, "license_type": "no_license", "max_line_length": 107, "num_lines": 437, "path": "/main_cls.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom genericpath import exists\nimport os\nfrom typing import Final\nimport cv2\nimport sys\nfrom matplotlib.pyplot import xcorr\nfrom numpy.random import f, sample, shuffle\n\nfrom torch.utils.data import dataset\nfrom config import parser\n\nif len(sys.argv) > 1:\n # use 
shell args\n args = parser.parse_args()\n print('Use shell args.')\nelse:\n # Debug\n args_list = [\n '--dataset',\n 'SAMM',\n '--print-freq',\n '1',\n '--snap',\n 'debug',\n '--data_option',\n 'wt_diff',\n '--gpus',\n '0',\n '--batch_size',\n '2',\n '--input_size',\n '128',\n '--length',\n '64',\n '-L',\n '12',\n '--workers',\n '0',\n ]\n args = parser.parse_args(args_list)\n# os setting\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\nos.environ[\"NUMEXPR_NUM_THREADS\"] = \"1\"\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\ncv2.ocl.setUseOpenCL(False)\ncv2.setNumThreads(0)\nif args.gpus is not None:\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n\nimport re\nimport logging\nimport time\nimport torch\nimport os.path as osp\nimport torch.nn as nn\nimport numpy as np\nimport pandas as pd\nimport torch.distributed as dist\nfrom torch.nn import DataParallel\nfrom torch.nn.parallel import DistributedDataParallel\nfrom datetime import datetime\nfrom tqdm import tqdm\nfrom pprint import pformat\nfrom timm.utils import setup_default_logging, NativeScaler, reduce_tensor, distribute_bn\nfrom timm.data.distributed_sampler import OrderedDistributedSampler\nfrom contextlib import suppress\n\nfrom model.network import Two_Stream_RNN_Cls, load_pretrained_model\nfrom dataset.me_dataset import SAMMDataset, CASME_2Dataset\nimport utils\nimport trainer_cls as trainer\n\n# torch.multiprocessing.set_start_method('spawn')\ntorch.backends.cudnn.benchmark = True\n\n# check resume\nRESUME = osp.exists(args.resume)\n\n# check finetune\nif len(args.finetune_list) > 0:\n assert RESUME\n FINETUNE = True\nelse:\n FINETUNE = False\n\n_logger = logging.getLogger('train')\n# resume\nif RESUME:\n setattr(args, 'save_root', 'results/{}'.format(osp.basename(args.resume)))\nelse:\n snapshot_name = '_'.join(\n [args.snap, datetime.now().strftime(\"%Y%m%d-%H%M%S\")])\n if len(args.store_name) == 0:\n args.store_name = snapshot_name\n setattr(args, 'save_root', 
'results/{}'.format(args.store_name))\n# make dirs\nif args.local_rank == 0:\n utils.check_rootfolders(args)\nelse:\n time.sleep(1)\n# setup logging\nsetup_default_logging(\n log_path=os.path.join(args.save_root, args.root_log, 'run.log'))\n_logger.info(\"save experiment to :{}\".format(args.save_root))\n# save args\nif args.local_rank == 0:\n args_string = pformat(args.__dict__)\n _logger.info(args_string)\n\n # reset random\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed(args.seed)\ntorch.cuda.manual_seed_all(args.seed)\nnp.random.seed(args.seed)\n\n# if distributed\nif args.distributed and 'WORLD_SIZE' in os.environ:\n args.distributed = int(os.environ['WORLD_SIZE']) > 1\nargs.device = 'cuda'\nargs.world_size = 1\nargs.rank = 0 # global rank\nif args.distributed:\n args.device = 'cuda:%d' % args.local_rank\n torch.cuda.set_device(args.local_rank)\n dist.init_process_group(backend='nccl', init_method='env://')\n args.world_size = dist.get_world_size()\n args.rank = dist.get_rank()\n _logger.info(\n 'Training in distributed mode with multiple processes, 1 GPU per process. 
Process %d, total %d.'\n % (args.rank, args.world_size))\n# else:\n# _logger.info('Training with a single process on 1 GPUs.')\nassert args.rank >= 0\nutils.synchronize()\n\n# loss_fn\ncriterion = utils.Focal_Loss(alpha=args.focal_alpha)\n\n# leave one subject out cross validation\nimg_dirs = utils.get_img_dirs(args.dataset)\nimg_dirs_dict = utils.leave_one_out(\n img_dirs, args.dataset) # key -> [train_set, val_set]\n\n# finetuen and resume\nif RESUME:\n total_MNA = np.load(osp.join(args.resume, args.root_output,\n 'cross_validation_MNA_dict.npy'),\n allow_pickle=True).item()\n match_regions_record_all = np.load(osp.join(\n args.resume, args.root_output, 'match_regions_record_all.npy'),\n allow_pickle=True).item()\n if not FINETUNE:\n keys1 = list(total_MNA.keys())\n # keys2 = list(match_regions_record_all.keys())\n rm_key = keys1[-1] # after python 3.6, order is guaranteed\n if args.delete_last:\n # delete the last subject results\n total_MNA, match_regions_record_all = utils.delete_records(\n total_MNA, match_regions_record_all, rm_key)\n if args.local_rank == 0:\n _logger.info('resume from subject {} (include)'.format(rm_key))\n elif args.local_rank == 0:\n _logger.info('resume from subject {} (not include)'.format(rm_key))\n else:\n if args.local_rank == 0:\n _logger.info('finetune subjects: [{}]'.format(','.join(\n args.finetune_list)))\nelse:\n total_MNA = {} # store all cross-validation results\n match_regions_record_all = {}\nutils.synchronize()\n\nfor vi, (val_id, [train_dirs, val_dirs]) in enumerate(img_dirs_dict.items()):\n # leave {val_id} out...\n\n # FINETUNE has higher priority than RESUME\n if FINETUNE and (val_id not in args.finetune_list):\n continue # skip subjects that do not need finetune\n if RESUME and (not FINETUNE) and (val_id in total_MNA):\n continue # skip from resume\n\n if val_id in args.finetune_list:\n # delete records\n total_MNA, match_regions_record_all = utils.delete_records(\n total_MNA, match_regions_record_all, val_id)\n\n if 
args.data_option == 'diff':\n inchannel = args.L\n elif args.data_option == 'wt_diff':\n inchannel = 4 * args.L\n elif args.data_option == 'wt_dr':\n inchannel = (\n args.L + 1 - 11 +\n 1) * 2 * 4 # gauss kernel size = 11, *2 = dr1,dr2, *4 = 4 bands\n\n # amp\n amp_autocast = suppress # do nothing\n loss_scaler = None\n if args.amp:\n amp_autocast = torch.cuda.amp.autocast\n loss_scaler = NativeScaler()\n if args.local_rank == 0:\n _logger.info(\n 'Using native Torch AMP. Training in mixed precision.')\n else:\n if args.local_rank == 0:\n _logger.info('AMP not enabled. Training in float32.')\n\n # model\n model = Two_Stream_RNN_Cls(mlp_hidden_units=args.hidden_units,\n inchannel=inchannel,\n outchannel=2)\n # load pretrained\n if osp.exists(args.load_pretrained):\n model = load_pretrained_model(model, args.load_pretrained,\n args.load_bn)\n if args.local_rank == 0:\n _logger.info('Load pretrained model from {}[load_bn: {}]'.format(\n args.load_pretrained, args.load_bn))\n # pytorch_total_params = sum(p.numel() for p in model.parameters()\n # if p.requires_grad)\n # print(\"Total Params: {}\".format(pytorch_total_params))\n model = model.cuda()\n\n # setup synchronized BatchNorm for distributed training\n if args.distributed:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n # if args.local_rank == 0:\n # _logger.info(\n # 'Converted model to use Synchronized BatchNorm. 
WARNING: You may have issues if using '\n # 'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.'\n # )\n\n # optimizer\n if args.optim == 'SGD':\n optimizer = torch.optim.SGD(\n [p for p in model.parameters() if p.requires_grad],\n args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n elif args.optim == 'Adam':\n optimizer = torch.optim.Adam(\n [p for p in model.parameters() if p.requires_grad],\n args.lr,\n weight_decay=args.weight_decay)\n else:\n raise NotImplementedError\n\n # setup distributed training\n if args.distributed:\n model = DistributedDataParallel(model,\n device_ids=[args.local_rank],\n find_unused_parameters=True)\n else:\n model = DataParallel(model).cuda()\n\n # dataset\n Dataset = SAMMDataset if args.dataset == 'SAMM' else CASME_2Dataset\n\n def create_dataset():\n train_dataset = Dataset(\n mode='train',\n img_dirs=train_dirs,\n seq_len=args.length,\n step=args.step,\n # step=1000, # !!\n time_len=args.L,\n input_size=args.input_size,\n data_aug=args.data_aug,\n data_option=args.data_option)\n val_dataset = Dataset(\n mode='test',\n img_dirs=val_dirs,\n seq_len=args.length,\n step=args.length, # assert no overlap\n # step=1000, # !!\n time_len=args.L,\n input_size=args.input_size,\n data_aug=False,\n data_option=args.data_option)\n return train_dataset, val_dataset\n\n train_dataset, val_dataset = create_dataset()\n if args.distributed:\n val_sampler = OrderedDistributedSampler(val_dataset)\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n val_sampler = None\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(train_dataset,\n shuffle=train_sampler is None,\n sampler=train_sampler,\n batch_size=args.batch_size,\n drop_last=False,\n num_workers=args.workers,\n pin_memory=False)\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=args.batch_size,\n shuffle=False,\n sampler=val_sampler,\n num_workers=0,\n 
pin_memory=False,\n drop_last=False)\n\n if args.local_rank == 0:\n _logger.info('<' * 10 + ' {} '.format(val_id) + '<' * 10)\n best_f_score = -1000.0\n best_loss = 1000.0\n val_accum_epochs = 0\n for epoch in range(args.epochs):\n if train_sampler is not None:\n train_sampler.set_epoch(epoch)\n utils.adjust_learning_rate(optimizer, epoch, args.lr,\n args.weight_decay, args.lr_steps,\n args.lr_decay_factor)\n trainer.train(train_loader, model, criterion, optimizer, epoch,\n _logger, args, amp_autocast, loss_scaler)\n utils.synchronize()\n\n # bn syn\n if args.distributed:\n if args.local_rank == 0:\n _logger.info(\"Distributing BatchNorm running means and vars\")\n distribute_bn(model, args.world_size,\n True) # true for reduce, false for broadcast\n\n # logging\n if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:\n loss_val, pred_and_gt = trainer.validate(val_loader, model,\n criterion, _logger, args,\n amp_autocast)\n\n # distributed synchronize\n pred_and_gt = utils.synchronize_pred_and_gt(\n pred_and_gt, epoch, args)\n\n # eval\n if args.local_rank == 0:\n precision, recall, f_score, MNA, match_regions_record = utils.evaluate_bi_labels(\n pred_and_gt, val_id, epoch, args)\n else:\n f_score = -10.0\n MNA = (0, 0, 0)\n # precision, recall, f_score, MNA, match_regions_record = utils.evaluate_bi_labels(\n # pred_and_gt, val_id, epoch, args)\n utils.synchronize()\n\n # synchronize\n f_score = utils.synchronize_f_score(f_score, args)\n _logger.info('f_score of processor {}: {:.4f}'.format(\n args.local_rank, f_score))\n MNA = utils.synchronize_list(MNA, args)\n _logger.info('MNA of processor {}: {}'.format(\n args.local_rank, MNA))\n\n is_equal_score = f_score == best_f_score\n is_best_loss = loss_val < best_loss\n best_loss = min(loss_val, best_loss)\n is_best_score = f_score > best_f_score\n best_f_score = max(best_f_score, f_score)\n\n # save checkpoint\n if args.local_rank == 0:\n _logger.info(\n 'Test[{}]: loss_val: {:.4f} (best: {:.4f}), f-score: 
{:.4f} (best: {:.4f})'\n .format(epoch, loss_val, best_loss, f_score, best_f_score))\n utils.save_checkpoint(\n {\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n },\n is_best_score,\n args.save_root,\n args.root_model,\n filename=val_id)\n utils.synchronize()\n\n if is_best_score or (is_equal_score and\n MNA[1] < total_MNA.get(val_id, [0, 0, 0])[1]):\n val_accum_epochs = 0\n total_MNA.update(\n {val_id:\n MNA}) # processor 0 need this record for branch selection\n if args.local_rank == 0:\n match_regions_record_all.update(\n match_regions_record\n ) # only processor 0 need this record\n out_dir = osp.join(args.save_root, args.root_output,\n val_id)\n os.makedirs(out_dir, exist_ok=True)\n np.save(osp.join(out_dir, 'match_regions_record_best.npy'),\n match_regions_record)\n # all\n np.save(\n osp.join(args.save_root, args.root_output,\n 'cross_validation_MNA_dict.npy'), total_MNA)\n np.save(\n osp.join(args.save_root, args.root_output,\n 'match_regions_record_all.npy'),\n match_regions_record_all)\n precision, recall, f_score = utils.calculate_metric_from_dict_MNA(\n total_MNA)\n _logger.info(\n 'Test[all] Avg f-score now: {:.4f}'.format(f_score))\n utils.synchronize()\n else:\n val_accum_epochs += 1\n\n if val_accum_epochs >= args.early_stop:\n _logger.info(\n \"validation ccc did not improve over {} epochs, stop processor {}\"\n .format(args.early_stop, args.local_rank))\n break\n if args.local_rank == 0:\n precision_all, recall_all, f_score_all = utils.calculate_metric_from_dict_MNA(\n total_MNA)\n _logger.critical(\n '[{}][{}]/[{}] f_score: {:.4f}, precision_all: {:.4f}, recall_all: {:.4f}, f_score_all: {:.4f}'\n .format(val_id, vi + 1, len(img_dirs_dict), best_f_score,\n precision_all, recall_all, f_score_all))\n\n# store results\nif args.local_rank == 0:\n np.save(\n osp.join(args.save_root, args.root_output,\n 'cross_validation_MNA_dict.npy'), total_MNA)\n np.save(\n osp.join(args.save_root, args.root_output,\n 'match_regions_record_all.npy'), 
match_regions_record_all)\n _logger.info('ALL DONE')\nexit()\n" }, { "alpha_fraction": 0.6775384545326233, "alphanum_fraction": 0.7353846430778503, "avg_line_length": 51.41935348510742, "blob_id": "1993a376cbafbb88c4a7545370cf303e4a338353", "content_id": "1b965acd20239a1ef32f3843d04780d7f54196aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1625, "license_type": "no_license", "max_line_length": 213, "num_lines": 31, "path": "/README.md", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "# Solution of ACM MM21 FME challenge - spotting track\n## Requirements\n- python 3.9.5\n- torch 1.8.0\n- timm 0.4.12\n- pytorch-benchmarks (clone from [github](https://github.com/albanie/pytorch-benchmarks))\n\n## Run - CAS(ME)$^2$ as example\n\n### Pretrianed Weights\n1. Download the weights of resnet50 pretrained on VGGFACE2 and FER$+$ from [here](https://www.robots.ox.ac.uk/~albanie/pytorch-models.html).\n2. Download the weights of our model pretrained on Aff-wild from [here](https://cloud.tsinghua.edu.cn/d/58af3b49570741ab82f3/). Alternatively, you can download the Aff-wild dataset and train the model by yourself.\n3. Change the paths of pretrained model properly.\n\n### Dataset and Pre-processing\n1. Download and extract the dataset.\n2. Download the csv files reorganized by us from [here](https://cloud.tsinghua.edu.cn/d/58af3b49570741ab82f3/).\n3. Change the data paths properly.\n4. Run `python preprocess/casme_2_label_generation.py` for label generation.\n5. 
Run `python CNN_feature_extraction.py` for spatial feature extraction.\n\n### Train with leave-one-subject-out cross-validaiton\n```\nnohup python -m torch.distributed.launch --nproc_per_node 2 main_cls.py --distributed --amp \\\n--dataset CASME_2 --snap exp_cls_ca --print-freq 50 --gpus 0,1 \\\n--data_option wt_diff --workers 8 --batch_size 8 --input_size 128 --length 64 --step 64 -L 12 \\\n--optim SGD --lr 1e-3 --lr_steps 3 5 --lr_decay_factor 0.3 \\\n--patience 10 --focal_alpha 1 14 --early_stop 3 --epochs 8 \\\n--load_pretrained /home/gjz/fmr_backbone/pretrained_models/wtdf1_wt_size:112_length:64_L:12/model/fold_0_best_loss.pth.tar \\\n> running_cls_ca.log 2>&1 &\n```\n" }, { "alpha_fraction": 0.5090850591659546, "alphanum_fraction": 0.5211448669433594, "avg_line_length": 30.897436141967773, "blob_id": "bae4e177e46df89c3fbf076b213468ecbae61576", "content_id": "23487b23baf4cb755d9127912f31cfeb41807db8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6219, "license_type": "no_license", "max_line_length": 77, "num_lines": 195, "path": "/dataset/utils.py", "repo_name": "guanjz20/MM21_FME_solution", "src_encoding": "UTF-8", "text": "from albumentations.augmentations.transforms import GaussNoise\nimport cv2\nimport os\nimport numpy as np\nimport os.path as osp\nimport albumentations as alb\n# from torch._C import Ident\n# from torch.nn.modules.linear import Identity\n\n\nclass IsotropicResize(alb.DualTransform):\n def __init__(self,\n max_side,\n interpolation_down=cv2.INTER_AREA,\n interpolation_up=cv2.INTER_CUBIC,\n always_apply=False,\n p=1):\n super(IsotropicResize, self).__init__(always_apply, p)\n self.max_side = max_side\n self.interpolation_down = interpolation_down\n self.interpolation_up = interpolation_up\n\n def apply(self,\n img,\n interpolation_down=cv2.INTER_AREA,\n interpolation_up=cv2.INTER_CUBIC,\n **params):\n return isotropically_resize_image(\n img,\n size=self.max_side,\n 
interpolation_down=interpolation_down,\n interpolation_up=interpolation_up)\n\n def apply_to_mask(self, img, **params):\n return self.apply(img,\n interpolation_down=cv2.INTER_NEAREST,\n interpolation_up=cv2.INTER_NEAREST,\n **params)\n\n def get_transform_init_args_names(self):\n return (\"max_side\", \"interpolation_down\", \"interpolation_up\")\n\n\nclass Identity():\n def __init__(self):\n pass\n\n def __call__(self, x):\n return x\n\n\nclass GroupTrainTransform():\n def __init__(self):\n self.ImageCompression = alb.ImageCompression(quality_lower=60,\n quality_upper=100,\n p=1),\n self.GaussNoise = alb.GaussNoise(p=1),\n self.GaussianBlur = alb.GaussianBlur(blur_limit=(3, 5), p=1),\n self.HorizontalFlip = alb.HorizontalFlip(p=1),\n self.LightChange = alb.OneOf([\n alb.RandomBrightnessContrast(),\n alb.FancyPCA(),\n alb.HueSaturationValue()\n ],\n p=1),\n self.ShiftRotate = alb.ShiftScaleRotate(\n shift_limit=0.1,\n scale_limit=0.2,\n rotate_limit=10,\n border_mode=cv2.BORDER_CONSTANT,\n p=1),\n\n def _apply_aug(imgs, aug_method):\n for i, img in enumerate(imgs):\n imgs[i] = aug_method(image=img)['image']\n return imgs\n\n def __call__(self, imgs):\n # img compress\n if np.random.random() < 0.3:\n imgs = self._apply_aug(imgs, self.ImageCompression)\n # gauss noise\n if np.random.random() < 0.1:\n imgs = self._apply_aug(imgs, self.GaussNoise)\n # gauss blur\n if np.random.random() < 0.05:\n imgs = self._apply_aug(imgs, self.GaussianBlur)\n # flip\n if np.random.random() < 0.5:\n imgs = self._apply_aug(imgs, self.HorizontalFlip)\n # light\n if np.random.random() < 0.5:\n imgs = self._apply_aug(imgs, self.LightChange)\n # shift rotate\n if np.random.random() < 0.5:\n imgs = self._apply_aug(imgs, self.ShiftRotate)\n return imgs\n\n\nclass GroupTestTransform(Identity):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\ndef get_group_transform(mode):\n if mode == 'train':\n return GroupTrainTransform()\n elif mode == 'test':\n return 
GroupTestTransform()\n else:\n raise (NotImplementedError)\n\n\ndef isotropically_resize_image(img,\n size,\n interpolation_down=cv2.INTER_AREA,\n interpolation_up=cv2.INTER_CUBIC):\n h, w = img.shape[:2]\n if max(w, h) == size:\n return img\n if w > h:\n scale = size / w\n h = h * scale\n w = size\n else:\n scale = size / h\n w = w * scale\n h = size\n interpolation = interpolation_up if scale > 1 else interpolation_down\n resized = cv2.resize(img, (int(w), int(h)), interpolation=interpolation)\n return resized\n\n\ndef get_transform(mode, size):\n if mode == 'train':\n return get_train_transform(size)\n elif mode == 'test':\n return get_test_transform(size)\n else:\n raise (NotImplementedError)\n\n\ndef get_test_transform(size):\n return alb.Compose([\n IsotropicResize(max_side=size),\n alb.PadIfNeeded(min_height=size,\n min_width=size,\n border_mode=cv2.BORDER_CONSTANT),\n ])\n\n\ndef get_train_transform(size):\n return alb.Compose([\n # alb.GaussNoise(p=0.1),\n # alb.GaussianBlur(blur_limit=(3, 5), p=0.1),\n alb.HorizontalFlip(),\n alb.OneOf([\n IsotropicResize(max_side=size,\n interpolation_down=cv2.INTER_AREA,\n interpolation_up=cv2.INTER_CUBIC),\n IsotropicResize(max_side=size,\n interpolation_down=cv2.INTER_AREA,\n interpolation_up=cv2.INTER_LINEAR),\n IsotropicResize(max_side=size,\n interpolation_down=cv2.INTER_LINEAR,\n interpolation_up=cv2.INTER_LINEAR),\n ],\n p=1),\n alb.PadIfNeeded(min_height=size,\n min_width=size,\n border_mode=cv2.BORDER_CONSTANT),\n # alb.OneOf([\n # alb.RandomBrightnessContrast(),\n # alb.FancyPCA(),\n # alb.HueSaturationValue()\n # ],\n # p=0.5),\n # alb.ToGray(p=0.2),\n # alb.ShiftScaleRotate(shift_limit=0.1,\n # scale_limit=0.1,\n # rotate_limit=5,\n # border_mode=cv2.BORDER_CONSTANT,\n # p=0.5),\n ])\n\n\n\ndef scan_jpg_from_img_dir(img_dir):\n img_ps = [\n osp.join(img_dir, name)\n for name in sorted(os.listdir(img_dir),\n key=lambda x: int(x.split('.')[0].split('_')[-1]))\n if '.jpg' in name # !! 
sort key\n ]\n return img_ps" } ]
17
gowtham59/fgh
https://github.com/gowtham59/fgh
af53de51866921a32a9e004c5c299f55a8619ee2
46d60b54f5825fe7e5cde5b600ff7a9bff22b50a
1855eef2c423d183811618d2e6e94ea7c02979d8
refs/heads/master
"2020-06-17T18:06:29.114874"
"2019-07-09T12:16:52"
"2019-07-09T12:16:52"
196,002,037
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49295774102211, "alphanum_fraction": 0.6619718074798584, "avg_line_length": 16.75, "blob_id": "c855f61b9679a795e12f92b2f928e115e3c6c4de", "content_id": "8f838598fd36e2eced08bd12e92a766763bc304a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 23, "num_lines": 4, "path": "/g.py", "repo_name": "gowtham59/fgh", "src_encoding": "UTF-8", "text": "f12,f22=input().split()\nf22=int(f22)\nfor y in range(f22):\n print(f12)\n" } ]
1
wendeehsu/MangoClassification
https://github.com/wendeehsu/MangoClassification
fd0befdd062dd41ab94d01431f85cb69395c694c
ba147fae2bd8fd941ea82ce078e001a93e9a2737
6653ff3afb6983e42105228c0efa060c1c5c9866
refs/heads/master
"2022-10-10T09:39:30.139943"
"2020-06-10T15:43:33"
"2020-06-10T15:43:33"
260,643,462
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.66291344165802, "alphanum_fraction": 0.6790992021560669, "avg_line_length": 29.223403930664062, "blob_id": "f52cb2823349ab0a85d8b90f7a5fa24bc17cdd27", "content_id": "7f0dce455c884def0427e53ed6d5fe7da4d69b25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2928, "license_type": "no_license", "max_line_length": 97, "num_lines": 94, "path": "/train_incept.py", "repo_name": "wendeehsu/MangoClassification", "src_encoding": "UTF-8", "text": "\"\"\"# Load libraries\"\"\"\n\nimport os, shutil\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport pandas as pd\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.layers import Activation, Dense, GlobalAveragePooling2D, Dropout\nfrom keras.layers.core import Flatten\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nimport tensorflow as tf\n\n\"\"\"# Check files\"\"\"\n\npath = \"./C1-P1_Train/\"\nclass_names = [\"A\",\"B\",\"C\"]\nclassNum = len(class_names)\n\n\"\"\"# Load Data\"\"\"\ntraindf=pd.read_csv(\"train.csv\", header=None)\ntraindf = traindf.rename(columns={0: \"name\", 1: \"class\"})\nprint(traindf.head())\ntarget_size = (224,224)\nbatch_size = 20\n\n#ImageDataGenerator() 可以做一些影像處理的動作 \ndatagen = ImageDataGenerator(\n rescale = 1./255,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n brightness_range=[0.2,1.0],\n fill_mode='nearest',\n validation_split=0.2)\n\n#以 batch 的方式讀取資料\ntrain_batches = datagen.flow_from_dataframe(\n dataframe=traindf,\n directory=path,\n x_col=\"name\",\n y_col=\"class\",\n target_size = target_size, \n batch_size = batch_size,\n subset='training')\n\nvalid_batches = datagen.flow_from_dataframe(\n dataframe=traindf,\n directory=path,\n x_col=\"name\",\n 
y_col=\"class\",\n target_size = target_size,\n batch_size = batch_size,\n subset='validation')\n\n\"\"\"# Build model\"\"\"\n\nnet = InceptionV3(include_top=False, weights=\"imagenet\")\nx = net.output\nx = GlobalAveragePooling2D()(x)\nx = Dropout(0.5)(x)\noutput_layer = Dense(classNum, activation='softmax')(x)\n\nFREEZE_LAYERS = 2\n# 設定凍結與要進行訓練的網路層\nnet_final = Model(inputs=net.input, outputs=output_layer)\nfor layer in net_final.layers[:FREEZE_LAYERS]:\n layer.trainable = False\nfor layer in net_final.layers[FREEZE_LAYERS:]:\n layer.trainable = True\n\n# 使用 Adam optimizer,以較低的 learning rate 進行 fine-tuning\nnet_final.compile(optimizer=Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])\nhistory = net_final.fit_generator(train_batches,\n steps_per_epoch = train_batches.samples // batch_size,\n validation_data=valid_batches,\n validation_steps = valid_batches.samples // batch_size,\n epochs=30)\n\nnet_final.save(\"models/mango_Incept.h5\")\n\nSTEP_SIZE_VALID = valid_batches.n // valid_batches.batch_size\nresult = net_final.evaluate_generator(generator=valid_batches, steps=STEP_SIZE_VALID, verbose=1)\nprint(\"result = \", result)\n\n# plot metrics\nplt.plot(history.history['accuracy'])\nplt.show()\nplt.savefig('accuracy.jpg')\n\n" }, { "alpha_fraction": 0.7291169166564941, "alphanum_fraction": 0.7416467666625977, "avg_line_length": 30.037036895751953, "blob_id": "524d92329bd76cb7fd24dbefaea34345f425d218", "content_id": "dd79b5f6cf51cc2194511eccaad6e297f9463a33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1716, "license_type": "no_license", "max_line_length": 87, "num_lines": 54, "path": "/test.py", "repo_name": "wendeehsu/MangoClassification", "src_encoding": "UTF-8", "text": "import os, shutil\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tensorflow.python.keras.models import load_model\nfrom tensorflow.python.keras.models import Model\nfrom 
tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\n\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\n\npath = \"/C1-P1_Dev\"\nclass_names = [\"A\",\"B\",\"C\"]\n\ndic = {}\nfor className in class_names:\n dir = path+\"/\"+className\n files = os.listdir(dir)\n imageNum = len(files)\n randomNums = random.sample(range(imageNum), imageNum)\n dic[className] = imageNum\n\nplt.bar(range(len(dic)), list(dic.values()), align='center')\nplt.xticks(range(len(dic)), list(dic.keys()))\nprint(dic)\nplt.show()\n\ntarget_size = (224,224)\nbatch_size = 1\n\n#ImageDataGenerator() 可以做一些影像處理的動作 \ndatagen = ImageDataGenerator(rescale = 1./255,)\n\n#以 batch 的方式讀取資料\npredict_batches = datagen.flow_from_directory(\n path,\n shuffle=False,\n target_size = target_size, \n batch_size = batch_size,\n classes = class_names)\n\nresnet = load_model(\"models/mango_resnet152.h5\")\n# print(resnet.summary())\nfilenames = predict_batches.filenames\nnb_samples = len(filenames)\npredict = resnet.predict(predict_batches, steps = nb_samples, verbose = 1)\ny_pred = np.argmax(predict, axis=1)\nprint('confusion matrix')\nprint(confusion_matrix(predict_batches.classes, y_pred))\nprint(classification_report(predict_batches.classes, y_pred, target_names=class_names))\n" }, { "alpha_fraction": 0.8571428656578064, "alphanum_fraction": 0.8571428656578064, "avg_line_length": 55, "blob_id": "e979c8f8dcd343195d8468657b2a4112488e418f", "content_id": "097273b68c876572f154428364b7444b5d2f9fba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 112, "license_type": "no_license", "max_line_length": 89, "num_lines": 2, "path": "/README.md", "repo_name": "wendeehsu/MangoClassification", "src_encoding": "UTF-8", "text": 
"# MangoClassification\nUse machine learning to classify mango into the corresponding type indicating its quality\n" }, { "alpha_fraction": 0.6323529481887817, "alphanum_fraction": 0.6518382430076599, "avg_line_length": 26.46464729309082, "blob_id": "ada996c5ee34b0bb3917dfee3f743ea67766e4e2", "content_id": "652b297cb5076871303861247859dcabe33885cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2872, "license_type": "no_license", "max_line_length": 97, "num_lines": 99, "path": "/train.py", "repo_name": "wendeehsu/MangoClassification", "src_encoding": "UTF-8", "text": "\"\"\"# Load libraries\"\"\"\n\nimport os, shutil\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport pandas as pd\nfrom keras.applications.resnet import ResNet152\nfrom keras.layers.core import Dense, Flatten\nfrom keras.layers import Activation,Dropout\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\"\"\"# Check files\"\"\"\n\npath = \"./C1-P1_Train/\"\nclass_names = [\"A\",\"B\",\"C\"]\nclassNum = len(class_names)\n\n\"\"\"# Load Data\"\"\"\ntraindf=pd.read_csv(\"train.csv\", header=None)\ntraindf = traindf.rename(columns={0: \"name\", 1: \"class\"})\nprint(traindf.head())\ntarget_size = (224,224)\nbatch_size = 20\n\n#ImageDataGenerator() 可以做一些影像處理的動作 \ndatagen = ImageDataGenerator(\n rescale = 1./255,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n brightness_range=[0.2,1.0],\n fill_mode='nearest',\n validation_split=0.2)\n\n#以 batch 的方式讀取資料\ntrain_batches = datagen.flow_from_dataframe(\n dataframe=traindf,\n directory=path,\n x_col=\"name\",\n y_col=\"class\",\n target_size = target_size, \n batch_size = batch_size,\n subset='training')\n\nvalid_batches = datagen.flow_from_dataframe(\n dataframe=traindf,\n directory=path,\n x_col=\"name\",\n y_col=\"class\",\n target_size = target_size,\n 
batch_size = batch_size,\n subset='validation')\n\n\"\"\"# Build model\"\"\"\n\n# 凍結網路層數\nFREEZE_LAYERS = 2\nnet = ResNet152(include_top=False, \n weights=\"imagenet\", \n input_tensor=None,\n input_shape=(target_size[0],target_size[1],classNum),\n classes=classNum)\nx = net.output\nx = Flatten()(x)\n\n# 增加 Dense layer,以 softmax 產生個類別的機率值\n# x = Dense(256, activation='softmax', name='output2_layer')(x)\n\n# 增加 DropOut layer\nx = Dropout(0.5)(x)\noutput_layer = Dense(classNum, activation='softmax', name='softmax')(x)\n\n# 設定凍結與要進行訓練的網路層\nnet_final = Model(inputs=net.input, outputs=output_layer)\nfor layer in net_final.layers[:FREEZE_LAYERS]:\n layer.trainable = False\nfor layer in net_final.layers[FREEZE_LAYERS:]:\n layer.trainable = True\n\n# 使用 Adam optimizer,以較低的 learning rate 進行 fine-tuning\nnet_final.compile(optimizer=Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])\n\n# 輸出整個網路結構\n# print(net_final.summary())\n\n\"\"\"# Train\"\"\"\n\n# 訓練模型\nhistory = net_final.fit(train_batches,\n steps_per_epoch = train_batches.samples // batch_size,\n validation_data = valid_batches,\n validation_steps = valid_batches.samples // batch_size,\n epochs = 30)\n\nnet_final.save(\"models/mango_resnet152.h5\")\n\n" } ]
4
ubombar/financial-simulations
https://github.com/ubombar/financial-simulations
2d15e11e9ec48861f0abc2c076431ca78baa5b6b
139d606c283608266abec54b08bb5ef785f43d01
885b8b066f048a10b12ca25c4500d0311bec45fe
refs/heads/main
"2023-06-18T19:07:52.602894"
"2021-07-18T13:44:25"
"2021-07-18T13:44:25"
387,055,438
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5297592878341675, "alphanum_fraction": 0.5525164008140564, "avg_line_length": 29.677852630615234, "blob_id": "ac2525f32c3f39a8f04e90c8f7a4d24dbcab871c", "content_id": "2ce826728e8a1226fddeffbf9f0b816d5bbb7367", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4570, "license_type": "no_license", "max_line_length": 143, "num_lines": 149, "path": "/market.py", "repo_name": "ubombar/financial-simulations", "src_encoding": "UTF-8", "text": "from typing import Any, Callable, List, Tuple\nimport uuid\nimport heapq\nimport random\nimport datetime\n\nclass Transaction():\n def __init__(self, mid: uuid.UUID, oid: uuid.UUID, recv: float, asset1: str, send: float, asset2: str) -> None:\n self.id = uuid.uuid1()\n self.mid = mid\n self.oid = oid\n self.asset1 = asset1\n self.asset2 = asset2\n self.recv = recv\n self.send = send\n self.datetime = datetime.datetime.now()\n\n @property\n def rate(self):\n # This will only work on seller's transactions!\n return self.recv / self.send\n\n def __str__(self) -> str:\n return \"rate: {:0.3f}; {:0.2f} {} for {:0.2f} {} @ {}\".format(self.rate, self.recv, self.asset1, self.send, self.asset2, self.datetime)\n\n def __repr__(self) -> str:\n return str(self)\n\nclass Offer():\n def __init__(self, mid:uuid.UUID, vol: float, r: float, handle: Callable[[Transaction], Any]) -> None:\n self.mid = mid\n self.id = uuid.uuid1()\n self.r = r \n self.vol = vol\n self.handle = handle\n \n def __str__(self) -> str:\n return \"(r: {1:0.3f} vol: {0:0.2f})\".format(self.vol, self.r)\n\n def __lt__(self, other):\n return (self.r < other.r)\n\n def __repr__(self) -> str:\n return str(self)\n\nclass Market():\n def __init__(self, asset1: str, asset2: str) -> None:\n self.id = uuid.uuid1()\n\n self.asset1 = asset1 \n self.asset2 = asset2 \n\n self.s: List[Offer] = []\n self.d: List[Offer] = [] \n\n self.transactions: List[Transaction] = []\n \n @property\n def name(self):\n return 
\"{}/{}\".format(self.asset1, self.asset2)\n \n def offer(self, op: str, vol: float, r: float, handle: Callable[[Transaction], Any]) -> uuid.UUID:\n offer = Offer(self.id, vol, r, handle)\n\n if op in 'sell':\n heapq.heappush(self.s, offer)\n else:\n heapq.heappush(self.d, offer)\n\n ps = 0\n pd = len(self.d) - 1\n\n while ps < len(self.s) and pd >= 0:\n seller = self.s[ps]\n buyer = self.d[pd]\n\n (tseller, tbuyer, status) = self.match(seller, buyer)\n\n if status == None: break # no transaction can occur!\n\n seller.vol -= tseller.send # update the balance\n buyer.vol -= tbuyer.send # update the balance\n\n self.transactions.append(tseller) # only add sellers transaction\n\n # surplus supply, delete demand\n # equal trade, delete demand and supply\n # shortage supply, delete supply\n if status in {0, 1}: \n self.d.remove(buyer)\n pd -= 1\n\n if status in {1, 2}:\n self.s.remove(seller)\n ps += 1\n\n return offer.id\n\n def match(self, seller: Offer, buyer: Offer) -> Tuple[Transaction, Transaction, int]:\n if seller.r > buyer.r: return (None, None, None)\n\n state = 0 # 0: surplus, 1: equal, 2: shortage\n\n rhat = (seller.r + buyer.r) / 2 \n\n to_buyer1 = buyer.vol / rhat # buyer recieves apples\n to_seller1 = buyer.vol # seller get dollars\n\n to_seller2 = seller.vol * rhat\n to_buyer2 = seller.vol\n\n if to_buyer1 < to_buyer2: # Surplus supply\n state = 0\n tr1 = Transaction(self.id, seller.id, to_seller1, self.asset2, to_buyer1, self.asset1)\n tr2 = Transaction(self.id, buyer.id, to_buyer1, self.asset1, to_seller1, self.asset2)\n elif to_buyer1 == to_buyer2: # Efficient trade\n state = 1\n tr1 = Transaction(self.id, seller.id, to_seller1, self.asset2, to_buyer1, self.asset1)\n tr2 = Transaction(self.id, buyer.id, to_buyer1, self.asset1, to_seller1, self.asset2)\n else: # Shortage on supply \n state = 2\n tr1 = Transaction(self.id, seller.id, to_seller2, self.asset2, to_buyer2, self.asset1)\n tr2 = Transaction(self.id, buyer.id, to_buyer2, self.asset1, 
to_seller2, self.asset2)\n\n return (tr1, tr2, state)\n\n def remove(self, offerid: int) -> bool:\n return False\n\n\nif __name__ == \"__main__\":\n m = Market(\"apple\", \"dolar\")\n\n random.seed(0)\n\n for i in range(1000):\n seller = random.random() > 0.5\n r = random.random() * 1 + 10\n vol = random.random() * 100 + 100\n\n op = 'sell' if seller else 'buy'\n m.offer(op, vol, r, lambda t: None)\n\n xrate = []\n for t in m.transactions:\n xrate.append(t.recv / t.send)\n print(t)\n\n # print(xrate)" } ]
1
liuhongbo830117/ntire2018_adv_rgb2hs
https://github.com/liuhongbo830117/ntire2018_adv_rgb2hs
c2dbffa857a133f8a5b588090ae8a9aba703252a
f8cc32891afc3bc99fb70d6d30e867a9ed6452b9
4cf4f06bf72333bf0cd8bfd7c76d45ef84b9224e
refs/heads/master
"2021-09-10T12:23:11.280025"
"2018-03-26T08:16:51"
"2018-03-26T08:16:51"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.595033586025238, "alphanum_fraction": 0.6008214950561523, "avg_line_length": 36.985816955566406, "blob_id": "e97ea7a190806a7099e3907989fd697297aee5e3", "content_id": "958f2152c84831f20489c79b9e6648a097bb6f43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5356, "license_type": "no_license", "max_line_length": 119, "num_lines": 141, "path": "/models/mylosses.py", "repo_name": "liuhongbo830117/ntire2018_adv_rgb2hs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom torch.nn.modules import loss\nfrom torch.nn import functional as F\nimport torch\nfrom torch.autograd import Variable\n\nclass RelMAELoss(loss._Loss):\n r\"\"\"Creates a criterion that measures the mean squared error between\n `n` elements in the input `x` and target `y`.\n\n The loss can be described as:\n\n .. math::\n \\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad\n l_n = \\left( x_n - y_n \\right)^2,\n\n where :math:`N` is the batch size. If reduce is ``True``, then:\n\n .. math::\n \\ell(x, y) = \\begin{cases}\n \\operatorname{mean}(L), & \\text{if}\\; \\text{size_average} = \\text{True},\\\\\n \\operatorname{sum}(L), & \\text{if}\\; \\text{size_average} = \\text{False}.\n \\end{cases}\n\n `x` and `y` arbitrary shapes with a total of `n` elements each.\n\n The sum operation still operates over all the elements, and divides by `n`.\n\n The division by `n` can be avoided if one sets the internal variable\n `size_average` to ``False``.\n\n To get a batch of losses, a loss per batch element, set `reduce` to\n ``False``. These losses are not averaged and are not affected by\n `size_average`.\n\n Args:\n size_average (bool, optional): By default, the losses are averaged\n over observations for each minibatch. However, if the field\n size_average is set to ``False``, the losses are instead summed for\n each minibatch. Only applies when reduce is ``True``. 
Default: ``True``\n reduce (bool, optional): By default, the losses are averaged\n over observations for each minibatch, or summed, depending on\n size_average. When reduce is ``False``, returns a loss per batch\n element instead and ignores size_average. Default: ``True``\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Target: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> loss = nn.MSELoss()\n >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)\n >>> target = autograd.Variable(torch.randn(3, 5))\n >>> output = loss(input, target)\n >>> output.backward()\n \"\"\"\n def __init__(self, size_average=True, reduce=True):\n super(RelMAELoss, self).__init__(size_average)\n self.reduce = reduce\n\n def forward(self, input, target):\n input = (input + 1) / 2.0 * 4095.0\n target = (target + 1) / 2.0 * 4095.0\n loss._assert_no_grad(target)\n abs_diff = torch.abs(target - input)\n relative_abs_diff = abs_diff / (target + np.finfo(float).eps)\n rel_mae = torch.mean(relative_abs_diff)\n\n #from eval:\n # compute MRAE\n # diff = gt - rc\n # abs_diff = np.abs(diff)\n # relative_abs_diff = np.divide(abs_diff, gt + np.finfo(float).eps) # added epsilon to avoid division by zero.\n # MRAEs[f] = np.mean(relative_abs_diff)\n return rel_mae\n\n\nclass ZeroGanLoss(loss._Loss):\n r\"\"\"Creates a criterion that measures the mean squared error between\n `n` elements in the input `x` and target `y`.\n\n The loss can be described as:\n\n .. math::\n \\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad\n l_n = \\left( x_n - y_n \\right)^2,\n\n where :math:`N` is the batch size. If reduce is ``True``, then:\n\n .. 
math::\n \\ell(x, y) = \\begin{cases}\n \\operatorname{mean}(L), & \\text{if}\\; \\text{size_average} = \\text{True},\\\\\n \\operatorname{sum}(L), & \\text{if}\\; \\text{size_average} = \\text{False}.\n \\end{cases}\n\n `x` and `y` arbitrary shapes with a total of `n` elements each.\n\n The sum operation still operates over all the elements, and divides by `n`.\n\n The division by `n` can be avoided if one sets the internal variable\n `size_average` to ``False``.\n\n To get a batch of losses, a loss per batch element, set `reduce` to\n ``False``. These losses are not averaged and are not affected by\n `size_average`.\n\n Args:\n size_average (bool, optional): By default, the losses are averaged\n over observations for each minibatch. However, if the field\n size_average is set to ``False``, the losses are instead summed for\n each minibatch. Only applies when reduce is ``True``. Default: ``True``\n reduce (bool, optional): By default, the losses are averaged\n over observations for each minibatch, or summed, depending on\n size_average. When reduce is ``False``, returns a loss per batch\n element instead and ignores size_average. 
Default: ``True``\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Target: :math:`(N, *)`, same shape as the input\n\n Examples::\n\n >>> loss = nn.MSELoss()\n >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)\n >>> target = autograd.Variable(torch.randn(3, 5))\n >>> output = loss(input, target)\n >>> output.backward()\n \"\"\"\n\n def __init__(self, size_average=True, reduce=True):\n super(ZeroGanLoss, self).__init__(size_average)\n self.reduce = reduce\n\n def forward(self, input, target):\n # zero = Variable(torch.Tensor([0]).double())\n zeros = input * 0.\n return torch.sum(zeros)\n" }, { "alpha_fraction": 0.6447442173957825, "alphanum_fraction": 0.6897132992744446, "avg_line_length": 40.046512603759766, "blob_id": "b8521b19f08126114da9550f9b7324f695086e3d", "content_id": "09bfd655fb5f831ae7c8c38c432232cc685a73db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1779, "license_type": "no_license", "max_line_length": 175, "num_lines": 43, "path": "/README.md", "repo_name": "liuhongbo830117/ntire2018_adv_rgb2hs", "src_encoding": "UTF-8", "text": "# Requirements\n\n* python 3.6\n* Python packages: pytorch (torch, torchvision), skimage, spectral, colour, numpy, h5py, PIL, dominate, scipy, hdf5storage, tqdm, joblib \n\n# Execution instructions\n\n* Download the code\n\n```\n$ git clone https://github.com/aitorshuffle/ntire2018_adv_rgb2hs.git\n$ cd ntire2018_adv_rgb2hs\n```\n\n* Place the input RGB images to be processed in the ```datasets/icvl_ntire2018/NTIRE2018_Test_Clean/``` and/or ```datasets/icvl_ntire2018/NTIRE2018_Test_RealWorld``` directory\n\n* Run the rgb to hyperspectral conversion:\n\n\t* Make the execution scripts executable: \n\t```\n\tntire2018_adv_rgb2hs$ chmod 777 ./scripts/test_ntire2018_adv_rgb2hs_Clean.sh\n\tntire2018_adv_rgb2hs$ chmod 777 ./scripts/test_ntire2018_adv_rgb2hs_RealWorld.sh\n\t```\n\t\n\t* Run the execution 
script for each track: \n * Clean track:\n ```\n ntire2018_adv_rgb2hs$ ./scripts/test_ntire2018_adv_rgb2hs_clean.sh \n ```\n\n * RealWorld track:\n ```\n ntire2018_adv_rgb2hs$ ./scripts/test_ntire2018_adv_rgb2hs_clean.sh\n ```\n* Output results will be generated in:\n * Clean track: ```results/29```\n * RealWorld track: ```results/34```\n Each of these contain an images directory, with the predicted hyperspectral mat file in the required format and one RGB image triplet per test image:\n \t * ```TEST_IMG_NAME.mat```: predicted hyperspectral mat file \n * ```TEST_IMG_NAME_real_A.png```: input RGB image\n * ```TEST_IMG_NAME_fake_B.png```: predicted hyperspectral image rendered as sRGB\n * ```TEST_IMG_NAME_real_B.png```: Ground truth hyperspectral image rendered as sRGB. Only makes sense for validation. At test time\n\tThere will also be a ```index.html``` web page rendering all the mentioned rgb triplets. \n " }, { "alpha_fraction": 0.5535633563995361, "alphanum_fraction": 0.5841709971427917, "avg_line_length": 48.74324417114258, "blob_id": "5ca51278848cf297444eb568b23e3f19fa821307", "content_id": "2999f28b237349167e56db41a7dccd8fa7b1f8b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11043, "license_type": "no_license", "max_line_length": 262, "num_lines": 222, "path": "/data/icvl_dataset.py", "repo_name": "liuhongbo830117/ntire2018_adv_rgb2hs", "src_encoding": "UTF-8", "text": "import os.path\nimport random\nimport torchvision.transforms as transforms\nimport torch\n# import torch.nn.functional as F\nfrom data.base_dataset import BaseDataset\nfrom data.image_folder import make_dataset_from_dir_list\nfrom PIL import Image, ImageOps\nimport h5py\nimport numpy as np\nimport spectral\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\nfrom util.spectral_color import dim_ordering_tf2th, dim_ordering_th2tf\n\nclass IcvlNtire2018Dataset(BaseDataset):\n def initialize(self, opt):\n self.opt = opt\n 
self.challenge = opt.challenge # 'Clean' or 'RealWorld'\n self.root = opt.dataroot # e.g. icvl_ntire2018\n assert (opt.phase in ['train', 'Validate', 'Test'])\n self.dirlist_rgb = [os.path.join(self.root, 'NTIRE2018_Train1_' + self.challenge), os.path.join(self.root, 'NTIRE2018_Train2_' + self.challenge)] if opt.phase == 'train' else [os.path.join(self.root, 'NTIRE2018_' + opt.phase + '_' + self.challenge)] # A\n self.dirlist_hs = [os.path.join(self.root, 'NTIRE2018_Train1_Spectral'), os.path.join(self.root, 'NTIRE2018_Train2_Spectral')] if opt.phase == 'train' else [os.path.join(self.root, 'NTIRE2018_' + opt.phase + '_Spectral')] # B\n\n self.paths_rgb = sorted(make_dataset_from_dir_list(self.dirlist_rgb))\n self.paths_hs = sorted(make_dataset_from_dir_list(self.dirlist_hs))\n # self.dir_AB = os.path.join(opt.dataroot, opt.phase)\n # self.AB_paths = sorted(make_dataset(self.dir_AB))\n\n # print('RETURN TO FULL SIZE PATHS_hs and RGB') #fixme\n # self.paths_rgb = self.paths_rgb[:5]\n # self.paths_hs = self.paths_hs[:5]\n\n # to handle envi files, so that we can do partial loads\n self.use_envi = opt.use_envi\n if self.use_envi:\n # update self.dirlist_hs\n self.dirlist_hs_mat = self.dirlist_hs\n self.dirlist_hs = [os.path.join(self.root, 'NTIRE2018_Train_Spectral_envi')]\n\n print(spectral.io.envi.get_supported_dtypes())\n if opt.generate_envi_files:\n self.generate_envi_files(overwrite_envi=opt.overwrite_envi)\n # update self.paths_hs with the hdr files\n self.paths_hs = sorted(make_dataset_from_dir_list(self.dirlist_hs))\n # for dir_hs in self.dirlist_hs:\n # if not os.path.exists(dir_hs):\n\n assert(opt.resize_or_crop == 'resize_and_crop')\n\n def __getitem__(self, index):\n # AB_path = self.AB_paths[index]\n # AB = Image.open(AB_path).convert('RGB')\n # AB = AB.resize((self.opt.loadSize * 2, self.opt.loadSize), Image.BICUBIC)\n # AB = transforms.ToTensor()(AB)\n\n # load rgb image\n path_rgb = self.paths_rgb[index]\n rgb = Image.open(path_rgb)#.convert('RGB')\n 
# fixme set it between 0,1?\n # rgb = transforms.ToTensor()(rgb) # rgb.shape: torch.Size([3, 1392, 1300])\n\n # sample crop locations\n # w = rgb.shape[2] # over the tensor already\n # h = rgb.shape[1] # over the tensor already\n w = rgb.width #store them in self so as to accesswhile testing for cropping final result\n h = rgb.height\n \n w_offset = random.randint(0, max(0, w - self.opt.fineSize - 1))\n h_offset = random.randint(0, max(0, h - self.opt.fineSize - 1))\n\n # actually crop rgb image\n if self.opt.phase.lower() == 'train':\n if self.opt.challenge.lower() == 'realworld':\n # print('realworld<----------------------------------jitter')\n rgb = transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.01)(rgb)\n\n rgb = transforms.ToTensor()(rgb) # rgb.shape: torch.Size([3, 1392, 1300])\n\n # train on random crops\n rgb_crop = rgb[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize] # rgb_crop is created as a tensor already\n\n else:\n topdown_pad = (1536 - h) // 2\n leftright_pad = (1536 - w) // 2\n full_img_padding = (leftright_pad, topdown_pad, leftright_pad, topdown_pad)\n rgb_crop = ImageOps.expand(rgb, full_img_padding)\n rgb_crop = transforms.ToTensor()(rgb_crop)\n\n ## load hs image\n if self.opt.phase == 'train':\n path_hs = self.paths_hs[index]\n if self.use_envi:\n hs = spectral.io.envi.open(path_hs) # https://github.com/spectralpython/spectral/blob/master/spectral/io/envi.py#L282 not loaded yet until read_subregion\n # hs.shape: Out[3]: (1392, 1300, 31) (nrows, ncols, nbands)\n # check dimensions and crop hs image (actually read only that one\n # print(rgb.shape)\n # print(hs.shape)\n assert (rgb.shape[1] == hs.shape[0] and rgb.shape[2] == hs.shape[1])\n hs_crop = (hs.read_subregion(row_bounds=(h_offset, h_offset + self.opt.fineSize), col_bounds=(w_offset, w_offset + self.opt.fineSize))).astype(float)\n # hs_crop.shape = (h,w,c)=(256,256,31) here\n hs_crop = hs_crop / 4095. * 255 # 4096: db max. 
totensor expects in [0, 255]\n hs_crop = transforms.ToTensor()(hs_crop) # convert ndarray (h,w,c) [0,255]-> torch tensor (c,h,w) [0.0, 1.0] #move to GPU only the 256,256 crop!good!\n else:\n mat = h5py.File(path_hs) # b[{'rgb', 'bands', 'rad'}] # Shape: (Bands, Cols, Rows) <-> (bands, samples, lines)\n hs = mat['rad'].value # ndarray (c,w,h)\n hs = np.transpose(hs) # reverse axis order. ndarray (h,w,c). totensor expects this shape\n hs = hs / 4095. * 255 #4096: db max. totensor expects in [0, 255]\n\n hs = transforms.ToTensor()(hs) # convert ndarray (h,w,c) [0,255] -> torch tensor (c,h,w) [0.0, 1.0] #fixme why move everything and not only the crop to the gpu?\n\n # check dimensions and crop hs image\n # assert(rgb.shape[1] == hs.shape[1] and rgb.shape[2] == hs.shape[2])\n if self.opt.phase == 'train':\n # train on random crops\n hs_crop = hs[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize]\n else:\n # Validate or Test\n hs_crop = hs #will pad on the net\n # topdown_pad = (1536 - 1392) // 2\n # leftright_pad = (1536 - 1300) // 2\n # hs_crop = F.pad(hs, (leftright_pad, leftright_pad, topdown_pad, topdown_pad))\n\n\n rgb_crop = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(rgb_crop) #fixme still valid in icvl?\n if self.opt.phase == 'train':\n hs_crop = transforms.Normalize(tuple([0.5] * 31), tuple([0.5] * 31))(hs_crop)\n\n if self.opt.which_direction == 'BtoA':\n input_nc = self.opt.output_nc\n output_nc = self.opt.input_nc\n else:\n input_nc = self.opt.input_nc\n output_nc = self.opt.output_nc\n\n if (not self.opt.no_flip) and random.random() < 0.5:\n idx = [i for i in range(rgb_crop.size(2) - 1, -1, -1)]\n idx = torch.LongTensor(idx)\n rgb_crop = rgb_crop.index_select(2, idx)\n if self.opt.phase == 'train':\n hs_crop = hs_crop.index_select(2, idx)\n\n if input_nc == 1: # RGB to gray\n tmp = rgb_crop[0, ...] * 0.299 + rgb_crop[1, ...] * 0.587 + rgb_crop[2, ...] 
* 0.114\n rgb_crop = tmp.unsqueeze(0)\n\n if self.opt.phase == 'train':\n if output_nc == 1: # RGB to gray\n tmp = hs_crop[0, ...] * 0.299 + hs_crop[1, ...] * 0.587 + hs_crop[2, ...] * 0.114\n hs_crop = tmp.unsqueeze(0)\n\n if self.opt.phase == 'train':\n return_dict = {'A': rgb_crop, 'B': hs_crop,\n 'A_paths': path_rgb, 'B_paths': path_hs}\n\n else:\n # we just use the rgb paths instead, won't use them anyway. nasty, I know\n return_dict = {'A': rgb_crop, 'B': rgb_crop,\n 'A_paths': path_rgb, 'B_paths': path_rgb}\n\n if self.opt.phase == 'Validate' or self.opt.phase == 'Test':\n return_dict['full_img_padding'] = full_img_padding\n\n return return_dict\n\n\n def generate_single_envi_file(self, fpath_hs_mat, overwrite_envi=False):\n dir_hs = self.dirlist_hs[0] # for brevity\n hsmat = h5py.File(fpath_hs_mat) # b[{'rgb', 'bands', 'rad'}] # Shape: (Bands, Cols, Rows) <-> (bands, samples, lines)\n hsnp = hsmat['rad'].value # hs image numpy array # ndarray (c,w,h)spec\n # hdr = io.envi.read_envi_header(file='data/envi_template.hdr')\n # hdr = self.update_hs_metadata(metadata=hdr, wl=hsmat['bands'].value.flatten())\n hdr_file = os.path.join(dir_hs, os.path.splitext(os.path.basename(fpath_hs_mat))[0] + '.hdr')\n spectral.io.envi.save_image(hdr_file=hdr_file, image=np.transpose(hsnp).astype(np.int16), force=overwrite_envi,\n dtype=np.int16) # dtype int16 range: [-32000, 32000]\n\n def generate_envi_files(self, overwrite_envi=False):\n\n if not os.path.exists(self.dirlist_hs[0]):\n os.makedirs(self.dirlist_hs[0])\n\n nb_free_cores=1\n Parallel(n_jobs=-1 - nb_free_cores)(\n delayed(self.generate_single_envi_file)(fpath_hs_mat=fpath_hs_mat, overwrite_envi=overwrite_envi) for fpath_hs_mat in tqdm(self.paths_hs))\n\n def create_base_hdr(self):\n hdr=[]\n \"\"\"\n http://www.harrisgeospatial.com/docs/ENVIHeaderFiles.html#Example\n data_Type: The type of data representation:\n\n1 = Byte: 8-bit unsigned integer\n2 = Integer: 16-bit signed integer\n3 = Long: 32-bit signed integer\n4 
= Floating-point: 32-bit single-precision\n5 = Double-precision: 64-bit double-precision floating-point\n6 = Complex: Real-imaginary pair of single-precision floating-point\n9 = Double-precision complex: Real-imaginary pair of double precision floating-point\n12 = Unsigned integer: 16-bit\n13 = Unsigned long integer: 32-bit\n14 = 64-bit long integer (signed)\n15 = 64-bit unsigned long integer (unsigned)\"\"\"\n return hdr\n\n def update_hs_metadata(self, metadata, wl):\n\n metadata['interleave'] = 'bsq' # (Rows, Cols, Bands) <->(lines, samples, bands)\n # metadata['lines'] = int(metadata['lines']) - 4 # lines = rows. Lines <= 1300\n # metadata['samples'] = 1392 # samples = cols. Samples are 1392 for the whole dataset\n # metadata['bands'] = len(wl)\n metadata['data type'] = 4 #5 = Double-precision: 64-bit double-precision floating-point http://www.harrisgeospatial.com/docs/ENVIHeaderFiles.html#Example\n metadata['wavelength'] = wl\n metadata['default bands'] = [5, 15, 25]\n metadata['fwhm'] = np.diff(wl)\n metadata['vroi'] = [1, len(wl)]\n return metadata\n\n def __len__(self):\n return len(self.paths_rgb)\n\n def name(self):\n return 'icvl_ntire2018_dataset'\n" }, { "alpha_fraction": 0.8410256505012512, "alphanum_fraction": 0.8410256505012512, "avg_line_length": 26.571428298950195, "blob_id": "1b7a8fc3d9ae88896ac020da58c3052065735131", "content_id": "446d8519486dbc6c1cbdcdbebed719af8d6e7d67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 195, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/data/aligned_dataset.py", "repo_name": "liuhongbo830117/ntire2018_adv_rgb2hs", "src_encoding": "UTF-8", "text": "import os.path\nimport random\nimport torchvision.transforms as transforms\nimport torch\nfrom data.base_dataset import BaseDataset\nfrom data.image_folder import make_dataset\nfrom PIL import Image\n\n\n" }, { "alpha_fraction": 0.6464020013809204, "alphanum_fraction": 
0.6532257795333862, "avg_line_length": 24.203125, "blob_id": "8f19c9ddb50aea33aeb713ef83218165847b6686", "content_id": "2d85362197c72cc3d60bd494a981416dc036e191", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1612, "license_type": "no_license", "max_line_length": 109, "num_lines": 64, "path": "/eval/evaluation.py", "repo_name": "liuhongbo830117/ntire2018_adv_rgb2hs", "src_encoding": "UTF-8", "text": "# Evaluation script for the NTIRE 2018 Spectral Reconstruction Challenge\n#\n# * Provide input and output directories as arguments\n# * Validation files should be found in the '/ref' subdirectory of the input dir\n# * Input validation files are expected in the v7.3 .mat format\n\n\nimport h5py as h5py\nimport numpy as np\nimport sys\nimport os\n\n\nMRAEs = {}\nRMSEs = {}\n\n\ndef get_ref_from_file(filename):\n matfile = h5py.File(filename, 'r')\n mat={}\n for k, v in matfile.items():\n mat[k] = np.array(v)\n return mat['rad']\n\n\n#input and output directories given as arguments\n[_, input_dir, output_dir] = sys.argv\n\nvalidation_files = os.listdir(input_dir +'/ref')\n\nfor f in validation_files:\n # Read ground truth data\n if not(os.path.splitext(f)[1] in '.mat'):\n print('skipping '+f)\n continue\n gt = get_ref_from_file(input_dir + '/ref/' + f)\n # Read user submission\n rc = get_ref_from_file(input_dir + '/res/' + f)\n\n # compute MRAE\n diff = gt-rc\n abs_diff = np.abs(diff)\n relative_abs_diff = np.divide(abs_diff,gt+np.finfo(float).eps) # added epsilon to avoid division by zero.\n MRAEs[f] = np.mean(relative_abs_diff)\n\n # compute RMSE\n square_diff = np.power(diff,2)\n RMSEs[f] = np.sqrt(np.mean(square_diff))\n\n\n print(f)\n print(MRAEs[f])\n print(RMSEs[f])\n\n\nMRAE = np.mean(MRAEs.values())\nprint(\"MRAE:\\n\"+MRAE.astype(str))\nRMSE = np.mean(RMSEs.values())\nprint(\"\\nRMSE:\\n\"+RMSE.astype(str))\n\n\nwith open(output_dir + '/scores.txt', 'w') as output_file:\n 
output_file.write(\"MRAE:\"+MRAE.astype(str))\n output_file.write(\"\\nRMSE:\"+RMSE.astype(str))" }, { "alpha_fraction": 0.5799086689949036, "alphanum_fraction": 0.5814307332038879, "avg_line_length": 32.69230651855469, "blob_id": "9f8fb4cc56d4d24e23c9e243951657405b16ced8", "content_id": "778d679078aecafffa58195aaab373e7f15252af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1314, "license_type": "no_license", "max_line_length": 121, "num_lines": 39, "path": "/eval/select_model.py", "repo_name": "liuhongbo830117/ntire2018_adv_rgb2hs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport os\nimport sacred\nimport glob\nfrom sacred import Experiment\nex = Experiment('rename_to_samename')\n\[email protected]\ndef config():\n results_home_dir = os.path.abspath('/home/aitor/dev/adv_rgb2hs_pytorch/results')\n\[email protected]\ndef select_model(results_home_dir):\n res_dir_list = glob.glob(results_home_dir + '/*')\n dfall_list = []\n for res_dir in res_dir_list:\n exp = os.path.basename(res_dir)\n fpath = os.path.join(res_dir, 'scores.txt')\n\n try:\n f = open(fpath)\n except IOError:\n print(fpath + ' does not exist')\n else:\n with f:\n content = f.readlines()\n content = [x.strip() for x in content]\n results = dict([elem.split(':') for elem in content])\n results = {k: [v] for k, v in results.items()} # from_dict() needs iterable as value per key/column name\n results['exp'] = [exp]\n dfbuff = pd.DataFrame.from_dict(results)\n dfbuff = dfbuff.set_index('exp')\n dfall_list.append(dfbuff)\n dfall = pd.concat(dfall_list)\n dfall = dfall.astype(float)\n print(dfall.sort_values(by='RMSE', ascending=True))\n print(dfall.sort_values(by='MRAE', ascending=True))\n pass\n" }, { "alpha_fraction": 0.6033940315246582, "alphanum_fraction": 0.6260957717895508, "avg_line_length": 33.89019775390625, "blob_id": "f4dd7bcb53e62414855ecfa6c0762f89f81762ec", "content_id": 
"69134780204a2aae152a30425a236039b1aac33a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8898, "license_type": "no_license", "max_line_length": 189, "num_lines": 255, "path": "/util/spectral_color.py", "repo_name": "liuhongbo830117/ntire2018_adv_rgb2hs", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\nfrom colour.plotting import *\nimport colour\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom skimage.color import colorconv\nfrom spectral import *\n\n### to avoid importing pyresources.assemple data\ndef dim_ordering_tf2th(img_list_ndarray):\n \"\"\"\n convert ndarray with dimensions ordered as tf to th\n 'tf' expects (nb_imgs, nb_rows, nb_cols, nb_channels) < -- compatible with plt.imshow(img_list[0,:,:,:])\n 'th' expects (nb_imgs, nb_channels, nb_rows, nb_cols)\n\n Parameters\n ----------\n img_list_ndarray: ndarray\n Input ndarray of dimensions coherent with 'tf': (nb_imgs, nb_rows, nb_cols, nb_channels)\n\n Returns\n -------\n img_ndarray: ndarray\n Output ndarray of dimensions coherent with 'th': (nb_imgs, nb_channels, nb_rows, nb_cols)\n \"\"\"\n if len(img_list_ndarray.shape) == 4:\n img_list_ndarray = np.rollaxis(img_list_ndarray, 3, 1)\n elif len(img_list_ndarray.shape) == 3: # single image\n img_list_ndarray = np.rollaxis(img_list_ndarray, 2, 0)\n else:\n raise NotImplementedError('Input must be 3 or 4 dimnesional ndarray')\n\n\n\n return img_list_ndarray\n\n\ndef dim_ordering_th2tf(img_list_ndarray):\n \"\"\"\n convert ndarray with dimensions ordered as th to tf\n 'tf' expects (nb_imgs, nb_rows, nb_cols, nb_channels) < -- compatible with plt.imshow(img_list[0,:,:,:])\n 'th' expects (nb_imgs, nb_channels, nb_rows, nb_cols)\n\n Parameters\n ----------\n img_list_ndarray: ndarray\n Input ndarray of dimensions coherent with 'th': (nb_imgs, nb_channels, nb_rows, nb_cols)\n\n Returns\n -------\n img_ndarray: ndarray\n Output 
ndarray of dimensions coherent with 'tf': (nb_imgs, nb_rows, nb_cols, nb_channels)\n \"\"\"\n if len(img_list_ndarray.shape) == 4:\n img_list_ndarray = np.rollaxis(img_list_ndarray, 1, 4)\n elif len(img_list_ndarray.shape) == 3: # single image\n img_list_ndarray = np.rollaxis(img_list_ndarray, 0, 3)\n else:\n raise NotImplementedError('Input must be 3 or 4 dimnesional ndarray')\n\n return img_list_ndarray\n\n\ndef spectral2XYZ_img_vectorized(cmfs, R):\n \"\"\"\n \n Parameters\n ----------\n cmfs\n R: np.ndarray (nb_pixels, 3) in [0., 1.]\n\n Returns\n -------\n\n \"\"\"\n\n x_bar, y_bar, z_bar = colour.tsplit(cmfs) # tested: OK. x_bar is the double one, the rightmost one (red). z_bar is the leftmost one (blue)\n plt.close('all')\n plt.plot(np.array([z_bar, y_bar, x_bar]).transpose())\n plt.savefig('cmf_cie1964_10.png')\n plt.close('all')\n # illuminant. We assume that the captured R is reflectance with illuminant E (although it really is not, it is reflected radiance with an unknown illuminant, but the result is the same)\n S = colour.ILLUMINANTS_RELATIVE_SPDS['E'].values[20:81:2] / 100. 
# Equal-energy radiator (ones) sample_spectra_from_hsimg 300 to xxx with delta=5nm\n # print S\n\n # dw = cmfs.shape.interval\n dw = 10\n\n k = 100 / (np.sum(y_bar * S) * dw)\n\n X_p = R * x_bar * S * dw # R(N,31) * x_bar(31,) * S(31,) * dw(1,)\n Y_p = R * y_bar * S * dw\n Z_p = R * z_bar * S * dw\n\n XYZ = k * np.sum(np.array([X_p, Y_p, Z_p]), axis=-1)\n XYZ = np.rollaxis(XYZ, 1, 0) # th2tf() but for 2D input\n\n return XYZ\n\ndef spectral2XYZ_img(hs, cmf_name, image_data_format='channels_last'):\n \"\"\"\n Convert spectral image input to XYZ (tristimulus values) image\n\n Parameters\n ----------\n hs: numpy.ndarray\n 3 dimensional numpy array containing the spectral information in either (h,w,c) ('channels_last') or (c,h,w) ('channels_first') formats \n cmf_name: basestring\n String describing the color matching functions to be used\n image_data_format: basestring {'channels_last', 'channels_first'}. Default: 'channels_last'\n Channel dimension ordering of the input spectral image. the rgb output will follow the same dim ordering format\n\n Returns\n -------\n XYZ: numpy.ndarray\n 3 dimensional numpy array containing the tristimulus value information in either (h,w,3) ('channels_last') or (3,h,w) ('channels_first') formats\n\n \"\"\"\n if image_data_format == 'channels_first':\n hs = dim_ordering_th2tf(hs) # th2tf (convert to channels_last\n\n elif image_data_format == 'channels_last':\n pass\n else:\n raise AttributeError('Wrong image_data_format parameter ' + image_data_format)\n\n # flatten\n h, w, c = hs.shape\n hs = hs.reshape(-1, c)\n\n cmfs = get_cmfs(cmf_name=cmf_name, nm_range=(400., 700.), nm_step=10, split=False)\n\n XYZ = spectral2XYZ_img_vectorized(cmfs, hs) # (nb_px, 3)\n\n # recover original shape (needed to call to xyz2rgb()\n XYZ = XYZ.reshape((h, w, 3))\n\n if image_data_format == 'channels_first':\n # convert back to channels_first\n XYZ = dim_ordering_tf2th(XYZ)\n\n return XYZ\n\n\ndef spectral2sRGB_img(spectral, cmf_name, 
image_data_format='channels_last'):\n \"\"\"\n Convert spectral image input to rgb image\n \n Parameters\n ----------\n spectral: numpy.ndarray\n 3 dimensional numpy array containing the spectral information in either (h,w,c) ('channels_last') or (c,h,w) ('channels_first') formats \n cmf_name: basestring\n String describing the color matching functions to be used\n image_data_format: basestring {'channels_last', 'channels_first'}. Default: 'channels_last'\n Channel dimension ordering of the input spectral image. the rgb output will follow the same dim ordering format\n\n Returns\n -------\n rgb: numpy.ndarray\n 3 dimensional numpy array containing the spectral information in either (h,w,3) ('channels_last') or (3,h,w) ('channels_first') formats\n \n \"\"\"\n\n XYZ = spectral2XYZ_img(hs=spectral, cmf_name=cmf_name, image_data_format=image_data_format)\n\n if image_data_format == 'channels_first':\n XYZ = dim_ordering_th2tf(XYZ) # th2tf (convert to channels_last\n\n elif image_data_format == 'channels_last':\n pass\n else:\n raise AttributeError('Wrong image_data_format parameter ' + image_data_format)\n\n #we need to pass in channels_last format to xyz2rgb\n sRGB = colorconv.xyz2rgb(XYZ/100.)\n\n if image_data_format == 'channels_first':\n # convert back to channels_first\n sRGB = dim_ordering_tf2th(sRGB)\n\n\n return sRGB\n\n\ndef save_hs_as_envi(fpath, hs31, image_data_format_in='channels_last'):#, image_data_format_out='channels_last'):\n #output is always channels_last\n if image_data_format_in == 'channels_first':\n hs31 = dim_ordering_th2tf(hs31)\n elif image_data_format_in != 'channels_last':\n raise AttributeError('Wrong image_data_format_in')\n\n # dst_dir = os.path.dirname(fpath)\n\n hdr_fpath = fpath + '.hdr'\n wl = np.arange(400, 701, 10)\n\n hs31_envi = envi.create_image(hdr_file=hdr_fpath,\n metadata=generate_metadata(wl=wl),\n shape=hs31.shape, # Must be in (Rows, Cols, Bands)\n force=True,\n dtype=np.float32, # np.float32, 32MB/img np.ubyte: 
8MB/img\n ext='.envi31')\n mm = hs31_envi.open_memmap(writable=True)\n mm[:, :, :] = hs31\n\n\ndef generate_metadata(wl):\n md = dict()\n md['interleave'] = 'bsq' # (Rows, Cols, Bands) <->(lines, samples, bands)\n md['data type'] = 12\n md['wavelength'] = wl\n md['default bands'] = [22, 15, 6] # for spectral2dummyRGB\n md['fwhm'] = np.diff(wl)\n # md['vroi'] = [1, len(wl)]\n\n return md\n\ndef load_envi(fpath_envi, fpath_hdr=None):\n if fpath_hdr is None:\n fpath_hdr = os.path.splitext(fpath_envi)[0] + '.hdr'\n\n hs = io.envi.open(fpath_hdr, fpath_envi)\n\n return hs\n\n\ndef get_cmfs(cmf_name='cie1964_10', nm_range=(400., 700.), nm_step=10, split=True):\n\n if cmf_name == 'cie1931_2':\n cmf_full_name = 'CIE 1931 2 Degree Standard Observer'\n elif cmf_name == 'cie1931_10':\n cmf_full_name = 'CIE 1931 10 Degree Standard Observer'\n elif cmf_name == 'cie1964_2':\n cmf_full_name = 'CIE 1964 2 Degree Standard Observer'\n elif cmf_name == 'cie1964_10':\n cmf_full_name = 'CIE 1964 10 Degree Standard Observer'\n else:\n raise AttributeError('Wrong cmf name')\n cmfs = colour.STANDARD_OBSERVERS_CMFS[cmf_full_name]\n\n # subsample and trim range\n ix_wl_first = np.where(cmfs.wavelengths == nm_range[0])[0][0]\n ix_wl_last = np.where(cmfs.wavelengths == nm_range[1] + 1.)[0][0]\n cmfs = cmfs.values[ix_wl_first:ix_wl_last:int(nm_step), :] # make sure the nm_step is an int\n\n if split:\n x_bar, y_bar, z_bar = colour.tsplit(cmfs) #tested: OK. x_bar is the double one, the rightmost one (red). z_bar is the leftmost one (blue)\n return x_bar, y_bar, z_bar\n else:\n return cmfs\n\n" } ]
7
charchitjain/Vector-Class
https://github.com/charchitjain/Vector-Class
7aeabd8f52d2190891ad64bc2620d45576d82f5e
23da4d17be241c7284bfa1e64a0a75991ec60729
1b1505f059489b7fbbe137351bfa710f672273a1
refs/heads/master
"2021-04-28T01:59:23.615053"
"2018-02-21T04:31:11"
"2018-02-21T04:31:11"
122,292,191
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7976190447807312, "alphanum_fraction": 0.7976190447807312, "avg_line_length": 41, "blob_id": "c8b7d2e9487513ee9710b9ff04f821563c8694ee", "content_id": "b7c217dcdad3f63a0ec73a47b1f5865ec7f6e19b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 84, "license_type": "no_license", "max_line_length": 68, "num_lines": 2, "path": "/README.md", "repo_name": "charchitjain/Vector-Class", "src_encoding": "UTF-8", "text": "# Vector-Class\nThis program will create a vector class which will help in rotation.\n" }, { "alpha_fraction": 0.5335856080055237, "alphanum_fraction": 0.5449385046958923, "avg_line_length": 38.75471878051758, "blob_id": "c545bb348c62b9c87e7a6623286cfc255f9dd0e9", "content_id": "e5a669518d252fd1ce941d053be826d3ba8e2bb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2114, "license_type": "no_license", "max_line_length": 97, "num_lines": 53, "path": "/vector.Py", "repo_name": "charchitjain/Vector-Class", "src_encoding": "UTF-8", "text": "+import math\n+\n+class Vector(object):\n+ \n+ def __init__(self, *args):\n+ if len(args)==0: self.values = (0,0)\n+ else: self.values = args\n+ \n+ def norm(self):\n+ \"\"\" Returns the norm (length, magnitude) of the vector \"\"\"\n+ return math.sqrt(sum( comp**2 for comp in self ))\n+ \n+ def argument(self):\n+ \"\"\" Returns the argument of the vector, the angle clockwise from +y.\"\"\"\n+ arg_in_rad = math.acos(Vector(0,1)*self/self.norm())\n+ arg_in_deg = math.degrees(arg_in_rad)\n+ if self.values[0]<0: return 360 - arg_in_deg\n+ else: return arg_in_deg\n+\n+ def normalize(self):\n+ \"\"\" Returns a normalized unit vector \"\"\"\n+ norm = self.norm()\n+ normed = tuple( comp/norm for comp in self )\n+ return Vector(*normed)\n+ \n+ def rotate(self, *args):\n+ \"\"\" Rotate this vector. If passed a number, assumes this is a \n+ 2D vector and rotates by the passed value in degrees. 
Otherwise,\n+ assumes the passed value is a list acting as a matrix which rotates the vector.\n+ \"\"\"\n+ if len(args)==1 and type(args[0]) == type(1) or type(args[0]) == type(1.):\n+ # So, if rotate is passed an int or a float...\n+ if len(self) != 2:\n+ raise ValueError(\"Rotation axis not defined for greater than 2D vector\")\n+ return self._rotate2D(*args)\n+ elif len(args)==1:\n+ matrix = args[0]\n+ if not all(len(row) == len(v) for row in matrix) or not len(matrix)==len(self):\n+ raise ValueError(\"Rotation matrix must be square and same dimensions as vector\")\n+ return self.matrix_mult(matrix)\n+ \n+ def _rotate2D(self, theta):\n+ \"\"\" Rotate this vector by theta in degrees.\n+ \n+ Returns a new vector.\n+ \"\"\"\n+ theta = math.radians(theta)\n+ # Just applying the 2D rotation matrix\n+ dc, ds = math.cos(theta), math.sin(theta)\n+ x, y = self.values\n+ x, y = dc*x - ds*y, ds*x + dc*y\n+ return Vector(x, y)\n+ \n" } ]
2
eric-z-lin/DIAYN-PyTorch
https://github.com/eric-z-lin/DIAYN-PyTorch
45e3c460f16b51ec6b1db1c7656f9cff8f0ba33c
fe6709f27f87d9e839c85a207cd8b281e0dfa540
2e225c8567bea5b00338944413b0a0aaab13c9d5
refs/heads/main
"2023-07-13T20:25:17.435470"
"2021-08-22T09:25:07"
"2021-08-22T09:25:07"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5278477668762207, "alphanum_fraction": 0.532325804233551, "avg_line_length": 35.45918273925781, "blob_id": "fc47b810afa1eeecd854d54e9e93213885d895e1", "content_id": "ef20c671141ec264d522572840574ed7466a0f8e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3573, "license_type": "permissive", "max_line_length": 123, "num_lines": 98, "path": "/main.py", "repo_name": "eric-z-lin/DIAYN-PyTorch", "src_encoding": "UTF-8", "text": "import gym\nfrom Brain import SACAgent\nfrom Common import Play, Logger, get_params\nimport numpy as np\nfrom tqdm import tqdm\nimport mujoco_py\n\n\ndef concat_state_latent(s, z_, n):\n z_one_hot = np.zeros(n)\n z_one_hot[z_] = 1\n return np.concatenate([s, z_one_hot])\n\n\nif __name__ == \"__main__\":\n params = get_params()\n\n test_env = gym.make(params[\"env_name\"])\n n_states = test_env.observation_space.shape[0]\n n_actions = test_env.action_space.shape[0]\n action_bounds = [test_env.action_space.low[0], test_env.action_space.high[0]]\n\n params.update({\"n_states\": n_states,\n \"n_actions\": n_actions,\n \"action_bounds\": action_bounds})\n print(\"params:\", params)\n test_env.close()\n del test_env, n_states, n_actions, action_bounds\n\n env = gym.make(params[\"env_name\"])\n\n p_z = np.full(params[\"n_skills\"], 1 / params[\"n_skills\"])\n agent = SACAgent(p_z=p_z, **params)\n logger = Logger(agent, **params)\n\n if params[\"do_train\"]:\n\n if not params[\"train_from_scratch\"]:\n episode, last_logq_zs, np_rng_state, *env_rng_states, torch_rng_state, random_rng_state = logger.load_weights()\n agent.hard_update_target_network()\n min_episode = episode\n np.random.set_state(np_rng_state)\n env.np_random.set_state(env_rng_states[0])\n env.observation_space.np_random.set_state(env_rng_states[1])\n env.action_space.np_random.set_state(env_rng_states[2])\n agent.set_rng_states(torch_rng_state, random_rng_state)\n print(\"Keep training from previous 
run.\")\n\n else:\n min_episode = 0\n last_logq_zs = 0\n np.random.seed(params[\"seed\"])\n env.seed(params[\"seed\"])\n env.observation_space.seed(params[\"seed\"])\n env.action_space.seed(params[\"seed\"])\n print(\"Training from scratch.\")\n\n logger.on()\n for episode in tqdm(range(1 + min_episode, params[\"max_n_episodes\"] + 1)):\n z = np.random.choice(params[\"n_skills\"], p=p_z)\n state = env.reset()\n state = concat_state_latent(state, z, params[\"n_skills\"])\n episode_reward = 0\n logq_zses = []\n\n max_n_steps = min(params[\"max_episode_len\"], env.spec.max_episode_steps)\n for step in range(1, 1 + max_n_steps):\n\n action = agent.choose_action(state)\n next_state, reward, done, _ = env.step(action)\n next_state = concat_state_latent(next_state, z, params[\"n_skills\"])\n agent.store(state, z, done, action, next_state)\n logq_zs = agent.train()\n if logq_zs is None:\n logq_zses.append(last_logq_zs)\n else:\n logq_zses.append(logq_zs)\n episode_reward += reward\n state = next_state\n if done:\n break\n\n logger.log(episode,\n episode_reward,\n z,\n sum(logq_zses) / len(logq_zses),\n step,\n np.random.get_state(),\n env.np_random.get_state(),\n env.observation_space.np_random.get_state(),\n env.action_space.np_random.get_state(),\n *agent.get_rng_states(),\n )\n\n else:\n logger.load_weights()\n player = Play(env, agent, n_skills=params[\"n_skills\"])\n player.evaluate()\n" }, { "alpha_fraction": 0.618030846118927, "alphanum_fraction": 0.6192170977592468, "avg_line_length": 25.34375, "blob_id": "7eb49700b6d6e14e9cd3b8af5599f16b71b2e30b", "content_id": "36f4ae911892f08b6360021e0227507f767694f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 843, "license_type": "permissive", "max_line_length": 85, "num_lines": 32, "path": "/Brain/replay_memory.py", "repo_name": "eric-z-lin/DIAYN-PyTorch", "src_encoding": "UTF-8", "text": "import random\nfrom collections import namedtuple\n\nTransition = 
namedtuple('Transition', ('state', 'z', 'done', 'action', 'next_state'))\n\n\nclass Memory:\n def __init__(self, buffer_size, seed):\n self.buffer_size = buffer_size\n self.buffer = []\n self.seed = seed\n random.seed(self.seed)\n\n def add(self, *transition):\n self.buffer.append(Transition(*transition))\n if len(self.buffer) > self.buffer_size:\n self.buffer.pop(0)\n assert len(self.buffer) <= self.buffer_size\n\n def sample(self, size):\n return random.sample(self.buffer, size)\n\n def __len__(self):\n return len(self.buffer)\n\n @staticmethod\n def get_rng_state():\n return random.getstate()\n\n @staticmethod\n def set_rng_state(random_rng_state):\n random.setstate(random_rng_state)\n" }, { "alpha_fraction": 0.6251002550125122, "alphanum_fraction": 0.6367281675338745, "avg_line_length": 38.27558898925781, "blob_id": "f42540a0f7eb0cd6f1694928d69e54fb75b96fc1", "content_id": "bdbfb4c449776f9371290b9ed40ee6a9c271c4f0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4988, "license_type": "permissive", "max_line_length": 112, "num_lines": 127, "path": "/Brain/model.py", "repo_name": "eric-z-lin/DIAYN-PyTorch", "src_encoding": "UTF-8", "text": "from abc import ABC\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.distributions import Normal\n\n\ndef init_weight(layer, initializer=\"he normal\"):\n if initializer == \"xavier uniform\":\n nn.init.xavier_uniform_(layer.weight)\n elif initializer == \"he normal\":\n nn.init.kaiming_normal_(layer.weight)\n\n\nclass Discriminator(nn.Module, ABC):\n def __init__(self, n_states, n_skills, n_hidden_filters=256):\n super(Discriminator, self).__init__()\n self.n_states = n_states\n self.n_skills = n_skills\n self.n_hidden_filters = n_hidden_filters\n\n self.hidden1 = nn.Linear(in_features=self.n_states, out_features=self.n_hidden_filters)\n init_weight(self.hidden1)\n self.hidden1.bias.data.zero_()\n self.hidden2 = 
nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_hidden_filters)\n init_weight(self.hidden2)\n self.hidden2.bias.data.zero_()\n self.q = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_skills)\n init_weight(self.q, initializer=\"xavier uniform\")\n self.q.bias.data.zero_()\n\n def forward(self, states):\n x = F.relu(self.hidden1(states))\n x = F.relu(self.hidden2(x))\n logits = self.q(x)\n return logits\n\n\nclass ValueNetwork(nn.Module, ABC):\n def __init__(self, n_states, n_hidden_filters=256):\n super(ValueNetwork, self).__init__()\n self.n_states = n_states\n self.n_hidden_filters = n_hidden_filters\n\n self.hidden1 = nn.Linear(in_features=self.n_states, out_features=self.n_hidden_filters)\n init_weight(self.hidden1)\n self.hidden1.bias.data.zero_()\n self.hidden2 = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_hidden_filters)\n init_weight(self.hidden2)\n self.hidden2.bias.data.zero_()\n self.value = nn.Linear(in_features=self.n_hidden_filters, out_features=1)\n init_weight(self.value, initializer=\"xavier uniform\")\n self.value.bias.data.zero_()\n\n def forward(self, states):\n x = F.relu(self.hidden1(states))\n x = F.relu(self.hidden2(x))\n return self.value(x)\n\n\nclass QvalueNetwork(nn.Module, ABC):\n def __init__(self, n_states, n_actions, n_hidden_filters=256):\n super(QvalueNetwork, self).__init__()\n self.n_states = n_states\n self.n_hidden_filters = n_hidden_filters\n self.n_actions = n_actions\n\n self.hidden1 = nn.Linear(in_features=self.n_states + self.n_actions, out_features=self.n_hidden_filters)\n init_weight(self.hidden1)\n self.hidden1.bias.data.zero_()\n self.hidden2 = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_hidden_filters)\n init_weight(self.hidden2)\n self.hidden2.bias.data.zero_()\n self.q_value = nn.Linear(in_features=self.n_hidden_filters, out_features=1)\n init_weight(self.q_value, initializer=\"xavier uniform\")\n self.q_value.bias.data.zero_()\n\n def 
forward(self, states, actions):\n x = torch.cat([states, actions], dim=1)\n x = F.relu(self.hidden1(x))\n x = F.relu(self.hidden2(x))\n return self.q_value(x)\n\n\nclass PolicyNetwork(nn.Module, ABC):\n def __init__(self, n_states, n_actions, action_bounds, n_hidden_filters=256):\n super(PolicyNetwork, self).__init__()\n self.n_states = n_states\n self.n_hidden_filters = n_hidden_filters\n self.n_actions = n_actions\n self.action_bounds = action_bounds\n\n self.hidden1 = nn.Linear(in_features=self.n_states, out_features=self.n_hidden_filters)\n init_weight(self.hidden1)\n self.hidden1.bias.data.zero_()\n self.hidden2 = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_hidden_filters)\n init_weight(self.hidden2)\n self.hidden2.bias.data.zero_()\n\n self.mu = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_actions)\n init_weight(self.mu, initializer=\"xavier uniform\")\n self.mu.bias.data.zero_()\n\n self.log_std = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_actions)\n init_weight(self.log_std, initializer=\"xavier uniform\")\n self.log_std.bias.data.zero_()\n\n def forward(self, states):\n x = F.relu(self.hidden1(states))\n x = F.relu(self.hidden2(x))\n\n mu = self.mu(x)\n log_std = self.log_std(x)\n std = log_std.clamp(min=-20, max=2).exp()\n dist = Normal(mu, std)\n return dist\n\n def sample_or_likelihood(self, states):\n dist = self(states)\n # Reparameterization trick\n u = dist.rsample()\n action = torch.tanh(u)\n log_prob = dist.log_prob(value=u)\n # Enforcing action bounds\n log_prob -= torch.log(1 - action ** 2 + 1e-6)\n log_prob = log_prob.sum(-1, keepdim=True)\n return (action * self.action_bounds[1]).clamp_(self.action_bounds[0], self.action_bounds[1]), log_prob\n" }, { "alpha_fraction": 0.5491886138916016, "alphanum_fraction": 0.5583164095878601, "avg_line_length": 44.5076904296875, "blob_id": "856373cab1f92596b78079fb2a8f6131a307ce91", "content_id": "33ecbffe2114eb2071eb776b63cb36d0b0dab368", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5916, "license_type": "permissive", "max_line_length": 113, "num_lines": 130, "path": "/Common/logger.py", "repo_name": "eric-z-lin/DIAYN-PyTorch", "src_encoding": "UTF-8", "text": "import time\nimport numpy as np\nimport psutil\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch\nimport os\nimport datetime\nimport glob\n\n\nclass Logger:\n def __init__(self, agent, **config):\n self.config = config\n self.agent = agent\n self.log_dir = self.config[\"env_name\"][:-3] + \"/\" + datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n self.start_time = 0\n self.duration = 0\n self.running_logq_zs = 0\n self.max_episode_reward = -np.inf\n self._turn_on = False\n self.to_gb = lambda in_bytes: in_bytes / 1024 / 1024 / 1024\n\n if self.config[\"do_train\"] and self.config[\"train_from_scratch\"]:\n self._create_wights_folder(self.log_dir)\n self._log_params()\n\n @staticmethod\n def _create_wights_folder(dir):\n if not os.path.exists(\"Checkpoints\"):\n os.mkdir(\"Checkpoints\")\n os.mkdir(\"Checkpoints/\" + dir)\n\n def _log_params(self):\n with SummaryWriter(\"Logs/\" + self.log_dir) as writer:\n for k, v in self.config.items():\n writer.add_text(k, str(v))\n\n def on(self):\n self.start_time = time.time()\n self._turn_on = True\n\n def _off(self):\n self.duration = time.time() - self.start_time\n\n def log(self, *args):\n if not self._turn_on:\n print(\"First you should turn the logger on once, via on() method to be able to log parameters.\")\n return\n self._off()\n\n episode, episode_reward, skill, logq_zs, step, *rng_states = args\n\n self.max_episode_reward = max(self.max_episode_reward, episode_reward)\n\n if self.running_logq_zs == 0:\n self.running_logq_zs = logq_zs\n else:\n self.running_logq_zs = 0.99 * self.running_logq_zs + 0.01 * logq_zs\n\n ram = psutil.virtual_memory()\n assert self.to_gb(ram.used) < 0.98 * self.to_gb(ram.total), 
\"RAM usage exceeded permitted limit!\"\n\n if episode % (self.config[\"interval\"] // 3) == 0:\n self._save_weights(episode, *rng_states)\n\n if episode % self.config[\"interval\"] == 0:\n print(\"E: {}| \"\n \"Skill: {}| \"\n \"E_Reward: {:.1f}| \"\n \"EP_Duration: {:.2f}| \"\n \"Memory_Length: {}| \"\n \"Mean_steps_time: {:.3f}| \"\n \"{:.1f}/{:.1f} GB RAM| \"\n \"Time: {} \".format(episode,\n skill,\n episode_reward,\n self.duration,\n len(self.agent.memory),\n self.duration / step,\n self.to_gb(ram.used),\n self.to_gb(ram.total),\n datetime.datetime.now().strftime(\"%H:%M:%S\"),\n ))\n\n with SummaryWriter(\"Logs/\" + self.log_dir) as writer:\n writer.add_scalar(\"Max episode reward\", self.max_episode_reward, episode)\n writer.add_scalar(\"Running logq(z|s)\", self.running_logq_zs, episode)\n writer.add_histogram(str(skill), episode_reward)\n writer.add_histogram(\"Total Rewards\", episode_reward)\n\n self.on()\n\n def _save_weights(self, episode, *rng_states):\n torch.save({\"policy_network_state_dict\": self.agent.policy_network.state_dict(),\n \"q_value_network1_state_dict\": self.agent.q_value_network1.state_dict(),\n \"q_value_network2_state_dict\": self.agent.q_value_network2.state_dict(),\n \"value_network_state_dict\": self.agent.value_network.state_dict(),\n \"discriminator_state_dict\": self.agent.discriminator.state_dict(),\n \"q_value1_opt_state_dict\": self.agent.q_value1_opt.state_dict(),\n \"q_value2_opt_state_dict\": self.agent.q_value2_opt.state_dict(),\n \"policy_opt_state_dict\": self.agent.policy_opt.state_dict(),\n \"value_opt_state_dict\": self.agent.value_opt.state_dict(),\n \"discriminator_opt_state_dict\": self.agent.discriminator_opt.state_dict(),\n \"episode\": episode,\n \"rng_states\": rng_states,\n \"max_episode_reward\": self.max_episode_reward,\n \"running_logq_zs\": self.running_logq_zs\n },\n \"Checkpoints/\" + self.log_dir + \"/params.pth\")\n\n def load_weights(self):\n model_dir = glob.glob(\"Checkpoints/\" + 
self.config[\"env_name\"][:-3] + \"/\")\n model_dir.sort()\n checkpoint = torch.load(model_dir[-1] + \"/params.pth\")\n self.log_dir = model_dir[-1].split(os.sep)[-1]\n self.agent.policy_network.load_state_dict(checkpoint[\"policy_network_state_dict\"])\n self.agent.q_value_network1.load_state_dict(checkpoint[\"q_value_network1_state_dict\"])\n self.agent.q_value_network2.load_state_dict(checkpoint[\"q_value_network2_state_dict\"])\n self.agent.value_network.load_state_dict(checkpoint[\"value_network_state_dict\"])\n self.agent.discriminator.load_state_dict(checkpoint[\"discriminator_state_dict\"])\n self.agent.q_value1_opt.load_state_dict(checkpoint[\"q_value1_opt_state_dict\"])\n self.agent.q_value2_opt.load_state_dict(checkpoint[\"q_value2_opt_state_dict\"])\n self.agent.policy_opt.load_state_dict(checkpoint[\"policy_opt_state_dict\"])\n self.agent.value_opt.load_state_dict(checkpoint[\"value_opt_state_dict\"])\n self.agent.discriminator_opt.load_state_dict(checkpoint[\"discriminator_opt_state_dict\"])\n\n self.max_episode_reward = checkpoint[\"max_episode_reward\"]\n self.running_logq_zs = checkpoint[\"running_logq_zs\"]\n\n return checkpoint[\"episode\"], self.running_logq_zs, *checkpoint[\"rng_states\"]\n" }, { "alpha_fraction": 0.6606781482696533, "alphanum_fraction": 0.6769779920578003, "avg_line_length": 43.47089767456055, "blob_id": "1e28b3460ee79a081ff43d3b7a0e6119bf7a3343", "content_id": "e995fa20a9801c455c743a46b6face39c71905f0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8901, "license_type": "permissive", "max_line_length": 759, "num_lines": 189, "path": "/README.md", "repo_name": "eric-z-lin/DIAYN-PyTorch", "src_encoding": "UTF-8", "text": "[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](http://makeapullrequest.com) \n\n# DIAYN-PyTorch\n\nWhile intelligent creatures can explore their environments and learn useful skills without 
supervision, many RL algorithms are heavily on the basis that acquiring skills is only achieved via defining them as explicit reward functions to learn.\n \nThus, in order to translate the natural behavior of creatures in learning **diverse** skills to a suitable mathematical formulation, DIAYN (Diversity is All You Need) was proposed for learning useful skills **without any domain-specific reward function**.\n \nInstead of the real reward of the environment, DIAYN optimizes the following objective:\n\n<p align=\"center\">\n <img src=\"Results/equation.png\", height=40>\n</p>\n\nthat _`z`_ is the skill that the agent is learning and, since we desire learned skills to be **as diverse as possible**, _`z`_ is modeled by a Uniform random variable that has the highest standard variation.\n \nThe above equation simply implies that the reward of any diverse task is equal to measuring how hard recognizing the skill _`z`_ is, given the state _`s`_ that the agent has visited compared to the real distribution over _`z`_ (which is Uniform distribution in DIAYN paper.) 
\nThe bigger r<sub>z</sub>(s, a) is, the more ambiguous skill _`z`_ is thus, the state _`s`_ should be visited more for task _`z`_ so, the agent finally acquires this skill.\n\nConcurrently to learn r<sub>z</sub>(s, a), any conventional RL method can be utilized to learn a policy and DIAYN uses SAC.\n\n**This repository is a PyTorch implementation of Diversity is All You Need and the SAC part of the code is based on [this repo](https://github.com/alirezakazemipour/SAC).**\n\n## Results\n> x-axis in all of the corresponding plots in this section are counted by number episode.\n\n### Hopper\n>number of skills = 20\n\n<p align=\"center\">\n <img src=\"Results/Hopper/running_logq.png\">\n</p>\n\nsimilar to the environment's goal| Emergent behavior| Emergent behavior\n:-----------------------:|:-----------------------:|:-----------------------:\n![](Gifs/Hopper/skill8.gif)| ![](Gifs/Hopper/skill2.gif)| ![](Gifs/Hopper/skill9.gif)\nReward distribution|Reward distribution|Reward distribution\n![](Results/Hopper/skill8.png)| ![](Results/Hopper/skill2.png)| ![](Results/Hopper/skill9.png)\n\n### BipedalWalker\n>number of skills = 50\n\n<p align=\"center\">\n <img src=\"Results/BipedalWalker/running_logq.png\">\n</p>\n\nsimilar to the environment's goal| Emergent behavior| Emergent behavior\n:-----------------------:|:-----------------------:|:-----------------------:\n![](Gifs/BipedalWalker/skill11.gif)| ![](Gifs/BipedalWalker/skill7.gif)| ![](Gifs/BipedalWalker/skill40.gif)\nReward distribution|Reward distribution|Reward distribution\n![](Results/BipedalWalker/skill11.png)| ![](Results/BipedalWalker/skill7.png)| ![](Results/BipedalWalker/skill40.png)\n\n### MountainCarContinuous\n>number of skills = 20\n\n<p align=\"center\">\n <img src=\"Results/MountainCar/running_logq.png\">\n</p>\n\nsimilar to the environment's goal| Emergent behavior| Emergent behavior\n:-----------------------:|:-----------------------:|:-----------------------:\n![](Gifs/MountainCar/skill3.gif)| 
![](Gifs/MountainCar/skill7.gif)| ![](Gifs/MountainCar/skill8.gif)\nReward distribution|Reward distribution|Reward distribution\n![](Results/MountainCar/skill3.png)| ![](Results/MountainCar/skill7.png)| ![](Results/MountainCar/skill8.png)\n\n## Dependencies\n- gym == 0.17.3\n- mujoco-py == 2.0.2.13\n- numpy == 1.19.2\n- opencv_contrib_python == 4.4.0.44\n- psutil == 5.5.1\n- torch == 1.6.0\n- tqdm == 4.50.0\n\n## Installation\n```bash\npip3 install -r requirements.txt\n```\n## Usage\n### How to run\n```bash\nusage: main.py [-h] [--env_name ENV_NAME] [--interval INTERVAL] [--do_train]\n [--train_from_scratch] [--mem_size MEM_SIZE]\n [--n_skills N_SKILLS] [--reward_scale REWARD_SCALE]\n [--seed SEED]\n\nVariable parameters based on the configuration of the machine or user's choice\n\noptional arguments:\n -h, --help show this help message and exit\n --env_name ENV_NAME Name of the environment.\n --interval INTERVAL The interval specifies how often different parameters\n should be saved and printed, counted by episodes.\n --do_train The flag determines whether to train the agent or play\n with it.\n --train_from_scratch The flag determines whether to train from scratch or\n continue previous tries.\n --mem_size MEM_SIZE The memory size.\n --n_skills N_SKILLS The number of skills to learn.\n --reward_scale REWARD_SCALE The reward scaling factor introduced in SAC.\n --seed SEED The randomness' seed for torch, numpy, random & gym[env].\n```\n- **In order to train the agent with default arguments , execute the following command and use `--do_train` flag, otherwise the agent would be tested** (You may change the memory capacity, the environment and number of skills to learn based on your desire.):\n```shell\npython3 main.py --mem_size=1000000 --env_name=\"Hopper-v3\" --interval=100 --do_train --n_skills=20\n```\n- **If you want to keep training your previous run, execute the followoing:**\n```shell\npython3 main.py --mem_size=1000000 --env_name=\"Hopper-v3\" --interval=100 
--do_train --n_skills=20 --train_from_scratch\n```\n### An important Note!!!\n- **When I tried to keep training from checkpoints to continue my previous run, I observed some undesirable behavior from the discriminator that its loss rapidly converged towards 0 however, after some epochs it again returned to its correct previous training phase. I suspect since at the beginning of training from checkpoints the replay memory is empty and familiar experiences (according to the policy) gradually get added to it, the trained discriminator from the previous run can easily recognize their true skills until the replay memory gets populated big enough and contains newer and more novel transitions. Thus, I recommend running your whole training monotonically and avoid using checkpoints and successive pausing though, it is been provided.**\n\n## Environments tested\n- [x] Hopper-v3\n- [x] bipedalWalker-v3\n- [x] MountainCarContinuous-v0\n- [ ] HalfCheetah-v3 \n\n## Structure\n```bash\n├── Brain\n│   ├── agent.py\n│   ├── __init__.py\n│   ├── model.py\n│   └── replay_memory.py\n├── Checkpoints\n│   ├── BipedalWalker\n│   │   └── params.pth\n│   ├── Hopper\n│   │   └── params.pth\n│   └── MountainCar\n│   └── params.pth\n├── Common\n│   ├── config.py\n│   ├── __init__.py\n│   ├── logger.py\n│   └── play.py\n├── Gifs\n│   ├── BipedalWalker\n│   │   ├── skill11.gif\n│   │   ├── skill40.gif\n│   │   └── skill7.gif\n│   ├── Hopper\n│   │   ├── skill2.gif\n│   │   ├── skill8.gif\n│   │   └── skill9.gif\n│   └── MountainCar\n│   ├── skill3.gif\n│   ├── skill7.gif\n│   └── skill8.gif\n├── LICENSE\n├── main.py\n├── README.md\n├── requirements.txt\n└── Results\n ├── BipedalWalker\n │   ├── running_logq.png\n │   ├── skill11.png\n │   ├── skill40.png\n │   └── skill7.png\n ├── equation.png\n ├── Hopper\n │   ├── running_logq.png\n │   ├── skill2.png\n │   ├── skill8.png\n │   └── skill9.png\n ├── MountainCar\n │   ├── running_logq.png\n │   ├── skill3.png\n │   ├── skill7.png\n │   └── 
skill8.png\n └── r_z.png\n```\n1. _Brain_ dir consists of the neural network structure and the agent decision-making core.\n2. _Common_ consists of minor codes that are common for most RL codes and do auxiliary tasks like logging and... .\n3. _main.py_ is the core module of the code that manages all other parts and makes the agent interact with the environment.\n\n## Reference\n\n1. [_Diversity is All You Need: Learning Skills without a Reward Function_, Eysenbach, 2018](https://arxiv.org/abs/1802.06070)\n\n## Acknowledgment\n**Big thanks to:**\n\n1. [@ben-eysenbach ](https://github.com/ben-eysenbach) for [sac](https://github.com/ben-eysenbach/sac).\n2. [@p-christ](https://github.com/p-christ) for [DIAYN.py](https://github.com/p-christ/Deep-Reinforcement-Learning-Algorithms-with-PyTorch/blob/master/agents/hierarchical_agents/DIAYN.py).\n3. [@johnlime](https://github.com/johnlime) for [RlkitExtension](https://github.com/johnlime/RlkitExtension).\n4. [@Dolokhow](https://github.com/Dolokhow) for [rl-algos-tf2 ](https://github.com/Dolokhow/rl-algos-tf2).\n" }, { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 26, "blob_id": "ba4412f1a1c078f238d85d86dffdd853ae29957f", "content_id": "27bcd2b20107011b38f9fd01f8fbd28a0fcffaf7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "permissive", "max_line_length": 30, "num_lines": 3, "path": "/Common/__init__.py", "repo_name": "eric-z-lin/DIAYN-PyTorch", "src_encoding": "UTF-8", "text": "from .config import get_params\nfrom .play import Play\nfrom .logger import Logger" }, { "alpha_fraction": 0.5680726170539856, "alphanum_fraction": 0.5872930884361267, "avg_line_length": 49.621620178222656, "blob_id": "506bc988f9df99fe0fcd48d83f9ec82c66f227f9", "content_id": "faa6342347d19919f2649cea97c4f241f0d7f60d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1873, 
"license_type": "permissive", "max_line_length": 117, "num_lines": 37, "path": "/Common/config.py", "repo_name": "eric-z-lin/DIAYN-PyTorch", "src_encoding": "UTF-8", "text": "import argparse\n\n\ndef get_params():\n parser = argparse.ArgumentParser(\n description=\"Variable parameters based on the configuration of the machine or user's choice\")\n\n parser.add_argument(\"--env_name\", default=\"BipedalWalker-v3\", type=str, help=\"Name of the environment.\")\n parser.add_argument(\"--interval\", default=20, type=int,\n help=\"The interval specifies how often different parameters should be saved and printed,\"\n \" counted by episodes.\")\n parser.add_argument(\"--do_train\", action=\"store_true\",\n help=\"The flag determines whether to train the agent or play with it.\")\n parser.add_argument(\"--train_from_scratch\", action=\"store_false\",\n help=\"The flag determines whether to train from scratch or continue previous tries.\")\n parser.add_argument(\"--mem_size\", default=int(1e+6), type=int, help=\"The memory size.\")\n parser.add_argument(\"--n_skills\", default=50, type=int, help=\"The number of skills to learn.\")\n parser.add_argument(\"--reward_scale\", default=1, type=float, help=\"The reward scaling factor introduced in SAC.\")\n parser.add_argument(\"--seed\", default=123, type=int,\n help=\"The randomness' seed for torch, numpy, random & gym[env].\")\n\n parser_params = parser.parse_args()\n\n # Parameters based on the DIAYN and SAC papers.\n # region default parameters\n default_params = {\"lr\": 3e-4,\n \"batch_size\": 256,\n \"max_n_episodes\": 5000,\n \"max_episode_len\": 1000,\n \"gamma\": 0.99,\n \"alpha\": 0.1,\n \"tau\": 0.005,\n \"n_hiddens\": 300\n }\n # endregion\n total_params = {**vars(parser_params), **default_params}\n return total_params\n" }, { "alpha_fraction": 0.5192770957946777, "alphanum_fraction": 0.5349397659301758, "avg_line_length": 32.8775520324707, "blob_id": "fe1459e397f412ef7427a2f0cb4716c24494f211", "content_id": 
"a78c3bdfc0633eb5c180156431a6cd77974183e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1660, "license_type": "permissive", "max_line_length": 99, "num_lines": 49, "path": "/Common/play.py", "repo_name": "eric-z-lin/DIAYN-PyTorch", "src_encoding": "UTF-8", "text": "# from mujoco_py.generated import const\nfrom mujoco_py import GlfwContext\nimport cv2\nimport numpy as np\nimport os\n\nGlfwContext(offscreen=True)\n\n\nclass Play:\n def __init__(self, env, agent, n_skills):\n self.env = env\n self.agent = agent\n self.n_skills = n_skills\n self.agent.set_policy_net_to_cpu_mode()\n self.agent.set_policy_net_to_eval_mode()\n self.fourcc = cv2.VideoWriter_fourcc(*'XVID')\n if not os.path.exists(\"Vid/\"):\n os.mkdir(\"Vid/\")\n\n @staticmethod\n def concat_state_latent(s, z_, n):\n z_one_hot = np.zeros(n)\n z_one_hot[z_] = 1\n return np.concatenate([s, z_one_hot])\n\n def evaluate(self):\n\n for z in range(self.n_skills):\n video_writer = cv2.VideoWriter(f\"Vid/skill{z}\" + \".avi\", self.fourcc, 50.0, (250, 250))\n s = self.env.reset()\n s = self.concat_state_latent(s, z, self.n_skills)\n episode_reward = 0\n for _ in range(self.env.spec.max_episode_steps):\n action = self.agent.choose_action(s)\n s_, r, done, _ = self.env.step(action)\n s_ = self.concat_state_latent(s_, z, self.n_skills)\n episode_reward += r\n if done:\n break\n s = s_\n I = self.env.render(mode='rgb_array')\n I = cv2.cvtColor(I, cv2.COLOR_RGB2BGR)\n I = cv2.resize(I, (250, 250))\n video_writer.write(I)\n print(f\"skill: {z}, episode reward:{episode_reward:.1f}\")\n video_writer.release()\n self.env.close()\n cv2.destroyAllWindows()\n" }, { "alpha_fraction": 0.5824999809265137, "alphanum_fraction": 0.5880263447761536, "avg_line_length": 46.2049674987793, "blob_id": "94d1d6fb516b8b0874c65250bc1cf9c3d39a7a58", "content_id": "a978aa38f184ab04173c9cf44cd94c6d4c450405", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 7600, "license_type": "permissive", "max_line_length": 118, "num_lines": 161, "path": "/Brain/agent.py", "repo_name": "eric-z-lin/DIAYN-PyTorch", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom .model import PolicyNetwork, QvalueNetwork, ValueNetwork, Discriminator\nimport torch\nfrom .replay_memory import Memory, Transition\nfrom torch import from_numpy\nfrom torch.optim.adam import Adam\nfrom torch.nn.functional import log_softmax\n\n\nclass SACAgent:\n def __init__(self,\n p_z,\n **config):\n self.config = config\n self.n_states = self.config[\"n_states\"]\n self.n_skills = self.config[\"n_skills\"]\n self.batch_size = self.config[\"batch_size\"]\n self.p_z = np.tile(p_z, self.batch_size).reshape(self.batch_size, self.n_skills)\n self.memory = Memory(self.config[\"mem_size\"], self.config[\"seed\"])\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n torch.manual_seed(self.config[\"seed\"])\n self.policy_network = PolicyNetwork(n_states=self.n_states + self.n_skills,\n n_actions=self.config[\"n_actions\"],\n action_bounds=self.config[\"action_bounds\"],\n n_hidden_filters=self.config[\"n_hiddens\"]).to(self.device)\n\n self.q_value_network1 = QvalueNetwork(n_states=self.n_states + self.n_skills,\n n_actions=self.config[\"n_actions\"],\n n_hidden_filters=self.config[\"n_hiddens\"]).to(self.device)\n\n self.q_value_network2 = QvalueNetwork(n_states=self.n_states + self.n_skills,\n n_actions=self.config[\"n_actions\"],\n n_hidden_filters=self.config[\"n_hiddens\"]).to(self.device)\n\n self.value_network = ValueNetwork(n_states=self.n_states + self.n_skills,\n n_hidden_filters=self.config[\"n_hiddens\"]).to(self.device)\n\n self.value_target_network = ValueNetwork(n_states=self.n_states + self.n_skills,\n n_hidden_filters=self.config[\"n_hiddens\"]).to(self.device)\n self.hard_update_target_network()\n\n self.discriminator = Discriminator(n_states=self.n_states, n_skills=self.n_skills,\n 
n_hidden_filters=self.config[\"n_hiddens\"]).to(self.device)\n\n self.mse_loss = torch.nn.MSELoss()\n self.cross_ent_loss = torch.nn.CrossEntropyLoss()\n\n self.value_opt = Adam(self.value_network.parameters(), lr=self.config[\"lr\"])\n self.q_value1_opt = Adam(self.q_value_network1.parameters(), lr=self.config[\"lr\"])\n self.q_value2_opt = Adam(self.q_value_network2.parameters(), lr=self.config[\"lr\"])\n self.policy_opt = Adam(self.policy_network.parameters(), lr=self.config[\"lr\"])\n self.discriminator_opt = Adam(self.discriminator.parameters(), lr=self.config[\"lr\"])\n\n def choose_action(self, states):\n states = np.expand_dims(states, axis=0)\n states = from_numpy(states).float().to(self.device)\n action, _ = self.policy_network.sample_or_likelihood(states)\n return action.detach().cpu().numpy()[0]\n\n def store(self, state, z, done, action, next_state):\n state = from_numpy(state).float().to(\"cpu\")\n z = torch.ByteTensor([z]).to(\"cpu\")\n done = torch.BoolTensor([done]).to(\"cpu\")\n action = torch.Tensor([action]).to(\"cpu\")\n next_state = from_numpy(next_state).float().to(\"cpu\")\n self.memory.add(state, z, done, action, next_state)\n\n def unpack(self, batch):\n batch = Transition(*zip(*batch))\n\n states = torch.cat(batch.state).view(self.batch_size, self.n_states + self.n_skills).to(self.device)\n zs = torch.cat(batch.z).view(self.batch_size, 1).long().to(self.device)\n dones = torch.cat(batch.done).view(self.batch_size, 1).to(self.device)\n actions = torch.cat(batch.action).view(-1, self.config[\"n_actions\"]).to(self.device)\n next_states = torch.cat(batch.next_state).view(self.batch_size, self.n_states + self.n_skills).to(self.device)\n\n return states, zs, dones, actions, next_states\n\n def train(self):\n if len(self.memory) < self.batch_size:\n return None\n else:\n batch = self.memory.sample(self.batch_size)\n states, zs, dones, actions, next_states = self.unpack(batch)\n p_z = from_numpy(self.p_z).to(self.device)\n\n # Calculating the 
value target\n reparam_actions, log_probs = self.policy_network.sample_or_likelihood(states)\n q1 = self.q_value_network1(states, reparam_actions)\n q2 = self.q_value_network2(states, reparam_actions)\n q = torch.min(q1, q2)\n target_value = q.detach() - self.config[\"alpha\"] * log_probs.detach()\n\n value = self.value_network(states)\n value_loss = self.mse_loss(value, target_value)\n\n logits = self.discriminator(torch.split(next_states, [self.n_states, self.n_skills], dim=-1)[0])\n p_z = p_z.gather(-1, zs)\n logq_z_ns = log_softmax(logits, dim=-1)\n rewards = logq_z_ns.gather(-1, zs).detach() - torch.log(p_z + 1e-6)\n\n # Calculating the Q-Value target\n with torch.no_grad():\n target_q = self.config[\"reward_scale\"] * rewards.float() + \\\n self.config[\"gamma\"] * self.value_target_network(next_states) * (~dones)\n q1 = self.q_value_network1(states, actions)\n q2 = self.q_value_network2(states, actions)\n q1_loss = self.mse_loss(q1, target_q)\n q2_loss = self.mse_loss(q2, target_q)\n\n policy_loss = (self.config[\"alpha\"] * log_probs - q).mean()\n logits = self.discriminator(torch.split(states, [self.n_states, self.n_skills], dim=-1)[0])\n discriminator_loss = self.cross_ent_loss(logits, zs.squeeze(-1))\n\n self.policy_opt.zero_grad()\n policy_loss.backward()\n self.policy_opt.step()\n\n self.value_opt.zero_grad()\n value_loss.backward()\n self.value_opt.step()\n\n self.q_value1_opt.zero_grad()\n q1_loss.backward()\n self.q_value1_opt.step()\n\n self.q_value2_opt.zero_grad()\n q2_loss.backward()\n self.q_value2_opt.step()\n\n self.discriminator_opt.zero_grad()\n discriminator_loss.backward()\n self.discriminator_opt.step()\n\n self.soft_update_target_network(self.value_network, self.value_target_network)\n\n return -discriminator_loss.item()\n\n def soft_update_target_network(self, local_network, target_network):\n for target_param, local_param in zip(target_network.parameters(), local_network.parameters()):\n target_param.data.copy_(self.config[\"tau\"] * 
local_param.data +\n (1 - self.config[\"tau\"]) * target_param.data)\n\n def hard_update_target_network(self):\n self.value_target_network.load_state_dict(self.value_network.state_dict())\n self.value_target_network.eval()\n\n def get_rng_states(self):\n return torch.get_rng_state(), self.memory.get_rng_state()\n\n def set_rng_states(self, torch_rng_state, random_rng_state):\n torch.set_rng_state(torch_rng_state.to(\"cpu\"))\n self.memory.set_rng_state(random_rng_state)\n\n def set_policy_net_to_eval_mode(self):\n self.policy_network.eval()\n\n def set_policy_net_to_cpu_mode(self):\n self.device = torch.device(\"cpu\")\n self.policy_network.to(self.device)\n" }, { "alpha_fraction": 0.8518518805503845, "alphanum_fraction": 0.8518518805503845, "avg_line_length": 27, "blob_id": "1e5c021e1e4d5e4dac869940a89ac618ceea2a69", "content_id": "3f75bbd116041e7818e6d1f8a52f79de34278cae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27, "license_type": "permissive", "max_line_length": 27, "num_lines": 1, "path": "/Brain/__init__.py", "repo_name": "eric-z-lin/DIAYN-PyTorch", "src_encoding": "UTF-8", "text": "from .agent import SACAgent" } ]
10
CellProfiling/cpias
https://github.com/CellProfiling/cpias
23e0f4a07c73c5e2e838590fd956d7dd8997a0a5
e2d9426436573b40625287101570b849ce9f4a38
8292191bed53756d565f7e7d66b1e57632c8d218
refs/heads/master
"2020-12-28T04:09:06.439130"
"2020-02-07T19:01:31"
"2020-02-07T19:01:31"
238,176,961
0
0
Apache-2.0
"2020-02-04T10:13:53"
"2020-02-07T18:46:08"
"2020-02-07T19:01:32"
Python
[ { "alpha_fraction": 0.6879240274429321, "alphanum_fraction": 0.6879240274429321, "avg_line_length": 22.774192810058594, "blob_id": "7780c19c99a5757cb3d517d413a5ee02cde435e3", "content_id": "45af13113748b76dff6737ce6168b834565ba369", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 737, "license_type": "permissive", "max_line_length": 81, "num_lines": 31, "path": "/cpias/cli/__init__.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "# type: ignore\n\"\"\"Provide a CLI.\"\"\"\nimport logging\n\nimport click\n\nfrom cpias import __version__\nfrom cpias.cli.client import run_client\nfrom cpias.cli.server import start_server\n\nSETTINGS = dict(help_option_names=[\"-h\", \"--help\"])\n\n\[email protected](\n options_metavar=\"\", subcommand_metavar=\"<command>\", context_settings=SETTINGS\n)\[email protected](\"--debug\", is_flag=True, help=\"Start server in debug mode.\")\[email protected]_option(__version__)\[email protected]_context\ndef cli(ctx, debug):\n \"\"\"Run CPIAS server.\"\"\"\n ctx.obj = {}\n ctx.obj[\"debug\"] = debug\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n\ncli.add_command(start_server)\ncli.add_command(run_client)\n" }, { "alpha_fraction": 0.7195767164230347, "alphanum_fraction": 0.7195767164230347, "avg_line_length": 26, "blob_id": "d93b8991c3777b34e621456bb1a46172bc865ebd", "content_id": "e2a0a3f2f8ade2dbd66d793fa781b12197d5099f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "permissive", "max_line_length": 42, "num_lines": 7, "path": "/cpias/__init__.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Provide a server for image analysis.\"\"\"\nfrom .const import VERSION\nfrom .message import Message\nfrom .server import CPIAServer\n\n__all__ = [\"Message\", 
\"CPIAServer\"]\n__version__ = VERSION\n" }, { "alpha_fraction": 0.6826625466346741, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 28.363636016845703, "blob_id": "df4d59a9508f2e3d2d791b5880606ff5dfbd636d", "content_id": "f1370f6fac148919a8ab9753eeb70214e8cf3c32", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 646, "license_type": "permissive", "max_line_length": 86, "num_lines": 22, "path": "/cpias/cli/client.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "# type: ignore\n\"\"\"Provide a CLI to start a client.\"\"\"\nimport asyncio\n\nimport click\n\nfrom cpias.cli.common import common_tcp_options\nfrom cpias.client import tcp_client\n\nDEFAULT_MESSAGE = '{\"cli\": \"client-1\", \"cmd\": \"hello\", \"dta\": {\"planet\": \"world\"}}\\n'\n\n\[email protected](options_metavar=\"<options>\")\[email protected](\"--message\", default=DEFAULT_MESSAGE, help=\"Message to send to server.\")\n@common_tcp_options\[email protected]_context\ndef run_client(ctx, message, host, port):\n \"\"\"Run an async tcp client to connect to the server.\"\"\"\n debug = ctx.obj[\"debug\"]\n asyncio.run(\n tcp_client(message, host=host, port=port), debug=debug,\n )\n" }, { "alpha_fraction": 0.6994991898536682, "alphanum_fraction": 0.72120201587677, "avg_line_length": 16.114286422729492, "blob_id": "bd2acae80e07f19cfb8c3f9d5b989dffd46ef3b4", "content_id": "d342b8dd6b0dfda2a9f78190be87dcb9ffb96828", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 599, "license_type": "permissive", "max_line_length": 57, "num_lines": 35, "path": "/tox.ini", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "[tox]\nenvlist = py37, py38, lint, mypy\nskip_missing_interpreters = True\n\n[travis]\npython =\n 3.7: py37, lint, mypy\n\n[testenv]\ncommands =\n pytest --timeout=30 --cov=cpias --cov-report= {posargs}\ndeps =\n 
-rrequirements.txt\n -rrequirements_test.txt\n\n[testenv:lint]\nbasepython = python3\nignore_errors = True\ncommands =\n black --check ./\n flake8 cpias tests\n pylint cpias tests\n pydocstyle cpias tests\ndeps =\n -rrequirements.txt\n -rrequirements_lint.txt\n -rrequirements_test.txt\n\n[testenv:mypy]\nbasepython = python3\ncommands =\n mypy cpias\ndeps =\n -rrequirements.txt\n -rrequirements_mypy.txt\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 19.799999237060547, "blob_id": "4ba69dbe6d8e8c9f225b4f6d5eb98242d4fb7cac", "content_id": "df4295b870c2e271b3a8d8223f3f0ff7019e3d59", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "permissive", "max_line_length": 46, "num_lines": 5, "path": "/cpias/exceptions.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Provide exceptions.\"\"\"\n\n\nclass CPIASError(Exception):\n \"\"\"Represent a generic CPIAS exception.\"\"\"\n" }, { "alpha_fraction": 0.630403459072113, "alphanum_fraction": 0.630403459072113, "avg_line_length": 28.53191566467285, "blob_id": "7cecc3e2561c3615dc5f9b4f4a4d79926f05d5b7", "content_id": "7b62057bef92a63813b8b5816dbe0e8df6f648b3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1388, "license_type": "permissive", "max_line_length": 84, "num_lines": 47, "path": "/cpias/commands/__init__.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Provide commands to the server.\"\"\"\nfrom functools import wraps\nfrom types import ModuleType\nfrom typing import Callable, Mapping\n\nimport pkg_resources\nimport voluptuous as vol\nfrom voluptuous.humanize import humanize_error\n\nfrom cpias.const import LOGGER\nfrom cpias.message import Message\n\n\ndef get_commands() -> Mapping[str, ModuleType]:\n \"\"\"Return a dict of command 
modules.\"\"\"\n commands = {\n entry_point.name: entry_point.load()\n for entry_point in pkg_resources.iter_entry_points(\"cpias.commands\")\n }\n return commands\n\n\ndef validate(schema: dict) -> Callable:\n \"\"\"Return a decorator for argument validation.\"\"\"\n\n vol_schema = vol.Schema(schema)\n\n def decorator(func: Callable) -> Callable:\n \"\"\"Decorate a function and validate its arguments.\"\"\"\n\n @wraps(func)\n async def check_args(server, message, **data): # type: ignore\n \"\"\"Check arguments.\"\"\"\n try:\n data = vol_schema(data)\n except vol.Invalid as exc:\n err = humanize_error(data, exc)\n LOGGER.error(\n \"Received invalid data for command %s: %s\", message.command, err\n )\n return Message(client=message.client, command=\"invalid\", data=data)\n\n return await func(server, message, **data)\n\n return check_args\n\n return decorator\n" }, { "alpha_fraction": 0.570742130279541, "alphanum_fraction": 0.5718099474906921, "avg_line_length": 29.20967674255371, "blob_id": "168e3e9187fd3ec031f8158b51ea1ffd24342800", "content_id": "53d0c03e3574950e0593fc2ce11ac00bd0ae1685", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1873, "license_type": "permissive", "max_line_length": 86, "num_lines": 62, "path": "/cpias/message.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Provide a model for messages sent and received by the server.\"\"\"\nfrom __future__ import annotations\n\nimport json\nfrom enum import Enum\nfrom typing import Optional, cast\n\nfrom .const import LOGGER\n\n\nclass Message:\n \"\"\"Represent a client/server message.\"\"\"\n\n def __init__(self, *, client: str, command: str, data: dict) -> None:\n \"\"\"Set up message instance.\"\"\"\n self.client = client\n self.command = command\n self.data = data\n self.copy = self.__copy__\n\n def __copy__(self) -> Message:\n \"\"\"Copy message.\"\"\"\n msg_data = self.encode()\n new_msg = 
cast(Message, self.decode(msg_data))\n return new_msg\n\n def __repr__(self) -> str:\n \"\"\"Return the representation.\"\"\"\n return (\n f\"{type(self).__name__}(client={self.client}, command={self.command}, \"\n f\"data={self.data})\"\n )\n\n @classmethod\n def decode(cls, data: str) -> Optional[Message]:\n \"\"\"Decode data into a message.\"\"\"\n # '{\"cli\": \"client-1\", \"cmd\": \"hello\", \"dta\": {\"param1\": \"world\"}}'\n try:\n parsed_data = json.loads(data.strip())\n except ValueError:\n LOGGER.error(\"Failed to parse message data: %s\", data)\n return None\n if not isinstance(parsed_data, dict):\n LOGGER.error(\"Incorrect message data: %s\", parsed_data)\n return None\n params: dict = {\n block.name: parsed_data.get(block.value) for block in MessageBlock\n }\n return cls(**params)\n\n def encode(self) -> str:\n \"\"\"Encode message into a data string.\"\"\"\n compiled_msg = {attr.value: getattr(self, attr.name) for attr in MessageBlock}\n return f\"{json.dumps(compiled_msg)}\\n\"\n\n\nclass MessageBlock(Enum):\n \"\"\"Represent a message block.\"\"\"\n\n client = \"cli\"\n command = \"cmd\"\n data = \"dta\"\n" }, { "alpha_fraction": 0.701298713684082, "alphanum_fraction": 0.701298713684082, "avg_line_length": 24.66666603088379, "blob_id": "91a38b23402f338b7695117082bc7084151917a2", "content_id": "fd8e39646857fb30eb5492d0eab25d3d9d04d09a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 539, "license_type": "permissive", "max_line_length": 48, "num_lines": 21, "path": "/cpias/cli/server.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "# type: ignore\n\"\"\"Provide a CLI to start the server.\"\"\"\nimport asyncio\n\nimport click\n\nfrom cpias.cli.common import common_tcp_options\nfrom cpias.server import CPIAServer\n\n\[email protected](options_metavar=\"<options>\")\n@common_tcp_options\[email protected]_context\ndef start_server(ctx, host, port):\n 
\"\"\"Start an async tcp server.\"\"\"\n debug = ctx.obj[\"debug\"]\n server = CPIAServer(host=host, port=port)\n try:\n asyncio.run(server.start(), debug=debug)\n except KeyboardInterrupt:\n asyncio.run(server.stop(), debug=debug)\n" }, { "alpha_fraction": 0.5280407667160034, "alphanum_fraction": 0.538237452507019, "avg_line_length": 26.459999084472656, "blob_id": "783bc27b5c9e0d521976cec6ffc9e1a8b4c00634", "content_id": "8b0e38dd78f6d6f1a9657171cc87b5cb4cc30a27", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1373, "license_type": "permissive", "max_line_length": 88, "num_lines": 50, "path": "/cpias/client.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Provide a test client for the CPIAServer.\"\"\"\nimport asyncio\n\nfrom cpias.const import LOGGER\n\n\nasync def tcp_client(message: str, host: str = \"127.0.0.1\", port: int = 8555) -> None:\n \"\"\"Connect to server and send message.\"\"\"\n reader, writer = await asyncio.open_connection(host, port)\n data = await reader.readline()\n version_msg = data.decode()\n LOGGER.debug(\"Version message: %s\", version_msg.strip())\n\n LOGGER.info(\"Send: %r\", message)\n writer.write(message.encode())\n await writer.drain()\n\n data = await reader.readline()\n LOGGER.info(\"Received: %r\", data.decode())\n\n LOGGER.debug(\"Closing the connection\")\n writer.close()\n await writer.wait_closed()\n\n\nif __name__ == \"__main__\":\n asyncio.run(\n tcp_client('{\"cli\": \"client-1\", \"cmd\": \"hello\", \"dta\": {\"planet\": \"world\"}}\\n'),\n debug=True,\n )\n asyncio.run(\n tcp_client(\n '{\"cli\": \"client-1\", \"cmd\": \"hello_slow\", \"dta\": {\"planet\": \"slow\"}}\\n'\n ),\n debug=True,\n )\n asyncio.run(\n tcp_client(\n '{\"cli\": \"client-1\", \"cmd\": \"hello_persistent\", '\n '\"dta\": {\"planet\": \"Mars\"}}\\n'\n ),\n debug=True,\n )\n asyncio.run(\n tcp_client(\n '{\"cli\": \"client-1\", \"cmd\": 
\"hello_process\", '\n '\"dta\": {\"planet\": \"Neptune\"}}\\n'\n ),\n debug=True,\n )\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.699999988079071, "avg_line_length": 14, "blob_id": "bd540fe5753d7df54c08565317d3c81930f8c412", "content_id": "6b3dba00eafbc81628bf8c08c8fca691916e6977", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 30, "license_type": "permissive", "max_line_length": 18, "num_lines": 2, "path": "/requirements.txt", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "click==7.0\nvoluptuous==0.11.7\n" }, { "alpha_fraction": 0.6265864968299866, "alphanum_fraction": 0.6339345574378967, "avg_line_length": 32.266666412353516, "blob_id": "95b1bc04db93ce357bc9dada6670a50887b2bd6b", "content_id": "fa4093ab33c5cb234d7cc720cb22fa19f0d37f4b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1497, "license_type": "permissive", "max_line_length": 66, "num_lines": 45, "path": "/setup.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Set up file for cpias package.\"\"\"\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nPROJECT_DIR = Path(__file__).parent.resolve()\nREADME_FILE = PROJECT_DIR / \"README.md\"\nLONG_DESCR = README_FILE.read_text(encoding=\"utf-8\")\nVERSION = (PROJECT_DIR / \"cpias\" / \"VERSION\").read_text().strip()\nGITHUB_URL = \"https://github.com/CellProfiling/cpias\"\nDOWNLOAD_URL = f\"{GITHUB_URL}/archive/master.zip\"\n\n\nsetup(\n name=\"cpias\",\n version=VERSION,\n description=\"Provide a server for image analysis\",\n long_description=LONG_DESCR,\n long_description_content_type=\"text/markdown\",\n author=\"Martin Hjelmare\",\n author_email=\"[email protected]\",\n url=GITHUB_URL,\n download_url=DOWNLOAD_URL,\n packages=find_packages(exclude=[\"contrib\", \"docs\", \"tests*\"]),\n python_requires=\">=3.7\",\n 
install_requires=[\"click\", \"voluptuous\"],\n include_package_data=True,\n entry_points={\n \"console_scripts\": [\"cpias = cpias.cli:cli\"],\n \"cpias.commands\": [\"hello = cpias.commands.hello\"],\n },\n license=\"Apache-2.0\",\n zip_safe=False,\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n" }, { "alpha_fraction": 0.6764705777168274, "alphanum_fraction": 0.6911764740943909, "avg_line_length": 28.14285659790039, "blob_id": "7f68f908e66831ef318ad9f4a6ea220d3d4ffb23", "content_id": "2108a31612dba21324150526176c256b16a537e4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 204, "license_type": "permissive", "max_line_length": 65, "num_lines": 7, "path": "/cpias/const.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Provide constants for cpias.\"\"\"\nimport logging\nfrom pathlib import Path\n\nVERSION = (Path(__file__).parent / \"VERSION\").read_text().strip()\nAPI_VERSION = \"1.0.0\"\nLOGGER = logging.getLogger(__package__)\n" }, { "alpha_fraction": 0.5118576884269714, "alphanum_fraction": 0.5316205620765686, "avg_line_length": 21, "blob_id": "28ea9dcf6bd69585f5a7fea2f024a078166264ab", "content_id": "299e15eef3a0050141279ddd6c0a567a07f67701", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 506, "license_type": "permissive", "max_line_length": 47, "num_lines": 23, "path": "/cpias/cli/common.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "# type: ignore\n\"\"\"Provide common CLI options.\"\"\"\nimport click\n\n\ndef 
common_tcp_options(func):\n \"\"\"Supply common tcp connection options.\"\"\"\n func = click.option(\n \"-p\",\n \"--port\",\n default=8555,\n show_default=True,\n type=int,\n help=\"TCP port of the connection.\",\n )(func)\n func = click.option(\n \"-H\",\n \"--host\",\n default=\"127.0.0.1\",\n show_default=True,\n help=\"TCP address of the server.\",\n )(func)\n return func\n" }, { "alpha_fraction": 0.6073781251907349, "alphanum_fraction": 0.6086956262588501, "avg_line_length": 27.37383270263672, "blob_id": "92b860c2d20c97b651424255991de915d6edd74f", "content_id": "1935e52c66f036d98c00af6324100acd94684a0b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3036, "license_type": "permissive", "max_line_length": 82, "num_lines": 107, "path": "/cpias/process.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Provide process tools.\"\"\"\nimport asyncio\nimport signal\nfrom multiprocessing import Pipe, Process\nfrom multiprocessing.connection import Connection\nfrom time import sleep\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Tuple\n\nfrom cpias.const import LOGGER\nfrom cpias.exceptions import CPIASError\n\nif TYPE_CHECKING:\n from cpias.server import CPIAServer\n\n\nclass ReceiveError(CPIASError):\n \"\"\"Error raised when receving from a process failed.\"\"\"\n\n\ndef create_process(\n server: \"CPIAServer\", create_callback: Callable, *args: Any\n) -> Tuple[Callable, Callable]:\n \"\"\"Create a persistent process.\"\"\"\n parent_conn, child_conn = Pipe()\n prc = Process(target=func_wrapper, args=(create_callback, child_conn, *args))\n prc.start()\n\n def stop_process() -> None:\n \"\"\"Stop process.\"\"\"\n prc.terminate()\n\n server.on_stop(stop_process)\n\n async def async_recv() -> Any:\n \"\"\"Receive data from the process connection asynchronously.\"\"\"\n while True:\n if not prc.is_alive() or parent_conn.poll():\n break\n await 
asyncio.sleep(0.5)\n\n if not prc.is_alive():\n raise ReceiveError\n try:\n return await server.add_executor_job(parent_conn.recv)\n except EOFError as exc:\n LOGGER.debug(\"Nothing more to receive\")\n raise ReceiveError from exc\n\n async def async_send(data: Dict[Any, Any]) -> None:\n \"\"\"Send data to the process.\"\"\"\n parent_conn.send(data)\n\n return async_recv, async_send\n\n\ndef func_wrapper(create_callback: Callable, conn: Connection, *args: Any) -> None:\n \"\"\"Wrap a function with connection to receive and send data.\"\"\"\n running = True\n\n # pylint: disable=unused-argument\n def handle_signal(signum: int, frame: Any) -> None:\n \"\"\"Handle signal.\"\"\"\n nonlocal running\n running = False\n conn.close()\n\n signal.signal(signal.SIGTERM, handle_signal)\n signal.signal(signal.SIGINT, handle_signal)\n\n try:\n callback = create_callback(*args)\n except Exception as exc: # pylint: disable=broad-except\n LOGGER.error(\"Failed to create callback: %s\", exc)\n return\n\n while running:\n\n while running:\n if conn.poll():\n break\n sleep(0.5)\n\n try:\n data = conn.recv()\n except EOFError:\n LOGGER.debug(\"Nothing more to receive\")\n break\n except OSError:\n LOGGER.debug(\"Connection is closed\")\n break\n try:\n result = callback(data)\n except Exception as exc: # pylint: disable=broad-except\n LOGGER.error(\"Failed to run callback: %s\", exc)\n break\n\n if not running:\n break\n try:\n conn.send(result)\n except ValueError:\n LOGGER.error(\"Failed to send result %s\", result)\n except OSError:\n LOGGER.debug(\"Connection is closed\")\n break\n\n LOGGER.debug(\"Exiting process\")\n" }, { "alpha_fraction": 0.7098425030708313, "alphanum_fraction": 0.7129921317100525, "avg_line_length": 25.45833396911621, "blob_id": "f5cfbd3f073ae5bb217ae63d1cee7ec38f3e5da8", "content_id": "ba3ce4b4c4e893e6a1dd8a30ef3e4bb6dd817ece", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2540, 
"license_type": "permissive", "max_line_length": 157, "num_lines": 96, "path": "/README.md", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "# cpias\n\nCell Profiling Image Analysis Server\n\n## Install\n\n```sh\ngit clone https://github.com/CellProfiling/cpias.git\ncd cpias\npip install .\n```\n\n## Run\n\n- Open a terminal, we call it terminal 1. In terminal 1, start the server.\n\n```sh\ncpias --help\n# Start the server\ncpias start-server\n```\n\n- Open another terminal, we call it terminal 2. In terminal 2 run the client.\n\n```sh\n# Run the client\ncpias run-client\n```\n\n## Add new commands\n\nNew commands should preferably be added in a standalone package, by using a `setup.py` file and the `entry_points` interface.\n`cpias` will look for entry points registered under `\"cpias.commands\"`.\n\n```py\n# setup.py\n...\nentry_points={\n \"cpias.commands\": [\"hello = cpias.commands.hello\"],\n},\n...\n```\n\nSee the [`setup.py`](setup.py) file of this package for a real example.\nSee the packaging [docs](https://packaging.python.org/guides/creating-and-discovering-plugins/#using-package-metadata) for details.\n\nThis will load all the command modules specified in the list, eg `cpias.commands.hello`.\nInside each module there should be a function defined named `register_command`. 
It should accept one positional argument, `server`.\nThis is the `cpias` server instance.\n\n```py\ndef register_command(server: \"CPIAServer\") -> None:\n \"\"\"Register the hello command.\"\"\"\n server.register_command(\"hello\", hello)\n```\n\nSee the [`hello.py`](cpias/commands/hello.py) command included in this package for examples of different types of commands.\n\n## Message structure\n\n`cpias` uses a json serialized format for the messages sent over the socket.\nEach message should contain three items in the json object, `cli`, `cmd` and `dta`.\n\n- Here's an example message as json.\n\n```json\n{\"cli\": \"client-1\", \"cmd\": \"hello\", \"dta\": {\"param1\": \"world\"}}\n```\n\n- Here's the same message serialized and with line break to mark message end. It's the serialized version of the message that should be sent over the socket.\n\n```py\n'{\"cli\": \"client-1\", \"cmd\": \"hello\", \"dta\": {\"planet\": \"world\"}}\\n'\n```\n\n- The `cli` item should mark the client id.\n- The `cmd` item should mark the command id.\n- The `dta` item should hold another json object with arbitrary data items. 
Only requirement is that the message can be serialized.\nEach item in `dta` will be passed to the command function as a named argument.\n\n## Development\n\n```sh\npip install -r requirements_dev.txt\n# Use the makefile for common dev tasks\nmake\n```\n\n- Here's a list of development tools we use.\n - black\n - flake8\n - pylint\n - pydocstyle\n - mypy\n - pytest\n - tox\n" }, { "alpha_fraction": 0.5892642736434937, "alphanum_fraction": 0.5904647707939148, "avg_line_length": 32.31999969482422, "blob_id": "89c5f0cc023297391f1d6ff543b9a0def1ec3f3d", "content_id": "1823ced04c813748e159ad0cfb388dcb9295d5d0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5831, "license_type": "permissive", "max_line_length": 88, "num_lines": 175, "path": "/cpias/server.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Provide an image analysis server.\"\"\"\nimport asyncio\nimport concurrent.futures\nimport logging\nfrom typing import Any, Callable, Coroutine, Dict, Optional\n\nfrom .commands import get_commands\nfrom .const import API_VERSION, LOGGER, VERSION\nfrom .message import Message\n\n\nclass CPIAServer:\n \"\"\"Represent an image analysis server.\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n\n def __init__(self, host: str = \"localhost\", port: int = 8555) -> None:\n \"\"\"Set up server instance.\"\"\"\n self.host = host\n self.port = port\n self.server: Optional[asyncio.AbstractServer] = None\n self.serv_task: Optional[asyncio.Task] = None\n self.commands: Dict[str, Callable] = {}\n self._on_stop_callbacks: list = []\n self._pending_tasks: list = []\n self._track_tasks = False\n self.store: dict = {}\n\n async def start(self) -> None:\n \"\"\"Start server.\"\"\"\n LOGGER.debug(\"Starting server\")\n commands = get_commands()\n for module in commands.values():\n module.register_command(self) # type: ignore\n\n server = await asyncio.start_server(\n 
self.handle_conn, host=self.host, port=self.port\n )\n self.server = server\n\n async with server:\n self.serv_task = asyncio.create_task(server.serve_forever())\n LOGGER.info(\"Serving at %s:%s\", self.host, self.port)\n await self.serv_task\n\n async def stop(self) -> None:\n \"\"\"Stop the server.\"\"\"\n LOGGER.info(\"Server shutting down\")\n self._track_tasks = True\n for stop_callback in self._on_stop_callbacks:\n stop_callback()\n\n self._on_stop_callbacks.clear()\n await self.wait_for_tasks()\n\n if self.serv_task is not None:\n self.serv_task.cancel()\n await asyncio.sleep(0) # Let the event loop cancel the task.\n\n def on_stop(self, callback: Callable) -> None:\n \"\"\"Register a callback that should be called on server stop.\"\"\"\n self._on_stop_callbacks.append(callback)\n\n def register_command(self, command_name: str, command_func: Callable) -> None:\n \"\"\"Register a command function.\"\"\"\n self.commands[command_name] = command_func\n\n async def handle_conn(\n self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter\n ) -> None:\n \"\"\"Handle a connection.\"\"\"\n # Send server version and server api version as welcome message.\n version_msg = f\"CPIAServer version: {VERSION}, api version: {API_VERSION}\\n\"\n writer.write(version_msg.encode())\n await writer.drain()\n\n await self.handle_comm(reader, writer)\n\n LOGGER.debug(\"Closing the connection\")\n writer.close()\n await writer.wait_closed()\n\n async def handle_comm(\n self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter\n ) -> None:\n \"\"\"Handle communication between client and server.\"\"\"\n addr = writer.get_extra_info(\"peername\")\n while True:\n data = await reader.readline()\n if not data:\n break\n msg = Message.decode(data.decode())\n if not msg:\n # TODO: Send invalid message message. 
# pylint: disable=fixme\n continue\n\n cmd_func = self.commands.get(msg.command)\n\n if cmd_func is None:\n LOGGER.warning(\"Received unknown command %s from %s\", msg.command, addr)\n # TODO: Send unknown command message. # pylint: disable=fixme\n continue\n\n LOGGER.debug(\"Received %s from %s\", msg, addr)\n LOGGER.debug(\"Executing command %s\", msg.command)\n\n reply = await cmd_func(self, msg, **msg.data)\n\n LOGGER.debug(\"Sending: %s\", reply)\n data = reply.encode().encode()\n writer.write(data)\n await writer.drain()\n\n def add_executor_job(self, func: Callable, *args: Any) -> Coroutine:\n \"\"\"Schedule a function to be run in the thread pool.\n\n Return a task.\n \"\"\"\n loop = asyncio.get_running_loop()\n task = loop.run_in_executor(None, func, *args)\n if self._track_tasks:\n self._pending_tasks.append(task)\n\n return task\n\n async def run_process_job(self, func: Callable, *args: Any) -> Any:\n \"\"\"Run a job in the process pool.\"\"\"\n loop = asyncio.get_running_loop()\n\n with concurrent.futures.ProcessPoolExecutor() as pool:\n task = loop.run_in_executor(pool, func, *args)\n if self._track_tasks:\n self._pending_tasks.append(task)\n result = await task\n\n return result\n\n def create_task(self, coro: Coroutine) -> asyncio.Task:\n \"\"\"Schedule a coroutine on the event loop.\n\n Use this helper to make sure the task is cancelled on server stop.\n Return a task.\n \"\"\"\n task = asyncio.create_task(coro)\n\n if self._track_tasks:\n self._pending_tasks.append(task)\n\n return task\n\n async def wait_for_tasks(self) -> None:\n \"\"\"Wait for all pending tasks.\"\"\"\n await asyncio.sleep(0)\n while self._pending_tasks:\n LOGGER.debug(\"Waiting for pending tasks\")\n pending = [task for task in self._pending_tasks if not task.done()]\n self._pending_tasks.clear()\n if pending:\n await asyncio.wait(pending)\n else:\n await asyncio.sleep(0)\n\n\ndef main() -> None:\n \"\"\"Run server.\"\"\"\n logging.basicConfig(level=logging.DEBUG, 
format=\"%(name)s: %(message)s\")\n server = CPIAServer()\n try:\n asyncio.run(server.start(), debug=True)\n except KeyboardInterrupt:\n asyncio.run(server.stop(), debug=True)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5770700573921204, "alphanum_fraction": 0.5872611403465271, "avg_line_length": 22.08823585510254, "blob_id": "a6c1052bba1054c29cc82cd950cdfd0b9306d2cb", "content_id": "91724668bc2c43340d74fd52d7c9e5be064b2386", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 785, "license_type": "permissive", "max_line_length": 84, "num_lines": 34, "path": "/tests/test_message.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Provide tests for message.\"\"\"\nfrom cpias.message import Message\n\n\ndef test_message_decode():\n \"\"\"Test message decode.\"\"\"\n msg = Message.decode(\n '{\"cli\": \"client-1\", \"cmd\": \"hello\", \"dta\": {\"param1\": \"world\"}}'\n )\n\n assert msg.client == \"client-1\"\n assert msg.command == \"hello\"\n assert msg.data == {\"param1\": \"world\"}\n\n\ndef test_decode_bad_message():\n \"\"\"Test decode bad message.\"\"\"\n msg = Message.decode(\"bad\")\n\n assert not msg\n\n msg = Message.decode('[\"val1\", \"val2\"]')\n\n assert not msg\n\n\ndef test_message_encode():\n \"\"\"Test message encode.\"\"\"\n msg_string = '{\"cli\": \"client-1\", \"cmd\": \"hello\", \"dta\": {\"param1\": \"world\"}}\\n'\n msg = Message.decode(msg_string)\n\n msg_encoded = msg.encode()\n\n assert msg_encoded == msg_string\n" }, { "alpha_fraction": 0.6502923965454102, "alphanum_fraction": 0.6511695981025696, "avg_line_length": 24.714284896850586, "blob_id": "2b4f8c34e2232808d587fee33a350e31744c4452", "content_id": "297ca6e44511f28079a3203d0f527400dd3cb25e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3420, "license_type": "permissive", "max_line_length": 
84, "num_lines": 133, "path": "/cpias/commands/hello.py", "repo_name": "CellProfiling/cpias", "src_encoding": "UTF-8", "text": "\"\"\"Provide the hello command.\"\"\"\nfrom typing import TYPE_CHECKING, Callable, Optional, Tuple\n\nfrom cpias.commands import validate\nfrom cpias.const import LOGGER\nfrom cpias.message import Message\nfrom cpias.process import ReceiveError, create_process\n\nif TYPE_CHECKING:\n from cpias.server import CPIAServer\n\n# pylint: disable=unused-argument\n\n\ndef register_command(server: \"CPIAServer\") -> None:\n \"\"\"Register the hello command.\"\"\"\n server.register_command(\"hello\", hello)\n server.register_command(\"hello_slow\", hello_slow)\n server.register_command(\"hello_persistent\", hello_persistent)\n server.register_command(\"hello_process\", hello_process)\n\n\n@validate({\"planet\": str})\nasync def hello(\n server: \"CPIAServer\", message: Message, planet: Optional[str] = None\n) -> Message:\n \"\"\"Run the hello command.\"\"\"\n if planet is None:\n planet = \"Jupiter\"\n LOGGER.info(\"Hello %s!\", planet)\n return message\n\n\n@validate({\"planet\": str})\nasync def hello_slow(\n server: \"CPIAServer\", message: Message, planet: Optional[str] = None\n) -> Message:\n \"\"\"Run the slow hello command.\"\"\"\n if planet is None:\n planet = \"Jupiter\"\n\n result = await server.run_process_job(do_cpu_work)\n\n LOGGER.info(\"Hello %s! 
The result is %s\", planet, result)\n\n reply = message.copy()\n reply.data[\"result\"] = result\n\n return reply\n\n\n@validate({\"planet\": str})\nasync def hello_persistent(\n server: \"CPIAServer\", message: Message, planet: Optional[str] = None\n) -> Message:\n \"\"\"Run the persistent hello command.\n\n This command creates a state the first time it's run.\n \"\"\"\n if planet is None:\n planet = \"Jupiter\"\n\n if \"hello_persistent_state\" not in server.store:\n server.store[\"hello_persistent_state\"] = create_state()\n\n command_task = server.store[\"hello_persistent_state\"]\n\n old_planet, new_planet = command_task(planet)\n\n LOGGER.info(\n \"Hello! The old planet was %s. The new planet is %s\", old_planet, new_planet\n )\n\n reply = message.copy()\n reply.data[\"old_planet\"] = old_planet\n reply.data[\"new_planet\"] = new_planet\n\n return reply\n\n\n@validate({\"planet\": str})\nasync def hello_process(\n server: \"CPIAServer\", message: Message, planet: Optional[str] = None\n) -> Message:\n \"\"\"Run the process hello command.\n\n This command creates a process the first time it's run.\n \"\"\"\n if planet is None:\n planet = \"Jupiter\"\n\n if \"hello_process\" not in server.store:\n server.store[\"hello_process\"] = create_process(server, create_state)\n\n recv, send = server.store[\"hello_process\"]\n\n await send(planet)\n\n try:\n old_planet, new_planet = await recv()\n except ReceiveError:\n return message\n\n LOGGER.info(\n \"Hello! The old planet was %s. 
The new planet is %s\", old_planet, new_planet\n )\n\n reply = message.copy()\n reply.data[\"old_planet\"] = old_planet\n reply.data[\"new_planet\"] = new_planet\n\n return reply\n\n\ndef do_cpu_work() -> int:\n \"\"\"Do work that should run in the process pool.\"\"\"\n return sum(i * i for i in range(10 ** 7))\n\n\ndef create_state() -> Callable:\n \"\"\"Initialize state.\"\"\"\n\n state: str = \"init\"\n\n def change_state(new_state: str) -> Tuple[str, str]:\n \"\"\"Do work that should change state.\"\"\"\n nonlocal state\n old_state = state\n state = new_state\n\n return old_state, new_state\n\n return change_state\n" } ]
18
Kw4dr4t/WebMovies
https://github.com/Kw4dr4t/WebMovies
0ea5a9751c770e6dfa4c7866a4ed27f7b4b0de15
6157f3750984376e3d322a7f413689f10c4f2c6a
bfca19c89fca924ce9bc3ab748470e2113dd39d9
refs/heads/master
"2023-03-07T08:58:04.874854"
"2021-02-22T08:31:13"
"2021-02-22T08:31:13"
337,423,588
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5030580759048462, "alphanum_fraction": 0.5795106887817383, "avg_line_length": 35.33333206176758, "blob_id": "c93764e582947d461b2ca0aab9e45f0078fcf5cf", "content_id": "6b529ca2d2e0fc84b7549a14386ede5248a0ba47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 309, "num_lines": 18, "path": "/WebMovies/migrations/0006_auto_20210209_1401.py", "repo_name": "Kw4dr4t/WebMovies", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.6 on 2021-02-09 14:01\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('WebMovies', '0005_auto_20210209_0759'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='additionalinfo',\n name='genre',\n field=models.PositiveSmallIntegerField(choices=[(8, 'Historical'), (4, 'Crime'), (7, 'Fantasy'), (3, 'Comedy'), (13, 'Wester'), (11, 'Science Fiction'), (10, 'Romance'), (5, 'Drama'), (2, 'Animation'), (0, 'Other'), (12, 'Thriller'), (9, 'Horror'), (6, 'Experimental'), (1, 'Action')], default=0),\n ),\n ]\n" }, { "alpha_fraction": 0.7634408473968506, "alphanum_fraction": 0.7634408473968506, "avg_line_length": 17.600000381469727, "blob_id": "6b6c0b5777bdf9e808738f666d68289b55688e3d", "content_id": "efe54bd2a8e027ec539ebd204b9fbdee2c7acf55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/WebMovies/apps.py", "repo_name": "Kw4dr4t/WebMovies", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass WebmoviesConfig(AppConfig):\n name = 'WebMovies'\n" }, { "alpha_fraction": 0.5038759708404541, "alphanum_fraction": 0.5839793086051941, "avg_line_length": 20.5, "blob_id": "7daf4a379e14869c8bd87851247d44c37efe28a4", "content_id": "f44e2894ffb7cced39e66ea6d86ee1e066465a58", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 49, "num_lines": 18, "path": "/WebMovies/migrations/0003_movie_description.py", "repo_name": "Kw4dr4t/WebMovies", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.6 on 2021-02-04 08:22\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('WebMovies', '0002_auto_20210204_0806'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='movie',\n name='description',\n field=models.TextField(default=''),\n ),\n ]\n" }, { "alpha_fraction": 0.5401554107666016, "alphanum_fraction": 0.5673575401306152, "avg_line_length": 26.571428298950195, "blob_id": "790181bfcfa5bda1ff17936adb58909249674929", "content_id": "6d9d40f42c59be73596458082d54b093623349d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 772, "license_type": "no_license", "max_line_length": 93, "num_lines": 28, "path": "/WebMovies/migrations/0004_auto_20210204_0835.py", "repo_name": "Kw4dr4t/WebMovies", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.6 on 2021-02-04 08:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('WebMovies', '0003_movie_description'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='movie',\n name='imdb_rating',\n field=models.DecimalField(blank=True, decimal_places=2, max_digits=4, null=True),\n ),\n migrations.AddField(\n model_name='movie',\n name='poster',\n field=models.ImageField(blank=True, null=True, upload_to='posters'),\n ),\n migrations.AddField(\n model_name='movie',\n name='premiere',\n field=models.DateField(blank=True, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.6747289299964905, "alphanum_fraction": 0.6822351813316345, "avg_line_length": 25.086956024169922, "blob_id": 
"6c8fe50619fb218ffb5dc516ca4a25b5eab474e1", "content_id": "c373cbcc581885643182a7af5f0318b7aab19a63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1199, "license_type": "no_license", "max_line_length": 81, "num_lines": 46, "path": "/WebMovies/views.py", "repo_name": "Kw4dr4t/WebMovies", "src_encoding": "UTF-8", "text": "from django.shortcuts import get_object_or_404, render, redirect\nfrom django.http import HttpResponse\nfrom WebMovies.models import Movie\nfrom .forms import MovieForm\nfrom django.contrib.auth.decorators import login_required\n\n\ndef all_movies(request):\n movies_all = Movie.objects.all()\n return render(request, \"movies.html\", {\"movies\": movies_all})\n\n\n@login_required\ndef new_movie(request):\n\n form = MovieForm(request.POST or None, request.FILES or None)\n\n if form.is_valid():\n form.save()\n return redirect(all_movies)\n\n return render(request, \"movie_form.html\", {\"form\": form, \"new\": True})\n\n\n@login_required\ndef edit_movie(request, id):\n\n movie = get_object_or_404(Movie, pk=id)\n form = MovieForm(request.POST or None, request.FILES or None, instance=movie)\n\n if form.is_valid():\n form.save()\n return redirect(all_movies)\n\n return render(request, \"movie_form.html\", {\"form\": form, \"new\": False})\n\n\n@login_required\ndef delete_movie(request, id):\n movie = get_object_or_404(Movie, pk=id)\n\n if request.method == \"POST\":\n movie.delete()\n return redirect(all_movies)\n\n return render(request, \"confirm.html\", {\"movie\": movie})" }, { "alpha_fraction": 0.5445719361305237, "alphanum_fraction": 0.5895851850509644, "avg_line_length": 40.96296310424805, "blob_id": "8a086816d0d0fd9e3a5b4ae5f0cc0e9b480e8242", "content_id": "d2d25637171050a07cc0f8b1a04253d397baef73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1133, "license_type": "no_license", "max_line_length": 318, "num_lines": 27, "path": 
"/WebMovies/migrations/0005_auto_20210209_0759.py", "repo_name": "Kw4dr4t/WebMovies", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.6 on 2021-02-09 07:59\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('WebMovies', '0004_auto_20210204_0835'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AdditionalInfo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('duration', models.PositiveIntegerField(default=0)),\n ('genre', models.PositiveSmallIntegerField(choices=[(8, 'Historical'), (4, 'Crime'), (3, 'Comedy'), (5, 'Drama'), (11, 'Science Fiction'), (0, 'Other'), (9, 'Horror'), (1, 'Action'), (6, 'Experimental'), (10, 'Romance'), (7, 'Fantasy'), (12, 'Thriller'), (13, 'Wester'), (2, 'Animation')], default=0)),\n ],\n ),\n migrations.AddField(\n model_name='movie',\n name='additional_info',\n field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='WebMovies.additionalinfo'),\n ),\n ]\n" }, { "alpha_fraction": 0.6879432797431946, "alphanum_fraction": 0.6879432797431946, "avg_line_length": 23.941177368164062, "blob_id": "54e6f8a3e88d5168b6d241b31e4a47f9a8ffa2fc", "content_id": "ca70e3aa062480e0a1be73550765b00111f34884", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 423, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/WebMovies/admin.py", "repo_name": "Kw4dr4t/WebMovies", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import AdditionalInfo, Movie\n\n# Register your models here.\n# admin.site.register(Movie)\n\n\[email protected](Movie)\nclass MovieAdmin(admin.ModelAdmin):\n # fields = [\"Title\", \"Description\", \"Year\"]\n # exclude = [\"Description\"]\n list_display = [\"title\", \"imdb_rating\", \"year\"]\n list_filter = 
(\"year\",)\n search_fields = (\"title\",)\n\n\nadmin.site.register(AdditionalInfo)" }, { "alpha_fraction": 0.7678571343421936, "alphanum_fraction": 0.7678571343421936, "avg_line_length": 55, "blob_id": "b2ad3eb50630b6f10859080c4e3f702368299588", "content_id": "ca4100418124c3c7b7d3b289e5c4d9de8df65f3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 112, "license_type": "no_license", "max_line_length": 99, "num_lines": 2, "path": "/README.md", "repo_name": "Kw4dr4t/WebMovies", "src_encoding": "UTF-8", "text": "# WebMovies\nFirst project. Made in Django, Bootstrap. Web site with CRUD usage. Can add movies, update, delete.\n" }, { "alpha_fraction": 0.5809670090675354, "alphanum_fraction": 0.6024558544158936, "avg_line_length": 30.0238094329834, "blob_id": "4fd17ed3327846920b0dc40fe3657b9dd2eb7ccb", "content_id": "c400dbaee898dfaeb19e06f8b09ec4bdc8c2b84a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1303, "license_type": "no_license", "max_line_length": 74, "num_lines": 42, "path": "/WebMovies/models.py", "repo_name": "Kw4dr4t/WebMovies", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass AdditionalInfo(models.Model):\n GENRES = {\n (0, \"Other\"),\n (1, \"Action\"),\n (2, \"Animation\"),\n (3, \"Comedy\"),\n (4, \"Crime\"),\n (5, \"Drama\"),\n (6, \"Experimental\"),\n (7, \"Fantasy\"),\n (8, \"Historical\"),\n (9, \"Horror\"),\n (10, \"Romance\"),\n (11, \"Science Fiction\"),\n (12, \"Thriller\"),\n (13, \"Wester\"),\n }\n duration = models.PositiveIntegerField(default=0)\n genre = models.PositiveSmallIntegerField(default=0, choices=GENRES)\n\n\nclass Movie(models.Model):\n title = models.CharField(max_length=64, blank=False, unique=True)\n year = models.PositiveSmallIntegerField(default=2000, blank=True)\n description = models.TextField(default=\"\")\n premiere = models.DateField(auto_now=False, null=True, blank=True)\n 
imdb_rating = models.DecimalField(\n max_digits=4, decimal_places=2, null=True, blank=True\n )\n poster = models.ImageField(upload_to=\"posters\", null=True, blank=True)\n additional_info = models.OneToOneField(\n AdditionalInfo, on_delete=models.CASCADE, null=True, blank=True\n )\n\n def __str__(self):\n return self.title_with_year()\n\n def title_with_year(self):\n return \"{} ({})\".format(self.title, self.year)\n" }, { "alpha_fraction": 0.5344506502151489, "alphanum_fraction": 0.5418994426727295, "avg_line_length": 24.619047164916992, "blob_id": "da00b55614b514cbc0c4c16af627689c25dd6628", "content_id": "9d0f27b55ca67dc230419231b3730a3e71219348", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 537, "license_type": "no_license", "max_line_length": 110, "num_lines": 21, "path": "/templates/confirm.html", "repo_name": "Kw4dr4t/WebMovies", "src_encoding": "UTF-8", "text": "{% extends 'main.html' %}\n\n{% block title %} Confirm {% endblock %}\n\n{% block page %}\n<div class=\"mx-auto card\" style=\"width: 42rem;\">\n <div class=\"card-body\">\n <h3 class=\"card-title\">You want to delete \"{{movie.title}}\"?</h3>\n <form method=\"post\">\n {%csrf_token%}\n <button type=\"submit\" class=\"btn btn-danger\">Yes, I'm sure</button>\n <a href=\"{% url 'all_movies' %}\" class=\"btn btn-primary\"><i class=\"fas fa-angle-left\"></i>Back</a>\n\n </form>\n\n </div>\n</div>\n\n\n\n{% endblock %}" } ]
10
Adeline-Wei/pixnet_chatbot
https://github.com/Adeline-Wei/pixnet_chatbot
bdf06971d40f84309a7eccf71dca2a554bd34a21
3fada3c0084344b83095e08bdc0f8a0e714b6137
dbf3860d29ae92c1851fe9a8453bc8e0e5da91bf
refs/heads/master
"2021-01-20T12:31:10.927150"
"2017-09-09T11:37:43"
"2017-09-09T11:37:43"
101,717,307
0
0
null
"2017-08-29T04:01:39"
"2017-08-28T15:26:00"
"2017-08-28T15:25:58"
null
[ { "alpha_fraction": 0.5811966061592102, "alphanum_fraction": 0.6016144156455994, "avg_line_length": 37.09090805053711, "blob_id": "47aa0e6340235cc20528657f7767b3398a7d4627", "content_id": "2358d3b2a817b5399d3d3d9167016e7ba35d01b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2108, "license_type": "no_license", "max_line_length": 115, "num_lines": 55, "path": "/content_similarity/emotion_query.py", "repo_name": "Adeline-Wei/pixnet_chatbot", "src_encoding": "UTF-8", "text": "from elasticsearch import Elasticsearch\nimport elasticsearch.helpers\nfrom datetime import datetime\nimport urllib.request\nimport json\nimport time\n\ndef queryEmotion(content_list):\n # Prepare query data\n query = {\"data\":[]}\n for text in content_list:\n query[\"data\"].append({\"message\":text})\n\n header={'Content-Type': 'application/json'}\n req = urllib.request.Request(url='http://192.168.2.100:5678/chuck/couple_all', headers=header, method='POST')\n query_str = json.dumps(query)\n # Query\n res = urllib.request.urlopen(req, query_str.encode())\n res_json = json.loads(res.read().decode())\n\n return res_json\n\ndef organize_emotion(emotion_json):\n emotion_dict = {'wow':{'count':0, 'content':[]},\n 'love':{'count':0, 'content':[]},\n 'haha':{'count':0, 'content':[]},\n 'sad':{'count':0, 'content':[]},\n 'angry':{'count':0, 'content':[]}}\n\n for sentence_res in emotion_json['data']:\n if sentence_res['ambiguous']!= True:\n if sentence_res['emotion1'] == 'angry':\n if sentence_res['emotion2'] == 'haha' or sentence_res['emotion2'] == 'love': continue \n emotion_dict[sentence_res['emotion1']]['count'] = emotion_dict[sentence_res['emotion1']]['count'] +1\n emotion_dict[sentence_res['emotion1']]['content'].append(sentence_res['message'])\n \n return emotion_dict\n\nif __name__ == '__main__':\n \n\n es = Elasticsearch([{'host': '192.168.2.10', 'port': 9200}])\n\n # Take all result\n docs = list(elasticsearch.helpers.scan(es, 
index=\"pixnet\", doc_type='food'))\n total_content = [(doc['_id'],doc['_source'].get('content')) for doc in docs]\n print('Total index: ', len(total_content))\n\n print('Query Emotion for each sentence')\n\n for pid, content in total_content:\n print('Quering & Updating', pid)\n emotion_dict = organize_emotion(queryEmotion(content.split('。')))\n es.update(index='pixnet', doc_type='food', id=total_content[0][0], body={\"doc\": {\"emotion\": emotion_dict}})\n time.sleep(0.5)\n\n \n \n" } ]
1
suenklerhaw/seoeffekt
https://github.com/suenklerhaw/seoeffekt
29f7e9517fafefbaa25b8c65d16eb19c78238b50
aceda4d53488ff6d050609828bd3cc54e7723f85
2204dc57296a11952832f20849461ab9117b4c82
refs/heads/main
"2023-01-25T00:32:38.526018"
"2022-06-07T13:00:42"
"2022-06-07T13:00:42"
318,489,287
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5553385615348816, "alphanum_fraction": 0.5579426884651184, "avg_line_length": 28.538461685180664, "blob_id": "1820feebfa2b1b12e701d98b7b0c7b52447aadd2", "content_id": "133ffe917114ac878dc2c443954c195b92c5415a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1536, "license_type": "permissive", "max_line_length": 165, "num_lines": 52, "path": "/apps/indicators/sources.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check if domain is in one of the sources lists with categorized domains\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef sources(hash, result_url, result_main):\n\n sources_categories = ['source ads', 'source company', 'source known', 'source news', 'source not optimized', 'source search engine', 'source shop', 'source top']\n\n def get_netloc(url):\n parsed = urlparse(url)\n return parsed.netloc\n\n for source_category in sources_categories:\n\n def get_sources(source_category):\n sources_file = '../../evaluations/'+source_category+'.csv'\n sources = []\n sources_loc = []\n with open(sources_file, 'r') as csvfile:\n urls = csv.reader(csvfile)\n\n for u in urls:\n sources.append(u)\n\n for s in sources:\n s_url = s[0]\n netloc = get_netloc(s_url)\n if(netloc):\n sources_loc.append(netloc)\n\n return sources_loc\n\n sources = get_sources(source_category)\n\n result_url = result_url.replace('www.', '')\n result_main = result_main.replace('www.', '')\n\n module = source_category\n\n value = '0'\n\n check_evaluations_result(hash, module, value)\n\n for s in sources:\n s = s.replace('www.', '')\n if s in result_url or s in result_main:\n value = '1'\n Evaluations.UpdateEvaluationResult(value, today, hash, module)\n" }, { "alpha_fraction": 0.7655172348022461, "alphanum_fraction": 0.7724137902259827, "avg_line_length": 17.125, "blob_id": "92389a76b901c9b40badcf5088c03ef78145f2bc", "content_id": 
"a3f7ea1205c44d6e5a1e903d8d4eb9a91f371b13", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "permissive", "max_line_length": 36, "num_lines": 8, "path": "/apps/results/delete_unassigned.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to delete unassigned results\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\nStudies.deleteunassignedResults()\n" }, { "alpha_fraction": 0.6884422302246094, "alphanum_fraction": 0.7060301303863525, "avg_line_length": 15.583333015441895, "blob_id": "02b113124e48435719af178912c4b2b19cf90097", "content_id": "b3bc5f985cf2dc046c3b71a68c607c4314355ba1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "permissive", "max_line_length": 86, "num_lines": 24, "path": "/apps/main/main_sources.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sub processes to save html source codes and to measure the loading speed of a webpage\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n\n\ndef speed():\n call([\"python3\", \"proc_speed.py\"])\n\ndef sources():\n call([\"python3\", \"proc_scraper.py\"])\n\n\n\nprocess5 = threading.Thread(target=speed)\n\nprocess6 = threading.Thread(target=sources)\n\nprocess5.start()\n\nprocess6.start()\n" }, { "alpha_fraction": 0.611751139163971, "alphanum_fraction": 0.6163594722747803, "avg_line_length": 24.52941131591797, "blob_id": "9ada66589b0d526f31f883e8716243bb69a84835", "content_id": "cc8303ea9108387c945b3d55587f763c3081d86f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 868, "license_type": "permissive", "max_line_length": 128, "num_lines": 34, "path": "/db/sources.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#Class for 
sources table\n\nclass Sources:\n\n def __init__(self, cursor):\n self.cursor = cursor\n\n\n#read from db\n\n\n#check existing pagespeed of stored sources\n def getSpeed(cursor, hash):\n sql= \"SELECT sources_speed from sources WHERE sources_hash=%s LIMIT 1\"\n data = (hash,)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def getSourcesURLs(cursor, hash):\n sql= \"SELECT sources_urls from sources WHERE sources_hash=%s LIMIT 1\"\n data = (hash,)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def resetSources(cursor):\n cursor.execute(\n \"UPDATE sources SET sources_source = NULL, sources_speed = NULL WHERE sources_source = '-1' or sources_source = '1'\"\n )\n\n\n def deleteSources(cursors, queries_id):\n pass\n" }, { "alpha_fraction": 0.498108446598053, "alphanum_fraction": 0.503783106803894, "avg_line_length": 21.338027954101562, "blob_id": "a8c601e314ad8b3106266e3b03bbbaad83cbdc55", "content_id": "38f081e7e9d97080409fad6fe44ed482b44e36f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1586, "license_type": "permissive", "max_line_length": 79, "num_lines": 71, "path": "/apps/indicators/robots.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check seo in robots.txt\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef robots(hash, result_main, main_hash):\n\n #print(\"robots\")\n\n\n def get_results_main_hash(main_hash):\n hashes = Results.getResultHashesOnMain(main_hash)\n return hashes\n\n\n robots_url = result_main+'robots.txt'\n\n module = 'robots_txt'\n\n res_hashes = get_results_main_hash(main_hash)\n\n if (not Evaluations.getEvaluationModule(hash, module)):\n\n try:\n source = Results.saveResult(robots_url)\n s = source.lower()\n\n #print(s)\n\n value = '0'\n\n\n p = \"*crawl-delay*\"\n if Helpers.matchText(s, p):\n value = '1'\n\n p = \"*user agent*\"\n if 
Helpers.matchText(s, p):\n value = '1'\n\n\n p = \"*user-agent:*\"\n if Helpers.matchText(s, p):\n value = '1'\n\n p = \"*sitemap*\"\n if Helpers.matchText(s, p):\n value = '1'\n\n p = \"*noindex*\"\n if Helpers.matchText(s, p):\n value = '1'\n\n p = \"*seo*\"\n if Helpers.matchText(s, p):\n value = '1'\n\n except:\n value = '-1'\n\n Evaluations.insertEvaluationResult(hash, module, value, today)\n\n for r_h in res_hashes:\n\n if (not Evaluations.getEvaluationModuleResult(r_h, module, value)):\n #print(r_h)\n #print(robots_url)\n Evaluations.insertEvaluationResult(r_h, module, value, today)\n" }, { "alpha_fraction": 0.5110469460487366, "alphanum_fraction": 0.5182023644447327, "avg_line_length": 30.86400032043457, "blob_id": "73db51b8dca05c56b9483ffc0dea830043fb1935", "content_id": "57091359a89dba351e44238dac9acef76cf82f8a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7969, "license_type": "permissive", "max_line_length": 290, "num_lines": 250, "path": "/libs/scraper.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#done!!!\n#sys libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n\nwith open('../../config/global_vars.ini', 'r') as f:\n array = json.load(f)\n\n#app funcs\n\ndef get_studies():\n studies = Studies.getStudiesScraper()\n return studies\n\ndef get_open_jobs_queries(study_id):\n queries = Queries.getOpenQueriesStudy(study_id)\n return queries\n\ndef get_queries(study_id):\n #get all queries from study\n queries = Queries.getQueriesNoScrapers(study_id)\n return queries\n\n\ndef generate_scraping_job(query, scraper):\n print(query)\n query_string = query[1]\n query_id = query[4]\n study_id = query[0]\n number_multi = int(scraper.number_multi)\n number_div = int(scraper.number_div)\n result_pages = int(scraper.results_range/number_multi)\n search_engine = scraper.search_engine\n start_add = int(scraper.start_add)\n check_jobs = 
Scrapers.getScrapingJobs(query_id, study_id, search_engine)\n if not check_jobs:\n for i in range(result_pages):\n if start_add > 0:\n i = i + start_add\n start = int(i * number_multi / number_div)\n try:\n Scrapers.insertScrapingJobs(query_id, study_id, query_string, search_engine, (start * number_div), date.today())\n print('Scraper Job: '+query_string+' SE:'+search_engine+' start:'+str(start)+' created')\n except:\n break;\n\n\n\n\n\n\ndef scrape_query(query, scraper):\n\n\n\n today = date.today()\n jobs = Scrapers.getScrapingJobsByQueryProgressSE(query, 0, scraper.search_engine)\n\n\n for job in jobs:\n\n search_engine = job[3]\n search_query = job[2]\n search_query = search_query.replace(' ', '+')\n start = job[4]\n query_id = job[0]\n study_id = job[1]\n job_id = job[7]\n\n print(start)\n\n progress = 2\n\n Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, progress)\n\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+search_engine+\".log\", \"Start Scraping\", 1)\n built_query = scraper.search_url+search_query+scraper.start+str(start)+scraper.language+scraper.number_parameter\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+search_engine+\".log\", built_query, 1)\n\n res = Scrapers.scrapeQuery(built_query, scraper.xpath, start, scraper.filter)\n\n if res:\n results = res[0]\n source = res[1]\n print(results)\n\n if (results == 'filtered'):\n Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, 1)\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Max Results', 1)\n exit()\n\n else:\n Scrapers.updateScrapingJob(job_id, 1)\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Start Scraping Results', 1)\n\n if scraper.serp_filter != \"\":\n tree = html.fromstring(source)\n serp_element = tree.xpath(scraper.serp_filter)\n serp = Helpers.html_escape(source)\n if serp_element:\n check_serp = Results.getSERP(query_id)\n if not check_serp:\n 
Results.insertSERP(query_id, serp, 1, today)\n\n\n\n results_position = 1\n\n for result in results:\n\n url = result\n\n check_url = Results.getURL(query_id, study_id, url, search_engine)\n\n if (not check_url):\n\n url_meta = Results.getResultMeta(url, str(study_id), search_engine, str(query_id))\n hash = url_meta[0]\n ip = url_meta[1]\n main = url_meta[2]\n main_hash = Helpers.computeMD5hash(main+str(study_id)+search_engine+str(query_id))\n contact_url = \"0\"\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", url, 1)\n contact_hash = \"0\"\n contact_url = \"0\"\n\n\n last_position = Results.getLastPosition(query_id, study_id, search_engine, today)\n\n if last_position:\n results_position = last_position[0][0] + 1\n\n\n if Results.getPosition(query_id, study_id, search_engine, results_position):\n results_position = results_position + 1\n\n\n Results.insertResult(query_id, study_id, job_id, 0, ip, hash, main_hash, contact_hash, search_engine, url, main, contact_url, today, datetime.now(), 1, results_position)\n\n check_sources = Results.getSource(hash)\n if not check_sources:\n Results.insertSource(hash, None, None, None, today, 0)\n\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Insert Result', 1)\n else:\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Error Scraping Job', 1)\n Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, -1)\n Results.deleteResultsNoScrapers(query_id, search_engine)\n exit()\n\n\n\n\n#app controller\n\ntry:\n #if not(Scrapers.getScrapingJobsByProgress(-1)):\n\n\n\n scrapers = Scrapers.generateScrapers()\n\n studies = get_studies()\n\n\n\n for s in studies:\n\n\n\n to_scrape = []\n study_id = s[-3]\n\n\n studies_scrapers = s[-1]\n\n\n if studies_scrapers:\n\n\n if \";\" in studies_scrapers:\n studies_scrapers = studies_scrapers.split(\";\")\n\n for sc in studies_scrapers:\n to_scrape.append(sc)\n else:\n 
to_scrape.append(studies_scrapers)\n\n for ts in to_scrape:\n\n\n if ts !=\"Bing_API\" and ts !=\"Google_Selenium\" and ts !=\"Google_Selenium_SV\":\n\n queries = get_queries(study_id)\n\n\n for q in queries:\n query_db = Queries.getQuerybyID(q)\n query_id = query_db[0][-2]\n\n job = 0\n check_jobs = Scrapers.getScrapingJobsBySE(query_id, ts)\n count_jobs = check_jobs[0][0]\n if count_jobs == 0:\n job = 1\n\n if job == 1:\n for s in scrapers:\n if s.search_engine == ts:\n generate_scraping_job(query_db[0], s)\n\n\n\n o = []\n\n #open queries prüfen der anzahl der jobs anhand der möglichen anzahl an jobs, nicht scrapen, wenn nicht alle jobs bereits erstellt sind; abhängig von der maximalen anzahl berechnet aus max results und result pages bzw. einfach vom der start position des letzten jobs\n\n if not(Scrapers.getScrapingJobsByProgressSE(-1, ts)):\n\n open_queries = Queries.getOpenQueriesStudybySE(study_id, ts)\n\n\n\n if open_queries:\n random.shuffle(open_queries)\n o = open_queries[0]\n\n\n\n if o:\n for s in scrapers:\n if s.search_engine == ts:\n check_error = Scrapers.getScrapingJobsByQueryProgressSE(o, -1, ts)\n check_progress = Scrapers.getScrapingJobsByQueryProgressSE(o, 2, ts)\n if not check_error and not check_progress:\n print(o)\n scrape_query(o, s)\n\n\n\n\n\n\n\n\n\nexcept Exception as e:\n print(e)\n" }, { "alpha_fraction": 0.6172769069671631, "alphanum_fraction": 0.6184210777282715, "avg_line_length": 25.484848022460938, "blob_id": "19528e6e0a040a66a967cbcb5cd87127c677ec99", "content_id": "317dde828f95abb492d830bf4ad1fd3e95fb977d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1748, "license_type": "permissive", "max_line_length": 96, "num_lines": 66, "path": "/libs/studies.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\nimport os, sys\nimport os.path\n\n#tool libs\nsys.path.insert(0, '..')\nfrom db.connect import DB\n\nsys.path.insert(0, 
'..')\nfrom db.studies import Studies as DB_Studies\n\nfrom libs.helpers import Helpers\n\nclass Studies:\n def __init__(self):\n self.data = []\n\n def getStudies():\n db = DB()\n rows = DB_Studies.getStudies(db.cursor)\n db.DBDisconnect()\n return rows\n\n def getStudiesScraper():\n db = DB()\n rows = DB_Studies.getStudiesScraper(db.cursor)\n db.DBDisconnect()\n return rows\n\n def getStudy(study_id):\n db = DB()\n rows = DB_Studies.getStudy(db.cursor, study_id)\n db.DBDisconnect()\n return rows\n\n def getStudybyName(study_name):\n db = DB()\n rows = DB_Studies.getStudybyName(db.cursor, study_name)\n db.DBDisconnect()\n return rows\n\n def getStudybyNamenotID(study_name, study_id):\n db = DB()\n rows = DB_Studies.getStudybyNamenotID(db.cursor, study_name, study_id)\n db.DBDisconnect()\n return rows\n\n def insertStudy(study_name, study_description, today, study_se):\n db = DB()\n DB_Studies.insertStudy(db.cursor, study_name, study_description, today, study_se)\n db.DBDisconnect()\n\n def updateStudy(studies_name, studies_comment, studies_se, studies_id):\n db = DB()\n DB_Studies.updateStudy(db.cursor, studies_name, studies_comment, studies_se, studies_id)\n db.DBDisconnect()\n\n def deleteStudy(study_id):\n db = DB()\n DB_Studies.deleteStudy(db.cursor, study_id)\n db.DBDisconnect()\n\n def deleteunassignedResults():\n db = DB()\n DB_Studies.deleteunassignedResults(db.cursor)\n db.DBDisconnect()\n" }, { "alpha_fraction": 0.6482269763946533, "alphanum_fraction": 0.6567375659942627, "avg_line_length": 27.200000762939453, "blob_id": "739ea40d0aa5a1ada7d20fedc04bb427d62afe3f", "content_id": "df31c9be27e3959d77bfd80526fa812323ff45eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 705, "license_type": "permissive", "max_line_length": 82, "num_lines": 25, "path": "/apps/indicators/description.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check for 
description\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef description(hash, tree):\n\n xpath_meta = \"//meta[@name='description']/@content\"\n xpath_og_property = \"//meta[@property='og:description']/@content\"\n xpath_og_name = \"//meta[@name='og:description']/@content\"\n module = 'check description'\n value = '0'\n\n meta_content = str(tree.xpath(xpath_meta))\n og_property_content = str(tree.xpath(xpath_og_property))\n og_name = str(tree.xpath(xpath_og_name))\n\n if(len(meta_content) > 5 or len(og_property_content) > 5 or len(og_name) > 5):\n value = '1'\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.5483906865119934, "alphanum_fraction": 0.5573806762695312, "avg_line_length": 29.133779525756836, "blob_id": "96c12189b1e967fc8e5dde724f20754b6214dac1", "content_id": "97e3efab9606dffa4bd9e1e60352539571809fe4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9010, "license_type": "permissive", "max_line_length": 189, "num_lines": 299, "path": "/apps/scraper/google_selenium_sv.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "import sys\nsys.path.insert(0, '..')\nfrom include import *\n\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.common.by import By\n\n\nfrom selenium.webdriver.common.by import By\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nfrom lxml import html\n\n\nimport time\n\nimport random\n\nimport pickle\n\nimport os\n\ndef generate_scraping_job(query, scraper):\n print(query)\n query_string = query[1]\n query_string = query_string.strip()\n query_id = query[4]\n study_id = 
query[0]\n search_engine = scraper\n #result_pages = 100\n result_pages = 3\n number_multi = 10\n start = 0\n check_jobs = Scrapers.getScrapingJobs(query_id, study_id, search_engine)\n\n if not check_jobs:\n Scrapers.insertScrapingJobs(query_id, study_id, query_string, search_engine, start, date.today())\n print('Scraper Job: '+query_string+' SE:'+search_engine+' start:'+str(start)+' created')\n\n #exit()\n\n\ndef scrape_query(query, scraper):\n\n today = date.today()\n jobs = Scrapers.getScrapingJobsByQueryProgressSE(query, 0, scraper)\n\n\n for job in jobs:\n\n print(job)\n search_engine = job[3]\n search_query = job[2]\n start = job[4]\n query_id = job[0]\n study_id = job[1]\n job_id = job[7]\n\n progress = 2\n\n Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, progress)\n\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+search_engine+\".log\", \"Start Scraping\", 1)\n\n os.environ['MOZ_HEADLESS'] = '0'\n\n options = Options()\n #options.add_argument('--ignore-certificate-errors-spki-list')\n #options.add_argument('--ignore-ssl-errors')\n #options.add_argument('--ignore-certificate-errors')\n #options.add_argument('--allow-insecure-localhost')\n options.log.level = 'error'\n\n profile = webdriver.FirefoxProfile()\n\n profile.set_preference(\"browser.safebrowsing.blockedURIs.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.downloads.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.forbiddenURIs.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.malware.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.phishing.enabled\", True)\n profile.set_preference(\"dom.webnotifications.enabled\", False);\n\n profile.add_extension(extension='/home/sebastian/alpha/extensions/i_dont_care_about_cookies-3.2.7-an+fx.xpi')\n\n driver = webdriver.Firefox(firefox_profile=profile, options=options)\n\n 
driver.get('https://www.google.com/webhp?hl=sv')\n\n\n\n sleeper = random.randint(3,10)\n\n\n time.sleep(sleeper)\n\n try:\n element = driver.find_element_by_id(\"L2AGLb\")\n element.click()\n\n cookies_file = \"cookies_google.pkl\"\n\n if os.path.exists(cookies_file):\n if os.path.getmtime(cookies_file) < now - 1 *86400:\n os.remove(cookies_file)\n pickle.dump(driver.get_cookies(), open(cookies_file, \"wb\"))\n\n else:\n cookies = pickle.load(open(cookies_file, \"rb\"))\n\n for cookie in cookies:\n driver.add_cookie(cookie)\n else:\n pickle.dump(driver.get_cookies(), open(cookies_file, \"wb\"))\n\n for cookie in cookies:\n driver.add_cookie(cookie)\n\n except:\n pass\n\n sleeper = random.randint(3,10)\n\n time.sleep(sleeper)\n\n search = driver.find_element_by_name('q')\n search.send_keys(search_query)\n search.send_keys(Keys.RETURN)\n\n sleeper = random.randint(6,10)\n\n time.sleep(sleeper)\n\n check_source = driver.page_source\n\n #print(check_source)\n\n if str(check_source).find(str(\"g-recaptcha\")) > 0:\n print(\"CAPTCHA\")\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Error Scraping Job', 1)\n Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, -1)\n Results.deleteResultsNoScrapers(query_id, search_engine)\n driver.quit()\n exit()\n\n xpath_res = \"//div[@class='tF2Cxc']/div[@class='yuRUbf']/a/@href\"\n\n pages = []\n\n results = []\n\n source = driver.page_source\n\n tree = html.fromstring(source)\n urls = tree.xpath(xpath_res)\n\n for url in urls:\n results.append(url)\n\n number_pages = 4\n\n x = range(2, number_pages)\n\n for n in x:\n r = str(n)\n page = 'Page '+r\n pages.append(page)\n\n\n for p in pages:\n\n print(p)\n\n xpath = \"//a[@aria-label='{}']\".format(p)\n\n try:\n\n paging = driver.find_element_by_xpath(xpath)\n\n paging.click()\n\n sleeper = random.randint(6,10)\n\n time.sleep(sleeper)\n\n source = driver.page_source\n\n check_source = driver.page_source\n\n #print(check_source)\n\n if 
str(check_source).find(str(\"g-recaptcha\")) > 0:\n print(\"CAPTCHA\")\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Error Scraping Job', 1)\n Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, -1)\n Results.deleteResultsNoScrapers(query_id, search_engine)\n driver.quit()\n exit()\n\n tree = html.fromstring(source)\n urls = tree.xpath(xpath_res)\n\n for url in urls:\n results.append(url)\n\n except:\n pass\n\n print(results)\n\n driver.quit()\n\n if results:\n Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, 1)\n results_position = 0\n for result in results:\n url = result\n check_url = Results.getURL(query_id, study_id, url, search_engine)\n\n if (not check_url):\n results_position = results_position + 1\n url_meta = Results.getResultMeta(url, str(study_id), search_engine, str(query_id))\n hash = url_meta[0]\n ip = url_meta[1]\n main = url_meta[2]\n main_hash = Helpers.computeMD5hash(main+str(study_id)+search_engine+str(query_id))\n contact_url = \"0\"\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", url, 1)\n contact_hash = \"0\"\n contact_url = \"0\"\n\n Results.insertResult(query_id, study_id, job_id, 0, ip, hash, main_hash, contact_hash, search_engine, url, main, contact_url, today, datetime.now(), 1, results_position)\n\n check_sources = Results.getSource(hash)\n if not check_sources:\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Insert Result', 1)\n Results.insertSource(hash, None, None, None, today, 0)\n\n\n\n\ntry:\n studies = Studies.getStudiesScraper()\n\n for s in studies:\n if s[-1] == \"Google_Selenium_SV\":\n scraper = \"Google_Selenium_SV\"\n\n studies_id = s[-3]\n\n print(studies_id)\n\n queries = Queries.getQueriesStudy(studies_id)\n\n scraping_jobs = Scrapers.getScrapingJobsByStudyQueries(studies_id)\n\n c_q = len(queries)\n c_s = scraping_jobs[0][0]\n\n if c_s < c_q:\n\n for q in queries:\n query_id = q[-2]\n\n job = 
0\n check_jobs = Scrapers.getScrapingJobsBySE(query_id, scraper)\n count_jobs = check_jobs[0][0]\n if count_jobs == 0:\n job = 1\n\n if job == 1:\n generate_scraping_job(q, scraper)\n\n if not(Scrapers.getScrapingJobsByProgressSE(-1, scraper)):\n\n open_queries = Queries.getOpenQueriesStudybySE(studies_id, scraper)\n\n if open_queries:\n random.shuffle(open_queries)\n o = open_queries[0]\n\n\n\n if o:\n check_progress = Scrapers.getScrapingJobsByQueryProgressSE(o, 2, scraper)\n if not check_progress:\n print(o)\n scrape_query(o, scraper)\n exit()\n\nexcept Exception as e:\n print(\"Error\")\n print(e)\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7318295836448669, "avg_line_length": 20, "blob_id": "bba99d270e079da4050e2398760898dde1fd327f", "content_id": "10aa43bd4a8448918e0373523b2c4039bde7ce99", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 399, "license_type": "permissive", "max_line_length": 70, "num_lines": 19, "path": "/apps/main/proc_indicators.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sub processes to collect seo indicators and to classify the documents\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef indicators():\n call([\"python3\", \"job_indicators.py\"])\n\ndef classifier():\n call([\"python3\", \"job_classifier.py\"])\n\n\nprocess1 = threading.Thread(target=indicators)\nprocess2 = threading.Thread(target=classifier)\n\nprocess1.start()\nprocess2.start()\n" }, { "alpha_fraction": 0.4761703908443451, "alphanum_fraction": 0.4854491651058197, "avg_line_length": 24.223403930664062, "blob_id": "f0384f256add1d052201e2e102e7355fe52e5ae0", "content_id": "6a958d4c8bd63b1aa7ad049ba6ee5202d6af45cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2371, "license_type": "permissive", "max_line_length": 137, "num_lines": 94, "path": 
"/apps/sources/pagespeed_dep.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\nimport urllib.request\nfrom urllib.error import HTTPError\n\n\ndef get_results():\n hashes = Results.getSourcesSpeedNULL()\n return hashes\n\n\ndef pagespeed(hash, url):\n\n check = Sources.getSpeed(hash)\n speed = -1\n\n\n\n if not check[0][0]:\n print(url)\n print(hash)\n\n check_source = Results.getResultsSource(hash)\n\n #print(check_source[0][0])\n\n\n if check_source[0][0] != '-1':\n\n Results.insertSpeed(hash, '-100')\n\n try:\n urllib.request.urlretrieve(url)\n\n except:\n\n Results.insertSpeed(hash, speed)\n\n print(speed)\n\n else:\n try:\n options = Options()\n options.add_argument('--ignore-certificate-errors-spki-list')\n options.add_argument('--ignore-ssl-errors')\n options.add_argument('--ignore-certificate-errors')\n options.add_argument('--allow-insecure-localhost')\n options.add_argument('--disable-extensions')\n options.add_argument('--no-sandbox')\n options.add_argument('-headless')\n options.log.level = 'error'\n options.headless = True\n\n driver = webdriver.Firefox(options=options)\n driver.set_page_load_timeout(20)\n driver.set_script_timeout(20)\n driver.get(url)\n\n\n speed = driver.execute_script(\n \"\"\"\n var loadTime = ((window.performance.timing.domComplete- window.performance.timing.navigationStart)/1000);\n return loadTime;\n \"\"\"\n )\n driver.quit()\n\n\n\n Results.insertSpeed(hash, speed)\n\n print(speed)\n\n except Exception as e:\n driver.quit()\n Results.insertSpeed(hash, speed)\n\n #exit()\n\n else:\n Results.insertSpeed(hash, speed)\n print(speed)\n\nresults = get_results()\n\nprint(results)\n\nfor r in results:\n hash = r[0]\n url = r[1]\n pagespeed(hash, url)\n" }, { "alpha_fraction": 0.48287293314933777, "alphanum_fraction": 0.48287293314933777, "avg_line_length": 28.19354820251465, "blob_id": "81d901091c57c86809997123e4d0f52b4d232bec", 
"content_id": "26bfb6c44e802ebc029eab30501ccc6ca509fbdb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 905, "license_type": "permissive", "max_line_length": 178, "num_lines": 31, "path": "/config/evaluation.ini", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "{\n \"_comment\": \"config file for all evaluation modules; text-match: simple text match objects using csv files with keywords; crawler: crawlers to save specific urls of a website\",\n \"text-match\":{\n \"tools analytics\":{\n \"source\":\"../../evaluations/analytics.csv\"\n },\n \"tools seo\":{\n \"source\":\"../../evaluations/plugins.csv\"\n },\n \"tools caching\":{\n \"source\":\"../../evaluations/caching.csv\"\n },\n \"tools content\":{\n \"source\":\"../../evaluations/content.csv\"\n },\n \"tools social\":{\n \"source\":\"../../evaluations/social.csv\"\n },\n \"tools ads\":{\n \"source\":\"../../evaluations/ads.csv\"\n }\n },\n \"crawler\":{\n \"contact\":{\n \"config\":\"../../config/contact.ini\"\n },\n \"samples\":{\n \"config\":\"../../config/samples.ini\"\n }\n }\n}\n" }, { "alpha_fraction": 0.5311993956565857, "alphanum_fraction": 0.543247640132904, "avg_line_length": 30.95977020263672, "blob_id": "99aa4b95093ce8ceb3c7657ae759fc0deb24ee16", "content_id": "5aff4af7e6263480bbafe9f470c613941cd21707", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5561, "license_type": "permissive", "max_line_length": 193, "num_lines": 174, "path": "/apps/scraper/bing_api.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to scraper bing api\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n\ndef generate_scraping_job(query, scraper):\n\n query_string = query[1]\n query_id = query[4]\n study_id = query[0]\n search_engine = scraper\n result_pages = 20\n number_multi = 50\n check_jobs = 
Scrapers.getScrapingJobs(query_id, study_id, search_engine)\n if not check_jobs:\n\n for r in range(result_pages):\n start = r * number_multi\n print(start)\n\n try:\n Scrapers.insertScrapingJobs(query_id, study_id, query_string, search_engine, start, date.today())\n print('Scraper Job: '+query_string+' SE:'+search_engine+' start:'+str(start)+' created')\n except:\n break;\n\n\n\ndef scrape_query(query, scraper):\n\n today = date.today()\n jobs = Scrapers.getScrapingJobsByQueryProgressSE(query, 0, scraper)\n\n\n subscription_key = \"b175056d732742038339a83743658448\"\n assert subscription_key\n\n search_url = \"https://api.bing.microsoft.com/v7.0/search\"\n\n for job in jobs:\n\n search_engine = job[3]\n search_query = job[2]\n start = job[4]\n query_id = job[0]\n study_id = job[1]\n job_id = job[7]\n\n progress = 2\n\n Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, progress)\n\n sleeper = random.randint(3,10)\n\n time.sleep(sleeper)\n\n\n #headers = {\"Ocp-Apim-Subscription-Key\": subscription_key, \"X-Search-ClientIP\":\"217.111.88.182\"}\n headers = {\"Ocp-Apim-Subscription-Key\": subscription_key}\n params = {\"q\": search_query, \"textDecorations\": True, \"textFormat\": \"HTML\", \"count\": 50, \"offset\": start, \"responseFilter\": \"Webpages\"}\n\n try:\n response = requests.get(search_url, headers=headers, params=params)\n response.raise_for_status()\n search_results = response.json()\n web_results = search_results['webPages']\n\n\n except:\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Error Scraping Job', 1)\n Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, -1)\n Results.deleteResultsNoScrapers(query_id, search_engine)\n exit()\n\n\n results = []\n\n for w in web_results['value']:\n results.append(w['url'])\n\n\n if results:\n\n results_check = results[-1]\n\n check_url = Results.getURL(query_id, study_id, results_check, search_engine)\n\n if check_url:\n 
Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, 1)\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Max Results', 1)\n exit()\n\n\n else:\n Scrapers.updateScrapingJob(job_id, 1)\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Start Scraping Results', 1)\n\n results_position = 1\n\n for result in results:\n\n url = result\n\n check_url = Results.getURL(query_id, study_id, url, search_engine)\n\n if (not check_url):\n\n url_meta = Results.getResultMeta(url, str(study_id), search_engine, str(query_id))\n hash = url_meta[0]\n ip = url_meta[1]\n main = url_meta[2]\n main_hash = Helpers.computeMD5hash(main+str(study_id)+search_engine+str(query_id))\n contact_url = \"0\"\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", url, 1)\n contact_hash = \"0\"\n contact_url = \"0\"\n\n\n last_position = Results.getLastPosition(query_id, study_id, search_engine, today)\n\n if last_position:\n results_position = last_position[0][0] + 1\n\n\n if Results.getPosition(query_id, study_id, search_engine, results_position):\n results_position = results_position + 1\n\n\n Results.insertResult(query_id, study_id, job_id, 0, ip, hash, main_hash, contact_hash, search_engine, url, main, contact_url, today, datetime.now(), 1, results_position)\n\n check_sources = Results.getSource(hash)\n if not check_sources:\n Results.insertSource(hash, None, None, None, today, 0)\n\n Helpers.saveLog(\"../../logs/\"+str(study_id)+\"_\"+search_query+\".log\", 'Insert Result', 1)\n\n\n\nstudies = Studies.getStudiesScraper()\n\nfor s in studies:\n if \"Bing_API\" in s[-1]:\n scraper = \"Bing_API\"\n\n studies_id = s[-3]\n queries = Queries.getQueriesStudy(studies_id)\n\n for q in queries:\n query_id = q[-2]\n\n job = 0\n check_jobs = Scrapers.getScrapingJobsBySE(query_id, scraper)\n count_jobs = check_jobs[0][0]\n if count_jobs == 0:\n job = 1\n\n if job == 1:\n generate_scraping_job(q, scraper)\n\n 
open_queries = Queries.getOpenQueriesStudybySE(studies_id, scraper)\n\n if open_queries:\n random.shuffle(open_queries)\n o = open_queries[0]\n\n\n if o:\n check_progress = Scrapers.getScrapingJobsByQueryProgressSE(o, 2, scraper)\n if not check_progress:\n print(o)\n scrape_query(o, scraper)\n" }, { "alpha_fraction": 0.5396487712860107, "alphanum_fraction": 0.5646620392799377, "avg_line_length": 28.359375, "blob_id": "be1abdcbb96bef6c133e59501c19eef1257aba4c", "content_id": "3eb20ed5b6934f5c6de77f1f9d48a0a91dba35f6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 3762, "license_type": "permissive", "max_line_length": 355, "num_lines": 128, "path": "/config/scraper.ini", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "{\n\t\"_comment\": \"configuration file for scrapers using xpath to save search results; search engine = search engine to scrape (also name of the scraper); result_range = max. number for serps to scrape; search_url: get url to perform a search; start = get parameter to identify the current serp; xpath = xpath expression to extract the organic search results\",\n\t\"scraper\": {\n\t\t\"google\": {\n\t\t\t\"search_engine\": \"Google\",\n\t\t\t\"results_range\": 1000,\n\t\t\t\"search_url\": \"https://www.google.de/search?q=\",\n\t\t\t\"start_parameter\": \"&start=\",\n\t\t\t\"start_add\": 0,\n\t\t\t\"number_parameter_100\": \"&num=100\",\n\t\t\t\"number_multi_100\": 100,\n\t\t\t\"number_div_100\": 1,\n\t\t\t\"number_parameter\": \"\",\n\t\t\t\"number_multi\": 10,\n\t\t\t\"number_div\": 1,\n\t\t\t\"xpath\": \"//div[@class='tF2Cxc']/div[@class='yuRUbf']/a/@href\",\n\t\t\t\"filter\": \"filter=0;Probiere es mit anderen Suchbegriffen.;Damit du nur die relevantesten Ergebnisse erhältst, wurden einige Einträge ausgelassen\",\n\t\t\t\"serp_filter\": \"\",\n\t\t\t\"language\": \"&lr=lang_de\",\n\t\t\t\"lang_country\": \"&cr=countryDE\"\n\t\t},\n\t\t\"google40\": 
{\n\t\t\t\"search_engine\": \"Google40\",\n\t\t\t\"results_range\": 40,\n\t\t\t\"search_url\": \"https://www.google.de/search?q=\",\n\t\t\t\"start_parameter\": \"&start=\",\n\t\t\t\"start_add\": 0,\n\t\t\t\"number_parameter\": \"\",\n\t\t\t\"number_multi\": 10,\n\t\t\t\"number_div\": 1,\n\t\t\t\"xpath\": \"//div[@class='tF2Cxc']/div[@class='yuRUbf']/a/@href\",\n\t\t\t\"filter\": \"filter=0;Probiere es mit anderen Suchbegriffen.;Damit du nur die relevantesten Ergebnisse erhältst, wurden einige Einträge ausgelassen\",\n\t\t\t\"serp_filter\": \"\",\n\t\t\t\"language\": \"\"\n\t\t},\n\t\t\"gesundbund\": {\n\t\t\t\"search_engine\": \"GesundBund\",\n\t\t\t\"results_range\": 70,\n\t\t\t\"search_url\": \"https://www.google.de/search?q=\",\n\t\t\t\"start_parameter\": \"&start=\",\n\t\t\t\"start_add\": 0,\n\t\t\t\"number_parameter\": \"&num=100\",\n\t\t\t\"number_multi\": 10,\n\t\t\t\"number_div\": 1,\n\t\t\t\"xpath\": \"//div[@class='tF2Cxc']/div[@class='yuRUbf']/a/@href\",\n\t\t\t\"filter\": \"filter=0\",\n\t\t\t\"serp_filter\":\"//div[@class='SzZmKb']\",\n\t\t\t\"language\": \"\"\n\t\t},\n\t\t\"Bing\": {\n\t\t\t\"search_engine\": \"Bing\",\n\t\t\t\"results_range\": 20,\n\t\t\t\"search_url\": \"http://www.bing.com/search?q=\",\n\t\t\t\"start_parameter\": \"&first=\",\n\t\t\t\"start_add\": 0,\n\t\t\t\"number_parameter\": \"\",\n\t\t\t\"number_multi\": 10,\n\t\t\t\"number_div\": 1,\n\t\t\t\"xpath\": \"//li[@class='b_algo']//h2/a/@href\",\n\t\t\t\"filter\": \"filter=0\",\n\t\t\t\"serp_filter\":\"\",\n\t\t\t\"language\": \"\"\n\t\t\t},\n\n\t\t\t\"Bing API\": {\n\t\t\t\t\"search_engine\": \"Bing_API\",\n\t\t\t\t\"results_range\": 1000,\n\t\t\t\t\"start_add\": 0,\n\t\t\t\t\"number_multi\": 50,\n\t\t\t\t\"number_div\": 20,\n\t\t\t\t\"search_url\": \"\",\n\t\t\t\t\"start_parameter\": \"\",\n\t\t\t\t\"start_add\": 0,\n\t\t\t\t\"number_parameter\": \"\",\n\t\t\t\t\"xpath\": \"\",\n\t\t\t\t\"filter\": \"\",\n\t\t\t\t\"serp_filter\":\"\",\n\t\t\t\t\"language\": 
\"\"\n\t\t\t\t},\n\n\t\t\t\t\"Google Selenium\": {\n\t\t\t\t\t\"search_engine\": \"Google_Selenium\",\n\t\t\t\t\t\"results_range\": 1000,\n\t\t\t\t\t\"start_add\": 0,\n\t\t\t\t\t\"number_multi\": 10,\n\t\t\t\t\t\"number_div\": 1,\n\t\t\t\t\t\"search_url\": \"\",\n\t\t\t\t\t\"start_parameter\": \"\",\n\t\t\t\t\t\"start_add\": 0,\n\t\t\t\t\t\"number_parameter\": \"\",\n\t\t\t\t\t\"xpath\": \"\",\n\t\t\t\t\t\"filter\": \"\",\n\t\t\t\t\t\"serp_filter\":\"\",\n\t\t\t\t\t\"language\": \"\"\n\t\t\t\t\t},\n\n\t\t\t\t\t\"Google Selenium_SV\": {\n\t\t\t\t\t\t\"search_engine\": \"Google_Selenium_SV\",\n\t\t\t\t\t\t\"results_range\": 1000,\n\t\t\t\t\t\t\"start_add\": 0,\n\t\t\t\t\t\t\"number_multi\": 10,\n\t\t\t\t\t\t\"number_div\": 1,\n\t\t\t\t\t\t\"search_url\": \"\",\n\t\t\t\t\t\t\"start_parameter\": \"\",\n\t\t\t\t\t\t\"start_add\": 0,\n\t\t\t\t\t\t\"number_parameter\": \"\",\n\t\t\t\t\t\t\"xpath\": \"\",\n\t\t\t\t\t\t\"filter\": \"\",\n\t\t\t\t\t\t\"serp_filter\":\"\",\n\t\t\t\t\t\t\"language\": \"\"\n\t\t\t\t\t\t},\n\n\t\t\t\"Metager\": {\n\t\t\t\t\"search_engine\": \"Metager\",\n\t\t\t\t\"results_range\": 10,\n\t\t\t\t\"search_url\": \"https://metager.de/meta/meta.ger3?submit-query=&focus=web&eingabe=\",\n\t\t\t\t\"start_parameter\": \"&s=\",\n\t\t\t\t\"start_add\": 0,\n\t\t\t\t\"number_parameter\": \"\",\n\t\t\t\t\"number_multi\": 10,\n\t\t\t\t\"number_div\": 1,\n\t\t\t\t\"xpath\": \"//h2[@class='result-title']/a/@href\",\n\t\t\t\t\"filter\": \"filter=0\",\n\t\t\t\t\"serp_filter\":\"\",\n\t\t\t\t\"language\": \"\"\n\t\t}\n}\n}\n" }, { "alpha_fraction": 0.5785828232765198, "alphanum_fraction": 0.5855087637901306, "avg_line_length": 23.376623153686523, "blob_id": "740c4fb5be58e8ae3376586ff175f967593dff3c", "content_id": "b14ebbd85d4a8e1d70eae969e7e83fbf34e14959", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1877, "license_type": "permissive", "max_line_length": 82, "num_lines": 77, "path": 
"/apps/indicators/keyword_density.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to calculate keyword_density\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef truncate(n, decimals=0):\n multiplier = 10 ** decimals\n return int(n * multiplier) / multiplier\n\ndef keyword_density(hash, query, soup, check_query):\n\n w_counter = 0\n kw_counter = 0\n kw_density = 0\n\n if check_query:\n\n query_split = query.split()\n q_patterns = []\n for q in query_split:\n q_patterns.append('*'+q+'*')\n\n # kill all script and style elements\n for script in soup([\"script\", \"style\"]):\n script.extract() # rip it out\n\n # get text\n text = soup.get_text()\n\n # break into lines and remove leading and trailing space on each\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text = ''.join(chunk for chunk in chunks if chunk)\n\n text = ' '.join(text.split())\n\n source_list = text.split(' ')\n\n w_counter = len(source_list)\n\n kw_counter = 0\n\n for q in q_patterns:\n for w in source_list:\n if Helpers.matchText(w, q):\n kw_counter = kw_counter + 1\n\n kw_density = kw_counter / w_counter * 100\n\n kw_density = truncate(kw_density, 3)\n\n kw_counter_v = str(kw_counter)\n w_counter_v = str(w_counter)\n kw_density_v = str(kw_density)\n\n module = \"check kw_count\"\n value = kw_counter_v\n\n check_evaluations_result(hash, module, value)\n\n\n module = \"check word_count\"\n value = w_counter_v\n\n check_evaluations_result(hash, module, value)\n\n\n module = \"check kw_density\"\n value = kw_density_v\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.7106227278709412, "alphanum_fraction": 0.7252747416496277, "avg_line_length": 15.058823585510254, "blob_id": "abb544f1dd5c0d8aaf35a17250f827d846880a4a", "content_id": 
"4f693603fd463f90e2e9daa5a0d9565d0a9096b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "permissive", "max_line_length": 51, "num_lines": 17, "path": "/apps/main/main_selenium.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sub processes to start the Google selenium scraper\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n\n\ndef google_selenium():\n call([\"python3\", \"proc_google_selenium.py\"])\n\n\nprocess5 = threading.Thread(target=google_selenium)\n\n\nprocess5.start()\n" }, { "alpha_fraction": 0.6877934336662292, "alphanum_fraction": 0.7100939154624939, "avg_line_length": 20.846153259277344, "blob_id": "2f9181fd376a18ff75c635324bf96dc295f4b2f5", "content_id": "52c944b6e983fd9170a9164d17e3f603319d2247", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 852, "license_type": "permissive", "max_line_length": 51, "num_lines": 39, "path": "/apps/main/main.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sub processes to start all apps\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef indicators():\n call([\"python3\", \"proc_indicators.py\"])\n\ndef scraper():\n call([\"python3\", \"proc_scraper.py\"])\n\ndef speed():\n call([\"python3\", \"proc_speed.py\"])\n\ndef bing_api():\n call([\"python3\", \"proc_bing.py\"])\n\ndef google_selenium():\n call([\"python3\", \"proc_google_selenium.py\"])\n\n\ndef unassigned():\n call([\"python3\", \"proc_unassigned.py\"])\n\nprocess1 = threading.Thread(target=scraper)\nprocess2 = threading.Thread(target=indicators)\nprocess3 = threading.Thread(target=speed)\nprocess4 = threading.Thread(target=bing_api)\nprocess5 = threading.Thread(target=google_selenium)\nprocess6 = 
threading.Thread(target=unassigned)\n\nprocess1.start()\nprocess2.start()\nprocess3.start()\nprocess4.start()\nprocess5.start()\nprocess6.start()\n" }, { "alpha_fraction": 0.5602094531059265, "alphanum_fraction": 0.5641361474990845, "avg_line_length": 20.828571319580078, "blob_id": "ef073a0de3b27b5b51b298549a0194abbdd79318", "content_id": "d4030607293704206da96de138f8c19cc2a3fd68", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 764, "license_type": "permissive", "max_line_length": 63, "num_lines": 35, "path": "/apps/indicators/keywords.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to count keywords\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n#open config file with possible keyword positions in a document\nwith open('../../config/kw.ini', 'r') as f:\n array = json.load(f)\n\nkw_array = array['keywords']\n\ntoday = date.today()\n\ndef kw(hash, tree, query, check_query):\n kw = array['keywords']\n\n for k, v in kw.items():\n key = k\n counter = 0\n xpath = v\n content = tree.xpath(xpath)\n\n if check_query:\n pattern = '*'+query+'*'\n for c in content:\n if (Helpers.matchText(c, pattern)):\n counter = counter + 1\n\n module = key\n value = str(counter)\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.5732032060623169, "alphanum_fraction": 0.581188976764679, "avg_line_length": 19.490909576416016, "blob_id": "f725a943dcfd4b7bc1c58aec0d5366fe042e6456", "content_id": "1d66cdf32a1f3351acaa1dc271d7c6db8853ed3c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1127, "license_type": "permissive", "max_line_length": 58, "num_lines": 55, "path": "/apps/indicators/links.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script count internal and external links\n\n#include libs\n\nimport sys\nsys.path.insert(0, 
'..')\nfrom include import *\n\ntoday = date.today()\n\ndef is_valid(url):\n try:\n parsed = urlparse(url)\n return bool(parsed.netloc) and bool(parsed.scheme)\n except:\n return False\n\ndef links(hash, result_main, html_source):\n link = \"\"\n internal_links = 0\n external_links = 0\n i = '0'\n e = '0'\n link_list = list()\n urls = html_source[0]\n urls_split = urls.split(\"[url]\")\n\n for u in urls_split:\n\n link_split = u.split(\" \")\n link = (link_split[-1])\n link_list.append(link)\n link_list.sort()\n\n for href in link_list:\n if is_valid(href):\n if result_main in href:\n internal_links = internal_links + 1\n else:\n external_links = external_links + 1\n\n i = str(internal_links)\n e = str(external_links)\n\n\n module = 'check internal links'\n value = i\n\n check_evaluations_result(hash, module, value)\n\n\n module = 'check external links'\n value = e\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.6304348111152649, "alphanum_fraction": 0.6376811861991882, "avg_line_length": 18.714284896850586, "blob_id": "d824cbad540d8f325b3de5d93b834fbdb5ba5f54", "content_id": "6174b22975ba61d3b250854dfd1a569214a24b8b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "permissive", "max_line_length": 62, "num_lines": 21, "path": "/apps/indicators/canonical.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check the number of canonical links\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef canonical(hash, tree):\n\n xpath = '//a[@rel=\"canonical\"] | //link[@rel=\"canonical\"]'\n module = 'check canonical'\n counter = 0\n\n res = tree.xpath(xpath)\n\n for r in res:\n counter = counter + 1\n\n value = str(counter)\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.8264585733413696, "alphanum_fraction": 0.8346339464187622, "avg_line_length": 
157.2941131591797, "blob_id": "2efd1c3bc9fca9389e80297b5ed58495d6c56555", "content_id": "a7bf506bf3cb2cde65f23c38bd20a664c2c61550", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2696, "license_type": "permissive", "max_line_length": 794, "num_lines": 17, "path": "/README.md", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "### Der Effekt der Suchmaschinenoptimierung auf die Suchergebnisse von Web-Suchmaschinen: Modellentwicklung, empirische Überprüfung und Triangulation mit Nutzer/innen- und Expert/inneneinschätzungen (SEO-Effekt)\n\nThe overall goal of the project is to describe and explain the role of search engine optimization from the perspective of the participating stakeholder groups by analysing search results/search result pages for optimized content as well as quantitative and qualitative surveys of search engine users, search engine optimizers and content providers. Thus the external influence on the results of commercial search engines can be described and quantified for the first time. The project contributes to theory building in information science by extending existing information-seeking models by a component of external influence on the search results.\n\nThe project focuses on informative content; it examines how documents that play a role in answering information-oriented search queries can be influenced externally. This sets the project apart from pure questions of online marketing, where the focus is on the optimization itself and not on the consequences for the compilation of result sets.\n\nTo measure the effect of search engine optimization, a software will be developed that can automatically query search engines and analyze the returned results. 
The results of the search result analyses carried out using it are combined with findings from the survey and further investigations of search engine users, search engine optimisers and content providers in order to obtain a comprehensive picture of the influence of search engine optimisation. Methodologically, the project is characterized by a triangulation of methods of data analysis from computer science and social science methods. The interdisciplinary basis of the analysis is unique and will significantly advance the understanding of search engines in general and the influence search engine optimization has in particular.\n\nWith search engine optimization, the project addresses a highly relevant topic for information seeking in society, which to a considerable extent takes place via commercial search engines. The expected empirical and theoretical results contribute to a better understanding of information seeking in the context of modern web infrastructures. At the level of transfer into practice, the results will be relevant to issues like consumer protection.\n\nFunding period: 05/2019 bis 07/2021\n\nFunded by: German Research Foundation (DFG – Deutsche Forschungsgemeinschaft), grant number 417552432.\n\nContacts: [SearchStudies](https://searchstudies.org)\n\nResearch data: [OSF](https://osf.io/jyv9r/)\n" }, { "alpha_fraction": 0.6235646605491638, "alphanum_fraction": 0.6255616545677185, "avg_line_length": 26.067567825317383, "blob_id": "671a9dc8fbadf7ce30337aa063a0c1684932a131", "content_id": "5c32e6a8087e09f846b99cae438c6bef5cfd2776", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2003, "license_type": "permissive", "max_line_length": 114, "num_lines": 74, "path": "/apps/classifier/classifier.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "# Decision Tree Classification\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n#load 
classifiers from config file\n\ntry:\n\n with open('../../config/classifier.ini', 'r') as f:\n array = json.load(f)\n\n classifier = array['classifier']\n\n\n hashes_check = []\n\n evaluations_modules = Evaluations.getEvaluationModules()\n\n number_of_indicators = len(evaluations_modules)\n\n hashes = Evaluations.getResultstoClassify(number_of_indicators)\n\n if(Evaluations.getUnassigned()):\n hashes_check = Evaluations.getResultstoClassifyCheck()\n\n\n #classify results using all available classifiers\n for c in classifier:\n classifier_id = c\n classification_result = \"unassigned\"\n Evaluations.deleteDupClassifiedData()\n\n\n if(hashes):\n for h in hashes:\n hash = h[0]\n if (not Evaluations.getClassificationResultValue(hash, classifier_id, classification_result)):\n Evaluations.insertClassificationResult(hash, classification_result, classifier_id, today)\n #print(hash)\n #print(classification_result)\n\n\n if(hashes_check):\n for hc in hashes_check:\n hash = hc[0]\n classes = hc[1]\n if classifier_id not in classes:\n if (not Evaluations.getClassificationResultValue(hash, classifier_id, classification_result)):\n Evaluations.insertClassificationResult(hash, classification_result, classifier_id, today)\n #print(hash)\n #print(classification_result)\n\n\n\n\n hashes_to_classify = Evaluations.getResultstoUpdateClassification(classifier_id, classification_result)\n\n\n\n if(hashes_to_classify):\n class_module = __import__(classifier_id)\n class_module.classify(classifier_id, hashes_to_classify)\n\n\nexcept Exception as e:\n print(e)\n print('error')\n\nelse:\n exit()\n" }, { "alpha_fraction": 0.6834862232208252, "alphanum_fraction": 0.6949541568756104, "avg_line_length": 18.81818199157715, "blob_id": "c21b515d6f8772620b782f5cea27e1ff3ae710f9", "content_id": "b90df4fa6795249746d83c3d73d1470b21f0e6f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "permissive", "max_line_length": 
47, "num_lines": 22, "path": "/apps/import/insert_queries.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "# Import Search Queries\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n#select csv file with search queries\nqueries_file = \"search_queries.csv\"\n\n#select study\nstudy_id = 10\n\n#open csv file with queries\nwith open(queries_file, 'r') as csvfile:\n queries = csv.reader(csvfile)\n\n #insert queries to database\n for query in queries:\n print(query[0])\n Queries.insertQuery(study_id, query[0])\n" }, { "alpha_fraction": 0.71875, "alphanum_fraction": 0.7291666865348816, "avg_line_length": 15, "blob_id": "0a1b3337ee0b3cca8b58a7a309f09ae88d9fd65d", "content_id": "239b54b563ff0e1625927b93024dd8344cadac98", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "permissive", "max_line_length": 24, "num_lines": 6, "path": "/apps/scraper/reset_sources.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\nSources.resetSources()\n" }, { "alpha_fraction": 0.7084870934486389, "alphanum_fraction": 0.7269372940063477, "avg_line_length": 22.565217971801758, "blob_id": "2b30e3755703c2b9fa6d33b45d90c10364aefb81", "content_id": "91a3ce6d5fa4a083b3bcc89b358f020cfafeea34", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "permissive", "max_line_length": 54, "num_lines": 23, "path": "/apps/main/proc_google_selenium.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sub processes to scrape Google selenium\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef google_selenium():\n call([\"python3\", \"job_google_selenium.py\"])\n\ndef google_selenium_sv():\n call([\"python3\", 
\"job_google_selenium_sv.py\"])\n\ndef reset_scraper():\n call([\"python3\", \"job_reset_scraper.py\"])\n\nprocess1 = threading.Thread(target=google_selenium)\nprocess2 = threading.Thread(target=reset_scraper)\nprocess3 = threading.Thread(target=google_selenium_sv)\n\nprocess1.start()\nprocess2.start()\nprocess3.start()\n" }, { "alpha_fraction": 0.6296851634979248, "alphanum_fraction": 0.638680636882782, "avg_line_length": 24.653846740722656, "blob_id": "062f9b0caa7a645b0040e959f447fdbf21d8a1a0", "content_id": "4a53faa0ac457e6ac3ab9e064eee18717b8d8a22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 667, "license_type": "permissive", "max_line_length": 85, "num_lines": 26, "path": "/apps/indicators/title.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check title tag in html\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef title(hash, tree):\n xpath_title = \"//title/text()\"\n xpath_meta_title = \"//meta[@name='title']/@content\"\n xpath_og_title = \"//meta[@property='og:title']/@content\"\n module = 'check title'\n\n value = '0'\n\n check_title = str(tree.xpath(xpath_title))\n check_meta_title = str(tree.xpath(xpath_meta_title))\n check_og_title = str(tree.xpath(xpath_og_title))\n\n if len(check_title) > 2 or len(check_meta_title) > 2 or len(check_og_title) > 2:\n value = '1'\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.6039419770240784, "alphanum_fraction": 0.6136109828948975, "avg_line_length": 27.606382369995117, "blob_id": "f675d4102903b37f13c767fb8e2a2c78195c7bba", "content_id": "96d3f0d9334eb20ab6771d6319a416f6b858135c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2689, "license_type": "permissive", "max_line_length": 121, "num_lines": 94, "path": "/apps/sources/pagespeed.py", "repo_name": 
"suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\nimport urllib.request\nfrom urllib.error import HTTPError\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.firefox.options import Options\nimport time\n\n\n\ndef get_results():\n hashes = Results.getSourcesSpeedNULL()\n return hashes\n\n\ndef pagespeed(hash, url):\n\n check = Sources.getSpeed(hash)\n speed = -1\n\n if not check[0][0]:\n print(url)\n print(hash)\n\n check_source = Results.getResultsSource(hash)\n\n #print(check_source[0][0])\n\n\n if check_source[0][0] != '-1':\n\n Results.insertSpeed(hash, '-100')\n\n os.environ['MOZ_HEADLESS'] = '0'\n options = Options()\n #options.add_argument('--ignore-certificate-errors-spki-list')\n #options.add_argument('--ignore-ssl-errors')\n #options.add_argument('--ignore-certificate-errors')\n #options.add_argument('--allow-insecure-localhost')\n\n options.log.level = 'error'\n\n profile = webdriver.FirefoxProfile()\n\n\n\n profile.add_extension(extension='/home/sebastian/alpha/extensions/i_dont_care_about_cookies-3.2.7-an+fx.xpi')\n\n driver = webdriver.Firefox(firefox_profile=profile, options=options)\n\n driver.set_page_load_timeout(60)\n\n\n try:\n driver.get(url)\n time.sleep(10)\n ''' Use Navigation Timing API to calculate the timings that matter the most '''\n\n navigationStart = driver.execute_script(\"return window.performance.timing.navigationStart\")\n responseStart = driver.execute_script(\"return window.performance.timing.responseStart\")\n domComplete = driver.execute_script(\"return window.performance.timing.domComplete\")\n loadStart = driver.execute_script(\"return window.performance.timing.domInteractive\")\n EventEnd = driver.execute_script(\"return window.performance.timing.loadEventEnd\")\n\n\n ''' Calculate the performance'''\n backendPerformance_calc = responseStart - navigationStart\n frontendPerformance_calc 
= domComplete - responseStart\n loadingTime = EventEnd - navigationStart\n speed = loadingTime / 1000\n\n print(speed)\n driver.quit()\n Results.insertSpeed(hash, speed)\n\n except:\n print(speed)\n Results.insertSpeed(hash, speed)\n driver.quit()\n\n\n\n\nresults = get_results()\n\nfor r in results:\n hash = r[0]\n url = r[1]\n pagespeed(hash, url)\n" }, { "alpha_fraction": 0.5524813532829285, "alphanum_fraction": 0.5548318028450012, "avg_line_length": 28.96177101135254, "blob_id": "3eb83eb9924c4fb68565f7daa980c9d9e668f187", "content_id": "13485782e631e447aa5ff644f2d9cbbf23fe6326", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14891, "license_type": "permissive", "max_line_length": 189, "num_lines": 497, "path": "/libs/scrapers.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\nimport os, sys\nimport os.path\nimport json\nfrom datetime import date\nimport random\nimport time\nimport csv\n\n#scraping libs\nfrom os.path import isfile, join\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.firefox.options import Options\nfrom lxml import html\n\n#from fake_useragent import UserAgent\n\n#tool libs\nsys.path.insert(0, '..')\nfrom db.connect import DB\n\nsys.path.insert(0, '..')\nfrom db.scrapers import Scrapers as DB_Scrapers\n\nfrom libs.helpers import Helpers\n\nclass Scrapers:\n def __init__(self, search_engine, results_range, search_url, start_parameter, start_add, number_parameter, number_multi, number_div, xpath, filter, serp_filter, language):\n self.__search_engine = search_engine\n self.__results_range = results_range\n self.__search_url = search_url\n self.__start_parameter = start_parameter\n self.__start_add = start_add\n self.__number_parameter = number_parameter\n self.__number_multi = number_multi\n self.__number_div = number_div\n self.__xpath = xpath\n self.__filter = filter\n self.__serp_filter 
= serp_filter\n self.__language = language\n\n def __getResultsRange(self):\n return self.__results_range\n\n results_range = property(__getResultsRange)\n\n def __getSearchEngine(self):\n return self.__search_engine\n\n search_engine = property(__getSearchEngine)\n\n def __getSearchURL(self):\n return self.__search_url\n\n search_url = property(__getSearchURL)\n\n\n def __getXpath(self):\n return self.__xpath\n\n xpath = property(__getXpath)\n\n def __getStartParameter(self):\n return self.__start_parameter\n\n start = property(__getStartParameter)\n\n def __getStartAdd(self):\n return self.__start_add\n\n start_add = property(__getStartAdd)\n\n def __getNumberParameter(self):\n return self.__number_parameter\n\n number_parameter = property(__getNumberParameter)\n\n def __getNumberMulti(self):\n return self.__number_multi\n\n number_multi = property(__getNumberMulti)\n\n def __getNumberDiv(self):\n return self.__number_div\n\n number_div = property(__getNumberDiv)\n\n def __getFilterParameter(self):\n return self.__filter\n\n filter = property(__getFilterParameter)\n\n def __getSERPFilterParameter(self):\n return self.__serp_filter\n\n serp_filter = property(__getSERPFilterParameter)\n\n def __getLanguage(self):\n return self.__language\n\n language = property(__getLanguage)\n\n def genProxies():\n\n proxy_url = \"https://hidemy.name/de/proxy-list/?type=s#list\"\n\n os.environ['MOZ_HEADLESS'] = '0'\n\n profile = webdriver.FirefoxProfile()\n\n profile.set_preference(\"browser.safebrowsing.blockedURIs.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.downloads.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.forbiddenURIs.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.malware.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.phishing.enabled\", True)\n profile.set_preference(\"dom.webnotifications.enabled\", False);\n\n\n driver = 
webdriver.Firefox(firefox_profile=profile)\n\n driver.set_page_load_timeout(60)\n\n driver.get(proxy_url)\n\n source = driver.page_source\n\n driver.quit()\n\n tree = html.fromstring(source)\n\n ips = \"//div[@class='table_block']//tr//td[1]/text()\"\n ports = \"//div[@class='table_block']//tr//td[2]/text()\"\n\n\n ip = tree.xpath(ips)\n\n port = tree.xpath(ports)\n\n y = 0\n\n proxies = []\n\n for i in ip:\n proxy = i+\":\"+port[y]\n y = y + 1\n proxies.append(proxy)\n\n\n proxies = proxies[1:]\n\n file = \"proxies.csv\"\n\n with open(file,'w+') as f:\n f.close()\n\n\n with open(file,'a+') as f:\n for p in proxies:\n f.write(p+'\\n')\n f.close()\n\n return proxies\n\n\n def scrapeQuery(query, search_xpath, start, filter):\n\n def extractSearchResults(source, xpath):\n tree = html.fromstring(source)\n urls = tree.xpath(xpath)\n return urls\n\n\n def useProxy(query, search_xpath, start, filter):\n\n\n\n\n proxy_file = \"proxies.csv\"\n\n proxies = []\n\n with open(proxy_file, newline='') as inputfile:\n for row in csv.reader(inputfile):\n proxies.append(row[0])\n\n\n for p in proxies:\n\n print(p)\n\n try:\n\n today = date.today()\n string_today = str(today)\n results = []\n\n\n firefox_capabilities = webdriver.DesiredCapabilities.FIREFOX\n firefox_capabilities['marionette'] = True\n\n PROXY = p\n\n firefox_capabilities['proxy'] = {\n \"proxyType\": \"MANUAL\",\n \"httpProxy\": PROXY,\n \"ftpProxy\": PROXY,\n \"sslProxy\": PROXY\n }\n\n os.environ['MOZ_HEADLESS'] = '0'\n\n options = Options()\n\n '''\n ua = UserAgent()\n userAgent = ua.random\n print(userAgent)\n options.add_argument(f'user-agent={userAgent}')\n '''\n\n options.add_argument(\"user-data-dir=selenium\")\n options.log.level = 'error'\n\n profile = webdriver.FirefoxProfile()\n\n profile.set_preference(\"browser.safebrowsing.blockedURIs.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.downloads.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.enabled\", True)\n 
profile.set_preference(\"browser.safebrowsing.forbiddenURIs.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.malware.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.phishing.enabled\", True)\n profile.set_preference(\"dom.webnotifications.enabled\", False);\n\n #profile.add_extension(extension='/home/sebastian/alpha/extensions/i_dont_care_about_cookies-3.2.7-an+fx.xpi')\n\n driver = webdriver.Firefox(firefox_profile=profile, options=options, capabilities=firefox_capabilities)\n\n driver.set_page_load_timeout(60)\n\n sleeper = random.randint(3,10)\n\n time.sleep(sleeper)\n\n print(query)\n\n driver.get(query)\n\n source = driver.page_source\n\n source = Helpers.changeCoding(source)\n\n print(source)\n\n xpath = search_xpath\n\n urls = extractSearchResults(source, xpath)\n\n print(urls)\n\n driver.quit()\n\n if str(source).find(str(\"g-recaptcha-response\")) > 0:\n print(\"CAPTCHA\")\n pass\n\n else:\n\n xpath = search_xpath\n\n urls = extractSearchResults(source, xpath)\n\n i = start\n\n if urls:\n for url in urls:\n i = i + 1\n results.append(url)\n\n search_results = list(dict.fromkeys(results))\n\n res = []\n\n if search_results:\n res = [search_results, source]\n return res\n else:\n if str(source).find(str(filter)) > 0:\n res = [\"filtered\", source]\n return res\n else:\n print(source)\n return False\n\n\n except:\n driver.quit()\n pass\n\n\n today = date.today()\n string_today = str(today)\n results = []\n\n check_filter = filter.split(';')\n\n os.environ['MOZ_HEADLESS'] = '0'\n\n\n options = Options()\n\n '''\n ua = UserAgent()\n userAgent = ua.random\n print(userAgent)\n options.add_argument(f'user-agent={userAgent}')\n '''\n\n options.add_argument(\"user-data-dir=selenium\")\n options.log.level = 'error'\n\n profile = webdriver.FirefoxProfile()\n\n profile.set_preference(\"browser.safebrowsing.blockedURIs.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.downloads.enabled\", True)\n 
profile.set_preference(\"browser.safebrowsing.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.forbiddenURIs.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.malware.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.phishing.enabled\", True)\n profile.set_preference(\"dom.webnotifications.enabled\", False);\n\n #profile.add_extension(extension='/home/sebastian/alpha/extensions/i_dont_care_about_cookies-3.2.7-an+fx.xpi')\n\n driver = webdriver.Firefox(firefox_profile=profile, options=options)\n\n driver.set_page_load_timeout(60)\n\n sleeper = random.randint(3,10)\n\n time.sleep(sleeper)\n\n driver.get(query)\n\n source = driver.page_source\n\n source = Helpers.changeCoding(source)\n\n xpath = search_xpath\n\n urls = extractSearchResults(source, xpath)\n\n driver.quit()\n\n i = start\n\n if urls:\n for url in urls:\n i = i + 1\n results.append(url)\n\n search_results = list(dict.fromkeys(results))\n\n res = []\n\n\n if search_results:\n res = [search_results, source]\n return res\n else:\n if str(source).find(str(\"g-recaptcha-response\")) > 0:\n print(\"CAPTCHA\")\n print(source)\n return False\n else:\n res = [\"filtered\", source]\n return res\n '''\n for c in check_filter:\n if str(source).find(str(c)) > 0:\n res = [\"filtered\", source]\n return res\n '''\n #useProxy(query, search_xpath, start, filter)\n\n\n\n\n\n\n\n\n\n def generateScrapers():\n #noch dynamischer generieren einfach nach anzahl der scraper, nicht mit google_config und bing_config\n with open('../../config/scraper.ini', 'r') as f:\n array = json.load(f)\n\n scrapers_json = array['scraper']\n\n scrapers = []\n\n for scraper in scrapers_json:\n config = scrapers_json[scraper]\n search_engine = config['search_engine']\n results_range = config['results_range']\n search_url = config['search_url']\n start_parameter = config['start_parameter']\n start_add = config['start_add']\n number_parameter = config['number_parameter']\n number_multi = 
config['number_multi']\n number_div = config['number_div']\n xpath = config['xpath']\n filter = config['filter']\n serp_filter = config['serp_filter']\n language = config['language']\n scrapers.append(Scrapers(search_engine, results_range, search_url, start_parameter, start_add, number_parameter, number_multi, number_div, xpath, filter, serp_filter, language))\n\n return scrapers\n\n def getScrapingJobsByProgress(progress):\n db = DB()\n rows = DB_Scrapers.getScrapingJobsByProgress(db.cursor, progress)\n db.DBDisconnect()\n return rows\n\n def getScrapingJobsByProgressSE(progress, se):\n db = DB()\n rows = DB_Scrapers.getScrapingJobsByProgressSE(db.cursor, progress, se)\n db.DBDisconnect()\n return rows\n\n def insertScrapingJobs(query_id, study_id, query_string, search_engine, start, today):\n db = DB()\n rows = DB_Scrapers.insertScrapingJobs(db.cursor, query_id, study_id, query_string, search_engine, start, today)\n db.DBDisconnect()\n\n def getScrapingJobsByQueryProgress(query_id, progress):\n db = DB()\n rows = DB_Scrapers.getScrapingJobsByQueryProgress(db.cursor, query_id, progress)\n db.DBDisconnect()\n return rows\n\n def getScrapingJobsByQueryProgressSE(query_id, progress, se):\n db = DB()\n rows = DB_Scrapers.getScrapingJobsByQueryProgressSE(db.cursor, query_id, progress, se)\n db.DBDisconnect()\n return rows\n\n def getScrapingJobsByQuery(query):\n db = DB()\n rows = DB_Scrapers.getScrapingJobsByQuery(db.cursor, query)\n db.DBDisconnect()\n return rows\n\n def getScrapingJobsBySE(query_id, search_engine):\n db = DB()\n rows = DB_Scrapers.getScrapingJobsBySE(db.cursor, query_id, search_engine)\n db.DBDisconnect()\n return rows\n\n\n def updateScrapingJobQuery(query_id, progress):\n db = DB()\n DB_Scrapers.updateScrapingJobQuery(db.cursor, query_id, progress)\n db.DBDisconnect()\n\n def updateScrapingJobQuerySeJobId(query_id, progress, se, job_id):\n db = DB()\n DB_Scrapers.updateScrapingJobQuerySeJobId(db.cursor, query_id, progress, se, job_id)\n 
db.DBDisconnect()\n\n def updateScrapingJobQuerySearchEngine(query_id, search_engine, progress):\n db = DB()\n DB_Scrapers.updateScrapingJobQuerySearchEngine(db.cursor, query_id, search_engine, progress)\n db.DBDisconnect()\n\n def updateScrapingJob(job_id, progress):\n db = DB()\n DB_Scrapers.updateScrapingJob(db.cursor, job_id, progress)\n db.DBDisconnect()\n\n def resetScrapingJobs():\n db = DB()\n DB_Scrapers.resetScrapingJobs(db.cursor)\n db.DBDisconnect()\n\n def getScrapingJobs(query_id, study_id, search_engine):\n db = DB()\n DB_Scrapers.getScrapingJobs(db.cursor, query_id, study_id, search_engine)\n db.DBDisconnect()\n\n def getScrapingJobsByStudyQueries(study):\n db = DB()\n rows = DB_Scrapers.getScrapingJobsByStudyQueries(db.cursor, study)\n db.DBDisconnect()\n return rows\n" }, { "alpha_fraction": 0.6442428231239319, "alphanum_fraction": 0.6463547945022583, "avg_line_length": 43.235294342041016, "blob_id": "9480a6915ea0f29e83e2b2f9a000081039b7e284", "content_id": "a10d59a393a9c8db77654f0622b6af1b98228f00", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12784, "license_type": "permissive", "max_line_length": 455, "num_lines": 289, "path": "/db/results.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#Class for results table\n\nclass Results:\n\n def __init__(self, cursor):\n self.cursor = cursor\n\n def getURL(cursor, query_id, study_id, results_url, results_se):\n sql= \"select results_id from results where results_queries_id = %s AND results_studies_id= %s AND results_url = %s AND results_se = %s LIMIT 1\"\n data = (query_id, study_id, results_url, results_se)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def getLastPosition(cursor, query_id, study_id, results_se, today):\n sql= \"select results_position from results where results_queries_id = %s AND results_studies_id\t= %s AND results_se = %s AND results_date = %s ORDER BY 
results_id DESC LIMIT 1\"\n data = (query_id, study_id, results_se, today)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n\n def getPosition(cursor, query_id, study_id, search_engine, results_position):\n sql= \"select results_position from results where results_queries_id = %s AND results_studies_id\t= %s AND results_se = %s AND results_position = %s ORDER BY results_id DESC LIMIT 1\"\n data = (query_id, study_id, search_engine, results_position)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n\n\n def insertResult(cursor, query_id, study_id, job_id, upload, ip, hash, main_hash, contact_hash, search_engine, url, main, contact, today, timestamp, progress, results_position):\n cursor.execute(\n \"INSERT INTO results (results_queries_id, results_studies_id, results_scrapers_id, results_import, results_ip, results_hash, results_main_hash, results_contact_hash, results_se, results_url, results_main, results_contact, results_date, results_timestamp, results_progress, results_position) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT DO NOTHING;\", # remove parenthesis here, which ends the execute call\n (query_id, study_id, job_id, upload, ip, hash, main_hash, contact_hash, search_engine, url, main, contact, today, timestamp, progress, results_position,)\n )\n\n def insertSource(cursor, hash, source, urls, comments, date, progress):\n cursor.execute(\n \"INSERT INTO sources (sources_hash, sources_source, sources_urls, sources_comments, sources_date, sources_progress) VALUES (%s, %s, %s, %s, %s, %s);\",\n (hash, source, urls, comments, date, progress,)\n )\n\n\n\n def updateSources(cursor, hash, source, urls, comments, date, progress):\n try:\n cursor.execute(\n \"UPDATE sources SET sources_source = %s, sources_urls = %s, sources_comments = %s, sources_date = %s, sources_progress = %s WHERE sources_hash = %s\",\n (source, urls, comments, date, progress, hash,)\n )\n except:\n 
print(\"exit_error\")\n cursor.execute(\n \"UPDATE sources SET sources_source = %s, sources_urls = %s, sources_comments = %s, sources_date = %s, sources_progress = %s WHERE sources_hash = %s\",\n (\"-1\", \"-1\", \"-1\", date, progress, hash,)\n )\n\n def insertSpeed(cursor, hash, speed):\n cursor.execute(\n \"UPDATE sources SET sources_speed = %s WHERE sources_hash = %s\",\n (speed, hash,)\n )\n\n def getSpeed(cursor, hash):\n sql= \"SELECT sources_speed FROM sources WHERE sources_hash = %s LIMIT 1\"\n data = (hash,)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def getSource(cursor, hash):\n sql= \"SELECT sources_id from sources WHERE sources_hash=%s LIMIT 1\"\n data = (hash,)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def getRecentSource(cursor, hash):\n #sql = \"SELECT sources_id, sources_date from sources WHERE sources_hash=%s AND sources_date < NOW() - INTERVAL %s day LIMIT 1\"\n sql = \"SELECT sources_id, sources_date from sources WHERE sources_hash=%s AND sources_source IS NOT NULL LIMIT 1\"\n\n #data = (hash, days)\n\n data = (hash,)\n\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n\n\n def getResultsSource(cursor, hash):\n sql= \"SELECT sources_source, sources_urls, sources_comments, sources_date from sources WHERE sources_hash=%s AND sources_source !='0'\"\n data = (hash)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n\n def getAllResultsIdsByStudy(cursor, results_studies_id):\n sql= \"SELECT * from results WHERE results_studies_id=%s\"\n data = (results_studies_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def countResultsbyStudy(cursor, studies_id):\n sql = \"SELECT COUNT(results_id) FROM results, sources WHERE results_studies_id=%s AND results_hash = sources_hash\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def countResultsbyStudySE(cursor, studies_id, 
se):\n sql = \"SELECT COUNT(results_id) FROM results, sources WHERE results_studies_id=%s AND results_hash = sources_hash AND results_se =%s\"\n data = (studies_id, se)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n\n def countClassifiedResultsbyStudy(cursor, studies_id):\n sql = \"SELECT COUNT(DISTINCT classifications_id) FROM classifications, results WHERE classifications_hash = results_hash AND results_studies_id = %s\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def countClassifiedResultsbyStudySE(cursor, studies_id, se):\n sql = \"SELECT COUNT(DISTINCT classifications_id) FROM classifications, results WHERE classifications_hash = results_hash AND results_studies_id = %s AND results_se =%s\"\n data = (studies_id, se)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def countFailedResultsbyStudy(cursor, studies_id):\n sql = \"SELECT COUNT(DISTINCT results_id) FROM sources, results WHERE sources_hash = results_hash AND results_studies_id = %s AND sources_source = '-1'\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def countResultsQuery(cursor, results_queries_id):\n sql = \"SELECT COUNT(results_id) FROM results WHERE results_queries_id = %s\"\n data = (results_queries_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def countClassifiedResultsbyQuery(cursor, results_queries_id):\n sql = \"SELECT COUNT(DISTINCT classifications_id) FROM classifications, results WHERE classifications_hash = results_hash AND results_queries_id = %s\"\n data = (results_queries_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n\n def getResultsIdsByStudyContact(cursor, results_studies_id, results_contact):\n sql= \"SELECT * from results WHERE results_studies_id=%s AND results_contact=%s LIMIT 500\"\n data = (results_studies_id, results_contact)\n cursor.execute(sql,(data))\n 
rows = cursor.fetchall()\n return rows\n\n def getResultById(cursor, results_id):\n sql= \"SELECT results_id, results_hash from results WHERE results_id=%s\"\n data = (results_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def getResultByHash(cursor, hash):\n sql= \"SELECT results_main, results_main_hash from results WHERE results_hash=%s\"\n cursor.execute(sql,hash)\n rows = cursor.fetchall()\n return rows\n\n def getRecentResultByHash(cursor, hash):\n sql= \"SELECT * from results WHERE results_hash=%s ORDER BY results_date DESC LIMIT 1\"\n data = (hash)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n\n\n def insertEvaluationResult(cursor, evaluations_results_id, evaluations_module, evaluations_result):\n cursor.execute(\"INSERT INTO evaluations VALUES(%s,%s,%s) ON CONFLICT DO NOTHING;\", (evaluations_results_id, evaluations_module, evaluations_result,))\n\n def getResultsSourcesNULL(cursor):\n sql= \"SELECT results_hash, results_main_hash, results_url, results_main, results_id FROM results TABLESAMPLE SYSTEM_ROWS(50000) LEFT JOIN sources ON results_hash = sources_hash WHERE sources_source is NULL\"\n cursor.execute(sql)\n rows = cursor.fetchall()\n return rows\n\n\n\n def insertContactResult(cursor, contact_url, contact_hash, results_id):\n cursor.execute(\n \"update results SET results_contact= %s, results_contact_hash = %s where results_id = %s\",\n (contact_url, contact_hash, results_id)\n )\n\n\n def updateContactProgress(cursor, results_contact, results_id):\n cursor.execute(\n \"update results SET results_contact= %s where results_id = %s\",\n (results_contact, results_id)\n )\n\n\n\n def getResults(cursor):\n sql= \"select results_id, results_position, results_queries_id, results_url, results_main, results_hash from results\"\n cursor.execute(sql)\n rows = cursor.fetchall()\n return rows\n\n\n\n def getSourcesSpeedNULL(cursor):\n sql = \"select distinct ON(sources_hash) sources_hash, results_url 
from sources, results TABLESAMPLE SYSTEM_ROWS(2000) where results_hash = sources_hash and sources_source IS NOT NULL and sources_source !='0' and sources_speed IS NULL\"\n cursor.execute(sql)\n rows = cursor.fetchall()\n return rows\n\n\n def getResultHashesOnMain(cursor, main_hash):\n sql = \"select distinct on(results_hash) results_hash from sources, results where results_hash = sources_hash and results_main_hash = %s and sources_source IS NOT NULL and sources_source !='0'\"\n cursor.execute(sql, (main_hash,))\n rows = cursor.fetchall()\n return rows\n\n def getSERP(cursor, query_id):\n sql= \"SELECT serps_queries_id from serps WHERE serps_queries_id=%s\"\n cursor.execute(sql,(query_id,))\n rows = cursor.fetchall()\n return rows\n\n def insertSERP(cursor, query_id, serp, serp_scraper, today):\n cursor.execute(\"INSERT INTO serps (serps_queries_id, serps_result, serps_scrapers_result, serps_date) VALUES (%s, %s, %s, %s) ON CONFLICT DO NOTHING;\", (query_id, serp, serp_scraper, today,))\n\n def deleteResults(cursor, queries_id, results_se):\n\n sql= \"DELETE FROM sources USING results WHERE results_hash = sources_hash AND results_queries_id = %s AND results_se =%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n\n sql= \"DELETE FROM classifications USING results WHERE results_hash = classifications_hash AND results_queries_id = %s AND results_se =%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n\n sql= \"DELETE FROM evaluations USING results WHERE results_hash = evaluations_results_hash AND results_queries_id = %s AND results_se =%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n\n sql= \"DELETE FROM serps WHERE serps_queries_id = %s AND serps_se =%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n\n sql= \"DELETE from results WHERE results_queries_id=%s AND results_se =%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n\n sql=\"DELETE FROM scrapers WHERE 
scrapers_queries_id\t=%s AND scrapers_se=%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n\n def deleteResultsNoScrapers(cursor, queries_id, results_se):\n sql= \"DELETE FROM sources USING results WHERE results_hash = sources_hash AND results_queries_id = %s AND results_se =%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n\n sql= \"DELETE FROM classifications USING results WHERE results_hash = classifications_hash AND results_queries_id = %s AND results_se =%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n\n sql= \"DELETE FROM evaluations USING results WHERE results_hash = evaluations_results_hash AND results_queries_id = %s AND results_se =%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n\n sql= \"DELETE FROM serps WHERE serps_queries_id = %s AND serps_se =%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n\n sql= \"DELETE from results WHERE results_queries_id=%s AND results_se =%s\"\n data = (queries_id, results_se)\n cursor.execute(sql,(data))\n" }, { "alpha_fraction": 0.6800000071525574, "alphanum_fraction": 0.6977777481079102, "avg_line_length": 16.30769157409668, "blob_id": "c782b622ea2a8715e0b142264559a2b2ec507675", "content_id": "3105f2931c6de153d7c8d271888810377ea20eac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "permissive", "max_line_length": 44, "num_lines": 13, "path": "/apps/main/proc_bing.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sub processes to scrape bing\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef bing_api():\n call([\"python3\", \"job_bing_api.py\"])\n\nprocess1 = threading.Thread(target=bing_api)\n\nprocess1.start()\n" }, { "alpha_fraction": 0.6238532066345215, "alphanum_fraction": 0.6345565915107727, "avg_line_length": 23.22222137451172, "blob_id": 
"0d756436e592298b2eb2a091ac9b6b01c5314304", "content_id": "ecc060666b8f212d0b1883cde51059bd57f1d9b1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "permissive", "max_line_length": 79, "num_lines": 27, "path": "/apps/main/job_speed.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#background scheduler to measure the loading speed of a webpage\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\njob_defaults = {\n 'coalesce': False,\n 'max_instances': 2\n}\n\ndef job():\n os.chdir('../sources/')\n os.system('python3 pagespeed.py')\n\nif __name__ == '__main__':\n scheduler = BackgroundScheduler(job_defaults=job_defaults)\n scheduler.add_job(job, 'interval', seconds=60)\n scheduler.start()\n print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))\n\n try:\n while True:\n time.sleep(2)\n except (KeyboardInterrupt, SystemExit):\n scheduler.shutdown()\n" }, { "alpha_fraction": 0.631472110748291, "alphanum_fraction": 0.6321489214897156, "avg_line_length": 26.36111068725586, "blob_id": "3d652f403ba927f48327ebe75bd8efcbee2b49e9", "content_id": "01620d833361b8a22ea84b96aa19b7bf9c1fde71", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2955, "license_type": "permissive", "max_line_length": 76, "num_lines": 108, "path": "/libs/queries.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\nimport os, sys\nimport os.path\n\n#tool libs\nsys.path.insert(0, '..')\nfrom db.connect import DB\n\nsys.path.insert(0, '..')\nfrom db.queries import Queries as DB_Queries\n\nfrom libs.helpers import Helpers\n\n# class for queries functions; mainly to read and write database content\nclass Queries:\n def __init__(self):\n self.data = []\n\n#read from db\n\n def getQueriesStudy(study_id):\n db = DB()\n rows = 
DB_Queries.getQueriesStudy(db.cursor, study_id)\n db.DBDisconnect()\n return rows\n\n def getQueriesIdStudy(study_id):\n db = DB()\n rows = DB_Queries.getQueriesIdStudy(db.cursor, study_id)\n db.DBDisconnect()\n return rows\n\n def countQueriesStudy(studies_id):\n db = DB()\n rows = DB_Queries.countQueriesStudy(db.cursor, studies_id)\n db.DBDisconnect()\n return rows\n\n#function to read all queries of a study\n def getQueriesNoScrapers(study_id):\n db = DB()\n rows = DB_Queries.getQueriesNoScrapers(db.cursor, study_id)\n db.DBDisconnect()\n return rows\n\n#function to read all unprocessed queries\n def getOpenQueriesStudy(study_id):\n db = DB()\n rows = DB_Queries.getOpenQueriesStudy(db.cursor, study_id)\n db.DBDisconnect()\n return rows\n\n def getOpenQueriesStudybySE(study_id, se):\n db = DB()\n rows = DB_Queries.getOpenQueriesStudybySE(db.cursor, study_id, se)\n db.DBDisconnect()\n return rows\n\n def getOpenErrrorQueriesStudy(study_id):\n db = DB()\n rows = DB_Queries.getOpenErrrorQueriesStudy(db.cursor, study_id)\n db.DBDisconnect()\n return rows\n\n#open one specific query\n def getQuery(study_id, query):\n db = DB()\n rows = DB_Queries.getQuery(db.cursor, study_id, query)\n db.DBDisconnect()\n return rows\n\n def getQuerybyID(query_id):\n db = DB()\n rows = DB_Queries.getQuerybyID(db.cursor, query_id)\n db.DBDisconnect()\n return rows\n\n#open query of a result\n def getQuerybyResult(results_id):\n db = DB()\n rows = DB_Queries.getQuerybyResult(db.cursor, results_id)\n db.DBDisconnect()\n return rows\n\n def deleteQuery(studies_id, query):\n db = DB()\n rows = DB_Queries.deleteQuery(db.cursor, studies_id, query)\n db.DBDisconnect()\n\n\n def deleteQuerybyId(studies_id, queries_id):\n db = DB()\n rows = DB_Queries.deleteQuerybyId(db.cursor, studies_id, queries_id)\n db.DBDisconnect()\n\n#write to db\n\n#function to write query to db\n def insertQuery(study_id, query, date):\n db = DB()\n DB_Queries.insertQuery(db.cursor, study_id, query, date)\n 
db.DBDisconnect()\n\n#function to write query to db with aditional information\n def insertQueryVal(study_id, query, comment, date):\n db = DB()\n DB_Queries.insertQueryVal(db.cursor, study_id, query, comment, date)\n db.DBDisconnect()\n" }, { "alpha_fraction": 0.6133217811584473, "alphanum_fraction": 0.6280276775360107, "avg_line_length": 20.811321258544922, "blob_id": "92f2be752e90a15f4de6363aa4c6ef681f5c7c2e", "content_id": "2ff23efd51d9c1eaa3373bf2a2d478db3fca8697", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1156, "license_type": "permissive", "max_line_length": 185, "num_lines": 53, "path": "/apps/import/import.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\nfile = \"../../evaluations/source known.csv\"\n\nstudy_id = 9\n\nquery_id = 0\n\nscrapers_id = 0\n\nsearch_engine = \"Import\"\n\nresults_position = 0\n\nresults_import = 1\n\njob_id = 0\n\ntoday = date.today()\n\ntimestamp = datetime.now()\n\n\n\nwith open(file, 'r') as csvfile:\n csv_result = csv.reader(csvfile, delimiter=',', quotechar='\"')\n source = list(csv_result)\n\nfor url in source:\n\n url = url[0]\n\n check_url = Results.getURL(query_id, study_id, url, search_engine)\n if (not check_url):\n\n url_meta = Results.getResultMeta(url)\n hash = url_meta[0]\n ip = url_meta[1]\n main = url_meta[2]\n main_hash = Helpers.computeMD5hash(main)\n contact_url = \"0\"\n contact_hash = \"0\"\n contact_url = \"0\"\n\n Results.insertResult(query_id, study_id, job_id, results_import, ip, hash, main_hash, contact_hash, search_engine, url, main, contact_url, today, timestamp, 1, results_position)\n\n check_sources = Results.getSource(hash)\n if not check_sources:\n Results.insertSource(hash, None, None, None, today, 0)\n" }, { "alpha_fraction": 0.6350364685058594, "alphanum_fraction": 0.6423357725143433, "avg_line_length": 
16.869565963745117, "blob_id": "b32beabcbb5d7189a8398cff5d28a5e5f8c4ea63", "content_id": "2a7ddbb8d7426d8ad372a8063d3a42bcb6759f2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 411, "license_type": "permissive", "max_line_length": 49, "num_lines": 23, "path": "/apps/indicators/https.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check the use of https\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef get_scheme(url):\n parsed = urlparse(url)\n return parsed.scheme\n\n\ndef https(result_url, hash):\n scheme = get_scheme(result_url)\n module = 'check https'\n value = '0'\n\n if scheme == 'https':\n value = '1'\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.6014492511749268, "alphanum_fraction": 0.6050724387168884, "avg_line_length": 18.034482955932617, "blob_id": "92457b153ba23fd6eaaba3d5e133b054f3ecbbdf", "content_id": "88c9d46e4a9c0a3e13dd513a618cdc2e95f5cba3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "permissive", "max_line_length": 52, "num_lines": 29, "path": "/apps/indicators/url_length.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to calculate url url length\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef url_length(hash, result_url):\n\n value = '-1'\n module = 'check url_length'\n\n\n\n result_url = result_url.replace(\"www.\", \"\")\n\n url = result_url\n\n if (Helpers.matchText(result_url, \"https://*\")):\n url = result_url.replace(\"https://\", \"\")\n\n elif(Helpers.matchText(result_url, \"http://*\")):\n url = result_url.replace(\"http://\", \"\")\n\n\n value = str(len(url))\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.6082677245140076, 
"alphanum_fraction": 0.6161417365074158, "avg_line_length": 19.31999969482422, "blob_id": "d2bf613f9e85fc9ee842926b570953693e0dbfad", "content_id": "e2ec04e5dd30bb9c4e10715d25ec4df44ae34533", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 508, "license_type": "permissive", "max_line_length": 49, "num_lines": 25, "path": "/apps/indicators/wordpress.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check wordpress\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef wordpress(hash, tree):\n xpath = \"//meta[@name='generator']/@content\"\n module = 'check wordpress'\n content = tree.xpath(xpath)\n check = str(content)\n check = check.lower()\n\n value = '0'\n\n if len(check) > 1:\n pattern = \"*wordpress*\"\n if Helpers.matchText(check, pattern):\n value = '1'\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.49497559666633606, "alphanum_fraction": 0.503588855266571, "avg_line_length": 26.86400032043457, "blob_id": "f00c18a7819caf1c40c56bedff7aa0420f984d01", "content_id": "dafd47fda42b719375e3f49929294e9f5936badd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3483, "license_type": "permissive", "max_line_length": 200, "num_lines": 125, "path": "/apps/indicators/identical_title.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check the use of https\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef identical_title(hash, result_main):\n print(\"identical_title\")\n\n def check_identical_title(hash, result_main):\n\n def check_title(tree):\n\n\n title = \"\"\n\n xpath_title = \"//title/text()\"\n xpath_meta_title = \"//meta[@name='title']/@content\"\n xpath_og_title = \"//meta[@property='og:title']/@content\"\n\n check_title = 
str(tree.xpath(xpath_title))\n check_meta_title = str(tree.xpath(xpath_meta_title))\n check_og_title = str(tree.xpath(xpath_og_title))\n\n if len(check_title) > 4 or len(check_meta_title) > 4 or len(check_og_title) > 4:\n if len(check_og_title) > 4:\n title = check_og_title\n elif len(check_meta_title) > 4:\n title = check_meta_title\n else:\n title = check_title\n\n\n title = title.replace(\"'\", \"\")\n title = title.replace('\"', \"\")\n title = title.replace(':', \"\")\n title = title.replace(',', \"\")\n\n title = title.strip()\n\n print(title)\n\n\n return title\n\n\n results_urls = str(Sources.getSourcesURLs(hash))\n\n list_results_urls = list(results_urls.split(\"[url]\"))\n\n list_results_urls = list(dict.fromkeys(list_results_urls))\n\n results_links = []\n\n if len(list_results_urls) > 20:\n list_results_urls = list_results_urls[:20]\n\n for l in list_results_urls:\n url_split = l.split(\" \")\n if len(url_split) == 2:\n try:\n if result_main in url_split[1]:\n if not Helpers.matchText(url_split[1], '*javascript*') and not Helpers.matchText(url_split[1], '*None*') and url_split[1] != result_main and Helpers.validate_url(url_split[1]):\n results_links.append(url_split[1])\n except:\n pass\n\n\n\n\n results_links = list(dict.fromkeys(results_links))\n\n number_of_links = 3\n n = 0\n\n if len(results_links) < number_of_links:\n number_of_links = len(results_links)\n\n results_source = Results.getResultsSource(hash)\n code = Helpers.html_unescape(results_source[0][0])\n code = code.lower()\n tree = html.fromstring(code)\n\n title = check_title(tree)\n\n identical_title_num = 0\n\n\n while n < number_of_links:\n\n url_to_check = results_links[n]\n\n print(url_to_check)\n\n n+=1\n try:\n source = Results.saveResult(url_to_check)\n if source != 'error':\n code = source.lower()\n tree = html.fromstring(code)\n link_title = check_title(tree)\n if title == link_title:\n identical_title_num = 1\n number_of_links = 3\n else:\n number_of_links += 1\n\n\n except:\n 
number_of_links += 1\n\n if identical_title_num > 0:\n value = '1'\n else:\n value = '0'\n\n return value\n\n\n module = \"check identical title\"\n value = check_identical_title(hash, result_main)\n print(\"identical:\")\n print(value)\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.620633065700531, "alphanum_fraction": 0.6321219205856323, "avg_line_length": 38.128440856933594, "blob_id": "a4cde9e7c9160fc801bf400fdc2eb0360e120c67", "content_id": "46fb5d68dda93e0a1c0ff6c78d318b8eef571fc9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4265, "license_type": "permissive", "max_line_length": 347, "num_lines": 109, "path": "/apps/classifier/rules_old.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "# Rule-based Classifier\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n\n#function to classify\n\ndef classify(classifier_id, hashes):\n\n #functions to check the usage of titles in the document\n\n for h in hashes:\n\n hash = h[0]\n results_url = h[1]\n results_main = h[2]\n results_speed = h[3]\n\n evaluations_results = Evaluations.getEvaluationsResults(hash)\n\n dict_results = {}\n\n for e in evaluations_results:\n evaluations_module = e[0]\n evaluations_result = e[1]\n\n #indicators for rule based classification\n dict_results.update({evaluations_module: evaluations_result})\n\n \n\n #print(dict_results)\n\n #convert dict elements for rule based classification\n\n #sources:\n source_not_optimized = int(dict_results['source not optimized'])\n source_news = int(dict_results['source news'])\n source_known = int(dict_results['source known'])\n source_search_engine = int(dict_results['source search engine'])\n source_shop = int(dict_results['source shop'])\n source_top = int(dict_results['source top'])\n source_ads = int(dict_results['source ads'])\n source_company = int(dict_results['source company'])\n\n 
#indicators:\n indicator_https = int(dict_results['check https'])\n indicator_robots_txt = int(dict_results['robots_txt'])\n indicator_sitemap = int(dict_results['check sitemap'])\n indicator_nofollow = int(dict_results['check nofollow'])\n indicator_speed = float(results_speed)\n indicator_canonical = int(dict_results['check canonical'])\n indicator_viewport = int(dict_results['check viewport'])\n indicator_og = int(dict_results['check og'])\n indicator_micros = int(dict_results['micros counter'])\n indicator_title = int(dict_results['check title'])\n indicator_identical_title = int(dict_results['check identical title'])\n indicator_description = int(dict_results['check description'])\n indicator_speed = results_speed\n\n #plugins and tools\n tools_analytics = int(dict_results['tools analytics count'])\n tools_seo = int(dict_results['tools seo count'])\n tools_caching = int(dict_results['tools caching count'])\n tools_content = int(dict_results['tools content count'])\n tools_social = int(dict_results['tools social count'])\n tools_ads = int(dict_results['tools ads count'])\n\n #classification\n classification_count = 0\n not_optimized = 0\n optimized = 0\n probably_optimized = 0\n probably_not_optimized = 0\n classification_result = \"uncertain\"\n\n #most_probably_not_optimized\n if source_not_optimized == 1:\n not_optimized = 1\n classification_result = 'not optimized'\n classification_count += 1\n\n #most probably optimized\n if not_optimized == 0 and (tools_seo > 0 or source_known == 1 or source_news == 1 or source_ads == 1 or indicator_micros > 0):\n optimized = 1\n classification_result = 'optimized'\n classification_count += 1\n\n #probably optimized\n if optimized == 0 and not_optimized == 0 and (tools_analytics > 0 or source_shop == 1 or source_company == 1 or indicator_https == 1 or indicator_og == 1 or indicator_viewport == 1 or indicator_robots_txt == 1 or indicator_sitemap == 1 or indicator_nofollow > 0 or indicator_canonical > 0 or (indicator_speed 
< 3 and indicator_speed > 0)):\n probably_optimized = 1\n classification_result = 'probably_optimized'\n classification_count += 1\n\n #probably_not_optimized\n if optimized == 0 and not_optimized == 0 and (indicator_title == 0 or indicator_description == 0 or indicator_identical_title == 1 or indicator_og != 1 or indicator_speed > 60):\n probably_not_optimized = 1\n classification_result = 'probably_not_optimized'\n classification_count += 1\n\n Evaluations.updateClassificationResult(hash, classification_result, classifier_id, today)\n\n print(results_url)\n print(hash)\n print(classification_result)\n" }, { "alpha_fraction": 0.5336058139801025, "alphanum_fraction": 0.5426884889602661, "avg_line_length": 25.85365867614746, "blob_id": "60c22a46cd82112a4bcfe3afc40d63633925941f", "content_id": "06262aac2dd197af6ef1f7de0a03265538e2c706", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2202, "license_type": "permissive", "max_line_length": 86, "num_lines": 82, "path": "/apps/indicators/plugins.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check seo plugins and analytics tools in html code\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef get_tools():\n\n with open('../../config/evaluation.ini', 'r') as f:\n array = json.load(f)\n\n text_match_json = array[\"text-match\"]\n text_match_tools = []\n i = -1\n\n\n for text_match in text_match_json:\n i+=1\n name = text_match\n source = text_match_json[text_match][\"source\"]\n with open(source, 'r') as csvfile:\n csv_result = csv.reader(csvfile, delimiter=',', quotechar='\"')\n source = list(csv_result)\n tool = {\n \"name\": name,\n \"source\": source\n }\n text_match_tools.append(tool)\n\n return text_match_tools\n\n\ndef plugins(hash, html_source, html_comments):\n tools = get_tools()\n\n for text_match_tool in tools:\n plugins = []\n module = 
text_match_tool[\"name\"]\n matches = text_match_tool[\"source\"]\n value = '0'\n\n module_count = text_match_tool[\"name\"] + ' count'\n count_value = '0'\n\n\n check_evaluations_result(hash, module, value)\n\n\n check_evaluations_result(hash, module_count, count_value)\n\n for check in matches:\n obj = check[0]\n pattern = check[1]\n for comment in html_comments:\n if(len(comment) < 3000):\n if Helpers.matchText(comment, pattern):\n plugins.append([module, obj])\n\n\n for check in matches:\n obj = check[0]\n pattern = check[1]\n for snip in html_source:\n if(len(snip) < 3000):\n if Helpers.matchText(snip, pattern):\n plugins.append([module, obj])\n\n\n plugins = Helpers.remove_duplicates_from_list(plugins)\n\n if(len(plugins) > 0):\n plug = \"\"\n for p in plugins:\n plug = plug+p[1]+'###'\n value = plug[:-3]\n count_value = str(len(plugins))\n Evaluations.UpdateEvaluationResult(value, today, hash, module)\n Evaluations.UpdateEvaluationResult(count_value, today, hash, module_count)\n" }, { "alpha_fraction": 0.536631166934967, "alphanum_fraction": 0.5472761392593384, "avg_line_length": 21.492958068847656, "blob_id": "30f891a8ddcff9bfa573a7c9094ae0670a164e5f", "content_id": "6c2df5177644df260a68f19e7bb021f939e1b387", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1597, "license_type": "permissive", "max_line_length": 66, "num_lines": 71, "path": "/apps/indicators/micro.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check micro data\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\nmicro_file = '../../evaluations/micro.csv'\n\ntoday = date.today()\n\ndef get_micros():\n micros_list = []\n with open(micro_file, 'r') as csvfile:\n micros = csv.reader(csvfile)\n for m in micros:\n modul = m[0]\n pattern = m[1]\n item = (modul, pattern)\n micros_list.append(item)\n return micros_list\n\n\ndef micros(hash, html_comments, 
html_source):\n micros_list = get_micros()\n micros_save = []\n\n\n for ms in micros_list:\n obj = ms[0]\n pattern = ms[1]\n\n for comment in html_comments:\n if(len(comment) < 3000):\n if Helpers.matchText(comment, pattern):\n micros_save.append([obj])\n for s in html_source:\n if(len(s) < 3000):\n if Helpers.matchText(s, pattern):\n micros_save.append([obj])\n\n micros_save = Helpers.remove_duplicates_from_list(micros_save)\n\n res = ''\n\n if(len(micros_save) == 0):\n module = 'micros'\n value = '0'\n\n check_evaluations_result(hash, module, value)\n\n module = 'micros counter'\n value = '0'\n\n check_evaluations_result(hash, module, value)\n\n else:\n for m in micros_save:\n res = '#'+res+m[0]\n\n module = 'micros'\n\n check_evaluations_result(hash, module, res)\n\n\n module = 'micros counter'\n value = len(micros_save)\n value = str(value)\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.6564837694168091, "alphanum_fraction": 0.6677057147026062, "avg_line_length": 23.303030014038086, "blob_id": "e915f6aef9b2c351895456d34bfe11c1408ee07d", "content_id": "312ca9e67047a02d984ac1e8bd33dd736db0dc9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1604, "license_type": "permissive", "max_line_length": 185, "num_lines": 66, "path": "/apps/import/import_urls.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "# Import URLs\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n#constants: don't change the values of the following variables\n\n#query_id = 0 for imported urls: don't change\nquery_id = 0\n\n#scrapers_id for imported urls: don't change\nscrapers_id = 0\n\n#search engine for imported urls: don't change\nsearch_engine = \"Import\"\n\n#results_position for imported urls: don't change\nresults_position = 0\n\n#flag for imported urls: don't change\nresults_import = 1\n\n#job_id for imported urls: don't change\njob_id = 
0\n\n\n\n#select csv file with urls\nfile = \"../../evaluations/source known.csv\"\n\n#select study_id: choose study id for imported urls\nstudy_id = 3\n\ntoday = date.today()\n\ntimestamp = datetime.now()\n\n#open csv file\nwith open(file, 'r') as csvfile:\n csv_result = csv.reader(csvfile, delimiter=',', quotechar='\"')\n source = list(csv_result)\n\n#insert urls to db\nfor url in source:\n\n url = url[0]\n\n check_url = Results.getURL(query_id, study_id, url, search_engine)\n if (not check_url):\n\n url_meta = Results.getResultMeta(url)\n hash = url_meta[0]\n ip = url_meta[1]\n main = url_meta[2]\n main_hash = Helpers.computeMD5hash(main)\n contact_url = \"0\"\n contact_hash = \"0\"\n contact_url = \"0\"\n\n Results.insertResult(query_id, study_id, job_id, results_import, ip, hash, main_hash, contact_hash, search_engine, url, main, contact_url, today, timestamp, 1, results_position)\n\n check_sources = Results.getSource(hash)\n if not check_sources:\n Results.insertSource(hash, None, None, None, today, 0)\n" }, { "alpha_fraction": 0.6446991562843323, "alphanum_fraction": 0.65329509973526, "avg_line_length": 17.36842155456543, "blob_id": "bc80a2cc2a1be1ff8ae768cbe66415d0bd6fb2d7", "content_id": "6e00e6dbe8cab272a135488fcc8387b9db2c5671", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "permissive", "max_line_length": 49, "num_lines": 19, "path": "/apps/indicators/viewport.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check the viewport\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef viewport(hash, code):\n module = 'check viewport'\n pattern = '*meta*name*viewport*'\n value = '0'\n\n if Helpers.matchText(code, pattern):\n value = '1'\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.6547449827194214, "alphanum_fraction": 0.6547449827194214, 
"avg_line_length": 41.79130554199219, "blob_id": "0aafb700dbaef934cfbc9192b2e14a8ac017cd07", "content_id": "aa0ec77e51f8b17357e00837f1f2f902a5d6bcf8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4921, "license_type": "permissive", "max_line_length": 183, "num_lines": 115, "path": "/db/studies.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#Class for studies table\nclass Studies:\n def __init__(self, cursor):\n self.cursor = cursor\n\n def getStudies(cursor):\n cursor.execute(\"SELECT * from studies\")\n rows = cursor.fetchall()\n return rows\n\n def getStudiesScraper(cursor):\n cursor.execute(\"SELECT * from studies WHERE import IS NULL\")\n rows = cursor.fetchall()\n return rows\n\n def getStudy(cursor, id):\n sql= \"SELECT * from studies WHERE studies_id=%s\"\n data = (id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def getStudybyName(cursor, studies_name):\n sql= \"SELECT studies_name, studies_comment, studies_date, studies_se, studies_id from studies WHERE studies_name=%s\"\n data = (studies_name)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def getStudybyNamenotID(cursor, studies_name, studies_id):\n sql= \"SELECT studies_name from studies WHERE studies_name=%s AND studies_id != %s\"\n data = (studies_name, studies_id)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def updateStudy(cursor, studies_name, studies_comment, studies_se, studies_id):\n cursor.execute(\n \"UPDATE studies SET studies_name= %s, studies_comment = %s, studies_se = %s WHERE studies_id = %s\",\n (studies_name, studies_comment, studies_se, studies_id)\n )\n\n\n def insertStudy(cursor, studies_name, studies_comment, studies_date, studies_se):\n cursor.execute(\"INSERT INTO studies (studies_name, studies_comment, studies_date, studies_se) VALUES(%s,%s,%s,%s);\", (studies_name, studies_comment, studies_date, 
studies_se))\n\n def deleteStudy(cursor, studies_id):\n #delete from studies\n sql= \"DELETE from studies WHERE studies_id=%s\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n\n #delete from classifications\n sql = \"DELETE from classifications USING results WHERE classifications_hash = results_hash AND results_studies_id = %s\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n\n #delete from scrapers\n sql = \"DELETE from scrapers USING results WHERE scrapers_studies_id = results_studies_id AND results_studies_id = %s\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n\n #delete from sources\n sql = \"DELETE from sources USING results WHERE sources_hash = results_hash AND results_studies_id = %s\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n\n #delete from serps\n sql = \"DELETE from serps USING queries WHERE serps_queries_id = queries_id AND queries_studies_id = %s\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n\n #delete from evaluations\n sql = \"DELETE from evaluations USING results WHERE evaluations_results_hash = results_hash AND results_studies_id = %s\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n\n #delete from queries\n sql = \"DELETE from queries WHERE queries_studies_id = %s\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n\n #delete from results\n sql= \"DELETE from results WHERE results_studies_id=%s\"\n data = (studies_id)\n cursor.execute(sql,(data,))\n\n #function to delete deleteunassignedResults = results which are not related to a study\n def deleteunassignedResults(cursor):\n #delete from classifications\n sql = \"DELETE from classifications USING results WHERE classifications_hash = results_hash AND NOT EXISTS(SELECT * FROM studies WHERE studies_id = results_studies_id)\"\n cursor.execute(sql)\n\n #delete from scrapers\n sql = \"DELETE from scrapers USING results WHERE scrapers_queries_id = results_queries_id AND NOT EXISTS(SELECT * FROM studies WHERE studies_id = results_studies_id)\"\n 
cursor.execute(sql)\n\n #delete from sources\n sql = \"DELETE from sources USING results WHERE sources_hash = results_hash AND NOT EXISTS(SELECT * FROM studies WHERE studies_id = results_studies_id)\"\n cursor.execute(sql)\n\n #delete from serps\n sql = \"DELETE from serps USING queries WHERE serps_queries_id = queries_id AND NOT EXISTS(SELECT * FROM studies WHERE studies_id = queries_studies_id)\"\n cursor.execute(sql)\n\n #delete from evaluations\n sql = \"DELETE from evaluations USING results WHERE evaluations_results_hash = results_hash AND NOT EXISTS(SELECT * FROM studies WHERE studies_id = results_studies_id)\"\n cursor.execute(sql)\n\n #delete from queries\n sql = \"DELETE from queries WHERE NOT EXISTS(SELECT * FROM studies WHERE studies_id = queries_studies_id)\"\n\n #delete from results\n sql= \"DELETE from results WHERE NOT EXISTS(SELECT * FROM studies WHERE studies_id = results_studies_id)\"\n cursor.execute(sql)\n" }, { "alpha_fraction": 0.605400025844574, "alphanum_fraction": 0.6224919557571411, "avg_line_length": 25.5592098236084, "blob_id": "eda7c27e3be0c5723a885d834834b6f1ab3a54e2", "content_id": "7130e111eab1203fd54dcb43dde929db3f9f6991", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4038, "license_type": "permissive", "max_line_length": 125, "num_lines": 152, "path": "/apps/classifier/decision_tree_learner.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "# Decision Tree Classification Learner\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n# ### 1. 
Data Preparation\n\n# In[ ]:\n\n\n# loading dataset\ndf = pd.read_csv('all_res.csv', encoding='latin-1', low_memory=False)\n\n#df = df[:2000]\n\n# removing duplicates (ignoring first 3 columns)\ndf.drop_duplicates(subset=df.columns.to_list()[3:], inplace=True)\n\n# converting columns names to lowercase\ndf.rename(columns=lambda x: x.lower(), inplace=True)\n\n\n\n# adding new column url_length\n#df['url length'] = df['url'].str.len() - df['main'].str.len()\n\n# removing columns that can't be used in ml\n# it removes categorical columns as well as columns like id, query_id, etc.\nid_cols = ['study', 'id', 'hash', 'query_id', 'query', 'rules', 'decision_tree', 'position', 'tools social', 'tools caching']\nnon_numeric_cols = df.select_dtypes('object').columns.to_list()\ncombined = [*id_cols, *non_numeric_cols]\nto_drop = list(dict.fromkeys(combined))\ndf.drop(columns=to_drop, inplace=True)\n\n\n# set all error encoded speed values to -1\ndf.loc[df['speed'] < 0, 'speed'] = -1\n\n# replace missing value codes with -1\ndf.replace(-100, -1, inplace=True)\ndf.fillna(-1, inplace=True)\n\n# apply classification rules to dataset\n# create new column and assign -1 to all rows\ndf['seo class'] = -1\n\n# 0: nicht optimiert\ndf['seo class'] = np.where(df['source not optimized'] == 1, 0, df['seo class'])\n\n# 3: höchstwahrscheinlich optimiert\ndf['seo class'] = np.where((df['seo class'] != 0) & (\n (df['tools seo count'] > 0) |\n (df['source known'] == 1) |\n (df['source news'] == 1) |\n (df['source ads'] == 1) |\n (df['micros counter'] > 0)),\n 3,\n df['seo class'])\n\n# 2: wahrscheinlich optimiert\ndf['seo class'] = np.where((df['seo class'] == -1) & (\n (df['tools analytics count'] > 0) |\n (df['source shop'] == 1) |\n (df['source company'] == 1) |\n (df['check https'] == 1) |\n (df['check og'] == 1) |\n (df['check viewport'] == 1) |\n (df['robots_txt'] == 1) |\n (df['check sitemap'] == 1) |\n (df['check nofollow'] > 0) |\n (df['check canonical'] > 0) |\n ((df['speed'] > 0) & 
(df['speed'] < 3))),\n 2,\n df['seo class'])\n\n# 1: wahrscheinlich nicht optimiert\ndf['seo class'] = np.where((df['seo class'] == -1) & (\n (df['check title'] != 1) |\n (df['check description'] != 1) |\n (df['check identical title'] == 1) |\n (df['speed'] > 60) |\n (df['check og'] != 1),\n 1,\n df['seo class'])\n\n# save cleaned dataset // uncomment if it needs to be saved\n#df.to_csv('data/data_prepared.csv', index=False)\n\n\n# ### 2. Classification\n\n# In[ ]:\n\n\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_validate, StratifiedShuffleSplit\nfrom sklearn.tree import DecisionTreeClassifier\nfrom imblearn.over_sampling import RandomOverSampler\n\n# load dataset // uncomment if previous block wasn't run\n#df = pd.read_csv('data/data_prepared.csv')\n\n# remove missing values\ndf = df[~df.lt(0).any(1)]\n\n# splitting X and y (predictor and target)\nX = df.drop(columns=['seo class'])\ny = df['seo class']\n\n# oversample to balance classes\nsampler = RandomOverSampler(random_state=42)\nX, y = sampler.fit_resample(X, y)\n\n# creating list of metrics to assess\nmetrics = ['accuracy',\n 'precision_macro',\n 'recall_macro',\n 'f1_macro']\n\n# creating a stratified shuffle split for cross validation\nsplit = StratifiedShuffleSplit(n_splits=5, test_size=0.66, random_state=22)\n\n# setting classifier to decision tree algorithm\nclf = DecisionTreeClassifier()\n\n#fit data before saving\nclf.fit(X, y)\n\n# train/test algorithm with 5-fold cross validation\ncv = cross_validate(clf, X, y, scoring=metrics, cv=split)\n\n\n# In[ ]:\n\n# prints metrics\nfor k, v in cv.items():\n print(k, v.mean())\n\n\n# ### 3. 
Exporting Model\n\n# In[ ]:\n\n\n# saves trained model as joblib file\nfrom joblib import dump\ndump(clf, 'dt_classifier.joblib')\n" }, { "alpha_fraction": 0.7124077677726746, "alphanum_fraction": 0.7184809446334839, "avg_line_length": 22.187166213989258, "blob_id": "817451af15d16cb66657119ccc722c95cfa51b5b", "content_id": "e5763916e06e1da317e827ec1c37947593be1cf3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 13008, "license_type": "permissive", "max_line_length": 186, "num_lines": 561, "path": "/install/install_db.sql", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "--\n-- PostgreSQL database dump\n--\n\n-- Dumped from database version 11.10 (Debian 11.10-1.pgdg100+1)\n-- Dumped by pg_dump version 13.1 (Debian 13.1-1.pgdg100+1)\n\nSET statement_timeout = 0;\nSET lock_timeout = 0;\nSET idle_in_transaction_session_timeout = 0;\nSET client_encoding = 'UTF8';\nSET standard_conforming_strings = on;\nSELECT pg_catalog.set_config('search_path', '', false);\nSET check_function_bodies = false;\nSET xmloption = content;\nSET client_min_messages = warning;\nSET row_security = off;\n\n--\n-- Name: tablefunc; Type: EXTENSION; Schema: -; Owner: -\n--\n\nCREATE EXTENSION IF NOT EXISTS tablefunc WITH SCHEMA public;\n\n\n--\n-- Name: EXTENSION tablefunc; Type: COMMENT; Schema: -; Owner:\n--\n\nCOMMENT ON EXTENSION tablefunc IS 'functions that manipulate whole tables, including crosstab';\n\n\n--\n-- Name: tsm_system_rows; Type: EXTENSION; Schema: -; Owner: -\n--\n\nCREATE EXTENSION IF NOT EXISTS tsm_system_rows WITH SCHEMA public;\n\n\n--\n-- Name: EXTENSION tsm_system_rows; Type: COMMENT; Schema: -; Owner:\n--\n\nCOMMENT ON EXTENSION tsm_system_rows IS 'TABLESAMPLE method which accepts number of rows as a limit';\n\n\n--\n-- Name: _final_median(numeric[]); Type: FUNCTION; Schema: public; Owner: seo\n--\n\nCREATE FUNCTION public._final_median(numeric[]) RETURNS numeric\n LANGUAGE sql 
IMMUTABLE\n AS $_$\n SELECT AVG(val)\n FROM (\n SELECT val\n FROM unnest($1) val\n ORDER BY 1\n LIMIT 2 - MOD(array_upper($1, 1), 2)\n OFFSET CEIL(array_upper($1, 1) / 2.0) - 1\n ) sub;\n$_$;\n\n\nALTER FUNCTION public._final_median(numeric[]) OWNER TO seo;\n\n--\n-- Name: median(numeric); Type: AGGREGATE; Schema: public; Owner: seo\n--\n\nCREATE AGGREGATE public.median(numeric) (\n SFUNC = array_append,\n STYPE = numeric[],\n INITCOND = '{}',\n FINALFUNC = public._final_median\n);\n\n\nALTER AGGREGATE public.median(numeric) OWNER TO seo;\n\nSET default_tablespace = '';\n\n--\n-- Name: classifications; Type: TABLE; Schema: public; Owner: seo\n--\n\nCREATE TABLE public.classifications (\n classifications_hash text,\n classifications_result text,\n classifications_date date,\n classifications_id integer NOT NULL,\n classifications_classification text\n);\n\n\nALTER TABLE public.classifications OWNER TO seo;\n\n--\n-- Name: classifications_classifications_id_seq; Type: SEQUENCE; Schema: public; Owner: seo\n--\n\nCREATE SEQUENCE public.classifications_classifications_id_seq\n AS integer\n START WITH 1\n INCREMENT BY 1\n NO MINVALUE\n NO MAXVALUE\n CACHE 1;\n\n\nALTER TABLE public.classifications_classifications_id_seq OWNER TO seo;\n\n--\n-- Name: classifications_classifications_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: seo\n--\n\nALTER SEQUENCE public.classifications_classifications_id_seq OWNED BY public.classifications.classifications_id;\n\n\n--\n-- Name: evaluations; Type: TABLE; Schema: public; Owner: seo\n--\n\nCREATE TABLE public.evaluations (\n evaluations_results_hash character(32),\n evaluations_module text,\n evaluations_result text,\n evaluations_date date,\n evaluations_progress integer,\n evaluations_id integer NOT NULL\n);\n\n\nALTER TABLE public.evaluations OWNER TO seo;\n\n--\n-- Name: evaluations_evaluations_id_seq; Type: SEQUENCE; Schema: public; Owner: seo\n--\n\nCREATE SEQUENCE public.evaluations_evaluations_id_seq\n AS integer\n 
START WITH 1\n INCREMENT BY 1\n NO MINVALUE\n NO MAXVALUE\n CACHE 1;\n\n\nALTER TABLE public.evaluations_evaluations_id_seq OWNER TO seo;\n\n--\n-- Name: evaluations_evaluations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: seo\n--\n\nALTER SEQUENCE public.evaluations_evaluations_id_seq OWNED BY public.evaluations.evaluations_id;\n\n\n--\n-- Name: queries; Type: TABLE; Schema: public; Owner: seo\n--\n\nCREATE TABLE public.queries (\n queries_studies_id integer,\n queries_query text,\n queries_comment text,\n queries_progress integer,\n queries_id integer NOT NULL,\n queries_date text\n);\n\n\nALTER TABLE public.queries OWNER TO seo;\n\n--\n-- Name: queries_queries_id_seq; Type: SEQUENCE; Schema: public; Owner: seo\n--\n\nCREATE SEQUENCE public.queries_queries_id_seq\n AS integer\n START WITH 1\n INCREMENT BY 1\n NO MINVALUE\n NO MAXVALUE\n CACHE 1;\n\n\nALTER TABLE public.queries_queries_id_seq OWNER TO seo;\n\n--\n-- Name: queries_queries_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: seo\n--\n\nALTER SEQUENCE public.queries_queries_id_seq OWNED BY public.queries.queries_id;\n\n\n--\n-- Name: results; Type: TABLE; Schema: public; Owner: seo\n--\n\nCREATE TABLE public.results (\n results_queries_id integer,\n results_studies_id integer,\n results_scrapers_id integer,\n results_import integer,\n results_ip text,\n results_hash character(32),\n results_main_hash text,\n results_contact_hash text,\n results_se text,\n results_position integer,\n results_url text,\n results_main text,\n results_contact text,\n results_date date,\n results_timestamp timestamp without time zone,\n results_progress integer,\n results_id integer NOT NULL\n);\n\n\nALTER TABLE public.results OWNER TO seo;\n\n--\n-- Name: results_results_id_seq; Type: SEQUENCE; Schema: public; Owner: seo\n--\n\nCREATE SEQUENCE public.results_results_id_seq\n AS integer\n START WITH 1\n INCREMENT BY 1\n NO MINVALUE\n NO MAXVALUE\n CACHE 1;\n\n\nALTER TABLE public.results_results_id_seq OWNER 
TO seo;\n\n--\n-- Name: results_results_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: seo\n--\n\nALTER SEQUENCE public.results_results_id_seq OWNED BY public.results.results_id;\n\n\n--\n-- Name: scrapers; Type: TABLE; Schema: public; Owner: seo\n--\n\nCREATE TABLE public.scrapers (\n scrapers_queries_id integer,\n scrapers_studies_id integer,\n scrapers_queries_query text,\n scrapers_se text,\n scrapers_start integer,\n scrapers_date date,\n scrapers_progress integer,\n scrapers_id integer NOT NULL\n);\n\n\nALTER TABLE public.scrapers OWNER TO seo;\n\n--\n-- Name: scrapers_scrapers_id_seq; Type: SEQUENCE; Schema: public; Owner: seo\n--\n\nCREATE SEQUENCE public.scrapers_scrapers_id_seq\n AS integer\n START WITH 1\n INCREMENT BY 1\n NO MINVALUE\n NO MAXVALUE\n CACHE 1;\n\n\nALTER TABLE public.scrapers_scrapers_id_seq OWNER TO seo;\n\n--\n-- Name: scrapers_scrapers_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: seo\n--\n\nALTER SEQUENCE public.scrapers_scrapers_id_seq OWNED BY public.scrapers.scrapers_id;\n\n\n--\n-- Name: serps; Type: TABLE; Schema: public; Owner: seo\n--\n\nCREATE TABLE public.serps (\n serps_queries_id integer,\n serps_result text,\n serps_scrapers_result integer,\n serps_date date,\n serps_id integer NOT NULL,\n serps_se text\n);\n\n\nALTER TABLE public.serps OWNER TO seo;\n\n--\n-- Name: serps_serps_id_seq; Type: SEQUENCE; Schema: public; Owner: seo\n--\n\nCREATE SEQUENCE public.serps_serps_id_seq\n AS integer\n START WITH 1\n INCREMENT BY 1\n NO MINVALUE\n NO MAXVALUE\n CACHE 1;\n\n\nALTER TABLE public.serps_serps_id_seq OWNER TO seo;\n\n--\n-- Name: serps_serps_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: seo\n--\n\nALTER SEQUENCE public.serps_serps_id_seq OWNED BY public.serps.serps_id;\n\n\n--\n-- Name: sources; Type: TABLE; Schema: public; Owner: seo\n--\n\nCREATE TABLE public.sources (\n sources_hash text,\n sources_source text,\n sources_urls text,\n sources_comments text,\n sources_date date,\n 
sources_progress integer,\n sources_id integer NOT NULL,\n sources_speed real\n);\n\n\nALTER TABLE public.sources OWNER TO seo;\n\n--\n-- Name: sources_sources_id_seq; Type: SEQUENCE; Schema: public; Owner: seo\n--\n\nCREATE SEQUENCE public.sources_sources_id_seq\n AS integer\n START WITH 1\n INCREMENT BY 1\n NO MINVALUE\n NO MAXVALUE\n CACHE 1;\n\n\nALTER TABLE public.sources_sources_id_seq OWNER TO seo;\n\n--\n-- Name: sources_sources_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: seo\n--\n\nALTER SEQUENCE public.sources_sources_id_seq OWNED BY public.sources.sources_id;\n\n\n--\n-- Name: studies; Type: TABLE; Schema: public; Owner: seo\n--\n\nCREATE TABLE public.studies (\n studies_name text,\n studies_comment text,\n studies_date date,\n studies_id integer NOT NULL,\n import integer,\n studies_se text\n);\n\n\nALTER TABLE public.studies OWNER TO seo;\n\n--\n-- Name: studies_studies_id_seq; Type: SEQUENCE; Schema: public; Owner: seo\n--\n\nCREATE SEQUENCE public.studies_studies_id_seq\n AS integer\n START WITH 1\n INCREMENT BY 1\n NO MINVALUE\n NO MAXVALUE\n CACHE 1;\n\n\nALTER TABLE public.studies_studies_id_seq OWNER TO seo;\n\n--\n-- Name: studies_studies_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: seo\n--\n\nALTER SEQUENCE public.studies_studies_id_seq OWNED BY public.studies.studies_id;\n\n\n--\n-- Name: classifications classifications_id; Type: DEFAULT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.classifications ALTER COLUMN classifications_id SET DEFAULT nextval('public.classifications_classifications_id_seq'::regclass);\n\n\n--\n-- Name: evaluations evaluations_id; Type: DEFAULT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.evaluations ALTER COLUMN evaluations_id SET DEFAULT nextval('public.evaluations_evaluations_id_seq'::regclass);\n\n\n--\n-- Name: queries queries_id; Type: DEFAULT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.queries ALTER COLUMN queries_id SET DEFAULT 
nextval('public.queries_queries_id_seq'::regclass);\n\n\n--\n-- Name: results results_id; Type: DEFAULT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.results ALTER COLUMN results_id SET DEFAULT nextval('public.results_results_id_seq'::regclass);\n\n\n--\n-- Name: scrapers scrapers_id; Type: DEFAULT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.scrapers ALTER COLUMN scrapers_id SET DEFAULT nextval('public.scrapers_scrapers_id_seq'::regclass);\n\n\n--\n-- Name: serps serps_id; Type: DEFAULT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.serps ALTER COLUMN serps_id SET DEFAULT nextval('public.serps_serps_id_seq'::regclass);\n\n\n--\n-- Name: sources sources_id; Type: DEFAULT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.sources ALTER COLUMN sources_id SET DEFAULT nextval('public.sources_sources_id_seq'::regclass);\n\n\n--\n-- Name: studies studies_id; Type: DEFAULT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.studies ALTER COLUMN studies_id SET DEFAULT nextval('public.studies_studies_id_seq'::regclass);\n\n\n--\n-- Name: classifications classifications_pkey; Type: CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.classifications\n ADD CONSTRAINT classifications_pkey PRIMARY KEY (classifications_id);\n\n\n--\n-- Name: evaluations evaluations_pkey; Type: CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.evaluations\n ADD CONSTRAINT evaluations_pkey PRIMARY KEY (evaluations_id);\n\n\n--\n-- Name: queries queries_pkey; Type: CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.queries\n ADD CONSTRAINT queries_pkey PRIMARY KEY (queries_id);\n\n\n--\n-- Name: queries queries_queries_studies_id_queries_query_key; Type: CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.queries\n ADD CONSTRAINT queries_queries_studies_id_queries_query_key UNIQUE (queries_studies_id, queries_query);\n\n\n--\n-- Name: results results_pkey; Type: 
CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.results\n ADD CONSTRAINT results_pkey PRIMARY KEY (results_id);\n\n\n--\n-- Name: results results_results_queries_id_results_hash_results_se_results__key; Type: CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.results\n ADD CONSTRAINT results_results_queries_id_results_hash_results_se_results__key UNIQUE (results_queries_id, results_hash, results_se, results_url);\n\n\n--\n-- Name: scrapers scrapers_pkey; Type: CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.scrapers\n ADD CONSTRAINT scrapers_pkey PRIMARY KEY (scrapers_id);\n\n\n--\n-- Name: scrapers scrapers_scrapers_queries_id_scrapers_studies_id_scrapers_q_key; Type: CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.scrapers\n ADD CONSTRAINT scrapers_scrapers_queries_id_scrapers_studies_id_scrapers_q_key UNIQUE (scrapers_queries_id, scrapers_studies_id, scrapers_queries_query, scrapers_se, scrapers_start);\n\n\n--\n-- Name: serps serps_pkey; Type: CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.serps\n ADD CONSTRAINT serps_pkey PRIMARY KEY (serps_id);\n\n\n--\n-- Name: sources sources_pkey; Type: CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.sources\n ADD CONSTRAINT sources_pkey PRIMARY KEY (sources_id);\n\n\n--\n-- Name: studies studies_pkey; Type: CONSTRAINT; Schema: public; Owner: seo\n--\n\nALTER TABLE ONLY public.studies\n ADD CONSTRAINT studies_pkey PRIMARY KEY (studies_id);\n\n\n--\n-- Name: indx001; Type: INDEX; Schema: public; Owner: seo\n--\n\nCREATE INDEX indx001 ON public.evaluations USING btree (evaluations_results_hash);\n\n\n--\n-- Name: indx002; Type: INDEX; Schema: public; Owner: seo\n--\n\nCREATE INDEX indx002 ON public.results USING btree (results_hash);\n\n\n--\n-- PostgreSQL database dump complete\n--\n" }, { "alpha_fraction": 0.5995671153068542, "alphanum_fraction": 0.6147186160087585, "avg_line_length": 
17.479999542236328, "blob_id": "1f48eff646da9428b227de3931b3e8e9feb74b98", "content_id": "1dc0dafc0bf2a8e7e362bea1382ecc735b24ed17", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "permissive", "max_line_length": 49, "num_lines": 25, "path": "/apps/indicators/sitemap.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check sitemap\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef sitemap(hash, code):\n pattern = \"*sitemap*\"\n module = 'check sitemap'\n value = '0'\n sitemap_counter = 0\n\n if (Helpers.matchText(code, pattern)):\n sitemap_counter = sitemap_counter + 1\n\n if sitemap_counter > 0:\n value = '1'\n else:\n value = '0'\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.4268929362297058, "alphanum_fraction": 0.42721933126449585, "avg_line_length": 22.212121963500977, "blob_id": "529d3a4df83aa1d840defb9af7aaac56131f59d5", "content_id": "722c68e67dad4ad6c3f62d5a1715dfda9c29e04b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3064, "license_type": "permissive", "max_line_length": 62, "num_lines": 132, "path": "/install/install_sqlite.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "import sqlite3 as sl\n\ncon = sl.connect('../seo_effect.db')\n\ntry:\n with con:\n con.execute(\"\"\"\n CREATE TABLE study (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n description TEXT,\n search_engines TEXT,\n date DATE\n );\n \"\"\")\nexcept:\n pass\n\ntry:\n with con:\n con.execute(\"\"\"\n CREATE TABLE source (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n hash TEXT,\n source TEXT,\n urls TEXT,\n comments TEXT,\n speed FLOAT,\n progress INTEGER,\n date DATE\n );\n \"\"\")\nexcept:\n pass\n\ntry:\n with con:\n con.execute(\"\"\"\n CREATE TABLE serp 
(\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n query_id INTEGER,\n result TEXT,\n search_engine TEXT,\n progress INTEGER,\n date DATE\n );\n \"\"\")\nexcept:\n pass\n\ntry:\n with con:\n con.execute(\"\"\"\n CREATE TABLE scraper (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n study_id INTEGER,\n query_id INTEGER,\n query TEXT,\n search_engine TEXT,\n position INTEGER,\n progress INTEGER,\n date DATE\n );\n \"\"\")\nexcept:\n pass\n\ntry:\n with con:\n con.execute(\"\"\"\n CREATE TABLE result (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n study_id INTEGER,\n query_id INTEGER,\n scraper_id INTEGER,\n import INTEGER,\n ip TEXT,\n hash TEXT,\n main_hash TEXT,\n search_engine TEXT,\n position INTEGER,\n url TEXT,\n main_url TEXT,\n progress INTEGER,\n timestamp TIMESTAMP,\n date DATE\n );\n \"\"\")\nexcept:\n pass\n\ntry:\n with con:\n con.execute(\"\"\"\n CREATE TABLE query (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n study_id INTEGER,\n query TEXT,\n date DATE\n );\n \"\"\")\nexcept:\n pass\n\ntry:\n with con:\n con.execute(\"\"\"\n CREATE TABLE evaluation (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n hash TEXT,\n module TEXT,\n result TEXT,\n progress INTEGER,\n date DATE\n );\n \"\"\")\nexcept:\n pass\n\ntry:\n with con:\n con.execute(\"\"\"\n CREATE TABLE classification (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n classification TEXT,\n hash TEXT,\n result TEXT,\n date DATE\n );\n \"\"\")\nexcept:\n pass\n" }, { "alpha_fraction": 0.7278911471366882, "alphanum_fraction": 0.7346938848495483, "avg_line_length": 15.333333015441895, "blob_id": "10f1d65a2f25e08ba9f384bc743f4f4b0ecb83d7", "content_id": "363afcb5b84580075a8fceee225cd33f62ce5170", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 147, "license_type": "permissive", "max_line_length": 28, "num_lines": 9, "path": "/apps/scraper/reset_scraper.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", 
"text": "#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\nScrapers.resetScrapingJobs()\n#Scrapers.genProxies()\n" }, { "alpha_fraction": 0.6165643930435181, "alphanum_fraction": 0.6257668733596802, "avg_line_length": 15.300000190734863, "blob_id": "f0bd63f9ccfe1bff6eb8a5b182bbdec5f5e72fbb", "content_id": "65a811504e725fc1c4bd48bbdbea5807a41c37c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "permissive", "max_line_length": 49, "num_lines": 20, "path": "/apps/indicators/og.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check open graph tags\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef og(hash, code):\n module = 'check og'\n pattern = '*og:*'\n value = '0'\n\n if Helpers.matchText(code, pattern):\n value = '1'\n\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.5543766617774963, "alphanum_fraction": 0.5809018611907959, "avg_line_length": 14.708333015441895, "blob_id": "b38532c0f07e25a371f832241635ecc89030e7df", "content_id": "69260f752b4a22e52a822f301dcefaac2ff362b2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 377, "license_type": "permissive", "max_line_length": 49, "num_lines": 24, "path": "/apps/indicators/h1.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check h1 headings\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n\ndef h1(hash, tree):\n\n xpath = \"//h1/text()\"\n module = 'check h1'\n counter = 0\n value = '0'\n\n res = tree.xpath(xpath)\n\n for r in res:\n counter = counter + 1\n\n if counter > 0:\n value = '1'\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.6447368264198303, "alphanum_fraction": 
0.6469298005104065, "avg_line_length": 23, "blob_id": "3dccf5045a24ad1b6cec6f19a9ee133e27dc5071", "content_id": "40ce18394dcd030e9cc6c33953afafa53815d6a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 912, "license_type": "permissive", "max_line_length": 123, "num_lines": 38, "path": "/libs/sources.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\nimport os, sys\nimport os.path\n\n#tool libs\nsys.path.insert(0, '..')\nfrom db.connect import DB\n\nsys.path.insert(0, '..')\nfrom db.sources import Sources as DB_Sources\n\nfrom libs.helpers import Helpers\n\n# class for evaluatios functions; mainly to read and write database content; the library is necessary to process indicators\nclass Sources:\n def __init__(self):\n self.data = []\n\n#helper to check for loading speed entries in the sources table\n def getSpeed(hash):\n db = DB()\n rows = DB_Sources.getSpeed(db.cursor, hash)\n db.DBDisconnect()\n return rows\n\n def getSourcesURLs(hash):\n db = DB()\n rows = DB_Sources.getSourcesURLs(db.cursor, hash)\n db.DBDisconnect()\n return rows\n\n def resetSources():\n db = DB()\n DB_Sources.resetSources(db.cursor)\n db.DBDisconnect()\n\n def deleteSources(query_id):\n pass\n" }, { "alpha_fraction": 0.5592960715293884, "alphanum_fraction": 0.5768936276435852, "avg_line_length": 20.42622947692871, "blob_id": "e98007c331b74d551bddfee38787f3c38bcf0083", "content_id": "b744b4290e974c398617accd2fbdd6ef8aea1c53", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1308, "license_type": "permissive", "max_line_length": 92, "num_lines": 61, "path": "/apps/scraper/save_sources.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n#tool libs\n\n\nwith open('../../config/global_vars.ini', 'r') as f:\n array = 
json.load(f)\n\n\ntoday = date.today()\n\n\ndef save_content(url, hash, main):\n\n if(not Results.getSource(hash)):\n Results.insertSource(hash, \"0\", \"0\", \"0\", today, 0)\n\n try:\n source = Results.saveResult(url)\n\n if source == 'error':\n Results.updateSources(hash, \"-1\", \"-1\", \"-1\", date.today(), 1)\n else:\n content = Results.getContent(source, main)\n Results.updateSources(hash, content[0], content[1], content[2], date.today(), 1)\n\n except:\n Results.updateSources(hash, \"-1\", \"-1\", \"-1\", date.today(), 1)\n pass\n\n\ndef get_results():\n results = Results.getResultsSourcesNULL()\n return results\n\n\ndef insert_sources(results):\n counter = 0\n for result in results:\n counter = counter + 1\n\n hash = result[0]\n main_hash = result[1]\n url = result[2]\n main = result[3]\n #print(hash)\n #noch datumsprüfung umsetzen....\n if(not Results.getRecentSource(hash)):\n print(url)\n print(hash)\n save_content(url, hash, main)\n\nresults = get_results()\nprint(results)\n\n\ninsert_sources(results)\n" }, { "alpha_fraction": 0.6568858623504639, "alphanum_fraction": 0.6573141813278198, "avg_line_length": 39.599998474121094, "blob_id": "fc55b78c2328784675948f292b3525b256682ebf", "content_id": "c27d231cf90e644cdddf721c39a58329ab05c253", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4669, "license_type": "permissive", "max_line_length": 257, "num_lines": 115, "path": "/db/scrapers.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#Class for scrapers table\nclass Scrapers:\n def __init__(self, cursor):\n self.cursor = cursor\n\n#read from db\n\n#read all scaping jobs by progress\n def getScrapingJobsByProgress(cursor, progress):\n sql= \"SELECT * from scrapers WHERE scrapers_progress=%s ORDER BY RANDOM()\"\n data = (progress)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n#read all scaping jobs by progress\n def 
getScrapingJobsByProgressSE(cursor, progress, se):\n sql= \"SELECT * from scrapers WHERE scrapers_progress=%s AND scrapers_se =%s ORDER BY RANDOM()\"\n data = (progress, se)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n#read scaping jobs by progress and query\n def getScrapingJobsByQueryProgress(cursor, query_id, progress):\n sql= \"SELECT * FROM scrapers WHERE scrapers_queries_id = %s AND scrapers_progress = %s ORDER BY scrapers_start, scrapers_se ASC\"\n data = (query_id, progress)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n\n def getScrapingJobsByQueryProgressSE(cursor, query_id, progress, se):\n sql= \"SELECT * FROM scrapers WHERE scrapers_queries_id = %s AND scrapers_progress = %s AND scrapers_se = %s ORDER BY scrapers_start, scrapers_se\"\n data = (query_id, progress, se)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n#read scraping jobs by query\n def getScrapingJobsByQuery(cursor, query_id):\n sql= \"SELECT * FROM scrapers WHERE scrapers_queries_id = %s ORDER BY scrapers_id\"\n data = (query_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n#read scraping jobs by Search Engine\n def getScrapingJobsBySE(cursor, query_id, search_engine):\n sql= \"SELECT count(scrapers_id) FROM scrapers WHERE scrapers_queries_id = %s AND scrapers_se =%s\"\n data = (query_id, search_engine)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def getScrapingJobsByStudyQueries(cursor, study):\n sql= \"SELECT count(distinct(scrapers_queries_id)) from scrapers, queries WHERE scrapers_queries_id = queries_id AND scrapers_studies_id=%s\"\n data = (study)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n#write to db\n\n#generate scraping joby by queries\n def insertScrapingJobs(cursor, query_id, study_id, query_string, search_engine, start, today):\n cursor.execute(\n \"INSERT INTO scrapers (scrapers_queries_id, 
scrapers_studies_id, scrapers_queries_query, scrapers_se, scrapers_start, scrapers_date, scrapers_progress) VALUES (%s, %s, %s, %s, %s, %s, %s);\", # remove parenthesis here, which ends the execute call\n (query_id, study_id, query_string, search_engine, start, today, 0)\n )\n\n#update status of scraping job\n def updateScrapingJob(cursor, job_id, progress):\n cursor.execute(\n \"UPDATE scrapers SET scrapers_progress = %s WHERE scrapers_id = %s\",\n (progress, job_id)\n )\n\n#update status of scraping job by query; important for queries with a limited range of search results\n def updateScrapingJobQuery(cursor, query_id, progress):\n cursor.execute(\n \"UPDATE scrapers SET scrapers_progress = %s WHERE scrapers_queries_id = %s\",\n (progress, query_id)\n )\n\n#update status of scraping job by query; important for queries with a limited range of search results\n def updateScrapingJobQuerySeJobId(cursor, query_id, progress, se, job_id):\n cursor.execute(\n \"UPDATE scrapers SET scrapers_progress = %s WHERE scrapers_queries_id = %s AND scrapers_se = %s AND scrapers_id >= %s\",\n (progress, query_id, se, job_id)\n )\n\n\n#update scraping job by query and search engine\n def updateScrapingJobQuerySearchEngine(cursor, query_id, search_engine, progress):\n cursor.execute(\n \"UPDATE scrapers SET scrapers_progress = %s WHERE scrapers_queries_id = %s AND scrapers_se =%s\",\n (progress, query_id, search_engine)\n )\n\n#reset scraper_jobs\n\n def resetScrapingJobs(cursor):\n\n cursor.execute(\n \"DELETE FROM scrapers WHERE scrapers_progress = -1\"\n )\n\n\n def getScrapingJobs(cursor, query_id, study_id, search_engine):\n sql= \"SELECT scrapers_id FROM scrapers WHERE scrapers_queries_id = %s AND scrapers_studies_id =%s AND scrapers_se = %s\"\n data = (query_id, study_id, search_engine)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n" }, { "alpha_fraction": 0.576301634311676, "alphanum_fraction": 0.5942549109458923, "avg_line_length": 15.878787994384766, 
"blob_id": "3e63fe81aa48b3706543a1b9989f7c84e0e07d4f", "content_id": "0102be2decc97bce3aaca681bf1810d79917d456", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 557, "license_type": "permissive", "max_line_length": 49, "num_lines": 33, "path": "/apps/indicators/nofollow.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check nofollow links\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef nofollow(hash, tree):\n\n xpath = '//a[@rel=\"nofollow\"]'\n module = 'check nofollow'\n counter = 0\n\n res1 = tree.xpath(xpath)\n\n for r in res1:\n counter = counter + 1\n\n\n xpath_2 = '/meta[@name=\"robots\"]/@content'\n\n res2 = tree.xpath(xpath_2)\n\n for r in res2:\n if r == 'nofollow':\n counter = counter + 1\n\n value = str(counter)\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.6945606470108032, "alphanum_fraction": 0.7112970948219299, "avg_line_length": 17.384614944458008, "blob_id": "cfe955868255014b23d5e3c2a959c3de56bb4802", "content_id": "066f0219e4f3d8074971404dee490e8beeac521b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "permissive", "max_line_length": 52, "num_lines": 13, "path": "/apps/main/proc_speed.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sub processes to measure loading speed of a webpage\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef speed():\n call([\"python3\", \"job_speed.py\"])\n\nprocess1 = threading.Thread(target=speed)\n\nprocess1.start()\n" }, { "alpha_fraction": 0.5384615659713745, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 17.200000762939453, "blob_id": "77ef2ccbe1983c42096d0614b2de79be12498bf1", "content_id": "d8fd446403ddc8a6be2474f76f1c95a6dfd31b47", "detected_licenses": [ 
"MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 91, "license_type": "permissive", "max_line_length": 48, "num_lines": 5, "path": "/config/global_vars.ini", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "{\n \"_comment\": \"config file for all global_vars\",\n \"update\": \"0\",\n \"indicators\": \"55\"\n}\n" }, { "alpha_fraction": 0.631130039691925, "alphanum_fraction": 0.631130039691925, "avg_line_length": 54.17647171020508, "blob_id": "f737ce97b4178bc2cb80b900c10956d0e6984c91", "content_id": "3d72bf68d6edf0492d0780e0bb0ef4408a9e6db5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 938, "license_type": "permissive", "max_line_length": 178, "num_lines": 17, "path": "/config/kw.ini", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "{\n \"_comment\": \"config file for all evaluation modules; text-match: simple text match objects using csv files with keywords; crawler: crawlers to save specific urls of a website\",\n \"keywords\":{\n \"check kw_in_title\":\"//title/text()\",\n \"check kw_in_title-og\":\"//meta[@property='og:title']/@content\",\n \"check kw_in_title-meta\":\"//meta[@name='title']/@content\",\n \"check kw_in_meta-content\":\"//meta/@content\",\n \"check kw_in_meta-properties\":\"//meta/@property\",\n \"check kw_in_meta-description\":\"//meta[@name='description']/@content\",\n \"check kw_in_meta-og\":\"//meta[@property='og:description']/@content\",\n \"check kw_in_source\":\"//body//*/text()\",\n \"check kw_in_link-text\":\"//a/text()\",\n \"check kw_in_href\":\"//a/@href\",\n \"check kw_in_description-og-property\":\"//meta[@property='og:description']/@content\",\n \"check kw_in_description-og-name\":\"//meta[@name='og:description']/@content\"\n }\n}\n" }, { "alpha_fraction": 0.7157894968986511, "alphanum_fraction": 0.7263157963752747, "avg_line_length": 14.833333015441895, "blob_id": 
"c9557e5defdaccacfa93beeb5c14661b63164970", "content_id": "d1018954153dcf615d5900f34057a712674f90b5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "permissive", "max_line_length": 24, "num_lines": 6, "path": "/apps/scraper/gen_proxies.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\nScrapers.genProxies()\n" }, { "alpha_fraction": 0.698924720287323, "alphanum_fraction": 0.7188940048217773, "avg_line_length": 22.25, "blob_id": "1bb4486dcb771e13f5875c9f024eeb2d40a1ef40", "content_id": "c934c6f917f8d18513144569e61a6ad4e232777a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 651, "license_type": "permissive", "max_line_length": 56, "num_lines": 28, "path": "/apps/main/proc_scraper.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sub processes to scrape using the normal Google scraper\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef save_sources():\n call([\"python3\", \"job_save_sources.py\"])\n\ndef scraper():\n call([\"python3\", \"job_scraper.py\"])\n\ndef reset_scraper():\n call([\"python3\", \"job_reset_scraper.py\"])\n\ndef reset_sources():\n call([\"python3\", \"job_reset_sources.py\"])\n\nprocess1 = threading.Thread(target=scraper)\nprocess2 = threading.Thread(target=save_sources)\nprocess3 = threading.Thread(target=reset_scraper)\nprocess4 = threading.Thread(target=reset_sources)\n\nprocess1.start()\nprocess2.start()\nprocess3.start()\nprocess4.start()\n" }, { "alpha_fraction": 0.5979053378105164, "alphanum_fraction": 0.5998324155807495, "avg_line_length": 30.161880493164062, "blob_id": "99b873545a2104169019e9b013e5fa17c98a3237", "content_id": "ec7c83f1979301258348c91dacd9df190694378c", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 11935, "license_type": "permissive", "max_line_length": 194, "num_lines": 383, "path": "/libs/results.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\nimport os, sys\nimport os.path\nimport json\n\n#scraping libs\nfrom urllib.parse import urlsplit\nfrom os.path import isfile, join\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.firefox.options import Options\nimport time\n\nfrom lxml import html\nfrom bs4 import BeautifulSoup, Comment\nimport lxml.html\nimport os\n\nfrom urllib.parse import urlsplit\nfrom urllib.parse import urlparse\nimport urllib.parse\nimport socket\n\n#tool libs\nsys.path.insert(0, '..')\nfrom db.connect import DB\n\nsys.path.insert(0, '..')\nfrom db.results import Results as DB_Results\n\nfrom libs.helpers import Helpers\n\n# class for results functions; mainly to read and write database content but also to save the source code of URLs\nclass Results:\n\n def __init__(self, cursor):\n self.cursor = cursor\n\n def saveResult(url):\n\n os.environ['MOZ_HEADLESS'] = '0'\n options = Options()\n #options.add_argument('--ignore-certificate-errors-spki-list')\n #options.add_argument('--ignore-ssl-errors')\n #options.add_argument('--ignore-certificate-errors')\n #options.add_argument('--allow-insecure-localhost')\n options.add_argument(\"user-data-dir=selenium\")\n options.log.level = 'error'\n\n profile = webdriver.FirefoxProfile()\n\n profile.set_preference(\"browser.safebrowsing.blockedURIs.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.downloads.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.forbiddenURIs.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.malware.enabled\", True)\n profile.set_preference(\"browser.safebrowsing.phishing.enabled\", True)\n 
profile.set_preference(\"dom.webnotifications.enabled\", False);\n\n profile.add_extension(extension='/home/sebastian/alpha/extensions/i_dont_care_about_cookies-3.2.7-an+fx.xpi')\n\n driver = webdriver.Firefox(firefox_profile=profile, options=options)\n\n driver.set_page_load_timeout(60)\n\n\n try:\n driver.get(url)\n time.sleep(10)\n source = driver.page_source\n\n\n except:\n source = \"error\"\n\n driver.quit()\n\n source = Helpers.changeCoding(source)\n\n return source\n\n def getResultMeta(url, study_id, search_engine, query_id):\n meta = []\n study_id = str(study_id)\n query_id = str(query_id)\n compute_hash = url+study_id+search_engine+query_id\n hash = Helpers.computeMD5hash(compute_hash)\n try:\n parsed_uri = urlparse(url)\n #o = urllib.parse.urlsplit(url)\n #hostname = o.hostname\n hostname = '{uri.netloc}'.format(uri=parsed_uri)\n ip = socket.gethostbyname(hostname)\n except:\n ip = \"-1\"\n main = '{0.scheme}://{0.netloc}/'.format(urlsplit(url))\n meta = [hash, ip, main]\n return meta\n\n def getContent(source, main):\n content = []\n comments = \"\"\n urls = \"\"\n\n\n\n if (source !=\"error\"):\n #extract all comments in source code\n soup = BeautifulSoup(source, 'lxml')\n comments_bs4 = soup.findAll(text=lambda text:isinstance(text, Comment))\n\n\n for c in comments_bs4:\n c = Helpers.html_escape(c)\n if c and c != \" \":\n comments = comments+'[comment_source]'+c\n\n\n soup_urls = []\n tags = soup.find_all('a')\n\n for tag in tags:\n link_text = str(tag.string).strip()\n href = str(tag.get('href')).strip()\n if \"http\" not in href:\n href = href.lstrip('/')\n href = main+href\n\n link = \"[url]\"+link_text+\" \"+href\n if not Helpers.matchText(link, '*mailto:*'):\n link = Helpers.html_escape(link)\n if link and link != \" \":\n urls = urls+link\n\n content = [Helpers.html_escape(str(source)), urls, comments]\n return content\n\n\n def getContactUrl(urls, main):\n\n with open('../config/evaluation.ini', 'r') as f:\n array = json.load(f)\n\n 
config_file = array[\"crawler\"][\"contact\"][\"config\"]\n\n with open(config_file, 'r') as f:\n array = json.load(f)\n\n keywords = array[\"keywords\"]\n\n contact_urls = []\n\n urls = urls.split('[url]')\n\n for keyword in keywords:\n\n for url in urls:\n\n if(url):\n href = url.split(\" \")\n if len(href) > 1:\n link = href[1]\n anchor = href[0]\n pattern = \"*\"+keyword+\"*\"\n if Helpers.matchText(link, pattern) or Helpers.matchText(anchor, pattern):\n contact_urls.append([link, keyword])\n\n contact_urls = Helpers.remove_duplicates_from_list(contact_urls)\n\n if(contact_urls):\n check = contact_urls[0][0]\n\n if Helpers.matchText(check, \"*http*//*\"):\n contact_url = check\n else:\n\n if Helpers.matchText(check, \"*..*\"):\n check = check.replace('..', '')\n check = \"\".join(check.split())\n contact_url = main+check\n\n else:\n contact_url = \"-1\"\n\n return contact_url\n\n def getRecentSource(hash):\n db = DB()\n rows = DB_Results.getRecentSource(db.cursor, hash)\n db.DBDisconnect()\n return rows\n\n def getResultsSource(hash):\n db = DB()\n rows = DB_Results.getResultsSource(db.cursor, hash)\n db.DBDisconnect()\n return rows\n\n def insertSource(hash, source, urls, comments, today, progress):\n db = DB()\n DB_Results.insertSource(db.cursor, hash, source, urls, comments, today, progress)\n db.DBDisconnect()\n\n def updateSources(hash, source, urls, comments, today, progress):\n db = DB()\n DB_Results.updateSources(db.cursor, hash, source, urls, comments, today, progress)\n db.DBDisconnect()\n\n def insertSpeed(hash, speed):\n db = DB()\n DB_Results.insertSpeed(db.cursor, hash, speed)\n db.DBDisconnect()\n\n def getSpeed(hash):\n db = DB()\n rows = DB_Results.getSpeed(db.cursor, hash)\n db.DBDisconnect()\n return rows\n\n def getResultsSourcesNULL():\n db = DB()\n rows = DB_Results.getResultsSourcesNULL(db.cursor)\n db.DBDisconnect()\n return rows\n\n def insertResult(query_id, study_id, job_id, upload, ip, hash, main_hash, contact_hash, search_engine, 
url, main, contact, today, timestamp, progress, results_position):\n db = DB()\n DB_Results.insertResult(db.cursor, query_id, study_id, job_id, upload, ip, hash, main_hash, contact_hash, search_engine, url, main, contact, today, timestamp, progress, results_position)\n db.DBDisconnect()\n\n def getAllResultsIdsByStudy(results_studies_id):\n db = DB()\n rows = DB_Results.getAllResultsIdsByStudy(db.cursor, results_studies_id)\n db.DBDisconnect()\n return rows\n\n def getResultsIdsByStudyContact(results_studies_id, results_contact):\n db = DB()\n rows = DB_Results.getResultsIdsByStudyContact(db.cursor, results_studies_id, results_contact)\n db.DBDisconnect()\n return rows\n\n def getResultById(results_id):\n db = DB()\n rows = DB_Results.getResultById(db.cursor, results_id)\n db.DBDisconnect()\n return rows\n\n def getResultByHash(hash):\n db = DB()\n rows = DB_Results.getResultByHash(db.cursor, hash)\n db.DBDisconnect()\n return rows\n\n def insertContactResult(contact_url, contact_hash, results_id):\n db = DB()\n DB_Results.insertContactResult(db.cursor, contact_url, contact_hash, results_id)\n db.DBDisconnect()\n\n\n def updateContactProgress(results_contact, results_id):\n db = DB()\n DB_Results.updateContactProgress(db.cursor, results_contact, results_id)\n db.DBDisconnect()\n\n\n def getRecentResultByHash(hash):\n db = DB()\n rows = DB_Results.getRecentResultByHash(db.cursor, hash)\n db.DBDisconnect()\n return rows\n\n def getResults():\n db = DB()\n rows = DB_Results.getResults(db.cursor)\n db.DBDisconnect()\n return rows\n\n\n def getSourcesSpeedNULL():\n db = DB()\n rows = DB_Results.getSourcesSpeedNULL(db.cursor)\n db.DBDisconnect()\n return rows\n\n def getLastPosition(query_id, study_id, results_se, today):\n db = DB()\n rows = DB_Results.getLastPosition(db.cursor, query_id, study_id, results_se, today)\n db.DBDisconnect()\n return rows\n\n def countResultsbyStudy(studies_id):\n db = DB()\n rows = DB_Results.countResultsbyStudy(db.cursor, studies_id)\n 
db.DBDisconnect()\n return rows\n\n def countResultsbyStudySE(studies_id, se):\n db = DB()\n rows = DB_Results.countResultsbyStudySE(db.cursor, studies_id, se)\n db.DBDisconnect()\n return rows\n\n def countResultsQuery(results_queries_id):\n db = DB()\n rows = DB_Results.countResultsQuery(db.cursor, results_queries_id)\n db.DBDisconnect()\n return rows\n\n def countClassifiedResultsbyQuery(results_queries_id):\n db = DB()\n rows = DB_Results.countClassifiedResultsbyQuery(db.cursor, results_queries_id)\n db.DBDisconnect()\n return rows\n\n def countClassifiedResultsbyStudy(studies_id):\n db = DB()\n rows = DB_Results.countClassifiedResultsbyStudy(db.cursor, studies_id)\n db.DBDisconnect()\n return rows\n\n def countClassifiedResultsbyStudySE(studies_id, se):\n db = DB()\n rows = DB_Results.countClassifiedResultsbyStudySE(db.cursor, studies_id, se)\n db.DBDisconnect()\n return rows\n\n def countFailedResultsbyStudy(studies_id):\n db = DB()\n rows = DB_Results.countFailedResultsbyStudy(db.cursor, studies_id)\n db.DBDisconnect()\n return rows\n\n def getPosition(query_id, study_id, search_engine, results_position):\n db = DB()\n rows = DB_Results.getPosition(db.cursor, query_id, study_id, search_engine, results_position)\n db.DBDisconnect()\n return rows\n\n def getResultHashesOnMain(main_hash):\n db = DB()\n rows = DB_Results.getResultHashesOnMain(db.cursor, main_hash)\n db.DBDisconnect()\n return rows\n\n def getSource(hash):\n db = DB()\n rows = DB_Results.getSource(db.cursor, hash)\n db.DBDisconnect()\n return rows\n\n def getURL(query_id, study_id, results_url, results_se):\n db = DB()\n rows = DB_Results.getURL(db.cursor, query_id, study_id, results_url, results_se)\n db.DBDisconnect()\n return rows\n\n def getSERP(query_id):\n db = DB()\n DB_Results.getSERP(db.cursor, query_id)\n db.DBDisconnect()\n\n def insertSERP(query_id, serp, serp_scraper, today):\n db = DB()\n DB_Results.insertSERP(db.cursor, query_id, serp, serp_scraper, today)\n db.DBDisconnect()\n\n\n 
def getLastPosition(query_id, study_id, search_engine, results_position):\n db = DB()\n rows = DB_Results.getLastPosition(db.cursor, query_id, study_id, search_engine, results_position)\n db.DBDisconnect()\n return rows\n\n def deleteResults(queries_id, results_se):\n db = DB()\n DB_Results.deleteResults(db.cursor, queries_id, results_se)\n db.DBDisconnect()\n\n def deleteResultsNoScrapers(queries_id, results_se):\n db = DB()\n DB_Results.deleteResultsNoScrapers(db.cursor, queries_id, results_se)\n db.DBDisconnect()\n" }, { "alpha_fraction": 0.5522967576980591, "alphanum_fraction": 0.5591977834701538, "avg_line_length": 23.15104103088379, "blob_id": "a7e01730e7adf6634fd2df37d6d9893eb5590785", "content_id": "95d20963b3f110ed4fc204a0c88a705854b58692", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4637, "license_type": "permissive", "max_line_length": 75, "num_lines": 192, "path": "/apps/indicators/indicators.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#main app to collect technical seo indicators from a webpage\n\n#include libs\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\n#apps for seo indicators\nfrom https import https\n\nfrom micro import micros\n\nfrom og import og\n\nfrom viewport import viewport\n\nfrom sitemap import sitemap\n\nfrom wordpress import wordpress\n\nfrom canonical import canonical\n\nfrom nofollow import nofollow\n\nfrom h1 import h1\n\nfrom keywords import kw\n\nfrom kw_in_url import kw_in_url\n\nfrom description import description\n\nfrom title import title\n\nfrom title_h1 import title_h1\n\nfrom links import links\n\nfrom keyword_density import keyword_density\n\nfrom plugins import plugins\n\nfrom sources import sources\n\nfrom robots import robots\n\nfrom url_length import url_length\n\nfrom identical_title import identical_title\n\ntry:\n\n def get_result_hashes():\n if update == \"0\":\n hashes = 
Evaluations.getResultHashesNoUpdate(number_indicators)\n else:\n hashes = Evaluations.getResultHashes()\n return hashes\n\n def get_result_source(hash):\n try:\n source = Results.getResultsSource(hash)\n html_source = Helpers.html_unescape(source[0][1])\n soup = BeautifulSoup(html_source, 'lxml')\n html_source = soup.get_text().strip()\n html_source = html_source.split('\\n')\n html_source = set(html_source)\n html_source = list(html_source)\n html_comments = Helpers.html_unescape(source[0][2])\n html_comments = html_comments.split(\"[comment_source]\")\n html_comments = set(html_comments)\n html_comments = list(html_comments)\n code = Helpers.html_unescape(source[0][0])\n code = code.lower()\n tree = html.fromstring(code)\n source_array = [code, tree, html_source, html_comments, soup]\n return source_array\n except:\n return False\n\n def get_result_meta(hash):\n meta = Results.getRecentResultByHash(hash)\n return meta\n\n def get_result_query(meta):\n results_id = meta[0][-1]\n query_row = Queries.getQuerybyResult(results_id)\n try:\n query = query_row[0][0]\n query = query.lower()\n return query\n except:\n return False\n\n #create url list\n hashes = get_result_hashes()\n random.shuffle(hashes)\n\n #analyze every url from list\n for h in hashes:\n print(h)\n hash = h\n\n\n result_source = get_result_source(hash)\n\n if result_source:\n\n code = result_source[0]\n tree = result_source[1]\n html_source = result_source[2]\n html_comments = result_source[3]\n soup = result_source[4]\n\n #call functions\n meta = get_result_meta(hash)\n for m in meta:\n result_id = m[0]\n main_hash = m[6]\n result_url = m[10]\n result_main = m[11]\n\n query = get_result_query(meta)\n\n if query:\n check_query = True\n else:\n query = \"-1\"\n check_query = False\n\n #print('canonical')\n canonical(hash, tree)\n\n #print('kw')\n kw(hash,tree, query, check_query)\n kw_in_url(hash,result_url, query, check_query)\n keyword_density(hash,query, soup, check_query)\n\n #print('title_h1')\n 
title_h1(hash,tree)\n\n #print('viewport')\n viewport(hash,code)\n\n #print('description')\n description(hash,tree)\n\n #print('title')\n title(hash,tree)\n\n #print('links')\n links(hash,result_main, html_source)\n\n #print('plugins')\n plugins(hash,html_source, html_comments)\n\n #print('https')\n https(result_url, hash)\n\n #print('micros')\n micros(hash,html_comments, html_source)\n\n #print('og')\n og(hash,code)\n\n #print('sitemap')\n sitemap(hash,code)\n\n #print('wordpress')\n wordpress(hash,tree)\n\n #print('nofollow')\n nofollow(hash,tree)\n\n #print('h1')\n h1(hash,tree)\n\n #print('sources')\n sources(hash, result_url, result_main)\n\n #print('robots')\n robots(hash, result_main, main_hash)\n\n #print('url_length')\n url_length(hash, result_url)\n\n #print('identical_title')\n identical_title(hash, result_main)\n\n\nexcept Exception as e:\n print(e)\n" }, { "alpha_fraction": 0.7168111801147461, "alphanum_fraction": 0.721878170967102, "avg_line_length": 54.16149139404297, "blob_id": "5013e51754d4daaa1e3a2ed49665fde754953fb8", "content_id": "9f97681756fa8841a29ae53a8aded3124e84f2fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8881, "license_type": "permissive", "max_line_length": 390, "num_lines": 161, "path": "/db/evaluations.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#class for evaluations table\n\nclass Evaluations:\n def __init__(self, cursor):\n self.cursor = cursor\n\n\n#read from db\n\n\n#check existing module entries in the evaluations table\n def getEvaluationModule(cursor, hash, evaluations_module):\n sql= \"SELECT evaluations_module from evaluations WHERE evaluations_results_hash=%s AND evaluations_module=%s LIMIT 1\"\n data = (hash, evaluations_module)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def getEvaluationModuleResult(cursor, hash, evaluations_module, evaluations_result):\n #sql= \"SELECT 
evaluations_module from evaluations WHERE evaluations_results_hash=%s AND evaluations_module=%s AND evaluations_result=%s AND evaluations_date < NOW() - INTERVAL %s day LIMIT 1\"\n sql= \"SELECT evaluations_module from evaluations WHERE evaluations_results_hash=%s AND evaluations_module=%s AND evaluations_result=%s LIMIT 1\"\n #data = (hash, evaluations_module, evaluations_result, days)\n data = (hash, evaluations_module, evaluations_result)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def getResultHashes(cursor):\n sql = \"SELECT distinct(results_hash) FROM results TABLESAMPLE SYSTEM_ROWS(100000) JOIN sources ON results_hash = sources_hash LEFT JOIN evaluations ON results_hash = evaluations_results_hash WHERE sources_source IS NOT NULL and sources_source != '-1'\"\n cursor.execute(sql)\n rows = cursor.fetchall()\n return rows\n\n#read hashes with less than 53 evaluation results to select the unprocessed results and to check for missing values\n def getResultHashesNoUpdate(cursor, indicators):\n sql = \"SELECT results_hash FROM results TABLESAMPLE SYSTEM_ROWS(2000) JOIN sources ON results_hash = sources_hash LEFT JOIN evaluations ON results_hash = evaluations_results_hash WHERE sources_source IS NOT NULL and sources_source != '-1' GROUP BY 1 HAVING COUNT(evaluations_module) < %s\"\n data = (indicators)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def getResultwithIndicators(cursor, hash):\n sql = \"SELECT distinct(results_hash), evaluations.* FROM results, evaluations, sources WHERE results_hash = evaluations_results_hash AND sources_hash = evaluations_results_hash AND results_hash = %s AND sources_speed IS NOT NULL\"\n data = (hash)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n#read hashes with indicators and pagespeed\n def getResultstoClassify(cursor, indicators):\n\n #sql = \"SELECT results_hash, results_url, results_main, sources_speed FROM results left join classifications 
on results_hash = classifications_hash JOIN sources ON sources_hash = results_hash JOIN evaluations ON results_hash = evaluations_results_hash WHERE classifications_hash IS NULL AND sources_speed IS NOT NULL GROUP BY 1,2,3,4 HAVING COUNT(evaluations_module) >= %s\"\n\n sql = \"SELECT results_hash, results_url, results_main, sources_speed FROM results left join classifications on results_hash = classifications_hash JOIN sources ON sources_hash = results_hash JOIN evaluations ON results_hash = evaluations_results_hash WHERE classifications_hash IS NULL AND sources_speed IS NOT NULL GROUP BY 1,2,3,4 HAVING COUNT(DISTINCT(evaluations_module)) >= %s\"\n data = (indicators)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def getResultstoClassifyCheck(cursor):\n\n sql=\"select classifications_hash, string_agg(classifications_classification, ',') FROM classifications WHERE classifications_result != 'unassigned' GROUP BY 1 HAVING COUNT(classifications_classification) = 1 ORDER BY RANDOM() LIMIT 200\"\n cursor.execute(sql)\n rows = cursor.fetchall()\n return rows\n\n def getResultstoUpdateClassification(cursor, classifier_id, classifier_result):\n\n #sql = \"SELECT results_hash, results_url, results_main, sources_speed FROM results left join classifications on results_hash = classifications_hash JOIN sources ON sources_hash = results_hash JOIN evaluations ON results_hash = evaluations_results_hash WHERE classifications_hash IS NULL AND sources_speed IS NOT NULL GROUP BY 1,2,3,4 HAVING COUNT(evaluations_module) >= %s\"\n\n sql = \"SELECT classifications_hash, results_url, results_main, sources_speed FROM results join classifications on results_hash = classifications_hash JOIN sources ON sources_hash = results_hash WHERE classifications_classification = %s AND classifications_result = %s GROUP BY 1,2,3,4\"\n data = (classifier_id, classifier_result)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def 
getUnassigned(cursor):\n\n sql = \"SELECT classifications_id FROM classifications WHERE classifications_result = 'unassigned' LIMIT 1\"\n cursor.execute(sql)\n rows = cursor.fetchall()\n return rows\n\n\n def getEvaluationsResults(cursor, hash):\n sql = \"SELECT evaluations_module, evaluations_result FROM evaluations WHERE evaluations_results_hash = %s\"\n data = (hash)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def getEvaluationModules(cursor):\n sql = \"SELECT DISTINCT(evaluations_module) FROM evaluations\"\n cursor.execute(sql)\n rows = cursor.fetchall()\n return rows\n\n\n#write to db\n\n\n#insert indicators, results of url categories and plugins evaluations to the table\n def insertEvaluationResult(cursor, evaluations_results_hash, evaluations_module, evaluations_result, evaluations_date):\n cursor.execute(\"INSERT INTO evaluations VALUES(%s,%s,%s, %s) ON CONFLICT DO NOTHING;\", (evaluations_results_hash, evaluations_module, evaluations_result, evaluations_date,))\n\n#update evaulation results\n def UpdateEvaluationResult(cursor, value, date, hash, module):\n cursor.execute(\n \"UPDATE evaluations SET evaluations_result= %s, evaluations_date = %s WHERE evaluations_results_hash = %s and evaluations_module = %s\",\n (value, date, hash, module)\n )\n\n def insertClassificationResult(cursor, hash, result, classifications_classification, today):\n cursor.execute(\"INSERT INTO classifications (classifications_hash, classifications_result, classifications_classification, classifications_date) VALUES(%s,%s,%s,%s) ON CONFLICT DO NOTHING;\", (hash, result, classifications_classification, today,))\n\n\n def updateClassificationResult(cursor, hash, result, classifications_classification, today):\n cursor.execute(\n \"UPDATE classifications SET classifications_result= %s, classifications_date = %s WHERE classifications_hash = %s and classifications_classification = %s\",\n (result, today, hash, classifications_classification)\n )\n\n def 
getClassificationResult(cursor, hash, classifications_classification):\n sql = \"SELECT classifications_result FROM classifications WHERE classifications_hash = %s AND classifications_classification = %s LIMIT 1\"\n data = (hash, classifications_classification)\n cursor.execute(sql,data)\n rows = cursor.fetchall()\n return rows\n\n def getClassificationResultValue(cursor, hash, classifications_classification, classifier_result):\n sql = \"SELECT classifications_result FROM classifications WHERE classifications_hash = %s AND classifications_classification = %s AND classifications_result = %s LIMIT 1\"\n data = (hash, classifications_classification, classifier_result)\n cursor.execute(sql,data)\n rows = cursor.fetchall()\n return rows\n\n#delete from db\n\n#remove duplicates\n def deleteDuplicates(cursor):\n sql = \"DELETE FROM evaluations WHERE evaluations_id IN (SELECT evaluations_id FROM(SELECT evaluations_id,ROW_NUMBER() OVER( PARTITION BY evaluations_results_hash,evaluations_module ORDER BY evaluations_id) AS row_num FROM evaluations) t WHERE t.row_num > 1 );\"\n cursor.execute(sql)\n\n\n def deleteDupClassifiedData(cursor):\n sql = \"DELETE FROM classifications WHERE classifications_id IN (SELECT classifications_id FROM(SELECT classifications_id,ROW_NUMBER() OVER( PARTITION BY classifications_hash,classifications_classification ORDER BY classifications_id) AS row_num FROM classifications) t WHERE t.row_num > 1 )\"\n cursor.execute(sql)\n\n def getEvaluationsDate(cursor, hash, module):\n sql= \"SELECT evaluations_date from evaluations WHERE evaluations_results_hash=%s AND evaluations_module=%s LIMIT 1\"\n data = (hash, module)\n cursor.execute(sql,data)\n rows = cursor.fetchall()\n return rows\n\n\n def deleteEvaluations(cursor, queries_id):\n pass\n\n def deleteClassifications(cursor, queries_id):\n pass\n" }, { "alpha_fraction": 0.596308171749115, "alphanum_fraction": 0.6035313010215759, "avg_line_length": 26.688888549804688, "blob_id": 
"c8f6989c3297df6370070fcf0f26e034d4528f46", "content_id": "da43353731233f95cfd539812938c153c7ff3511", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2492, "license_type": "permissive", "max_line_length": 78, "num_lines": 90, "path": "/apps/indicators/title_h1.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script to check title tags in headers\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef title_h1(hash, tree):\n\n xpath_title = \"//title/text()\"\n xpath_meta_title = \"//meta[@name='title']/@content\"\n xpath_og_title = \"//meta[@property='og:title']/@content\"\n xpath_h1 = \"//h1/text()\"\n module = 'check title'\n value = '0'\n\n title = tree.xpath(xpath_title)\n check_title = str(title)\n check_title = check_title.lower()\n check_title = check_title.strip()\n check_title = Helpers.html_escape(check_title)\n\n meta_title = tree.xpath(xpath_meta_title)\n check_meta_title = str(meta_title)\n check_meta_title = check_meta_title.lower()\n check_meta_title = check_meta_title.strip()\n check_meta_title = Helpers.html_escape(check_meta_title)\n\n og_title = tree.xpath(xpath_og_title)\n check_og_title = str(og_title)\n check_og_title = check_og_title.lower()\n check_og_title = check_og_title.strip()\n check_og_title = Helpers.html_escape(check_og_title)\n\n h1 = tree.xpath(xpath_h1)\n\n ct_value_counter = 0\n ct_value = '0'\n ct_module = 'check title_h1_identical'\n\n\n\n check_evaluations_result(hash, ct_module, ct_value)\n\n cth_value_counter = 0\n cth_value = '0'\n\n cth_module = 'check title_h1_match'\n\n\n check_evaluations_result(hash, cth_module, cth_value)\n\n for h in h1:\n h = str(h)\n h = h.lower()\n h = h.strip()\n h = Helpers.html_escape(h)\n check_title = re.sub('\\W+',' ', check_title)\n check_meta_title = re.sub('\\W+',' ', check_meta_title)\n check_og_title = re.sub('\\W+',' ', check_og_title)\n 
h = re.sub('\\W+',' ', h)\n\n if check_title == h or check_meta_title == h or check_og_title == h:\n ct_value_counter = ct_value_counter + 1\n\n ct_value = str(ct_value_counter)\n\n Evaluations.UpdateEvaluationResult(ct_value, today, hash, ct_module)\n\n\n pattern = '*'+check_title+'*'\n if (Helpers.matchText(h, pattern)):\n cth_value_counter = cth_value_counter + 1\n\n pattern = '*'+check_meta_title+'*'\n if (Helpers.matchText(h, pattern)):\n cth_value_counter = cth_value_counter + 1\n\n pattern = '*'+check_og_title+'*'\n if (Helpers.matchText(h, pattern)):\n cth_value_counter = cth_value_counter + 1\n\n\n cth_value = str(cth_value_counter)\n\n\n Evaluations.UpdateEvaluationResult(cth_value, today, hash, cth_module)\n" }, { "alpha_fraction": 0.6036363840103149, "alphanum_fraction": 0.610909104347229, "avg_line_length": 21.91666603088379, "blob_id": "81a032f21aa8be53bfa695e8080884c8b5925772", "content_id": "b01506801290340b02a232b04486da86b442be99", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 825, "license_type": "permissive", "max_line_length": 75, "num_lines": 36, "path": "/db/connect.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#Class to connect to the Database\n\nimport psycopg2\nimport json\nimport os\n\nconfig_path = '/home/alpha/config/'\n\nwith open(config_path+'db.ini', 'r') as f:\n array = json.load(f)\n\ndbname = array['dbname']\nuser = array['user']\nhost = array['host']\npassword = array['password']\n\n\ndb_connection = (\"dbname='\"+dbname+\"' user='\"+user+\"' host='\"+host+\"' \" + \\\n \"password='\"+password+\"' connect_timeout=3000\")\n\n\nclass DB:\n def __init__(self, db_string = db_connection):\n self.__connection = psycopg2.connect(db_string)\n self.__connection.autocommit = True\n self.__cursor = self.__connection.cursor()\n\n def __getCursor(self):\n return self.__cursor\n\n\n def DBDisconnect(self):\n self.__cursor.close()\n 
self.__connection.close()\n\n cursor = property(__getCursor)\n" }, { "alpha_fraction": 0.5369003415107727, "alphanum_fraction": 0.5398523807525635, "avg_line_length": 27.526315689086914, "blob_id": "dd578c761b4fc12aff67a371e5f97fceaed92786", "content_id": "81e6594a178c1e141675545960e675752cfc8cb7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2710, "license_type": "permissive", "max_line_length": 78, "num_lines": 95, "path": "/libs/helper.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\nimport os, sys\nimport hashlib\nfrom datetime import date\nfrom datetime import datetime\nimport fnmatch\n\nimport urllib.request\nfrom urllib.error import HTTPError\n\n#class for supportive functions for repetitive tasks\nclass Helpers:\n def __init__(self):\n self.data = []\n\n#compute an unique hash for urls\n def computeMD5hash(string):\n m = hashlib.md5()\n m.update(string.encode('utf-8'))\n return m.hexdigest()\n\n#change coding for html source codes\n def changeCoding(source):\n if type(source) == str:\n source = source.encode('utf-8')\n else:\n source = source.decode('utf-8')\n\n return str(source, 'utf-8', 'ignore')\n\n#function to create log files for scraping tasks\n def saveLog(file_name, content, show):\n log_now = datetime.now()\n log_now = log_now.strftime('%Y-%m-%d_%H%M%S')\n log_path = os.getcwd() + \"//\" + file_name\n with open(log_path,'a+') as f:\n log_now = log_now+'\\n'\n f.write(log_now)\n if(show == 1):\n print(log_now)\n c = content+'\\n'\n f.write(c)\n if(show == 1):\n print(c)\n f.close()\n\n#replace html symbols to prepare the source code to write it into the database\n def html_escape(text):\n html_escape_table = {\n \"&\": \"&amp;\",\n '\"': \"&quot;\",\n \"'\": \"&apos;\",\n \">\": \"&gt;\",\n \"<\": \"&lt;\",\n \"#\": \"&hash;\"\n }\n \"\"\"Produce entities within text.\"\"\"\n return \"\".join(html_escape_table.get(c,c) for c in 
text)\n\n#conver html symbols back\n def html_unescape(text):\n text = text.replace(\"&lt;\", \"<\")\n text = text.replace(\"&gt;\", \">\")\n text = text.replace(\"&quot;\", '\"')\n text = text.replace(\"&apos;\", \"'\")\n text = text.replace(\"&hash;\", \"#\")\n text = text.replace(\"&amp;\", \"&\")\n return text\n\n#text matching function to check for plugins and tools in source code\n def matchText(text, pattern):\n text = text.lower()\n pattern= pattern.lower()\n check = fnmatch.fnmatch(text, pattern)\n return check\n\n#helper to remove duplicates in lists\n def remove_duplicates_from_list(a_list):\n b_set = set(tuple(x) for x in a_list)\n b = [ list(x) for x in b_set ]\n b.sort(key = lambda x: a_list.index(x) )\n return b\n\n#helper to validate urls\n def get_netloc(url):\n parsed = urlparse(url)\n return parsed.netloc\n\n def validate_url(url):\n try:\n urllib.request.urlretrieve(url)\n except:\n return False\n else:\n return True\n" }, { "alpha_fraction": 0.7269076108932495, "alphanum_fraction": 0.7269076108932495, "avg_line_length": 61.25, "blob_id": "5ca7d964025e407b2288ec07232176147118de76", "content_id": "900ec6665d88b497538629c84087b075cf60f56f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 250, "license_type": "permissive", "max_line_length": 178, "num_lines": 4, "path": "/config/contact.ini", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "{\n \"_comment\": \"config file for all evaluation modules; text-match: simple text match objects using csv files with keywords; crawler: crawlers to save specific urls of a website\",\n \"keywords\": [\"impressum\", \"kontakt\", \"über uns\", \"datenschutz\"]\n}\n" }, { "alpha_fraction": 0.6813008189201355, "alphanum_fraction": 0.6821138262748718, "avg_line_length": 31.156862258911133, "blob_id": "4cd20778388d204a252b11530282c17e34c7f2be", "content_id": "c8dfe7ecbab3f7b2c4357bcbcd15b41ae74d9f6f", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4920, "license_type": "permissive", "max_line_length": 131, "num_lines": 153, "path": "/libs/evaluations.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#sys libs\nimport os, sys\nimport os.path\n\n#tool libs\nsys.path.insert(0, '..')\nfrom db.connect import DB\n\nsys.path.insert(0, '..')\nfrom db.evaluations import Evaluations as DB_Evaluations\n\nfrom libs.helpers import Helpers\n\n# class for evaluations functions; mainly to read and write database content; the library is necessary to process indicators\nclass Evaluations:\n def __init__(self):\n self.data = []\n\n\n#read from db\n\n#helper to check for existing module entries in the evaluations table\n def getEvaluationModule(hash, evaluations_module):\n db = DB()\n rows = DB_Evaluations.getEvaluationModule(db.cursor, hash, evaluations_module)\n db.DBDisconnect()\n return rows\n\n#helper to check for existing module and result entries in the evaluations table\n def getEvaluationModuleResult(hash, evaluations_module, evaluations_result):\n db = DB()\n rows = DB_Evaluations.getEvaluationModuleResult(db.cursor, hash, evaluations_module, evaluations_result)\n db.DBDisconnect()\n return rows\n\n#read hashes with less than 53 evaluation results to select the unprocessed results and to check for missing values\n def getResultHashes():\n db = DB()\n rows = DB_Evaluations.getResultHashes(db.cursor)\n db.DBDisconnect()\n return rows\n\n def getResultHashesNoUpdate(number_indicators):\n db = DB()\n rows = DB_Evaluations.getResultHashesNoUpdate(db.cursor, number_indicators)\n db.DBDisconnect()\n return rows\n\n def getEvaluationsDate(hash, module):\n db = DB()\n rows = DB_Evaluations.getEvaluationsDate(db.cursor, hash, module)\n db.DBDisconnect()\n return rows\n\n\n def getResultstoClassify(indicators):\n db = DB()\n rows = DB_Evaluations.getResultstoClassify(db.cursor, indicators)\n db.DBDisconnect()\n return 
rows\n\n def getResultstoClassifyCheck():\n db = DB()\n rows = DB_Evaluations.getResultstoClassifyCheck(db.cursor)\n db.DBDisconnect()\n return rows\n\n def getUnassigned():\n db = DB()\n rows = DB_Evaluations.getUnassigned(db.cursor)\n db.DBDisconnect()\n return rows\n\n\n def getResultstoUpdateClassification(classifier_id, classifier_result):\n db = DB()\n rows = DB_Evaluations.getResultstoUpdateClassification(db.cursor, classifier_id, classifier_result)\n db.DBDisconnect()\n return rows\n\n def getResultwithIndicators(hash):\n db = DB()\n rows = DB_Evaluations.getResultwithIndicators(db.cursor, hash)\n db.DBDisconnect()\n return rows\n\n def getEvaluationsResults(hash):\n db = DB()\n rows = DB_Evaluations.getEvaluationsResults(db.cursor, hash)\n db.DBDisconnect()\n return rows\n\n\n def getClassificationResult(hash, classifications_classification):\n db = DB()\n rows = DB_Evaluations.getClassificationResult(db.cursor, hash, classifications_classification)\n db.DBDisconnect()\n return rows\n\n def getClassificationResultValue(hash, classifications_classification, classifications_result):\n db = DB()\n rows = DB_Evaluations.getClassificationResultValue(db.cursor, hash, classifications_classification, classifications_result)\n db.DBDisconnect()\n return rows\n\n def getEvaluationModules():\n db = DB()\n rows = DB_Evaluations.getEvaluationModules(db.cursor)\n db.DBDisconnect()\n return rows\n \n#write to db\n\n#insert indicators, results of url categories and plugins evaluations to the table\n def insertEvaluationResult(hash, module, value, today):\n db = DB()\n DB_Evaluations.insertEvaluationResult(db.cursor, hash, module, value, today)\n db.DBDisconnect()\n\n#update evaulation results\n def UpdateEvaluationResult(value, date, hash, module):\n db = DB()\n DB_Evaluations.UpdateEvaluationResult(db.cursor, value, date, hash, module)\n db.DBDisconnect()\n\n def insertClassificationResult(hash, result, classifications_classification, today):\n db = DB()\n 
DB_Evaluations.insertClassificationResult(db.cursor, hash, result, classifications_classification, today)\n db.DBDisconnect()\n\n def updateClassificationResult(hash, result, classifications_classification, today):\n db = DB()\n DB_Evaluations.updateClassificationResult(db.cursor, hash, result, classifications_classification, today)\n db.DBDisconnect()\n#delete from db\n\n#function to remove duplicates\n def deleteDuplicates():\n db = DB()\n rows = DB_Evaluations.deleteDuplicates(db.cursor)\n db.DBDisconnect()\n\n def deleteDupClassifiedData():\n db = DB()\n rows = DB_Evaluations.deleteDupClassifiedData(db.cursor)\n db.DBDisconnect()\n\n\n def deleteEvaluations(queries_id):\n pass\n\n def deleteClassifications(queries_id):\n pass\n" }, { "alpha_fraction": 0.6544540524482727, "alphanum_fraction": 0.6558908224105835, "avg_line_length": 38.028038024902344, "blob_id": "8775118f0c053f5fbe368d08f66c2de0352365b7", "content_id": "a8ebf68621f9935ada57eda981501fd420ec5d8d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4176, "license_type": "permissive", "max_line_length": 194, "num_lines": 107, "path": "/db/queries.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#Class for search queries table\nclass Queries:\n def __init__(self, cursor):\n self.cursor = cursor\n\n\n#read from db\n\n\n\n#function to read all queries of a study\n def getQueriesStudy(cursor, study_id):\n sql= \"SELECT * from queries WHERE queries_studies_id=%s\"\n data = study_id\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def countQueriesStudy(cursor, studies_id):\n sql= \"SELECT count(queries_id) from queries WHERE queries_studies_id=%s\"\n data = studies_id\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def getQueriesIdStudy(cursor, study_id):\n sql= \"SELECT DISTINCT(queries_id) from queries WHERE queries_studies_id=%s\"\n data = (study_id)\n 
cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n def getQueriesNoScrapers(cursor, study_id):\n sql= \"SELECT DISTINCT(queries_id) from queries LEFT JOIN scrapers on scrapers_queries_id = queries_id WHERE queries_studies_id=%s AND scrapers_se IS NULL AND scrapers_queries_id IS NULL\"\n data = (study_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n#function to read all unprocessed queries: todo make scraper_start flexible according possible results from scraper\n def getOpenQueriesStudy(cursor, study_id):\n sql= \"SELECT distinct(queries_id) from queries, scrapers WHERE queries_studies_id=%s AND queries_id = scrapers_queries_id AND scrapers_progress = 0 AND scrapers_start = 990\"\n data = (study_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n#function to read all unprocessed queries\n def getOpenQueriesStudybySE(cursor, study_id, se):\n sql= \"SELECT distinct(queries_id) from queries, scrapers WHERE queries_studies_id=%s AND queries_id = scrapers_queries_id AND scrapers_progress = 0 AND scrapers_se = %s\"\n data = (study_id, se)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n def getOpenErrrorQueriesStudy(cursor, study_id):\n sql= \"SELECT distinct(queries_id) from queries, scrapers WHERE queries_studies_id=%s AND queries_id = scrapers_queries_id AND scrapers_progress != 1\"\n data = (study_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n#open one specific query\n def getQuery(cursor, study_id, query):\n sql= \"SELECT * from queries WHERE queries_studies_id=%s AND queries_query=%s\"\n data = (study_id, query)\n cursor.execute(sql,(data))\n rows = cursor.fetchall()\n return rows\n\n\n def getQuerybyID(cursor, query_id):\n sql= \"SELECT * from queries WHERE queries_id=%s\"\n data = (query_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n#open query of a result\n def getQuerybyResult(cursor, 
results_id):\n sql = \"SELECT queries_query FROM queries, results where queries_id = results_queries_id and results_id = %s\"\n data = (results_id)\n cursor.execute(sql,(data,))\n rows = cursor.fetchall()\n return rows\n\n\n def deleteQuery(cursor, studies_id, query):\n sql= \"DELETE from queries WHERE queries_studies_id=%s AND queries_query = %s\"\n data = (studies_id, query)\n cursor.execute(sql,(data))\n\n def deleteQuerybyId(cursor, studies_id, queries_id):\n sql= \"DELETE from queries WHERE queries_studies_id=%s AND queries_id = %s\"\n data = (studies_id, queries_id)\n cursor.execute(sql,(data))\n\n#write to db\n\n\n#function to write query to db\n def insertQuery(cursor, studies_id, query, date):\n cursor.execute(\"INSERT INTO queries (queries_studies_id, queries_query, queries_date) VALUES(%s,%s,%s);\", (studies_id, query, date,))\n\n#function to write query to db with aditional information\n def insertQueryVal(cursor, studies_id, query, comment, date):\n cursor.execute(\"INSERT INTO queries (queries_studies_id, queries_query, queries_comment, queries_date) VALUES(%s,%s,%s,%s);\", (studies_id, query, comment, date,))\n" }, { "alpha_fraction": 0.6489361524581909, "alphanum_fraction": 0.6489361524581909, "avg_line_length": 22.5, "blob_id": "151fb242fdd434604ba5f4b7f60d2029b203981a", "content_id": "43136e9fb90b98838b00ff7b0705ec9e23341868", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 94, "license_type": "permissive", "max_line_length": 51, "num_lines": 4, "path": "/config/classifier.ini", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "{\n \"_comment\":\"configuration file for classifiers\",\n \"classifier\":[\"rules_old\",\"rules\"]\n}\n" }, { "alpha_fraction": 0.5627705454826355, "alphanum_fraction": 0.5692640542984009, "avg_line_length": 21, "blob_id": "5e8485009d3c60708399ef6cfdd84a1a6e49d988", "content_id": "7818ed411eafde96cb75fe156e261b537a064903", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "permissive", "max_line_length": 53, "num_lines": 21, "path": "/apps/indicators/kw_in_url.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "#script check keywords in url\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ntoday = date.today()\n\ndef kw_in_url(hash, result_url, query, check_query):\n url = result_url.lower()\n module = 'check kw_in_url'\n value = '0'\n\n if check_query:\n pattern = '*'+query+'*'\n if (Helpers.matchText(url, pattern)):\n value = '1'\n\n check_evaluations_result(hash, module, value)\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.625, "avg_line_length": 31, "blob_id": "ff479ab7255abff3a2699e2859f32e3c72f69553", "content_id": "82568779c405f2c9273b90b5642f68c4d672877f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 224, "license_type": "permissive", "max_line_length": 119, "num_lines": 7, "path": "/config/db.ini", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "{\n \"_comment\": \"Database Connection; change the configuration here to use a shared database or run it on a localhost\",\n \"dbname\": \"seoeffekt\",\n \"user\": \"seo\",\n \"host\": \"your_host\",\n \"password\": \"your_password\"\n}\n" }, { "alpha_fraction": 0.5661243200302124, "alphanum_fraction": 0.5711305737495422, "avg_line_length": 24.774192810058594, "blob_id": "ddcbd2b019824707e260344a7cd420cbcbbf30f6", "content_id": "d1f3299331e3fb1ab100e8d93233072351b895a2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2397, "license_type": "permissive", "max_line_length": 144, "num_lines": 93, "path": "/apps/classifier/decision_tree.py", "repo_name": "suenklerhaw/seoeffekt", "src_encoding": "UTF-8", "text": "# Decision Tree 
Classifier\n\n#include libs\n\nimport sys\nsys.path.insert(0, '..')\nfrom include import *\n\ndef classify(classifier_id, hashes):\n\n #convert nominal values to numerical values\n def is_float(value):\n try:\n float(value)\n return True\n except:\n return False\n\n def is_int(value):\n try:\n int(value)\n return True\n except:\n return False\n\n #load results with features and check if all features available: skip results without the number of all indicators (skip incomplete results)\n for h in hashes:\n hash = h[0]\n\n result = Evaluations.getResultwithIndicators(hash)\n\n\n #add loading speed as indicator\n speed = Results.getSpeed(hash)\n speed = speed[0][0]\n\n data_dict = {}\n\n if len(result) == int(number_indicators):\n\n for r in result:\n\n\n feature = r[2]\n value = r[3]\n\n\n\n\n data_dict.update( {feature: value} )\n\n df = pd.DataFrame([data_dict])\n\n df.rename(columns=lambda x: x.lower(), inplace=True)\n\n df['speed'] = float(speed)\n\n df.loc[df['speed'] < 0, 'speed'] = -1\n\n pd.set_option('display.max_columns', None)\n\n #remove cols which are no part of the model\n id_cols = ['micros', 'tools ads', 'tools analytics', 'tools caching', 'tools content', 'tools seo', 'tools social']\n\n df.drop(columns=id_cols, inplace=True)\n\n #load indicators as features\n features = df.columns.values\n\n #load model\n model = load('dt_classifier.joblib')\n\n #predict the seo probability\n def predict_func(model, df, features):\n return model.predict(df[features])\n\n\n predict_vals = predict_func(model, df, features)\n\n\n\n #assign seo probability to nominal value\n seo_classes = {0:'not_optimized', 1:'probably_not_optimized', 2:'probably_optimized', 3:'optimized'}\n\n for p in predict_vals:\n print(p)\n df['seo'] = seo_classes.get(p)\n classification_result = seo_classes.get(p)\n\n #save predicted value to database\n Evaluations.updateClassificationResult(hash, classification_result, classifier_id, today)\n print(hash)\n print(classification_result)\n" } ]
72
scharron/santiago
https://github.com/scharron/santiago
f3b789864bd3f4785f3377d1c9bdc64807a2d71f
e7238190e77541def8a53b01557d3c8bc29f9936
f601731eafc9a7e5925d9815423eb87bcde7fcd9
refs/heads/master
"2016-09-06T18:05:48.365645"
"2013-09-20T14:47:01"
"2013-09-20T14:47:01"
12,975,691
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4991394281387329, "alphanum_fraction": 0.5051634907722473, "avg_line_length": 33.17647171020508, "blob_id": "62e0071dd221e87381aedf6a4e9c6d9af66bbb3b", "content_id": "973730ee1d3b0e593e2bfc27a1e30c1ff81d23f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1162, "license_type": "no_license", "max_line_length": 78, "num_lines": 34, "path": "/santiago-shape.py", "repo_name": "scharron/santiago", "src_encoding": "UTF-8", "text": "import shpUtils\nimport re\nshpRecords = shpUtils.loadShapefile('santiago/cl_13comunas_geo.shp')\n\nall_coords = []\n\nfor i in range(0, len(shpRecords)):\n coords = []\n name = re.sub(\"[^\\w\\s]\", \"\",\n shpRecords[i]['dbf_data'][\"NOMBRE\"].lower().strip())\n if name in (\"san jose de maipo\", \"lo barnechea\", \"curacavi\", \"melipilla\",\n \"maria pinto\", \"pirque\", \"buin\", \"el monte\", \"talagante\",\n \"lampa\", \"colina\", \"peaflor\"):\n print(name)\n else:\n print(name)\n for j in range(0,\n len(shpRecords[i]['shp_data']['parts'][0]['points'])):\n tempx = float(\n shpRecords[i]['shp_data']['parts'][0]['points'][j]['x'])\n tempy = float(\n shpRecords[i]['shp_data']['parts'][0]['points'][j]['y'])\n coords.append((tempx, tempy))\n\n coords = [\"[%s, %s]\" % row for row in coords]\n coords = ',\\n '.join(coords)\n all_coords.append(\"{'name':'\" + name + \"','coords':[\" + coords + \"]}\")\n\nall_coords = ',\\n'.join(all_coords)\n\n\nsantiago = open(\"santiago.js\", \"w\")\ncontent = \"var santiago = [\" + all_coords + \"];\"\nsantiago.write(content)\n" }, { "alpha_fraction": 0.730720579624176, "alphanum_fraction": 0.738305926322937, "avg_line_length": 34.95454406738281, "blob_id": "1315aae746758404c6d4e7b9d39c5d8df5972fb2", "content_id": "e66e3271e0f8524319ddad20f3d4652cc8adccdd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 791, "license_type": "no_license", "max_line_length": 136, 
"num_lines": 22, "path": "/README.md", "repo_name": "scharron/santiago", "src_encoding": "UTF-8", "text": "Santiago\n========\n\n* process.py and santiago-shape.py are scripts to generate viz files from raw data\n* index.html is the main html page, with the simple d3.js code to produce the map\n* helpers.js contains a lot of functions used when generating the map\n* mouse.helpers.js contains stuff to interact with the map\n\n\nsantiago-shape.py\n-----------------\nThis generates the json shape for the city. It uses files from [here](http://www.rulamahue.cl/mapoteca/fichas/chile_geo/ficha13geo.html)\nThis works only using python 2.7\n\nprocess.py\n----------\nThis generates other files needed to the final visualisation from the various csv files.\nThis works only using python 3.X\n\nFollow us\n---------\n[@samuelcharron](http://twitter.com/samuelcharron) &amp; [@datapublica](http://twitter.com/datapublica)\n" }, { "alpha_fraction": 0.5161986947059631, "alphanum_fraction": 0.5275378227233887, "avg_line_length": 21.86419677734375, "blob_id": "d4f63a294233e6b3948ad5c7b8f7318ee9e69f7a", "content_id": "733f441ed477e1eec667c87baf289af58a3523d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1852, "license_type": "no_license", "max_line_length": 72, "num_lines": 81, "path": "/process.py", "repo_name": "scharron/santiago", "src_encoding": "UTF-8", "text": "import csv\n\ndata = {}\n\ntrafic = []\n\nstations = {}\nstations_r = csv.reader(open(\"SantiagoTrafic.csv\", \"r\", newline=\"\"))\nnext(stations_r)\nfor row in stations_r:\n name = row[0]\n id = row[1]\n stations[name] = {\n \"id\": row[1],\n \"name\": name,\n \"trafic\": float(row[4]),\n \"lines\": [],\n \"rank\": 0,\n \"latitude\": row[3],\n \"longitude\": row[2],\n }\n trafic.append(float(row[4]))\n\ntrafic.sort(reverse=True)\n\nlines = {}\norder_r = csv.reader(open(\"SantiagoStationsOrder.csv\", \"r\", newline=\"\"))\nnext(order_r)\nfor row in order_r:\n 
line = row[0]\n name = row[1]\n stations[name][\"lines\"].append(line)\n if line not in lines:\n lines[line] = []\n lines[line].append((name, int(row[4])))\n\ndata[\"lines\"] = []\nfor name, lstations in lines.items():\n lstations.sort(key=lambda x: x[1])\n lstations = [stations[e[0]][\"id\"] for e in lstations]\n data[\"lines\"].append({\n \"key\": name,\n \"paths\": [lstations],\n })\n\n\ndata[\"freq\"] = {}\nfor name, station in stations.items():\n data[\"freq\"][station[\"id\"]] = {\n \"name\": name,\n \"lines\": station[\"lines\"],\n \"rank\": trafic.index(station[\"trafic\"]),\n \"key\": station[\"id\"],\n \"latitude\": station[\"latitude\"],\n \"longitude\": station[\"longitude\"],\n \"connexion\": {},\n \"trafic\": station[\"trafic\"]\n }\n\nimport json\n\njson.dump(data, open(\"santiago.json\", \"w\"), indent=2)\n\n\nlines_r = csv.reader(open(\"SantiagoLines.csv\", \"r\", newline=\"\"))\nnext(lines_r)\ncss = open(\"santiago.colors.css\", \"w\")\nfor row in lines_r:\n css.write(\"\"\"\n .line_%(line)s {\n fill: #%(fill)s;\n stroke: #%(stroke)s;\n background-color: #%(bg)s;\n }\n\n \"\"\" % {\n \"line\": row[0][:2] + row[0][2:].lower(),\n \"fill\": row[7],\n \"stroke\": row[7],\n \"bg\": row[7],\n })\n" } ]
3
eggeek/Automata
https://github.com/eggeek/Automata
2df8b2add0d1473b6351b311b5f24874f635fced
9f51e94fda8316d24102ce153c1c3bda3698fa98
3d24a269b2b2d14c472e08c39893ce2f62fcd277
refs/heads/master
"2016-08-06T19:55:47.253978"
"2015-03-25T02:51:03"
"2015-03-25T02:51:03"
32,837,045
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47051793336868286, "alphanum_fraction": 0.49256306886672974, "avg_line_length": 26.202247619628906, "blob_id": "a34f3eb4291fe3ef5402ec4d13ef07ab57e30382", "content_id": "b30e2ead5f857f17bc8041a7853185d96cdc89fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7530, "license_type": "no_license", "max_line_length": 134, "num_lines": 267, "path": "/PA1_python/src/epsnfa.py", "repo_name": "eggeek/Automata", "src_encoding": "UTF-8", "text": "import sys\r\nimport traceback\r\nimport os\r\nimport string\r\n\r\nmaxn = 200 # maximum number of states\r\nsymbol = 2 # number of symbols ('0','1')\r\nepssymbol = 2\r\n\r\n'''g[s1][i][s2]=True if and only if there's an edge with symbol i from state s1 to s2\r\n i: 0 is '0', 1 is '1', 2 is epsilon\r\n For fixed state s1 and a symbol c, it is not necessary to exist s2 such that\r\n g[s1][c][s2]=True. If no such s2 exists, we deem that getting c at state s1 will\r\n make the Epsilon-NFA go into a non-final \"dead\" state and will directly make the\r\n the string not accepted.'''\r\n\r\ng = [[[False] * maxn for i in range(symbol + 1)] for j in range(maxn)]\r\n\r\n''' closure[s1][s2] is True if and only if s2 is in CL(s1)'''\r\nclosure = [[False] * maxn for i in range(maxn)]\r\n\r\n'''nextpa[i]=i if the regular expression at position i is not '('\r\n nextpa[i]=j if the regular expression at position i is '(' and jth position holds the corresponding ')'\r\n'''\r\nnextpa = [0] * 100\r\n\r\nstate = 0 # current number of states\r\n\r\n# add edge from s1 to s2 with symbol c\r\n\r\n\r\ndef addEdge(s1, c, s2):\r\n global g\r\n g[s1][c][s2] = True\r\n\r\n# increase the number of states of NFA by 1\r\n\r\n\r\ndef incCapacity():\r\n global state\r\n global g\r\n\r\n for i in range(state + 1):\r\n for j in range(symbol + 1):\r\n g[i][j][state] = False\r\n g[state][j][i] = False\r\n state = state + 1\r\n return state - 1\r\n\r\n# unite two Epsilon-NFAs, with start 
state s1 and s2, final state t1 and t2, respectively\r\n# return an array of length 2, where the first element is the start state of the combined NFA. the second being the final state\r\n\r\n\r\ndef union(s1, t1, s2, t2):\r\n st = [0] * 2\r\n\r\n # Please fill in the program here\r\n ss = incCapacity()\r\n addEdge(ss, epssymbol, s1)\r\n addEdge(ss, epssymbol, s2)\r\n\r\n tt = incCapacity()\r\n addEdge(t1, epssymbol, tt)\r\n addEdge(t2, epssymbol, tt)\r\n\r\n st[0] = ss\r\n st[1] = tt\r\n return st\r\n\r\n# concatenation of two Epsilon-NFAs, with start state s1 and s2, final state t1 and t2, respectively\r\n# return an array of length 2, where the first element is the start state of the combined NFA. the second being the final state\r\n\r\n\r\ndef concat(s1, t1, s2, t2):\r\n st = [0] * 2\r\n addEdge(t1, epssymbol, s2)\r\n # Please fill in the program here\r\n st[0] = s1\r\n st[1] = t2\r\n return st\r\n\r\n# Closure of a Epsilon-NFA, with start state s and final state t\r\n# return an array of length 2, where the first element is the start state of the closure Epsilon-NFA. 
the second being the final state\r\n\r\n\r\ndef clo(s, t):\r\n st = [0] * 2\r\n # Please fill in the program here\r\n ss = incCapacity()\r\n tt = incCapacity()\r\n addEdge(ss, epssymbol, s)\r\n addEdge(ss, epssymbol, t)\r\n addEdge(t, epssymbol, s)\r\n addEdge(t, epssymbol, tt)\r\n\r\n st[0] = ss\r\n st[1] = tt\r\n return st\r\n\r\n# Calculate the closure: CL()\r\n\r\n\r\ndef calc_closure():\r\n global closure\r\n global symbol\r\n queue = [0] * maxn\r\n\r\n for i in range(state):\r\n for j in range(state):\r\n closure[i][j] = False\r\n # Breadth First Search\r\n head = -1\r\n tail = 0\r\n queue[0] = i\r\n closure[i][i] = True\r\n while (head < tail):\r\n head = head + 1\r\n j = queue[head]\r\n # search along epsilon edge\r\n for k in range(state):\r\n if ((not closure[i][k]) and (g[j][symbol][k])):\r\n tail = tail + 1\r\n queue[tail] = k\r\n closure[i][k] = True\r\n\r\n'''parse a regular expression from position s to t, returning the corresponding\r\n Epsilon-NFA. The array of length 2 contains the start state at the first position\r\n and the final state at the second position'''\r\n\r\n\r\ndef parse(re, s, t):\r\n # single symbol\r\n if (s == t):\r\n st = [0] * 2\r\n st[0] = incCapacity()\r\n st[1] = incCapacity()\r\n # epsilon\r\n if (re[s] == 'e'):\r\n addEdge(st[0], symbol, st[1])\r\n else:\r\n addEdge(st[0], string.atoi(re[s]), st[1])\r\n return st\r\n\r\n #(....)\r\n if ((re[s] == '(')and(re[t] == ')')):\r\n if (nextpa[s] == t):\r\n return parse(re, s + 1, t - 1)\r\n\r\n # RE1+RE2\r\n i = s\r\n while (i <= t):\r\n i = nextpa[i]\r\n\r\n if ((i <= t)and(re[i] == '+')):\r\n st1 = parse(re, s, i - 1)\r\n st2 = parse(re, i + 1, t)\r\n st = union(st1[0], st1[1], st2[0], st2[1])\r\n return st\r\n i = i + 1\r\n\r\n # RE1.RE2\r\n i = s\r\n while (i <= t):\r\n i = nextpa[i]\r\n\r\n if ((i <= t) and (re[i] == '.')):\r\n st1 = parse(re, s, i - 1)\r\n st2 = parse(re, i + 1, t)\r\n st = concat(st1[0], st1[1], st2[0], st2[1])\r\n return st\r\n i = i + 1\r\n\r\n #(RE)*\r\n 
st1 = parse(re, s, t - 1)\r\n st = clo(st1[0], st1[1])\r\n return st\r\n\r\n# calculate the corresponding ')' of '('\r\n\r\n\r\ndef calc_next(re):\r\n global nextpa\r\n nextpa = [0] * len(re)\r\n for i in range(len(re)):\r\n if (re[i] == '('):\r\n k = 0\r\n j = i\r\n while (True):\r\n if (re[j] == '('):\r\n k = k + 1\r\n if (re[j] == ')'):\r\n k = k - 1\r\n if (k == 0):\r\n break\r\n j = j + 1\r\n nextpa[i] = j\r\n else:\r\n nextpa[i] = i\r\n\r\n\r\ndef test(cur, finalstate, level, length, num):\r\n global closure\r\n global g\r\n nextone = [False] * state\r\n if (level >= length):\r\n return cur[finalstate]\r\n if ((num & (1 << level)) > 0):\r\n c = 1\r\n else:\r\n c = 0\r\n for i in range(state):\r\n if (cur[i]):\r\n for j in range(state):\r\n if (g[i][c][j]):\r\n for k in range(state):\r\n nextone[k] = (nextone[k] or closure[j][k])\r\n\r\n empty = True # test if the state set is already empty\r\n for i in range(state):\r\n if (nextone[i]):\r\n empty = False\r\n if (empty):\r\n return False\r\n return test(nextone, finalstate, level + 1, length, num)\r\n\r\n\r\ndef Start(filename):\r\n global state\r\n global g\r\n result = ''\r\n # read data case line by line from file\r\n try:\r\n br = open(filename, 'r')\r\n for re in br:\r\n print 'Processing ' + re + '...'\r\n re = re.strip()\r\n calc_next(re)\r\n state = 0\r\n nfa = parse(re, 0, len(re) - 1)\r\n # calculate closure\r\n calc_closure()\r\n # test 01 string of length up to 6\r\n for length in range(1, 6 + 1):\r\n for num in range(0, (1 << length)):\r\n if (test(closure[nfa[0]], nfa[1], 0, length, num)):\r\n for i in range(length):\r\n if ((num & (1 << i)) > 0):\r\n result = result + '1'\r\n else:\r\n result = result + '0'\r\n result = result + \"\\n\"\r\n # Close the input stream\r\n br.close()\r\n except:\r\n exc_type, exc_value, exc_traceback = sys.exc_info()\r\n print \"*** print_exception:\"\r\n traceback.print_exception(exc_type, exc_value,\r\n exc_traceback, limit=2, file=sys.stdout)\r\n result = result 
+ 'error'\r\n return result\r\n\r\n\r\ndef main(filepath):\r\n return Start('testRE.in')\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv[1])\r\n" } ]
1
kewenchao/harts
https://github.com/kewenchao/harts
09f64f60d30c263f7569931642633552d26b00fe
474f626c3052b6a9260ec3b12eecca7bc9e59d7e
6a78254126e1c6475c6cbfeea4eaaa6baa08912c
refs/heads/master
"2020-04-06T17:23:46.254945"
"2018-11-15T05:39:46"
"2018-11-15T05:39:46"
157,657,524
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6135865449905396, "alphanum_fraction": 0.6228999495506287, "avg_line_length": 34.79084777832031, "blob_id": "49c3ffd6b20464df62a1119eb34fcf3579f96740", "content_id": "4867524a2319e38fd20eb8797f7a7a404a91340f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5476, "license_type": "no_license", "max_line_length": 134, "num_lines": 153, "path": "/TAAS_HARTS.py", "repo_name": "kewenchao/harts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# !coding=utf8\n\nimport logging\nimport os\nimport re\nimport requests\nimport shutil\nimport stat\nimport subprocess\nimport uuid\n\nlogger = logging.getLogger(__name__)\n\ntest_suite = \"CLA_PCIe_TestSuite_V0_1\"\ncampaign_loc = \"/home/hcloud/CLA/testsuites/CLA_PCIe_TestSuite_V0_1/testcases/CLA_PCIe_TestSuite_V0_1.xml\"\nHARTS_node_list = [\"IPC_MEM_PCIE_01\", \"7560_CDD_PCIE_02\"]\nconfig_loc = \"/home/hcloud/CLA/config/tc_flow_control.properties\"\nmail_level = \"2\"\nsubmitter = \"autobuild_server\"\n\n\ndef get_iosm_source(url, username, password):\n apply_patch_url = url + \"/apply_patch.sh\"\n\n # Get the script\n os.system('wget {} --http-user={} --http-password={}'.format(apply_patch_url, username, password))\n\n # Read the script file\n patch_script = open('apply_patch.sh', 'r')\n repo_commands = patch_script.read()\n patch_script.close()\n\n # write commnads to be executed into a new file after chaning the repo sync command\n patch_script_edit = open('apply_patch_edit.sh', 'w')\n for line in repo_commands.splitlines():\n if line[0:9] == 'repo sync':\n line += ' kernel/modules/iosm'\n elif 'wget' in line:\n line += ' --http-user={} --http-password={}'.format(username, password)\n patch_script_edit.write(line + \"\\n\")\n\n patch_script_edit.close()\n\n st = os.stat('apply_patch_edit.sh')\n os.chmod('apply_patch_edit.sh', st.st_mode | stat.S_IEXEC)\n\n # execute the newly created bash file\n 
subprocess.call(['./apply_patch_edit.sh'], cwd=None)\n\n\ndef get_latest_hcloud_tool_version(url, default):\n try:\n res = r'<a .*?>(.*?.tar.gz)</a>'\n result = requests.get(url)\n content = result.content\n version_list = re.findall(res, content)\n version_list.sort()\n hcloud_tool_version = version_list[-1].replace('.tar.gz', '')\n except Exception as e:\n logger.error(\"Get hcloud_tool_version fail,Exception: {}\".format(e))\n return default\n return hcloud_tool_version\n\n\ndef submit_sessions(**kwargs):\n \"\"\"\n HARTS Campaign submission script\n\n Sample command to run it on linux terminal\n\n python start_external_test.py art_output_URL=\"https://jfstor001.jf.intel.com/artifactory/cactus-absp-jf/mmr1_ice17-autobuild/1106\"\n\n Kwargs:\n art_output_URL (str): Link to the autobuild page\n owner (str): \"patch owner\"\n\n Returns:\n result (bool): result of the submission\n message (str): output message of the submission\n\n Raises:\n RunTimeError: If invalid key arguments or invalid driver file or invalid test bench name are used or\n creating a driver tarball is failed\n \"\"\"\n # Maximum test duration in minutes\n timeout = \"1440\"\n expected_args = ['art_output_URL', 'owner']\n\n for args in expected_args:\n if args not in kwargs:\n logger.info(\"Valid Arguments:\")\n for arr in expected_args:\n logger.info(\"{}\".format(arr + \"\\r\\n\"))\n return False\n\n temp_dir = \"temp-\" + str(uuid.uuid4())\n os.mkdir(temp_dir)\n os.chdir(temp_dir)\n\n # Prepare tar.gz\n get_iosm_source(kwargs['art_output_URL'], kwargs['username'], kwargs['password'])\n\n # Make a release TAR file\n os.system('make -C kernel/modules/iosm -f Makefile_dev iosm_internal_release ARCHIVE=\"imc_ipc.tar.gz\"')\n for file in os.listdir(\"kernel/modules/iosm/\"):\n if file.endswith(\".tar.gz\"):\n driver = \"kernel/modules/iosm/\" + file\n break\n\n # get hcloud_tool\n url = 'http://musxharts003.imu.intel.com/artifactory/harts-sit-swtools-imc-mu/hcloud_job_submission-release/'\n 
hcloud_tool_version = get_latest_hcloud_tool_version(\n url, default='hcloud-tools-5.0.2-1840_5_1707')\n os.system(\"wget {}{}.tar.gz\".format(url, hcloud_tool_version))\n os.system(\"tar -xvzf {}.tar.gz\".format(hcloud_tool_version))\n hcloud_tool = \"./{}/bin\".format(hcloud_tool_version)\n\n patch_set = kwargs['revision'].split('/')[1]\n patch_num = kwargs['revision'].split('/')[0]\n job_name = \"Patch_Set_%s\" % patch_set + \"_rev_\" + patch_num\n\n mailto = kwargs['owner']\n\n final_result = True\n returns = \"\"\n for HARTS_node_name in HARTS_node_list:\n # Submit the campaign\n arguments_for_submission = hcloud_tool + \"/hcloud-campaign-submit\" + \" --node \" + HARTS_node_name + \" --user-name \" \\\n + submitter + ' --test-set-name ' + submitter + \"_\" + job_name + \" --mailto \" \\\n + mailto + ' --mlevel ' + mail_level + ' --exec-time-limit ' + timeout \\\n + \" --copy-to-target \" + driver + \":driver/\" + \" --test-engine CLA \" \\\n + \"--test-engine-params \\\"-ts \" + campaign_loc\n\n arguments_for_submission += \" -config \" + config_loc + \"\\\"\"\n\n logger.info(\"call: %s\" % str(arguments_for_submission))\n\n p = subprocess.Popen(arguments_for_submission, stdout=subprocess.PIPE, shell=True)\n ret, err = p.communicate()\n returns = returns + (\"Command output for the submission to HARTS Node %s: \" % HARTS_node_name) + ret + \"\\r\\n\"\n if err is None:\n result = True\n else:\n logger.info(\"The command returned error: %s\" % err)\n logger.info(\"The submission to HARTS Node %s has failed!\" % HARTS_node_name)\n result = False\n final_result = result and final_result\n\n # Remove the temporary directory\n os.chdir(\"../\")\n shutil.rmtree(temp_dir)\n return final_result, returns\n" } ]
1
MonicaHsu/PredictHomeValues
https://github.com/MonicaHsu/PredictHomeValues
e96f59d797138aad8b5a42d762f139d3bfbda30d
ae1cda4c61eecf66171dbfbb285e163c59123bcf
da1cb8da748e79a9bfda4a8f18e1b1d1b3d6393d
refs/heads/master
"2020-04-07T14:54:24.785550"
"2014-08-19T01:58:34"
"2014-08-19T01:58:34"
21,374,300
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5592653155326843, "alphanum_fraction": 0.5845617055892944, "avg_line_length": 30.163768768310547, "blob_id": "4a78ab8b54ade5a8f65579d31f4eee219153f583", "content_id": "79b9315eeca6e6a1f9da0d4f542cb75eafa7f830", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21505, "license_type": "no_license", "max_line_length": 148, "num_lines": 690, "path": "/home_price_regression.py", "repo_name": "MonicaHsu/PredictHomeValues", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[2]:\n\nimport numpy as np\nimport string\nimport selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport sklearn\nfrom sklearn import linear_model\nfrom scipy import sparse\nfrom scipy import linalg\nfrom sklearn.datasets.samples_generator import make_regression\nfrom sklearn.linear_model import Lasso\nfrom sklearn.ensemble import RandomForestRegressor \nimport pandas as pd\nfrom pandas.tools.plotting import scatter_matrix\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nfrom pylab import *\nimport pylab\nimport ggplot\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport mpld3\nimport seaborn\n\n\n# In[3]:\n\n# listings is file object with a row per address\n# this scrapes redfin details page per address and writes the result to training_file\n# note: this writes to the file one row at a time in case the scrape fails mid-way\ndef createtrainingset(listings, training_file, crime_):\n \n browser = webdriver.Firefox()\n browser.get('http://www.redfin.com/CA/Burlingame/3077-Mariposa-Dr-94010/home/2048468')\n time.sleep(35) #this gives you a chance to log into the website :p\n \n outf = open(training_file,'a')\n line = listings.readline()\n while line:\n listing = line.split('\\t')\n if listing[0]=='Past Sale':\n home_id = listing[2]\n len(listing)\n redfin_url = listing[24]\n lat, lon = (listing[-3] , listing[-2])\n 
year_built, zip_code, list_price, beds, baths, sqft, dom, parking, orig_list_price = (\n listing[12] , listing[5], listing[6], listing[7],\n listing[8], listing[10], listing[15], listing[13], listing[21]\n )\n browser.get(redfin_url)\n t=browser.page_source\n l=browser.find_elements_by_class_name('as-data-section')\n redfin_vars = [x.text for x in l]\n #gd=browser.find_elements_by_id('propertyDetailsId')\n #details = [x.text for x in gd]\n #highschool = ','.join(details).split('\\n')[241]\n \n counter = 0\n if abs(float(lat)) > 1:\n g = [float(lat),float(lon)]\n for k in range(len(crime_)):\n if abs(abs(g[0])-abs(float(crime_[k][1]))) < 0.0047 and abs(abs(g[1])-abs(float(crime_[k][0]))) < 0.0037: \n counter = counter + float(crime_[k][2])\n if len(redfin_vars) == 4 and len(redfin_vars[0]) > 0: #checks for active listings\n def get_num(x):\n return x.split('\\n')[0].split()[0].replace(',','')\n views = get_num(redfin_vars[0])\n favs = get_num(redfin_vars[1])\n xouts = get_num(redfin_vars[2])\n home_summary = [home_id, year_built, zip_code, list_price, beds, \n baths, sqft, dom, parking, orig_list_price, views, \n favs, xouts, lat, lon, counter]\n outf.write(','.join(str(e) for e in home_summary)+ \"\\n\")\n line = listings.readline()\n listings.close()\n outf.close()\n\n\n# In[4]:\n\ndef parse_text_rows(data, walk, filter_lots=True): \n out_data = []\n for line in data:\n d = {}\n line_parts = line.split(',')\n\n \n if len(line_parts) < 6:\n continue\n \n if line_parts[3]:\n if int(line_parts[3]) < 100000:\n continue\n \n if filter_lots:\n if not line_parts[6]:\n continue\n \n d['home'] = line_parts[0]\n \n if (line_parts[1]):\n if int(line_parts[1]) > 1400:\n d['year_built'] = int(line_parts[1])\n else:\n continue\n else:\n continue\n \n if line_parts[2]:\n d['zipcode'] = (line_parts[2])\n else:\n continue\n\n if line_parts[3] and line_parts[6]:\n d['sale_price'] = int(line_parts[3])\n d['sqft'] = int(line_parts[6])\n d['_price_per_sqft'] = 
(1.0*int(line_parts[3])/int(line_parts[6]))\n if line_parts[9]:\n d['list_price'] = (1.0*int(line_parts[9])/int(line_parts[6]))\n else:\n continue\n else:\n continue\n \n if line_parts[4]:\n d['beds'] = int(line_parts[4])\n else:\n continue\n\n if line_parts[5]:\n d['baths'] = float(line_parts[5])\n else:\n continue\n \n if line_parts[8]:\n if int(line_parts[8]) < 10:\n d['parking'] = int(line_parts[8])\n else:\n continue\n else:\n continue\n \n if line_parts[9]:\n d['list_price'] = int(line_parts[9])\n else:\n continue\n \n if line_parts[10]:\n d['views'] = int(line_parts[10])\n else:\n continue\n \n if int(line_parts[10]) > 0:\n if 1.0*int(line_parts[11])/int(line_parts[10]) < 0.7:\n d['fav_per_view'] = 1.0*int(line_parts[11])/int(line_parts[10])\n else:\n continue\n else:\n continue\n \n if int(line_parts[10]) > 0:\n if 1.0*int(line_parts[12])/int(line_parts[10]) < 0.7:\n d['xout_per_view'] = 1.0*int(line_parts[12])/int(line_parts[10])\n else:\n continue\n else:\n continue\n \n if (line_parts[15]) > 0:\n d['crime_score'] = float(line_parts[15])\n else:\n continue\n\n L = len(walk_contents)\n sim_val = 1000\n for i in range(L):\n #latlong_diff = abs(float(walk[i].split(',')[-2])-float(line_parts[-3])) + abs(float(walk[i].split(',')[-1])-float(line_parts[-2]))\n latlong_diff = sqrt((float(walk[i].split(',')[-2])-float(line_parts[-3]))**2 + (float(walk[i].split(',')[-1])-float(line_parts[-2]))**2)\n if latlong_diff < sim_val:\n sim_val = latlong_diff\n ind = i\n \n d['latitude'] = float(line_parts[13]) + (1e-4)*(rand())\n d['longitude'] = float(line_parts[14]) + (1e-4)*(rand())\n \n if line_parts[7]:\n d['dom'] = int(line_parts[7])\n else:\n d['dom'] = 0\n \n if line_parts[12]:\n d['xouts'] = int(line_parts[12])\n else:\n d['xouts'] = 0\n \n if line_parts[11]:\n d['favs'] = int(line_parts[11])\n else:\n d['favs'] = 0\n\n d['walk_score'] = int(walk[ind].split(',')[2]) \n d['transit_score'] = int(walk[ind].split(',')[3]) \n d['bike_score'] = 
int(walk[ind].split(',')[4]) \n\n out_data.append(d)\n return out_data\n\n\n# In[5]:\n\ndef linearregfit(x,y):\n regr = linear_model.LinearRegression()\n regr.fit(x, y)\n rcoeff = regr.coef_\n rscore = regr.score(x,y)\n\n #Calculate standard error for each coeffiecient (code from stackoverflow question 20938154)\n MSE = np.mean((y - regr.predict(x).T)**2)\n var_est = MSE * np.diag(np.linalg.pinv(np.dot(x.T,x)))\n SE_est = np.sqrt(var_est)\n\n return rcoeff, rscore, SE_est\n\n\n# In[6]:\n\nif __name__=='__main__':\n\n OBTAIN_TRAINING_SET = False\n PLOTTER = False\n LOGPLOTTER = False\n \n raw_data = 'SF_listings.txt' #input list of addresses\n crime_file = 'clustered_crime_data.csv' #from data.sfgov.org (clustered with kmeans in Matlab)\n walk_score_file = 'walkscore_full.txt'\n \n training_file = 'SF_past_listings_minus_test.txt' #(to be) decorated with scraped data\n\n crime_data = open(crime_file,'r')\n crime_contents = crime_data.readlines()\n crime_strip = [x.split(',') for x in crime_contents[1:len(crime_contents)]]\n close(crime_file)\n \n if OBTAIN_TRAINING_SET:\n homelistings = open(raw_data, \"r\")\n # per house, scrape data and write to file incrementally\n createtrainingset(homelistings, training_file,crime_strip)\n homelistings.close()\n\n trainingset = open(training_file,'r')\n data = trainingset.readlines()\n trainingset.close()\n \n walk_scores = open(walk_score_file,'r')\n walk_contents = walk_scores.readlines()\n close(walk_score_file)\n # unique-ify, note address is in each line\n data = list(set([item.strip() + '\\n' for item in data]))\n \n # turn data into a list of dicts, [ {year built:..., price per sqft: ...}, ]\n data = parse_text_rows(data,walk_contents)\n\n df_ = pd.DataFrame(data)\n \n #dataframes to compare\n \n data_for_plotting = df_.drop(['favs','xouts','dom','latitude','longitude','zipcode','sale_price','list_price','home'],axis=1)\n data_with_redfinvars = 
df_.drop(['favs','xouts','dom','latitude','longitude','_price_per_sqft','zipcode','sale_price','home'],axis=1)\n data_without_redfinvars = data_with_redfinvars.drop(['fav_per_view','xout_per_view','views'],axis=1)\n redfin_only = data_without_redfinvars.drop(['walk_score','transit_score','bike_score','crime_score'],axis=1)\n simple_plotting = df_.drop(['favs','xouts','dom','latitude','longitude','fav_per_view','baths','beds','sqft','zipcode','sale_price','parking',\n 'list_price','walk_score','bike_score','crime_score','views','home'],axis=1)\n\n\n\n# In[7]:\n\nget_ipython().magic(u'matplotlib inline')\nfont = {'weight' : 'bold',\n 'size' : 12}\n\nmatplotlib.rc('font', **font)\n\n\n# In[8]:\n\npylab.figure()\n\nn, bins, patches = pylab.hist(df_['sale_price'], 30, histtype='barstacked')\nplt.xlabel('Sale Price')\nplt.ylabel('Number of Homes')\nsavefig(\"Sale price histograme\",format='png')\nfig = gcf()\nhtml = mpld3.fig_to_html(fig)\npylab.show()\n\n\n## Extract a new test sets from the training set:\n\n# In[9]:\n\nDIVY = False\n\nif DIVY: \n fa = np.ones(300)\n\n for i in range(300):\n fa[i] = int((len(data) - 0)*rand())\n \n training_file = 'SF_past_listings_scraped.txt' #decorated with scraped data\n trainingset = open(training_file,'r')\n \n new_test = open('SF_past_listings_scraped_test_4.txt','a')\n training_minus_test = open('SF_past_listings_minus_test.txt','a')\n\n \n data = trainingset.readlines()\n trainingset.close()\n for j in range(len(data)):\n if j in fa:\n new_test.write(data[j])\n else:\n training_minus_test.write(data[j])\n\n\n## Analysis:\n\n# In[10]:\n\nif PLOTTER:\n scatter_matrix(data_for_plotting, alpha=0.2, diagonal='kde',figsize=(15,11))\n savefig(\"ScatterMatrix.png\",format='png')\n plt.show()\n\n\n# In[11]:\n\nif LOGPLOTTER:\n axl = scatter_matrix(data_for_plotting, alpha=0.2, diagonal='kde',figsize=(15,11))\n for i, axs in enumerate(axl):\n for j, ax in enumerate(axs):\n ax.set_xscale('log')\n #ax.set_yscale('log')\n 
savefig(\"SemilogScatterMatrix.png\",format='png')\n plt.show()\n\n\n## Multiple Linear Regression Analysis:\n\n# In[12]:\n\nprint('MULTIPLE LINEAR REGRESSION WITHOUT DECORATED VARIABLES')\nprint('-------------------------------------------------------------')\nprint(data_without_redfinvars.columns)\n(rcoeff, rscore, SE_est) = linearregfit(redfin_only,df_.ix[:,0])\nprint(\"Regression Coefficients\")\nprint(rcoeff)\nprint(\"Standard Error for each coefficient\")\nprint(SE_est)\nprint(\"Regression Score\")\nprint(rscore)\nprint('')\n\nprint('MULTIPLE LINEAR REGRESSION INCLUDING DECORATED VARIABLES')\nprint('-------------------------------------------------------------')\nprint(data_with_redfinvars.columns)\n(rcoeff, rscore, SE_est) = linearregfit(data_with_redfinvars,df_.ix[:,0])\nprint(\"Regression Coefficients\")\nprint(rcoeff)\nprint(\"Standard Error for each coefficient\")\nprint(SE_est)\nprint(\"Regression Score\")\nprint(rscore)\nprint('')\n\n\n## Calculate Confidence Intervals (95%) for MLR\n\n# In[13]:\n\nconfid_int_upper = [0]*len(SE_est)\nfor i in range(len(SE_est)):\n confid_int_upper[i] = list(rcoeff)[i]+(list(SE_est)[i]*1.96) \n\nrcoeff_list = list(rcoeff)\nconfid_int_list = list(confid_int_upper)\nfig = plt.figure(figsize=(10,10))\nax1 = plt.subplot(2,1,2)\nax1.set_ylim([-1,len(rcoeff)+1])\nplt.errorbar(rcoeff_list, range(len(rcoeff_list)), xerr=confid_int_list,\n linestyle='None', marker=\"o\", color=\"purple\",\n markersize=5, linewidth=1.75, capsize=20\n)\ngroup_labels = data_with_redfinvars.columns\nplt.yticks(np.arange(len(rcoeff_list)))\nax1.set_yticklabels(group_labels)\nplt.xlabel('Coefficient Confidence Intervals (95%)')\nsavefig(\"confindence_intervals_full.png\",format='png')\n\nplt.show()\ndel(rcoeff_list[11])\ndel(confid_int_list[11])\ndel(rcoeff_list[4])\ndel(confid_int_list[4])\n\n\nax2 = plt.subplot(1,2,2)\n\nax2.set_ylim([-1,len(rcoeff)+1])\n\nplt.errorbar(rcoeff_list,range(len(rcoeff_list)),xerr=confid_int_list,\n 
linestyle='None',marker=\"o\", color=\"purple\",\n markersize=5,linewidth=1.75,capsize=20\n)\ngroup_labels = ['Baths','Beds','bike_score','crime_score','list_price','parking','sqft','transit_score','views','walk_score','year_built']\nplt.yticks(np.arange(10))\nax2.set_yticklabels(group_labels)\nplt.xlabel('Coefficient Confidence Intervals (95%)')\nsavefig(\"confindence_intervals.png\",format='png')\nplt.show()\n\n\n## RF Regression for data WITH decorated variables:\n\n# In[15]:\n\nclf = RandomForestRegressor(n_estimators=1000,max_features=12, verbose=0)\nclf = clf.fit(data_with_redfinvars,df_.ix[:,0])\nG = clf.feature_importances_\n\n\nfor i in range(len(data_with_redfinvars.columns)):\n print [data_with_redfinvars.columns[i], G[i]]\n#year built, walk score, transit score, xout_per_view, views, \nscore = clf.score(data_with_redfinvars,df_.ix[:,0])\nprint(['R2',score])\n\ntest = open('SF_past_listings_scraped_test_4.txt','r')\ntest_contents = test.readlines()\ntest_data = parse_text_rows(test_contents,walk_contents)\n\nclose('SF_past_listings_scraped_test_4.txt')\ndf_test = pd.DataFrame(test_data)\ndf_compare = pd.DataFrame(test_data)\n\n\ndf_test = df_test.drop(['favs','xouts','dom','latitude','longitude','home','zipcode','sale_price','_price_per_sqft'],axis=1)\nest = clf.predict(df_test)\n\nerror_array = []\nsale_list_array = []\nfor i in range(len(test_data)):\n err = abs(est[i] - df_compare['_price_per_sqft'][i])\n err2 = (est[i] - df_compare['_price_per_sqft'][i])\n temp = err2/df_compare['_price_per_sqft'][i]\n percent_error = err/df_compare['_price_per_sqft'][i]\n if percent_error > 1:\n print [ df_compare['zipcode'][i], df_compare['sale_price'][i]]\n error_array.append(percent_error*100)\n sale_list_array.append(temp)\nprint(['percent error',mean(error_array)])\nprint(['median error',median(error_array)])\nprint(['n',len(df_)])\n\n\n# In[16]:\n\npylab.figure()\n\nn, bins, patches = pylab.hist(error_array, 30, histtype='barstacked')\nplt.xlabel('Percent 
Error in Predictions on Blind Test Set')\nplt.ylabel('Number of Homes')\nsavefig(\"percent_error_in_predictions\",format='png')\nfig = gcf()\nhtml = mpld3.fig_to_html(fig)\nopen(\"percenterror_d3.html\", 'w').write(html)\npylab.show()\nmean(error_array)\n\n\n# In[17]:\n\nwithin5 = 0\nwithin10 = 0\nwithin15 = 0\nfor i in range(len(error_array)):\n if error_array[i]/100 <= .05:\n within5 = within5 + 1\n within10 = within10 + 1\n within15 = within15 + 1\n else:\n if error_array[i]/100 <= .1:\n within10 = within10 + 1\n within15 = within15 + 1\n else:\n if error_array[i]/100 <= .15:\n within15 = within15 + 1\nprint (1.0*within5/len(error_array), 1.0*within10/len(error_array), 1.0*within15/len(error_array))\n\n\n# In[60]:\n\nN = len(G)\n\nind = np.arange(N) # the x locations for the groups\nwidth = 0.35 # the width of the bars\n\nfig, ax = plt.subplots(figsize=(20,10))\nrects1 = ax.bar(ind, sorted(G,reverse=True), width, color='m')\nplt.xticks(np.arange(len(G)))\n\nax.set_ylabel('Scores')\nax.set_title('Relative importance of vars')\nax.set_xticklabels(['transit score','year built','list price','walk score','sqft','fav_per_view',\n 'bike score','crime score','views','beds','xouts_per_view',\n 'baths','parking'])\nsavefig(\"var_rank.png\",format='png')\nplt.show()\n\n\n## Test the model in absence of some parameters\n\n# In[18]:\n\nTEST_MODEL = False\n\nif TEST_MODEL:\n\n clf = RandomForestRegressor(n_estimators=1000)\n\n data_missing_stuff = data_with_redfinvars.drop(['list_price'\n ],axis=1)\n clf = clf.fit(data_missing_stuff,df_.ix[:,0])\n G = clf.feature_importances_\n\n\n #year built, walk score, transit score, xout_per_view, views, \n score = clf.score(data_missing_stuff,df_.ix[:,0])\n print(['R2',score])\n\n test = open('SF_past_listings_scraped_test_4.txt','r')\n test_contents = test.readlines()\n test_data = parse_text_rows(test_contents,walk_contents)\n\n close('SF_past_listings_scraped_test_2.txt')\n df_test = pd.DataFrame(test_data)\n df_compare = 
pd.DataFrame(test_data)\n\n\n df_test = df_test.drop(['list_price',\n 'favs','xouts',\n 'dom','latitude','longitude','home',\n 'zipcode','sale_price','_price_per_sqft'],axis=1)\n est = clf.predict(df_test)\n print df_test.columns\n error_array = []\n sale_list_array = []\n for i in range(len(test_data)):\n err = abs(est[i] - df_compare['_price_per_sqft'][i])\n err2 = (est[i] - df_compare['_price_per_sqft'][i])\n temp = err2/df_compare['_price_per_sqft'][i]\n percent_error = err/df_compare['_price_per_sqft'][i]\n error_array.append(percent_error*100)\n sale_list_array.append(temp)\n print(['percent error',mean(error_array)])\n print(['median error',median(error_array)])\n\n\n within5 = 0\n within10 = 0\n within20 = 0\n for i in range(len(error_array)):\n if error_array[i]/100 <= .05:\n within5 = within5 + 1\n within10 = within10 + 1\n within20 = within20 + 1\n else:\n if error_array[i]/100 <= .1:\n within10 = within10 + 1\n within20 = within20 + 1\n else:\n if error_array[i]/100 <= .2:\n within20 = within20 + 1\n print (1.0*within5/len(error_array), 1.0*within10/len(error_array), \n 1.0*within20/len(error_array))\n\n\n## Print Predictions to file\n\n# In[19]:\n\nSF_current_listings = open('SF_new_listings_625_scraped.txt','r')\nSF_current_listings_contents = SF_current_listings.readlines()\ntest_data = parse_text_rows(SF_current_listings_contents,walk_contents)\nclose('SF_new_listings_625_scraped.txt')\ndf_current = pd.DataFrame(test_data)\ndf_current_compare = pd.DataFrame(test_data)\n\ndbfile = open('SF_current_listings_predicted_625_staggered.csv','a')\n\ndf_current = df_current.drop(['favs','xouts','dom','latitude','longitude','home','zipcode','sale_price','_price_per_sqft'],axis=1)\n\nest = clf.predict(df_current)\n\nPRINT_DB = True\n\n\n\nif PRINT_DB:\n for i in range(len(test_data)):\n \n G = [df_current_compare['home'][i], df_current_compare['year_built'][i], \n df_current_compare['zipcode'][i],df_current_compare['list_price'][i],\n 
df_current_compare['beds'][i], df_current_compare['baths'][i],\n df_current_compare['sqft'][i],df_current_compare['dom'][i],\n df_current_compare['parking'][i], df_current_compare['sale_price'][i],\n df_current_compare['views'][i], df_current_compare['favs'][i],\n df_current_compare['xouts'][i], df_current_compare['latitude'][i],\n df_current_compare['longitude'][i], df_current_compare['crime_score'][i]]\n\n\n\n diff = 100*((est[i]*df_current_compare['sqft'][i]-\n df_current_compare['list_price'][i])/(df_current_compare['list_price'][i]))\n\n \n if diff >= 5:\n color = \"green\"\n if diff > -5 and diff < 5:\n color = \"orange\"\n if diff <= -5:\n color = \"red\"\n \n G.extend([est[i],df_current.ix[i,2],df_current.ix[i,8],df_current.ix[i,10],color])\n\n dbfile.write(','.join(str(e) for e in G)+ \"\\n\")\n\n\n# In[20]:\n\ndifference_array = []\npercent_difference_array = []\nlist_sale_array = []\nfive=0\nafive=0\nbfive=0\nfor i in range(len(df_)):\n diff = df_['sale_price'][i] - df_['list_price'][i]\n pdiff = 100*((diff*1.0)/df_['sale_price'][i])\n if abs(pdiff) <5:\n five = five + 1\n else:\n if pdiff >=5:\n afive = afive + 1\n else:\n if pdiff <= -5:\n bfive = bfive + 1\n if abs(pdiff) < 90:\n difference_array.append(diff)\n percent_difference_array.append(pdiff)\n\n\n# In[21]:\n\npylab.figure()\n\nn, bins, patches = pylab.hist(difference_array\n , 75, histtype='barstacked')\nplt.xlabel('sale price minus list price')\nplt.ylabel('Number of Homes')\nsavefig(\"difference_array\",format='png')\npylab.show()\n\n\n# In[22]:\n\npylab.figure()\n\nn, bins, patches = pylab.hist(sort(percent_difference_array)[1:150], 22, histtype='barstacked',color=['crimson'])\nn, bins, patches = pylab.hist(sort(percent_difference_array)[151:730], 5, histtype='barstacked',color=['orange'])\nn, bins, patches = pylab.hist(sort(percent_difference_array)[731:], 20, histtype='barstacked',color=['Chartreuse'])\n\nplt.xlabel('Sale Price as Percent Above Asking Price')\nplt.ylabel('Number of 
Homes')\nsavefig(\"percent_difference_array.png\",format='png')\npylab.show()\n\n" }, { "alpha_fraction": 0.5548402667045593, "alphanum_fraction": 0.5760610103607178, "avg_line_length": 37.09090805053711, "blob_id": "a22b90c17c235504471a3e71d8b67d095154a0ff", "content_id": "b3f121fbfd7d6d6f2f1810abc8598b2fca8e27d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4194, "license_type": "no_license", "max_line_length": 130, "num_lines": 110, "path": "/redfin_scraper.py", "repo_name": "MonicaHsu/PredictHomeValues", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[8]:\n\nimport numpy as np\nimport string\nimport selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport sklearn\nfrom sklearn import linear_model\nimport pandas as pd\nfrom pandas.tools.plotting import scatter_matrix\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nfrom pylab import *\nimport ggplot\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.keys import Keys\nimport time\nfrom re import sub\nfrom decimal import Decimal\n\n\n# In[16]:\n\n# listings is file object with a row per address\n# this scrapes redfin details page per address and writes the result to training_file\n# note: this writes to the file one row at a time in case the scrape fails mid-way\ndef createtrainingset(listings, training_file, crime_):\n outf = open(training_file,'a')\n line = listings.readline()\n while line:\n listing = line.split('\\t')\n if listing[0]=='MLS Listing':\n #if listing[0]=='Past Sale' or listing[0]=='MLS Listing':\n home_id = listing[2]\n redfin_url = listing[24]\n lat, lon = (listing[-3] , listing[-2])\n year_built, zip_code, list_price, beds, baths, sqft, dom, parking, orig_list_price = (\n listing[12] , listing[5], listing[6], listing[7],\n listing[8], listing[10], listing[15], listing[13], listing[21]\n )\n \n if listing[10] and listing[7] and 
listing[8] and listing[12] and listing[-2] and listing[-3]:\n browser.get(redfin_url)\n t=browser.page_source\n l=browser.find_elements_by_class_name('as-data-section')\n\n redfin_vars = [x.text for x in l]\n gd=browser.find_elements_by_id('propertyDetailsId')\n details = [x.text for x in gd]\n\n if len(','.join(details).split('\\n')) >= 2:\n lsp = ','.join(details).split('\\n')[2]\n lastsoldprice = Decimal(sub(r'[^\\d.]', '', lsp))\n else:\n lastsoldprice = ''\n\n counter = 0\n if abs(float(lat)) > 1:\n g = [float(lat),float(lon)]\n for k in range(len(crime_)):\n if abs(abs(g[0])-abs(float(crime_[k][1]))) < 0.0047 and abs(abs(g[1])-abs(float(crime_[k][0]))) < 0.0037: \n counter = counter + float(crime_[k][2])\n if len(redfin_vars) == 4 and len(redfin_vars[0]) > 0: #checks for complete listings\n def get_num(x):\n return x.split('\\n')[0].split()[0].replace(',','')\n views = get_num(redfin_vars[0])\n favs = get_num(redfin_vars[1])\n xouts = get_num(redfin_vars[2])\n home_summary = [home_id, year_built, zip_code, lastsoldprice, beds, \n baths, sqft, dom, parking, orig_list_price, views, \n favs, xouts, lat, lon, counter]\n outf.write(','.join(str(e) for e in home_summary)+ \"\\n\")\n\n line = listings.readline()\n listings.close()\n outf.close()\n\n\n# In[20]:\n\nif __name__=='__main__':\n\n browser = webdriver.Firefox()\n browser.get('http://www.redfin.com/CA/Burlingame/3077-Mariposa-Dr-94010/home/2048468')\n time.sleep(25) \n \n OBTAIN_TRAINING_SET = True\n \n raw_data = 'SF_new_listings_625_chunked.txt' #input list of addresses\n training_file = 'SF_new_listings_625_scraped.txt' #decorated with scraped data\n \n crime_file = 'clustered_crime_data.csv' #from data.sfgov.org (clustered with kmeans in Matlab)\n crime_data = open(crime_file,'r')\n crime_contents = crime_data.readlines()\n crime_strip = [x.split(',') for x in crime_contents[1:len(crime_contents)]]\n close(crime_file)\n \n if OBTAIN_TRAINING_SET:\n homelistings = open(raw_data, \"r\")\n # per house, 
scrape data and write to file incrementally\n createtrainingset(homelistings, training_file,crime_strip)\n homelistings.close()\n\n trainingset = open(training_file,'r')\n\n\n# In[ ]:\n\n\n\n" } ]
2
diekhans/synteny-play-ground
https://github.com/diekhans/synteny-play-ground
8a13780d578296d041119a83f342af675714caaf
f9afba1e237f2f326b6dd80518e55fa16be8d081
23050580808605d52c77b40bf4407f2c62091107
refs/heads/master
"2021-09-15T16:52:18.991575"
"2018-06-07T08:46:44"
"2018-06-07T08:46:44"
106,366,396
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8083003759384155, "alphanum_fraction": 0.8083003759384155, "avg_line_length": 23.095237731933594, "blob_id": "1cc250bc1855fe67d04faf0c2b777a8e207e6dbb", "content_id": "18cb7919cc1ba3c2739c316df7bc048b538fffcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 506, "license_type": "no_license", "max_line_length": 60, "num_lines": 21, "path": "/bin/run_transloc_breaks_for_all.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n./get_transloc_breaks.sh AcinonyxJubatus cheetah\n\n./get_transloc_breaks.sh PumaConcolor puma\n\n./get_transloc_breaks.sh PantheraOnca jaguar\n\n./get_transloc_breaks.sh PantheraPardus leopard\n\n./get_transloc_breaks.sh PantheraLeo lion\n\n./get_transloc_breaks.sh PantheraTigris tiger\n\n./get_transloc_breaks.sh PrionailurusBengalensis leopard_cat\n\n./get_transloc_breaks.sh PrionailurusViverrinus fishing_cat\n\n./get_transloc_breaks.sh CaracalCaracal caracal\n\n./get_transloc_breaks.sh LynxPardina lynx\n" }, { "alpha_fraction": 0.6099376082420349, "alphanum_fraction": 0.6963514089584351, "avg_line_length": 36.531532287597656, "blob_id": "47e02040a91029a68e028a59910a9a22c8231c8f", "content_id": "8ccbc2873edbd9bea945a24b28f586e6e2fe82ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4166, "license_type": "no_license", "max_line_length": 80, "num_lines": 111, "path": "/bin/get_stats_blocks.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nprefix=\"/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae/\"\n\nbed=$prefix/caracal/CaracalCaracal.FelisCatus.blocks.bed\necho 'caracal'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout| bedtools merge -i stdin > tmp\nawk 
'{sum+=$3-$2} END {print sum/2420813187}' tmp\nprintf '\\n'\n\nbed=$prefix/cheetah/AcinonyxJubatus.FelisCatus.blocks.bed\necho 'cheetah'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2375874546}' tmp\nprintf '\\n'\n\nbed=$prefix/hyena/CrocutaCrocuta.FelisCatus.blocks.bed\necho 'hyena'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2367466658}' tmp\nprintf '\\n'\n\nbed=$prefix/lynx/LynxPardina.FelisCatus.blocks.bed \necho 'lynx'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2413209059}' tmp\nprintf '\\n'\n\nbed=$prefix/fishing_cat/PrionailurusViverrinus.FelisCatus.blocks.bed \necho 'fishing cat'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2445568386}' tmp\nprintf '\\n'\n\nbed=$prefix/puma/PumaConcolor.FelisCatus.blocks.bed \necho 'puma'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2525216212}' tmp\nprintf '\\n'\n\nbed=$prefix/lion/PantheraLeo.FelisCatus.blocks.bed\necho 'lion'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print 
sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2442522584}' tmp\nprintf '\\n'\n\nbed=$prefix/jaguar/PantheraOnca.FelisCatus.blocks.bed\necho 'jaguar'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2405344986}' tmp\nprintf '\\n'\n\nbed=$prefix/leopard/PantheraPardus.FelisCatus.blocks.bed\necho 'leopard'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2578022254}' tmp\nprintf '\\n'\n\nbed=$prefix/tiger/PantheraTigris.FelisCatus.blocks.bed\necho 'tiger'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2391065193}' tmp\nprintf '\\n'\n\nbed=$prefix/leopard_cat/PrionailurusBengalensis.FelisCatus.blocks.bed\necho 'leopard cat'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2488918643}' tmp\nprintf '\\n'\n\nbed=$prefix/dog/CanisFamiliaris.FelisCatus.blocks.bed\necho 'dog'\nwc -l $bed\ncut -f4,5,6 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2419229919}' tmp\ncut -f1,2,3 $bed | bedSort stdin stdout | bedtools merge -i stdin > tmp\nawk '{sum+=$3-$2} END {print sum/2410976875}' tmp\nprintf '\\n'\n" }, { "alpha_fraction": 0.7620500326156616, "alphanum_fraction": 0.7620500326156616, 
"avg_line_length": 56.16279220581055, "blob_id": "6676b8732a2cd17f114cbb8644de458d4b6f9af6", "content_id": "9a0c834d8d29583fd65c5e82f595dfa38e716cdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4917, "license_type": "no_license", "max_line_length": 170, "num_lines": 86, "path": "/bin/group_filter_breaks_Ns_transloc.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ndata_prefix=/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc leopard_cat fishing_cat \ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/leopard_cat.fishing_cat.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc lion leopard\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/lion.leopard.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc lion leopard jaguar\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/lion.leopard.jaguar.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc lion leopard jaguar tiger\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/lion.leopard.jaguar.tiger.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc lion leopard jaguar tiger caracal\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/lion.leopard.jaguar.tiger.caracal.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc lion leopard jaguar tiger caracal lynx\ncat ${data_prefix}/*.bed > 
${data_prefix}/prefilterNs_transloc_ovl/lion.leopard.jaguar.tiger.caracal.lynx.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc lion leopard jaguar tiger caracal lynx puma cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/lion.leopard.jaguar.tiger.caracal.lynx.puma.cheetah.bed\nrm ${data_prefix}/*.bed\n\n#for sergeys' tree\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc lion leopard jaguar tiger caracal puma cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/lion.leopard.jaguar.tiger.caracal.puma.cheetah.bed\nrm ${data_prefix}/*.bed\n\necho 'attention!'\necho 'check the the dog genome was used as outgroup here - manually add it to the outgroups in the script if necessary'\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc lion leopard jaguar tiger caracal lynx puma cheetah leopard_cat fishing_cat\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/lion.leopard.jaguar.tiger.caracal.lynx.puma.cheetah.leopard_cat.fishing_cat.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc puma cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/puma.cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc lion \ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/lion.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc leopard \ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/leopard.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc jaguar\ncat ${data_prefix}/*.bed > 
${data_prefix}/prefilterNs_transloc_ovl/jaguar.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc tiger\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/tiger.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc caracal\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/caracal.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc lynx\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/lynx.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc puma\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/puma.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc fishing_cat\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/fishing_cat.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --clustering_type ovl --suffix prefilterNs --transloc leopard_cat\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_transloc_ovl/leopard_cat.bed\nrm ${data_prefix}/*.bed\n\n" }, { "alpha_fraction": 0.8372092843055725, "alphanum_fraction": 0.8372092843055725, "avg_line_length": 20.5, "blob_id": "5b1ef98fb6d78080a6a70c948803894a2ba665d5", "content_id": "9c5474e207de3de6f8b58b5d476194d9e8272b9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 43, "license_type": "no_license", "max_line_length": 21, "num_lines": 2, "path": "/README.md", "repo_name": 
"diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "# synteny-play-ground\nsome experimentation\n" }, { "alpha_fraction": 0.7712363600730896, "alphanum_fraction": 0.7762826085090637, "avg_line_length": 107.09091186523438, "blob_id": "ae886b87ac50f2a29c1fa6f9c06d1fbf48c94be9", "content_id": "c18cd388b33fe114e3949bf3c6a53c8ef55da194", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1189, "license_type": "no_license", "max_line_length": 283, "num_lines": 11, "path": "/doc/dag-algorithm.md", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "1. Filter list of psl blocks by query sequence id and sort them by qStart and tStart.<br />\nBuild a graph upon it where vertices are blocks and edges are constructed based on possible candidates of the next vertices <br /> \nCandiate blocks are the closest next syntenic block and all blocks overlapping it (currently used).<br />\nThe goal is to find a set of paths covering DAG based on their maximal weight and most continuous of them. <br />\n2. Search for a vertex with the maximal weight: <br />\n * initial weight of each vertex is initialized as the size of the corresponding psl block\n * the weight of each edge coming into the vertex i equals to the length of the corresponding psl block (initial weight of the vertex i)\n * for each vertex A consider its possible next blocks (descendants) update weight of each descendant-candidate vertex B in case the weight of an edge coming into B + weight of the previous vertex A is greater then weight of B. In case of update store the id of the previous vertex.\n3. Find the vertex with maximal weight and trace back.\n4. Remove the vertices that comprise the best path from the graph\n5. 
If not all vertices are in some paths then go to 2.\n" }, { "alpha_fraction": 0.7856048941612244, "alphanum_fraction": 0.7856048941612244, "avg_line_length": 19.40625, "blob_id": "f90ddb6adbfa2af89594d018bd57210f3390015b", "content_id": "146245974c0f5a784f3b8fd9418e1b858cf0f65f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 653, "license_type": "no_license", "max_line_length": 60, "num_lines": 32, "path": "/bin/run_filtered_breaks_for_all.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\necho 'cheetah'\n./get_filtered_breaks.sh AcinonyxJubatus cheetah\n\necho 'puma'\n./get_filtered_breaks.sh PumaConcolor puma\n\necho 'jaguar'\n./get_filtered_breaks.sh PantheraOnca jaguar\n\necho 'leopard'\n./get_filtered_breaks.sh PantheraPardus leopard\n\necho 'lion'\n./get_filtered_breaks.sh PantheraLeo lion\n\necho 'tiger'\n./get_filtered_breaks.sh PantheraTigris tiger\n\necho 'leopard cat'\n./get_filtered_breaks.sh PrionailurusBengalensis leopard_cat\n\necho 'fishing cat'\n./get_filtered_breaks.sh PrionailurusViverrinus fishing_cat\n\necho 'caracal'\n./get_filtered_breaks.sh CaracalCaracal caracal\n\necho 'lynx'\n./get_filtered_breaks.sh LynxPardina lynx\n" }, { "alpha_fraction": 0.686339259147644, "alphanum_fraction": 0.726372241973877, "avg_line_length": 32.65277862548828, "blob_id": "3bf7304de9f69c5e71d16e63be4485c0c39c578d", "content_id": "732d237d90cbc7ede4fadf1883e3faef69976be1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2423, "license_type": "no_license", "max_line_length": 104, "num_lines": 72, "path": "/doc/design.md", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "### Special cases in resolving synteny\n\n\n**Duplications**\n\ncan be resolved unambiguously <br>\n`Genome A chr1 1 2 3`<br>\n`Genome A chr2 4 2 5`<br>\n`Genome B chr3 1 2 3`<br>\n`Genome B chr4 4 2 
5`<br>\n\nstill can be resolved unambiguosly for 1 2 3 context<br>\nwhat about the other pair (4 2 5 and 7 5 8)? <br>\n`Genome A chr1 1 2 3`<br>\n`Genome A chr2 4 2 5`<br>\n`Genome B chr3 1 2 3`<br>\n`Genome B chr4 7 5 8`<br>\n\nif such case is even possible?<br>\nanyway it doesn't look like synteny in all the contextes below<br>\n`Genome A chr1 1 2 3`<br>\n`Genome A chr2 4 2 5`<br>\n`Genome B chr3 6 2 7`<br>\n`Genome B chr4 8 5 9`<br>\n\nOne can be resolved, another one is a clear duplication<br>\n`Genome A chr1 1 2 3`<br>\n`Genome A chr2 4 2 5`<br>\n`Genome B chr3 1 2 3`<br>\n\nIf 6 is small enough then synteny is continued<br>\n`Genome A chr1 1 2 3`<br>\n`Genome A chr2 4 2 5`<br>\n`Genome B chr3 1 2 6 3`<br>\n`Genome B chr4 4 2 5`<br>\n\n\n### Discussion\n\n* A synteny is define as a contiguously aligned region in two genomes with indels\n and inversions of no more than 1000bp.\n* The pairwise alignments produce by HAL (PSL records) chain the blocks, however this\n does not necessary match the desired definition of synteny.\n* PSL blocks maybe fragmented by HAL due to fragmentation in other genomes.\n* Duplication ares represented in multiple PSL records, as PSL only represents\n linear increasing alignments.\n\n\n### Algorithm\n\nSynteny algorithm generates putative synteny blocks between to of the extant\nspecies from a HAL alignment.\n\n1. Use halLiftover to generate PSLs covering the entire genome or a test region.\n2. Split all PSL into a single set of gapless blocks, each block represented by\n pairs of query/target tuples of equal length. Strand-specific coordinates for the\n blocks are used, as this makes the dynamic programming easier:<br>\n `(qname qstrand qstart qend)`<br>\n `(tname tstrand tstart tend)`\n3. 
Partition blocks into lists of blocks that could possibly contain syntenic blocks.\n This is referred to as a _synteny pool_.\n These have the same set of query and target sequences and strands and are indexed by<br>\n `(qname qstrand tname tstrand)`\n5. Sort each _synteny pool_ by qstart.\n4. Define zero of more synteny block for each _synteny pool_:\n 1. xxx\n \n\n\n\n### Thinking\n[Longest increasing subsequence algorithm](https://en.wikipedia.org/wiki/Longest_increasing_subsequence)\n" }, { "alpha_fraction": 0.5990049839019775, "alphanum_fraction": 0.6117744445800781, "avg_line_length": 37.89677429199219, "blob_id": "808f347e3b71a6e9495042e3c9c5be6babe9eda8", "content_id": "40b12b679e1143245cc5890e40bd8f2d1dcbc790", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6030, "license_type": "no_license", "max_line_length": 134, "num_lines": 155, "path": "/bin/find_breakpoints.py", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\nfrom pycbio.hgdata.psl import PslReader\nfrom pycbio.hgdata.psl import PslBlock\nfrom pycbio.hgdata.psl import Psl\nfrom argparse import ArgumentParser\nfrom itertools import groupby\nfrom Bio import SeqIO\n\n\ndef extract_blocks(psl_file):\n entries = []\n for psl in PslReader(psl_file):\n #entry = (psl.qName, int(psl.qStart), int(psl.qEnd), psl.tName, int(psl.tStart), int(psl.tEnd), psl.strand)\n #entries.append(entry)\n entries.append(psl)\n return entries\n\ndef print_out(blocks, filename):\n with open(filename, 'w') as f:\n for b in blocks:\n f.write(b+'\\n')\n\ndef parse_bed(bed):\n d = {}\n with open(bed) as f:\n for line in f:\n line = line.strip().split()\n if not line:\n continue\n d[line[0]] = (int(line[1]), int(line[2]))\n return d\n\n#check if previous scaffold ended and we came to the beginning of the new one\ndef scaffold_end_start(prev_block, prev_global_size, this_block, this_global_size):\n threshold = 1000 \n 
if prev_block.strand == '++':\n prev_end = prev_global_size - prev_block.qEnd\n else:\n prev_end = prev_block.qStart\n\n if this_block.strand == '++':\n this_start = this_block.qStart\n else:\n this_start = this_global_size - this_block.qEnd\n #print prev_block.qName, prev_block.strand, prev_end, this_block.qName, this_block.strand, this_start\n return prev_end < threshold or this_start < threshold\n\ndef overlaps(b, accumulated):\n for a in accumulated:\n if b.tStart == a.tEnd or a.tStart == b.tEnd:\n continue\n if b.tStart <= a.tStart <= b.tEnd or \\\n a.tStart <= b.tStart <= a.tEnd or \\\n a.tStart <= b.tStart <= b.tEnd <= a.tEnd or \\\n b.tStart <= a.tStart <= a.tEnd <= b.tEnd:\n return True\n return False\n\ndef group_overlapping(sorted_blocks_target):\n res = []\n accumulated = []\n for b in sorted_blocks_target:\n if not accumulated or overlaps(b, accumulated):\n accumulated.append(b)\n else:\n res.append(list(accumulated))\n accumulated = [b]\n return res\n\n\ndef check_abundance_Ns_genome(fasta, seqid, start, end):\n return float(fasta[seqid][start:end].seq.count('N'))/(end-start)\n\n#look into neighborhood on both ends of breakpoints for target and for query\n#can not look inside the breakpoint region because some of them are overlapping \n#and it finally causes a mess if end - start + 1 == 0 because they overlap for 1bp\n# -> it's a mess\ndef check_abundance_Ns_for_both(query, target, prev_block, b):\n break_start = prev_block.tEnd - 500\n break_end = prev_block.tEnd + 500\n seqid = prev_block.tName\n target_ns_1 = check_abundance_Ns_genome(target, seqid, break_start, break_end)\n break_start = b.tStart - 500\n break_end = b.tStart + 500\n target_ns_2 = check_abundance_Ns_genome(target, seqid, break_start, break_end)\n target_ns = max(abs(target_ns_1), abs(target_ns_2))\n if prev_block.strand == '++':\n break_start = prev_block.qEnd - 500\n break_end = prev_block.qEnd + 500\n else:\n break_start = prev_block.qStart - 500\n brak_end = prev_block.qStart + 
500\n seqid = prev_block.qName\n query_ns_1 = check_abundance_Ns_genome(query, seqid, break_start, break_end)\n if prev_block.strand == '++':\n break_start = b.qStart - 500\n break_end = b.qStart + 500\n else:\n break_start = b.qEnd - 500\n break_end = b.qEnd + 500\n seqid = b.qName #in case of translocation\n query_ns_2 = check_abundance_Ns_genome(query, seqid, break_start, break_end)\n query_ns = max(abs(query_ns_1), abs(query_ns_2))\n return (query_ns, target_ns)\n\ndef find_breaks(blocks, query, fasta_target, fasta_query):\n breaks = []\n #first group by target sequence id\n blocks = sorted(blocks, key=lambda x:x.tName)\n for target, blocks_target in groupby(blocks, key=lambda x:x.tName) :\n blocks_target = list(blocks_target)\n #sort by target start\n sorted_blocks_target = sorted(blocks_target, key=lambda x: x.tStart)\n prev_block = ''\n #group repeats in target together\n for repeat_blocks in group_overlapping(sorted_blocks_target): \n if len(repeat_blocks) > 1:\n #prev_block = sorted(repeat_blocks, key=lambda x: x[5])[-1]\n continue\n b = repeat_blocks[0]\n if prev_block and not scaffold_end_start(prev_block, query[prev_block.qName][1],\\\n b, query[b.qName][1]):\n ns = check_abundance_Ns_for_both(fasta_query, fasta_target, prev_block, b)\n breaks.append((prev_block, b, ns[0], ns[1]))\n # print prev_block.qName, prev_block.qEnd, query[prev_block.qName][1], b.qName, b.qStart, query[b.qName][0]\n prev_block = b\n #exit()\n return breaks\n\nif __name__ == '__main__':\n #TARGET IS REFERENCE\n parser = ArgumentParser()\n parser.add_argument('psl')\n parser.add_argument('--fasta_target')\n parser.add_argument('--fasta_query')\n parser.add_argument('bed_query',help='bed file for queries chromosomes')\n args = parser.parse_args()\n blocks = extract_blocks(args.psl)\n query_chroms = parse_bed(args.bed_query)\n fasta_target = SeqIO.to_dict(SeqIO.parse(open(args.fasta_target),'fasta'))\n fasta_query = SeqIO.to_dict(SeqIO.parse(open(args.fasta_query),'fasta'))\n breaks 
= find_breaks(blocks, query_chroms, fasta_target, fasta_query)\n for b in breaks:\n #tName, tStart, tEnd, qNameEnd, qStart, qNameStart, qEnd, ifDeletionInQuery, qNsrate, tNsrate\n if b[0].strand == '++':\n qEnd = b[0].qEnd\n else:\n qEnd = b[1].qStart\n if b[1].strand == '++':\n qStart = b[1].qStart\n else:\n qStart = b[1].qEnd\n print '\\t'.join(map(str,[b[0].tName, b[0].tEnd, b[1].tStart, b[0].qName, qEnd, b[1].qName, qStart, b[2], b[3]]))\n\n" }, { "alpha_fraction": 0.5884578824043274, "alphanum_fraction": 0.634815514087677, "avg_line_length": 57.72222137451172, "blob_id": "25bdb3697317270e6c194d5e872df346c3a89850", "content_id": "e6aae0ba2f4107da5896ab47d28036cea63820eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2114, "license_type": "no_license", "max_line_length": 140, "num_lines": 36, "path": "/bin/count_ltrs_density.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#data_prefix='/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae/prefilterNs_notree/'\ndata_prefix='/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae/prefilterNs/'\nltrs='/hive/groups/recon/projs/felidae_comp/analysis/transposons/LTRs/FelCat-ltrs.bed'\n\nfor x in `ls ${data_prefix}/*.bed | grep -v comments`; \ndo\n basename $x\n #cat $x | awk '{print $1\"\\t\"$2-500\"\\t\"$2+500\"\\n\"$1\"\\t\"$3-500\"\\t\"$3+500}' | bedSort /dev/stdin ${data_prefix}/tmp\n #cat $x | awk '($3-$2>10000) {print $0}' | bedSort /dev/stdin ${data_prefix}/tmp\n cat $x | awk '($3-$2>1000) {print $0}' | awk '{print $1\"\\t\"$2\"\\t\"$2+500\"\\n\"$1\"\\t\"$3-500\"\\t\"$3}' | bedSort /dev/stdin ${data_prefix}/tmp\n bedtools merge -i ${data_prefix}/tmp > ${data_prefix}/breaks_neighb.tmp\n echo 'got break regions:'\n wc -l ${data_prefix}/breaks_neighb.tmp\n total=$(awk '{sum+=$3-$2} END {print sum}' ${data_prefix}/breaks_neighb.tmp)\n echo 'density in breaks:'\n bedtools 
intersect -a ${data_prefix}/breaks_neighb.tmp -b ${ltrs} | awk -v t=$total '{sum+=$3-$2} END {print sum/t}'\n #cat ${data_prefix}/breaks_neighb.tmp | awk '{print $1\"\\t\"$2-5000\"\\t\"$2\"\\n\"$1\"\\t\"$3\"\\t\"$3+5000}' | bedSort /dev/stdin ${data_prefix}/tmp\n cat $x | awk '($3-$2>1000) {print $0}' | awk '{print $1\"\\t\"$2-500\"\\t\"$2\"\\n\"$1\"\\t\"$3\"\\t\"$3+500}' | bedSort /dev/stdin ${data_prefix}/tmp\n bedtools merge -i ${data_prefix}/tmp > ${data_prefix}/breaks_neighb.tmp\n total=$(awk '{sum+=$3-$2} END {print sum}' ${data_prefix}/breaks_neighb.tmp)\n echo 'density in no-breaks:'\n bedtools intersect -a ${data_prefix}/breaks_neighb.tmp -b ${ltrs} | awk -v t=$total '{sum+=$3-$2} END {print sum/t}'\n\n #segments=$(awk 'END{print NR}' ${data_prefix}/breaks_neighb.tmp)\n #have_ltrs=$(bedtools intersect -u -a ${data_prefix}/breaks_neighb.tmp -b ${ltrs} | wc -l)\n #echo 'neighb having at least one related ltr'\n #echo ${have_ltrs}/${segments}\n rm ${data_prefix}/breaks_neighb.tmp ${data_prefix}/tmp\ndone\n\n\n\necho 'average ltrs density per felcat genome:'\nawk '{sum+=$3-$2} END {print sum/2641342258}' $ltrs\n" }, { "alpha_fraction": 0.7573179602622986, "alphanum_fraction": 0.7573179602622986, "avg_line_length": 45.46511459350586, "blob_id": "ebb8385f13f4fc48f568702c12055c1efe9b2961", "content_id": "cc860f891fb1dce152c257f61af60cd20458f1cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3997, "license_type": "no_license", "max_line_length": 137, "num_lines": 86, "path": "/bin/group_filter_breaks_Ns.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ndata_prefix=/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs leopard_cat fishing_cat \ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/leopard_cat.fishing_cat.bed\nrm 
${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion leopard\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/lion.leopard.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion leopard jaguar\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/lion.leopard.jaguar.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion leopard jaguar tiger\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/lion.leopard.jaguar.tiger.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion leopard jaguar tiger caracal\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/lion.leopard.jaguar.tiger.caracal.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion leopard jaguar tiger caracal lynx\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/lion.leopard.jaguar.tiger.caracal.lynx.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion leopard jaguar tiger caracal lynx puma cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/lion.leopard.jaguar.tiger.caracal.lynx.puma.cheetah.bed\nrm ${data_prefix}/*.bed\n\n#for sergeys' tree\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion leopard jaguar tiger caracal puma cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/lion.leopard.jaguar.tiger.caracal.puma.cheetah.bed\nrm ${data_prefix}/*.bed\n\necho 'attention!'\necho 'check the the dog genome was used as outgroup here - manually add it to the outgroups in the script if necessary'\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion leopard jaguar tiger caracal lynx puma cheetah leopard_cat fishing_cat\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/lion.leopard.jaguar.tiger.caracal.lynx.puma.cheetah.leopard_cat.fishing_cat.bed\nrm 
${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs puma cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/puma.cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion \ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/lion.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs leopard \ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/leopard.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs jaguar\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/jaguar.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs tiger\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/tiger.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs caracal\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/caracal.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lynx\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/lynx.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs puma\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/puma.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs fishing_cat\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/fishing_cat.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs leopard_cat\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs/leopard_cat.bed\nrm ${data_prefix}/*.bed\n\n" }, { "alpha_fraction": 0.6339473128318787, "alphanum_fraction": 0.6365119814872742, "avg_line_length": 40.63106918334961, "blob_id": 
"c0e9f01ffaa532215bd3418a05b1464d358f8854", "content_id": "6087e4749b0ccf861bd20dcef1c8097fa774f2e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4289, "license_type": "no_license", "max_line_length": 114, "num_lines": 103, "path": "/bin/breakpoints_group_filter.py", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom argparse import ArgumentParser\nimport subprocess\nimport os\n\nlatin_names = ['PantheraLeo', 'PantheraPardus', 'PantheraOnca', 'PantheraTigris', \\\n 'CaracalCaracal', 'LynxPardina', 'PumaConcolor', 'AcinonyxJubatus', \\\n 'PrionailurusBengalensis', 'PrionailurusViverrinus']\n#may be add dog , 'CanisFamiliaris'\n#or hyena 'CrocutaCrocuta' \ncommon_names = ['lion', 'leopard', 'jaguar', 'tiger', 'caracal', 'lynx', 'puma', \\\n 'cheetah', 'leopard_cat', 'fishing_cat']\n#, 'dog'\n#hyena\n\ndata_prefix = '/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae/'\n\n#add extra column to bedfile telling which genome it belongs to\ndef label_bed(common_name, latin_name, chrom, tempfile, suffix, ref_name='FelisCatus'):\n #get bedfile with breaks\n path = os.path.join(data_prefix, common_name, '.'.join([latin_name,ref_name ,str(chrom), suffix, 'bed']))\n command = 'awk \\'{print $0\\\"\\t' + common_name +'\\\"}\\' ' + path +' >> ' + tempfile\n subprocess.call(command, shell=True)\n\n#find common breaks in a group of genomes stored in filename\ndef merge(filename):\n result = filename+'.merged'\n command = ' '.join(['bedtools merge -i', filename, '-c 4 -o collapse >', result])\n subprocess.call(command, shell=True)\n return result\n\ndef rm(name):\n os.remove(name) if os.path.exists(name) else None\n\ndef preprocess_group(groups, chrom, tempfile, suffix):\n rm(tempfile)\n #if not groups:\n # f = open(tempfile, 'w')\n # f.close()\n for g in groups:\n label_bed(g[0], g[1], chrom, tempfile, suffix) \n sorted_group = sort(tempfile)\n 
rm(tempfile)\n #print 'tmp files:', tempfile\n merged_group = merge(sorted_group)\n #print 'tmp files:', sorted_group\n rm(sorted_group)\n return merged_group\n\ndef sort(filename):\n result = filename+'.sorted'\n command = ' '.join(['bedSort', filename, result])\n subprocess.call(command, shell=True)\n return result\n\ndef subtract(fileA, fileB, result):\n command = ' '.join(['bedtools subtract -A -a', fileA, '-b', fileB, '>', result])\n subprocess.call(command, shell=True)\n\ndef extract_group_specific(overlap_file, group):\n group_file = overlap_file + '.group'\n with open(overlap_file) as f:\n with open(group_file, 'w') as outf:\n for line in f:\n line = line.strip()\n if len(line.split()[-1].split(',')) == len(group):\n outf.write(line+'\\n')\n return group_file\n\ndef run(group_genomes, outgroup_genomes, suffix):\n for chrom in range(0,20):\n print 'processing chromosome', chrom\n tempfile_group = os.path.join(data_prefix,'groups')\n result_group = preprocess_group(group_genomes, chrom, tempfile_group, suffix)\n if outgroup_genomes:\n tempfile_outgroups = os.path.join(data_prefix,'outgroups')\n result_outgroups = preprocess_group(outgroup_genomes, chrom, tempfile_outgroups, suffix='all')\n filtered_group_file = extract_group_specific(result_group, group_genomes)\n final_result = os.path.join(data_prefix,'.'.join(map(lambda x: x[0], group_genomes) + [str(chrom),'bed']))\n if outgroup_genomes:\n subtract(filtered_group_file, result_outgroups, final_result)\n else:\n command = ' '.join(['mv', filtered_group_file, final_result])\n subprocess.call(command, shell=True)\n print 'result at', final_result\n #print 'tmp files:', result_group, result_outgroups\n #print 'tmp_file:', filtered_group_file\n rm(filtered_group_file)\n rm(result_group)\n if outgroup_genomes:\n rm(result_outgroups)\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--suffix', default='all', choices=['all','transloc','filtered_breaks', 'filterNs'])\n 
parser.add_argument('group',nargs='+',help='list of common names \\\n for the group of species to analyze')\n args = parser.parse_args()\n sorted_names = zip(common_names, latin_names)\n group_genomes = filter(lambda x: x[0] in args.group, sorted_names)\n outgroup_genomes = filter(lambda x: x[0] not in args.group, sorted_names)\n run(group_genomes, outgroup_genomes, args.suffix) \n" }, { "alpha_fraction": 0.5869715809822083, "alphanum_fraction": 0.5954030752182007, "avg_line_length": 36, "blob_id": "1966698aaf3ed08c5ad90f2b219aec93be4e769a", "content_id": "d6c8902d9d6bc983da0ff95f88b81c564d04bf50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8658, "license_type": "no_license", "max_line_length": 111, "num_lines": 234, "path": "/bin/psl_merger.py", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\nfrom pycbio.hgdata.psl import PslReader\nfrom pycbio.hgdata.psl import PslBlock\nfrom pycbio.hgdata.psl import Psl\nfrom argparse import ArgumentParser\nfrom itertools import groupby,chain\nimport time\nimport datetime\n\ndef get_blocks_set(psl_file):\n blocks = []\n for psl in PslReader(psl_file):\n blocks += psl.blocks\n return set(blocks)\n\n#assume a.start < b.start\ndef are_syntenic(a, b):\n return a.qEnd <= b.qStart and\\\n a.tEnd <= b.tStart and\\\n a.psl.tName == b.psl.tName and\\\n a.psl.strand == b.psl.strand\n\n\ndef is_not_overlapping_ordered_pair(a, b, threshold=5000):\n return are_syntenic(a, b) and\\\n 0 <= b.qStart - a.qEnd < threshold and\\\n 0 <= b.tStart - a.tEnd < threshold \n\ndef merge_group(group, merged):\n group = sorted(group, key=lambda x: x.qStart)\n mergeable = [group[0]]\n for i in range(1,len(group)):\n if is_not_overlapping_ordered_pair(group[i-1], group[i]):\n mergeable.append(group[i])\n else:\n merged.append(mergeable)\n mergeable = [group[i]]\n merged.append(mergeable)\n\ndef merge(psl):\n print 'getting block set....'\n 
blocks = get_blocks_set(psl)\n print 'sorting block set....'\n blocks = sorted(blocks, key=lambda x: x.psl.qName)\n blocks_grouped_by_query = map(lambda x:list(x[1]), groupby(blocks, key=lambda x:x.psl.qName)) \n merged = []\n i = 0\n for group in blocks_grouped_by_query:\n merge_group(group, merged)\n i += 1\n return merged\n \ndef get_next(pos, query_group, max_anchor_distance=5000):\n f = []\n for i in range(pos+1, len(query_group)):\n if is_not_overlapping_ordered_pair(query_group[pos], query_group[i], max_anchor_distance):\n if not f:\n f.append(i)\n elif f and is_not_overlapping_ordered_pair(query_group[f[0]], query_group[i], max_anchor_distance):\n return f\n else:\n f.append(i)\n return f \n\ndef dfs(i, group, path, paths, used) :\n used.add(group[i])\n assert i not in path , \"{} not in {}\".format(i, path)\n path.append(i)\n nexts = get_next(i, group)\n assert set(nexts) & set(path) == set()\n if not nexts:\n assert not map(lambda x: group[x], path) in paths, \"{}\".format(group[i].psl.qName)\n paths.append(map(lambda x: group[x], path))\n for e in nexts:\n dfs(e, group, path, paths, used)\n path.pop()\n\ndef depth_merge(psl):\n blocks = get_blocks_set(psl)\n blocks = sorted(blocks, key=lambda x: x.psl.qName)\n blocks_grouped_by_query = map(lambda x:list(x[1]), groupby(blocks, key=lambda x:x.psl.qName))\n paths = []\n for group in blocks_grouped_by_query:\n print 'processing group', group[0].psl.qName\n group = sorted(group, key=lambda x: x.qStart)\n used = set()\n for i in range(len(group)):\n if not group[i] in used:\n dfs(i, group, [], paths, used)\n return paths\n\ndef best_routes(merged):\n selected = []\n weighted_routes = zip(merged, map(lambda path: sum(map(lambda x: x.qEnd - x.qStart, path)), merged))\n weighted_routes = sorted(weighted_routes, key=lambda x:x[1], reverse=True)\n used = set()\n for route,_ in weighted_routes:\n if not set(route) & used:\n selected.append(route)\n used.update(route)\n return selected\n\n'''\ndag is a dict that for 
a given vertex stores all its possible next verties\nhidden is a set of vertices that are already in paths\n'''\ndef weigh_dag(group, dag, hidden, max_anchor_distance):\n #weight of the edge equals length \n #of the next block\n #weight of a vertice equals \n #estimated weight: w_j < w_i + w_e(ij) =>\n #updated w_j\n #also remember how we came here: \n #(prev_vertex, weight)\n weighted_dag = {}\n for i in range(len(group)):\n if i in hidden:\n continue\n #print i, group[i], len(group)\n if not i in dag:\n nexts = get_next(i, group, max_anchor_distance)\n dag[i] = nexts\n else:\n nexts = dag[i]\n #if never visited this vertex then \n #weight of it equals to its size\n #because otherwise we will never count its size\n if not i in weighted_dag:\n weighted_dag[i] = (-1, group[i].size)\n for j in nexts:\n if j in hidden:\n continue\n alternative_weight = weighted_dag[i][1] + group[j].size\n if not j in weighted_dag or weighted_dag[j][1] < alternative_weight: \n #w_i + weight of the next edge \n weighted_dag[j] = (i, alternative_weight)\n return weighted_dag\n\ndef traceback(weighted_dag, hidden, group):\n #get the heaviest path weight\n start_vertex = max(weighted_dag.items(), key=lambda x:x[1][1])[0]\n path = [start_vertex]\n prev_vertex = weighted_dag[start_vertex][0]\n while prev_vertex != -1:\n path.append(prev_vertex)\n prev_vertex = weighted_dag[prev_vertex][0]\n hidden.update(set(path))\n return map(lambda x: group[x], path)[::-1]\n\ndef dag_merge(psl, min_block_breath, max_anchor_distance):\n blocks = get_blocks_set(psl)\n blocks = sorted(blocks, key=lambda x: x.psl.qName)\n blocks_grouped_by_query = map(lambda x:list(x[1]), groupby(blocks, key=lambda x:x.psl.qName))\n paths = []\n for group in blocks_grouped_by_query:\n dag = {}\n #set of hidden vertices\n hidden = set()\n #print 'processing group', group[0].psl.qName\n group = sorted(group, key=lambda x: x.qStart)\n while len(group) != len(hidden):\n ts = time.time()\n st = 
datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n #print st, len(group), len(hidden)\n weighted_dag = weigh_dag(group, dag, hidden, max_anchor_distance)\n path = traceback(weighted_dag, hidden, group)\n if not path:\n break\n qLen = path[-1].qEnd - path[0].qStart\n tLen = path[-1].tEnd - path[0].tStart\n if qLen >= min_block_breath and tLen >= min_block_breath:\n paths.append(path)\n #think if we should keep path that is not complies with lengths bounds\n #we could traverse these vertices later in other paths\n #for e in path:\n # group.remove(e)\n return paths\n \ndef construct_psl(blocks):\n psl = Psl()\n psl.match = sum(map(lambda x: x.qEnd-x.qStart, blocks))\n psl.misMatch = 0\n psl.repMatch = 0\n psl.nCount = 0\n psl.qNumInsert = len(filter(lambda x: x>0, map(lambda x: x[1].qStart-x[0].qEnd, zip(blocks,blocks[1:]))))\n psl.qBaseInsert = sum(map(lambda x: x[1].qStart-x[0].qEnd, zip(blocks,blocks[1:])))\n psl.tNumInsert = len(filter(lambda x: x>0, map(lambda x: x[1].tStart-x[0].tEnd, zip(blocks,blocks[1:]))))\n psl.tBaseInsert = sum(map(lambda x: x[1].tStart-x[0].tEnd, zip(blocks,blocks[1:])))\n psl.qName = blocks[0].psl.qName\n psl.qSize = blocks[0].psl.qSize\n psl.qStart = blocks[0].qStart\n psl.qEnd = blocks[-1].qEnd\n psl.tName = blocks[0].psl.tName\n psl.tSize = blocks[0].psl.tSize\n psl.strand = blocks[0].psl.strand\n if psl.strand == '++':\n psl.tStart = blocks[0].tStart\n psl.tEnd = blocks[-1].tEnd\n elif psl.strand == '+-':\n psl.tEnd = psl.tSize - blocks[0].tStart\n psl.tStart = psl.tSize - blocks[-1].tEnd\n psl.blockCount = len(blocks)\n for b in blocks:\n b.psl = psl\n psl.blocks = blocks\n return psl\n \nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('alg', help='type of algorithm: simple/recursion/dag')\n parser.add_argument('min_block_size', nargs='?', default=5000, type=int)\n parser.add_argument('max_anchor_distance', nargs='?', default=5000, type=int)\n parser.add_argument('psl')\n 
parser.add_argument('out')\n args = parser.parse_args()\n if args.alg == 'simple':\n merged = merge(args.psl)\n elif args.alg == 'recursion':\n print 'performing dfs...'\n merged = depth_merge(args.psl)\n print 'extracting best routes...'\n merged = best_routes(merged) \n elif args.alg == 'dag':\n print 'dag merge...'\n print 'using min_block_size = ', args.min_block_size, \\\n 'max_anchor_distance =', args.max_anchor_distance \n merged = dag_merge(args.psl, args.min_block_size, args.max_anchor_distance)\n print 'storing output...'\n with open(args.out, 'w') as f:\n for blocks in merged:\n psl = construct_psl(blocks)\n f.write('\\t'.join(psl.toRow())+'\\n')\n" }, { "alpha_fraction": 0.6222362518310547, "alphanum_fraction": 0.6260265111923218, "avg_line_length": 38.54999923706055, "blob_id": "9156810f0c6a7aedbe51d33dc6557037c5e9fd6f", "content_id": "7cc44155039c3ddbd970f620f93b49f7a16a4377", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1583, "license_type": "no_license", "max_line_length": 87, "num_lines": 40, "path": "/bin/run_jobs.py", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport sys\nfrom argparse import ArgumentParser\n\nfrom cluster_utils import run_joblist\n\nprefix='/hive/groups/recon/projs/felidae_comp/synteny-play-ground'\n\ndef make_joblist(specie, common_name, max_jobs=4):\n chromosomes = range(0,20)\n chromosomes = map(str, chromosomes)\n #chromosomes = map(lambda x: 'scaffold'+x, chromosomes)\n joblist=os.path.join(prefix,'data/jobs.txt')\n with open(joblist, 'w') as f:\n for c in chromosomes:\n script = os.path.join(prefix,'bin/psl_merger_wrapper.sh')\n folder=os.path.join(prefix,'data/felidae/',common_name)\n if not os.path.isdir(folder):\n #os.makedirs(folder)\n raise Exception('No input data folder', folder)\n data = os.path.join(folder,specie+'.FelisCatus.'+c+'.psl')\n #output = data.split('psl')[0] + 
'merged.psl' \n folder=os.path.join(prefix,'data/human/',common_name)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n output = os.path.join(folder,specie+'.FelisCatus.'+c+'.merged.psl')\n line = ' '.join([script, data, output])\n f.write(line+'\\n')\n return joblist\n \nif __name__ == '__main__' :\n parser = ArgumentParser()\n parser.add_argument('specie', help='name in cactus file, example: AcinonyxJubatus')\n parser.add_argument('common_name', help='name for use, example: cheetah')\n args = parser.parse_args()\n joblist = make_joblist(args.specie, args.common_name)\n #print joblist\n run_joblist(joblist, 4)\n\n" }, { "alpha_fraction": 0.5833682417869568, "alphanum_fraction": 0.5936933159828186, "avg_line_length": 41.40828323364258, "blob_id": "de1e1efeb0a4ab69fc9cb47e0f602e29001f67a4", "content_id": "beed4b11f1a7aa953debe2efe55d56ca1a8016b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7167, "license_type": "no_license", "max_line_length": 115, "num_lines": 169, "path": "/bin/breakpoints_group_filter_clustering.py", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom argparse import ArgumentParser\nimport os\nfrom collections import defaultdict\nimport numpy as np\nfrom itertools import groupby\nfrom Bio import SeqIO\n\nlatin_names = ['PantheraLeo', 'PantheraPardus', 'PantheraOnca', 'PantheraTigris', \\\n 'CaracalCaracal', 'LynxPardina', 'PumaConcolor', 'AcinonyxJubatus', \\\n 'PrionailurusBengalensis', 'PrionailurusViverrinus']\n#may be add dog , 'CanisFamiliaris'\n#or hyena 'CrocutaCrocuta' \ncommon_names = ['lion', 'leopard', 'jaguar', 'tiger', 'caracal', 'lynx', 'puma', \\\n 'cheetah', 'leopard_cat', 'fishing_cat']\n#, 'dog'\n#hyena\n\ndata_prefix = '/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae/'\nsizes_prefix = '/hive/groups/recon/projs/felidae_comp/analysis/cactus/11-01-2017/bed/'\n\nclass Breakpoint:\n def 
__init__(self, chrom, start, end, qChrom1, qStart, qChrom2, qEnd, queryNs, targetNs):\n self.chrom = chrom\n self.start = start\n self.end = end\n self.qChrom1 = qChrom1\n self.qStart = qStart\n self.qChrom2 = qChrom2\n self.qEnd = qEnd\n self.middle = self.start+float(end - start)/2\n self.queryNs = queryNs\n self.targetNs = targetNs\n\n def is_transloc(self):\n return self.qChrom1 != self.qChrom2\n\ndef load_genome_breakpoints(path, genome, backwards):\n breaks = []\n with open(path) as f:\n for line in f:\n line = line.strip().split()\n b = Breakpoint(line[0], int(line[1]), int(line[2]), line[3], int(line[4]), line[5],\\\n int(line[6]), float(line[7]), float(line[8]))\n breaks.append(b)\n backwards[b].append(genome)\n #backwards[(b.start, b.end)].append(genome)\n return sorted(breaks, key=lambda x:x.start)\n\ndef load_breakpoints(data_prefix, chrom, groups, outgroups, suffix):\n backwards = defaultdict(list)\n for genome in groups + outgroups:\n path = os.path.join(data_prefix, genome[0], genome[1]+'.FelisCatus.'+str(chrom)+'.'+suffix+'.bed')\n load_genome_breakpoints(path, genome, backwards)\n return backwards\n\n#b1.start <= b2.start\ndef if_overlaps(b1, b2):\n #if b1.end == b2.start:\n # return False\n return b1.end >= b2.start - 1000\n\ndef cluster_by_overlap(points):\n clusters = []\n cur = [points[0]]\n for p in points[1:]:\n if if_overlaps(cur[-1],p):\n cur.append(p)\n else:\n clusters.append(list(cur))\n cur = [p]\n return clusters \n\ndef cluster_lin(points, threshold=1000):\n clusters = []\n cur = [points[0]]\n for p in points[1:]:\n if abs(cur[-1].middle-p.middle) < threshold:\n cur.append(p)\n else:\n clusters.append(list(cur))\n cur = [p]\n return clusters \n\ndef run(group_genomes, outgroup_genomes, suffix, transloc, clustering_type):\n for chrom in range(0,20):\n #for chrom in range(0,1):\n #first load all the genomes\n backwards= load_breakpoints(data_prefix, chrom, group_genomes, outgroup_genomes, suffix)\n if not backwards.keys():\n continue\n 
r = cluster_lin(sorted(backwards.keys(), key=lambda x:x.middle)) if clustering_type=='middle' \\\n else cluster_by_overlap(sorted(backwards.keys(), key=lambda x:(x.start, x.end))) \n r = sorted(r, key=lambda x: len(x))\n #print 'number of points', len(backwards.keys())\n #print 'number of clusters', len(r)\n path = os.path.join(data_prefix, '.'.join(map(lambda x: x[0], group_genomes)+[str(chrom),'bed']))\n get_breaks_for_group(group_genomes, r, backwards, path, transloc)\n \n \ndef get_breaks_for_group(group, clusters, backwards, path, transloc, threshold_Ns=0.30):\n #i = 1\n with open(path, 'w') as f:\n for c in clusters:\n cur_genomes = set()\n not_ok_start_end = False\n for gs in map(lambda x: backwards[x], c):\n cur_genomes.update(gs)\n not_ok_breaks = filter(lambda x: x.queryNs > threshold_Ns or x.targetNs > threshold_Ns, c)\n not_transloc_c = filter(lambda x: not x.is_transloc(), c)\n if cur_genomes == set(group) and not not_ok_breaks:\n if not transloc or transloc and not not_transloc_c:\n chrom = c[0].chrom \n start = min(map(lambda x: x.start, c))\n end = max(map(lambda x: x.end, c))\n l = \"\\t\".join(map(str,[chrom, start, end]))\n f.write(l+'\\n')\n\ndef run_for_all(all_genomes, suffix, transloc, clustering_type):\n for chrom in range(0,20):\n #for chrom in range(0,1):\n #first load all the genomes\n backwards= load_breakpoints(data_prefix, chrom, all_genomes, [], suffix)\n if not backwards.keys():\n continue\n r = cluster_lin(sorted(backwards.keys(), key=lambda x:x.middle)) if clustering_type=='middle' \\\n else cluster_by_overlap(sorted(backwards.keys(), key=lambda x:(x.start, x.end))) \n r = sorted(r, key=lambda x: len(x))\n #print chrom, 'number of points', len(backwards.keys())\n #print chrom, 'number of clusters', len(r)\n path = os.path.join(data_prefix, '.'.join(['all_breaks',str(chrom),'bed']))\n get_breaks_for_all(r, backwards, path, transloc)\n \n \ndef get_breaks_for_all(clusters, backwards, path, transloc, threshold_Ns=0.30):\n #i = 1\n 
print path\n with open(path, 'w') as f:\n for c in clusters:\n cur_genomes = set()\n not_ok_start_end = False\n for gs in map(lambda x: backwards[x], c):\n cur_genomes.update(gs)\n not_ok_breaks = filter(lambda x: x.queryNs > threshold_Ns or x.targetNs > threshold_Ns, c)\n not_transloc_c = filter(lambda x: not x.is_transloc(), c)\n if not not_ok_breaks and (not transloc or transloc and not not_transloc_c):\n chrom = c[0].chrom \n start = min(map(lambda x: x.start, c))\n end = max(map(lambda x: x.end, c))\n l = \"\\t\".join(map(str,[chrom, start, end, '.'.join(sorted(map(lambda x: x[0], cur_genomes)))]))\n f.write(l+'\\n')\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--suffix', choices=['prefilterNs'])\n parser.add_argument('--transloc', default=False, action='store_true',help='report only translocs in query?')\n parser.add_argument('--clustering_type', default='middle', choices=['middle', 'ovl']) \n parser.add_argument('group',nargs='*',help='list of common names \\\n for the group of species to analyze')\n args = parser.parse_args()\n sorted_names = zip(common_names, latin_names)\n if args.group:\n group_genomes = filter(lambda x: x[0] in args.group, sorted_names)\n outgroup_genomes = filter(lambda x: x[0] not in args.group, sorted_names)\n run(group_genomes, outgroup_genomes, args.suffix, args.transloc, args.clustering_type) \n else:\n run_for_all(sorted_names, args.suffix, args.transloc, args.clustering_type)\n" }, { "alpha_fraction": 0.7481019496917725, "alphanum_fraction": 0.7481019496917725, "avg_line_length": 44.51852035522461, "blob_id": "7e1799abe61abebc665836c6205e46c4a3b33baa", "content_id": "dc5f4b20157d65939c66dd3689d048289520936c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3688, "license_type": "no_license", "max_line_length": 137, "num_lines": 81, "path": "/bin/group_filter_breaks_all.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": 
"UTF-8", "text": "#!/bin/bash\n\ndata_prefix=/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae\n\n./breakpoints_group_filter.py --suffix filtered_breaks leopard_cat fishing_cat \ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/leopard_cat.fishing_cat.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks lion leopard\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/lion.leopard.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks lion leopard jaguar\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/lion.leopard.jaguar.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks lion leopard jaguar tiger\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/lion.leopard.jaguar.tiger.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks lion leopard jaguar tiger caracal\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/lion.leopard.jaguar.tiger.caracal.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks lion leopard jaguar tiger caracal lynx\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/lion.leopard.jaguar.tiger.caracal.lynx.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks lion leopard jaguar tiger caracal lynx puma cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/lion.leopard.jaguar.tiger.caracal.lynx.puma.cheetah.bed\nrm ${data_prefix}/*.bed\n\necho 'attention!'\necho 'should rerun group filter for only domestic cat genome using dog as outgroup - manually change the outgroups in script'\n./breakpoints_group_filter.py --suffix filtered_breaks lion leopard jaguar tiger caracal lynx puma cheetah leopard_cat fishing_cat\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/lion.leopard.jaguar.tiger.caracal.lynx.puma.cheetah.leopard_cat.fishing_cat.bed\nrm 
${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks puma cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/puma.cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks lion \ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/lion.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks leopard \ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/leopard.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks jaguar\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/jaguar.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks tiger\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/tiger.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks caracal\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/caracal.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks lynx\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/lynx.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks puma\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/puma.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks fishing_cat\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/fishing_cat.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter.py --suffix filtered_breaks leopard_cat\ncat ${data_prefix}/*.bed > ${data_prefix}/filtered_breaks/leopard_cat.bed\nrm ${data_prefix}/*.bed\n\n" }, { "alpha_fraction": 0.6628205180168152, "alphanum_fraction": 0.6897435784339905, "avg_line_length": 59, "blob_id": "92f63e2f7cfb7a7458de1872b2cc8ef24444415e", "content_id": 
"183f9018b465154020a9b762fe182def4c3f1930", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 780, "license_type": "no_license", "max_line_length": 252, "num_lines": 13, "path": "/bin/get_filtered_breaks.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\norg=$2\nlatin_name=$1\ndata_prefix='/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae/'\nbin_prefix='/hive/groups/recon/projs/felidae_comp/synteny-play-ground/bin'\ncactus_prefix='/hive/groups/recon/projs/felidae_comp/analysis/cactus/11-01-2017/bed/'\nfor c in {0..19}; do\n echo 'scaffold' $c\n ${bin_prefix}/find_breakpoints.py --all ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.merged.psl ${cactus_prefix}/${latin_name}.bed | awk '{print $1\"\\t\"$3\"\\t\"$4\"\\t\"$6}' > ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.filtered_breaks.bed\ndone\n#cat ${data_prefix}/${org}/${latin_name}.FelisCatus.transloc.[0-9]* > ${data_prefix}/${org}/${latin_name}.FelisCatus.transloc.bed\n#rm ${data_prefix}/${org}/${latin_name}.FelisCatus.transloc.[0-9]*\n" }, { "alpha_fraction": 0.631205677986145, "alphanum_fraction": 0.655210018157959, "avg_line_length": 90.6500015258789, "blob_id": "7bed591a6e7145c93fb36026cc5bafa6bcd29da4", "content_id": "623b2bb1777323f3a52cf9195d15c82b8f49e184", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1833, "license_type": "no_license", "max_line_length": 335, "num_lines": 20, "path": "/bin/get_filtered_breaks_Ns.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": 
"#!/bin/bash\n\norg=$2\nlatin_name=$1\nquery_fasta=$3\ntarget_fasta='/hive/groups/recon/projs/felidae_comp/assemblies/FelisCatus/FelCat_8.0.fixed.fa.masked'\ndata_prefix='/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae/'\nbin_prefix='/hive/groups/recon/projs/felidae_comp/synteny-play-ground/bin'\ncactus_prefix='/hive/groups/recon/projs/felidae_comp/analysis/cactus/11-01-2017/bed/'\n#rm /hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae/filterNs/${latin_name}.bed\nfor c in {0..19}; do\n echo 'scaffold' $c\n ${bin_prefix}/find_breakpoints.py --fasta_target ${target_fasta} --fasta_query ${query_fasta} ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.merged.psl ${cactus_prefix}/${latin_name}.bed > ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.prefilterNs.bed\n \n #cut -f 1,2,3 ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.prefilterNs.bed | diff ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.all.bed /dev/stdin | grep '>' | sed 's/> //' | awk '($3!=$2) {print $0}' | awk '{if ($3<$2) {print $1\"\\t\"$3\"\\t\"$2} else {print $0}}' > ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.tmp\n #cat ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.prefilterNs.bed | awk '{if ($3<$2) {print $1\"\\t\"$3\"\\t\"$2\"\\t\"$4\"\\t\"$5\"\\t\"$6} else {print $0}}' > ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.tmp3 \n #bedtools subtract -f 1.0 -a ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.tmp3 -b ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.tmp > ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.tmp2\n #mv ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.tmp2 ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.prefilterNs.bed\n #rm ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.tmp ${data_prefix}/${org}/${latin_name}.FelisCatus.${c}.tmp3\ndone\n" }, { "alpha_fraction": 0.6451612710952759, "alphanum_fraction": 0.7354838848114014, "avg_line_length": 37.75, "blob_id": 
"351c905768bc51df7c06cd74dec36b63f61cb955", "content_id": "bd6d4565414ee7da52bccc967b569a09a4363fc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 155, "license_type": "no_license", "max_line_length": 103, "num_lines": 4, "path": "/bin/psl_merger_wrapper.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/bin/bash\nsource ~/.bashrc\nset -beEu -o pipefail\n/hive/groups/recon/projs/felidae_comp/synteny-play-ground/bin/psl_merger.py dag 100000 100000 \"$1\" \"$2\"\n" }, { "alpha_fraction": 0.6426553726196289, "alphanum_fraction": 0.6426553726196289, "avg_line_length": 31.930233001708984, "blob_id": "eea30d307a35fa3a438ab4dab944cac88a381384", "content_id": "893a48e0a0172238c2d982efce11da671e1a9bf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1416, "license_type": "no_license", "max_line_length": 99, "num_lines": 43, "path": "/bin/psl2bed.py", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\nfrom pycbio.hgdata.psl import PslReader\nfrom pycbio.hgdata.psl import PslBlock\nfrom pycbio.hgdata.psl import Psl\nfrom argparse import ArgumentParser\nfrom itertools import groupby\n\n\ndef convert_blocks(psl_file):\n queries = []\n targets = []\n for psl in PslReader(psl_file):\n query = '\\t'.join(map(str, [psl.qName, psl.qStart, psl.qEnd]))\n queries.append(query)\n target = '\\t'.join(map(str, [psl.tName, psl.tStart, psl.tEnd]))\n targets.append(target)\n return queries, targets\n\ndef print_out(blocks, filename):\n with open(filename, 'w') as f:\n for b in blocks:\n f.write(b+'\\n')\n\ndef print_out_together(psl_file):\n for psl in PslReader(psl_file):\n print '\\t'.join(map(str, [psl.qName, psl.qStart, psl.qEnd, \\\n psl.tName, psl.tStart, psl.tEnd]))\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('psl')\n 
parser.add_argument('queries',nargs='?', help='bed file for queries')\n parser.add_argument('targets',nargs='?', help='bed file for targets')\n parser.add_argument('--one_file', action='store_true', help='put query and target in one file')\n args = parser.parse_args()\n if args.one_file:\n print_out_together(args.psl)\n else:\n queries,targets = convert_blocks(args.psl)\n print_out(queries, args.queries)\n print_out(targets, args.targets)\n" }, { "alpha_fraction": 0.639323353767395, "alphanum_fraction": 0.6910309791564941, "avg_line_length": 65.61701965332031, "blob_id": "a3b6aab6b07d546d698a7c1eb29d4e06299801b0", "content_id": "811268717ede01c91ebecb421fd30f74eee02fd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3133, "license_type": "no_license", "max_line_length": 169, "num_lines": 47, "path": "/bin/run_filtered_breaks_Ns_for_all.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfasta_prefix='/hive/groups/recon/projs/felidae_comp/assemblies/'\n#data_prefix='/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae/filterNs/'\n\necho 'cheetah'\n./get_filtered_breaks_Ns.sh AcinonyxJubatus cheetah ${fasta_prefix}/FelisCatus/FelCat_8.0.fixed.fa.masked\n#awk '($4 == \"False\" && $5 <= 0.39 && $6 <= 0.39 ) {print $1\"\\t\"$2\"\\t\"$3}' ${data_prefix}AcinonyxJubatus.bed > ${data_prefix}AcinonyxJubatus.filtered.bed\n\necho 'puma'\n./get_filtered_breaks_Ns.sh PumaConcolor puma ${fasta_prefix}/PumaConcolor/mountain_lion_17Sep2015_MifDV.fasta.masked\n#awk '($4 == \"False\" && $5 <= 0.39 && $6 <= 0.39 ) {print $1\"\\t\"$2\"\\t\"$3}' ${data_prefix}PumaConcolor.bed > ${data_prefix}PumaConcolor.filtered.bed\n\necho 'jaguar'\n./get_filtered_breaks_Ns.sh PantheraOnca jaguar ${fasta_prefix}/PantheraOnca/PanOnc_1.0.fixed.fa.masked \n#awk '($4 == \"False\" && $5 <= 0.39 && $6 <= 0.39 ) {print $1\"\\t\"$2\"\\t\"$3}' ${data_prefix}PantheraOnca.bed > 
${data_prefix}PantheraOnca.filtered.bed\n\necho 'leopard'\n./get_filtered_breaks_Ns.sh PantheraPardus leopard ${fasta_prefix}/PantheraPardus/PanPar_1.0.fixed.fa.masked\n#awk '($4 == \"False\" && $5 <= 0.39 && $6 <= 0.39 ) {print $1\"\\t\"$2\"\\t\"$3}' ${data_prefix}PantheraPardus.bed > ${data_prefix}PantheraPardus.filtered.bed\n\necho 'lion'\n./get_filtered_breaks_Ns.sh PantheraLeo lion ${fasta_prefix}/PantheraLeo/2016-06-22/PanLeo_1.0.fixed.fa.masked\n#awk '($4 == \"False\" && $5 <= 0.39 && $6 <= 0.39 ) {print $1\"\\t\"$2\"\\t\"$3}' ${data_prefix}PantheraLeo.bed > ${data_prefix}PantheraLeo.filtered.bed\n\necho 'tiger'\n./get_filtered_breaks_Ns.sh PantheraTigris tiger ${fasta_prefix}/PantheraTigris/2016-06-22/PanTig_1.0.fixed.fa.masked\n#awk '($4 == \"False\" && $5 <= 0.39 && $6 <= 0.39 ) {print $1\"\\t\"$2\"\\t\"$3}' ${data_prefix}PantheraTigris.bed > ${data_prefix}PantheraTigris.filtered.bed\n\necho 'leopard cat'\n./get_filtered_breaks_Ns.sh PrionailurusBengalensis leopard_cat ${fasta_prefix}/PrionailurusBengalensis/PriBen_1.0.fixed.fa.masked\n#awk '($4 == \"False\" && $5 <= 0.39 && $6 <= 0.39 ) {print $1\"\\t\"$2\"\\t\"$3}' ${data_prefix}PrionailurusBengalensis.bed > ${data_prefix}PrionailurusBengalensis.filtered.bed\n\necho 'fishing cat'\n./get_filtered_breaks_Ns.sh PrionailurusViverrinus fishing_cat ${fasta_prefix}/PrionailurusViverrinus/PriViv_1.0.fixed.fa.masked\n#awk '($4 == \"False\" && $5 <= 0.39 && $6 <= 0.39 ) {print $1\"\\t\"$2\"\\t\"$3}' ${data_prefix}PrionailurusViverrinus.bed > ${data_prefix}PrionailurusViverrinus.filtered.bed\n\necho 'caracal'\n./get_filtered_breaks_Ns.sh CaracalCaracal caracal ${fasta_prefix}/CaracalCaracal/CarCar_1.0.fixed.fa.masked\n#awk '($4 == \"False\" && $5 <= 0.39 && $6 <= 0.39 ) {print $1\"\\t\"$2\"\\t\"$3}' ${data_prefix}CaracalCaracal.bed > ${data_prefix}CaracalCaracal.filtered.bed\n\necho 'lynx'\n./get_filtered_breaks_Ns.sh LynxPardina lynx ${fasta_prefix}/LynxPardina/LynPar_1.0.fixed.fa.masked\n#awk '($4 == \"False\" 
&& $5 <= 0.39 && $6 <= 0.39 ) {print $1\"\\t\"$2\"\\t\"$3}' ${data_prefix}LynxPardina.bed > ${data_prefix}LynxPardina.filtered.bed\n\necho 'dog'\n./get_filtered_breaks_Ns.sh CanisFamiliaris dog ${fasta_prefix}/outgroups/CanisFamiliaris/CanFam_3.1.fixed.fa.masked \n" }, { "alpha_fraction": 0.7533889412879944, "alphanum_fraction": 0.7533889412879944, "avg_line_length": 43.09195327758789, "blob_id": "04e87300133632b482ce7e3d19abab7ae6f08fe8", "content_id": "291eac8b19553092d28633811ac9c7db8e4ffd5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3836, "license_type": "no_license", "max_line_length": 82, "num_lines": 87, "path": "/bin/group_filter_breaks_Ns_notree.sh", "repo_name": "diekhans/synteny-play-ground", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ndata_prefix=/hive/groups/recon/projs/felidae_comp/synteny-play-ground/data/felidae\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion caracal \ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/lion.caracal.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion lynx\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/lion.lynx.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion puma\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/lion.puma.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lion cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/lion.cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs tiger caracal\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/tiger.caracal.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs tiger lynx\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/tiger.lynx.bed\nrm 
${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs tiger puma\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/tiger.puma.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs tiger cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/tiger.cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs jaguar caracal\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/jaguar.caracal.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs jaguar lynx\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/jaguar.lynx.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs jaguar puma\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/jaguar.puma.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs jaguar cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/jaguar.cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs leopard caracal\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/leopard.caracal.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs leopard lynx\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/leopard.lynx.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs leopard puma\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/leopard.puma.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs leopard cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/leopard.cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs caracal lynx \ncat ${data_prefix}/*.bed > 
${data_prefix}/prefilterNs_notree/caracal.lynx.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs caracal puma\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/caracal.puma.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs caracal cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/caracal.cheetah.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lynx puma\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/lynx.puma.bed\nrm ${data_prefix}/*.bed\n\n./breakpoints_group_filter_clustering.py --suffix prefilterNs lynx cheetah\ncat ${data_prefix}/*.bed > ${data_prefix}/prefilterNs_notree/lynx.cheetah.bed\nrm ${data_prefix}/*.bed\n" } ]
21
benedict-chan/temp
https://github.com/benedict-chan/temp
a9a529e4b791734859567a955bd6cd5e1a7585c9
24824ff362835e53d44aafe8cbad2d93491c76be
384dc54127cd7a2cbea03b66654c9f35b88815e3
refs/heads/master
"2020-04-06T04:31:22.523840"
"2015-08-05T02:03:13"
"2015-08-05T02:03:13"
28,120,388
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6041814088821411, "alphanum_fraction": 0.6215450167655945, "avg_line_length": 24.432432174682617, "blob_id": "260388b1459268d80d34c8d29542ec93cc91f2dd", "content_id": "f79e6b550b5ae546416c27437bf4fb0772a5f080", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2822, "license_type": "no_license", "max_line_length": 106, "num_lines": 111, "path": "/testenv/scraphead.py", "repo_name": "benedict-chan/temp", "src_encoding": "UTF-8", "text": "import requests\nimport time\nimport sys\nimport datetime\nimport smtplib\nfrom pprint import pprint\n\nimport lxml.html\nimport re\nimport traceback\n\n\nGMAIL_USERNAME = \"\"\nGMAIL_PASSWORD = \"\"\n\nSPECIAL_MSG = \"\"\"\n<br />\n<br />\nIn case you need to add it to the shopping cart manually\n<br />\nhttp://www.nikestore.com.hk/shoppingcart\n<br />\n<br />\n$j.post(\"/shoppingcart/add\", {upc: \"xxxxxxx\",count: 1}, function(j) { <br />\n var n = j.shoppingCommand; <br />\n console.dir(n); <br />\n}); <br />\n<br />\n<br />\n<br />\n<br />\n\"\"\"\n\ndef send_simple_email(recipient, email_subject, body_of_email):\n\tsession = smtplib.SMTP('smtp.gmail.com', 587)\n\tsession.ehlo()\n\tsession.starttls()\n\tsession.login(GMAIL_USERNAME, GMAIL_PASSWORD)\n\n\theaders = \"\\r\\n\".join([\"from: \" + GMAIL_USERNAME,\n\t \"subject: \" + email_subject,\n\t \"to: \" + recipient,\n\t \"mime-version: 1.0\",\n\t \"content-type: text/html\"])\n\n\t# body_of_email can be plaintext or html! 
\n\tcontent = headers + \"\\r\\n\\r\\n\" + body_of_email\n\tsession.sendmail(GMAIL_USERNAME, recipient, content)\n\tpass\n\n#skuCodeDialog = \"768929-623\"\ndef keep_request_page():\n\tprint 'Requesting: %s' % datetime.datetime.now()\n\t#skuCodeDialog = \"768861-601\"\n\t#url = \"http://www.nikestore.com.hk/product/%s/detail.htm?pdpRecommend=false&preSkuCode=\" % skuCodeDialog\n\turl = \"http://www.nikestore.com.hk/product/fair/WT7vXzY9.htm?pdpRecommend=false&preSkuCode=\"\n\tresp = requests.get(url=url, allow_redirects=False)\n\tif resp.status_code == 200:\n\t\tprint 'Exists!!'\n\t\thas_upc = False\n\t\tbody = url\n\t\ttry:\n\t\t\ttree = lxml.html.fromstring(resp.text)\n\t\t\tfor li in tree.cssselect(\".select-box-size li\"):\n\t\t\t\thas_upc = True\n\t\t\t\tsize = re.sub(r'\\W+', '', li.text_content())\n\t\t\t\tupc = li.attrib[\"currupc\"]\n\t\t\t\tif size == \"95\":\n\t\t\t\t\tnew_item = \"<br /> Size %s, upc code: <span style='color: #ff0000'> %s </span> <br />\" % (size, upc)\n\t\t\t\telse:\n\t\t\t\t\tnew_item = \"<br /> Size %s, upc code: %s <br />\" % (size, upc)\n\t\t\t\tbody = body + new_item\n\t\t\t\tpass\n\t\texcept Exception, e:\n\t\t\tprint 'Fail!!'\n\t\t\tif body:\n\t\t\t\tpprint(body)\n\t\t\t\tpass\n\t\t\ttraceback.print_exc()\n\t\tfinally:\n\t\t\tpass\n\t\tbody = body + SPECIAL_MSG\n\t\tif has_upc:\n\t\t\tsend_simple_email(\"\", \"Your Nike's Link\", body)\n\t\t\t#send_simple_email(\"\", \"Your Nike's Link\", body)\n\t\t\tprint body\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\t\tpass\n\telse:\n\t\tprint 'Not exists'\n\t\t#print resp.headers[\"location\"]\n\t\treturn True\n\tresp.close()\n\tpass\n\n\ndef start_process():\n\tcounter = 10 #loop for 5 minute * 12 * 64 hours\n\tkeep_request = True\n\twhile keep_request and counter > 0:\n\t\tkeep_request = keep_request_page()\n\t\tcounter = counter - 1\n\t\ttime.sleep(600) #sleep for 60 seconds\n\t\tprint 'Still have %s times to go' % counter\n\t\tsys.stdout.flush()\n\tpass\n\nif __name__ == 
\"__main__\":\n\tstart_process()" }, { "alpha_fraction": 0.5801169872283936, "alphanum_fraction": 0.5976608395576477, "avg_line_length": 22.55555534362793, "blob_id": "f75b578e0c40eba139382cd6a8753a853e3ed06c", "content_id": "9c17e474c0701fb7564e4330f9bfdb59644c1cb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 855, "license_type": "no_license", "max_line_length": 72, "num_lines": 36, "path": "/testenv/phantomtest.js", "repo_name": "benedict-chan/temp", "src_encoding": "UTF-8", "text": "var system = require('system');\n\n\nvar page = require('webpage').create();\n\npage.onConsoleMessage = function(msg) {\n system.stderr.writeLine('console: ' + msg);\n};\n\npage.onLoadFinished = function() {\n console.log(\"page.onLoadFinished\");\n page.evaluate(function(){\n\t\t$j.ajax({\n\t\t\tasync: false,\n\t\t\turl: \"http://www.nikestore.com.hk/shoppingcart/add\", \n\t\t\tdata: {upc: \"00659658169586\",count: 1},\n\t\t\ttype: 'post',\n\t\t\tsuccess: function(data){\n\t\t\t\t\t\t\tconsole.log('done');\n\t\t\t\t\t\t var n = data.shoppingCommand;\n\t\t\t\t\t\t console.log(n.availableQty);\n\t\t\t\t\t\t console.log(n.skuName);\n\t\t\t\t\t\t console.log(n.skuCode);\n\t\t\t\t\t\t console.log(n.subtotal);\n\t\t\t\t\t\t}\n\t\t\t});\n });\n};\n\npage.open('http://www.nikestore.com.hk/shoppingcart', function(status) {\n console.log(\"Status: \" + status);\n if(status === \"success\") {\n\n }\n phantom.exit();\n});\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5609609484672546, "alphanum_fraction": 0.6708709001541138, "avg_line_length": 28.73214340209961, "blob_id": "38e2bfe1c2a00b430f0d66008372740c7e5ed844", "content_id": "8756c4e84188f18d1c5db4d6eb08b442631eccb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1665, "license_type": "no_license", "max_line_length": 135, "num_lines": 56, "path": "/testenv/niketest.js", "repo_name": "benedict-chan/temp", 
"src_encoding": "UTF-8", "text": "\n/*\n1. Load page and get upc\n2. Go to this page\n3. Run this in command\nhttp://www.nikestore.com.hk/shoppingcart/\n\nupc_your_size = \"\";\n$j.post(\"/shoppingcart/add\", {upc: upc_your_size, count: 1}, function(j) {\n var n = j.shoppingCommand;\n console.dir(n);\n omnitureCartAddedHandler(\"768861-601\", \"1249\");\n});\n\n\n*/\n$j.post(\"/shoppingcart/add\", {upc: \"00659658169586\",count: 1}, function(j) {\n var n = j.shoppingCommand;\n console.dir(n);\n omnitureCartAddedHandler(\"768929-623\", \"1299\");\n});\n\n$j.post(\"/shoppingcart/add\", {upc: \"00659658169586\",count: 1}, function(j) {\n var n = j.shoppingCommand;\n console.dir(n);\n});\n\n768861-601 site:www.nikestore.com.hk\n\nhttp://www.nikestore.com.hk/product/768929-623/detail.htm?pdpRecommend=false&preSkuCode=\nhttp://www.nikestore.com.hk/product/768861-601/detail.htm?pdpRecommend=false&preSkuCode=\nomnitureQuickBuyAddToCart(\"768861-601\", \"1249\")\n\nloxia.syncXhrPost( \"/product/validateSku\", {skuCode: \"768861-601\"});\n var e = _contextPath + \"/product/\" + d + \"/dialog\";\n var b = loxia.syncXhrPost(\"/product/768861-601/dialog\", { isSameSku: false,pdpRecommend: true});\n\n\n var f = loxia.syncXhrPost(\"/transaction/doValidatorQsGoToTransactionCheck2\", {upc: g,count: 1});\n\n\nc = \"768929-623\", a = \"1299\"\nfunction omnitureCartAddedHandler(c, a) {\n var b = {eVar28: p3 + \":Add to cart\",events: \"scAdd,event22=\" + a,products: \";\" + c + \";;\",prop3: p3 + \":Add to cart\",prop17: p17};\n trackLink(b, \"cartAddedSuccess\")\n}\n\neVar28: \"PDP:Add to cart\"\neVar70: \"D=c5\"\nevents: \"scAdd,event22=1299\"\nproducts: \";768929-623;;\"\nprop3: \"PDP:Add to cart\"\nprop15: \"hk_cn\"\nprop17: \"PDP\"\n\n\n00659658169593" } ]
3
yueyoum/bulk_create_test
https://github.com/yueyoum/bulk_create_test
1821c66ec649f75d3b2bfad341aff733a23b995e
16a35ca130d7dfb783e69be1ac923107e8598196
d60784746a563d99a49cc29a4946a3fe19a07593
refs/heads/master
"2021-01-19T06:25:58.925166"
"2016-06-20T10:35:34"
"2016-06-20T10:35:34"
61,538,540
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7028112411499023, "alphanum_fraction": 0.7121820449829102, "avg_line_length": 27.69230842590332, "blob_id": "9064c136ad32c51f801923f80678e84449f05405", "content_id": "18ce5ad9f6322f4b808973edf67881e093dfd905", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 747, "license_type": "no_license", "max_line_length": 57, "num_lines": 26, "path": "/myapp/admin.py", "repo_name": "yueyoum/bulk_create_test", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom import_export import resources\nfrom import_export.admin import ImportExportModelAdmin\n\nfrom myapp.models import TestModel\n\nclass ResourceTestModel_1(resources.ModelResource):\n class Meta:\n model = TestModel\n\n def before_import(self, *args, **kwargs):\n self._meta.model.objects.all().delete()\n\n def get_or_init_instance(self, instance_loader, row):\n return (self.init_instance(row), True)\n\nclass ResourceTestModel_2(resources.ModelResource):\n class Meta:\n model = TestModel\n bulk_replace = True\n\[email protected](TestModel)\nclass AdminTestModel(ImportExportModelAdmin):\n resource_class = ResourceTestModel_2\n list_display = ('id', 'f1', 'f2', 'f3', 'f4',)\n\n" }, { "alpha_fraction": 0.6586102843284607, "alphanum_fraction": 0.6797583103179932, "avg_line_length": 22.64285659790039, "blob_id": "f2c875130d67ef81604470e1b13d69c078944c21", "content_id": "3631e1b3505cb0391ae9fd61d9540295647a7da1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 46, "num_lines": 14, "path": "/myapp/models.py", "repo_name": "yueyoum/bulk_create_test", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nfrom django.db import models\n\n\nclass TestModel(models.Model):\n id = models.IntegerField(primary_key=True)\n f1 = models.CharField(max_length=255)\n f2 = models.IntegerField()\n f3 = 
models.TextField()\n f4 = models.IntegerField()\n\n class Meta:\n db_table = 'test_table'\n" }, { "alpha_fraction": 0.4761255085468292, "alphanum_fraction": 0.5075033903121948, "avg_line_length": 24.275861740112305, "blob_id": "9edf80dfc20e7eb807ba193cedcc409463a1ca5f", "content_id": "ee109023b1d9e452abec3ce18d2b778d0908478a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, "license_type": "no_license", "max_line_length": 79, "num_lines": 29, "path": "/myapp/migrations/0001_initial.py", "repo_name": "yueyoum/bulk_create_test", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.6 on 2016-06-20 09:52\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='TestModel',\n fields=[\n ('id', models.IntegerField(primary_key=True, serialize=False)),\n ('f1', models.CharField(max_length=255)),\n ('f2', models.IntegerField()),\n ('f3', models.TextField()),\n ('f4', models.IntegerField()),\n ],\n options={\n 'db_table': 'test_table',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.5896033048629761, "alphanum_fraction": 0.7222982048988342, "avg_line_length": 25.563636779785156, "blob_id": "a8c76367db61bf357ac3bf6af4bb779372a968d2", "content_id": "b5ab7234f3b29282283e6855472d2120635bc771", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1462, "license_type": "no_license", "max_line_length": 94, "num_lines": 55, "path": "/README.md", "repo_name": "yueyoum/bulk_create_test", "src_encoding": "UTF-8", "text": "## Benchmark for [this](https://github.com/django-import-export/django-import-export/pull/473)\n\n### The expected behavior:\n\nUse the imported data totally. Not keep old data.\n\n\nThe are two way to do this:\n\n1. 
override `before_import` and `get_or_init_instance`.\n2. using the new option: `bulk_repace`\n\nin file [myapp/admin.py](myapp/admin.py), \n\n`ResourceTestModel_1` is the first way.\n\n`ResourceTestModel_2` is the second way.\n\n\n\n### Benchmark\n\n`TestModel-2016-06-20.xls` is the test data which contains 10K rows.\n\nusing mysql database. (`pymysql` as driver).\n\n\nthis is the runserver log:\n\n1. First way\n\n```\n[TIME MEASURE] /admin/myapp/testmodel/import/: 11.0680501461\n[20/Jun/2016 10:16:32] \"POST /admin/myapp/testmodel/import/ HTTP/1.1\" 200 5599231\n[TIME MEASURE] /admin/myapp/testmodel/process_import/: 96.4507160187\n[20/Jun/2016 10:18:20] \"POST /admin/myapp/testmodel/process_import/ HTTP/1.1\" 302 0\n[TIME MEASURE] /admin/myapp/testmodel/: 0.127159118652\n[20/Jun/2016 10:18:20] \"GET /admin/myapp/testmodel/ HTTP/1.1\" 200 46114\n```\n\nTotal cost 107 seconds.\n\n2. Second way\n\n```\n[TIME MEASURE] /admin/myapp/testmodel/import/: 1.76946806908\n[20/Jun/2016 10:19:20] \"POST /admin/myapp/testmodel/import/ HTTP/1.1\" 302 0\n[TIME MEASURE] /admin/myapp/testmodel/: 0.116591215134\n[20/Jun/2016 10:19:20] \"GET /admin/myapp/testmodel/ HTTP/1.1\" 200 45954\n```\n\nTotal cost 1.8 seconds.\n\n\n#### The way 2 (using `bulk_replace` option) is about 60 times faster than way 1.\n\n" }, { "alpha_fraction": 0.5451776385307312, "alphanum_fraction": 0.5878172516822815, "avg_line_length": 17.58490562438965, "blob_id": "b316703cd3a349915d2ff0ab092fdb052258759d", "content_id": "83b43fabe108a171905a928e3c43a6c8f818828b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 985, "license_type": "no_license", "max_line_length": 67, "num_lines": 53, "path": "/set_random_data.py", "repo_name": "yueyoum/bulk_create_test", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wang Chao <[email protected]>\nFilename: set_random_data.py\nDate created: 2016-06-20 17:45:27\nDescription:\n\n\"\"\"\n\nimport os\nimport sys\nimport uuid\nimport random\nimport pymysql\n\npymysql.install_as_MySQLdb()\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"mytest1.settings\")\n\nimport django\ndjango.setup()\n\nfrom myapp.models import TestModel\n\ntry:\n AMOUNT = int(sys.argv[1])\nexcept:\n AMOUNT = 10000\n\ndef create_random_data():\n data = []\n for i in range(1, AMOUNT+1):\n data.append({\n 'id': i,\n 'f1': str(uuid.uuid4()),\n 'f2': random.randint(1, 10000),\n 'f3': str(uuid.uuid4()),\n 'f4': random.randint(1, 10000),\n })\n\n return data\n\ndef set_data():\n TestModel.objects.all().delete()\n\n data = create_random_data()\n objs = [TestModel(**d) for d in data]\n\n TestModel.objects.bulk_create(objs)\n\nif __name__ == '__main__':\n set_data()\n" }, { "alpha_fraction": 0.6312949657440186, "alphanum_fraction": 0.6618704795837402, "avg_line_length": 24.227272033691406, "blob_id": "f132a3ecd2b7e9c144ca33a6085c3f953003853a", "content_id": "62b0210276d899163970e525792acbec310610c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 556, "license_type": "no_license", "max_line_length": 73, "num_lines": 22, "path": "/mytest1/middleware.py", "repo_name": "yueyoum/bulk_create_test", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wang Chao <[email protected]>\nFilename: middleware.py\nDate created: 2016-06-20 18:11:01\nDescription:\n\n\"\"\"\n\nimport time\n\nclass TimeMeasureRequestMiddleware(object):\n def process_request(self, request):\n request._time_measure_star_at = time.time()\n\nclass TimeMeasureResponseMiddleware(object):\n def process_response(self, request, response):\n time_passed = time.time() - request._time_measure_star_at\n print \"[TIME MEASURE] {0}: {1}\".format(request.path, 
time_passed)\n\n return response\n\n" } ]
6
maduenyo/Vega
https://github.com/maduenyo/Vega
3331f4497833cbc1ae96ca285f379949dc00f408
73ab01797ba1605c586f7f39ddfc5ea06e61e1ff
d86202ea6e7d3d14af4372b4425686c96e8b8c36
refs/heads/master
"2020-06-06T05:30:20.224073"
"2014-05-25T08:32:12"
"2014-05-25T08:32:12"
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.5171695947647095, "avg_line_length": 34.55555725097656, "blob_id": "3d12b9e8b0fd31f606c5d41755baae1a19e6d5ec", "content_id": "e01ffd2d7d393448e96453942292b3d08dfb604e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 961, "license_type": "no_license", "max_line_length": 96, "num_lines": 27, "path": "/Whatsapp/vega_Whatsapp.py", "repo_name": "maduenyo/Vega", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# coding: latin-1\n# ---------------------------------------------------\n# - T O P -\n# ---------------------------------------------------\nimport sys\nimport pdb\n\nimport vega_Path\nimport vega_Log_mod\nimport vega_ScreenHeader_mod\nimport vega_SetVariables_mod\nimport vega_Whatsapp_mod\n\n# Set variables -------------------------------------\nvPathLog = vega_SetVariables_mod.funSetVariable('WAPP_LOG')\n\n# Starting execution --------------------------------\nvega_ScreenHeader_mod.funPrintHeader (\" STARTING EXECUTION\", vLogFile=vPathLog)\nvega_ScreenHeader_mod.funPrintHeader (\"Whatsapp\", vLogFile=vPathLog)\n\n# Startup Whatsapp ----------------------------------\nvega_Whatsapp_mod.main()\n\n# End of execution ----------------------------------\nvega_ScreenHeader_mod.funPrintHeader (\" END OF EXECUTION\", vLogFile=vPathLog)\nvega_Log_mod.funAppendToLog ( \"\", vLogFile=vPathLog)\n\n" }, { "alpha_fraction": 0.4498428702354431, "alphanum_fraction": 0.4520183801651001, "avg_line_length": 32.0880012512207, "blob_id": "b3ce33accb7a9d191cb66ae7375395b4ea2ad5d8", "content_id": "3094e48a6c7a0a0ca8c8ce110729a7280372991f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4137, "license_type": "no_license", "max_line_length": 108, "num_lines": 125, "path": "/CommonFunctions/vega_CommonFunctions_mod.py", "repo_name": "maduenyo/Vega", "src_encoding": "UTF-8", "text": 
"#!/usr/bin/python3\n\n# ---------------------------------------------------\n# - Get own IP and Hostname -\n# ---------------------------------------------------\ndef GetOwnIpAndHostname ():\n\n sHostname = socket.gethostname()\n sIPAddress = socket.gethostbyname(sHostname)\n\n return sIPAddress, sHostname\n\n\n# ---------------------------------------------------\n# - VLookUp Table 2 -\n# ---------------------------------------------------\ndef funVLookUpTable2 (vDbConn, vTableName, vInputColNameList, vInputValueMatrix, vOutputColName):\n\n\t# Set variables -----------------------------\n\tvFilterMatrix = [ str(tuple(vInputValueList)).replace(',)',')') for vInputValueList in vInputValueMatrix ]\n\n\t# Set SQL Query -----------------------------\n\tvIndex = 0\n\tvSqlQueryInitial = \"SELECT * FROM \" + vTableName # + \" WHERE \"\n\tvSqlQuery = vSqlQueryInitial \n\tfor vInputColName in vInputColNameList:\n\n\t\t# Check for wildcard ------------------------\n\t\tif '*' not in vFilterMatrix[vIndex]:\n\n\t\t\t# Check if vSqlQuery needs \" WHERE\" ---------\n\t\t\tif vSqlQuery == vSqlQueryInitial:\n\t\t\t\tvSqlQuery = vSqlQuery + \" WHERE \"\n\n\t\t\t# Set variables -----------------------------\n\t\t\tvSqlQuery = vSqlQuery + vInputColName + \" IN \" + vFilterMatrix[vIndex] + \" AND \"\n\n\t\t# Set variables -----------------------------\n\t\tvIndex = vIndex + 1\n\n\t# Remove last \" AND \" from vSqlQuery ----------------\n\tvSqlQuery = vSqlQuery[:-5]\n\n\t# Get records -------------------------------\n\tvCursor = vDbConn.cursor()\n\tvCursor.execute(vSqlQuery)\n\tvData = vCursor.fetchall()\n\n\t# Get List ----------------------------------\n\tvTableDict = funGetTableDictionary (vDbConn, vTableName)\n\tvOutputCol = vTableDict[vOutputColName]\n\tvOutputList = [ vRow[vOutputCol] for vRow in vData ]\n\n\t# Exit --------------------------------------\n\treturn vOutputList\n\n\n# ---------------------------------------------------\n# - VLookUp Table -\n# 
---------------------------------------------------\ndef funVLookUpTable (vDbConn, vTableName, vInputColName, vInputValueList, vOutputColName):\n\n\t# Set variables -----------------------------\n\tvSqlQuery = \"SELECT * FROM \" + vTableName\n\tif '*' not in vInputValueList:\n\t\tvSqlQuery = vSqlQuery + \" WHERE \" + vInputColName + \" IN \" + str(tuple(vInputValueList)).replace(',)',')')\n\n\t# Get records -------------------------------\n\tvCursor = vDbConn.cursor()\n\tvCursor.execute(vSqlQuery)\n\tvData = vCursor.fetchall()\n\n\t# Get List ----------------------------------\n\tvTableDict = funGetTableDictionary (vDbConn, vTableName)\n\tvOutputCol = vTableDict[vOutputColName]\n\tvOutputList = [ vRow[vOutputCol] for vRow in vData ]\n\n\t# Exit --------------------------------------\n\treturn vOutputList\n\n\n# ---------------------------------------------------\n# - Get Table Dictionary -\n# ---------------------------------------------------\ndef funGetTableDictionary (vDbConn, vTableName):\n\n\twith vDbConn:\n\n\t\t# Set variables -------------------------------------\n\t\tvTableDictionary = {}\n\n\t\t# Get header of table -------------------------------\n\t\tvCursor = vDbConn.cursor()\n\t\tvCursor.execute(\"SELECT * FROM \" + vTableName)\n\t\tvColumnNameList = [vColName[0] for vColName in vCursor.description]\n\n\t\t# Create dictionary ---------------------------------\n\t\tvCol = 0\n\t\tfor vColumName in vColumnNameList:\n\t\t\tvTableDictionary.update ( { vColumName : vCol } )\n\t\t\tvCol = vCol + 1\n\n\t\t# Exit ----------------------------------------------\n\t\treturn vTableDictionary\n\n\n# ---------------------------------------------------\n# - Get Timestamp -\n# ---------------------------------------------------\ndef funGetTimestamp ():\n\n\t# Set variables -------------------------------------\n\tvToday = datetime.datetime.now()\n\tvTimeStamp = vToday.strftime('%Y%m%d %H:%M.%S')\n\t\n\t# Exit ----------------------------------------------\n\treturn 
vTimeStamp\n\n\n# ---------------------------------------------------\n# - T O P -\n# ---------------------------------------------------\nimport datetime\nimport socket\nimport pdb\n\n" }, { "alpha_fraction": 0.7521310448646545, "alphanum_fraction": 0.7549723982810974, "avg_line_length": 41.43262481689453, "blob_id": "86e5cedf619cfb18bb09d41a02b8a1f354130c11", "content_id": "cc9da617f8b23d43427e315f5e7a29382deb04b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5983, "license_type": "no_license", "max_line_length": 155, "num_lines": 141, "path": "/Examples/CmdClient.py", "repo_name": "maduenyo/Vega", "src_encoding": "UTF-8", "text": "'''\nCopyright (c) <2012> Tarek Galal <[email protected]>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this \nsoftware and associated documentation files (the \"Software\"), to deal in the Software \nwithout restriction, including without limitation the rights to use, copy, modify, \nmerge, publish, distribute, sublicense, and/or sell copies of the Software, and to \npermit persons to whom the Software is furnished to do so, subject to the following \nconditions:\n\nThe above copyright notice and this permission notice shall be included in all \ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, \nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR \nA PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT \nHOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF \nCONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE \nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n'''\nfrom Yowsup.connectionmanager import YowsupConnectionManager\nimport time, datetime, sys\nimport os.path\nimport pdb\n\nif sys.version_info >= (3, 0):\n\traw_input = input\n\nclass WhatsappCmdClient:\n\n\tbAuthFinished = False\n\tbAuthSuccess = False\n\tbSetProfileFile = False\n\tbSetProfileSuccess = False\n\tbSetStatus = False\n\tbDisconnected = True\n\tsDisconnectReason = \"\"\n\tlvMessage = []\n\t\n\tdef __init__(self, phoneNumber, keepAlive = False, sendReceipts = False):\n\t\tself.sendReceipts = sendReceipts\n\t\tself.phoneNumber = phoneNumber\n\t\tself.jid = \"%[email protected]\" % phoneNumber\n\t\tself.myjid=\"\"\n\t\t\n\t\tself.sentCache = {}\n\t\tWhatsappCmdClient.lvMessage = []\n\t\t\n\t\tself.connectionManager = YowsupConnectionManager(False)\n\t\tself.connectionManager.setAutoPong(keepAlive)\n\t\tself.signalsInterface = self.connectionManager.getSignalsInterface()\n\t\tself.methodsInterface = self.connectionManager.getMethodsInterface()\n\t\t\n\t\tself.signalsInterface.registerListener(\"auth_success\", self.onAuthSuccess)\n\t\tself.signalsInterface.registerListener(\"auth_fail\", self.onAuthFailed)\n\t\tself.signalsInterface.registerListener(\"message_received\", self.onMessageReceived)\n\t\tself.signalsInterface.registerListener(\"receipt_messageSent\", self.onMessageSent)\n\t\tself.signalsInterface.registerListener(\"presence_updated\", self.onPresenceUpdated)\n\t\tself.signalsInterface.registerListener(\"disconnected\", 
self.onDisconnected)\n\t\tself.signalsInterface.registerListener(\"profile_setPictureSuccess\",self.onPictureSuccess)\n\t\tself.signalsInterface.registerListener(\"profile_setPictureError\",self.onPictureError)\n\t\tself.signalsInterface.registerListener(\"contact_gotProfilePicture\",self.onProfilePicture),\n\t\tself.signalsInterface.registerListener(\"profile_setStatusSuccess\",self.onStatusSuccess)\n\t\t\n\t\t#0: method, 1: parameter name in prompt, 2: position in prompt \n\t\t#self.commandMappings = {\"lastseen\": (lambda: self.methodsInterface.call(\"presence_request\", (self.jid,)),\"\",6),\n\t\t\t\t\t\t\t\t#\"available\": (lambda: self.methodsInterface.call(\"presence_sendAvailable\"),\"\",1),\n\t\t\t\t\t\t\t\t#\"unavailable\": (lambda: self.methodsInterface.call(\"presence_sendUnavailable\"),\"\",2),\n\t\t\t\t\t\t\t\t#\"setprofile\": (lambda file: self.setProfile(file,),\"filename\",4),\n\t\t\t\t\t\t\t\t#\"setstatus\": (lambda status: self.setStatus(status,),\"status\",5),\n\t\t\t\t\t\t\t\t#\"getprofile\": (lambda: self.methodsInterface.call(\"contact_getProfilePicture\", (self.jid,)),\"\",3),\n\t\t\t\t\t\t\t\t#\"exit\": \t (self.done,\"\",7)\n\t\t\t\t\t\t\t\t #}\n\n\t\t#self.done = False\n\t\t#self.signalsInterface.registerListener(\"receipt_messageDelivered\", lambda jid, messageId: self.methodsInterface.call(\"delivered_ack\", (jid, messageId)))\n\t\n\tdef login(self, username, password):\n\t\tself.username = username\n\t\tself.myjid = \"%[email protected]\" % username\n\t\tself.methodsInterface.call(\"auth_login\", (username, password))\n\n\tdef onAuthSuccess(self, username):\n\t\tself.methodsInterface.call(\"ready\")\n\t\tWhatsappCmdClient.bAuthFinished = True\n\t\tWhatsappCmdClient.bAuthSuccess = True\n\t\tWhatsappCmdClient.bDisconnected = False\n\n\tdef onAuthFailed(self, username, err):\n\t\tWhatsappCmdClient.bAuthFinished = True\n\t\tWhatsappCmdClient.bAuthSuccess = False\n\n\tdef setProfile(self,file):\n\t\tif 
os.path.isfile(file):\n\t\t\tself.methodsInterface.call(\"profile_setPicture\", (file,))\n\t\t\tWhatsappCmdClient.bSetProfileFile = True\n\t\telse:\n\t\t\tWhatsappCmdClient.bSetProfileFile = False\n\t\t\t\n\tdef onPictureSuccess(self, id):\n\t\tWhatsappCmdClient.bSetProfileSuccess = True\n\n\tdef onPictureError(self, error):\n\t\tWhatsappCmdClient.bSetProfileSuccess = False\n\n\tdef onMessageReceived(self, messageId, jid, messageContent, timestamp, wantsReceipt, pushName, isBroadcast):\n\t\tformattedDate = datetime.datetime.fromtimestamp(timestamp).strftime('%Y%m%d %H:%M.%S')\n\t\tWhatsappCmdClient.lvMessage.append((jid, formattedDate, messageContent, messageId))\n\t\t\n\t\tif wantsReceipt and self.sendReceipts:\n\t\t\tself.methodsInterface.call(\"message_ack\", (jid, messageId))\n\n\tdef onMessageSent(self, jid, messageId):\n\t\tpass\n\n\tdef sendMessage (self, jid, message):\n\t\tmsgId = self.methodsInterface.call(\"message_send\", (jid, message))\n\t\tself.sentCache[msgId] = [int(time.time()), message]\n\n\tdef setStatus(self, status):\n\t\tself.methodsInterface.call(\"profile_setStatus\", (status,)),\n\t\tWhatsappCmdClient.bSetStatus = False\n\n\tdef onStatusSuccess(self, jid, messageId):\n\t\tself.methodsInterface.call(\"message_ack\", (jid, messageId))\n\t\tWhatsappCmdClient.bSetStatus = True\n\n\tdef onDisconnected(self, reason):\n\t\tWhatsappCmdClient.bDisconnected = True\n\t\tWhatsappCmdClient.sDisconnectReason = reason\n\n\n\n\n\t\t\n\tdef onProfilePicture(self, jid, pictureid, path):\n\t\tprint(\"Got profile picture of %s: id: %s path:%s\" % (jid, pictureid, path))\n\n\tdef onPresenceUpdated(self, jid, lastSeen):\n\t\tformattedDate = datetime.datetime.fromtimestamp(long(time.time()) - lastSeen).strftime('%d-%m-%Y %H:%M')\n\t\tself.onMessageReceived(0, jid, \"LAST SEEN RESULT: %s\"%formattedDate, long(time.time()), False, None, False)\n" }, { "alpha_fraction": 0.44119036197662354, "alphanum_fraction": 0.4463863968849182, "avg_line_length": 
32.87200164794922, "blob_id": "f1ff8c20806e7be7ba6df598f662b38ebf92db79", "content_id": "f20467b36a943f52efdc21e544beda931388e61c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4234, "license_type": "no_license", "max_line_length": 144, "num_lines": 125, "path": "/Log/vega_Log_mod.py", "repo_name": "maduenyo/Vega", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n# ---------------------------------------------------\n# - Append to Log -\n# ---------------------------------------------------\ndef funAdjustStringLength (vString, vLength):\n\n\t# Check length --------------------------------------\n\tif vLength < 10:\n\n\t\t# Exit ----------------------------------------------\n\t\treturn vString\n\n\t# Check string lenght -------------------------------\n\tif len(vString) < vLength:\n\n\t\t# Add padding to string -----------------------------\n\t\tvString = vString + \" \" * (vLength - len(vString))\n\n\telif len(vString) > vLength:\n\n\t\t# Cut string length ---------------------------------\n\t\tvString = vString[0:vLength-3] + '...'\n\n\t# Exit ----------------------------------------------\n\treturn vString\n\n\n# ---------------------------------------------------\n# - Append to Log -\n# ---------------------------------------------------\ndef funAppendToLog (vLogLine='', vEnd='\\n', vLogHeader=True, vSilentMode=False, vLogFile=\"\"):\n\n\t# Check vShowLog ------------------------------------\n\tif vSilentMode == True:\n\n\t\t# Silent mode -------------------------------\n\t\treturn\n\n\t# Get caller info -----------------------------------\n\tvFrame = inspect.currentframe()\n\n\t# Back to needed frame ------------------------------\n\tvContinue = True\n\twhile vContinue == True: \n\t\tif vFrame.f_code.co_name == 'funAppendToLog':\n\t\t\tvFrame = vFrame.f_back\n\t\t\tvContinue = False\n\n\t\telse:\n\t\t\tvFrame = vFrame.f_back\n\n\t# Set variables -------------------------------------\n\tvCode = 
vFrame.f_code\n\tvModule = vCode.co_filename.split('/')[-1]\n\tvFunction = vCode.co_name\n\n\t# Adjust string lenghts -----------------------------\n\tvLength = 40\n\tvModule = funAdjustStringLength (vModule, vLength)\n\tvFunction = funAdjustStringLength (vFunction, vLength)\n\n\t# Set variables -------------------------------------\n\tvTimestamp = vega_CommonFunctions_mod.funGetTimestamp()\n\tif vLogFile == \"\":\n\t\tvPath_Log = vega_SetVariables_mod.funSetVariable('PATH_LOG')\n\telse:\n\t\tvPath_Log = vLogFile\n\t\n\t# Write to Log --------------------------------------\n\twith open(vPath_Log, 'a') as vLogFile:\n\t\tif vLogHeader == False:\n\t\t\t# Log without timestamp -----------------------------\n\t\t\tvLogFile.write( vLogLine )\n\n\t\telse:\n\t\t\t# Log with timestamp --------------------------------\n\t\t\tif vLogLine != '':\n\t\t\t\tvLogFile.write(vTimestamp + \" (\" + vModule + \", \" + vFunction + \"): \" + vLogLine )\n\n\t\tvLogFile.write( vEnd )\n\n\n# ---------------------------------------------------\n# - Append To Log Traceback Info -\n# ---------------------------------------------------\ndef funAppendToLogTracebackInfo (vError, vMessage, vLogFile=\"\"):\n\n\t# Set Varaibles -------------------------------------\n\tvDetail = vError.args[0]\n\tvTraceBack = traceback.extract_tb(sys.exc_info()[2])\n\n\t# Log -----------------------------------------------\n\tfunAppendToLog ( \"ERROR DETECTED \" + \"-\" * 87 + \": \" + \"-\" * 51 , vLogHeader=False, vLogFile=vLogFile )\n\tfunAppendToLog ( \"ERROR:\\t \" + vMessage, vLogFile=vLogFile)\n\tfunAppendToLog ( \"DETAIL:\\t \" + vDetail[0].upper() + vDetail[1:], vLogFile=vLogFile )\n\n\t# Traceback -----------------------------------------\n\tvIndex = 0\n\tfor vFrame in vTraceBack:\n\n\t\t# Set variables -------------------------------------\n\t\tvIndex = vIndex + 1\n\t\tvFileName, vCodeLineNumber, vFunctionName, vCode = vFrame\n\n\t\t# Log -----------------------------------------------\n\t\tfunAppendToLog 
( \" \", vLogFile=vLogFile )\n\t\tfunAppendToLog ( \"FRAME \" + str(vIndex) + \": \" + \"File Name:\\t\" + vFileName + \" (Line \" + str(vCodeLineNumber) + \")\", vLogFile=vLogFile )\n\t\tfunAppendToLog ( \"\\t \" + \"Function:\\t\" + vFunctionName, vLogFile=vLogFile )\n\t\tfunAppendToLog ( \"\\t \" + \"Code Line:\\t\" + vCode , vLogFile=vLogFile )\n\n\tfunAppendToLog ( \"ERROR DETECTED \" + \"-\" * 87 + \": \" + \"-\" * 51 , vLogHeader=False, vLogFile=vLogFile )\n\n\n# ---------------------------------------------------\n# - T O P -\n# ---------------------------------------------------\nimport inspect\nimport datetime\nimport pdb\nimport traceback\nimport sys\n\nimport vega_SetVariables_mod\nimport vega_CommonFunctions_mod\n" }, { "alpha_fraction": 0.4635509252548218, "alphanum_fraction": 0.4775209426879883, "avg_line_length": 52.9315071105957, "blob_id": "3892078cadd7083b367a7f17c9088a532fb637cc", "content_id": "593eea54e9bb561860c9173e6f490cd83af13a53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3937, "license_type": "no_license", "max_line_length": 141, "num_lines": 73, "path": "/SetVariables/vega_SetVariables_mod.py", "repo_name": "maduenyo/Vega", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n# ---------------------------------------------------\n# - Set Variable -\n# ---------------------------------------------------\ndef funSetVariable (vVarName):\n\n\t# PATH and FILEs ------------------------------------\n\tvVarDict = {}\n\tvVarDict.update ( { 'PATH_BASE' : '/home/pi/SCRIPTS/Python/Projects/vega' } )\n\tvVarDict.update ( { 'PATH_CRON' : '/home/pi/SCRIPTS/Python/Projects/vega/logs' } )\n\tvVarDict.update ( { 'PATH_DB' : '/home/pi/SCRIPTS/Python/Projects/vega/db/vega.db' } )\n\tvVarDict.update ( { 'PATH_EXE' : '/home/pi/SCRIPTS/Python/Projects/vega/root/ExecuteCommand/vega_ExecuteCommand.py' } )\n\tvVarDict.update ( { 'PATH_LOG' : 
'/home/pi/SCRIPTS/Python/Projects/vega/logs/vega_general.log' } )\n\n\t# EXECUTE COMMAND ------------------------------------\n\tvVarDict.update ( { 'EXEC_LOG' : '/home/pi/SCRIPTS/Python/Projects/vega/logs/vega_execute.log' } )\n\n\t# TECHNOLOGY -----------------------------------------\n\tvVarDict.update ( { 'VAR_TECH' : { 'Z-Wave' : ['ZWaveAPI', 'Run'], 'Linux' : ['/bin/sh -exec '] } } )\n\tvVarDict.update ( { 'EXE_ARGS' : [ [ 'LITERAL', ' -Standby OVERRIDE -Operation ENABLED -Calendar \"*\" -DoM \"*\" -DoW \"*\" -Cmd ' ] ] } ) \n\tvVarDict.update ( { 'ZWAVE_CMD_BASE' : [ [ 'LITERAL', '/usr/bin/curl -s -LN --globoff ' ], \\\n\t\t\t\t \t\t [ 'LITERAL', \"'http://\" ], \\\n\t\t\t\t\t\t [ 'POINTER', [ 'DEVICE', 'DEVPROP', 'CONNECTION', 'CONTROLLER', 'Host' ] ], \\\n\t\t\t\t\t\t [ 'LITERAL', '.' ], \\\n\t\t\t\t\t\t [ 'POINTER', [ 'DEVICE', 'DEVPROP', 'CONNECTION', 'CONTROLLER', 'Domain' ] ], \\\n\t\t\t\t\t\t [ 'LITERAL', ':' ], \\\n\t\t\t\t\t\t [ 'POINTER', [ 'DEVICE', 'DEVPROP', 'CONNECTION', 'CONTROLLER', 'Port' ] ], \\\n\t\t\t\t\t\t [ 'LITERAL', '/ZWaveAPI/Run/devices[' ], \\\n\t\t\t\t\t\t [ 'POINTER', [ 'DEVICE', 'DEVPROP', 'CONNECTION', 'Parameter01' ] ], \\\n\t\t\t\t\t\t [ 'LITERAL', '].instances[' ], \\\n\t\t\t\t\t\t [ 'POINTER', [ 'DEVICE', 'DEVPROP', 'CONNECTION', 'Parameter02' ] ], \\\n\t\t\t\t\t\t [ 'LITERAL', '].commandClasses[' ], \\\n\t\t\t\t\t\t [ 'POINTER', [ 'DEVICE', 'DEVPROP', 'CONNECTION', 'Parameter03' ] ], \\\n\t\t\t\t\t\t [ 'LITERAL', ']' ] \t] } )\n\tvVarDict.update ( { 'ZWAVE_CMD_GET' : [ [ 'LITERAL', '.data.level.value' + \"'\" ] ] } )\n\tvVarDict.update ( { 'ZWAVE_CMD_SET' : [ [ 'LITERAL', '.Set(' ], \\\n\t\t\t\t\t\t [ 'VALUE' ], \\\n\t\t\t\t\t\t [ 'LITERAL', ')' + \"'\" ] ] } )\n\n\n\t# WEATHER -------------------------------------------\n\tvVarDict.update ( { 'WEAT_LOG' : '/home/pi/SCRIPTS/Python/Projects/vega/logs/vega_weather.log' } )\n\tvVarDict.update ( { 'YAHOO' : [ 'yahoo.com', ['Rain', 'Showers', 'Thunderstorms', 'T-Storms', 'Snow' 
] ] } )\n\tvVarDict.update ( { 'WEATHER' : [ 'weather.com', ['Rain', 'Showers', 'Thunderstorms', 'T-Storms', 'Snow' ] ] } )\n\n\t# STATES ---------------------------------------------\n\tvVarDict.update ( { 'TOLERANCE' : 2 } )\n\tvVarDict.update ( { 'WAITTIME' : 1 } )\n\tvVarDict.update ( { 'WAITCYCLES' : 25 } )\n\n\t# WHATSAPP -------------------------------------------\n\tvVarDict.update ( { 'WAPP_LOG' : '/home/pi/SCRIPTS/Python/Projects/vega/logs/vega_whatsapp.log' } )\n\tvVarDict.update ( { 'WAPP_USERNAME' : '34668857875' } )\n\tvVarDict.update ( { 'WAPP_PASSWORD' : 'GDma1sXwkTORBQSkbJXApLVBZnQ=' } )\n\tvVarDict.update ( { 'WAPP_CALLER' : { '34668857875' : 'Vega', '34619809398' : 'Juan', '34619365132' : 'Marta'} } )\n\tvVarDict.update ( { 'WAPP_PIC' : '/home/pi/SCRIPTS/Python/Projects/vega/db/whatsapp_pic.jpeg' } )\n\tvVarDict.update ( { 'WAPP_STATUS' : 'At your command' } )\n\n\tvVarDict.update ( { 'WAPP_ACTION_EXIT' : ['reboot', 'bye', 'adi?s'] } )\n\tvVarDict.update ( { 'WAPP_ANSWER_EXIT' : ['reboot', 'bye', 'adi?s'] } )\n\n\t# DEBUG ----------------------------------------------\n\tvVarDict.update ( { 'DEBUG' : False } )\n\n\t# Exit ----------------------------------------------\n\treturn vVarDict[vVarName.upper()]\n\n\n# ---------------------------------------------------\n# - T O P -\n# ---------------------------------------------------\nimport pdb\n" }, { "alpha_fraction": 0.41971829533576965, "alphanum_fraction": 0.4361971914768219, "avg_line_length": 37.378379821777344, "blob_id": "00314cf0fc026d11add23d71a9d563b569510223", "content_id": "1246dad70114cff5f9117c373df29c582bf606b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7123, "license_type": "no_license", "max_line_length": 141, "num_lines": 185, "path": "/Whatsapp/vega_Whatsapp_CommandAnalysis_mod.py", "repo_name": "maduenyo/Vega", "src_encoding": "ISO-8859-1", "text": "#!/usr/bin/python\n# coding: latin-1\n\n# 
---------------------------------------------------\n# - Execute Command -\n# ---------------------------------------------------\ndef funExecuteComand (lsDevice, sAction):\n\n\t# Set variables -------------------------------------\n\tbExit = True\n\tlsCommand = []\n\n\t# Execute command device by device ------------------\n\tfor sDevice in lsDevice:\n\n\t\t# Set variables -------------------------------------\n\t\tsCommand = \"\"\n\n\t\t# Check device --------------------------------------\n\t\tif int(sDevice) < 100:\n\t\t\tsCommand = \"/usr/bin/curl -s -LN --globoff 'http://raspberry01.madnet.home:8083/ZWaveAPI/Run/devices[\" + sDevice \\\n\t\t\t\t\t+ \"].instances[0].commandClasses[38].Set(\" + sAction + \")'\"\n\n\t\telif sDevice in [\"101\"]:\n\t\t\tpass\n\n\t\telif sDevice in [\"201\"]:\n\t\t\t#pdb.set_trace()\n\t\t\tif type(sAction) is list:\n\t\t\t\t# Execute action by action ---------------------------\n\t\t\t\tfor sItem in sAction:\n\t\t\t\t\tsCommand = \"/usr/bin/curl -s -LN --globoff 'http://roomba780.madnet.home/roomba.cgi?button=\" + sItem + \"'\"\n\t\t\t\t\tlsCommand.append(sCommand)\n\t\t\t\t\tos.system(sCommand)\n\t\t\t\t\ttime.sleep(11.5)\n\n\t\t\t\t# Exit -----------------------------------------------\n\t\t\t\treturn lsCommand, bExit\n\n\t\t\telse:\t\n\t\t\t\tsCommand = \"/usr/bin/curl -s -LN --globoff 'http://roomba780.madnet.home/roomba.cgi?button=\" + sAction + \"'\"\n\n\t\t# Check sCommand ------------------------------------\n\t\tif sCommand <> \"\":\n\t\t\tlsCommand.append(sCommand)\n\t\t\tos.system(sCommand)\n\n\t# Exit ----------------------------------------------\n\treturn lsCommand, bExit\n\n\n# ---------------------------------------------------\n# - Extract Action From Msg -\n# ---------------------------------------------------\ndef funExtractActionFromMsg (sCurrMsg):\n\n\t# Set variables -------------------------------------\n\tdActionDict = { 'BAJAR' : '0', 'BAJA' : '0', \\\n\t\t\t 'CERRAR' : '0', 'CIERRA' : '0', u'CIÉRRA' : '0', 
'CERRANDO' : '0', u'CERRÁNDO' : '0', \\\n\t\t\t 'CHAPAR' : '0', 'CHAPA' : '0', u'CHÁPA' : '0', 'CHAPANDO' : '0', u'CHAPÁNDO' : '0', \\\n\t\t\t 'ABRIR' : '75', 'ABRE' : '75', u'ÁBRE' : '75', 'ABRIENDO' : '75', u'ABRIÉNDO' : '75', \\\n\t\t\t 'SUBIR' : '75', 'SUBE' : '75', u'SÚBE' : '75', 'SUBIENDO' : '75', u'SUBIÉNDO' : '75', \\\n\t\t\t 'LEVANTAR' : '75', 'LEVANTA' : '75', u'LEVÁNTA' : '75', 'LEVANTANDO' : '75', u'LEVANTÁNDO' : '75', \\\n\t\t\t 'LIMPIAR' : 'CLEAN', 'LIMPIA' : 'CLEAN', u'LÍMPIA' : 'CLEAN', 'LIMPIANDO' : 'CLEAN', u'LIMPIÁNDO' : 'CLEAN', 'LIMPIE' : 'CLEAN', \\\n\t\t\t 'BARRER' : 'CLEAN', 'BARRE' : 'CLEAN', u'BÁRRE' : 'CLEAN', 'BARRIENDO' : 'CLEAN', u'BARRIÉNDO' : 'CLEAN', 'BARRA' : 'CLEAN', \\\n\t\t\t 'PARAR' : 'CLEAN', 'PARA' : 'CLEAN', u'PÁRA' : 'CLEAN', 'PARANDO' : 'CLEAN', u'PARÁNDO' : 'CLEAN', 'PARE' : 'CLEAN', \\\n\t\t\t 'DETENER' : 'CLEAN', u'DET?N' : 'CLEAN', u'DETÉN' : 'CLEAN', 'DETENIENDO' : 'CLEAN', u'DETENIÉNDO' : 'CLEAN', 'DETENGA' : 'CLEAN', \\\n\t\t\t 'RECOGER' : 'DOCK', 'RECOGE' : 'DOCK', u'RECÓGE' : 'DOCK', 'RECOGIENDO' : 'DOCK', u'RECOGIÉNDO' : 'DOCK', 'RECOJA' : 'DOCK', \\\n\t\t\t 'MANTENIMIENTO' : ['CLEAN', 'CLEAN'] }\n\n\t# Set variables -------------------------------------\n\tsAction = \"\"\n\n\t# Check Action --------------------------------------\n\tfor sKey in dActionDict:\n\n\t\t# Check if match ------------------------------------\n\t\tif sCurrMsg.find(sKey) >= 0: \n\t\t\tsAction = dActionDict[sKey]\n\n\t# Return --------------------------------------------\n\treturn sAction\n\t\n\n# ---------------------------------------------------\n# - Extract Device List From Msg -\n# ---------------------------------------------------\ndef funExtractDeviceListFromMsg (sCurrMsg):\n\n\t#pdb.set_trace()\n\n\t# Set variables -------------------------------------\n\tdDevIdDict = { 'PERSIANA' : { 'DORMITORIO' : '3', 'ENCUENTRO Y AMOR' : '3', \\\n\t\t\t\t 'COCINA' : '4', \\\n\t\t\t\t\t u'SALÓN' : '5', 'COMEDOR' : '5', \\\n\t\t\t\t\t u'ÁLVARO' 
: '6', 'ALVARITO' : '6', 'PILAR' : '6', 'PILI' : '6', 'INVITADOS' : '6', \\\n\t\t\t\t\t 'OFICINA' : '7', }, \\\n\t\t\t'TELE' : { 'TELE' : '101' }, \\\n\t\t\t'ASPIRADORA' : { 'ROOMBA' : '201', '780' : '201', 'RUMBA' : '201' }, \\\n\t\t\t'ASPIRADOR' : { 'ROOMBA' : '201', '780' : '201', 'RUMBA' : '201' } }\n\n\t# Set variables -------------------------------------\n\tbKey = False\n\tlsDeviceList = []\n\tbDevTypePattern = False\n\n\t# Check Device Type ---------------------------------\n\tfor sKey in dDevIdDict:\n\t\tif sCurrMsg.find(sKey) >= 0 :\n\t\t\tdDevIdDict = dDevIdDict[sKey]\n\t\t\tbKey = True\n\t\t\tbreak\n\n\t# Check Device Id -----------------------------------\n\tif bKey == True:\n\n\t\tfor sKey in dDevIdDict:\n\n\t\t\t# Check if match ------------------------------------\n\t\t\tif sCurrMsg.find(sKey) >= 0 or sCurrMsg.find(\"TODAS\") >= 0: \n\n\t\t\t\tif dDevIdDict[sKey] not in lsDeviceList:\n\n\t\t\t\t\tlsDeviceList.append(dDevIdDict[sKey])\n\n\t# Return --------------------------------------------\n\treturn lsDeviceList\n\n\n# ---------------------------------------------------\n# - Command Analysis -\n# ---------------------------------------------------\ndef funCommandAnalysis (cWhatsappListener, lPrevCounter, vTab = \"\"):\n\n\t# Set variables -------------------------------------\n\tsComment = \"Command not executed, sorry.\"\n\tbExit = False\n\tvNextTab = \" \"\n\tvPathLog = vega_SetVariables_mod.funSetVariable('WAPP_LOG')\n\tsCurrMsg = cWhatsappListener.lvMessage[lPrevCounter][2]\n\t#sCurrMsg = sCurrMsg.decode('utf-8').encode('ascii','replace')\n\tsCurrMsg = sCurrMsg.decode('utf-8').upper()\n\n\t# Check exit condition ------------------------------\n\tif sCurrMsg.find(u'ADIÓS') >= 0:\n\t\tbExit = True\n\t\tsComment = \"Bye\"\n\t\treturn bExit, sComment\n\n\t# Get Device List -----------------------------------\n\tlsDevice = funExtractDeviceListFromMsg (sCurrMsg)\n\tvega_Log_mod.funAppendToLog ( vTab + vNextTab + \"Device List:\\t\" + str(lsDevice), 
vLogFile = vPathLog )\n\t\n\t# Get Action ----------------------------------------\n\tsAction = funExtractActionFromMsg (sCurrMsg)\n\tvega_Log_mod.funAppendToLog ( vTab + vNextTab + \"Action:\\t\" + str(sAction), vLogFile = vPathLog )\n\n\t# Execute Action ------------------------------------\n\tif lsDevice <> [] and sAction <> \"\":\n\n\t\t# Execute command ------------------------------------\n\t\tlsCommand, bExit = funExecuteComand (lsDevice, sAction)\n\t\tif bExit == True:\n\t\t\tsComment = \"Done!\"\n\t\t\tsResult = \"OK\"\n\t\telse:\n\t\t\tsComment = \"Unable to execute command.\"\n\t\t\tsResult = \"NOK\"\n\n\t\t# Log -----------------------------------------------\n\t\tvega_Log_mod.funAppendToLog ( vTab + vNextTab + \"Executing:\\t\" + sResult + \" (\" + sComment[:-1] + \")\", vLogFile = vPathLog )\n\n\n\t# Exit ----------------------------------------------\n\treturn False, sComment\n\n# ---------------------------------------------------\n# - T O P -\n# ---------------------------------------------------\nimport time\nimport pdb\nimport os\n\nimport vega_Log_mod\nimport vega_SetVariables_mod\n" }, { "alpha_fraction": 0.48291322588920593, "alphanum_fraction": 0.48533689975738525, "avg_line_length": 35.19078826904297, "blob_id": "c9ba3ac99795d12168b6308c479e53c0008d45b2", "content_id": "9f6f4aaab8a3085f5e9f37a5d8c44370ae2c43fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16504, "license_type": "no_license", "max_line_length": 134, "num_lines": 456, "path": "/Whatsapp/vega_Whatsapp_mod.py", "repo_name": "maduenyo/Vega", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# coding: latin-1\n\n# ---------------------------------------------------\n# - Whatsapp Set Pic -\n# ---------------------------------------------------\ndef funWhatsappSetPic (cWhatsappCmdClient, sPicFile, sTab=\" \"):\n\n\t# Set variables -------------------------------------\n\tsNextTab = \" \"\n\tbSuccess = True\n\tsComment 
= \"OK\"\n\tvPathLog = vega_SetVariables_mod.funSetVariable('WAPP_LOG')\n\n\t# Set profile picture -------------------------------\n\tvega_Log_mod.funAppendToLog ( sTab + \"Setting pic:\\t\", vEnd=\"\", vLogFile=vPathLog )\n\n\t# Try up to 5 times ---------------------------------\n\tfor lIteration in range(0,5):\n\n\t\t# Set status ----------------------------------------\n\t\tcWhatsappCmdClient.setProfile(sPicFile)\n\t\ttime.sleep(1)\n\n\t\t# Check pic file ------------------------------------\n\t\tif cWhatsappCmdClient.bSetProfileFile == False:\n\n\t\t\t# Profile file NOK ----------------------------------\n\t\t\tbSuccess = False\n\t\t\tsComment = \"NOK (Pic file not found)\"\n\t\t\tvega_Log_mod.funAppendToLog ( sComment, vLogHeader=False, vLogFile=vPathLog )\n\n\t\t\t# Exit ----------------------------------------------\n\t\t\treturn bSuccess, sComment\n\n\t\t# Check set pic -------------------------------------\n\t\telif cWhatsappCmdClient.bSetProfileSuccess == False:\n\n\t\t\t# Set pic NOK ---------------------------------------\n\t\t\tbSuccess = False\n\t\t\tsComment = \".\"\n\t\t\tvega_Log_mod.funAppendToLog ( sComment, vLogHeader=False, vEnd=\"\", vLogFile=vPathLog )\n\t\t\ttime.sleep(5)\n\n\t\telse:\n\n\t\t\t# Log -----------------------------------------------\n\t\t\tsComment = \"OK\"\n\t\t\tvega_Log_mod.funAppendToLog ( sComment, vLogHeader=False, vLogFile=vPathLog )\n\t\t\tbreak\n\n\n\t# Check bSuccess -------------------------------------\n\tif bSuccess == False:\n\n\t\t# Set pic NOK ---------------------------------------\n\t\tbSuccess = False\n\t\tsComment = \"Unable to set pic\"\n\t\tvega_Log_mod.funAppendToLog ( \"NOK (\" + sComment + \")\", vLogHeader=False, vLogFile=vPathLog )\n\n\n\t# Exit ----------------------------------------------\n\treturn bSuccess, sComment\n\n\n# ---------------------------------------------------\n# - Whatsapp Set Status -\n# ---------------------------------------------------\ndef funWhatsappSetStatus 
(cWhatsappCmdClient, sStatus, sTab=\" \"):\n\n\t# Set variables -------------------------------------\n\tsNextTab = \" \"\n\tbSuccess = True\n\tsComment = \"OK\"\n\tsStatus = \"At your command\"\n\tvPathLog = vega_SetVariables_mod.funSetVariable('WAPP_LOG')\n\n\t# Set status ----------------------------------------\n\tvega_Log_mod.funAppendToLog ( sTab + \"Setting status:\\t\", vEnd=\"\", vLogFile=vPathLog )\n\n\t# Try up to 5 times ---------------------------------\n\tfor lIteration in range(0,5):\n\n\t\t# Set status ----------------------------------------\n\t\tcWhatsappCmdClient.setStatus(sStatus)\n\t\ttime.sleep(1)\n\n\t\t# Check status --------------------------------------\n\t\tif cWhatsappCmdClient.bSetStatus == False:\n\n\t\t\t# Set Status NOK ------------------------------------\n\t\t\tbSuccess = False\n\t\t\tsComment = \".\"\n\t\t\tvega_Log_mod.funAppendToLog ( sComment, vLogHeader=False, vEnd=\"\", vLogFile=vPathLog )\n\t\t\ttime.sleep(5)\n\n\t\telse:\n\n\t\t\t# Log -----------------------------------------------\n\t\t\tsComment = \"OK\"\n\t\t\tvega_Log_mod.funAppendToLog ( sComment, vLogHeader=False, vLogFile=vPathLog )\n\t\t\tbreak\n\n\n\t# Check bSuccess -------------------------------------\n\tif bSuccess == False:\n\n\t\t# Set Status NOK ------------------------------------\n\t\tsComment = \"Unable to set status\"\n\t\tvega_Log_mod.funAppendToLog ( \"NOK (\" + sComment + \")\", vLogHeader=False, vLogFile=vPathLog )\n\n\t# Exit ----------------------------------------------\n\treturn bSuccess, sComment\n\n\n# ---------------------------------------------------\n# - Whatsapp Login -\n# ---------------------------------------------------\ndef funWhatsappLogin (cWhatsappCmdClient, sUserName, sPassword, sTab=\" \"):\n\n\t# Set variables -------------------------------------\n\tsNextTab = \" \"\n\tbSuccess = True\n\tsComment = \"OK\"\n\tvPathLog = vega_SetVariables_mod.funSetVariable('WAPP_LOG')\n\n\t# Login 
---------------------------------------------\n\tvega_Log_mod.funAppendToLog ( sTab + \"Authenticating:\\t\", vEnd=\"\", vLogFile=vPathLog )\n\tcWhatsappCmdClient.login(sUserName, sPassword)\n\ttime.sleep(1)\n\n\t# Check login ---------------------------------------\n\tif cWhatsappCmdClient.bAuthSuccess == False:\n\n\t\t# Login NOK -----------------------------------------\n\t\tbSuccess = False\n\t\tsComment = \"NOK (Authentication failed)\"\n\t\tvega_Log_mod.funAppendToLog ( sComment, vLogHeader=False, vLogFile=vPathLog )\n\t\treturn bSuccess, sComment\n\n\telse:\n\n\t\t# Log -----------------------------------------------\n\t\tsComment = \"OK\"\n\t\tvega_Log_mod.funAppendToLog ( sComment, vLogHeader=False, vLogFile=vPathLog )\n\n\t# Exit ----------------------------------------------\n\treturn bSuccess, sComment\n\n# ---------------------------------------------------\n# - Connect To Whatsapp -\n# ---------------------------------------------------\ndef funConnectToWhatsapp (sTab=\"\"):\n\n\t# Set variables -------------------------------------\n\tsNextTab = \" \"\n\tbSuccess = True\n\tvPathLog = vega_SetVariables_mod.funSetVariable('WAPP_LOG')\n\tsUserName = vega_SetVariables_mod.funSetVariable('WAPP_USERNAME')\n\tsPassword = vega_SetVariables_mod.funSetVariable('WAPP_PASSWORD')\n\tsPassword = base64.b64decode(bytes(sPassword.encode('utf-8')))\n\tsStatus = vega_SetVariables_mod.funSetVariable('WAPP_STATUS')\n\tsPicFile = vega_SetVariables_mod.funSetVariable('WAPP_PIC')\n\tcWhatsappCmdClient = Examples.CmdClient.WhatsappCmdClient(phoneNumber='', keepAlive = True, sendReceipts = True)\n\n\t# Log -----------------------------------------------\n\tvega_Log_mod.funAppendToLog ( sTab + \"Connecting to Whatsapp server:\", vLogFile=vPathLog )\n\n\t# ---------------------------------------------------\n\t# Login \n\t# ---------------------------------------------------\n\tbSuccess, sComment = funWhatsappLogin (cWhatsappCmdClient, sUserName, sPassword, sNextTab)\n\tif 
bSuccess == False:\n\n\t\t# Login NOK -----------------------------------------\n\t\treturn cWhatsappCmdClient, bSuccess, sComment\n\n\t# ---------------------------------------------------\n\t# Set status\n\t# ---------------------------------------------------\n\tbSuccess, sComment = funWhatsappSetStatus (cWhatsappCmdClient, sStatus, sNextTab)\n\tif bSuccess == False:\n\n\t\t# Set status NOK ------------------------------------\n\t\treturn cWhatsappCmdClient, bSuccess, sComment\n\n\t# ---------------------------------------------------\n\t# Set Pic\n\t# ---------------------------------------------------\n\tbSuccess, sComment = funWhatsappSetPic (cWhatsappCmdClient, sPicFile, sNextTab)\n\tif bSuccess == False:\n\n\t\t# Set pic NOK ---------------------------------------\n\t\treturn cWhatsappCmdClient, bSuccess, sComment\n\n\t# ---------------------------------------------------\n\t# Set presence\n\t# ---------------------------------------------------\n\n\n\n\n\t\n\n\t# Exit ----------------------------------------------\n\t# Ignore errors in set status / set pic\n\tbSuccess = True\n\treturn cWhatsappCmdClient, bSuccess, sComment\n\n\n# ---------------------------------------------------\n# - Reconnect To Whatsapp -\n# ---------------------------------------------------\ndef funReconnectToWhatsapp (sComment, sTab=\"\"):\n\n\t# Set variables -------------------------------------\n\tsNextTab = \" \"\n\tvPathLog = vega_SetVariables_mod.funSetVariable('WAPP_LOG')\n\n\t# Log -----------------------------------------------\n\tvega_Log_mod.funAppendToLog ( \" \", vLogFile=vPathLog )\n\tvega_Log_mod.funAppendToLog ( sTab + \"Disconnected from Whatsapp server:\", vLogFile=vPathLog )\n\tvega_Log_mod.funAppendToLog ( sTab + sNextTab + \"Reason:\\t\" + sComment, vLogFile=vPathLog )\n\n\t# Log -----------------------------------------------\n\tvega_Log_mod.funAppendToLog ( \" \", vLogFile=vPathLog )\n\tvega_Log_mod.funAppendToLog ( sTab + \"Retrying connection in 5 seconds\", 
vEnd='', vLogFile=vPathLog )\n\tfor vItem in range(0,4):\n\t\tvega_Log_mod.funAppendToLog ( \".\", vEnd='', vLogHeader=False, vLogFile=vPathLog )\n\t\ttime.sleep(1)\n\tvega_Log_mod.funAppendToLog ( \".\", vLogHeader=False, vLogFile=vPathLog )\n\n\t# Log -----------------------------------------------\n\tvega_Log_mod.funAppendToLog ( \" \", vLogFile=vPathLog )\n\n\n\t# Retry connection ----------------------------------\n\ttime.sleep(5)\n\tcWhatsappCmdClient, bSuccess, sComment = funConnectToWhatsapp(sTab)\n\tif bSuccess == False:\n\n\t\t# Log -----------------------------------------------\n\t\tsComment = \"Unable to re-connect\"\n\t\tvega_Log_mod.funAppendToLog ( \" \", vLogFile=vPathLog )\n\t\tvega_Log_mod.funAppendToLog ( sTab + \"Disconnected from Whatsapp server:\", vLogFile=vPathLog )\n\t\tvega_Log_mod.funAppendToLog ( sTab + sNextTab + \"Reason:\\t\" + sComment, vLogFile=vPathLog )\n\n\t\t# Disconnected --------------------------------------\n\t\treturn cWhatsappCmdClient, bSuccess, sComment\n\n\t# Log -----------------------------------------------\n\tvega_Log_mod.funAppendToLog ( \" \", vLogFile=vPathLog )\n\tvega_Log_mod.funAppendToLog ( sTab + \"Listening to incomming messages:\", vLogFile=vPathLog )\n\n\t# Exit ----------------------------------------------\n\tvExit = False\n\treturn cWhatsappCmdClient, bSuccess, sComment\n\n\n# ---------------------------------------------------\n# - Whatsapp Set Variables Incoming Msg -\n# ---------------------------------------------------\ndef funWhatsappSetVariablesIncomingMsg (lvMessage):\n\n\t# Set variables -------------------------------------\n\tsJid = lvMessage[0]\n\tsPhone = sJid.split(\"@\")[0]\n\tsRxTimestamp = lvMessage[1]\n\tsRxMessage = lvMessage[2]\n\t# sRxMessage = sRxMessage.decode('utf-8').encode('ascii','replace')\n\tsRxMessage = sRxMessage.decode('utf-8').encode('latin-1')\n\n\t# Exit ----------------------------------------------\n\treturn sJid, sPhone, sRxTimestamp, sRxMessage\n\n\n# 
---------------------------------------------------\n# - Get Party Name From Phone Number -\n# ---------------------------------------------------\ndef funGetPartyNameFromPhoneNumber (sPhone):\n\n\t# Set varaibles -------------------------------------\n\tlsCaller = vega_SetVariables_mod.funSetVariable('WAPP_CALLER')\n\t\n\t# Check Phone number --------------------------------\n\tif sPhone in lsCaller:\n\n\t\t# Name found ----------------------------------------\n\t\tsPartyName = lsCaller[sPhone]\n\n\telse:\n\n\t\t# Name not found ------------------------------------\n\t\tsPartyName = sPhone\n\n\t# Exit ----------------------------------------------\n\treturn sPartyName\n\n\n# ---------------------------------------------------\n# - Check Whatsapp Message -\n# ---------------------------------------------------\ndef funCheckWhatsappMessage (cWhatsappCmdClient, lPrevCounter, sTab=\"\"):\n\n\t# Set variables -------------------------------------\n\tsNextTab = \" \"\n\tbExitWhile = False\n vPathLog = vega_SetVariables_mod.funSetVariable('WAPP_LOG')\n\tlsCaller = vega_SetVariables_mod.funSetVariable('WAPP_CALLER')\n\tlCurrCounter = len(cWhatsappCmdClient.lvMessage)\n\n\t# Check if new message ------------------------------\n\tif lCurrCounter > lPrevCounter:\n\n\t\t# Set variables -------------------------------------\n\t\tsJid, sPhone, sRxTimestamp, sRxMessage = funWhatsappSetVariablesIncomingMsg (cWhatsappCmdClient.lvMessage[lPrevCounter])\n\t\tsPartyName = funGetPartyNameFromPhoneNumber(sPhone)\n\n\t\t# Log -----------------------------------------------\n\t\tvega_Log_mod.funAppendToLog ( sTab + sNextTab + \"RX (\" + sPartyName + \" - \" + sRxTimestamp + \"): \" + sRxMessage, vLogFile=vPathLog )\n\n\t\t# Check if vega command -----------------------------\n\t\tif sRxMessage.upper().find(\"VEGA\") >=0 and sPhone in lsCaller:\n\n\t\t\t# Interpret Command ---------------------------------\n\t\t\tbExitWhile, sTxMessage = 
vega_Whatsapp_CommandAnalysis_mod.funCommandAnalysis (cWhatsappCmdClient, lPrevCounter, sTab + sNextTab)\n\n\t\t\t# Set variables -------------------------------------\n\t\t\tcWhatsappCmdClient.sendMessage(sJid, sTxMessage)\n\t\t\tsTxTimestamp = vega_CommonFunctions_mod.funGetTimestamp()\n\n\t\t\t# Answering to message ------------------------------\n\t\t\tvega_Log_mod.funAppendToLog ( sTab + sNextTab + \"TX (\" + \"Vega\" + \" - \" + sTxTimestamp + \"): \" + sTxMessage, vLogFile=vPathLog )\n\n\t\t# Set variables -------------------------------------\n\t\tlPrevCounter = lPrevCounter + 1\n\n\t# Exit ---------------------------------------------\n\treturn lPrevCounter, bExitWhile\n\n# ---------------------------------------------------\n# - Whatsapp Loop -\n# ---------------------------------------------------\ndef funWhatsappLoop (cWhatsappCmdClient, sTab=\"\"):\n\n\t# Set variables -------------------------------------\n\tsNextTab = \" \"\n\tlPrevCounter = 0\n\tlCurrCounter = 0\n\tlsCaller = vega_SetVariables_mod.funSetVariable('WAPP_CALLER')\n vPathLog = vega_SetVariables_mod.funSetVariable('WAPP_LOG')\n\n\t# Log -----------------------------------------------\n\tvega_Log_mod.funAppendToLog ( \" \", vLogFile=vPathLog )\n\tvega_Log_mod.funAppendToLog ( sTab + \"Listening to incomming messages:\", vLogFile=vPathLog )\n\n\t# Loop for messages ---------------------------------\n\twhile True:\n\n\t\t# Check if disconnected -----------------------------\n\t\tif cWhatsappCmdClient.bDisconnected == True:\n\n\t\t\t# Reset variables -----------------------------------\n\t\t\tlCurrCounter = 0\n\t\t\tlPrevCounter = 0\n\t\t\tsComment = cWhatsappCmdClient.sDisconnectReason\n\t\t\tdel cWhatsappCmdClient\n\n\t\t\t# Reconnect To Whatsapp -----------------------------\n\t\t\tcWhatsappCmdClient, bSuccess, sComment = funReconnectToWhatsapp (sComment, sTab)\n\t\t\tif bSuccess == False:\n\n\t\t\t\t# Exit ----------------------------------------------\n\t\t\t\treturn sComment\n\n\t\t# 
Check if Message List not empty -------------------\n\t\telif cWhatsappCmdClient.lvMessage <> []:\n\n\t\t\t# Check whatsapp message ------------------------------\n\t\t\tlPrevCounter, bExitWhile = funCheckWhatsappMessage (cWhatsappCmdClient, lPrevCounter, sTab)\n\t\t\tif bExitWhile == True:\n\n\t\t\t\t# Exit While ----------------------------------------\n\t\t\t\tbreak\n\n\t\t# Wait for incoming message -------------------------\n\t\ttime.sleep(0.5)\n\n\t\t# DEBUG #############################################\n\t\t#if lCurrCounter == 3:\n\t\t\t#pdb.set_trace()\n\t\t\t#cWhatsappCmdClient.connectionManager.disconnect(\"DEBUG\")\n\t\t# DEBUG #############################################\n\n\t# Disconnect ----------------------------------------\n\tsComment = \"User request\"\n\tcWhatsappCmdClient.connectionManager.disconnect(sComment)\n\n\t# Log -----------------------------------------------\n\tvega_Log_mod.funAppendToLog ( \" \", vLogFile=vPathLog )\n\tvega_Log_mod.funAppendToLog ( sTab + \"Disconnected from Whatsapp server:\", vLogFile=vPathLog )\n\tvega_Log_mod.funAppendToLog ( sTab + sNextTab + \"Reason:\\t\" + sComment, vLogFile=vPathLog )\n\n\t# Exit ----------------------------------------------\n\treturn sComment\n\n\n# ---------------------------------------------------\n# - main -\n# ---------------------------------------------------\ndef main():\n\n\t# Set variables -------------------------------------\n\tsTab = \"\"\n\tsNextTab = \" \"\n vPathLog = vega_SetVariables_mod.funSetVariable('WAPP_LOG')\n\n\t# Connect to Whatsapp -------------------------------\n\tcWhatsappCmdClient, bSuccess, sComment = funConnectToWhatsapp(sTab)\n\tif bSuccess == True:\n\n\t\t# Loop ----------------------------------------------\n\t\tsComment = funWhatsappLoop (cWhatsappCmdClient, sTab)\n\n\telse:\n\t\t# Unable to connect ---------------------------------\n\t\tcWhatsappCmdClient.connectionManager.disconnect(sComment)\n\n\t\t# Log 
-----------------------------------------------\n\t\tvega_Log_mod.funAppendToLog ( \" \", vLogFile=vPathLog )\n\t\tvega_Log_mod.funAppendToLog ( \"Disconnected from Whatsapp server:\", vLogFile=vPathLog )\n\t\tvega_Log_mod.funAppendToLog ( sNextTab + \"Reason:\\t\" + sComment, vLogFile=vPathLog )\n\n\t\t# Exit ----------------------------------------------\n\t\treturn\n\n\t# Log ---------------------------------------\n\tvega_Log_mod.funAppendToLog ( \" \", vLogFile=vPathLog )\n\tvega_Log_mod.funAppendToLog ( \"Listening finished.\", vLogFile=vPathLog )\n\n\n# ---------------------------------------------------\n# - T O P -\n# ---------------------------------------------------\nimport datetime\nimport base64\nimport time\nimport sys\nimport pdb\n\nimport Examples.ListenerClient\nimport Examples.CmdClient\nimport vega_Log_mod\nimport vega_SetVariables_mod\nimport vega_CommonFunctions_mod\nimport vega_Whatsapp_CommandAnalysis_mod\n\n" }, { "alpha_fraction": 0.42222222685813904, "alphanum_fraction": 0.42535948753356934, "avg_line_length": 32.84955596923828, "blob_id": "4c9530739eb9892096b0c75dfa6c379861d15947", "content_id": "3b30d0460e5a66d812486bb7b7c2e42fc6307425", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3825, "license_type": "no_license", "max_line_length": 102, "num_lines": 113, "path": "/Path/vega_Path.py", "repo_name": "maduenyo/Vega", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n# Add root subdirectories to PYTHON PATH\n\n# ---------------------------------------------------\n# - Get Root Path -\n# ---------------------------------------------------\ndef funGetRootPath():\n\n\t# Set variables -------------------------------------\n\tvCurrPath = os.path.dirname(os.path.abspath(__file__))\n\tvDirName = \"/\" + vCurrPath.split(\"/\")[-1]\n\tvRootPath = vCurrPath[:-len(vDirName)]\n\n\t# Exit ----------------------------------------------\n\treturn vRootPath\n\n# 
---------------------------------------------------\n# - Get Subdirectory Path List -\n# ---------------------------------------------------\ndef funGetSubdirectoryPathList(vRootPath):\n\n\t# Set variables -------------------------------------\n\tvRootSubdirectoryList = []\n\tvRoorSubpathList = os.listdir(vRootPath)\n\n\t# Get subdirectory list -----------------------------\n\tfor vItem in vRoorSubpathList:\n\n\t\t# Set variables -------------------------------------\n\t\tvItem = vRootPath + \"/\" + vItem\n\n\t\t# Check subdirectory --------------------------------\n\t\tif os.path.isdir(vItem) == True and vItem.find(\"__pycache__\") < 0 and vItem.find(\"__init__.pyc\") <0:\n\t\t\tvRootSubdirectoryList.append(vItem)\n\n\t# Sort subdirectory list ----------------------------\n\tvRootSubdirectoryList.sort()\n\t\n\t# Exit ----------------------------------------------\n\treturn vRootSubdirectoryList\n\n\n# ---------------------------------------------------\n# - Update Python Path -\n# ---------------------------------------------------\ndef funUpdatePythonPath(vRootPath, vRootSubdirectoryPathList):\n\n\t# Set variables -------------------------------------\n\tvTab = \" \"\n\tvAddedPathList = []\n\n\t# Append to PYTHON PATH -----------------------------\n\tfor vRootSubdirectoryPath in vRootSubdirectoryPathList:\n\t\tif str(sys.path).find(vRootSubdirectoryPath) < 0:\n\t\t\tsys.path.append(vRootSubdirectoryPath)\n\t\t\tvAddedPathList.append(vRootSubdirectoryPath)\n\n\t# Import modules ------------------------------------\n\timport vega_Log_mod\n\n\t# Log -----------------------------------------------\n\t#vega_Log_mod.funAppendToLog ( \"-\" * 51 )\n\t#vega_Log_mod.funAppendToLog ( \"UPDATING PYTHON PATH\" )\n\t#vega_Log_mod.funAppendToLog ( vTab + \"Root directory found:\")\n\t#vega_Log_mod.funAppendToLog ( vTab * 2 + vRootPath)\n\n\t# Check vRootSubdirectoryPath ----------------------\n\tif vRootSubdirectoryPath != []:\n\n\t\t# Log 
----------------------------------------------\n\t\t#vega_Log_mod.funAppendToLog ( vTab + \"Its Subdirectories will be added to PYTHON PATH:\")\n\n\t\t# Add path to PYTHON PATH ----------------------------------\n\t\t#for vPath in vAddedPathList:\n\n\t\t\t# Log -------------------------------------------------------\n\t\t\t#vega_Log_mod.funAppendToLog (vTab * 2 + vPath)\n\n\t\t# Log -------------------------------------------------------\n\t\t#vega_Log_mod.funAppendToLog (vTab * 2 + \"Paths added:\\t\" + str(len(vAddedPathList)) )\n\n\t\tpass\n\n\t# Log -----------------------------------------------\n\t#vega_Log_mod.funAppendToLog ( \"-\" * 51 )\n\n# ---------------------------------------------------\n# - main -\n# ---------------------------------------------------\ndef main():\n\n\t# Get root path -------------------------------------\n\tvRootPath = funGetRootPath()\n\tif vRootPath == \"\":\n\t\treturn\n\n\t# Get subdirectory list -----------------------------\n\tvRootSubdirectoryPathList = funGetSubdirectoryPathList(vRootPath)\n\tif vRootSubdirectoryPathList == []:\n\t\treturn\n\n\t# Update PYTHON PATH --------------------------------\n\tfunUpdatePythonPath(vRootPath, vRootSubdirectoryPathList)\n\n\t\n# ---------------------------------------------------\n# - T O P - \n# ---------------------------------------------------\nimport pdb\nimport sys\nimport os\n\nmain()\n" }, { "alpha_fraction": 0.7958333492279053, "alphanum_fraction": 0.7958333492279053, "avg_line_length": 29, "blob_id": "f96e375974b5affb5444a5112ed3016e67f94195", "content_id": "bf9d58c687b54deab12296669ce28ca0da728be4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 243, "license_type": "no_license", "max_line_length": 66, "num_lines": 8, "path": "/README.md", "repo_name": "maduenyo/Vega", "src_encoding": "UTF-8", "text": "Vega\n====\n\nProyecto para Universidad Galileo\n\nEl programa principal es /Whatsapp/vega_Whatsapp.py que llama a 
la\nfunción Main en /Whatsapp/vega_Whatsapp_mod.py entrando así en un\nbucle infinito a la espera de recibir mensajes vía whatsapp.\n" }, { "alpha_fraction": 0.28985506296157837, "alphanum_fraction": 0.2910628020763397, "avg_line_length": 36.6363639831543, "blob_id": "0a43cc0defbcc951df624efbf64ed4367303d78d", "content_id": "69aaa975e0b3f4a639d3c454e3d71a86dc77ae77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 828, "license_type": "no_license", "max_line_length": 105, "num_lines": 22, "path": "/ScreenHeader/vega_ScreenHeader_mod.py", "repo_name": "maduenyo/Vega", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n# ---------------------------------------------------\n# - Print Header -\n# ---------------------------------------------------\ndef funPrintHeader (vHeader, vLogFile=\"\"):\n\n\t# Set variables -------------------------------------\n\tvLine = vHeader.upper()\n\n\t# Log -----------------------------------------------\n\tvega_Log_mod.funAppendToLog ( \"---------------------------------------------------\", vLogFile=vLogFile )\n\tvega_Log_mod.funAppendToLog ( vLine, vLogFile=vLogFile )\n\tvega_Log_mod.funAppendToLog ( \"---------------------------------------------------\", vLogFile=vLogFile )\n\n\n# ---------------------------------------------------\n# - T O P -\n# ---------------------------------------------------\nimport pdb\n\nimport vega_Log_mod\n" } ]
10
kundanchavan/QRcode_attendance_system
https://github.com/kundanchavan/QRcode_attendance_system
e688ccb94e214aa0dacee36f359dbd825eea48d8
22ba6f11488e86807ca1ce25c3c109a34a111aba
345a2048904d25cfd70413cf42e71bd5bc542739
refs/heads/master
"2022-11-17T06:30:44.318571"
"2020-07-09T16:50:53"
"2020-07-09T16:50:53"
278,420,072
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5777971744537354, "alphanum_fraction": 0.6110140085220337, "avg_line_length": 25.90243911743164, "blob_id": "d597e91d38df8a9a658633ed6c5568da7320d88c", "content_id": "ffc05667892f62f7ac60f33efa141746dfa1d7c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1144, "license_type": "no_license", "max_line_length": 86, "num_lines": 41, "path": "/qrCode attendance/scanner.py", "repo_name": "kundanchavan/QRcode_attendance_system", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nimport qrcode\r\nimport pymysql\r\n\r\n\r\ndef submit():\r\n name = e1.get()\r\n roll = e2.get()\r\n qr = qrcode.QRCode(\r\n version=1,\r\n box_size=10,\r\n border=5\r\n )\r\n all = (f'name:{name} rollno:{roll}')\r\n data = all\r\n qr.add_data(data)\r\n qr.make(fit=True)\r\n img=qr.make_image(fill='black', back_color='white')\r\n img.save(f'{name}.png')\r\n db = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"\", database=\"student\")\r\n mycursor=db.cursor()\r\n sq = \"insert into stud(Name, Rollno) values(%s, %s)\"\r\n b1 = (name, roll)\r\n mycursor.execute(sq,b1)\r\n db.commit()\r\n e1.delete(0, 'end')\r\n e2.delete(0,'end ')\r\n\r\n \r\n\r\nrot=Tk()\r\nrot.geometry('500x500')\r\nrot.title('form')\r\nl1 = Label(rot, text='student Name'); l1.grid(row=0, column=0)\r\nl2 = Label(rot, text='Roll No'); l2.grid(row=1, column=0)\r\nnamevalue=StringVar()\r\nrollvalue=StringVar()\r\ne1 = Entry(rot, textvariable=namevalue); e1.grid(row=0, column=1)\r\ne2 = Entry(rot, textvariable=rollvalue); e2.grid(row=1, column=1)\r\nb1 = Button(rot, text='generate QR code', command=submit); b1.grid(row=3, column=0)\r\nrot.mainloop()\r\n" }, { "alpha_fraction": 0.5466816425323486, "alphanum_fraction": 0.5646794438362122, "avg_line_length": 20.794872283935547, "blob_id": "a9b9ec769b87823e6505a5b50a5e8039e26622e9", "content_id": "eb04cff757b178dc7f77bae4b13256ac9ba23bd7", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 889, "license_type": "no_license", "max_line_length": 87, "num_lines": 39, "path": "/qrCode attendance/decoder.py", "repo_name": "kundanchavan/QRcode_attendance_system", "src_encoding": "UTF-8", "text": "import cv2\r\nimport pyzbar.pyzbar as pyzbar\r\nimport pybase64\r\nimport time\r\nimport pymysql\r\n\r\ndb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"\", database=\"student\")\r\nmycursor=db.cursor()\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nli = []\r\ndef add(s):\r\n li.append(s)\r\n sq1 = \"insert into presenty(name, presenty) value(%s, %s)\" #we can add more columns\r\n b2 = (s, 'present')\r\n mycursor.execute(sq1, b2)\r\n db.commit()\r\n \r\ndef check(data):\r\n data = str(data)\r\n if data in li:\r\n print('already present')\r\n else:\r\n print('present')\r\n add(data)\r\n \r\nwhile True:\r\n _,frame = cap.read()\r\n scanned = pyzbar.decode(frame)\r\n for obj in scanned:\r\n k = obj.data\r\n data = k.decode('utf-8')\r\n check(data)\r\n time.sleep(2)\r\n cv2.imshow(\"Frame\", frame)\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n" } ]
2
Ryuto10/100knock-2018
https://github.com/Ryuto10/100knock-2018
8ef76cc2a8175823b5af66c2cd65e61f61ac2b54
5d9646107eb181d6005daede474527eee04598b6
85991c12be5c3dede82ae58f5991d996c151ad3e
refs/heads/master
"2021-05-25T18:15:14.003677"
"2020-04-07T17:26:19"
"2020-04-07T17:26:19"
253,864,508
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3125, "alphanum_fraction": 0.75, "avg_line_length": 15, "blob_id": "3ee53e231b61d289e9adb986d5291693cde64fac", "content_id": "316fb7bbbca30773e69906d6cecaa8ff86e74292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 16, "license_type": "no_license", "max_line_length": 15, "num_lines": 1, "path": "/README.md", "repo_name": "Ryuto10/100knock-2018", "src_encoding": "UTF-8", "text": "# 100knock-2018\n" }, { "alpha_fraction": 0.5258427262306213, "alphanum_fraction": 0.5539326071739197, "avg_line_length": 32, "blob_id": "77c0ecf3d9e809da0d7518070906a7cef1dc4d26", "content_id": "12d98647fbc7bab829d0546a8f403c8c8b4f7e6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "no_license", "max_line_length": 80, "num_lines": 27, "path": "/chapter02/src/q016.py", "repo_name": "Ryuto10/100knock-2018", "src_encoding": "UTF-8", "text": "import sys,itertools\n\ndef filename(i): #ファイル命名\n return 'work/x'+chr(97+i//26)+chr(97+i%26)\n\ndef div_numlist(length,N): #何行ごとに区切るか\n return [length//N+1 if i < length%N else length//N for i in range(N)] #パターン1\n\nargs = sys.argv\n\nif len(args) != 2: #error1\n print(\"usage : q016.py [number]\")\nelif not args[1].isdigit() or int(args[1]) <= 0: #error2\n print(\"usage : number is natural number\")\nelse:\n with open(\"data/popular-names.txt\",\"r\") as text:\n lines = text.readlines() #メモリの浪費\n if(len(lines) < int(args[1])): #error3\n print(\"Number is too big.\")\n else:\n ls = list(div_numlist(len(lines),int(args[1]))) #メモリの浪費2\n m = 0\n for i in range(int(args[1])):\n newfile = open(filename(i),\"w\") \n newfile.writelines(lines[m:m+ls[i]])\n m += ls[i]\n newfile.close()" }, { "alpha_fraction": 0.5584415793418884, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 30.28125, "blob_id": "94bd7ce09682ce878286b0b1caef79ad57192685", "content_id": 
"bd68676c366ec9892af2e1922f7da38d60318a5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1055, "license_type": "no_license", "max_line_length": 80, "num_lines": 32, "path": "/chapter02/src/q016_2.py", "repo_name": "Ryuto10/100knock-2018", "src_encoding": "UTF-8", "text": "\nimport argparse,itertools\n\ndef len_iterable(iterable): #iterableの長さを返す\n return sum(1 for _ in iterable)\n\ndef parts_len(length,pieces):#何行ごとに区切るか,generator関数\n t = length//pieces\n return (t+1 if length%pieces else t for i in range(pieces)) #パターン2\n\ndef filename(i): #ファイル命名\n return 'work/x'+chr(97+i//26)+chr(97+i%26)\n\ndef main():\n length = len_iterable(args.file)\n args.file.seek(0)\n for i,n in enumerate(parts_len(length,args.pieces)):\n with open(filename(i),\"w\") as newfile:\n for line in itertools.islice(args.file,n):\n newfile.writelines(line)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description = 'Split a file into N pieces')\n parser.add_argument(\"file\",\n type=argparse.FileType('r'),\n )\n parser.add_argument(\"-n\",\"--pieces\",\n type=int,\n default=3,\n )\n args = parser.parse_args()\n \n main()" }, { "alpha_fraction": 0.5768116116523743, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 27.83333396911621, "blob_id": "3104b621d7d961f1222fee4df6c68e36a606ccc9", "content_id": "972fffc208f594ef670c806e06c2ef7d86949570", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 345, "license_type": "no_license", "max_line_length": 52, "num_lines": 12, "path": "/chapter02/src/q015.py", "repo_name": "Ryuto10/100knock-2018", "src_encoding": "UTF-8", "text": "import sys\nargs = sys.argv\n\nif len(args) != 2: #error1\n print(\"usage : q015.py [number]\")\nelif not args[1].isdigit(): #error2\n print(\"usage : number is natural number\")\nelse:\n with open(\"data/popular-names.txt\",\"r\") as text:\n lines = text.readlines()\n for line in 
lines[-int(args[1]):]:\n print(line.strip('\\n'))" } ]
4
dajomi/algorithm
https://github.com/dajomi/algorithm
a093b33b68b96ab1666e49e5c3c45450f4132bf6
f9249ccf3cd87e9aab626f44b1b77ca2adfc3010
ff84eafeb577f663d3fc94dd764806fc4720a153
refs/heads/master
"2023-06-18T02:21:18.387789"
"2021-07-19T04:13:54"
"2021-07-19T04:13:54"
380,516,875
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.54935622215271, "alphanum_fraction": 0.5665236115455627, "avg_line_length": 28.25, "blob_id": "1d6959fcaf51eb7f3c5f1029b67238f1610e599e", "content_id": "feecf7ca49140c65990cb66aa38e57fa3a053a93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 40, "num_lines": 8, "path": "/algorithm/9093.py", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "testcase = int(input())\nfor i in range(testcase):\n wordlst = ''\n wordlst += input()\n wordlst = wordlst[::-1]\n wordlst = list(wordlst.split())\n for i in range(len(wordlst), 0, -1):\n print(wordlst[i-1], end = ' ')" }, { "alpha_fraction": 0.5644599199295044, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 19.5, "blob_id": "b5e9de2b0ecfd5bc9d6fe9c36339201ac5b57c27", "content_id": "cc7733bd98e4fd102b002a89a7320293923debac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 287, "license_type": "no_license", "max_line_length": 38, "num_lines": 14, "path": "/algorithm/1927.py", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "import heapq\nimport sys\n\ntestcase = int(sys.stdin.readline())\nlst = []\nfor i in range(testcase) :\n number = int(sys.stdin.readline())\n if number != 0 :\n heapq.heappush(lst, number)\n else :\n try :\n print(heapq.heappop(lst))\n except :\n print(0)\n" }, { "alpha_fraction": 0.5004897117614746, "alphanum_fraction": 0.5171400308609009, "avg_line_length": 17.25, "blob_id": "8076eefa2ffb0f9808ca0767d6ebf9a34f1c1a66", "content_id": "e9daf1ce880a0921f545bbe630d7b796043e510e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1021, "license_type": "no_license", "max_line_length": 46, "num_lines": 56, "path": "/algorithm/10828_stack.cpp", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "#include 
<vector>\n#include <iostream>\n#include <string>\nclass stackdata {\n\tstd::vector<int> intdata;\n\tint cnt = 0;\npublic:\n\tvoid push(int x) {\n\t\tintdata.push_back(x);\n\t\tcnt++;\n\t}\n\tvoid size() {\n\t\tstd::cout << cnt << std::endl;\n\t}\n\tvoid pop() {\n\t\tif (cnt == 0) std::cout << -1 << std::endl;\n\t\telse {\n\t\t\tstd::cout << intdata[cnt - 1] << std::endl;\n\t\t\tintdata.pop_back();\n\t\t\tcnt--;\n\t\t}\n\t}\n\tvoid empty() {\n\t\tif (cnt == 0) std::cout << 1 << std::endl;\n\t\telse { std::cout << 0 << std::endl; }\n\t}\n\tvoid top() {\n\t\tif (cnt == 0) std::cout << -1 << std::endl;\n\t\telse {\n\t\t\tstd::cout << intdata[cnt - 1] << std::endl;\n\t\t}\n\t}\n};\n\nint main() {\n\tstackdata d1;\n\tint size;\n\tstd::cin >> size;\n\tstd::string command;\n\tint num;\n\tfor (int i = 0; i < size; i++) {\n\t\tstd::cin >> command;\n\n\t\tif (command == \"push\") {\n\t\t\tstd::cin >> num;\n\t\t\td1.push(num);\n\t\t}\n\t\telse if (command == \"size\") {\n\t\t\td1.size();\n\t\t}\n\t\telse if (command == \"pop\") { d1.pop(); }\n\t\telse if (command == \"empty\") { d1.empty(); }\n\t\telse if (command == \"top\") { d1.top(); }\n\n\t}\n}" }, { "alpha_fraction": 0.5142857432365417, "alphanum_fraction": 0.5285714268684387, "avg_line_length": 24, "blob_id": "aa2d1cb3b2a88835b56c806a66ffdd06dd912c4d", "content_id": "917d4286d7ff3dbb630f5fc3bc8e9c5613ed70f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 49, "num_lines": 14, "path": "/algorithm/3062.py", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "iter = int(input())\nfor i in range(iter):\n num = input()\n slicedString=num[::-1]\n hap = str(int(num) + int(slicedString))\n begin = 0\n end = len(hap) - 1\n result = True\n while begin < end:\n if hap[begin] != hap[end]: result = False\n begin+=1\n end-=1\n if (result == True):print(\"YES\")\n else: print(\"NO\")\n" }, { "alpha_fraction": 
0.5494071245193481, "alphanum_fraction": 0.5573122501373291, "avg_line_length": 27.22222137451172, "blob_id": "a39d588a24f617be65fa06fca701a18269489a0f", "content_id": "3a8259367e2fda2c7633dfcea7806075b8b165d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 253, "license_type": "no_license", "max_line_length": 49, "num_lines": 9, "path": "/algorithm/2154.py", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "numstr = input()\ncontinuedNum = ''\nfor i in range(int(numstr)) :\n continuedNum += str(i + 1)\n length = len(numstr)\n for i in range(len(continuedNum)) :\n if continuedNum[i:i + length] == numstr :\n print(i + 1)\n break" }, { "alpha_fraction": 0.6130536198616028, "alphanum_fraction": 0.61771559715271, "avg_line_length": 27.600000381469727, "blob_id": "ed9814d6176d0d49b4dffb4b831f29f12c74ada2", "content_id": "232fb2b248483938e347ec7638f02913c0fb5471", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 51, "num_lines": 15, "path": "/algorithm/1920.py", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "import sys\ntrain, test = map(int,sys.stdin.readline().split())\nmonsters = dict()\nfor i in range(train):\n m = sys.stdin.readline().split()\n m = ''.join(m)\n monsters[m] = i+1\nlst_monster = list(monsters.keys())\nfor i in range (test):\n monster = sys.stdin.readline().split()\n monster = ''.join(monster)\n if (monster.isdigit()):\n print(lst_monster[int(monster)-1])\n else:\n print(monsters[monster])\n" }, { "alpha_fraction": 0.6349862217903137, "alphanum_fraction": 0.641873300075531, "avg_line_length": 39.27777862548828, "blob_id": "4bbeb2016594e1f1a27498dc0e8cdd98f6d2e3f5", "content_id": "5bc2fa2fe3e42ffa8947b0905bc5b5a6e6e3678b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 726, "license_type": 
"no_license", "max_line_length": 64, "num_lines": 18, "path": "/algorithm/1966.py", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "import sys\nnum = int(sys.stdin.readline())\n\ndef findprintorder(numbers, findidx, numofnumber):\n idxlst = [i for i in range(numofnumber)]\n sortednumbers = sorted(numbers, reverse = True)\n for i in range(numofnumber):\n while(numbers[i] != sortednumbers[i]):\n numbers = numbers[:i] + numbers[i+1:] + [numbers[i]]\n idxlst = idxlst[:i] + idxlst[i+1:] + [idxlst[i]]\n idx = [i for i in range(numofnumber)]\n dic = { name:value for name, value in zip(idxlst, idx) }\n print(dic[findidx]+1)\n\nfor i in range(num):\n numofnumber, findidx = map(int,sys.stdin.readline().split())\n numbers = list(map(21int, sys.stdin.readline().split()))\n findprintorder(numbers, findidx, numofnumber)\n\n" }, { "alpha_fraction": 0.5535390377044678, "alphanum_fraction": 0.5662431716918945, "avg_line_length": 19.44444465637207, "blob_id": "8959f07fd6fafa8c0d17ef4eab05eff084b54ac6", "content_id": "b1936a66d10b3aab8944d4d3d8325146dbc80f7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 551, "license_type": "no_license", "max_line_length": 55, "num_lines": 27, "path": "/algorithm/2502.cpp", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\nint ddeok;\nstd::vector<int> pibo(int day, int first, int second) {\n\tif (day == 3 && first <= second) {\n\t\tstd::vector<int> vec = { first, second };\n\t\treturn vec;\n\t}\n\tif (first <= second) {\n\t\treturn pibo(day - 1, second - first, first);\n\t}\n\treturn {};\n}\nint main() {\n\tint day;\n\tstd::cin >> day >> ddeok;\n\tstd::vector<int> arr = {};\n\tint first = ddeok / 2, second;\n\tdo {\n\t\tsecond = ddeok - first;\n\t\tarr = pibo(day, first, second);\n\t\tfirst -= 1;\n\t} while (arr.size() == 0);\n\n\n\tstd::cout << arr[0] << '\\n' << arr[1];\n}" }, { "alpha_fraction": 0.6185897588729858, 
"alphanum_fraction": 0.6196581125259399, "avg_line_length": 24.29729652404785, "blob_id": "1911203a3fc37d43517a5e27a29d7526c1fa122b", "content_id": "ba0093e0f744a1242a9f3e01076830b922eea9e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1872, "license_type": "no_license", "max_line_length": 75, "num_lines": 74, "path": "/algorithm/1991.cpp", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nstruct Node {\n\tNode* leftp = nullptr;\n\tNode* rightp = nullptr;\n\tchar route = NULL;\n\tNode(char route, char left, char right) :route(route) {\n\t\tif (left != '.') this->leftp = new Node(left, '.', '.');\n\t\tif (right != '.') this->rightp = new Node(right, '.', '.');\n\t}\n};\n\n\nvoid findNodeAndAddNode(Node* root_node, Node* patch_node) {\n\tif (patch_node->leftp == nullptr && patch_node->rightp == nullptr) return;\n\n\tif (root_node->leftp != nullptr) {\n\t\tif (root_node->leftp->route == patch_node->route) {\n\t\t\troot_node->leftp = patch_node;\n\t\t\treturn;\n\t\t}\n\t}\n\t\n\tif (root_node->rightp != nullptr) {\n\t\tif (root_node->rightp->route == patch_node->route) {\n\t\t\troot_node->rightp = patch_node;\n\t\t\treturn;\n\t\t}\n\t}\n\tif (root_node->leftp != nullptr) {\n\t\tfindNodeAndAddNode(root_node->leftp, patch_node);\n\t}\n\tif (root_node->rightp != nullptr) {\n\t\tfindNodeAndAddNode(root_node->rightp, patch_node);\n\t}\n}\nvoid preorder(Node* node) {\n\tstd::cout << node->route << ' ';\n\tif (node->leftp!= nullptr) preorder(node->leftp);\n\tif (node->rightp != nullptr) preorder(node->rightp);\n}\nvoid inorder(Node* node) {\n\tif (node->leftp != nullptr) inorder(node->leftp);\n\tstd::cout << node->route << ' ';\n\tif (node->rightp != nullptr) inorder(node->rightp);\n}\nvoid postorder(Node* node) {\n\tif (node->leftp != nullptr) postorder(node->leftp);\n\tif (node->rightp != nullptr) postorder(node->rightp);\n\tstd::cout << node->route << ' ';\n}\n\nNode* 
route_node;\n\nint main() {\n\tint numofnode;\n\tchar route, left, right;\n\tstd::cin >> numofnode;\n\tfor (int i = 0; i < numofnode; i++) {\n\t\tstd::cin >> route >> left >> right;\n\t\tif (i == 0) {\n\t\t\troute_node = new Node(route, left, right);\n\t\t}\n\t\telse {\n\t\t\tNode* n = new Node(route, left, right);\n\t\t\tfindNodeAndAddNode(route_node, n);\n\t\t}\n\t}\n\tpreorder(route_node);\n\tstd::cout << std::endl;\n\tinorder(route_node);\n\tstd::cout << std::endl;\n\tpostorder(route_node);\n}\n" }, { "alpha_fraction": 0.43939393758773804, "alphanum_fraction": 0.4484848380088806, "avg_line_length": 16.421052932739258, "blob_id": "7665f54d2e44c9ce253966c13a01a4b5a9b471e8", "content_id": "92ba7cf5eb191ae277b64dad7280a8ad970dc935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 330, "license_type": "no_license", "max_line_length": 43, "num_lines": 19, "path": "/algorithm/2164.cpp", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <queue>\n \n\nint main() {\n int numofnumber;\n std::cin >> numofnumber;\n std::queue<int> q;\n for (int i = 0; i < numofnumber; i++) {\n q.push(i + 1);\n }\n while (q.size() > 1) {\n q.pop();\n q.push(q.front());\n q.pop();\n }\n \n std::cout << q.front();\n}" }, { "alpha_fraction": 0.5433213114738464, "alphanum_fraction": 0.5740072131156921, "avg_line_length": 18.821428298950195, "blob_id": "37e9a857ad3ead354d4e8070025397695270cfb7", "content_id": "618d3fea3b51d49e890e2bbe3393d499805164c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 554, "license_type": "no_license", "max_line_length": 44, "num_lines": 28, "path": "/algorithm/2606.cpp", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\nstd::vector<int> a[100];\nint cnt = 0;\nvoid dfs(int start, int* visit) {\n\tif (visit[start]==true) {\n\t\treturn;\n\t}\n\tvisit[start] 
= true;\n\tcnt += 1;\n\tfor (int i = 0; i < a[start].size(); i++) {\n\t\tint x = a[start][i];\n\t\tdfs(x, visit);\n\t}\n}\nint main() {\n\tint numofcom, numofnet, com1, com2;\n\tstd::cin >> numofcom >> numofnet;\t\n\tint* visit = new int[numofcom];\n\tfor (int i = 0; i < numofnet; i++)\n\t{\n\t\tstd::cin >> com1 >> com2;\n\t\ta[com1].push_back(com2);\n\t\ta[com2].push_back(com1);\n\t}\n\tdfs(1, visit);\n\tstd::cout << cnt-1;\n}" }, { "alpha_fraction": 0.5760869383811951, "alphanum_fraction": 0.60326087474823, "avg_line_length": 20.705883026123047, "blob_id": "8b393d72d126a6b4b8522e3f546559582b1c6c14", "content_id": "b405d4aa1fde35368d015c2d9d69d8f50149db0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 368, "license_type": "no_license", "max_line_length": 52, "num_lines": 17, "path": "/algorithm/5347.cpp", "repo_name": "dajomi/algorithm", "src_encoding": "UTF-8", "text": "#include <iostream>\n\nlong int get_gcd(long int a, long int b) {\n\tif (b == 0) return a;\n\treturn get_gcd(b, a%b);\n}\n\nint main() {\n\tint testcase;\n\tstd::cin >> testcase;\n\tfor (int i = 0; i < testcase; i++) {\n\t\tlong int number1, number2;\n\t\tstd::cin >> number1 >> number2;\n\t\tlong int gcd = get_gcd(number1, number2);\n\t\tstd::cout << number1 * number2 / gcd << std::endl;\n\t}\n}" } ]
12
datakind/dymo
https://github.com/datakind/dymo
3def39043e9b6e952d16dc361378c29115eac8e9
ac416f56e5700cadf44f80a3eff57ce3002fcfd5
95ce002d5882fa01fdbd15e450ac505d726e1516
refs/heads/master
"2020-06-02T13:32:17.952153"
"2014-04-29T03:20:55"
"2014-04-29T03:20:55"
19,262,222
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5196942687034607, "alphanum_fraction": 0.5490887761116028, "avg_line_length": 21.986486434936523, "blob_id": "cc90985995624585c77733437477a85f1d297716", "content_id": "f18fac3816ef65c5684c575072fe86d40e9993e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1701, "license_type": "no_license", "max_line_length": 182, "num_lines": 74, "path": "/readme.md", "repo_name": "datakind/dymo", "src_encoding": "UTF-8", "text": "![img](readme-img/logo.png)\n============================\n\nA `flask` app for quickly labeling featuers in images and recording their pixel positions.\n\nFor now, this app is specific to a datakind/give-directly project, but it might be useful for other applications in the future.\n\n## Clone\n```\ngit clone https://github.com/abelsonlive/dymo.git\n``` \n\n## Requirements\nInstall `flask` and `redis`:\n```\npip install -r requirements.txt\n```\n\n## Setup\n* Startup redis:\n * `$ redis-server`\n* Now, simply place the images you want to label in the `static/images` directory. The filenames should be unique to each image and will be used as the primary key in the datastore.\n\n## Run\n`dymo` is configured to run as a simple heroku app. 
You can follow instructions for set up [here](https://devcenter.heroku.com/articles/redistogo#install-redis-in-python).\n<br></br>\nTo test the applocally, run\n```\npython dymo.py\n```\nAnd navigate to [`http://localhost:3030/`](http://localhost:3030/) for further instructions.\n\n## Data:\n\nEach item looks like this:\n```\n{\n 'roofs': [\n {\n 'y': 347,\n 'x': 314,\n 'type': 'iron'\n },\n {\n 'y': 362,\n 'x': 10,\n 'type': 'thatched'\n },\n {\n 'y': 270,\n 'x': 119,\n 'type': 'thatched'\n },\n {\n 'y': 178,\n 'x': 186,\n 'type': 'thatched'\n },\n {\n 'y': 157,\n 'x': 175,\n 'type': 'thatched'\n }\n ],\n 'image': 'KE2013071948-grass.png',\n 'total': 5,\n 'number_thatched': 4,\n 'number_iron': 1\n}\n```\n\n## Preview:\n\n![screenshot](readme-img/screenshot.png)\n" }, { "alpha_fraction": 0.6524547934532166, "alphanum_fraction": 0.6608527302742004, "avg_line_length": 25.25423812866211, "blob_id": "401342a8c35b4cb8796b2c50da3d3c88c7bafe60", "content_id": "922d5b22fa1b720cf9f985f52193d6707ac33378", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1548, "license_type": "no_license", "max_line_length": 86, "num_lines": 59, "path": "/dymo.py", "repo_name": "datakind/dymo", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, redirect, url_for\nfrom random import choice\nimport json, redis, os, sys\nfrom os.path import abspath, dirname\n\n# list of images in the static folder\nimages = [i.strip() for i in os.listdir('static/images') if i != '' and i is not None]\n\n# # initialize redis\nredis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')\nrdb = redis.from_url(redis_url)\n\n# intitialize app\napp = Flask(__name__)\napp.root_path = abspath(dirname(__file__))\n\n# index page\[email protected]('/')\ndef index():\n return render_template('index.html')\n\[email protected]('/<username>')\ndef user_index(username):\n\n # serve a random image that we haven't labeled yet\n 
completed = rdb.keys()\n images_to_label = [i for i in images if i not in completed]\n if len(images_to_label) == 0:\n return \"All images have been labeled. Thanks for your help!\"\n else:\n image = choice(images_to_label)\n return render_template(\n 'home.html',\n user = username, \n image = image, \n images_left = len(images_to_label)\n )\n\n# form post for label data\[email protected]('/label/image/', methods=['POST'])\ndef label(): \n \n # parse form\n value = json.loads(request.form['data'])\n \n # extract key\n key = value['image'].strip()\n\n # push to redis\n rdb.set(key, json.dumps(value))\n\n # redirect to a new image for this user\n user_url = url_for('user_index', username=value['user'])\n\n return redirect(user_url)\n\nif __name__ == '__main__':\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host='0.0.0.0', port=port, debug=True)" }, { "alpha_fraction": 0.4789915978908539, "alphanum_fraction": 0.4902836084365845, "avg_line_length": 29.717741012573242, "blob_id": "40de11e26f867845b62de210ec493b15b2074382", "content_id": "bdd1e86d7bdea69bc6db2161bedde64fb18685dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3808, "license_type": "no_license", "max_line_length": 127, "num_lines": 124, "path": "/static/js/app.js", "repo_name": "datakind/dymo", "src_encoding": "UTF-8", "text": "(function() {\n // main script\n\n var roundFloat = function(amt) {\n // round a float to an int\n return( (Math.round(amt*100)/100).toFixed(0) );\n };\n\n var iron_number = 0;\n var thatch_number = 0;\n var img_data = {};\n img_data['roofs'] = [];\n\n $(document).ready(function() {\n \n // pre-populate data labels\n img_data['user'] = $('#user').text();\n img_data['image'] = $('#img-link').text();\n img_data['number_thatched'] = thatch_number;\n img_data['number_iron'] = iron_number;\n img_data['total'] = thatch_number + iron_number;\n\n $('#data').val( JSON.stringify(img_data) );\n\n\n 
$('#clear').click(function() {\n\n // reset label data\n var iron_number = 0;\n var thatch_number = 0;\n var img_data = {};\n img_data['roofs'] = [];\n img_data['number_thatched'] = thatch_number;\n img_data['number_iron'] = iron_number;\n img_data['total'] = thatch_number + iron_number;\n\n // remove markers\n $('.marker').remove();\n\n // remove logs\n $('#log-list').empty();\n\n // remove data\n $('#data').val( JSON.stringify(img_data) );\n\n });\n\n $('#img').click(function(e) {\n \n var offset = $(this).offset();\n \n var x_abs = e.clientX;\n var y_abs = e.clientY;\n \n var x_img = x_abs - offset.left;\n var y_img = y_abs - offset.top;\n \n var x_display = x_abs - 7;\n var y_display = y_abs -7;\n \n var this_img_type = '';\n var this_roof = {};\n \n if (e.shiftKey) {\n \n iron_number = iron_number + 1;\n this_img_type = 'iron';\n\n var iron_id = \"iron\" + iron_number;\n var iron_div = '<div class=\"marker\" id=\"' + \n iron_id + \n '\" style=\"background-color:#a7b0ad; height:10px; width:10px; z-index:1; border: 2px solid;\"></div>';\n\n var iron_log = '<li><span style=\"background-color:#a7b0ad;\"><strong>iron</strong></span>\\t<strong>x:</strong> ' + \n roundFloat(x_img) + \n ', <strong>y:</strong> ' + \n roundFloat(y_img) + \n '</li>' \n\n $(document.body).append(iron_div);\n $(\"#\" + iron_id).css('position', 'absolute');\n $(\"#\" + iron_id).css('top', y_display);\n $(\"#\" + iron_id).css('left', x_display);\n $('#log-list').append(iron_log); \n\n } else {\n\n thatch_number = thatch_number + 1;\n this_img_type = 'thatched';\n\n var thatch_id = \"thatch\" + thatch_number;\n var thatch_div = '<div class=\"marker\" id=\"' + \n thatch_id + \n '\" style=\"background-color:#788854; height:10px; width:10px; z-index:1; border: 2px solid;\"></div>';\n var thatch_log = '<li><span style=\"background-color:#788854;\"> <strong>thatch</strong></span>\\t<strong>x:</strong> ' + \n roundFloat(x_img) + \n ', <strong>y:</strong> ' + \n roundFloat(y_img) + '</li>' \n\n 
$(document.body).append(thatch_div);\n $(\"#\" + thatch_id).css('position', 'absolute');\n $(\"#\" + thatch_id).css('top', y_display);\n $(\"#\" + thatch_id).css('left', x_display);\n $('#log-list').append(thatch_log);\n \n }\n\n // update metadata for image\n img_data['number_thatched'] = thatch_number;\n img_data['number_iron'] = iron_number;\n img_data['total'] = thatch_number + iron_number;\n\n // record data for this roof\n this_roof['x'] = x_img;\n this_roof['y'] = y_img;\n this_roof['type'] = this_img_type;\n img_data['roofs'].push(this_roof);\n\n // update data input field\n $('#data').val( JSON.stringify(img_data) );\n\n });\n });\n}).call(this);" } ]
3
tduproject/kagikko2
https://github.com/tduproject/kagikko2
08a5b88b59ed668cf186c3f78d540902dcc5d4da
83c8f2d720acaafdfaa6affd713709ab134a31a3
3586f6d3a926a84d83bc16cf3d712fec54b5de53
refs/heads/master
"2021-01-22T10:27:10.024714"
"2017-07-09T07:12:27"
"2017-07-09T07:12:27"
92,644,330
0
0
null
"2017-05-28T06:35:11"
"2017-07-09T07:12:33"
"2017-08-07T08:53:39"
JavaScript
[ { "alpha_fraction": 0.556291401386261, "alphanum_fraction": 0.5794702172279358, "avg_line_length": 34.52941131591797, "blob_id": "fb22b4c8dffc6d38e1118c8ad6f016ea4a27e818", "content_id": "2db1643397148eb919eeb1a7046cc3d60bf71045", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1240, "license_type": "no_license", "max_line_length": 144, "num_lines": 34, "path": "/tdu/app/migrations/0001_initial.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-05-20 00:08\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255, verbose_name='カテゴリ名')),\n ],\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=255, verbose_name='曜日')),\n ('text', models.CharField(max_length=255, verbose_name='時間')),\n ('sub', models.CharField(max_length=255, verbose_name='科目名')),\n ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Category', verbose_name='カテゴリ')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5570839047431946, "alphanum_fraction": 0.5653370022773743, "avg_line_length": 21.030303955078125, "blob_id": "f2514bef416c5f87a42b0954774beda00441de16", "content_id": "6e3d887a7acaa837060df880f8acb00d8c85c2c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": "no_license", "max_line_length": 38, "num_lines": 33, "path": 
"/tdu/keijiban/models.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Posting(models.Model):\n name = models.CharField(\n max_length=64,\n verbose_name='名前',\n help_text='あなたの名前を入力してください',\n )\n message = models.TextField(\n verbose_name='メッセージ',\n help_text='メッセージを入力してください',\n null=True,\n )\n subject = models.CharField(\n max_length=64,\n verbose_name='科目名',\n null=True,\n )\n created_at = models.DateTimeField(\n auto_now_add=True,\n verbose_name='登録日時',\n )\n pk_label = models.IntegerField(\n null=True,\n )\n\nclass PostingSubject(models.Model):\n subject = models.CharField(\n max_length=64,\n verbose_name='科目名',\n null=True,\n )\n" }, { "alpha_fraction": 0.5147198438644409, "alphanum_fraction": 0.583095908164978, "avg_line_length": 31.875, "blob_id": "39c2335036e2d89e7c5fc23ae1c903f4be819db1", "content_id": "6b9d08b1dc39ba1f150f0b3a80c783898064b3f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "no_license", "max_line_length": 123, "num_lines": 32, "path": "/tdu/accounts/aes.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "import codecs\nfrom Crypto.Cipher import AES\n\nclass aesEncryption:\n\n # def PaddingMes(self,mes):\n # mes_length = len(mes)\n # len = round(mes_length / 16) #四捨五入\n # new_mes = mes + \" \"*len\n # return new_mes\n\n\n def Encrypt(self,mes1):\n secret = (b'\\xf0\\x0e3nE\\xa1\\x9a\\xff\\x7f\\xf6r\\xd6\\xf4\\x9c\\xa9\\xaa')\n counter = (b'\\xa7\\r\\xa5u\\xd4\\xa0h\\xb2\\x04\\x19<8\\x8e\\xc6$\\x82\\xc8\\x7f\\xe9\\x99\\x0b3\\xe3\\x05\\xe8\\x999j-\\xf1\\xf7\\xd5')\n crypto = AES.new(counter, AES.MODE_CTR, counter=lambda: secret)\n mes_length = len(mes1)\n leng = round(mes_length / 16) # 四捨五入\n mes = mes1 + \" \" * leng\n encrypted = crypto.encrypt(mes)\n return encrypted\n\n def Decrypt(self,mes2):\n secret = 
(b'\\xf0\\x0e3nE\\xa1\\x9a\\xff\\x7f\\xf6r\\xd6\\xf4\\x9c\\xa9\\xaa')\n counter = ( b'\\xa7\\r\\xa5u\\xd4\\xa0h\\xb2\\x04\\x19<8\\x8e\\xc6$\\x82\\xc8\\x7f\\xe9\\x99\\x0b3\\xe3\\x05\\xe8\\x999j-\\xf1\\xf7\\xd5')\n crypto = AES.new(counter, AES.MODE_CTR, counter=lambda: secret)\n\n mes = crypto.decrypt(mes2)\n mes = codecs.decode(mes, 'utf-8')\n decrypt = mes.strip()\n\n return decrypt\n\n" }, { "alpha_fraction": 0.5549368262290955, "alphanum_fraction": 0.5726751089096069, "avg_line_length": 57.45637512207031, "blob_id": "42309e5aa840eb79b8cedf445ead417616969b39", "content_id": "aea289b9d216810982df85e790d65f8d97f01657", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 18992, "license_type": "no_license", "max_line_length": 171, "num_lines": 298, "path": "/tdu/timetable/templates/timetable/timetable.html", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "{% extends \"timetable/base.html\" %}\n\n{% block title %}\n <title>Kagikko Time Table</title>\n{% endblock %}\n\n\n{% block content %}\n\n<style type=\"text/css\">\n.table th {\n background-color: #1ABC9C;\n color: #ECF0F1;\n}\nth.week {\n text-align: center;\n}\ntd.time{\n text-align: center;\n}\n</style>\n\n\n<html>\n<form action={% url 'timetable:edit'%} method=\"post\" name=myform>\n {% csrf_token %}\n\n\n<h2>前期</h2>\n\n\n<table class=\"table\" border=1>\n <tr><th>  </th><th class=\"week\">月曜日</th><th class=\"week\">火曜日</th><th class=\"week\">水曜日</th><th class=\"week\">木曜日</th><th class=\"week\">金曜日</th><th class=\"week\">土曜日</th></tr>\n <tr><td class=\"time\">1</td>\n <td><select name = \"月1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"月\" and post.text == \"1\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"火1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% 
for post in posts %}{% if post.when == \"前期\" and post.title == \"火\" and post.text == \"1\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"水1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"水\" and post.text == \"1\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"木1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"木\" and \"1\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"金1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"金\" and \"1\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"土1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"土\" and \"1\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n\n <tr><td class=\"time\">2</td>\n <td><select name = \"月2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"月\" and \"2\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"火2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"火\" and post.text == \"2\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"水2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n 
{% for post in posts %}{% if post.when == \"前期\" and post.title == \"水\" and post.text == \"2\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"木2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"木\" and \"2\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"金2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"金\" and \"2\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"土2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"土\" and \"2\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n\n <tr><td class=\"time\">3</td>\n <td><select name = \"月3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"月\" and \"3\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"火3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"火\" and post.text == \"3\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"水3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"水\" and post.text == \"3\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"木3\" style = \"width:165px\">\n <option 
value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"木\" and post.text == \"3\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"金3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"金\" and \"3\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"土3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"土\" and \"3\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n\n <tr><td class=\"time\">4</td>\n <td><select name = \"月4\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"月\" and \"4\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"火4\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"火\" and post.text == \"4\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"水4\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"水\" and post.text == \"4\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"木4\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"木\" and post.text == \"4\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"金4\" style = \"width:165px\">\n 
<option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"金\" and \"4\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"土4\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"土\" and post.text == \"4\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n\n <tr><td class=\"time\">5</td>\n <td><select name = \"月5\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"月\" and post.text == \"5\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"火5\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"火\" and post.text == \"5\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"水5\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"水\" and post.text == \"5\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"木5\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"木\" and post.text == \"5\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"金5\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"前期\" and post.title == \"金\" and post.text == \"5\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n\n</table></br>\n\n\n<h2>後期</h2>\n<table 
class=\"table\" border=1>\n<tr><th>  </th><th class=\"week\">月曜日</th><th class=\"week\">火曜日</th><th class=\"week\">水曜日</th><th class=\"week\">木曜日</th><th class=\"week\">金曜日</th><th class=\"week\">土曜日</th></tr>\n<tr><td class=\"time\">1</td>\n <td><select name = \"後期月1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"月\" and \"1\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期火1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"火\" and post.text == \"1\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期水1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"水\" and post.text == \"1\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期木1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"木\" and \"1\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期金1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"金\" and \"1\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期土1\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"土\" and \"1\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n\n <tr><td class=\"time\">2</td>\n <td><select name = 
\"後期月2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"月\" and \"2\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期火2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"火\" and post.text == \"2\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期水2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"水\" and post.text == \"2\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期木2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"木\" and \"2\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期金2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"金\" and \"2\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期土2\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"土\" and \"2\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n\n <tr><td class=\"time\">3</td>\n <td><select name = \"後期月3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"月\" and \"3\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n 
<td><select name = \"後期火3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"火\" and \"3\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期水3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"水\" and post.text == \"3\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期木3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"木\" and post.text == \"3\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期金3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"金\" and \"3\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期土3\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"土\" and \"3\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n\n <tr><td class=\"time\">4</td>\n <td><select name = \"後期月4\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"月\" and \"4\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期火4\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"火\" and \"4\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor 
%}</select></td>\n <td><select name = \"後期水4\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"水\" and post.text == \"4\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期木4\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"木\" and post.text == \"4\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期金4\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"金\" and \"4\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n\n\n <tr><td class=\"time\">5</td>\n <td><select name = \"後期月5\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"月\" and post.text == \"5\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期火5\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"火\" and post.text == \"5\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期水5\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"水\" and post.text == \"5\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n <td><select name = \"後期木5\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"木\" and post.text == \"5\" %}\n <option value={{post.sub}}>{{post.sub}}</option>{% 
endif %}{% endfor %}</select></td>\n <td><select name = \"後期金5\" style = \"width:165px\">\n <option value=null>選択してください</option>\n {% for post in posts %}{% if post.when == \"後期\" and post.title == \"金\" and \"5\" in post.text %}\n <option value={{post.sub}}>{{post.sub}}</option>{% endif %}{% endfor %}</select></td>\n\n\n</table>\n<div style=\"margin-left:1000px\">\n<br><button type='submit' class=\"btn btn-primary\">保存</button>\n</div>\n<input type=\"hidden\" name=\"timetable\" value=time>\n</form>\n\n</html>\n{% endblock %}\n" }, { "alpha_fraction": 0.5618661046028137, "alphanum_fraction": 0.6267748475074768, "avg_line_length": 22.4761905670166, "blob_id": "6108381530e65b19eaf08ab356f3359c5b9004b6", "content_id": "47d2043e9451d0b7f1c69f7fefac89637c19b581", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 493, "license_type": "no_license", "max_line_length": 66, "num_lines": 21, "path": "/tdu/profiles/migrations/0006_auto_20170703_2051.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-07-03 20:51\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\nimport encrypted_fields.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profiles', '0005_auto_20170703_1142'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userprofile',\n name='grade',\n field=encrypted_fields.fields.EncryptedIntegerField(),\n ),\n ]\n" }, { "alpha_fraction": 0.646213173866272, "alphanum_fraction": 0.6504207849502563, "avg_line_length": 30.688888549804688, "blob_id": "623b878af66b641e50fcc8746d8d8034d14ef441", "content_id": "66819548e5bad189c33856eeeee32e7dac65c829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3758, "license_type": "no_license", "max_line_length": 109, "num_lines": 90, "path": "/tdu/keijiban/views.py", "repo_name": 
"tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# ページネーター\nfrom django.core.paginator import (\n Paginator, # ページネーター本体のクラス\n EmptyPage, # ページ番号が範囲外だった場合に発生する例外クラス\n PageNotAnInteger # ページ番号が数字でなかった場合に発生する例外クラス\n)\nfrom django.shortcuts import (\n render,\n redirect,\n)\nfrom .models import Posting\nfrom .forms import PostingForm\nfrom .models import PostingSubject\nfrom .forms import PostingSubjectForm\nfrom django.contrib import messages\nfrom django.shortcuts import render, get_object_or_404\n\nfrom polls.models import Poll\nfrom profiles.models import UserProfile\nfrom django.contrib.auth.models import User\n\ndef post_list(request):\n posts = Poll.objects.all()\n return render(request, 'keijiban/post_list.html', {'posts': posts})\n\ndef _get_page(list_, page_no, count=100):\n \"\"\"ページネーターを使い、表示するページ情報を取得する\"\"\"\n paginator = Paginator(list_, count)\n try:\n page = paginator.page(page_no)\n except (EmptyPage, PageNotAnInteger):\n # page_noが指定されていない場合、数値で無い場合、範囲外の場合は\n # 先頭のページを表示する\n page = paginator.page(1)\n return page\n\ndef index(request,pk):\n \"\"\"表示・投稿を処理する\"\"\"\n posts = get_object_or_404(Poll, pk=pk)\n # 教科名と投稿名者をフォームにあらかじめ登録しておく設定\n if not request.user.is_authenticated():\n #ログインされていない場合は投稿者名が@名無しの電大生になる\n form = PostingForm(initial={'subject':posts.subname , 'name':\"@名無しの電大生\", 'pk_label':-1})\n else:\n #ログインされている場合は投稿者名がプロフィールの名前になる\n email = request.user.email\n info_personal = UserProfile.objects.get(email = email)\n #ユーザプロフィールへのリンク情報を付加\n link_profile = UserProfile.objects.all()\n for tmp in link_profile:\n if tmp.email == email:\n pk_link = tmp.pk\n\n form = PostingForm(initial={'subject':posts.subname , 'name':info_personal.name, 'pk_label':pk_link})\n\n if request.method == 'POST':\n # ModelFormもFormもインスタンスを作るタイミングでの使い方は同じ\n form = PostingForm(request.POST or None)\n if form.is_valid():\n # save()メソッドを呼ぶだけでModelを使ってDBに登録される。\n form.save()\n # メッセージフレームワークを使い、処理が成功したことをユーザーに通知する\n messages.success(request, '投稿を受付ました。')\n return 
redirect('keijiban:index',pk=pk)\n else:\n # メッセージフレームワークを使い、処理が失敗したことをユーザーに通知する\n messages.error(request, '入力内容に誤りがあります。')\n\n #リストを作成し、該当する講義のデータのみ抽出する\n db_posts = Posting.objects.order_by('-subject')\n post_list = [\"temp\"]\n for temp in db_posts:\n if temp.subject == posts.subname:\n post_list.append(temp)\n\n #リストの表示設定\n post_list.pop(0)\n post_list.reverse()\n\n page = _get_page(\n # Posting.objects.order_by('-id'), # 投稿を新しい順に並び替えて取得する\n post_list,\n request.GET.get('page') # GETクエリからページ番号を取得する\n )\n contexts = {\n 'page': page,\n 'posts': posts,\n 'form': form,\n }\n return render(request, 'keijiban/index.html', contexts)\n" }, { "alpha_fraction": 0.5470852255821228, "alphanum_fraction": 0.6038864254951477, "avg_line_length": 24.730770111083984, "blob_id": "8a97674b36b7b0b00a1b660c4a1c117d966428e6", "content_id": "66fb3bbee3a573ebd699902a04e85f50bfd668e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 77, "num_lines": 26, "path": "/tdu/profiles/migrations/0007_auto_20170703_2053.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-07-03 20:53\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport encrypted_fields.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profiles', '0006_auto_20170703_2051'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userprofile',\n name='grade',\n field=models.CharField(max_length=254),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='name',\n field=encrypted_fields.fields.EncryptedCharField(max_length=254),\n ),\n ]\n" }, { "alpha_fraction": 0.6073298454284668, "alphanum_fraction": 0.6178010702133179, "avg_line_length": 26.285715103149414, "blob_id": "63f3c45aceb8241183cc08a6bee733bec3055094", "content_id": 
"219d4de887f8df4529f5641fd70f8f71278b5e22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 191, "license_type": "no_license", "max_line_length": 63, "num_lines": 7, "path": "/tdu/keijiban/urls.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.post_list, name='list'),\n url(r'^index/(?P<pk>[0-9]+)/$', views.index, name='index'),\n]\n" }, { "alpha_fraction": 0.6384615302085876, "alphanum_fraction": 0.6384615302085876, "avg_line_length": 27.66666603088379, "blob_id": "1d83ec42b844a107541b08940456c1d03ae64b2f", "content_id": "05f8e467107d54d85b1a8cc6798035780078ad9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 63, "num_lines": 9, "path": "/tdu/app/urls.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\n\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^csv_import/$', views.csv_import, name='csv_import'),\n url(r'^csv_export/$', views.csv_export, name='csv_export'),\n]\n \n" }, { "alpha_fraction": 0.8110235929489136, "alphanum_fraction": 0.8110235929489136, "avg_line_length": 24.399999618530273, "blob_id": "50c035608650a0e0e87251e6e812d2da2943ddd4", "content_id": "e7d4a5bd5601d086db53cc852d66e1ea2ff30446", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 127, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/tdu/polls/admin.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom polls.models import Poll , Choice\n\nadmin.site.register(Poll)\nadmin.site.register(Choice)\n" }, { "alpha_fraction": 0.5767385959625244, "alphanum_fraction": 0.5863309502601624, "avg_line_length": 28.785715103149414, "blob_id": "c050cff0dbd960bf67ff483760e346eca39b73ae", "content_id": "91c8caf3ccec577916520fb499e19767bd58a112", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1702, "license_type": "no_license", "max_line_length": 81, "num_lines": 56, "path": "/tdu/app/views.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "import csv\nfrom io import TextIOWrapper, StringIO\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.views import generic\nfrom .models import Post\nfrom polls.models import Poll ,Choice\n\n\nclass IndexView(generic.ListView):\n model = Post\n\n\ndef csv_import(request):\n q_array = ['q1','q2','q3']\n form_data = TextIOWrapper(\n request.FILES['csv'].file, encoding='utf-8')\n if form_data:\n csv_file = csv.reader(form_data)\n for line in csv_file:\n post, _ = Post.objects.get_or_create(pk=line[0])\n post.title = line[1]\n post.text = line[2]\n post.sub = 
line[3]\n mypoll = Poll()\n mypoll.subname = line[3]\n mypoll.question1 = \"課題の難易度 \"\n mypoll.question2 = \"テストの難易度 \"\n mypoll.question3 = \"課題の量 \"\n\n\n for q in q_array:\n mychoice = Choice()\n mychoice.subname = line[3]\n mychoice.value = q\n mychoice.save()\n\n # category, _ = Category.objects.get_or_create(name=line[4])\n post.category = line[4]\n post.when = line[5]\n post.save()\n mypoll.save()\n\n return redirect('app:index')\n\n\ndef csv_export(request):\n memory_file = StringIO()\n writer = csv.writer(memory_file)\n for post in Post.objects.all():\n row = [post.pk, post.title, post.text, post.sub, post.category,post.when]\n writer.writerow(row)\n response = HttpResponse(\n memory_file.getvalue(), content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=db.csv'\n return response\n" }, { "alpha_fraction": 0.6577470898628235, "alphanum_fraction": 0.6633577942848206, "avg_line_length": 29.090909957885742, "blob_id": "c3121d83ee0abc94efa74121c5239bf1901f1087", "content_id": "2020b8349cde89507ca1310df0300263a4f72fb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4686, "license_type": "no_license", "max_line_length": 92, "num_lines": 154, "path": "/tdu/accounts/views.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\n#from myUserModel.models import User\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import send_mail\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.http import Http404\nfrom django.template.loader import get_template\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.http import urlsafe_base64_encode, 
urlsafe_base64_decode\nfrom django.views import generic\nfrom profiles.models import UserProfile\nfrom pprint import pprint\nfrom django.http import HttpResponse\nfrom .forms import (\n RegisterForm,\n LoginForm,\n ChangePasswordForm,\n ForgetPasswordForm,\n PasswordConfirmForm,\n\n)\n\n#ユーザー登録\n\n\nclass CreateUserView(generic.FormView):\n template_name = 'accounts/create.html'\n form_class = RegisterForm\n success_url = reverse_lazy('accounts:create_done')\n\n def form_valid(self,form):\n user = form.save(commit=False)\n user.is_active = False\n user.email = user.username\n user.save()\n current_site = get_current_site(self.request)\n domain = current_site.domain\n # subject_template = get_template('mailtemplate/subject.txt')\n message_template = get_template('mailtemplate/message.txt')\n\n context = {\n 'protocol': 'https' if self.request.is_secure() else 'http',\n 'domain': domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': default_token_generator.make_token(user),\n 'user': user,\n }\n\n\n #subject = subject_template.render(context)\n message = message_template.render(context)\n from_email = settings.EMAIL_HOST_USER\n to = [user.username]\n\n send_mail('ご登録ありがとうございます',\n message,\n from_email,\n to\n )\n\n return super(CreateUserView, self).form_valid(form)\n\n\nclass CreateDoneView(generic.TemplateView):\n template_name = \"accounts/create_done.html\"\n\n\n\n\n\n\nclass CreateCompleteView(generic.TemplateView):\n template_name = 'accounts/create_complete.html'\n\n def get(self, request, **kwargs):\n token = kwargs.get(\"token\")\n uidb64 = kwargs.get(\"uidb64\")\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user and not user.is_active and default_token_generator.check_token(user, token):\n user.is_active = True\n user.save()\n createprofile = UserProfile()\n createprofile.name = '名無しの電大生'\n 
createprofile.email = user.email\n createprofile.save()\n\n\n return super(CreateCompleteView, self).get(request, **kwargs)\n else:\n raise Http404\n\n\n\ndef password_reset(request):\n context = {\n 'post_reset_redirect': reverse_lazy('accounts:password_reset_done'),\n 'template_name': 'accounts/password_reset_form.html',\n 'email_template_name': 'mailtemplate/password_reset/message.txt',\n 'subject_template_name': 'mailtemplate/password_reset/subject.txt',\n 'password_reset_form': ForgetPasswordForm,\n }\n return auth_views.password_reset(request, **context)\n\n\ndef password_reset_done(request):\n context = {\n 'template_name': 'accounts/password_reset_done.html',\n }\n return auth_views.password_reset_done(request, **context)\n\n\ndef password_reset_confirm(request, uidb64, token):\n context = {\n 'uidb64': uidb64,\n 'token': token,\n 'post_reset_redirect': reverse_lazy('accounts:password_reset_complete'),\n 'template_name': 'accounts/password_reset_confirm.html',\n 'set_password_form': PasswordConfirmForm,\n }\n return auth_views.password_reset_confirm(request, **context)\n\n\ndef password_reset_complete(request):\n context = {\n 'template_name': 'accounts/password_reset_complete.html',\n }\n return auth_views.password_reset_complete(request, **context)\n\n\n\n\ndef login(request):\n context = {\n 'template_name': 'accounts/login.html',\n 'authentication_form': LoginForm\n }\n return auth_views.login(request, **context)\n\n\ndef logout(request):\n context = {\n 'template_name': 'accounts/login.html'\n }\n\n return auth_views.logout(request, **context)\n" }, { "alpha_fraction": 0.8142856955528259, "alphanum_fraction": 0.8428571224212646, "avg_line_length": 27, "blob_id": "670eaba82741025d2a1cb818b782ed912fb48f97", "content_id": "27da3acfddbc93831f7fde9b63493ee8d0007eb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 41, "num_lines": 5, "path": 
"/tdu/timetable/admin.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Timetable1,Timetable2\n\nadmin.site.register(Timetable1)\nadmin.site.register(Timetable2)\n" }, { "alpha_fraction": 0.5388513803482056, "alphanum_fraction": 0.5489864945411682, "avg_line_length": 20.527273178100586, "blob_id": "1d5e4d2ba3bb82a686c3b02d154552d5784623e9", "content_id": "fa46357d9edc7be6bd6c3ff0da2760258899bf5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1268, "license_type": "no_license", "max_line_length": 66, "num_lines": 55, "path": "/tdu/profiles/forms.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import UserProfile\n\n\nGRADE_CHOICES = (\n ('1年', '1年生'),\n ('2年', '2年生'),\n ('3年', '3年生'),\n ('4年', '4年生'),\n ('院1年', '院1年生'),\n ('院2年', '院2年生'),\n ('教員', '教員'),\n\n)\n\nMAJOR_CHOICES = (\n ('RB', 'RB'),\n ('RD', 'RD'),\n ('RG', 'RG'),\n ('RT', 'RT'),\n ('RU', 'RT'),\n)\n\nclass UserProfileForm(forms.ModelForm):\n\n name = forms.CharField(label=\"名前\", required=True)\n text = forms.CharField(label=\"コメント\", widget=forms.Textarea)\n\n\n class Meta:\n model = UserProfile\n fields = ('name', 'grade', 'major', 'text')\n\n\n grade = forms.ChoiceField(\n label='学年',\n widget=forms.Select,\n choices=GRADE_CHOICES,\n required=False,\n )\n\n major = forms.ChoiceField(\n label='学系',\n widget=forms.Select,\n choices=MAJOR_CHOICES,\n required=False,\n )\n\n def __init__(self, *args,**kwargs):\n super().__init__(*args, **kwargs)\n self.fields['name'].widget.attrs['class'] = 'form-control'\n self.fields['name'].widget.attrs['placeholder'] = '名前'\n\n self.fields['text'].widget.attrs['class'] = 'form-control'\n self.fields['text'].widget.attrs['placeholder'] = 'コメント'\n" }, { "alpha_fraction": 0.6336206793785095, "alphanum_fraction": 0.6465517282485962, "avg_line_length": 24.77777862548828, 
"blob_id": "dad8861338036ba68bba90ed129ec28b0fe9557d", "content_id": "318137a713fcfc1a20f60aeab4f7a0a2587c7c8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 57, "num_lines": 9, "path": "/tdu/timetable/urls.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom .import views\n\nurlpatterns = [\n url(r'^$', views.time_table, name = 'table'), #追加 7/9 山田\n url(r'^timeedit/$', views.time_table2, name='edit'),\n url(r'^result/$', views.show, name='result'),\n]\n" }, { "alpha_fraction": 0.5284210443496704, "alphanum_fraction": 0.6021052598953247, "avg_line_length": 22.75, "blob_id": "cb9686d791dc6232ec1eff70dec44ee87165d903", "content_id": "360fb2ac0b6afcb99be395b3d8fcf75d1db1d42a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 71, "num_lines": 20, "path": "/tdu/profiles/migrations/0004_auto_20170703_1043.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-07-03 10:43\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profiles', '0003_auto_20170703_1040'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userprofile',\n name='email',\n field=models.EmailField(default='[email protected]', max_length=254),\n ),\n ]\n" }, { "alpha_fraction": 0.6477272510528564, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 30.428571701049805, "blob_id": "65a87a174dd746e9a311046194e5c2477625d469", "content_id": "b34474ce836b620366b83118bfd8e630fe6688cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", 
"max_line_length": 79, "num_lines": 14, "path": "/tdu/app/models.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom django.db import models\n\n\n\nclass Post(models.Model):\n title = models.CharField('曜日', max_length=255)\n text = models.CharField('時間', max_length=255)\n sub = models.CharField('科目名', max_length=255)\n category = models.CharField('カテゴリ名', max_length=255 ,default='SOME STRING')\n when = models.CharField('時期', max_length=255 ,default='SOME STRING')\n\n def __str__(self):\n return self.sub\n" }, { "alpha_fraction": 0.584555983543396, "alphanum_fraction": 0.5907335877418518, "avg_line_length": 32.20512771606445, "blob_id": "287859417c4986275dac11a50b6b8e283091ea54", "content_id": "ff683a5fd899fe90a4a0707fd96c4099fd5da37b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1327, "license_type": "no_license", "max_line_length": 71, "num_lines": 39, "path": "/tdu/keijiban/forms.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Posting\nfrom .models import PostingSubject\n\nclass PostingForm(forms.ModelForm):\n\n name = forms.CharField(label=\"名前\", required=True)\n message = forms.CharField(label=\"メッセージ\", widget=forms.Textarea)\n\n class Meta:\n model = Posting\n fields = ('name','message','subject','pk_label')\n # widgets = {\n # 'name': forms.TextInput(attrs={'size': 40}),\n # 'message': forms.Textarea(attrs={'cols': 80, 'rows': 20})\n # }\n\n def __init__(self, *args,**kwargs):\n super().__init__(*args, **kwargs)\n self.fields['name'].widget.attrs['class'] = 'form-control'\n self.fields['name'].widget.attrs['placeholder'] = '名前'\n\n self.fields['message'].widget.attrs['class'] = 'form-control'\n self.fields['message'].widget.attrs['placeholder'] = 'メッセージ'\n\n\nclass PostingSubjectForm(forms.ModelForm):\n\n class Meta:\n model = PostingSubject\n fields = ('subject',)\n 
widgets = {\n 'subject': forms.TextInput(attrs={'size': 40})\n }\n\n def __init__(self, *args,**kwargs):\n super().__init__(*args, **kwargs)\n self.fields['subject'].widget.attrs['class'] = 'form-control'\n self.fields['subject'].widget.attrs['placeholder'] = '教科'\n" }, { "alpha_fraction": 0.5336927175521851, "alphanum_fraction": 0.5687331557273865, "avg_line_length": 25.5, "blob_id": "50dd46040c69c727bd3daba6c0f6e2ef4304da48", "content_id": "38a06e039043780704128ebda61a6e84776c8a13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 756, "license_type": "no_license", "max_line_length": 96, "num_lines": 28, "path": "/tdu/app/migrations/0002_auto_20170708_0057.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-07-08 00:57\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='post',\n name='when',\n field=models.CharField(default='SOME STRING', max_length=255, verbose_name='時期'),\n ),\n migrations.AlterField(\n model_name='post',\n name='category',\n field=models.CharField(default='SOME STRING', max_length=255, verbose_name='カテゴリ名'),\n ),\n migrations.DeleteModel(\n name='Category',\n ),\n ]\n" }, { "alpha_fraction": 0.463428258895874, "alphanum_fraction": 0.5075376629829407, "avg_line_length": 21.670886993408203, "blob_id": "f5bdc7199d1f4492a7a947b18d22345e9b0a3adf", "content_id": "4f9a9ebaff0b00eb796b2be82cf451d106a8df34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1803, "license_type": "no_license", "max_line_length": 88, "num_lines": 79, "path": "/tdu/polls/views.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# coding: UTF-8\nfrom django.http import 
HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\n\nfrom polls.models import Poll, Choice\n\n#\n# 一覧表示\n#\ndef poll_list(request):\n posts = Poll.objects.all()\n return render(request, 'poll_list/poll_list.html', {'posts': posts})\n\ndef poll_detail(request, pk):\n post = get_object_or_404(Poll, pk=pk)\n return render(request, 'poll_list/poll_detail.html', {'post': post})\n\n# 投票\n#\ndef vote(request):\n name = request.POST[\"subname\"]\n choice = Choice.objects.filter(subname = name)\n\n q1 = request.POST[\"select1\"]\n q2 = request.POST[\"select2\"]\n q3 = request.POST[\"select3\"]\n\n Q1 = choice[0]\n Q2 = choice[1]\n Q3 = choice[2]\n\n if q1 == \"e1\" :\n num = Q1.easy\n Q1.easy = num+1\n Q1.save()\n\n elif q1 == \"n1\" :\n num = Q1.normal\n Q1.normal = num+1\n Q1.save()\n\n elif q1 == \"h1\" :\n num = Q1.hard\n Q1.hard = num+1\n Q1.save()\n\n if q2 == \"e2\" :\n num = Q2.easy\n Q2.easy = num+1\n Q2.save()\n\n elif q2 == \"n2\" :\n num = Q2.normal\n Q2.normal = num+1\n Q2.save()\n\n elif q2 == \"h2\" :\n num = Q2.hard\n Q2.hard = num+1\n Q2.save()\n\n if q3 == \"e3\" :\n num = Q3.easy\n Q3.easy = num+1\n Q3.save()\n\n elif q3 == \"n3\" :\n num = Q3.normal\n Q3.normal = num+1\n Q3.save()\n\n elif q3 == \"h3\" :\n num = Q3.hard\n Q3.hard = num+1\n Q3.save()\n\n return render(request, 'poll_list/poll_result.html', {'Q1' :Q1,'Q2': Q2,'Q3' : Q3 })\n" }, { "alpha_fraction": 0.5290620923042297, "alphanum_fraction": 0.5607661604881287, "avg_line_length": 38.842105865478516, "blob_id": "3469c04eec593f4fdbdfa738ead510e6c4fd1aa3", "content_id": "5963e976617141b684a73cc42d8b54f8d6630108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1558, "license_type": "no_license", "max_line_length": 114, "num_lines": 38, "path": "/tdu/timetable/migrations/0001_initial.py", "repo_name": 
"tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-07-08 00:57\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Timetable1',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('username', models.CharField(max_length=255, verbose_name='名前')),\n ('day', models.CharField(max_length=255, verbose_name='曜日')),\n ('time', models.CharField(max_length=255, verbose_name='時間')),\n ('sub', models.CharField(max_length=255, verbose_name='科目名')),\n ('when', models.CharField(max_length=255, verbose_name='時期')),\n ],\n ),\n migrations.CreateModel(\n name='Timetable2',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('username', models.CharField(max_length=255, verbose_name='名前')),\n ('day', models.CharField(max_length=255, verbose_name='曜日')),\n ('time', models.CharField(max_length=255, verbose_name='時間')),\n ('sub', models.CharField(max_length=255, verbose_name='科目名')),\n ('when', models.CharField(max_length=255, verbose_name='時期')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6850746273994446, "alphanum_fraction": 0.7223880887031555, "avg_line_length": 22.10344886779785, "blob_id": "bbbd33c1c4a46663c0c5284b7c3be6ec80cf7f71", "content_id": "d15c70aeff63a0bf677bfad836c3211d848e421d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 710, "license_type": "no_license", "max_line_length": 67, "num_lines": 29, "path": "/tdu/polls/models.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# coding: UTF-8\nfrom django.db import models\n\n#\n# アンケート質問モデル\n#\n\nclass Poll(models.Model):\n subname = models.CharField(max_length=200)\n question1 = 
models.CharField(max_length=200)\n question2 = models.CharField(max_length=200)\n question3 = models.CharField(max_length=200)\n\ndef __str__(self):\n return self.subname\n\n\n#\n# アンケート選択モデル\n#\nclass Choice(models.Model):\n subname = models.CharField(max_length=200, default='SOME STRING')\n value = models.CharField(max_length=200 , default='SOME STRING')\n easy = models.IntegerField(default=0)\n normal = models.IntegerField(default=0)\n hard = models.IntegerField(default=0)\n\n def __str__(self):\n return self.subname\n" }, { "alpha_fraction": 0.45109519362449646, "alphanum_fraction": 0.4747343361377716, "avg_line_length": 33.155555725097656, "blob_id": "fcbc61016288ec5ebb01d11a0258432ac824492b", "content_id": "4f217c49c40fccf462a78ad8352c2e50e5b06262", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4827, "license_type": "no_license", "max_line_length": 84, "num_lines": 135, "path": "/tdu/timetable/views.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom app.models import Post\nfrom .models import Timetable1,Timetable2\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n\n\ndef time_table(request):\n posts = Post.objects.all()\n\n #return HttpResponse(request.user.email)\n return render(request, 'timetable/timetable.html', {'posts': posts})\n\n@login_required\ndef time_table2(request):\n\n username = request.user.email\n subject = request.POST[\"timetable\"]\n\n # listnames = [\"月1\",\"月2\",\"月3\",\"月4\",\"月5\",\"火]\n listnames = list()\n when = [\"後期\"]\n days = [\"月\",\"火\",\"水\",\"木\",\"金\"]\n times = [\"1\",\"2\",\"3\",\"4\",\"5\"]\n tuti = [\"土1\",\"土2\",\"土3\",\"土4\"]\n\n #検索するリストを作成\n for day in days:\n for time in times:\n data = day + time\n listnames.append(data)\n for element in tuti:\n 
listnames.append(element)\n\n #listnamesで作られた、リストを用いて、request.Postで送信されたデータを保存\n #何も選択されていない場合、保存しない\n\n #現在保存されている時間割を、検索\n user_timetable1 = Timetable1.objects.filter(username = username)\n for day2 in listnames:\n week = day2[0]\n num = day2[1]\n mytime = Timetable1()\n if user_timetable1.count() == 0:\n t1 = request.POST[day2]\n if t1 == request.POST[day2]:\n if t1 != 'null':\n mytime.username = username\n mytime.day = day2[0]\n mytime.time = day2[1]\n mytime.sub = t1\n mytime.when = \"前期\"\n mytime.save()\n else:\n t1 = request.POST[day2]\n if t1 == request.POST[day2]:\n if t1 != 'null':\n for timetable in user_timetable1:\n if timetable.day == week and timetable.time == num:\n print(timetable.sub)\n timetable.delete()\n mytime.username = username\n mytime.day = day2[0]\n mytime.time = day2[1]\n mytime.sub = t1\n mytime.when = \"前期\"\n mytime.save()\n else:\n mytime.username = username\n mytime.day = day2[0]\n mytime.time = day2[1]\n mytime.sub = t1\n mytime.when = \"前期\"\n mytime.save()\n\n listnames2 = list()\n tuti2 = [\"後期土1\",\"後期土2\",\"後期土3\"]\n for day in days:\n for time in times:\n data = when[0] + day + time\n listnames2.append(data)\n for elemnt2 in tuti2:\n listnames2.append(elemnt2)\n\n\n user_timetable2 = Timetable2.objects.filter(username=username)\n for day2 in listnames2:\n week = day2[2]\n num = day2[3]\n mytime = Timetable2()\n if user_timetable2.count() == 0:\n t1 = request.POST[day2]\n if t1 == request.POST[day2]:\n if t1 != 'null':\n mytime.username = username\n mytime.day = day2[2]\n mytime.time = day2[3]\n mytime.sub = t1\n mytime.when = \"後期\"\n mytime.save()\n else:\n t1 = request.POST[day2]\n if t1 == request.POST[day2]:\n if t1 != 'null':\n for timetable in user_timetable2:\n if timetable.day == week and timetable.time == num:\n timetable.delete()\n mytime.username = username\n mytime.day = day2[2]\n mytime.time = day2[3]\n mytime.sub = t1\n mytime.when = \"後期\"\n mytime.save()\n\n else:\n mytime.username = username\n mytime.day = 
day2[2]\n mytime.time = day2[3]\n mytime.sub = t1\n mytime.when = \"後期\"\n mytime.save()\n return HttpResponseRedirect('/timetable/result')\n\ndef show(request):\n\n username = request.user.email\n post1 = Timetable1.objects.filter(username = username)\n post2 = Timetable2.objects.filter(username = username)\n\n\n\n return render(request, 'timetable/result.html', {'post1': post1,'post2': post2})\n" }, { "alpha_fraction": 0.6867796778678894, "alphanum_fraction": 0.6908474564552307, "avg_line_length": 36.82051467895508, "blob_id": "712f147e5a712bd0e8e7fc1e5e0297e131b5b551", "content_id": "a29d573d4f1b1b622243310081c398a4d7e32297", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1475, "license_type": "no_license", "max_line_length": 76, "num_lines": 39, "path": "/tdu/profiles/views.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import UserProfile\nfrom django.shortcuts import render, get_object_or_404\nfrom .forms import UserProfileForm\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse\ndef profile_detail(request, pk):\n post = get_object_or_404(UserProfile, pk=pk)\n #return HttpResponse(request.user.email)\n return render(request, 'profiles/profile_detail.html', {'post': post})\n\n@login_required\ndef profile_mydetail(request):\n email = request.user.email\n post = UserProfile.objects.get(email = email)\n return render(request, 'profiles/profile_mydetail.html', {'post': post})\n\n@login_required\ndef profile_edit(request):\n email = request.user.email\n post = UserProfile.objects.get(email = email)\n if request.method == \"POST\":\n\n #form = UserProfileForm(request.POST, instance=post)\n post.name = request.POST[\"name\"]\n post.text = request.POST[\"text\"]\n post.major = request.POST[\"major\"]\n 
post.grade = request.POST[\"grade\"]\n post.save()\n # if form.is_valid():\n # post = form.save(commit=False)\n # post.save()\n # return redirect('profile_mydetail')\n return redirect('profile_mydetail')\n else:\n form = UserProfileForm(instance=post)\n return render(request, 'profiles/profile_edit.html', {'form': form})\n" }, { "alpha_fraction": 0.6743002533912659, "alphanum_fraction": 0.6984732747077942, "avg_line_length": 36.42856979370117, "blob_id": "a2aa49956b738304429752dd6a95257f83a51333", "content_id": "2bf6b6953840a2f85fb55106334e9b9c1db2999b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 786, "license_type": "no_license", "max_line_length": 109, "num_lines": 21, "path": "/tdu/profiles/models.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils import timezone\nfrom encrypted_fields import EncryptedTextField ,EncryptedEmailField,EncryptedCharField,EncryptedIntegerField\nclass UserProfile(models.Model):\n\n name = EncryptedCharField(max_length = 254)\n email = models.EmailField(max_length= 254 , default = '[email protected]')\n grade = models.CharField(max_length = 254)\n major = EncryptedCharField(max_length = 254)\n text = EncryptedTextField()\n # name = models.CharField(max_length = 20)\n # email = models.EmailField(max_length = 254,default='[email protected]')\n # grade = models.CharField(max_length = 5)\n # major = models.CharField(max_length = 5)\n # text = models.TextField()\n\n def publish(self):\n self.save()\n\n def __str__(self):\n return self.name\n" }, { "alpha_fraction": 0.5883905291557312, "alphanum_fraction": 0.5936675667762756, "avg_line_length": 41.11111068725586, "blob_id": "1bcb675b59a0fcd7ee939df9ab55f7f4d2d0396e", "content_id": "ead59f7074795dc7e6a64efe538a007af6f78eed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": 
"no_license", "max_line_length": 95, "num_lines": 9, "path": "/tdu/profiles/urls.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "#profilesの全てのviewをインポートするよ\nfrom django.conf.urls import include, url\nfrom . import views\n\nurlpatterns = [\n url(r'^detail/(?P<pk>[0-9]+)/$', views.profile_detail, name = 'profile_detail'),\n url(r'^edit/$', views.profile_edit, name='profile_edit'),\n url(r'^mydetail/$', views.profile_mydetail, name = 'profile_mydetail'),\n ]\n" }, { "alpha_fraction": 0.8561643958091736, "alphanum_fraction": 0.8561643958091736, "avg_line_length": 28.200000762939453, "blob_id": "4a6bf6953f42f46ce7d468e5163a6618bdca1fc2", "content_id": "92db4257dcc302f2a48db9ea6cc9e9b8b053c3a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 56, "num_lines": 5, "path": "/tdu/profiles/admin.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import UserProfile #UserProfileモデルをインポート\n\n\nadmin.site.register(UserProfile) #モデルをadminページで見るにはこれで登録\n" }, { "alpha_fraction": 0.8405796885490417, "alphanum_fraction": 0.8405796885490417, "avg_line_length": 28.571428298950195, "blob_id": "5027c13fc9007a672a7de13319d458e72e112fb9", "content_id": "567919ddc98186ccc467bc52f2e339448e9c54d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 42, "num_lines": 7, "path": "/tdu/keijiban/admin.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom keijiban.models import Posting\nfrom keijiban.models import PostingSubject\n\n# Register your models here.\nadmin.site.register(Posting)\nadmin.site.register(PostingSubject)\n" }, { "alpha_fraction": 0.5910290479660034, "alphanum_fraction": 0.6068601608276367, 
"avg_line_length": 24.266666412353516, "blob_id": "7eee44a9fd7bfd0180b323484d8a4336173f62b9", "content_id": "eca42ccaa5b65c9aadf6238b3dbdbd95fa5896c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 379, "license_type": "no_license", "max_line_length": 73, "num_lines": 15, "path": "/tdu/polls/urls.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# coding: UTF-8\nfrom django.conf.urls import url\n\nfrom polls import views\n\nurlpatterns = [\n url(r'^$', views.poll_list, name = 'poll_list'),\n\n url(r'^poll/(?P<pk>[0-9]+)/$', views.poll_detail, name = 'poll_detail'),\n # ex: /polls/5/\n # ex: /polls/5/results/\n url(r'^vote/$', views.vote, name='vote'),\n # ex: /polls/5/vote/\n #url(r'^result/$',views.result,name='result'),\n]\n" }, { "alpha_fraction": 0.5694668889045715, "alphanum_fraction": 0.591276228427887, "avg_line_length": 29.19512176513672, "blob_id": "259c0b84ddd4cb8185d86bf52f46d3cbe04a311b", "content_id": "7da3611b67914e31cdf1b0d1f8edc54051b3b417", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1238, "license_type": "no_license", "max_line_length": 97, "num_lines": 41, "path": "/tdu/profiles/migrations/0003_auto_20170703_1040.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-07-03 10:40\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\nimport encrypted_fields.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profiles', '0002_userprofile_email'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userprofile',\n name='email',\n field=encrypted_fields.fields.EncryptedEmailField(default='[email protected]', max_length=254),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='grade',\n field=encrypted_fields.fields.EncryptedCharField(max_length=5),\n ),\n 
migrations.AlterField(\n model_name='userprofile',\n name='major',\n field=encrypted_fields.fields.EncryptedCharField(max_length=5),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='name',\n field=encrypted_fields.fields.EncryptedCharField(max_length=20),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='text',\n field=encrypted_fields.fields.EncryptedTextField(),\n ),\n ]\n" }, { "alpha_fraction": 0.6314483880996704, "alphanum_fraction": 0.6349206566810608, "avg_line_length": 36, "blob_id": "c12b3be55c3df0a40d3dcb3853ef9eed3c54f2d2", "content_id": "f3c66bc28c6a5b3beb3ee2fcf7608efab45aeadd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4474, "license_type": "no_license", "max_line_length": 130, "num_lines": 109, "path": "/tdu/accounts/forms.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_email\nfrom django.http import HttpResponse\nfrom pprint import pprint\n\nclass RegisterForm(UserCreationForm):\n\n\n #Djangoデフォルトログインではusernameとpasswordでログインするため\n #今回はfirst_name をユーザーネームとして扱う\n #username = email アドレスと考える\n #required = Trueで登録時必須にする\n # first_name = forms.CharField(label=\"ユーザーネーム\", required=True)\n\n class Meta:\n model = User\n fields = (\n \"username\",\"password1\",\"password2\",\n \"email\", \"first_name\",\n )\n\n\n\n def __init__(self, *args,**kwargs):\n super().__init__(*args, **kwargs)\n self.fields['username'].widget.attrs['class'] = 'form-control'\n self.fields['username'].widget.attrs['placeholder'] = 'メールアドレス'\n\n # self.fields['first_name'].widget.attrs['class'] = 'form-control'\n # self.fields['first_name'].widget.attrs['placeholder'] = 
'ユーザーネーム'\n\n self.fields['password1'].widget.attrs['class'] = 'form-control'\n self.fields['password1'].widget.attrs['placeholder'] = 'パスワード'\n\n self.fields['password2'].widget.attrs['class'] = 'form-control'\n self.fields['password2'].widget.attrs['placeholder'] = 'パスワード(確認)'\n\n\n def clean_username(self):\n username = self.cleaned_data[\"username\"]\n atmark = username.find('@')\n string = username.find(\"dendai.ac.jp\")\n\n if(atmark < 0):\n raise ValidationError(\"正しいメールアドレスを指定してください。\")\n\n if(atmark > string and string < 0):\n raise ValidationError(\"電大メールを入力してください\")\n\n # try:\n # validate_email(username)\n # except ValidationError:\n # raise ValidationError(\"正しいメールアドレスを指定してください。\")\n\n try:\n self.user = User.objects.get(username=username)\n except User.DoesNotExist:\n return username\n else:\n raise ValidationError(\"既に存在するメールアドレスです。\")\n\n\nclass LoginForm(AuthenticationForm):\n #ログインフォーム作成\n #username = email と考える\n\n # def __init__(self, *args, **kwargs):\n # super().__init__(*args, **kwargs)\n # self.fields['username'].widget.attrs['class'] = 'form-control'\n # self.fields['username'].widget.attrs['placeholder'] = 'メールアドレス'\n #\n # self.fields['password'].widget.attrs['class'] = 'form-control'\n # self.fields['password'].widget.attrs['placeholder'] = 'パスワード'\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['username'].widget.attrs['class'] = 'form-control'\n self.fields['username'].widget.attrs['placeholder'] = 'メールアドレス'\n\n self.fields['password'].widget.attrs['class'] = 'form-control'\n self.fields['password'].widget.attrs['placeholder'] = 'パスワード'\n\n\nclass ForgetPasswordForm(PasswordResetForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['email'].widget.attrs['class'] = 'form-control'\n self.fields['email'].widget.attrs['placeholder'] = 'メールアドレス'\n\n\nclass ChangePasswordForm(PasswordChangeForm):\n def __init__(self, *args, **kwargs):\n 
super().__init__(*args, **kwargs)\n self.fields['new_password1'].widget.attrs['class'] = 'form-control'\n self.fields['new_password2'].widget.attrs['class'] = 'form-control'\n self.fields['old_password'].widget.attrs['class'] = 'form-control'\n\n\nclass PasswordConfirmForm(SetPasswordForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['new_password1'].widget.attrs['class'] = 'form-control'\n self.fields['new_password1'].widget.attrs['placeholder'] = '新パスワード'\n self.fields['new_password2'].widget.attrs['class'] = 'form-control'\n self.fields['new_password2'].widget.attrs['placeholder'] = '新パスワード(確認)'" }, { "alpha_fraction": 0.5595930218696594, "alphanum_fraction": 0.6148256063461304, "avg_line_length": 25.461538314819336, "blob_id": "ba1bd45152863321b19d1bc8757afb9344ac9faa", "content_id": "99d12b79a36a66568b0d63fbbd4cdbfe4134d329", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 688, "license_type": "no_license", "max_line_length": 77, "num_lines": 26, "path": "/tdu/profiles/migrations/0005_auto_20170703_1142.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2017-07-03 11:42\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\nimport encrypted_fields.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profiles', '0004_auto_20170703_1043'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userprofile',\n name='grade',\n field=encrypted_fields.fields.EncryptedCharField(max_length=254),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='major',\n field=encrypted_fields.fields.EncryptedCharField(max_length=254),\n ),\n ]\n" }, { "alpha_fraction": 0.5409395694732666, "alphanum_fraction": 0.5503355860710144, "avg_line_length": 22.650793075561523, "blob_id": "b3049202c2f1a834e28c9051d3d424120bfa8b5b", "content_id": 
"24b7ee6445a34d4fb316fb6bfc163657d7adbe4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1608, "license_type": "no_license", "max_line_length": 56, "num_lines": 63, "path": "/tdu/home/views.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.shortcuts import redirect\n\nfrom keijiban.models import Posting\nfrom polls.models import Poll\nfrom .models import Contact\nfrom .forms import ContactForm\n\n# Create your views here.\n\ndef show(request):\n #最新スレッド5件を表示する処理\n #全てのデータを1つのlistに集約\n posts_list = Posting.objects.order_by('-created_at')\n db_poll = Poll.objects.all()\n pk_list = [\"0\"]\n for post in posts_list:\n for db_post in db_poll:\n if post.subject == db_post.subname:\n pk_list.append(db_post.pk)\n\n #科目名が同じものを取り除く\n pk_list.pop(0)\n i = 0\n n = len(pk_list)\n while i < n:\n j = 0\n while j < i:\n if pk_list[j] == pk_list[i]:\n pk_list.pop(i)\n n = n - 1\n i = 0\n break\n j = j + 1\n i = i + 1\n\n #5件までにまとめる\n n = len(pk_list)\n count = 5\n if n > 5:\n while count < n:\n pk_list.pop(count)\n n = n - 1\n\n #お問い合わせフォーム処理\n form = ContactForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n return redirect('home:contact')\n\n contexts = {\n 'posts_list': posts_list,\n 'db_poll': db_poll,\n 'pk_list': pk_list,\n 'form':form,\n }\n\n return render(request,'home/home.html', contexts)\n\ndef contact(request):\n return render(request, 'home/contact.html')\n" }, { "alpha_fraction": 0.4653465449810028, "alphanum_fraction": 0.4752475321292877, "avg_line_length": 27.40625, "blob_id": "0154d7206ca9678e79324a79287a803dab001d4b", "content_id": "44e8d64ef6f84d977d45b0f588cad0c9664e9723", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 975, "license_type": "no_license", "max_line_length": 87, "num_lines": 32, 
"path": "/tdu/accounts/templates/accounts/password_reset_confirm.html", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}\n{% block content %}\n<div class=\"container\">\n <div align=\"center\">\n <h1>パスワードリセット</h1>\n <p class=\"lead\">新パスワードを入力してください</p>\n </div>\n\n <div class=\"row\">\n <div class=\"col-sm-10 col-sm-offset-1\">\n <div class=\"card-group\">\n <div class=\"card card-outline-primary\">\n <div class=\"card-block\">\n  <form action=\"\" method=\"POST\">\n <div class=\"form-group\">\n {{ form.new_password1 }}\n {{ form.new_password1.errors }}\n </div>\n <div class=\"form-group\">\n {{ form.new_password2 }}\n {{ form.new_password2.errors }}\n </div>\n    {% csrf_token %}\n    <button type=\"submit\" class=\"btn btn-primary btn-lg btn-block\">送信</button>\n </form>\n </div>\n </div>\n </div>\n  </div>\n </div>\n</div>\n{% endblock %}\n" }, { "alpha_fraction": 0.6268656849861145, "alphanum_fraction": 0.6702849268913269, "avg_line_length": 32.5, "blob_id": "12a200baa3535bb69d6875709521871c6e57c215", "content_id": "e965226168f14a9bf8c873b58252865a88e4ea08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 781, "license_type": "no_license", "max_line_length": 53, "num_lines": 22, "path": "/tdu/timetable/models.py", "repo_name": "tduproject/kagikko2", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom django.db import models\n\nclass Timetable1(models.Model):\n username = models.CharField('名前', max_length=255)\n day = models.CharField('曜日', max_length=255)\n time = models.CharField('時間', max_length=255)\n sub = models.CharField('科目名', max_length=255)\n when = models.CharField('時期', max_length=255)\n\n def __str__(self):\n return self.username\n\nclass Timetable2(models.Model):\n username = models.CharField('名前', max_length=255)\n day = models.CharField('曜日', max_length=255)\n time = models.CharField('時間', max_length=255)\n sub = 
models.CharField('科目名', max_length=255)\n when = models.CharField('時期', max_length=255)\n\n def __str__(self):\n return self.username\n" } ]
35
Champ2k/Dota2-Guide
https://github.com/Champ2k/Dota2-Guide
27d550f1e88cf67ef02454287ed493a5cdd0e124
4bfce25144e75196f99fd1118f0a68b2e52d9095
fb3fa422e42f1a1106905282f1012dc05119eb2c
refs/heads/main
"2023-02-09T15:53:14.068880"
"2020-12-26T10:59:57"
"2020-12-26T10:59:57"
318,132,804
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6857143044471741, "alphanum_fraction": 0.7171428799629211, "avg_line_length": 25.076923370361328, "blob_id": "2b631d4315c3d99973edb50fb6c818716def803d", "content_id": "07cbad588ed91f532e023c6a4d590d6699ae4d98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 93, "num_lines": 13, "path": "/manipulate_data/import_db.py", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nfrom pymongo import MongoClient\r\nimport json\r\n\r\n\r\nclient = MongoClient(\"mongodb+srv://poom123456789:[email protected]/Dota2?retryWrites=true&w=majority\")\r\n\r\ndb = client['Dota2']\r\ncoll = db['heroimgs']\r\ndata = pd.read_csv('../data/hero_img.csv')\r\npayload = json.loads(data.to_json(orient='records'))\r\ncoll.insert_many(payload)\r\ncoll.count()" }, { "alpha_fraction": 0.7002801299095154, "alphanum_fraction": 0.7198879718780518, "avg_line_length": 30.5, "blob_id": "93626a1217b755ec39086788eb47a0d7f828f093", "content_id": "5c505f2ba7ef7fd5f7c921fe38903fe16713f2b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1071, "license_type": "no_license", "max_line_length": 394, "num_lines": 34, "path": "/README.md", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "# Dota2-Guide\n\n## **Description**\n<p> This application provides a data of all heroes in Dota2. Have a feature to compare hero Winrate, Pickrate, Basic Status and also provide line graph for easy to understand. The Main feature of this application is match analysis that using matches data to analyse the hero timing (early-game,mid-game,late-game) we also provide pie graph for match analysis to make it easy understanding. </p>\n\n---\n## **Prerequisites**\n- [Node.js](https://nodejs.org/en/) (ver. 
12 or newer) \n- [MongoDB](https://www.mongodb.com/1)\n\n## **Running On Local**\n### **Frontend**\n1. Go to directory *frontend/dota2_guide_react*\n2. Run the following command for install all requirements\n```\nnpm install\n```\n3. Run the following command for start server\n```\nnpm start\n```\n### The website will run on http://localhost:3000/\n\n### **Backend**\n1. Go to directory *api/dota2*\n2. Run the following command for install all requirements\n```\nnpm install\n```\n3. Run the following command for start server\n```\nnpm start\n```\n### The api will run on http://localhost:1337/\n" }, { "alpha_fraction": 0.30690231919288635, "alphanum_fraction": 0.3135896921157837, "avg_line_length": 54.83333206176758, "blob_id": "a894869848195fd4356b0a2daa2b9655329b2752", "content_id": "9e42b509164e8d785e78484856cd1fbb27f450e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8374, "license_type": "no_license", "max_line_length": 124, "num_lines": 150, "path": "/manipulate_data/json_to_csv.py", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "import pandas\nimport json\nimport csv\n\ncompression_opt = dict(method='zip', archive_name='hero_ability_with_img_v3.csv')\n\nwith open('../data/hero_ability_with_img_v3.json', encoding=\"utf8\") as data:\n raw = json.loads(data.read())\nhero_ability = pandas.json_normalize(raw)\ndf = pandas.DataFrame(raw)\n\n\"\"\"\nJson with all stat and abilities\n\"\"\"\n\ndrop_unused_columns = hero_ability.drop(columns=['roles',\n 'language.hype',\n 'stat.gameVersionId',\n 'stat.enabled',\n 'stat.heroUnlockOrder',\n 'stat.team',\n 'stat.cmEnabled',\n 'stat.newPlayerEnabled',\n 'stat.attackType',\n 'stat.hpBarOffset',\n 'stat.visionDaytimeRange',\n 'stat.visionNighttimeRange',\n 'stat.complexity',\n 'language.heroId',\n 'language.gameVersionId',\n 'language.languageId',\n 'language.displayName',\n 'language.bio',\n 'language.hype',\n 'ability7',\n 'ability8',\n 
'ability9',\n 'ability10',\n 'ability11',\n 'ability12',\n 'ability13',\n 'ability14',\n 'ability15',\n 'ability16',\n ], axis=1)\n\nrename_columns = drop_unused_columns.rename(columns={'stat.startingArmor' : 'startingArmor',\n 'stat.startingMagicArmor' : 'startingMagicArmor',\n 'stat.startingDamageMin' : 'startingDamageMin',\n 'stat.startingDamageMax' : 'startingDamageMax',\n 'stat.attackRate' : 'attackRate',\n 'stat.attackAnimationPoint' : 'attackAnimationPoint',\n 'stat.attackAcquisitionRange' : 'attackAcquisitionRange',\n 'stat.attackRange' : 'attackRange',\n 'stat.primaryAttribute' : 'primaryAttribute',\n 'stat.heroPrimaryAttribute' : 'heroPrimaryAttribute',\n 'stat.strengthBase' : 'strengthBase',\n 'stat.strengthGain' : 'strengthGain',\n 'stat.intelligenceBase' : 'intelligenceBase',\n 'stat.intelligenceGain' : 'intelligenceGain',\n 'stat.agilityBase' : 'agilityBase',\n 'stat.agilityGain' : 'agilityGain',\n 'stat.hpRegen' : 'hpRegen',\n 'stat.mpRegen' : 'mpRegen',\n 'stat.moveSpeed' : 'moveSpeed',\n 'stat.moveTurnRate' : 'moveTurnRate',\n })\n\nto_csv_file = rename_columns.to_csv('../data/hero_ability_with_img_v3.csv', index=False)\nto_csv_file = rename_columns.to_csv('../data/hero_ability_with_img_v3.zip', index=False, compression=compression_opt)\n\n\"\"\"\nJson with name and img\n\"\"\"\n\n# drop_unused_columns = hero_ability.drop(columns=['roles',\n# 'language.hype',\n# 'stat.gameVersionId',\n# 'stat.enabled',\n# 'stat.heroUnlockOrder',\n# 'stat.team',\n# 'stat.cmEnabled',\n# 'stat.newPlayerEnabled',\n# 'stat.attackType',\n# 'stat.hpBarOffset',\n# 'stat.visionDaytimeRange',\n# 'stat.visionNighttimeRange',\n# 'stat.complexity',\n# 'language.heroId',\n# 'language.gameVersionId',\n# 'language.languageId',\n# 'language.displayName',\n# 'language.bio',\n# 'language.hype',\n# 'ability7',\n# 'ability8',\n# 'ability9',\n# 'ability10',\n# 'ability11',\n# 'ability12',\n# 'ability13',\n# 'ability14',\n# 'ability15',\n# 'ability16',\n# \"heroId\",\n# 
\"gameVersionId\",\n# \"ability1\",\n# \"ability2\",\n# \"ability3\",\n# \"ability4\",\n# \"ability5\",\n# \"ability6\",\n# \"talents1\",\n# \"talents2\",\n# \"talents3\",\n# \"talents4\",\n# \"talents5\",\n# \"talents6\",\n# \"talents7\",\n# \"talents8\",\n# 'stat.startingArmor',\n# 'stat.startingMagicArmor',\n# 'stat.startingDamageMin',\n# 'stat.startingDamageMax',\n# 'stat.attackRate',\n# 'stat.attackAnimationPoint',\n# 'stat.attackAcquisitionRange',\n# 'stat.attackRange',\n# 'stat.primaryAttribute',\n# 'stat.heroPrimaryAttribute',\n# 'stat.strengthBase',\n# 'stat.strengthGain',\n# 'stat.intelligenceBase',\n# 'stat.intelligenceGain',\n# 'stat.agilityBase',\n# 'stat.agilityGain',\n# 'stat.hpRegen',\n# 'stat.mpRegen',\n# 'stat.moveSpeed',\n# 'stat.moveTurnRate',\n# 'name',\n# 'shortName',\n# 'aliases',\n# ], axis=1)\n\n# rename = drop_unused_columns.rename(columns={'displayName':'name'})\n\n\n# to_csv_file = rename.to_csv('hero_img.csv', index=False)\n# to_csv_file = drop_unused_columns.to_csv('../data/hero_ability_with_img_v3.zip', index=False, compression=compression_opt)" }, { "alpha_fraction": 0.5632582306861877, "alphanum_fraction": 0.5655690431594849, "avg_line_length": 28.84482765197754, "blob_id": "e075f99864b08ec368140cbea73d533cc730c096", "content_id": "ea9dcfec29c4d7d5804200188f092ccbd020f1f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3462, "license_type": "no_license", "max_line_length": 88, "num_lines": 116, "path": "/frontend/dota2_guide_react/src/App.js", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "// import 'bootstrap/dist/css/bootstrap.min.css';\nimport './App.css';\nimport Image from './components/image'\nimport Rate from './components/rate'\nimport Attribute from './components/attribute'\nimport Infomation from './components/infomation'\nimport Navigation from './components/navigation'\nimport React, { useState, useEffect } from 'react';\n\n\nimport { 
BrowserRouter, Route} from 'react-router-dom';\n\nfunction App() {\n const [firstHeroName, setFirstHeroName] = useState(\"Anti-Mage\");\n const [secondHeroName, setSecondHeroName] = useState(\"Anti-Mage\");\n const [allLevel, setAllLevel] = useState([]);\n const [level, setLevel] = useState(1);\n const [HeroesName, setHeroesName] = useState([]);\n useEffect (() =>{\n async function fetchData() {\n const heroResponse = await fetch(`http://localhost:1337/characters`,{\n method:'GET',\n });\n const heroDataJson = await heroResponse.json();\n let levelList = [];\n for(let i =1;i<31; i++){\n levelList.push(i)\n }\n setAllLevel(levelList)\n setHeroesName(heroDataJson)\n }\n fetchData();\n }, [])\n\n function handleChangeFirstHero(event) {\n setFirstHeroName(event.target.value)\n }\n function handleChangeSecondHero(event) {\n setSecondHeroName(event.target.value)\n }\n function handleChangeLevel(event) {\n setLevel(event.target.value)\n }\n return (\n <BrowserRouter>\n <div id=\"app\" className=\"App\">\n <Navigation />\n <Route path=\"/\" exact component={Image}/>\n <Route path=\"/rate\" >\n <select name=\"firstHero\" id=\"selecFirstHero\" onChange={handleChangeFirstHero}>\n {HeroesName.map((data) => (\n <option \n value={data.displayName} \n key={data.displayName} \n >\n {data.displayName}\n </option>\n ))\n }\n </select>\n <select name=\"secondHero\" id=\"selecSecondHero\" onChange={handleChangeSecondHero}>\n {HeroesName.map((data) => (\n <option \n value={data.displayName} \n key={data.displayName} \n >\n {data.displayName}\n </option>\n ))\n }\n </select>\n <Rate firstHero={firstHeroName} secondHero={secondHeroName}/>\n </Route>\n <Route path=\"/attribute\">\n <select name=\"firstHero\" id=\"selecFirstHero\" onChange={handleChangeFirstHero}>\n {HeroesName.map((data) => (\n <option \n value={data.displayName} \n key={data.displayName} \n >\n {data.displayName}\n </option>\n ))}\n </select>\n <select name=\"secondHero\" id=\"selecSecondHero\" 
onChange={handleChangeSecondHero}>\n {HeroesName.map((data) => (\n <option \n value={data.displayName} \n key={data.displayName} \n >\n {data.displayName}\n </option>\n ))\n }\n </select>\n <select name=\"level\" id=\"lvl\" onChange={handleChangeLevel}>\n {allLevel.map((data) => (\n <option \n value={data} \n key={data} \n >\n {data}\n </option>\n ))\n }\n </select>\n <Attribute level={level} firstHero={firstHeroName} secondHero={secondHeroName}/>\n </Route>\n <Route path=\"/infomation/:name\" level={level} component={Infomation}>\n </Route>\n </div>\n </BrowserRouter>\n );\n}\n\nexport default App;\n" }, { "alpha_fraction": 0.5928853750228882, "alphanum_fraction": 0.5944663882255554, "avg_line_length": 30.625, "blob_id": "b585d2ab239bdad1946c91e3ac8ef5a869e8c914", "content_id": "51b67d17dd47b41088460c2a79c09868f4653162", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1265, "license_type": "no_license", "max_line_length": 108, "num_lines": 40, "path": "/api/dota2/api/character/controllers/character.js", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "'use strict';\n\n/**\n * Read the documentation (https://strapi.io/documentation/v3.x/concepts/controllers.html#core-controllers)\n * to customize this controller\n */\nconst { sanitizeEntity } = require('strapi-utils');\n\nmodule.exports = {\n\n async find(ctx){\n let entities;\n \n ctx.query = {\n ...ctx.query,\n _limit: -1\n };\n \n if (ctx.query._q) {\n entities = await strapi.services.character.search(ctx.query);\n } else {\n entities = await strapi.services.character.find(ctx.query);\n }\n \n return entities.map(entity => sanitizeEntity(entity, { model: strapi.models.character }));\n },\n \n async findOneByName(ctx) {\n const { name } = ctx.params;\n const entities = await strapi.query('character').model.find({'displayName' : name});\n return sanitizeEntity(entities, { model: strapi.models.character });\n },\n \n async 
searchHeroByPrimaryAttribute(ctx){\n const { primaryAttribute } = ctx.params;\n const entities = await strapi.query('character').model.find({'primaryAttribute': primaryAttribute});\n return sanitizeEntity(entities, { model: strapi.models.character });\n }\n\n};\n" }, { "alpha_fraction": 0.502281665802002, "alphanum_fraction": 0.5092053413391113, "avg_line_length": 42.8344841003418, "blob_id": "bd2f14fc2cc33882ed2223c3aa14f0e49f0c0f9c", "content_id": "ffcf3aade1d193e506cabc5b939fc73a89d8cb71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6355, "license_type": "no_license", "max_line_length": 213, "num_lines": 145, "path": "/frontend/dota2_guide_react/src/components/rate.js", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from 'react';\nimport Plot from 'react-plotly.js'\nimport { Row, Col } from 'react-bootstrap'\nimport '../style/rate.css'\n\n\nconst Rate = (props) => {\n const [firstHeroWinPickRateData, setFirstHeroWinPickRateData] = useState([])\n const [secondHeroWinPickRateData, setSecondHeroWinPickRateData] = useState([])\n const [keyOfJsonHeroWinPickRate, setKeyOfJsonHeroWinPickRate] = useState([])\n const [firstHeroWinRateFolatValue, setFirstHeroWinRateFolatValue] = useState([])\n const [secondHeroWinRateFolatValue, setSecondHeroWinRateFolatValue] = useState([])\n const [firstHeroPickRateFolatValue, setFirstHeroPickRateFolatValue] = useState([])\n const [secondHeroPickRateFolatValue, setSecondHeroPickRateFolatValue] = useState([])\n const [keyOfJsonHeroWinRate, setKeyOfJsonHeroWinRate] = useState([])\n const [keyOfJsonHeroPickRate, setKeyOfJsonHeroPickRate] = useState([])\n\n\n useEffect(() => {\n async function fetchData(){\n const firstHeroResponse = await fetch(`http://localhost:1337/characteristics/hero/${props.firstHero}`,{\n method:'GET',\n })\n const secondHeroResponse = await 
fetch(`http://localhost:1337/characteristics/hero/${props.secondHero}`,{\n method:'GET',\n })\n const firstHeroWinPickRateJson = await firstHeroResponse.json();\n const secondHeroWinPickRateJson = await secondHeroResponse.json();\n const firstHeroValue = Object.values(firstHeroWinPickRateJson[0]).slice(2,12);\n const secondHeroValue = Object.values(secondHeroWinPickRateJson[0]).slice(2,12);\n const keyOfHeroJson = Object.keys(firstHeroWinPickRateJson[0]).slice(2,12);\n let firstHeroWinFloatValue = [];\n let secondHeroWinFloatValue = [];\n let firstHeroPickFloatValue = [];\n let secondHeroPickFloatValue = [];\n let keyWordPickRate = [];\n let keyWordWinRate = [];\n for(let index =0; index < keyOfHeroJson.length;index++){\n if(index%2 === 0){\n firstHeroPickFloatValue.push(parseFloat(firstHeroValue[index]))\n secondHeroPickFloatValue.push(parseFloat(secondHeroValue[index]))\n keyWordPickRate.push(keyOfHeroJson[index])\n }else{\n firstHeroWinFloatValue.push(parseFloat(firstHeroValue[index]))\n secondHeroWinFloatValue.push(parseFloat(secondHeroValue[index]))\n keyWordWinRate.push(keyOfHeroJson[index])\n }\n }\n setKeyOfJsonHeroWinRate(keyWordWinRate)\n setFirstHeroWinRateFolatValue(firstHeroWinFloatValue)\n setSecondHeroWinRateFolatValue(secondHeroWinFloatValue)\n\n setKeyOfJsonHeroPickRate(keyWordPickRate)\n setFirstHeroPickRateFolatValue(firstHeroPickFloatValue)\n setSecondHeroPickRateFolatValue(secondHeroPickFloatValue)\n\n setFirstHeroWinPickRateData(firstHeroValue);\n setSecondHeroWinPickRateData(secondHeroValue);\n setKeyOfJsonHeroWinPickRate(keyOfHeroJson);\n }\n fetchData()\n },[props.firstHero, props.secondHero] )\n\n return (\n <div id=\"rate\">\n <Row>\n <Col>\n <table id=\"table\">\n <thead>\n <tr id=\"tr_1\">\n <th></th>\n <th>{props.firstHero}</th>\n <th>diff%</th>\n <th>{props.secondHero}</th>\n </tr>\n </thead>\n <tbody>\n {firstHeroWinPickRateData.map((data, index) => (\n <tr key={index}>\n <td>\n {keyOfJsonHeroWinPickRate[index]}\n </td>\n <td>\n 
{data}\n </td>\n <td style={{color: ((parseFloat(data) - parseFloat(secondHeroWinPickRateData[index])) < 0)?\"red\" :((parseFloat(data) - parseFloat(secondHeroWinPickRateData[index])) === 0)?\"initial\": \"green\"}}>\n {`${(Math.round((parseFloat(data) - parseFloat(secondHeroWinPickRateData[index]) + Number.EPSILON) * 100) / 100)}%`}\n </td>\n <td>\n {secondHeroWinPickRateData[index]}\n </td>\n </tr>\n ))}\n </tbody>\n </table>\n </Col>\n </Row>\n <Row>\n <Plot id=\"winRateGraph\"\n data={[\n {\n x: keyOfJsonHeroWinRate,\n y: firstHeroWinRateFolatValue,\n type: 'scatter',\n mode:\"lines + markers\",\n marker: {color: 'orange'},\n name:props.firstHero\n },\n {\n x: keyOfJsonHeroWinRate,\n y: secondHeroWinRateFolatValue,\n type: 'scatter',\n mode:\"lines + markers\",\n marker: {color: 'blue'},\n name:props.secondHero\n },\n ]}\n layout={ {width: 620, height: 540, title: 'WinRate Chart'} }\n />\n <Plot id=\"pickRateGraph\"\n data={[\n {\n x: keyOfJsonHeroPickRate,\n y: firstHeroPickRateFolatValue,\n type: 'scatter',\n mode:\"lines + markers\",\n marker: {color: 'orange'},\n name:props.firstHero\n },\n {\n x: keyOfJsonHeroPickRate,\n y: secondHeroPickRateFolatValue,\n type: 'scatter',\n mode:\"lines + markers\",\n marker: {color: 'blue'},\n name:props.secondHero\n },\n ]}\n layout={ {width: 620, height: 540, title: 'PickRate Chart'} }\n />\n </Row>\n </div>\n )\n}\nexport default Rate" }, { "alpha_fraction": 0.5387117862701416, "alphanum_fraction": 0.5491216778755188, "avg_line_length": 24.534482955932617, "blob_id": "aa9d23af5b0241d95b5fa116dd793f023d379b58", "content_id": "1966a3cfaac327cb6472ca2b563830f494c5ead4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1537, "license_type": "no_license", "max_line_length": 74, "num_lines": 58, "path": "/manipulate_data/add_to_json.py", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "import pandas\r\nimport json\r\nimport csv\r\nfrom scrapimage 
import get_all_images\r\n\r\nwith open('../data/hero_ability.json', encoding=\"utf8\") as data:\r\n raw = json.loads(data.read())\r\ndf = pandas.DataFrame(raw)\r\n\r\nurl = 'http://www.dota2.com/heroes/'\r\nimgs = get_all_images(url)\r\n\r\n\r\ndef add_ability():\r\n for i in range(0, len(df)):\r\n for slot in range(0, len(raw[i]['abilities'])):\r\n raw[i][f'ability{slot+1}'] = raw[i]['abilities'][slot]['name']\r\n del raw[i]['abilities']\r\n return\r\n\r\ndef add_talents():\r\n for i in range(0, len(df)):\r\n for slot in range(0, len(raw[i]['talents'])):\r\n raw[i][f'talents{slot+1}'] = raw[i]['talents'][slot]['name']\r\n del raw[i]['talents']\r\n return\r\n\r\ndef add_game_version():\r\n for i in range(0, len(df)):\r\n raw[i]['gameVersionId'] = '7.27d'\r\n return\r\n\r\ndef add_img():\r\n for i in range(0, len(df)):\r\n shot_name = raw[i]['shortName']\r\n for img in imgs:\r\n if shot_name in img:\r\n raw[i]['img'] = img\r\n return\r\n\r\ndef rename_id():\r\n for i in range(0, len(df)):\r\n try:\r\n raw[i]['heroId'] = raw[i]['id']\r\n del raw[i]['id']\r\n except:\r\n pass\r\n return \r\n\r\nrename_id()\r\ngame_version = add_game_version()\r\nability_name = add_ability()\r\ntalent_name = add_talents()\r\nhero_img = add_img()\r\n\r\njson_object = json.dumps(raw, indent = 4) \r\nwith open(\"../data/hero_ability_with_img_v3.json\", \"w\") as outfile: \r\n json_output = outfile.write(json_object)" }, { "alpha_fraction": 0.6589595079421997, "alphanum_fraction": 0.6647399067878723, "avg_line_length": 29.636363983154297, "blob_id": "d603f819768eec60a047323daf5bfebef1464d40", "content_id": "12e7f256ec8bc219488d5087db8e2a6833022b0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 346, "license_type": "no_license", "max_line_length": 60, "num_lines": 11, "path": "/manipulate_data/to_json.py", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport json\r\n\r\n# data = 
pd.read_csv('../data/hero_ability_with_img_v3.csv')\r\ndata = pd.read_csv('../data/hero_img.csv')\r\npayload = json.loads(data.to_json(orient='records'))\r\n\r\n\r\njson_object = json.dumps(payload, indent = 4) \r\nwith open(\"../data/hero_img.json\", \"w\") as outfile: \r\n json_output = outfile.write(json_object)" }, { "alpha_fraction": 0.5819805264472961, "alphanum_fraction": 0.5836039185523987, "avg_line_length": 29.799999237060547, "blob_id": "397684f7ae7bab2585f2ec44f603b32420a21b86", "content_id": "1ccac9579892472c878312f75817daef14b349fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1232, "license_type": "no_license", "max_line_length": 108, "num_lines": 40, "path": "/api/dota2/api/characteristics/controllers/characteristics.js", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "'use strict';\n\n/**\n * Read the documentation (https://strapi.io/documentation/v3.x/concepts/controllers.html#core-controllers)\n * to customize this controller\n */\nconst { sanitizeEntity } = require('strapi-utils');\n\nmodule.exports = {\n\n async find(ctx){\n let entities;\n \n ctx.query = {\n ...ctx.query,\n _limit: -1\n };\n \n if (ctx.query._q) {\n entities = await strapi.services.characteristics.search(ctx.query);\n } else {\n entities = await strapi.services.characteristics.find(ctx.query);\n }\n \n return entities.map(entity => sanitizeEntity(entity, { model: strapi.models.characteristics }));\n },\n \n async findOne(ctx) {\n const { id } = ctx.params;\n const entity = await strapi.services.characteristics.findOne({ id });\n return sanitizeEntity(entity, { model: strapi.models.characteristics });\n },\n \n async findOneByName(ctx) {\n const { Hero } = ctx.params;\n const entities = await strapi.query('characteristics').model.find({'Hero' : Hero});\n return sanitizeEntity(entities, { model: strapi.models.characteristics });\n },\n\n};\n" }, { "alpha_fraction": 0.563835620880127, 
"alphanum_fraction": 0.5726027488708496, "avg_line_length": 33.82352828979492, "blob_id": "64cbc89b5d9fde0b24eb7d851d36f8af9b2b1f50", "content_id": "7196a9f780e3af7ab319471aa61225b4b87d6ee2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1825, "license_type": "no_license", "max_line_length": 126, "num_lines": 51, "path": "/manipulate_data/add_ability_name_to_json.py", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "import pandas\r\nimport json\r\nimport csv\r\n\r\nn = 129\r\nwith open('../data/hero.json', encoding=\"utf8\") as hero:\r\n data_hero = json.loads(hero.read())\r\nwith open('../data/ability.json', encoding=\"utf8\") as ability:\r\n data_ability = json.loads(ability.read())\r\n\r\n\r\ndef add_talents_name(n):\r\n for i in range(0,n+1):\r\n try:\r\n for slot in range(0, len(data_hero[f'{i}']['talents'])+1): # loop in abilities to add name\r\n talent_ability_id = data_hero[f'{i}']['talents'][slot]['abilityId'] # abilityId\r\n talent_ability_name = data_ability[f'{talent_ability_id}']['language']['displayName'] # ability name\r\n data_hero[f'{i}']['talents'][slot]['name'] = talent_ability_name # add ability name in to talents in each slot\r\n except:\r\n pass\r\n\r\n\r\ndef add_abilities_name(n):\r\n for i in range(1,n+1):\r\n try:\r\n for slot in range(0, len(data_hero[f'{i}']['abilities'])+1): # loop in abilities to add name\r\n ability_id = data_hero[f'{i}']['abilities'][slot]['abilityId'] # abilityId\r\n ability_name = data_ability[f'{ability_id}']['name'] # ability name\r\n data_hero[f'{i}']['abilities'][slot]['name'] = ability_name # add ability name in to abilities in each slot\r\n except:\r\n pass\r\n\r\ndef create_json_list(n):\r\n list_data = []\r\n for i in range(1,n+1):\r\n try:\r\n json_list = data_hero[f'Hero'] = data_hero.pop(f'{i}')\r\n list_data.append(json_list)\r\n except:\r\n pass\r\n return 
list_data\r\n\r\n\r\nadd_talents_name(n)\r\nadd_abilities_name(n)\r\nlist_data = create_json_list(n)\r\n\r\n\r\njson_object = json.dumps(list_data, indent = 4) \r\nwith open(\"../data/hero_ability.json\", \"w\") as outfile: \r\n json_output = outfile.write(json_object)" }, { "alpha_fraction": 0.4314829707145691, "alphanum_fraction": 0.43872350454330444, "avg_line_length": 35.568626403808594, "blob_id": "f0f09ea57d3216f9f515b904a20c9c79667e657e", "content_id": "9c72fe4a0f7d39e75a8d7e6d1a8c03d35851fe94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3729, "license_type": "no_license", "max_line_length": 123, "num_lines": 102, "path": "/frontend/dota2_guide_react/src/components/image.js", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from 'react';\nimport '../style/image.css'\nimport { Link } from 'react-router-dom'\nimport { Row, Col } from 'react-bootstrap'\n\nconst Image = () => {\n const [heroAgiImg, setHeroAgiImg] = useState([])\n const [heroIntImg, setHeroIntImg] = useState([])\n const [heroStrImg, setHeroStrImg] = useState([])\n\n\n useEffect(() => {\n async function fetchData() {\n const heroAgiResponse = await fetch(`http://localhost:1337/characters/hero/attribute/agi`,{\n method:'GET',\n })\n const heroIntResponse = await fetch(`http://localhost:1337/characters/hero/attribute/int`,{\n method:'GET',\n })\n const heroStrResponse = await fetch(`http://localhost:1337/characters/hero/attribute/str`,{\n method:'GET',\n })\n const heroAgi= await heroAgiResponse.json();\n const heroInt = await heroIntResponse.json();\n const heroStr = await heroStrResponse.json();\n\n await heroAgi.sort(function(a,b){\n a = a.displayName.toLowerCase()\n b = b.displayName.toLowerCase()\n return a<b ? -1 : a>b ? 1 : 0;\n });\n await heroInt.sort(function(a,b){\n a = a.displayName.toLowerCase()\n b = b.displayName.toLowerCase()\n return a<b ? -1 : a>b ? 
1 : 0;\n });\n await heroStr.sort(function(a,b){\n a = a.displayName.toLowerCase()\n b = b.displayName.toLowerCase()\n return a<b ? -1 : a>b ? 1 : 0;\n });\n \n setHeroAgiImg(heroAgi)\n setHeroIntImg(heroInt)\n setHeroStrImg(heroStr)\n }\n fetchData();\n }, [])\n\n return (\n\n <div id=\"allImg\">\n <Row>\n <Col>\n <div>\n <h1 style={{\"color\": \"white\"}}>Strength</h1>\n {heroStrImg.map((data, index) => (\n <Link to={`/infomation/${data.displayName}`} key={index} id={data.displayName}>\n <div className=\"photo-wrapper\">\n <img className=\"photo\" alt={data.displayName} id={`HeroId${data.heroId}`} src={data.img}></img>\n </div>\n </Link>\n )\n )\n }\n </div>\n </Col>\n <Col>\n <div>\n <h1 style={{\"color\": \"white\"}}>Agility</h1>\n {heroAgiImg.map((data, index) => (\n <Link to={`/infomation/${data.displayName}`} key={index} id={data.displayName}>\n <div className=\"photo-wrapper\">\n <img className=\"photo\" alt={data.displayName} id={`HeroId${data.heroId}`} src={data.img}></img>\n </div>\n </Link>\n )\n )\n }\n </div>\n </Col>\n <Col>\n <div>\n <h1 style={{\"color\": \"white\"}} >Intelligence</h1>\n {heroIntImg.map((data, index) => (\n <Link to={`/infomation/${data.displayName}`} key={index} id={data.displayName}>\n <div className=\"photo-wrapper\">\n <img className=\"photo\" alt={data.displayName} id={`HeroId${data.heroId}`} src={data.img}></img>\n </div>\n </Link>\n )\n )\n }\n </div>\n </Col>\n </Row>\n\n </div>\n )\n}\n\nexport default Image" }, { "alpha_fraction": 0.517463207244873, "alphanum_fraction": 0.5303308963775635, "avg_line_length": 48.5, "blob_id": "f97781340761f709ec47c0abbbb295953c5b3115", "content_id": "582cf53632cabbf131b9103a7c1c648c1080c615", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1088, "license_type": "no_license", "max_line_length": 95, "num_lines": 22, "path": "/scraping/hero_meta.py", "repo_name": "Champ2k/Dota2-Guide", "src_encoding": "UTF-8", "text": "import re, 
requests\nimport pandas\nimport sys, json\nfrom bs4 import BeautifulSoup\n\nurl = 'https://www.dotabuff.com/heroes/meta'\nr = requests.get(url, headers={'user-agent': 'mozilla/5.0'})\nsoup = BeautifulSoup(r.content, 'html.parser')\ntable = soup('table')[0]\n\n\ncompression_opt = dict(method='zip', archive_name='hero_meta.csv')\nstats = pandas.read_html(str(table))[0].drop(columns={'Hero'})\nstats_rename_header = stats.rename(columns={\n 'Hero.1':'Hero', \n 'Win %': 'WinRateGuardian', 'Pick %': 'PickRateGuardian', \n 'Win %.1': 'WinRateArchon', 'Pick %.1': 'PickRateArchon',\n 'Win %.2': 'WinRateLegend', 'Pick %.2': 'PickRateLegend',\n 'Win %.3': 'WinRateAncient', 'Pick %.3': 'PickRateAncient',\n 'Win %.4': 'WinRateDivine', 'Pick %.4': 'PickRateDivine',\n })\nstats_rename_header.to_csv('hero_meta.zip', index=False, compression=compression_opt)" } ]
12
ArtFix13/Lecture_3.2_Task-1-2
https://github.com/ArtFix13/Lecture_3.2_Task-1-2
6e3950b5a0a86ab822229e796982c049004d10d1
16c2fb7be695471a0c4a5b14dc3518fc07c0fae1
0307b9a79a18642a722e2a556bbc3f9ce2e3ff46
refs/heads/master
"2021-01-08T08:54:49.946189"
"2020-02-22T14:26:14"
"2020-02-22T14:26:14"
241,977,340
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6322208046913147, "alphanum_fraction": 0.6386392712593079, "avg_line_length": 30.18000030517578, "blob_id": "fe6c686a3c586ab4dca97588c5d0df081c05d1f8", "content_id": "85180ff0a4695d11e5a435375a5ec68bba72ba48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1975, "license_type": "no_license", "max_line_length": 107, "num_lines": 50, "path": "/main.py", "repo_name": "ArtFix13/Lecture_3.2_Task-1-2", "src_encoding": "UTF-8", "text": "# Задача №1\n# Необходимо расширить функцию переводчика так, чтобы она принимала следующие параметры:\n#\n# Путь к файлу с текстом;\n# Путь к файлу с результатом;\n# Язык с которого перевести;\n# Язык на который перевести (по-умолчанию русский).\n# У вас есть 3 файла (DE.txt, ES.txt, FR.txt) с новостями на 3 языках: французском, испанском, немецком.\n# Функция должна взять каждый файл с текстом, перевести его на русский и сохранить результат в новом файле.\n\n\nimport requests\n# документация https://yandex.ru/dev/translate/doc/dg/reference/translate-docpage/\n\nAPI_KEY = 'trnsl.1.1.20190827T202940Z.2c49395d596e72e6.b347e4d8ce5e733c54bcbf89895db63e5841a947'\nURL = 'https://translate.yandex.net/api/v1.5/tr.json/translate'\n\ndef translate_it(path_file_text, path_file_result, from_lang, to_lang):\n \"\"\"\n https://translate.yandex.net/api/v1.5/tr.json/translate ?\n key=<API-ключ>\n & text=<переводимый текст>\n & lang=<направление перевода>\n & [format=<формат текста>]\n & [options=<опции перевода>]\n & [callback=<имя callback-функции>]\n :param to_lang:\n :return:\n \"\"\"\n\n with open(path_file_text, encoding='utf-8') as f:\n text = f.read()\n\n params = {\n 'key': API_KEY,\n 'text': text,\n 'lang': from_lang+'-'+to_lang,\n }\n\n response = requests.get(URL, params=params)\n json_ = response.json()\n\n with open(path_file_result, 'w', encoding='utf-8') as f:\n f.write(json_['text'][0])\n\n\nif __name__ == '__main__':\n translate_it('DE.txt', 'DE-RU.txt', 
'de', 'ru')\n translate_it('ES.txt', 'ES-RU.txt', 'es', 'ru')\n translate_it('FR.txt', 'FR-RU.txt', 'fr', 'ru')" } ]
1
priyamittal15/Analysing-ROC-curve-on-different-algorithm-
https://github.com/priyamittal15/Analysing-ROC-curve-on-different-algorithm-
4853c8c23a5c2e812500a670beba0ecb1ece4a8f
93d5b9ff3a3a509013ad2e35c17f0c2ce0920f32
4cc0747600d0865906c9424b2719cd914be93c1d
refs/heads/main
"2023-06-03T20:18:52.482495"
"2021-06-13T11:08:01"
"2021-06-13T11:08:01"
326,354,462
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8261421322822571, "alphanum_fraction": 0.836294412612915, "avg_line_length": 97.5, "blob_id": "11d4e4c7e085882727f8a6aec6b45f25e61dc3f1", "content_id": "f891ae35582bc27f45b670ad19e87afd4467d079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 788, "license_type": "no_license", "max_line_length": 175, "num_lines": 8, "path": "/README.md", "repo_name": "priyamittal15/Analysing-ROC-curve-on-different-algorithm-", "src_encoding": "UTF-8", "text": "# Analysing-ROC-curve-on-different-algorithm-\ncreating ROC curve to analyses different algorithm on python\nROC Curves summarize the trade-off between the true positive rate and false positive rate for a predictive model using different probability thresholds.\nPrecision-Recall curves summarize the trade-off between the true positive rate and the positive predictive value for a predictive model using different probability thresholds.\nROC curves are appropriate when the observations are balanced between each class, whereas precision-recall curves are appropriate for imbalanced datasets.\n\nSo i used some algorithm and find out their ROc Curve to analyse that algorithm smoothly.\nFor more excited projects connnect with me on linkdin: https://www.linkedin.com/in/priya-mittal-83560619b\n" }, { "alpha_fraction": 0.6873932480812073, "alphanum_fraction": 0.7037922739982605, "avg_line_length": 34.03658676147461, "blob_id": "710201aadd294ca59194fc8e9d37cb104935be37", "content_id": "629001cd5cfd4e3bc4fae67441e8cdb88680a648", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2927, "license_type": "no_license", "max_line_length": 117, "num_lines": 82, "path": "/Implementation(CODE).py", "repo_name": "priyamittal15/Analysing-ROC-curve-on-different-algorithm-", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\nfrom sklearn.neighbors import KNeighborsRegressor\ndataset = 
pd.read_csv(r'C:\\Users\\Priya Mittal\\Documents\\iris dataset01.csv')\ndataset.head()\n\nfeature_columns = ['sepal length in cm', 'sepal width in cm', 'sepal length in cm','sepal width in cm']\nX = dataset[feature_columns].values\ny = dataset['class'].values\n\nfrom sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\ny = le.fit_transform(y)\n\nfrom sklearn.model_selection import train_test_split \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=24) # 80% training and 20% test\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nfrom sklearn.model_selection import cross_val_score\nclassifier = KNeighborsClassifier(n_neighbors=15) # when (k==15).\nclassifier.fit(X_train, y_train)\ny_pred = classifier.predict(X_test)\n\n# print confusion matrix\ncm = confusion_matrix(y_test, y_pred)\ncm\n\naccuracy = accuracy_score(y_test, y_pred)*100\nprint('Accuracy = ' + str(round(accuracy, 2)) + ' %.')\nfrom sklearn.datasets import make_classification\nimport numpy as np\nX, Y = make_classification(n_samples=2000, n_classes=2, n_features=10, random_state=0\nrandom_state = np.random.RandomState(0)\nn_samples, n_features = X.shape\nX = np.c_[X, random_state.randn(n_samples, 200 * n_features)]\n\n# using Random forest:\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2,random_state=0)\nrf = RandomForestClassifier(max_features=5, n_estimators=500)\nrf.fit(X_train, Y_train)\n\n# using naive bayes:\nnb = GaussianNB()\nnb.fit(X_train, Y_train)\nr_probs = [0 for _ in range(len(Y_test))]\nrf_probs = rf.predict_proba(X_test)\nnb_probs = nb.predict_proba(X_test)\nrf_probs = rf_probs[:, 1]\nnb_probs = nb_probs[:, 1]\n\nfrom sklearn.metrics import roc_curve, roc_auc_score\nr_auc = roc_auc_score(Y_test, 
r_probs)\nrf_auc = roc_auc_score(Y_test, rf_probs)\nnb_auc = roc_auc_score(Y_test, nb_probs)\n\nprint('Random (chance) Prediction: AUROC = %.3f' % (r_auc))\nprint('Random Forest: AUROC = %.3f' % (rf_auc))\nprint('Naive Bayes: AUROC = %.3f' % (nb_auc))\n\nr_fpr, r_tpr, _ = roc_curve(Y_test, r_probs)\nrf_fpr, rf_tpr, _ = roc_curve(Y_test, rf_probs)\nnb_fpr, nb_tpr, _ = roc_curve(Y_test, nb_probs)\nimport matplotlib.pyplot as plt\n\nplt.plot(r_fpr, r_tpr, linestyle='--', label='Random prediction (AUROC = %0.3f)' % r_auc)\nplt.plot(rf_fpr, rf_tpr, marker='.', label='Random Forest (AUROC = %0.3f)' % rf_auc)\nplt.plot(nb_fpr, nb_tpr, marker='.', label='Naive Bayes (AUROC = %0.3f)' % nb_auc)\n\n# Title\nplt.title('ROC Plot')\n# Axis labels\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\n# Show legend\nplt.legend() # \n# Show plot\nplt.show()\n\n \n" } ]
2
robertokcanale/SofAR_Project
https://github.com/robertokcanale/SofAR_Project
b048c8018e809d1f04e69024315929c5643a9ff2
764269c0dd3d76d64cc581da82af3322201b7ce1
4aaf6289278411741d6269852ddc3edc897aabbd
refs/heads/master
"2020-12-14T00:34:15.710994"
"2020-02-21T10:50:32"
"2020-02-21T10:50:32"
243,072,228
1
0
null
"2020-02-25T18:35:08"
"2020-02-21T11:11:08"
"2020-02-25T14:20:18"
null
[ { "alpha_fraction": 0.6380743980407715, "alphanum_fraction": 0.6389496922492981, "avg_line_length": 26.865854263305664, "blob_id": "afd61d1171d49e45c697442e6c1a2a31fc2b4c5e", "content_id": "e3e03ba0438d4d82f33f284729116bcfd100c259", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2285, "license_type": "no_license", "max_line_length": 118, "num_lines": 82, "path": "/catkin_ws/src/labeled_slam/src/StateMachine/State_DRIVING.cpp", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#include \"State_DRIVING.h\"\n#include \"State_LISTENING.h\"\n#include \"StateMachine.h\"\n\n/**\n * @brief Constructor of State_DRIVING\n *\n * Calls service activate_driving (always, when this state is entered)\n * with a TRUE flag\n **/\nState_DRIVING::State_DRIVING(StateMachine* m)\n : client_activate_driving_(m->client_activate_driving_)\n{\n std_srvs::SetBool srv;\n srv.request.data = true;\n if (!ros::service::exists(\"activate_driving\", true )) //Info and return, if service does not exist\n {\n ROS_INFO(\"activate_driving service does not exist! Driving will not be activated.\");\n return;\n }\n client_activate_driving_->call(srv);\n}\n\n/**\n * @brief Destructor of State_DRIVING\n *\n * Calls service activate_driving (always, when this state is left)\n * with a FALSE flag\n **/\nState_DRIVING::~State_DRIVING()\n{\n std_srvs::SetBool srv;\n srv.request.data = false;\n if (!ros::service::exists(\"activate_driving\", true )) //Info and return, if service does not exist\n {\n ROS_INFO(\"activate_driving service does not exist! 
Driving can not be deactivated.\");\n return;\n }\n client_activate_driving_->call(srv);\n}\n\n/**\n * @brief Don't do anythin on command drive\n **/\nvoid State_DRIVING::drive(StateMachine* m)\n{\n ROS_INFO(\"Already in driving mode!\");\n}\n\n/**\n * @brief Switch to listening mode\n **/\nvoid State_DRIVING::listen(StateMachine* m)\n{\n ROS_INFO(\"Switching to listening mode\");\n m->change_state( new State_LISTENING() );\n delete this;\n}\n\n/**\n * @brief Don't do anythin on command go_to\n **/\nvoid State_DRIVING::go_to(StateMachine* m, string target)\n{\n ROS_INFO(\"Invalid Command 'go to' for driving mode. Expected to receive a listen command first (typing '1')\");\n}\n\n/**\n * @brief Don't do anything on command label\n **/\nvoid State_DRIVING::label(StateMachine* m, string label)\n{\n ROS_INFO(\"Invalid Command 'label' for driving mode. Expected to receive a listen command first (typing '1')\");\n}\n\n/**\n * @brief Don't do anything when goal reached is received\n **/\nvoid State_DRIVING::goal_reached(StateMachine* m)\n{\n ROS_INFO(\"Invalid message: Goal should not be reached, when in driving mode!\");\n}\n" }, { "alpha_fraction": 0.7612179517745972, "alphanum_fraction": 0.7612179517745972, "avg_line_length": 25, "blob_id": "a51d8480ecd039384e74d0c2e6f752fbaaea618d", "content_id": "44ee7c990d8d1ee0ddb0229450a9901372a6933a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 624, "license_type": "no_license", "max_line_length": 95, "num_lines": 24, "path": "/catkin_ws/src/labeled_slam/include/labeled_slam/StateMachine/State_DRIVING.h", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#ifndef STATE_DRIVING_H\n#define STATE_DRIVING_H\n\n#include \"BaseState.h\"\nclass StateMachine;\n\n/** @class State_DRIVING\n * @brief State class for the mode, when the robot is controlled manually using the smartwatch\n **/\nclass State_DRIVING : public 
BaseState\n{\npublic:\nState_DRIVING(StateMachine* m);\n~State_DRIVING();\nvirtual void drive(StateMachine* m);\nvirtual void listen(StateMachine* m);\nvirtual void go_to(StateMachine* m, string target);\nvirtual void label(StateMachine* m, string label);\nvirtual void goal_reached(StateMachine* m);\nprivate:\nros::ServiceClient* client_activate_driving_;\n};\n\n#endif //STATE_DRIVING_H\n" }, { "alpha_fraction": 0.6029306650161743, "alphanum_fraction": 0.6043602824211121, "avg_line_length": 30.08888816833496, "blob_id": "2da6d72b682bc4218894e6b9dfa04ff382d4bdc5", "content_id": "d4522b6f1ff256d621f7b1a2cc340eee377d8a54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2798, "license_type": "no_license", "max_line_length": 85, "num_lines": 90, "path": "/catkin_ws/src/labeled_slam/src/StateMachine/StateMachine.cpp", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#include \"StateMachine.h\"\n#include \"State_DRIVING.h\"\n#include \"State_GO_TO.h\"\n#include \"State_LISTENING.h\"\n#include <iostream>\n\n\n/**\n * @brief Constructor of StateMachine Class\n **/\nStateMachine::StateMachine(ros::ServiceClient* client_set_goal,\n ros::ServiceClient* client_set_label,\n ros::ServiceClient* client_activate_path_following,\n ros::ServiceClient* client_activate_driving)\n : state_(new State_DRIVING(this) )\n , client_set_goal_( client_set_goal )\n , client_set_label_( client_set_label )\n , client_activate_path_following_( client_activate_path_following )\n , client_activate_driving_( client_activate_driving )\n{\n}\n\n/**\n * @brief Destructor of StateMachine Class\n **/\nStateMachine::~StateMachine()\n{\n delete state_;\n}\n\n/**\n * @brief Callback-function interpreting command from command-recognition node\n *\n * Allowed commands are \"drive\", \"go to\", \"label\", \"listen\"\n * commands go_to and label are using an argument. 
For the other commands, the\n * argument is just ignored.\n * According function of the state_member are called on each command\n * Depending on the true object-type in the state_-variable, the function of one\n * specific state is called (POLYMORPHISM!)\n **/\nvoid StateMachine::callback_command(const labeled_slam::Command::ConstPtr& msg)\n{\n string command = msg->command;\n string argument = msg->argument;\n\n if (msg->command.compare(\"drive\") == 0) //strings are equal!\n {\n state_->drive(this);\n }\n else if (msg->command.compare(\"listen\") == 0) //strings are equal!\n {\n state_->listen(this);\n }\n else if (msg->command.compare(\"go to\") == 0) //strings are equal!\n {\n state_->go_to(this, msg->argument);\n }\n else if (msg->command.compare(\"label\") == 0) //strings are equal!\n {\n state_->label(this, msg->argument);\n }\n else\n {\n ROS_INFO(\"wrong command: %s\", command.c_str());\n ROS_INFO(\"Allowed commands are 'drive', 'go to', 'label', 'listen'\");\n }\n}\n\n/**\n * @brief Callback-function for the subscribed topic goal_reached\n **/\nvoid StateMachine::callback_goal_reached(const std_msgs::Bool::ConstPtr& msg)\n{\n if(msg->data == true) // A boolean with value TRUE must be sent\n {\n state_->goal_reached(this);\n }\n}\n\n/**\n * @brief Change the state of state StateMachine\n *\n * Important: needs to be called with new new_state\n * To avoid memory leaks, call delete(this) after calling this function\n * from the old state\n **/\nvoid StateMachine::change_state(BaseState * state)\n{\n state_ = state;\n}\n" }, { "alpha_fraction": 0.7251356244087219, "alphanum_fraction": 0.7341772317886353, "avg_line_length": 24.136363983154297, "blob_id": "e373509196d0c6aae5aa09fb2fbc3b8394a9a747", "content_id": "83105b0c7b184a63c4d21087474d18e9f0d3e8ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 553, "license_type": "no_license", "max_line_length": 107, "num_lines": 22, "path": 
"/catkin_ws/src/labeled_slam/include/labeled_slam/StateMachine/BaseState.h", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#ifndef BASE_STATE_H\n#define BASE_STATE_H\n\n#include \"ros/ros.h\"\n#include <string>\nusing namespace std;\nclass StateMachine;\n\n/** @class BaseState\n * @brief Base class for all the specific state classes. Contains public interface of all derived classes.\n **/\nclass BaseState\n{\npublic:\nvirtual void drive(StateMachine* m) = 0;\nvirtual void listen(StateMachine* m) = 0;\nvirtual void go_to(StateMachine* m, string target) = 0;\nvirtual void label(StateMachine* m, string label) = 0;\nvirtual void goal_reached(StateMachine* m) = 0;\n};\n\n#endif // BASE_STATE_H\n" }, { "alpha_fraction": 0.7606146931648254, "alphanum_fraction": 0.7688897848129272, "avg_line_length": 45.99536895751953, "blob_id": "a9a9275199b1d1901bc3ca7719976d0c31061748", "content_id": "4acd08b0e58dd1dccfdabad32bc78772c20dd211", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 10153, "license_type": "no_license", "max_line_length": 796, "num_lines": 216, "path": "/README.md", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "# Hands on with SLAM\n\n## Objective of the Project\n\nThis project aims to enhance the capabilities of a Husqvarna automower by combining Simultaneous Localization and Mapping (SLAM) and voice recognition to provide labeling of points in a generated map of the environment. The map can later be traversed by setting labeled points as desired goals and using a path planning algorithm to reach those points starting from the current estimated position of the robot.\n\nThe robot is expected to navigate an area, through smartwatch teleoperation, and acquire labels for objects when a voice command is sent. 
Then, it is expected to move between labeled objects once voice commands are given through the microphone.\n\nThis project has been developed using the [Robot Operating System (ROS)](https://www.ros.org/).\n\nWe have used the following hardware components:\n* Husqvarna 430x automower with a Raspberry Pi installed as explained in section 7.3 [here](https://github.com/HusqvarnaResearch/hrp/blob/master/Startup%20Guide%20HRP.pdf)\n* LG Smart Watch\n* Microsoft Kinect for Xbox 360 attached to the top of the automower\n* Microphone\n* Wi-Fi router\n* PC running ROS Kinetic or Melodic on a Linux distribution\n\n## Architecture of the System\n\nThe architecture can be summarized using the following UML diagram:\n<p align=\"center\"> \n<img src=\"https://github.com/danieto98/SofAR_Project/blob/master/UML.png\">\n</p>\n\nThe Kinect driver (freenect_camera) provides an image, a depth image and the camera information for the device. All of this data is synched into a single topic using the rtabmap_ros/rgbd_sync nodelet. This is later fed to both the rtabmap and rgbd_odometry nodes. The latter computes odometry from the current image and point cloud visualized by the Kinect. The Husqvarna driver node (am_driver_legacy) provides odometry based on the model of the robot and the current speed of the wheels. These two odometry estimates are merged into a more robust estimate using the odometry_merge node. The results from this node and the synched Kinect data are fed into the rtabmap node which generates an estimate of the current map and computes the position of the robot as a tf using an RGBD SLAM approach.\n\nAt the same time, the command_recognition node listens to microphone and keyboard inputs and outputs valid commands to the logic node, which uses a state machine to change its behavior according to the given command. 
This node consequently publishes messages to either activator_1 or activator_2, which output the messages they receive as input to the velocity_forwarder node in case the received input from the logic node is true. The velocity_forwarder node lets all input messages through as velocity commands to the Husqvarna's driver (am_driver_legacy).\n\nThe logic node also sets labels to the current position upon command by issuing the set_label() service call to the rtabmap node. In case a \"go to\" command is issued, it uses the set_goal() service call to the rtabmap node instead, which will consequently output a path for the robot to follow from its current position to reach that goal. This path is used by the path_folower, who listens to the tf of the current position and outputs velocity commands (activated by the logic node using activator_1) needed to reach that goal.\n\nWhen not following a given path, the robot is controlled by using certain gestures captured by the smartwatch. The IMU data from the watch is received by the gb_controller node, which outputs velocity commands to the robot (activated by the logic node using activator_2).\n\n## Description of the System’s Architecture\n\nTODO\n\n## Notes for Us\nMaybe relevant for writing a path folower, receives TF and publishes geometry_msgs/Twists\nhttp://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20listener%20%28C%2B%2B%29\n\t\n## Installation and System Testing\n\n### Requirements\n\n#### ROS\n\nYou must have a working ROS installation. For this project to work, we recommend using either the ROS Kinetic Kame or Melodic Morenia distributions under Ubuntu 16 or 18 respectively; these are the ones we have used during our development and testing, there is no assurance everything will work under a different distribution. 
Make sure you install the full desktop version.\n\n* You can find instructions on how to download ROS Kinetic [here](http://wiki.ros.org/kinetic/Installation).\n* For downloading ROS Melodic, you can find instructions [here](http://wiki.ros.org/melodic/Installation).\n\n#### rtabmap_ros\n\nThis is a ROS wrapper for the rtabmap libary which we will be using for SLAM and path planning.\n\nIf you are a ROS Kinetic user, install it with:\n```\nsudo apt-get install ros-kinetic-rtabmap ros-kinetic-rtabmap-ros\n```\n\nIf you are a ROS Melodic user, use this one instead:\n```\nsudo apt-get install ros-melodic-rtabmap ros-melodic-rtabmap-ros\n```\n\n#### freenect_stack\n\nThis is a libfreenect-based ROS driver for the Microsoft Kinect.\nInstall both the library and the ROS stack using:\n```\nsudo apt-get install libfreenect-dev\nsudo apt-get install ros-kinetic-freenect-stack\n```\n\n#### Speech Recognition Library\n\nWhile installing the speech recognition library, make sure that you always use Python 2 (and thus pip2) instead of Python 3, as the latter is not supported by ROS Kinetic. As we are using a microphone as input, make sure you install PyAudio as well.\n\n##### ROS Melodic Users\nFor ROS Melodic users, you can find installation instructions for both of these libraries [here](https://pypi.org/project/SpeechRecognition/).\n\n##### ROS Kinetic Users\nFor ROS Kinetic users, the suggested installation and version requirements might not work. To avoid this problem, we suggest to:\n\n* Download PyAudio-0.2.11.tar.gz [here](https://pypi.org/project/PyAudio/).\n* Move the file to the the Python2.7 package folder (/usr/local/lib/python2.7/dist-packages), you will need sudo privileges. 
\n* In that directory, run:\n```\ntar xvf PyAudio-0.2.11.tar.gz\ncd PyAudio-0.2.11\nsudo python setup.py install\n```\n##### Installation Test\n\nIn order to test the installation as a Python package, input the following command:\n```\npython -m speech_recognition\n```\n\n### Installation\n\nFollow the instructions below for both the Raspberry Pi (if it is the first using it on the Husqvarna) and your PC.\nIf you have trouble accessing the Raspberry Pi, take a look at the [preliminary steps](#preliminary-steps) section.\n\n#### On the Raspberry Pi\n\nCreate a new catkin workspace on the home directory:\n```\nmkdir -p ~/catkin_ws/src\ncd ~/catkin_ws/\ncatkin_make\n```\n\nInstall Git Lfs to handle large files, if you haven't done so yet:\n```\ngit lfs install\n```\n\nNavigate to the src directory and clone the Husqvarna driver repository:\n```\ncd ~/catkin_ws/src\ngit clone https://github.com/HusqvarnaResearch/hrp\n```\n\nMake the catkin workspace:\n```\ncd ~/catkin_ws\ncatkin_make\n```\n\nSee which serial port is currently connected to the Husqvarna (it should be something similar to /dev/ttyACM0):\n```\nls /dev | grep tty\n```\nIf more than one serial port shows up, pick the most similar one to the one suggested above. 
You will have to test which one of them works later on and return here to perform the following step in case the first one you chose was not the one.\n\nEdit the launchfile and change the serial port to the one in use:\n```\nnano ~/catkin_ws/src/hrp/am_driver_legacy/launch/automower_legacy.launch\n```\n\n#### On your PC\n\nClone this repository:\n```\ncd ~\ngit clone --recurse-submodules -j8 https://github.com/danieto98/SofAR_Project.git\n```\n\nMake the catkin workspace:\n```\ncd ~/SofAR_Project/catkin_ws\ncatkin_make\n```\n### Running the Project\n\n#### Preliminary Steps\n\n##### Turning on the Husqvarna\n\n* Flip on the two switches on the back of the automower (left, then center).\n* Open the lid on top of the robot by pressing the red STOP button.\n* Enter the password on the keypad\n* Click on \"Menu\"\n* Press 7 and 9 simultaneously for about two seconds to open up a special menu item (a wrench)\n* Open up the special menu and select \"Special Settings\" at the bottom\n* Tick the \"Override loop detection\" option by pressing \"OK\"\n* Press \"OK\" once more and navigate back to the main menu\n* Close the lid on top of the robot\n\n##### Find Your Computer's IP Address\n* Run the following command: `ifconfig`\n* Find the interface corresponding to your current connection (Ethernet or Wi-Fi), make sure the address is not localhost(127.0.0.1)\n* Under that interface, note down the IP address which follows \"inet addr:\"\n* For every terminal you open up, immediately run the following command `export ROS_IP=<your_ip>` by substituting `<your_ip>` with the IP address obtained above. Note that you will have to use this command in every terminal you open up in your PC. Alternatively, you can add the command to the bottom of your .bashrc file. 
After doing so, close the terminal and open it up again.\n\n##### Set the ROS Master on the Raspberry Pi\n* SSH into the Pi using its IP address: `ssh <pi_username>@<pi_ip>`\n* Enter the password\n* Set the ROS master to your machine:\n```\nexport ROS_MASTER_URI=<your_ip>:11311\n```\n\n#### Running the project\n* Open up a terminal on your machine and run roscore by using the command `roscore`\n* In a separate terminal, navigate to the repository's catkin workspace directory `cd ~/SofAR_Project/catkin_ws`\n* Make the repository if you haven't done so yet by issuing `catkin_make`\n* Source the files using `source devel/setup.bash`\n* Run the gesture based controller: `roslaunch gesture_based_controller hrp_gb_controller.launch`\n* On the terminal connected to the Raspberry Pi via ssh, run the following commands:\n```\nsudo chmod 666 /dev/ttyACM0\nroslaunch am_driver_legacy automower_legacy.launch\n```\n* Open up the IMU app on the LG SmartWatch\n* Set the IP address to that of your machine and the port number to \"11311\"\n* Move your hand to drive the robot\n\n## Report\n\nThis is the link to the report: https://drive.google.com/drive/folders/1F-D1oRu5Ioa_JKwIRIPIFAkoQlCVzCDN\n\n## Authors\n\n* Filip Hesse: [email protected]\n* Justin Lee: [email protected]\n* Daniel Nieto: [email protected]\n* Roberto Canale: [email protected]\n* Steven Palma Morera: [email protected]\n* Josep Rueda: [email protected]\n\n## References\n\n[GitHub README template.](https://github.com/EmaroLab/GitHub_Readme_Template)\n" }, { "alpha_fraction": 0.6255183219909668, "alphanum_fraction": 0.6274322271347046, "avg_line_length": 30.350000381469727, "blob_id": "2aac894d34542c3f6d529b0d6a82505c89be7a8f", "content_id": "4bee88c8189b5d5d0b8c4e66a1ce8164459fc9e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3135, "license_type": "no_license", "max_line_length": 116, "num_lines": 100, "path": 
"/catkin_ws/src/labeled_slam/src/StateMachine/State_GO_TO.cpp", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#include \"State_GO_TO.h\"\n#include \"State_LISTENING.h\"\n#include \"StateMachine.h\"\n\n/**\n * @brief Constructor of State_GO_TO\n *\n * Always, when this state is entered:\n * (1) Calls service activate_path_following\n * with a TRUE flag\n * (2) Calls service which sets the goal to the rtabmap\n **/\nState_GO_TO::State_GO_TO(StateMachine* m, string target)\n : target_(target)\n , client_activate_path_following_(m->client_activate_path_following_)\n{\n // activate path following\n std_srvs::SetBool srv_act;\n srv_act.request.data = true;\n if (!ros::service::exists(\"activate_path_following\", true ))//Info and return, if service does not exist\n {\n ROS_INFO(\"activate_path_following service does not exist! Path following will not be activated.\");\n return;\n }\n client_activate_path_following_->call(srv_act);\n\n // set new goal\n SRV_TYPE_SET_GOAL srv_goal;\n //srv.request.node_id = 0; //Not sure about that\n srv_goal.request.node_label = target_;\n if (!ros::service::exists(\"set_goal\", true ))//Info and return, if service does not exist\n {\n ROS_INFO(\"set_goal service does not exist! Going to specified location unsuccessfull.\");\n return;\n }\n m->client_set_goal_->call(srv_goal);\n}\n\n/**\n * @brief Destructor of State_GO_TO\n *\n * Calls service activate_path_following (always, when this state is left)\n * with a FALSE flag\n **/\nState_GO_TO::~State_GO_TO()\n{\n // deactivate path following\n std_srvs::SetBool srv;\n srv.request.data = false;\n if (!ros::service::exists(\"activate_path_following\", true ))//Info and return, if service does not exist\n {\n ROS_INFO(\"activate_path_following service does not exist! 
Path following can not be deactivated.\");\n return;\n }\n client_activate_path_following_->call(srv);\n}\n\n/**\n * @brief Don't do anything on command drive\n **/\nvoid State_GO_TO::drive(StateMachine* m)\n{\n ROS_INFO(\"Invalid Command 'drive' for go_to mode. Expected to receive a listen command first (typing '1')\");\n}\n\n/**\n * @brief Switch to listening mode\n **/\nvoid State_GO_TO::listen(StateMachine* m)\n{\n ROS_INFO(\"Switching to listening mode\");\n m->change_state( new State_LISTENING() );\n delete this;\n}\n\n/**\n * @brief Don't do anything on command go to\n **/\nvoid State_GO_TO::go_to(StateMachine* m, string target)\n{\n ROS_INFO(\"Invalid Command 'go to' for go_to mode. Expected to receive a listen command first (typing '1')\");\n}\n\n/**\n * @brief Don't do anything on command label\n **/\nvoid State_GO_TO::label(StateMachine* m, string label)\n{\n ROS_INFO(\"Invalid Command 'label' for go_to mode. Expected to receive a listen command first (typing '1')\");\n}\n\n/**\n * @brief When goal reached is received, switch back to listening mode (leave this state)\n **/\nvoid State_GO_TO::goal_reached(StateMachine* m)\n{\n ROS_INFO(\"GOAL REACHED! Switching to listening mode\");\n m->change_state( new State_LISTENING() );\n delete this;\n}\n" }, { "alpha_fraction": 0.7372105717658997, "alphanum_fraction": 0.7458266019821167, "avg_line_length": 55.272727966308594, "blob_id": "6b52f2582bdd3dce17045a5238f90045db3e486c", "content_id": "3f3a276dbec1e1978993f97fb8efbd908979a586", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1857, "license_type": "no_license", "max_line_length": 88, "num_lines": 33, "path": "/catkin_ws/src/labeled_slam/test/test_state_machine.sh", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#!/bin/bash\necho Call invalid command\nrostopic pub -1 /text_command labeled_slam/Command '{command: dance! 
}'\necho Switch to listening mode!\nrostopic pub -1 /text_command labeled_slam/Command '{command: listen }'\necho Repeat command listen!\nrostopic pub -1 /text_command labeled_slam/Command '{command: listen }'\necho Switch to driving mode from listening mode!\nrostopic pub -1 /text_command labeled_slam/Command '{command: drive }'\necho Repeat command drive!\nrostopic pub -1 /text_command labeled_slam/Command '{command: drive }'\necho \"Try to label - should not work!\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: label, argument: chair }'\necho \"Try command go_to - should not work!\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: go to, argument: chair }'\necho \"Switch to listening mode\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: listen }'\necho \"Label the word 'chair'\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: label, argument: chair }'\necho \"Go to label 'chair'\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: go to, argument: chair }'\necho \"Repeat command go_to\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: go to, argument: chair }'\necho \"Try to label word 'chair' - should not work!\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: label, argument: chair }'\necho \"Interrupt go_to state by going to drive mode\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: drive }'\necho \"Switch to listening mode\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: listen }'\necho \"Go to label 'chair'\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: go to, argument: chair }'\necho \"Interrupt go_to state by going to listening mode\"\nrostopic pub -1 /text_command labeled_slam/Command '{command: listen }'\n" }, { "alpha_fraction": 0.7248569130897522, "alphanum_fraction": 0.725674569606781, "avg_line_length": 31.1842098236084, "blob_id": "6ad61fd304234dd50915ecdff7c5a43552790dcf", "content_id": 
"9f1a064df359212b365868d3b22bd2dad22857d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2446, "license_type": "no_license", "max_line_length": 146, "num_lines": 76, "path": "/catkin_ws/src/labeled_slam/include/labeled_slam/StateMachine/StateMachine.h", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#ifndef STATE_MACHINE_H\n#define STATE_MACHINE_H\n\n// ROS includes.\n#include \"ros/ros.h\"\n#include \"ros/time.h\"\n#include \"labeled_slam/Command.h\"\n#include \"std_srvs/SetBool.h\"\n#include \"std_msgs/Bool.h\"\n\n// comment out the following line after rtabmap package has been created!\n//#define STATE_MACHINE_STANDALONE\n\n#ifdef STATE_MACHINE_STANDALONE\n #include \"labeled_slam/SetGoalDummy.h\"\n #include \"labeled_slam/SetLabelDummy.h\"\n #define SRV_TYPE_SET_GOAL labeled_slam::SetGoalDummy\n #define SRV_TYPE_SET_LABEL labeled_slam::SetLabelDummy\n#else\n #include \"rtabmap_ros/SetGoal.h\"\n #include \"rtabmap_ros/SetLabel.h\"\n #define SRV_TYPE_SET_GOAL rtabmap_ros::SetGoal\n #define SRV_TYPE_SET_LABEL rtabmap_ros::SetLabel\n#endif\n\n\nusing std::string;\n\n//forward declare classess\nclass BaseState;\nclass State_DRIVING;\nclass State_LISTENING;\nclass State_GO_TO;\n\n\n/** @class StateMachine\n * @brief Implements the interface for the labeled_slam state machine\n *\n * Uses the c++ 'State'-pattern\n **/\nclass StateMachine\n{\npublic:\n//! Constructor\nStateMachine(ros::ServiceClient* client_set_goal,\n ros::ServiceClient* client_set_label,\n ros::ServiceClient* client_activate_path_following,\n ros::ServiceClient* client_activate_driving);\n\n//! Destructor\n~StateMachine();\n\n//! Callback function for text_command subscriber\nvoid callback_command(const labeled_slam::Command::ConstPtr& msg);\n//! 
Callback function for goal_reached subscriber\nvoid callback_goal_reached(const std_msgs::Bool::ConstPtr& msg);\n\nprivate:\nros::ServiceClient* client_set_goal_; /**< Service client to call service of rtabmap_ros/SetGoal */\nros::ServiceClient* client_set_label_; /**< Service client to call service of rtabmap_ros/SetLabel */\nros::ServiceClient* client_activate_path_following_;/**< Service client to call service (of Activator1) which activates path following */\nros::ServiceClient* client_activate_driving_; /**< Service client to call service (of Activator2) which activates driving */\n\n\nBaseState* state_; /**< Member, which contains the current state object of derived state class (POLYMORPHISM!) */\n\n// Comments on functions in CPP-File!\nvoid change_state (BaseState * state);\n\n// define friend classes in order to acess functions like change_state\nfriend class State_DRIVING;\nfriend class State_LISTENING;\nfriend class State_GO_TO;\n};\n\n#endif // STATE_MACHINE_H\n" }, { "alpha_fraction": 0.6188552379608154, "alphanum_fraction": 0.619528591632843, "avg_line_length": 24.60344886779785, "blob_id": "a2c1a51b8a5948e0d391927d3ce3bbf3c58b40fd", "content_id": "693964d2e44e469f6423a9d83ff6d37554e463ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1485, "license_type": "no_license", "max_line_length": 99, "num_lines": 58, "path": "/catkin_ws/src/labeled_slam/src/StateMachine/State_LISTENING.cpp", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#include \"State_LISTENING.h\"\n#include \"State_GO_TO.h\"\n#include \"State_DRIVING.h\"\n#include \"StateMachine.h\"\n\n\n/**\n * @brief Switch to driving mode\n **/\nvoid State_LISTENING::drive(StateMachine* m)\n{\n ROS_INFO(\"Switching to driving mode\");\n m->change_state( new State_DRIVING(m) );\n delete this;\n}\n\n/**\n * @brief Don't do anything on command listen\n **/\nvoid State_LISTENING::listen(StateMachine* m)\n{\n 
ROS_INFO(\"Already in listening mode!\");\n}\n\n/**\n * @brief Switch to go_to mode\n **/\nvoid State_LISTENING::go_to(StateMachine* m, string target)\n{\n ROS_INFO(\"Switching to go_to mode\");\n ROS_INFO(\"Target: %s\", target.c_str() );\n m->change_state( new State_GO_TO(m, target) );\n delete this;\n}\n\n/**\n * @brief Call label service\n **/\nvoid State_LISTENING::label(StateMachine* m, string label)\n{\n SRV_TYPE_SET_LABEL srv;\n srv.request.node_id = 0; //Means, that label will b set to last node_id\n srv.request.node_label = label;\n if (!ros::service::exists(\"set_label\", true ))//Info and return, if service does not exist\n {\n ROS_INFO(\"set_label service does not exist! Labeling unsuccessfull.\");\n return;\n }\n m->client_set_label_->call(srv);\n}\n\n/**\n * @brief Don't do anything when goal reached is received\n **/\nvoid State_LISTENING::goal_reached(StateMachine* m)\n{\n ROS_INFO(\"Invalid message: Goal should not be reached, when in labeling mode!\");\n}\n" }, { "alpha_fraction": 0.7482143044471741, "alphanum_fraction": 0.7482143044471741, "avg_line_length": 24.454545974731445, "blob_id": "ddc058eefd0e48934ac6246982ef2ead93531ea9", "content_id": "39a35d3a1413c8045ef6e442597868b0999cc494", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 560, "license_type": "no_license", "max_line_length": 84, "num_lines": 22, "path": "/catkin_ws/src/labeled_slam/include/labeled_slam/StateMachine/State_LISTENING.h", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#ifndef STATE_LISTENING_H\n#define STATE_LISTENING_H\n\n#include \"BaseState.h\"\nclass StateMachine;\n\n/** @class State_LISTENING\n * @brief State class for the mode, when the system is listening to voice commands.\n *\n * Robot is not moving in this mode!\n **/\nclass State_LISTENING : public BaseState\n{\npublic:\nvirtual void drive(StateMachine* m);\nvirtual void listen(StateMachine* m);\nvirtual void 
go_to(StateMachine* m, string target);\nvirtual void label(StateMachine* m, string label);\nvirtual void goal_reached(StateMachine* m);\n};\n\n#endif // STATE_LISTENING_H\n" }, { "alpha_fraction": 0.6983016729354858, "alphanum_fraction": 0.7057942152023315, "avg_line_length": 30.77777862548828, "blob_id": "2774f3ed0b592cb76a056a72ced9be750935f95a", "content_id": "7acd237929fa74032de037c6a2b9df1a78a951c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2002, "license_type": "no_license", "max_line_length": 168, "num_lines": 63, "path": "/catkin_ws/src/labeled_slam/src/activator1.cpp", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#include <sstream>\n#include <stdlib.h>\n#include \"ros/ros.h\"\n#include \"std_msgs/Bool.h\"\n#include \"std_srvs.h\"\n#include \"geometry_msgs/Twist.h\"\n\nusing namespace std;\n\n//Constants and general stuff\ngeometry_msgs::Twist velocity_to_publish;\nbool activation = false;\nvoid velocity_callback(const geometry_msgs::::ConstPtr& received_velocity);\nbool path_following_activate(std_srvs::SetBool& req, std_srvs::SetBool& response);\n\n\n\nint main(int argc, char** argv){\n\n ros::init(argc, argv, \"activator1\");\n ros::Rate loop_rate(1000);\n\n //initializing my node\n\n ros::NodeHandle node;\n ros::Publisher twist_pub = node.advertise<geometry_msgs::Twist>(\"ac1/cmd_vel\", 1000); //publisher for the veolocity forwarder/the robot\n ros::Subscriber twist_sub = node.subscribe(\"path/cmd_vel\", 1000, &velocity_callback); //subscriber for the velocity from the path planner\n ros::ServiceServer bool_serv = node.advertiseService(\"activate_path_following\", path_following_activate); //boolean check to see wether data needs to be sent or not.\n\n\n// while here dunno why\n while(ros::ok()){\n if(activation == true){\n Twist_pub.publish(velocity_to_publish);\n ROS_INFO(\"Path Follower is active.\\n\");\n } else{\n ROS_INFO(\"Path Follower is not 
active.\\n\");\n }\n\n ros::spin();\n }\n\n return 0;\n};\n\n\nbool path_following_activate(std_srvs::SetBool& req, std_srvs::SetBool& response){\n\n activation == req->data ; // iff activation->data = true\n response->success = true;\n response->message = \"\";\n return true;\n\n}\n\nvoid velocity_calback(const geometry_msgs::Twist::ConstPtr& received_velocity){\n velocity_to_publish.linear.x = received_velocity->linear.x;\n velocity_to_publish.linear.y = received_velocity->linear.y;\n velocity_to_publish.linear.z = received_velocity->linear.z;\n velocity_to_publish.angular.x = received_velocity->angular.x;\n velocity_to_publish.angular.y = received_velocity->angular.y;\n velocity_to_publish.angular.z = received_velocity->angular.z;\n}\n" }, { "alpha_fraction": 0.6365688443183899, "alphanum_fraction": 0.6416478753089905, "avg_line_length": 45.6315803527832, "blob_id": "04afec7d7d9b8cfe8c99d3f623a459769b7b1fb7", "content_id": "9f44bc0c21564b73a072803c705442502fa71814", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1772, "license_type": "no_license", "max_line_length": 132, "num_lines": 38, "path": "/catkin_ws/src/labeled_slam/src/logic_node.cpp", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#include \"StateMachine.h\"\n#include \"ros/ros.h\"\n#include \"labeled_slam/Command.h\"\n\n/**\n * @brief This node implements the main logic of the labeled_slam project\n *\n * The core object of this node is state_machine\n * The c++ \"state\"-pattern is used to imlement the state machine in an object-\n * oriented, safe and maintainable way\n * Possible states of the state machine are \"DRIVING\", \"LISTENING\" and \"GO_TO\"\n **/\nint main(int argc, char **argv)\n{\n ros::init(argc, argv, \"logic_node\");\n\n ros::NodeHandle n;\n\n //Define all service clients\n ros::ServiceClient client_set_goal = n.serviceClient<SRV_TYPE_SET_GOAL>(\"set_goal\");\n ros::ServiceClient client_set_label 
= n.serviceClient<SRV_TYPE_SET_LABEL>(\"set_label\");\n ros::ServiceClient client_activate_path_following = n.serviceClient<std_srvs::SetBool>(\"activate_path_following\");\n ros::ServiceClient client_activate_driving = n.serviceClient<std_srvs::SetBool>(\"activate_driving\");\n\n //Create state machine, pass all the service clients to state machine (services will be called from inside)\n StateMachine state_machine(&client_set_goal,\n &client_set_label,\n &client_activate_path_following,\n &client_activate_driving);\n\n //Create Subscribers, assign callback-functions to memberfunctions of state_machine\n ros::Subscriber sub_command = n.subscribe(\"text_command\", 1000, &StateMachine::callback_command, &state_machine);\n ros::Subscriber sub_goal_reached = n.subscribe(\"goal_reached\", 1000, &StateMachine::callback_goal_reached, &state_machine);\n\n ros::spin();\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5255765318870544, "alphanum_fraction": 0.5301886796951294, "avg_line_length": 33.565216064453125, "blob_id": "96725d5ba197e702d83ee7e617c312fdaabc758f", "content_id": "2df9de811e359513785310facf3a8836a912c693", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9540, "license_type": "no_license", "max_line_length": 128, "num_lines": 276, "path": "/catkin_ws/src/labeled_slam/src/command_recognition.py", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# SPEECH RECOGNITION AND COMMAND NODE\n# Requires connection to the internet\n\n# Import system calls\nimport os\n\n# Python libraries\nimport time\nimport speech_recognition as sr # Speech Recognition library\n\n# ROS libraries\nimport roslib\nimport rospy\n\n# ROS messages\nfrom labeled_slam.msg import Command # Custome message containing two strings variables and a header\n\n# Mother class\nclass Main_Speech_Controller:\n\n # Initialize global variables for the object\n def __init__(self):\n\n # Topic where 
we will publish our costume message\n self.mytopic= rospy.Publisher('text_command', Command, queue_size=10)\n\n # Structure for the speech recognition library result\n self.speech= {\"Transcription\":None, \"Success\":True, \"Error\":None}\n\n # Feedback from user\n self.confirmation= 0\n\n # State of the speech recognition operations\n self.status= 0\n\n # Initialize a message type speechcommand\n self.speechcommand= Command()\n self.speechcommand.command= \"\" # Tried to used None insted of \"\" but mytopic.pub returned error\n self.speechcommand.argument= \"\"\n\n # Get the audio signal from the microphone as a string\n def recognize_speech(self, speech):\n\n # Declare a type of recognizer an microphone for the speech recognition library\n speechrecognizer= sr.Recognizer()\n microphone= sr.Microphone()\n\n\n with microphone as source:\n # Identifies the backgroundnoise to recognize better the speech\n speechrecognizer.adjust_for_ambient_noise(source)\n # Listen\n print(\"***** Listening...\")\n audio= speechrecognizer.listen(source)\n\n # Troubleshoot\n try:\n speech[\"Transcription\"]= speechrecognizer.recognize_google(audio)\n except sr.RequestError:\n speech[\"Success\"]= False\n speech[\"Error\"]= \"API unavailable\"\n except sr.UnknownValueError:\n speech[\"Error\"]= \"Unable to recognize the past speech\"\n\n\n return speech\n\n # From the audio signal, publish the desired command\n def recognize_command(self, speech, speechcommand, mytopic):\n\n if speech[\"Transcription\"]== \"drive\":\n\n # Update the message variables\n speechcommand.command= speech[\"Transcription\"]\n speechcommand.argument= \"\"\n\n # Publish the speechcommand message to /speechcommandtopic\n mytopic.publish(speechcommand)\n\n print(\"***** Driving mode\")\n return 1\n\n elif speech[\"Transcription\"]== \"go to\":\n\n # Update the first message variable\n speechcommand.command= speech[\"Transcription\"]\n\n # Listen to the second argument of the command\n print(\"***** Where should 
we go?\")\n speech=self.recognize_speech(self.speech)\n\n # Ask if user agrees with the recognized word\n confirmation=self.recognize_user(self.speech, self.confirmation)\n\n # If yes\n if confirmation==1:\n\n # ***** MISSING *****\n # Is it a valid location?\n # If yes\n\n # Update the second message variable\n speechcommand.argument= speech[\"Transcription\"]\n\n # Publish the speechcommand message to /speechcommandtopic\n mytopic.publish(speechcommand)\n\n print(\"***** Going autonomously to: \" + speech[\"Transcription\"])\n return 1\n\n # ***** MISSING *****\n # Otherwise, start all over again\n # else:\n # print(\"Sure, but first tell where is it at\")\n # return 0\n\n # Otherwise, start all over again\n else:\n print(\"***** Oops! Sorry about that\")\n time.sleep(3)\n return 0\n\n elif speech[\"Transcription\"]==\"label\":\n\n # Update the first message variable\n speechcommand.command= speech[\"Transcription\"]\n\n # Listen to the second argument of the command\n print(\"***** How should we call it? Be creative\")\n speech=self.recognize_speech(self.speech)\n\n # Ask if the user agrees with the recognized word\n confirmation=self.recognize_user(self.speech, self.confirmation)\n\n # If yes\n if confirmation==1:\n\n # Update the second message variable\n speechcommand.argument= speech[\"Transcription\"]\n\n # Publish the speechcommand message to /speechcommandtopic\n mytopic.publish(speechcommand)\n\n print(\"***** \" + speech[\"Transcription\"] + \"saved successfully\")\n return 1\n\n # Otherwise, start all over again\n else:\n print(\"***** Oops! 
Sorry about that\")\n time.sleep(3)\n return 0\n\n # If command not valid\n else:\n\n # Update message variables\n speechcommand.command= \"\"\n speechcommand.argument= \"\"\n\n # Start all over again\n print(\"***** I'm afraid I cannot do that\")\n return 0\n\n # Ask user for feedback of the recognized speech\n def recognize_user(self, speech, confirmation):\n\n # If the speech is free of errors\n if speech[\"Success\"]==1:\n\n # Ask if the user agrees with the recognized speech\n print(\"***** Did you say: \" + speech[\"Transcription\"])\n print(\"----------\")\n\n # Wait for keyboard input, 1=YES 0=NO\n confirmation=int(input(\"Type 1 if yes or 0 otherwise: \"))\n return confirmation\n\n # If the recognized speech has an error\n else:\n\n # Start all over again\n print(\"***** An error ocurred\", speech[\"Error\"])\n confirmation= 0\n return confirmation\n\n # The QuarterBack\n def start_recognition(self):\n\n while not rospy.is_shutdown():\n\n # Clear command window\n os.system('clear')\n\n # Ask if the user wants to perform a new voice command\n print(\"****************************************\")\n print(\"***** Do you want to perform a voice command now? 
Only 'drive', 'go to' and 'label' are valid (for now)\")\n print(\"----------\")\n\n # Wait for keyboard input, 1=YES 0=NO\n aux=int(input(\"Type 1 if yes or 0 otherwise: \"))\n\n # If yes\n if aux==1:\n\n # Publish a message saying that we are Listening\n self.speechcommand.command= \"listen\"\n self.speechcommand.argument= \"\"\n self.mytopic.publish(self.speechcommand)\n\n # Recognize speech\n self.speech= self.recognize_speech(self.speech)\n\n # Ask for feedback\n self.confirmation= self.recognize_user(self.speech, self.confirmation)\n\n # If it is good\n if self.confirmation==1:\n\n # Get the right command and publish it\n self.status= self.recognize_command(self.speech, self.speechcommand, self.mytopic)\n\n # Show status of the operation\n if self.status==1:\n print(\"***** command sent\")\n else:\n print(\"***** command NOT sent\")\n time.sleep(3)\n\n # Otherwise, start all over again\n else:\n print(\"***** Oops! sorry about that\")\n time.sleep(3)\n\n # Otherwise\n else:\n\n # Sleep for 10 seconds\n print(\"***** It's okay, I'm going to sleep 10 seconds. If you want to exit, press CTRL+C\")\n print(\"****************************************\")\n time.sleep(10) # Doing this mostly for having an exit door for the program / Might change it later\n\n# Main\ndef main():\n\n # Initialize and cleanup the ROS node\n rospy.init_node('Main_Speech_Controller', anonymous=True)\n\n # Declare a new instance of the class and run its starting function\n MSC= Main_Speech_Controller()\n MSC.start_recognition()\n\n # Listen to CTRL+C interruptions when is not waiting for inputs\n try:\n rospy.spin()\n except rospy.ROSInterruptException:\n print(\"Shutting down the Main Speech Controller node\")\n pass\n\nif __name__ == '__main__':\n main()\n\n\n# ----------\n# TO-DO list:\n# 1. How to check if the label of Go mode is correct?\n# 2. Fill with error exceptions\n\n# 3. Branch and Merge in git repository\n# 4. Test and debug\n# 5. 
Use and installation manual of the package (also readme)\n\n# 6. Might do a GUI if we have time\n# 7. Improve comments and documentation\n# 8. Learn more about the Speech Recognition library\n" }, { "alpha_fraction": 0.7394958138465881, "alphanum_fraction": 0.7394958138465881, "avg_line_length": 25.44444465637207, "blob_id": "2c8ae7a85edd18bfe85a5a7c1c590414893a4671", "content_id": "f9de2f88772b28dabf939b304108f8b59bb72ffd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 714, "license_type": "no_license", "max_line_length": 90, "num_lines": 27, "path": "/catkin_ws/src/labeled_slam/include/labeled_slam/StateMachine/State_GO_TO.h", "repo_name": "robertokcanale/SofAR_Project", "src_encoding": "UTF-8", "text": "#ifndef STATE_GO_TO_H\n#define STATE_GO_TO_H\n\n#include \"BaseState.h\"\nclass StateMachine;\n\n/** @class State_GO_TO\n * @brief State class for the mode, when the system is following a path towards a target.\n *\n * Target can be one of the labels which have been created before\n **/\nclass State_GO_TO : public BaseState\n{\npublic:\nState_GO_TO(StateMachine* m, string target);\n~State_GO_TO();\nvirtual void drive(StateMachine* m);\nvirtual void listen(StateMachine* m);\nvirtual void go_to(StateMachine* m, string target);\nvirtual void label(StateMachine* m, string label);\nvirtual void goal_reached(StateMachine* m);\nprivate:\nstring target_;\nros::ServiceClient* client_activate_path_following_;\n};\n\n#endif // STATE_GO_TO_H\n" } ]
14