Columns in this dump, with the value statistics it reports:

column        | type        | values
commit        | string      | length 40 to 40
subject       | string      | length 1 to 1.49k
old_file      | string      | length 4 to 311
new_file      | string      | length 4 to 311
new_contents  | string      | length 1 to 29.8k
old_contents  | string      | length 0 to 9.9k
lang          | categorical | 3 classes
proba         | float64     | 0 to 1
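Each record below lists these eight fields in order, one value per line, with the post-commit snapshot (new_contents) preceding the pre-commit one (old_contents). For orientation only, here is a minimal sketch of loading and filtering such a dump with the Hugging Face datasets library; the dataset ID is a placeholder, since the dump does not name its source:

    from datasets import load_dataset

    # "user/commit-diffs" is a hypothetical ID, not the dataset's real name.
    ds = load_dataset("user/commit-diffs", split="train")

    # proba is a float64 in [0, 1]; thresholding it keeps rows whose language
    # label is confident. lang is one of three string classes.
    confident = ds.filter(lambda row: row["lang"] == "Python" and row["proba"] > 0.9)

    for row in confident.select(range(3)):
        print(row["commit"][:8], row["subject"], row["new_file"])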

commit: fd9a553868ce46ceef2b23e79347dd262b63ebae
subject: fix build instructions on Linux
old_file: binding.gyp
new_file: binding.gyp
new_contents, then old_contents (each snapshot rendered on a single line):
{ "targets": [ { "target_name": "bindings", "include_dirs": [ "<(raptor_prefix)/include/raptor2" ], "sources": [ "src/bindings.cc", "src/parser.cc", "src/parser_wrapper.cc", "src/serializer.cc", "src/serializer_wrapper.cc", "src/statement.cc", "src/statement_wrapper.cc", "src/uri.cc", "src/world.cc", "src/message.cc" ], "link_settings": { "libraries": [ "-lraptor2" ] }, "conditions": [ [ "OS=='mac'", { "variables": { "raptor_prefix": "/usr/local" }, "xcode_settings": { "GCC_ENABLE_CPP_EXCEPTIONS": "YES", "OTHER_CPLUSPLUSFLAGS": [ "-std=c++11", "-stdlib=libc++", "-mmacosx-version-min=10.7" ] } } ], [ "OS!='win'", { "variables": { "raptor_prefix": "/usr" }, "cflags_cc": [ "-std=c++11", "-fexceptions" ] } ] ] } ] }
{ "targets": [ { "target_name": "bindings", "variables": { "raptor_prefix": "/usr/local" }, "include_dirs": [ "<(raptor_prefix)/include/raptor2" ], "sources": [ "src/bindings.cc", "src/parser.cc", "src/parser_wrapper.cc", "src/serializer.cc", "src/serializer_wrapper.cc", "src/statement.cc", "src/statement_wrapper.cc", "src/uri.cc", "src/world.cc", "src/message.cc" ], "cflags!": [ "-fno-exceptions" ], "cflags_cc!": [ "-std=c++11", "-fno-exceptions" ], "link_settings": { "libraries": [ "-lraptor2" ] }, "conditions": [ [ "OS=='mac'", { "xcode_settings": { "GCC_ENABLE_CPP_EXCEPTIONS": "YES", "OTHER_CPLUSPLUSFLAGS": [ "-std=c++11", "-stdlib=libc++", "-mmacosx-version-min=10.7" ] } } ] ] } ] }
lang: Python
proba: 0.000001
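Because every record stores full before-and-after snapshots rather than a patch, a unified diff can be recovered with the standard library. A minimal sketch, assuming the snapshots keep their original newlines (their one-line rendering in this dump is an export artifact) and using stand-in strings:

    import difflib

    # Stand-ins for one record's old_contents / new_contents fields.
    old_contents = "line one\nline two\n"
    new_contents = "line one\nline 2\n"

    diff = difflib.unified_diff(
        old_contents.splitlines(keepends=True),
        new_contents.splitlines(keepends=True),
        fromfile="a/binding.gyp",
        tofile="b/binding.gyp",
    )
    print("".join(diff))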

commit: 2f924fc35d0724e7638e741fd466228649077e10
subject: Update action_after_build destination
old_file: binding.gyp
new_file: binding.gyp
new_contents, then old_contents:
{ 'includes': [ 'deps/common-libzip.gypi' ], 'variables': { 'shared_libzip%':'false', 'shared_libzip_includes%':'/usr/lib', 'shared_libzip_libpath%':'/usr/include' }, 'targets': [ { 'target_name': 'node_zipfile', 'conditions': [ ['shared_libzip == "false"', { 'dependencies': [ 'deps/libzip.gyp:libzip' ] }, { 'libraries': [ '-L<@(shared_libzip_libpath)', '-lzip' ], 'include_dirs': [ '<@(shared_libzip_includes)', '<@(shared_libzip_libpath)/libzip/include', ] } ] ], 'sources': [ 'src/node_zipfile.cpp' ], }, { 'target_name': 'action_after_build', 'type': 'none', 'dependencies': [ 'node_zipfile' ], 'copies': [ { 'files': [ '<(PRODUCT_DIR)/node_zipfile.node' ], 'destination': './lib/binding/' } ], 'conditions': [ ['OS=="win"', { 'copies': [ { 'files': [ '<(PRODUCT_DIR)/libzip.dll' ], 'destination': 'lib/' } ] }] ] } ] }
{ 'includes': [ 'deps/common-libzip.gypi' ], 'variables': { 'shared_libzip%':'false', 'shared_libzip_includes%':'/usr/lib', 'shared_libzip_libpath%':'/usr/include' }, 'targets': [ { 'target_name': 'node_zipfile', 'conditions': [ ['shared_libzip == "false"', { 'dependencies': [ 'deps/libzip.gyp:libzip' ] }, { 'libraries': [ '-L<@(shared_libzip_libpath)', '-lzip' ], 'include_dirs': [ '<@(shared_libzip_includes)', '<@(shared_libzip_libpath)/libzip/include', ] } ] ], 'sources': [ 'src/node_zipfile.cpp' ], }, { 'target_name': 'action_after_build', 'type': 'none', 'dependencies': [ 'node_zipfile' ], 'copies': [ { 'files': [ '<(PRODUCT_DIR)/node_zipfile.node' ], 'destination': './lib/' } ], 'conditions': [ ['OS=="win"', { 'copies': [ { 'files': [ '<(PRODUCT_DIR)/libzip.dll' ], 'destination': 'lib/' } ] }] ] } ] }
lang: Python
proba: 0.000002

commit: 2f23ce76bfc32022cea41d675d762dfbbde3fed7
subject: Fix a typo
old_file: home.py
new_file: home.py
new_contents, then old_contents:
#!/usr/bin/env python import time import sys import json import types import thread from messenger import Messenger import config import led import dht import stepper_motor def turn_on_living_light(freq, dc): print('turn_on_living_light: %d, %d' % (freq, dc)) led.turn_on(config.LED_LIVING, freq, dc) def turn_off_living_light(): print('turn_off_living_light') led.turn_off(config.LED_LIVING) def turn_on_bedroom_light(freq, dc): print('turn_on_bedroom_light: %d, %d' % (freq, dc)) led.turn_on(config.LED_BEDROOM, freq, dc) def turn_off_bedroom_light(): print('turn_off_bedroom_light') led.turn_off(config.LED_BEDROOM) def turn_on_porch_light(freq, dc): print('turn_on_porch_light: %d, %d' % (freq, dc)) led.turn_on(config.LED_PORCH, freq, dc) def turn_off_porch_light(): print('turn_off_porch_light') led.turn_off(config.LED_PORCH) def open_front_door(): print('open_front_door') stepper_motor.forward(90) def close_front_door(): print('close_front_door') stepper_motor.backward(90) def message_callback(msg): print('message_callback:') print(msg) if not isinstance(msg, dict): return if msg['topic'] != config.ALIAS: return print('get a message!') try: m = json.loads(msg['msg']) except Exception as e: print('json.loads exception:') print(e) return print('act: %s' % m['act']) if m['act'] == 'turn_on_living_light': turn_on_living_light(m['freq'], m['dc']) elif m['act'] == 'turn_off_living_light': turn_off_living_light() elif m['act'] == 'turn_on_bedroom_light': turn_on_bedroom_light(m['freq'], m['dc']) elif m['act'] == 'turn_off_bedroom_light': turn_off_bedroom_light() elif m['act'] == 'turn_on_porch_light': turn_on_porch_light(m['freq'], m['dc']) elif m['act'] == 'turn_off_porch_light': turn_off_porch_light() elif m['act'] == 'open_front_door': open_front_door() elif m['act'] == 'close_front_door': close_front_door() def report_ht(messenger): ht = dht.get_ht() m = {} m['act'] = 'report_ht' m['h'] = ht[0] m['t'] = ht[1] msg = json.dumps(m) messenger.publish(msg, 1) def main(): messenger = Messenger(message_callback) while True: report_ht(messenger) time.sleep(2) if __name__ == '__main__': main()
#!/usr/bin/env python import time import sys import json import types import thread from messenger import Messenger import config import led import dht import stepper_motor def turn_on_living_light(freq, dc): print('turn_on_living_light: %d, %d' % (freq, dc)) led.turn_on(config.LED_LIVING, freq, dc) def turn_off_living_light(): print('turn_off_living_light') led.turn_off(config.LED_LIVING) def turn_on_bedroom_light(freq, dc): print('turn_on_bedroom_light: %d, %d' % (freq, dc)) led.turn_on(config.LED_BEDROOM, freq, dc) def turn_off_bedroom_light(): print('turn_off_bedroom_light') led.turn_off(config.LED_BEDROOM) def turn_on_porch_light(freq, dc): print('turn_on_porch_light: %d, %d' % (freq, dc)) led.turn_on(config.LED_PORCH, freq, dc) def turn_off_porch_light(): print('turn_off_porch_light') led.turn_off(config.LED_PORCH) def open_front_door(): print('open_front_door') stepper_motor.forward(90) def close_front_door(): print('close_front_door') stepper_motor.backward(90) def message_callback(msg): print('message_callback:') print(msg) if not isinstance(msg, dict): return if msg['topic'] != config.ALIAS: return print('get a message!') try: m = json.loads(msg['msg']) except Exception as e: print('json.loads exception:') print(e) return print('act: %s' % m['act']) if m['act'] == 'turn_on_living_light': turn_on_living_light(m['freq'], m['dc']) elif m['act'] == 'turn_off_living_light': turn_off_living_light() elif m['act'] == 'turn_on_bedroom_light': turn_on_bedroom_light(m['freq'], m['dc']) elif m['act'] == 'turn_off_bedroom_light': turn_off_bedroom_light() elif m['act'] == 'turn_on_porch_light': turn_on_porch_light(m['freq'], m['dc']) elif m['act'] == 'turn_off_porch_light': turn_off_porch_light() elif m['act'] == 'open_front_door': open_front_door() elif m['act'] == 'close_front_door': close_front_door() def report_ht(messenger): ht = dht.get_ht() m = {} m['act'] = 'report_ht' m['h'] = ht[0] m['t'] = ht[1] msg = json.dumps(m) messenger.publish(msg, 1) def main(): messenger = Messenger(message_callback) while True: report_ht() time.sleep(2) if __name__ == '__main__': main()
lang: Python
proba: 1

commit: c2f563215fcc62d6e595446f5acbd1969484ddb7
subject: move end timer command to the correct location
old_file: clean_db.py
new_file: clean_db.py
new_contents, then old_contents:
import MySQLdb, config, urllib, cgi, datetime, time sql = MySQLdb.connect(host="localhost", user=config.username, passwd=config.passwd, db=config.db) sql.query("SELECT `id` FROM `feedurls`") db_feed_query=sql.store_result() rss_urls=db_feed_query.fetch_row(0) table_name = "stories" date_from = datetime.datetime.strptime(raw_input("start date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y") date_to = datetime.datetime.strptime(raw_input("end date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y") for rss_url_data in rss_urls: feed_id=rss_url_data[0] i = date_from while i <= date_to: t0=time.clock() whereclause="`date_added` = '" + i.strftime("%Y-%m-%d") + "'" whereclause+=" AND `feedid`= "+ str(feed_id) +"" query="DELETE FROM stories WHERE " + whereclause query+=" AND `url` NOT IN (SELECT * FROM (SELECT `url` FROM stories WHERE "+whereclause query+=" ORDER BY `points` DESC LIMIT 0,20) AS TAB);" sql.query(query) sql.commit() print(i.strftime("%d/%m/%Y")+","+str(time.clock()-t0)) i += datetime.timedelta(days=1)
import MySQLdb, config, urllib, cgi, datetime, time sql = MySQLdb.connect(host="localhost", user=config.username, passwd=config.passwd, db=config.db) sql.query("SELECT `id` FROM `feedurls`") db_feed_query=sql.store_result() rss_urls=db_feed_query.fetch_row(0) table_name = "stories" date_from = datetime.datetime.strptime(raw_input("start date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y") date_to = datetime.datetime.strptime(raw_input("end date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y") for rss_url_data in rss_urls: feed_id=rss_url_data[0] i = date_from while i <= date_to: t0=time.clock() whereclause="`date_added` = '" + i.strftime("%Y-%m-%d") + "'" whereclause+=" AND `feedid`= "+ str(feed_id) +"" query="DELETE FROM stories WHERE " + whereclause query+=" AND `url` NOT IN (SELECT * FROM (SELECT `url` FROM stories WHERE "+whereclause query+=" ORDER BY `points` DESC LIMIT 0,20) AS TAB);" print(i.strftime("%d/%m/%Y")+","+str(time.clock()-t0)) sql.query(query) sql.commit() i += datetime.timedelta(days=1)
lang: Python
proba: 0.000001

commit: 710dba5196fbd419c23de74e9177185e212736a1
subject: Update according to changes in config
old_file: tmt/util.py
new_file: tmt/util.py
new_contents, then old_contents:
import yaml import json import os import re '''Utility functions for filename and path routines.''' def regex_from_format_string(format_string): ''' Convert a format string of the sort "{name}_bla/something_{number}" to a named regular expression a la "P<name>.*_bla/something_P<number>\d+". Parameters ---------- format_string: str Python format string Returns ------- str named regular expression pattern ''' # Extract the names of all placeholders from the format string placeholders_inner_parts = re.findall(r'{(.+?)}', format_string) # Remove format strings placeholder_names = [pl.split(':')[0] for pl in placeholders_inner_parts] placeholder_regexes = [re.escape('{%s}') % pl for pl in placeholders_inner_parts] regex = format_string for pl_name, pl_regex in zip(placeholder_names, placeholder_regexes): if re.search(r'number', pl_name): regex = re.sub(pl_regex, '(?P<%s>\d+)' % pl_name, regex) else: regex = re.sub(pl_regex, '(?P<%s>.*)' % pl_name, regex) return regex def load_config(filename): ''' Load configuration settings from YAML file. Parameters ---------- filename: str name of the config file Returns ------- dict YAML content Raises ------ OSError when `filename` does not exist ''' if not os.path.exists(filename): raise OSError('Configuration file does not exist: %s' % filename) with open(filename) as f: return yaml.load(f.read()) def check_config(cfg): ''' Check that configuration settings contains all required keys. Parameters ---------- cfg: dict configuration settings Raises ------ KeyError when a required key is missing ''' required_keys = { 'COORDINATES_FROM_FILENAME', 'COORDINATES_IN_FILENAME_ONE_BASED', 'SUBEXPERIMENT_FOLDER_FORMAT', 'SUBEXPERIMENT_FILE_FORMAT', 'CYCLE_FROM_FILENAME', 'EXPERIMENT_FROM_FILENAME', 'IMAGE_FOLDER_LOCATION', 'SUBEXPERIMENTS_EXIST', 'SEGMENTATION_FOLDER_LOCATION', 'OBJECTS_FROM_FILENAME', 'SHIFT_FOLDER_LOCATION', 'SHIFT_FILE_FORMAT', 'STATS_FOLDER_LOCATION', 'STATS_FILE_FORMAT', 'CHANNEL_FROM_FILENAME' } for key in required_keys: if key not in cfg: raise KeyError('Configuration file must contain the key "%s"' % key) def write_joblist(filename, joblist): ''' Write joblist to YAML file. Parameters ---------- filename: str name of the YAML file joblist: List[dict] job descriptions Raises ------ OSError when `filename` does not exist ''' if not os.path.exists(filename): raise OSError('Joblist file does not exist: %s' % filename) with open(filename, 'w') as joblist_file: joblist_file.write(yaml.dump(joblist, default_flow_style=False)) def read_joblist(filename): ''' Read joblist to YAML file. Parameters ---------- filename: str name of the YAML file Returns ------- List[dict] job descriptions Raises ------ OSError when `filename` does not exist ''' if not os.path.exists(filename): raise OSError('Joblist file does not exist: %s' % filename) with open(filename, 'r') as joblist_file: return yaml.load(joblist_file.read()) class Namespacified(object): ''' Class for loading key-value pairs of a dictionary into a Namespace object. ''' def __init__(self, adict): self.__dict__.update(adict)
import yaml import json import os import re '''Utility functions for filename and path routines.''' def regex_from_format_string(format_string): ''' Convert a format string of the sort "{name}_bla/something_{number}" to a named regular expression a la "P<name>.*_bla/something_P<number>\d+". Parameters ---------- format_string: str Python format string Returns ------- str named regular expression pattern ''' # Extract the names of all placeholders from the format string placeholders_inner_parts = re.findall(r'{(.+?)}', format_string) # Remove format strings placeholder_names = [pl.split(':')[0] for pl in placeholders_inner_parts] placeholder_regexes = [re.escape('{%s}') % pl for pl in placeholders_inner_parts] regex = format_string for pl_name, pl_regex in zip(placeholder_names, placeholder_regexes): if re.search(r'number', pl_name): regex = re.sub(pl_regex, '(?P<%s>\d+)' % pl_name, regex) else: regex = re.sub(pl_regex, '(?P<%s>.*)' % pl_name, regex) return regex def load_config(filename): ''' Load configuration settings from YAML file. Parameters ---------- filename: str name of the config file Returns ------- dict YAML content Raises ------ OSError when `filename` does not exist ''' if not os.path.exists(filename): raise OSError('Configuration file does not exist: %s' % filename) with open(filename) as f: return yaml.load(f.read()) def load_shift_descriptor(filename): ''' Load shift description from JSON file. Parameters ---------- filename: str name of the shift descriptor file Returns ------- dict JSON content Raises ------ OSError when `filename` does not exist ''' if not os.path.exists(filename): raise OSError('Shift descriptor file does not exist: %s' % filename) with open(filename) as f: return json.load(f) def check_config(cfg): ''' Check that configuration settings contains all required keys. Parameters ---------- cfg: dict configuration settings Raises ------ KeyError when a required key is missing ''' required_keys = { 'COORDINATES_FROM_FILENAME', 'COORDINATES_IN_FILENAME_ONE_BASED', 'SUBEXPERIMENT_FOLDER_FORMAT', 'SUBEXPERIMENT_FILE_FORMAT', 'CYCLE_FROM_FILENAME', 'EXPERIMENT_FROM_FILENAME', 'IMAGE_FOLDER_LOCATION', 'SUBEXPERIMENTS_EXIST', 'SEGMENTATION_FOLDER_LOCATION', 'OBJECTS_FROM_FILENAME', 'SHIFT_FOLDER_LOCATION', 'SHIFT_FILE_FORMAT', 'STATS_FOLDER_LOCATION', 'STATS_FILE_FORMAT', 'CHANNEL_FROM_FILENAME', 'MEASUREMENT_FOLDER_LOCATION' } for key in required_keys: if key not in cfg: raise KeyError('Configuration file must contain the key "%s"' % key) def write_joblist(filename, joblist): ''' Write joblist to YAML file. Parameters ---------- filename: str name of the YAML file joblist: List[dict] job descriptions Raises ------ OSError when `filename` does not exist ''' if not os.path.exists(filename): raise OSError('Joblist file does not exist: %s' % filename) with open(filename, 'w') as joblist_file: joblist_file.write(yaml.dump(joblist, default_flow_style=False)) def read_joblist(filename): ''' Read joblist to YAML file. Parameters ---------- filename: str name of the YAML file Returns ------- List[dict] job descriptions Raises ------ OSError when `filename` does not exist ''' if not os.path.exists(filename): raise OSError('Joblist file does not exist: %s' % filename) with open(filename, 'r') as joblist_file: return yaml.load(joblist_file.read()) class Namespacified(object): ''' Class for loading key-value pairs of a dictionary into a Namespace object. ''' def __init__(self, adict): self.__dict__.update(adict)
lang: Python
proba: 0.000001

commit: dcdd4040f45546472ff012dd4830e51804a1b9e5
subject: Disable merchant debugging per default (to prevent logging and save disk space)
old_file: merchant_sdk/MerchantServer.py
new_file: merchant_sdk/MerchantServer.py
new_contents, then old_contents:
import json from typing import Type from flask import Flask, request, Response from flask_cors import CORS from .MerchantBaseLogic import MerchantBaseLogic from .models import SoldOffer def json_response(obj): js = json.dumps(obj) resp = Response(js, status=200, mimetype='application/json') return resp class MerchantServer: def __init__(self, merchant_logic: Type[MerchantBaseLogic]): self.merchant_logic = merchant_logic self.server_settings = { 'debug': False } self.app = Flask(__name__) CORS(self.app) self.register_routes() def log(self, *msg): if self.server_settings['debug']: print(*msg) ''' Helper methods ''' def get_all_settings(self): tmp_settings = { 'state': self.merchant_logic.get_state() } tmp_settings.update(self.merchant_logic.get_settings()) tmp_settings.update(self.server_settings) return tmp_settings def update_all_settings(self, new_settings): new_server_settings = {k: new_settings[k] for k in new_settings if k in self.server_settings} self.server_settings.update(new_server_settings) new_logic_settings = {k: new_settings[k] for k in new_settings if k in self.merchant_logic.get_settings()} self.merchant_logic.update_settings(new_logic_settings) self.log('update settings', self.get_all_settings()) ''' Routes ''' def register_routes(self): self.app.add_url_rule('/settings', 'get_settings', self.get_settings, methods=['GET']) self.app.add_url_rule('/settings', 'put_settings', self.put_settings, methods=['PUT', 'POST']) self.app.add_url_rule('/settings/execution', 'set_state', self.set_state, methods=['POST']) self.app.add_url_rule('/sold', 'item_sold', self.item_sold, methods=['POST']) ''' Endpoint definitions ''' def get_settings(self): return json_response(self.get_all_settings()) def put_settings(self): new_settings = request.json self.update_all_settings(new_settings) return json_response(self.get_all_settings()) def set_state(self): next_state = request.json['nextState'] self.log('Execution setting - next state:', next_state) ''' Execution settings can contain setting change i.e. on 'init', merchant_url and marketplace_url is given EDIT: maybe remove this settings update, since 'init' is not supported anymore ''' endpoint_setting_keys = ['merchant_url', 'marketplace_url'] endpoint_settings = {k: request.json[k] for k in request.json if k in endpoint_setting_keys} self.update_all_settings(endpoint_settings) if next_state == 'start': self.merchant_logic.start() elif next_state == 'stop': self.merchant_logic.stop() return json_response({}) def item_sold(self): try: sent_json = request.get_json(force=True) offer = SoldOffer.from_dict(sent_json) self.merchant_logic.sold_offer(offer) except Exception as e: self.log(e) return json_response({})
import json from typing import Type from flask import Flask, request, Response from flask_cors import CORS from .MerchantBaseLogic import MerchantBaseLogic from .models import SoldOffer def json_response(obj): js = json.dumps(obj) resp = Response(js, status=200, mimetype='application/json') return resp class MerchantServer: def __init__(self, merchant_logic: Type[MerchantBaseLogic]): self.merchant_logic = merchant_logic self.server_settings = { 'debug': True } self.app = Flask(__name__) CORS(self.app) self.register_routes() def log(self, *msg): if self.server_settings['debug']: print(*msg) ''' Helper methods ''' def get_all_settings(self): tmp_settings = { 'state': self.merchant_logic.get_state() } tmp_settings.update(self.merchant_logic.get_settings()) tmp_settings.update(self.server_settings) return tmp_settings def update_all_settings(self, new_settings): new_server_settings = {k: new_settings[k] for k in new_settings if k in self.server_settings} self.server_settings.update(new_server_settings) new_logic_settings = {k: new_settings[k] for k in new_settings if k in self.merchant_logic.get_settings()} self.merchant_logic.update_settings(new_logic_settings) self.log('update settings', self.get_all_settings()) ''' Routes ''' def register_routes(self): self.app.add_url_rule('/settings', 'get_settings', self.get_settings, methods=['GET']) self.app.add_url_rule('/settings', 'put_settings', self.put_settings, methods=['PUT', 'POST']) self.app.add_url_rule('/settings/execution', 'set_state', self.set_state, methods=['POST']) self.app.add_url_rule('/sold', 'item_sold', self.item_sold, methods=['POST']) ''' Endpoint definitions ''' def get_settings(self): return json_response(self.get_all_settings()) def put_settings(self): new_settings = request.json self.update_all_settings(new_settings) return json_response(self.get_all_settings()) def set_state(self): next_state = request.json['nextState'] self.log('Execution setting - next state:', next_state) ''' Execution settings can contain setting change i.e. on 'init', merchant_url and marketplace_url is given EDIT: maybe remove this settings update, since 'init' is not supported anymore ''' endpoint_setting_keys = ['merchant_url', 'marketplace_url'] endpoint_settings = {k: request.json[k] for k in request.json if k in endpoint_setting_keys} self.update_all_settings(endpoint_settings) if next_state == 'start': self.merchant_logic.start() elif next_state == 'stop': self.merchant_logic.stop() return json_response({}) def item_sold(self): try: sent_json = request.get_json(force=True) offer = SoldOffer.from_dict(sent_json) self.merchant_logic.sold_offer(offer) except Exception as e: self.log(e) return json_response({})
lang: Python
proba: 0

commit: f5421cb76103bf6da4b6dce74f2ae372c892067a
subject: Add write_revision method to Metadata
old_file: onitu/api/metadata.py
new_file: onitu/api/metadata.py
new_contents, then old_contents:
class Metadata(object): """The Metadata class represent the metadata of any file in Onitu. This class should be instantiated via the :func:`Metadata.get_by_id` or :func:`Metadata.get_by_filename` class methods. The PROPERTIES class property represent each property found in the metadata common to all drivers. This is a dict where the key is the name of the property and the item is a tuple containing two functions, one which should be applied the metadata are extracted from the database, the other one they are written. """ PROPERTIES = { 'filename': (str, str), 'size': (int, str), 'owners': (lambda e: e.split(':'), lambda l: ':'.join(l)), 'uptodate': (lambda e: e.split(':'), lambda l: ':'.join(l)), } def __init__(self, plug=None, filename=None, size=0): super(Metadata, self).__init__() self.filename = filename self.size = size self.plug = plug self._revision = None self._fid = None @classmethod def get_by_filename(cls, plug, filename): """Instantiate a new :class:`Metadata` object for the file with the given name. """ fid = plug.redis.hget('files', filename) if fid: return cls.get_by_id(plug, fid) else: return None @classmethod def get_by_id(cls, plug, fid): """Instantiate a new :class:`Metadata` object for the file with the given id. """ values = plug.redis.hgetall('files:{}'.format(fid)) metadata = cls() metadata.plug = plug metadata._fid = fid for name, (deserialize, _) in cls.PROPERTIES.items(): metadata.__setattr__(name, deserialize(values.get(name))) return metadata @property def revision(self): """Return the current revision of the file for this entry. If the value has been setted manualy but not saved, returns it. Otherwise, seeks the value in the database. """ if self._revision: return self._revision elif self._fid: return self.plug.redis.hget( 'drivers:{}:files'.format(self.plug.name), self._fid ) @revision.setter def revision(self, value): """Set the current revision of the file for this entry. The value is only saved when either :func:`Metadata.write_revision` or :func:`Metadata.write` is called. """ self._revision = value def write_revision(self): if not self._revision: return self.plug.redis.hset( 'drivers:{}:files'.format(self.plug.name), self._fid, self._revision ) self._revision = None def write(self): """Write the metadata for the current object in the database. """ metadata = {} for name, (_, serialize) in self.PROPERTIES.items(): try: metadata[name] = serialize(self.__getattribute__(name)) except AttributeError: self.plug.error("Error writing metadata for {}, missing" "attribute {}".format(self._fid, name)) return self.plug.redis.hmset('files:{}'.format(self._fid), metadata) self.write_revision()
class Metadata(object): """The Metadata class represent the metadata of any file in Onitu. This class should be instantiated via the :func:`Metadata.get_by_id` or :func:`Metadata.get_by_filename` class methods. The PROPERTIES class property represent each property found in the metadata common to all drivers. This is a dict where the key is the name of the property and the item is a tuple containing two functions, one which should be applied the metadata are extracted from the database, the other one they are written. """ PROPERTIES = { 'filename': (str, str), 'size': (int, str), 'owners': (lambda e: e.split(':'), lambda l: ':'.join(l)), 'uptodate': (lambda e: e.split(':'), lambda l: ':'.join(l)), } def __init__(self, plug=None, filename=None, size=0): super(Metadata, self).__init__() self.filename = filename self.size = size self.plug = plug self._revision = None self._fid = None @classmethod def get_by_filename(cls, plug, filename): """Instantiate a new :class:`Metadata` object for the file with the given name. """ fid = plug.redis.hget('files', filename) if fid: return cls.get_by_id(plug, fid) else: return None @classmethod def get_by_id(cls, plug, fid): """Instantiate a new :class:`Metadata` object for the file with the given id. """ values = plug.redis.hgetall('files:{}'.format(fid)) metadata = cls() metadata.plug = plug metadata._fid = fid for name, (deserialize, _) in cls.PROPERTIES.items(): metadata.__setattr__(name, deserialize(values.get(name))) return metadata @property def revision(self): """Return the current revision of the file for this entry. If the value has been setted manualy but not saved, returns it. Otherwise, seeks the value in the database. """ if self._revision: return self._revision elif self._fid: return self.plug.redis.hget( 'drivers:{}:files'.format(self.plug.name), self._fid ) @revision.setter def revision(self, value): """Set the current revision of the file for this entry. The value will only be save when :func:`Meta.write` will be called. """ self._revision = value def write(self): """Write the metadata for the current object the database. """ metadata = {} for name, (_, serialize) in self.PROPERTIES.items(): try: metadata[name] = serialize(self.__getattribute__(name)) except AttributeError: self.plug.error("Error writing metadata for {}, " "missing attribute {}".format(self._fid, name)) return self.plug.redis.hmset('files:{}'.format(self._fid), metadata) if self._revision: self.plug.redis.hset( 'drivers:{}:files'.format(self.plug.name), self._fid, self._revision ) self._revision = None
lang: Python
proba: 0.000001

commit: 3ee4d2f80f58cb0068eaeb3b7f5c4407ce8e60d0
subject: add text information about progress of downloading
old_file: vk-photos-downloader.py
new_file: vk-photos-downloader.py
new_contents, then old_contents:
#!/usr/bin/python3.5 #-*- coding: UTF-8 -*- import vk, os, time from urllib.request import urlretrieve token = input("Enter a token: ") # vk token #Authorization session = vk.Session(access_token=str(token)) vk_api = vk.API(session) count = 0 # count of down. photos perc = 0 # percent of down. photos breaked = 0 # unsuccessful down. time_now = time.time() # current time url = input("Enter a URL of album: ") # url of album folder_name = input("Enter a name of folder for download photos: ") # fold. for photo print("-------------------------------------------") owner_id = url.split('album')[1].split('_')[0] # id of owner album_id = url.split('album')[1].split('_')[1][0:-1] # id of album photos_count = vk_api.photos.getAlbums(owner_id=owner_id, album_ids=album_id)[0]['size'] # count of ph. in albums album_title = vk_api.photos.getAlbums(owner_id=owner_id, album_ids=album_id)[0]['title'] # albums title photos_information = vk_api.photos.get(owner_id=owner_id, album_id=album_id) # dictionaries of photos information print("A title of album - {}".format(album_title)) print("Photos in album - {}".format(photos_count)) print("------------------") if not os.path.exists(folder_name): os.makedirs(folder_name + '/' + album_title) # creating a folder for download photos print("Created a folder for photo.") print("---------------------------") else: print("A folder with this name already exists!") exit() photos_link = [] # photos link for i in photos_information: photos_link.append(i['src_xxbig']) photo_name = 0 # photo name for i in photos_link: photo_name += 1 try: urlretrieve(i, folder_name + '/' + album_title + '/' + str(photo_name) + '.jpg') # download photos count += 1 perc = (100 * count) / photos_count print("Download {} of {} photos. ({}%)".format(count, photos_count, round(perc, 2))) except: print("An error occurred, file skipped.") breaked += 1 minutes = int((time.time() - time_now) // 60) seconds = int((time.time() - time_now) % 60) print("------------------------") print("Successful download {} photos.".format(count)) print("Skipped {} photos.".format(breaked)) print("Time spent: {}.{} minutes.".format(minutes, seconds))
#!/usr/bin/python3.5 #-*- coding: UTF-8 -*- import vk, os, time from urllib.request import urlretrieve token = input("Enter a token: ") #Authorization session = vk.Session(access_token=str(token)) vk_api = vk.API(session) count = 0 # count of down. photos perc = 0 # percent of down. photos breaked = 0 # unsuccessful down. time_now = time.time() # current time url = input("Enter a URL of album: ") # url of album folder_name = input("Enter a name of folder for download photos: ") # fold. for photo owner_id = url.split('album')[1].split('_')[0] # id of owner album_id = url.split('album')[1].split('_')[1][0:-1] # id of album photos_count = vk_api.photos.getAlbums(owner_id=owner_id, album_ids=album_id)[0]['size'] # count of ph. in albums album_title = vk_api.photos.getAlbums(owner_id=owner_id, album_ids=album_id)[0]['title'] # albums title photos_information = vk_api.photos.get(owner_id=owner_id, album_id=album_id) # dictionaries of photos information photos_link = [] # photos link for i in photos_information: photos_link.append(i['src_xxbig']) if not os.path.exists(folder_name): os.makedirs(folder_name + '/' + album_title) # creating a folder for download photos qw = 'ok' else: print("A folder with this name already exists!") exit() photo_name = 0 # photo name for i in photos_link: photo_name += 1 urlretrieve(i, folder_name + '/' + album_title + '/' + str(photo_name) + '.jpg') # download photos
lang: Python
proba: 0

commit: b674bc4c369139926710311f1a3fc6ad39da9f0a
subject: optimize code
old_file: app/xsbk.py
new_file: app/xsbk.py
new_contents, then old_contents:
# 抓取嗅事百科的段子 from cobweb.downloader import * from cobweb.parser import * import time import re def parse_joke(self): data = self.soup.find_all('div', class_='article block untagged mb15') content_pattern = re.compile("<div.*?content\">(.*?)<!--(.*?)-->.*?</div>", re.S) self.content = [] for d in data: soup_d = BeautifulSoup(str(d), 'html.parser', from_encoding='utf8') # 用户名 name = soup_d.h2.string # 内容(内容+时间&图片) c = soup_d.find('div', class_='content') # content = str(c.contents[0]).strip('\n') # timestamp = str(c.contents[1]) re1 = content_pattern.findall(str(c)) content = re1[0][0].strip('\n').replace('<br>', '\n') timestamp = re1[0][1] img = soup_d.find('div', class_='thumb') if img: img_src = img.contents[1].contents[1]['src'] content += "[img: %s]" % str(img_src) # 点赞数 like = soup_d.find('i', class_='number').string j = "name: %s\ncontent: %s\ntime: %s\nlike: %s" % (str(name), content, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(timestamp))), str(like)) self.content.append(j) return self class Sxbk: def __init__(self): self.page = 1 self.url = 'http://www.qiushibaike.com/hot/page/' self.joke_lists = [] self.enable = True self.downloader = Downloader() self.parse = Parser(None, parse_joke) # 下载页面 def get_page(self, num=1): return self.downloader.get(self.url + str(num), header={ 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' }, timeout=50).decode('utf8') # 解析段子到 list def gen_jokes(self, html): self.parse.set_html(html) self.joke_lists += self.parse.parse_content().get_content() # start def start(self): print('按回车开始...') while self.enable: n = input() if n == 'q': exit() if len(self.joke_lists) < 2: html = self.get_page(self.page) self.gen_jokes(html) self.page += 1 print(self.joke_lists[0]) del self.joke_lists[0] s = Sxbk() s.start()
# 抓取嗅事百科的段子 from cobweb.downloader import * from cobweb.parser import * import time import re def parse_joke(self): data = self.soup.find_all('div', class_='article block untagged mb15') self.content = [] for d in data: soup_d = BeautifulSoup(str(d), 'html.parser', from_encoding='utf8') # 用户名 name = soup_d.h2.string # 内容(内容+时间&图片) c = soup_d.find('div', class_='content') # content = str(c.contents[0]).strip('\n') # timestamp = str(c.contents[1]) pattern = re.compile("<div.*?content\">(.*?)<!--(.*?)-->.*?</div>", re.S) re1 = pattern.findall(str(c)) content = re1[0][0].strip('\n').replace('<br>', '\n') timestamp = re1[0][1] img = soup_d.find('div', class_='thumb') if img: img_src = img.contents[1].contents[1]['src'] content += "[img: %s]" % str(img_src) # 点赞数 like = soup_d.find('i', class_='number').string j = "name: %s\ncontent: %s\ntime: %s\nlike: %s" % (str(name), content, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(timestamp))), str(like)) print(j) self.content.append(j) return self class Sxbk: def __init__(self): self.page = 1 self.url = 'http://www.qiushibaike.com/hot/page/' self.joke_lists = [] self.enable = True self.downloader = Downloader() self.parse = Parser(None, parse_joke) # 下载页面 def get_page(self, num=1): return self.downloader.get(self.url + str(num), header={ 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' }, timeout=50).decode('utf8') # 解析段子到 list def gen_jokes(self, html): self.parse.set_html(html) self.joke_lists += self.parse.parse_content().get_content() # start def start(self): print('按回车开始...') while self.enable: n = input() if n == 'q': exit() if len(self.joke_lists) < 2: html = self.get_page(self.page) self.gen_jokes(html) self.page += 1 print(self.joke_lists[0]) del self.joke_lists[0] s = Sxbk() s.start()
lang: Python
proba: 0.999117

commit: cc894ecf36a95d18fc84a4866c5a1902d291ccbe
subject: Use non-lazy `gettext` where sufficient
old_file: byceps/blueprints/site/ticketing/forms.py
new_file: byceps/blueprints/site/ticketing/forms.py
new_contents, then old_contents:
""" byceps.blueprints.site.ticketing.forms ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2021 Jochen Kupperschmidt :License: Revised BSD (see `LICENSE` file for details) """ from flask import g from flask_babel import gettext, lazy_gettext from wtforms import StringField from wtforms.validators import InputRequired, ValidationError from ....services.consent import ( consent_service, subject_service as consent_subject_service, ) from ....services.user import service as user_service from ....util.l10n import LocalizedForm def validate_user(form, field): screen_name = field.data.strip() user = user_service.find_user_by_screen_name( screen_name, case_insensitive=True ) if user is None: raise ValidationError(gettext('Unknown username')) if (not user.initialized) or user.suspeded or user.deleted: raise ValidationError(gettext('The user account is not active.')) user = user.to_dto() required_consent_subjects = ( consent_subject_service.get_subjects_required_for_brand(g.brand_id) ) required_consent_subject_ids = { subject.id for subject in required_consent_subjects } if not consent_service.has_user_consented_to_all_subjects( user.id, required_consent_subject_ids ): raise ValidationError( gettext( 'User "%(screen_name)s" has not yet given all necessary ' 'consents. Logging in again is required.', screen_name=user.screen_name, ) ) field.data = user class SpecifyUserForm(LocalizedForm): user = StringField( lazy_gettext('Username'), [InputRequired(), validate_user] )
""" byceps.blueprints.site.ticketing.forms ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2021 Jochen Kupperschmidt :License: Revised BSD (see `LICENSE` file for details) """ from flask import g from flask_babel import lazy_gettext from wtforms import StringField from wtforms.validators import InputRequired, ValidationError from ....services.consent import ( consent_service, subject_service as consent_subject_service, ) from ....services.user import service as user_service from ....util.l10n import LocalizedForm def validate_user(form, field): screen_name = field.data.strip() user = user_service.find_user_by_screen_name( screen_name, case_insensitive=True ) if user is None: raise ValidationError(lazy_gettext('Unknown username')) if (not user.initialized) or user.suspeded or user.deleted: raise ValidationError(lazy_gettext('The user account is not active.')) user = user.to_dto() required_consent_subjects = ( consent_subject_service.get_subjects_required_for_brand(g.brand_id) ) required_consent_subject_ids = { subject.id for subject in required_consent_subjects } if not consent_service.has_user_consented_to_all_subjects( user.id, required_consent_subject_ids ): raise ValidationError( lazy_gettext( 'User "%(screen_name)s" has not yet given all necessary ' 'consents. Logging in again is required.', screen_name=user.screen_name, ) ) field.data = user class SpecifyUserForm(LocalizedForm): user = StringField( lazy_gettext('Username'), [InputRequired(), validate_user] )
lang: Python
proba: 0.000092

commit: 862885b5ea2b4d04c8980c257d3cdf644dd60f0c
subject: Set the version to 0.1.6 final
old_file: king_phisher/version.py
new_file: king_phisher/version.py
new_contents, then old_contents:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/version.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import collections version_info = collections.namedtuple('version_info', ['major', 'minor', 'micro'])(0, 1, 6) """A tuple representing the version information in the format ('major', 'minor', 'micro')""" version_label = '' """A version lable such as alpha or beta.""" version = "{0}.{1}.{2}".format(version_info.major, version_info.minor, version_info.micro) """A string representing the full version information.""" # distutils_version is compatible with distutils.version classes distutils_version = version """A string sutiable for being parsed by :py:mod:`distutils.version` classes.""" if version_label: version += '-' + version_label distutils_version += version_label[0] if version_label[-1].isdigit(): distutils_version += version_label[-1] else: distutils_version += '0' rpc_api_version = 2 """An integer representing the current version of the RPC API, used for compatibility checks."""
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/version.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import collections version_info = collections.namedtuple('version_info', ['major', 'minor', 'micro'])(0, 1, 6) """A tuple representing the version information in the format ('major', 'minor', 'micro')""" version_label = 'beta' """A version lable such as alpha or beta.""" version = "{0}.{1}.{2}".format(version_info.major, version_info.minor, version_info.micro) """A string representing the full version information.""" # distutils_version is compatible with distutils.version classes distutils_version = version """A string sutiable for being parsed by :py:mod:`distutils.version` classes.""" if version_label: version += '-' + version_label distutils_version += version_label[0] if version_label[-1].isdigit(): distutils_version += version_label[-1] else: distutils_version += '0' rpc_api_version = 2 """An integer representing the current version of the RPC API, used for compatibility checks."""
lang: Python
proba: 0.003887

commit: 2ac05a8ccc9d7a9ab4f455f18355d80af9e13c84
subject: add temp file context
old_file: tpl/path.py
new_file: tpl/path.py
new_contents, then old_contents:
# -*- coding:utf-8 -*- import os import uuid HOME = os.path.abspath(os.path.expanduser('~')) WORK_DIR = os.path.abspath(os.getcwd()) CWD = WORK_DIR class TempDir(object): pass class TempFile(object): def __init__(self, name=None, suffix='tmp'): self.path = '/tmp/{}.{}'.format(name or str(uuid.uuid4()), suffix) self._fd = None self._close = False @property def fd(self): if self._fd is None: self._fd = open(self.path, 'w') return self._fd def close(self): if self._close is True: return self.fd.close() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() os.remove(self.path) class TempPipe(object): def __init__(self): self.pipe_path = '/tmp/{}.pipe'.format(str(uuid.uuid4())) self._pipe = None @property def pipe(self): if self._pipe is None: self._pipe = open(self.pipe_path, 'rb') return self._pipe def __enter__(self): os.mkfifo(self.pipe_path) return self def __exit__(self, exc_type, exc_val, exc_tb): self.pipe.close() os.remove(self.pipe_path) def list_dirs(path, recursion=True): assert os.path.exists(path) and os.path.isdir(path) if recursion is True: for dir_path, dir_names, _ in os.walk(path): for dir_name in dir_names: yield os.path.join(dir_path, dir_name) if recursion is False: for dir in [p for p in os.listdir(path) if os.path.isdir(os.path.join(path, p))]: yield os.path.join(path, dir) def list_files(path, recursion=True): assert os.path.exists(path) and os.path.isdir(path) if recursion is True: for dir_path, _, file_names in os.walk(path): for file_name in file_names: yield os.path.join(dir_path, file_name) if recursion is False: for file in [p for p in os.listdir(path) if os.path.isfile(os.path.join(path, p))]: yield os.path.join(path, file) def list_all(path, recursion=True): assert os.path.exists(path) and os.path.isdir(path) if recursion is True: for dir in list_dirs(path): yield dir for file in list_files(path): yield file if recursion is False: for p in os.listdir(path): yield os.path.join(path, p) def get_parent_path(path, depth=1): parent_path = path for _ in range(depth): parent_path = os.path.abspath(os.path.dirname(parent_path)) return parent_path def mkdirs(path): if os.path.exists(path) and os.path.isdir(path): return os.makedirs(path) def touch(path): if os.path.exists(path) and os.path.isfile(path): return fd = open(path, 'w') fd.close()
# -*- coding:utf-8 -*- import os import uuid HOME = os.path.abspath(os.path.expanduser('~')) WORK_DIR = os.path.abspath(os.getcwd()) CWD = WORK_DIR class TempDir(object): pass class TempFile(object): pass class TempPipe(object): def __init__(self): self.pipe_path = '/tmp/{}.pipe'.format(str(uuid.uuid4())) self._pipe = None @property def pipe(self): if self._pipe is None: self._pipe = open(self.pipe_path, 'rb') return self._pipe def __enter__(self): os.mkfifo(self.pipe_path) return self def __exit__(self, exc_type, exc_val, exc_tb): self.pipe.close() os.remove(self.pipe_path) def list_dirs(path, recursion=True): assert os.path.exists(path) and os.path.isdir(path) if recursion is True: for dir_path, dir_names, _ in os.walk(path): for dir_name in dir_names: yield os.path.join(dir_path, dir_name) if recursion is False: for dir in [p for p in os.listdir(path) if os.path.isdir(os.path.join(path, p))]: yield os.path.join(path, dir) def list_files(path, recursion=True): assert os.path.exists(path) and os.path.isdir(path) if recursion is True: for dir_path, _, file_names in os.walk(path): for file_name in file_names: yield os.path.join(dir_path, file_name) if recursion is False: for file in [p for p in os.listdir(path) if os.path.isfile(os.path.join(path, p))]: yield os.path.join(path, file) def list_all(path, recursion=True): assert os.path.exists(path) and os.path.isdir(path) if recursion is True: for dir in list_dirs(path): yield dir for file in list_files(path): yield file if recursion is False: for p in os.listdir(path): yield os.path.join(path, p) def get_parent_path(path, depth=1): parent_path = path for _ in range(depth): parent_path = os.path.abspath(os.path.dirname(parent_path)) return parent_path def mkdirs(path): if os.path.exists(path) and os.path.isdir(path): return os.makedirs(path) def touch(path): if os.path.exists(path) and os.path.isfile(path): return fd = open(path, 'w') fd.close()
lang: Python
proba: 0.000001

commit: 7c8d7a456634d15f8c13548e2cfd6be9440f7c65
subject: Add handler for 403 forbidden (User does not have Atmosphere access, but was correctly authenticated)
old_file: troposphere/__init__.py
new_file: troposphere/__init__.py
new_contents, then old_contents:
import logging from flask import Flask from flask import render_template, redirect, url_for, request, abort import requests from troposphere import settings from troposphere.cas import (cas_logoutRedirect, cas_loginRedirect, cas_validateTicket) from troposphere.oauth import generate_access_token logger = logging.getLogger(__name__) app = Flask(__name__) def get_maintenance(): """ Returns a list of maintenance records along with a boolean to indicate whether or not login should be disabled """ return ([], False) @app.route('/') def redirect_app(): return "Redirect" @app.errorhandler(503) def handle_maintenance(): return "We're undergoing maintenance" @app.route('/login', methods=['GET', 'POST']) def login(): """ CAS Login : Phase 1/3 Call CAS Login """ records, disabled_login = get_maintenance() if disabled_login: abort(503) #if request.method == "POST" and 'next' in request.form: return cas_loginRedirect('/application/') #else: #return "Login please" @app.route('/logout') def logout(): #django_logout(request) if request.POST.get('cas', False): return cas_logoutRedirect() return redirect(settings.REDIRECT_URL + '/login') @app.route('/CAS_serviceValidater') def cas_service_validator(): """ Method expects 2 GET parameters: 'ticket' & 'sendback' After a CAS Login: Redirects the request based on the GET param 'ticket' Unauthorized Users are returned a 401 Authorized Users are redirected to the GET param 'sendback' """ logger.debug('GET Variables:%s' % request.args) sendback = request.args.get('sendback', None) ticket = request.args.get('ticket', None) if not ticket: logger.info("No Ticket received in GET string") abort(400) user = cas_validateTicket(ticket, sendback) logger.debug(user + " successfully authenticated against CAS") # Now check Groupy key = open(settings.OAUTH_PRIVATE_KEY, 'r').read() try: token = generate_access_token(key, user) logger.debug("TOKEN: " + token) return redirect(sendback) except: abort(403) @app.errorhandler(403) def no_user(e): logger.debug(e) return "You're not an Atmopshere user" #@app.route('/CASlogin', defaults={'path': ''}) #@app.route('/CASlogin/<redirect>') # """ # url(r'^CASlogin/(?P<redirect>.*)$', 'authentication.cas_loginRedirect'), # """ # pass @app.route('/application', defaults={'path': ''}) @app.route('/application/', defaults={'path': ''}) @app.route('/application/<path:path>') def application(path): return render_template('application.html') if __name__ == '__main__': app.run(host='0.0.0.0', debug=True)
import logging from flask import Flask from flask import render_template, redirect, url_for, request import requests from troposphere import settings from troposphere.cas import (cas_logoutRedirect, cas_loginRedirect, cas_validateTicket) from troposphere.oauth import generate_access_token logger = logging.getLogger(__name__) app = Flask(__name__) def get_maintenance(): """ Returns a list of maintenance records along with a boolean to indicate whether or not login should be disabled """ return ([], False) @app.route('/') def redirect_app(): return "Redirect" @app.errorhandler(503) def handle_maintenance(): return "We're undergoing maintenance" @app.route('/login', methods=['GET', 'POST']) def login(): """ CAS Login : Phase 1/3 Call CAS Login """ records, disabled_login = get_maintenance() if disabled_login: abort(503) #if request.method == "POST" and 'next' in request.form: return cas_loginRedirect('/application/') #else: #return "Login please" @app.route('/logout') def logout(): #django_logout(request) if request.POST.get('cas', False): return cas_logoutRedirect() return redirect(settings.REDIRECT_URL + '/login') @app.route('/CAS_serviceValidater') def cas_service_validator(): """ Method expects 2 GET parameters: 'ticket' & 'sendback' After a CAS Login: Redirects the request based on the GET param 'ticket' Unauthorized Users are returned a 401 Authorized Users are redirected to the GET param 'sendback' """ logger.debug('GET Variables:%s' % request.args) sendback = request.args.get('sendback', None) ticket = request.args.get('ticket', None) if not ticket: logger.info("No Ticket received in GET string") abort(400) user = cas_validateTicket(ticket, sendback) logger.debug(user + " successfully authenticated against CAS") # Now check Groupy key = open(settings.OAUTH_PRIVATE_KEY, 'r').read() token = generate_access_token(key, user) logger.debug("TOKEN: " + token) return redirect(sendback) @app.route('/no_user') def no_user(): return "You're not an Atmopshere user" #@app.route('/CASlogin', defaults={'path': ''}) #@app.route('/CASlogin/<redirect>') # """ # url(r'^CASlogin/(?P<redirect>.*)$', 'authentication.cas_loginRedirect'), # """ # pass @app.route('/application', defaults={'path': ''}) @app.route('/application/', defaults={'path': ''}) @app.route('/application/<path:path>') def application(path): return render_template('application.html') if __name__ == '__main__': app.run(host='0.0.0.0', debug=True)
lang: Python
proba: 0

commit: b7be60eff8e0c82741dda674824aa748e33e7fdd
subject: convert pui.py to pywiki framework
old_file: trunk/toolserver/pui.py
new_file: trunk/toolserver/pui.py
new_contents, then old_contents:
#!usr/bin/python # -*- coding: utf-8 -* # # (C) Legoktm 2008-2009, MIT License # import re sys.path.append(os.environ['HOME'] + '/pywiki') import wiki page = wiki.Page('Wikipedia:Possibly unfree images') wikitext = state0 = page.get() wikitext = re.compile(r'\n==New listings==', re.IGNORECASE).sub(r'\n*[[/{{subst:#time:Y F j|-14 days}}]]\n==New listings==', wikitext) EditMsg = 'Adding new day to holding cell' wiki.showDiff(state0, wikitext) page.put(wikitext,EditMsg)
#!usr/bin/python # -*- coding: utf-8 -* # # (C) Legoktm 2008-2009, MIT License # import re, sys, os sys.path.append(os.environ['HOME'] + '/pyenwiki') import wikipedia site = wikipedia.getSite() page = wikipedia.Page(site, 'Wikipedia:Possibly unfree images') wikitext = state0 = page.get() wikitext = re.compile(r'\n==New listings==', re.IGNORECASE).sub(r'\n*[[/{{subst:#time:Y F j|-14 days}}]]\n==New listings==', wikitext) EditMsg = 'Adding new day to holding cell' wikipedia.showDiff(state0, wikitext) wikipedia.setAction(EditMsg) page.put(wikitext)
lang: Python
proba: 0.999999

commit: 7b3fd535b7622a9c4253aa80276e05ed83f8177e
subject: Fix app name error
old_file: txmoney/rates/models.py
new_file: txmoney/rates/models.py
new_contents, then old_contents:
# coding=utf-8 from __future__ import absolute_import, unicode_literals from datetime import date from decimal import Decimal from django.db import models from django.utils.functional import cached_property from ..settings import txmoney_settings as settings from .exceptions import RateDoesNotExist class RateSource(models.Model): name = models.CharField(max_length=100) base_currency = models.CharField(max_length=3, default=settings.BASE_CURRENCY, blank=True) last_update = models.DateTimeField(auto_now=True, blank=True) class Meta: app_label = 'txmoneyrates' unique_together = ('name', 'base_currency') @cached_property def is_updated(self): return True if self.last_update.date() == date.today() else False class RateQuerySet(models.QuerySet): def get_rate_currency_by_date(self, currency, currency_date=None): """ Get currency for a date :param currency: base currency. :param currency_date: ratio currency. :return: Currency """ currency_date = currency_date if currency_date else date.today() try: # TODO: check if rate if updated else update return self.get(currency=currency, date=currency_date) except Rate.DoesNotExist: try: backend = settings.DEFAULT_BACKEND() backend.update_rates() return self.get(currency=currency, date=currency_date) except Rate.DoesNotExist: raise RateDoesNotExist(currency, currency_date) class Rate(models.Model): source = models.ForeignKey(RateSource, on_delete=models.PROTECT, related_name='rates', related_query_name='rate') currency = models.CharField(max_length=3, unique_for_date='date') value = models.DecimalField(max_digits=14, decimal_places=6) date = models.DateField(auto_now_add=True, blank=True) objects = RateQuerySet.as_manager() class Meta: app_label = 'txmoneyrates' unique_together = ('source', 'currency', 'date') @staticmethod def get_ratio(from_currency, to_currency, ratio_date=None): """ Calculate exchange ratio between two currencies for a date :param from_currency: base currency. :param to_currency: ratio currency. :param ratio_date: ratio date :return: Decimal """ ratio_date = ratio_date if ratio_date else date.today() # If not default currency get date base currency rate value because all rates are for base currency ratio = Decimal(1) if from_currency == settings.BASE_CURRENCY else \ Rate.objects.get_rate_currency_by_date(from_currency, ratio_date).value if to_currency != settings.BASE_CURRENCY: money_rate = Decimal(1) / Rate.objects.get_rate_currency_by_date(to_currency, ratio_date).value ratio *= money_rate return ratio
# coding=utf-8 from __future__ import absolute_import, unicode_literals from datetime import date from decimal import Decimal from django.db import models from django.utils.functional import cached_property from ..settings import txmoney_settings as settings from .exceptions import RateDoesNotExist class RateSource(models.Model): name = models.CharField(max_length=100) base_currency = models.CharField(max_length=3, default=settings.BASE_CURRENCY, blank=True) last_update = models.DateTimeField(auto_now=True, blank=True) class Meta: unique_together = ('name', 'base_currency') @cached_property def is_updated(self): return True if self.last_update.date() == date.today() else False class RateQuerySet(models.QuerySet): def get_rate_currency_by_date(self, currency, currency_date=None): """ Get currency for a date :param currency: base currency. :param currency_date: ratio currency. :return: Currency """ currency_date = currency_date if currency_date else date.today() try: # TODO: check if rate if updated else update return self.get(currency=currency, date=currency_date) except Rate.DoesNotExist: try: backend = settings.DEFAULT_BACKEND() backend.update_rates() return self.get(currency=currency, date=currency_date) except Rate.DoesNotExist: raise RateDoesNotExist(currency, currency_date) class Rate(models.Model): source = models.ForeignKey(RateSource, on_delete=models.PROTECT, related_name='rates', related_query_name='rate') currency = models.CharField(max_length=3, unique_for_date='date') value = models.DecimalField(max_digits=14, decimal_places=6) date = models.DateField(auto_now_add=True, blank=True) objects = RateQuerySet.as_manager() class Meta: unique_together = ('source', 'currency', 'date') @staticmethod def get_ratio(from_currency, to_currency, ratio_date=None): """ Calculate exchange ratio between two currencies for a date :param from_currency: base currency. :param to_currency: ratio currency. :param ratio_date: ratio date :return: Decimal """ ratio_date = ratio_date if ratio_date else date.today() # If not default currency get date base currency rate value because all rates are for base currency ratio = Decimal(1) if from_currency == settings.BASE_CURRENCY else \ Rate.objects.get_rate_currency_by_date(from_currency, ratio_date).value if to_currency != settings.BASE_CURRENCY: money_rate = Decimal(1) / Rate.objects.get_rate_currency_by_date(to_currency, ratio_date).value ratio *= money_rate return ratio
Python
0.000009
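Rate.get_ratio above pivots every conversion through the base currency: start from the source currency's stored rate (1 for the base itself) and divide out the target currency's rate. A minimal standalone sketch of that arithmetic; the currency codes and rate values are made-up illustration data, not part of the module:

from decimal import Decimal

# Hypothetical rates, each quoted against the base currency.
RATES = {'EUR': Decimal('0.90'), 'GBP': Decimal('0.78')}
BASE = 'USD'

def get_ratio(from_currency, to_currency):
    # Source side: 1 if converting from the base, else its stored rate.
    ratio = Decimal(1) if from_currency == BASE else RATES[from_currency]
    # Target side: divide out the target currency's rate against the base.
    if to_currency != BASE:
        ratio *= Decimal(1) / RATES[to_currency]
    return ratio

print(get_ratio('EUR', 'GBP'))  # EUR -> GBP via the USD pivot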
d6029a7b2e39ff6222ca3d6788d649b14bbf35e3
add smoother to denominator as well
trending.py
trending.py
import googleanalytics as ga import collections import numpy import datetime SMOOTHER = 20 WINDOW = 8 GROWTH_THRESHOLD = 0.02 def trend(counts) : X, Y = zip(*counts) X = numpy.array([x.toordinal() for x in X]) X -= datetime.date.today().toordinal() A = numpy.array([numpy.ones(len(X)), X]) Y = numpy.log(numpy.array(Y)) w = numpy.linalg.lstsq(A.T,Y)[0] return w profile = ga.authenticate(identity='sunspot', account='Illinois Campaign for Political Reform', webproperty='Illinois Sunshine', profile='Illinois Sunshine') totals = profile.core.query.metrics('pageviews').\ daily(days=-WINDOW) totals = {date : count for date, count in totals.rows} pages = profile.core.query.metrics('pageviews').\ dimensions('pagepath').\ daily(days=-WINDOW) page_counts = collections.defaultdict(dict) normalized_page_counts = collections.defaultdict(dict) for date, page, count in pages.rows : page_counts[page][date] = count normalized_page_counts[page][date] = (count + SMOOTHER)/(totals[date] + SMOOTHER) for counts in normalized_page_counts.values() : for date in totals.keys() - counts.keys() : counts[date] = SMOOTHER/(totals[date] + SMOOTHER) for page, counts in normalized_page_counts.items() : b0, b1 = trend(counts.items()) if b1 > GROWTH_THRESHOLD and page.startswith('/committees/') : print(page, b0, b1) for count in sorted(page_counts[page].items()) : print(count)
import googleanalytics as ga import collections import numpy import datetime SMOOTHER = 20 WINDOW = 8 GROWTH_THRESHOLD = 0.03 def trend(counts) : X, Y = zip(*counts) X = numpy.array([x.toordinal() for x in X]) X -= datetime.date.today().toordinal() A = numpy.array([numpy.ones(len(X)), X]) Y = numpy.log(numpy.array(Y)) w = numpy.linalg.lstsq(A.T,Y)[0] return w profile = ga.authenticate(identity='sunspot', account='Illinois Campaign for Political Reform', webproperty='Illinois Sunshine', profile='Illinois Sunshine') totals = profile.core.query.metrics('pageviews').\ daily(days=-WINDOW) totals = {date : count for date, count in totals.rows} pages = profile.core.query.metrics('pageviews').\ dimensions('pagepath').\ daily(days=-WINDOW) page_counts = collections.defaultdict(dict) normalized_page_counts = collections.defaultdict(dict) for date, page, count in pages.rows : page_counts[page][date] = count normalized_page_counts[page][date] = (count + SMOOTHER)/totals[date] for counts in normalized_page_counts.values() : for date in totals.keys() - counts.keys() : counts[date] = SMOOTHER/totals[date] for page, counts in normalized_page_counts.items() : b0, b1 = trend(counts.items()) if b1 > GROWTH_THRESHOLD and page.startswith('/committees/') : print(page, b0, b1) for count in sorted(page_counts[page].items()) : print(count)
Python
0.000001
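The smoothing change above is additive (Laplace-style) smoothing applied to both numerator and denominator, and the trend itself is a least-squares line fitted to log counts. A self-contained sketch of both steps with invented daily counts:

import datetime

import numpy

SMOOTHER = 20

# Hypothetical per-page counts and site-wide totals over four days.
counts = {datetime.date(2015, 7, d): c for d, c in [(1, 3), (2, 9), (3, 20), (4, 45)]}
totals = {day: 1000 for day in counts}

# Adding the smoother to both sides keeps rare pages from producing
# extreme ratios on low-traffic days.
smoothed = {day: (counts[day] + SMOOTHER) / (totals[day] + SMOOTHER) for day in counts}

# Fit log(y) = b0 + b1 * x; a positive slope b1 indicates growth.
X, Y = zip(*sorted(smoothed.items()))
X = numpy.array([x.toordinal() for x in X], dtype=float)
X -= X.max()
A = numpy.array([numpy.ones(len(X)), X])
b0, b1 = numpy.linalg.lstsq(A.T, numpy.log(numpy.array(Y)), rcond=None)[0]
print(b1)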
656c0f44c27f64d14dde7cbbfdec31906dab4c51
Add params to request docs.
treq/api.py
treq/api.py
from treq.client import HTTPClient


def head(url, **kwargs):
    """
    Make a ``HEAD`` request.

    See :py:func:`treq.request`
    """
    return _client(**kwargs).head(url, **kwargs)


def get(url, headers=None, **kwargs):
    """
    Make a ``GET`` request.

    See :py:func:`treq.request`
    """
    return _client(**kwargs).get(url, headers=headers, **kwargs)


def post(url, data=None, **kwargs):
    """
    Make a ``POST`` request.

    See :py:func:`treq.request`
    """
    return _client(**kwargs).post(url, data=data, **kwargs)


def put(url, data=None, **kwargs):
    """
    Make a ``PUT`` request.

    See :py:func:`treq.request`
    """
    return _client(**kwargs).put(url, data=data, **kwargs)


def delete(url, **kwargs):
    """
    Make a ``DELETE`` request.

    See :py:func:`treq.request`
    """
    return _client(**kwargs).delete(url, **kwargs)


def request(method, url, **kwargs):
    """
    Make an HTTP request.

    :param str method: HTTP method. Example: ``'GET'``, ``'HEAD'``, ``'PUT'``, ``'POST'``.

    :param str url: http or https URL, which may include query arguments.

    :param headers: Optional HTTP Headers to send with this request.
    :type headers: Headers or None

    :param params: Optional parameters to be appended as the query string to
        the URL; any query string parameters already in the URL will be
        preserved.
    :type params: dict w/ str or list of str values, list of 2-tuples, or None.

    :param data: Optional request body.
    :type data: str, file-like, IBodyProducer, or None

    :param reactor: Optional twisted reactor.

    :param bool persistent: Use persistent HTTP connections. Default: ``True``

    :param bool allow_redirects: Follow HTTP redirects. Default: ``True``

    :rtype: Deferred that fires with an IResponse provider.
    """
    return _client(**kwargs).request(method, url, **kwargs)


#
# Private API
#

def _client(*args, **kwargs):
    return HTTPClient.with_config(**kwargs)
from treq.client import HTTPClient def head(url, **kwargs): """ Make a ``HEAD`` request. See :py:func:`treq.request` """ return _client(**kwargs).head(url, **kwargs) def get(url, headers=None, **kwargs): """ Make a ``GET`` request. See :py:func:`treq.request` """ return _client(**kwargs).get(url, headers=headers, **kwargs) def post(url, data=None, **kwargs): """ Make a ``POST`` request. See :py:func:`treq.request` """ return _client(**kwargs).post(url, data=data, **kwargs) def put(url, data=None, **kwargs): """ Make a ``PUT`` request. See :py:func:`treq.request` """ return _client(**kwargs).put(url, data=data, **kwargs) def delete(url, **kwargs): """ Make a ``DELETE`` request. See :py:func:`treq.request` """ return _client(**kwargs).delete(url, **kwargs) def request(method, url, **kwargs): """ Make an HTTP request. :param str method: HTTP method. Example: ``'GET'``, ``'HEAD'``. ``'PUT'``, ``'POST'``. :param str url: http or https URL, which may include query arguments. :param headers: Optional HTTP Headers to send with this request. :type headers: Headers or None :param data: Optional request body. :type data: str, file-like, IBodyProducer, or None :param reactor: Optional twisted reactor. :param bool persistent: Use peristent HTTP connections. Default: ``True`` :param bool allow_redirects: Follow HTTP redirects. Default: ``True`` :rtype: Deferred that fires with an IResponse provider. """ return _client(**kwargs).request(method, url, **kwargs) # # Private API # def _client(*args, **kwargs): return HTTPClient.with_config(**kwargs)
Python
0
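The params argument documented above takes a dict (or list of 2-tuples) and is appended to any query string already in the URL. A short Twisted sketch of that call; the endpoint is an arbitrary placeholder, not anything the library mandates:

from twisted.internet.task import react

import treq

def main(reactor):
    # A list value becomes a repeated query parameter (?q=treq&q=twisted).
    d = treq.get('https://httpbin.org/get', params={'q': ['treq', 'twisted']})
    d.addCallback(treq.json_content)  # parse the response body as JSON
    d.addCallback(print)
    return d

react(main)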
0ba512b0e8eb6b5055261afb2962d3bfc5e2fda5
Add some playback stream headers
src/playback.py
src/playback.py
# -*- coding: utf-8 -*- import mimetypes import os from flask import Response, request from werkzeug.datastructures import Headers import audiotranscode from utils import generate_random_key from tables import Song import config def stream_audio(): song = Song.get_one(id=request.args.get('id')) # A hack to get my local dev env working path = song.path if config.DEBUG: cut = '/mnt/storage/audio/music/' path = os.path.join(config.MUSIC_DIR, song.path[len(cut):]) # Find the file and guess type mime = mimetypes.guess_type(path)[0] ext = mimetypes.guess_extension(mime) # Transcoding if required transcode = False if ext not in ['.mp3', '.ogg']: transcode = True mime = "audio/mpeg" ext = '.mp3' # Send some extra headers headers = Headers() headers.add('Content-Transfer-Encoding', 'binary') headers.add('Content-Length', os.path.getsize(path)) def generate_audio(): if not transcode: with open(path, "rb") as handle: data = handle.read(1024) while data: yield data data = handle.read(1024) else: tc = audiotranscode.AudioTranscode() for data in tc.transcode_stream(path, 'mp3'): yield data return Response(generate_audio(), mimetype=mime, headers=headers)
# -*- coding: utf-8 -*- import mimetypes import os from flask import Response, request import audiotranscode from tables import Song import config def stream_audio(): song = Song.get_one(id=request.args.get('id')) # A hack to get my local dev env working path = song.path if config.DEBUG: cut = '/mnt/storage/audio/music/' path = os.path.join(config.MUSIC_DIR, song.path[len(cut):]) # Find the file and guess type mime = mimetypes.guess_type(path)[0] ext = mimetypes.guess_extension(mime) # Transcoding if required transcode = False if ext not in ['.mp3', '.ogg']: transcode = True mime = "audio/mpeg" def generate_audio(): if not transcode: with open(path, "rb") as handle: data = handle.read(1024) while data: yield data data = handle.read(1024) else: tc = audiotranscode.AudioTranscode() for data in tc.transcode_stream(path, 'mp3'): yield data return Response(generate_audio(), mimetype=mime)
Python
0
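The handler above streams the file through a generator so Flask never holds the whole track in memory, with the extra headers attached to the Response. A stripped-down sketch of the same pattern; the file path and MIME type are assumptions for the demonstration:

from flask import Flask, Response

app = Flask(__name__)

@app.route('/stream')
def stream():
    path = '/tmp/example.mp3'  # hypothetical media file

    def generate():
        # Yield the file in 1 KiB chunks instead of reading it all at once.
        with open(path, 'rb') as handle:
            chunk = handle.read(1024)
            while chunk:
                yield chunk
                chunk = handle.read(1024)

    headers = {'Content-Transfer-Encoding': 'binary'}
    return Response(generate(), mimetype='audio/mpeg', headers=headers)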
e6487a2c623638b540b707c895a97eac1fc31979
Update connection_test.py to work with Python3.7
server/integration-tests/connection_test.py
server/integration-tests/connection_test.py
#!/usr/bin/env python3.7 """ Test PUTing some data into Icepeak and getting it back over a websocket. Requires a running Icepeak instance. Requirements can be installed with: pip install requests websockets """ import asyncio import json import requests import websockets # 1. Put some data into icepeak over HTTP new_data = {'status': 'freezing'} requests.put('http://localhost:3000/so/cool', json.dumps(new_data)) # 2. Get the data back over a websocket async def hello(uri): async with websockets.connect(uri) as websocket: result = await websocket.recv() parsed_result = json.loads(result) assert new_data == parsed_result, 'Input data: {} is different from output data: {}'.format( new_data, parsed_result) print('Initial data was successfully sent to client!') asyncio.get_event_loop().run_until_complete( hello('ws://localhost:3000/so/cool'))
#!/usr/bin/env python2.7 from __future__ import absolute_import, division, unicode_literals import json import requests import websocket # 1. Put some data into icepeak over HTTP new_data = {'status': 'freezing'} requests.put('http://localhost:3000/so/cool', json.dumps(new_data)) # 2. Get the data back over a websocket conn = websocket.create_connection("ws://localhost:3000/so/cool") result = conn.recv() parsed_result = json.loads(result) assert new_data == parsed_result, 'Input data: {} is different from output data: {}'.format( new_data, parsed_result) print 'Initial data was successfully sent to client!'
Python
0.000001
2fe5fc8c53142b5661bf176441e246d48cdb0799
fix a typo
Functions/Sed.py
Functions/Sed.py
''' Created on Feb 14, 2014 @author: Tyranic-Moron ''' from IRCMessage import IRCMessage from IRCResponse import IRCResponse, ResponseType from Function import Function from GlobalVars import * import re # matches a sed-style regex pattern (taken from https://github.com/mossblaser/BeardBot/blob/master/modules/sed.py) # I stripped the unnecessary escapes by using a raw string instead sedRegex = re.compile(r"s/(?P<search>(\\\\|(\\[^\\])|[^\\/])+)/(?P<replace>(\\\\|(\\[^\\])|[^\\/])*)((/(?P<flags>.*))?)") class Instantiate(Function): Help = 's/search/replacement/flags - matches sed-like regex replacement patterns and attempts to execute them on the latest matching line from the last 10\n'\ 'flags are g (global), i (case-insensitive), o (only user messages). Example usage: "I\'d eat some tacos" -> s/some/all the/ -> "I\'d eat all the tacos"' messages = [] unmodifiedMessages = [] def GetResponse(self, message): if message.Type != 'PRIVMSG' and message.Type != 'ACTION': return match = sedRegex.match(message.MessageString) if match: search = match.group('search') replace = match.group('replace') flags = match.group('flags') if flags is None: flags = '' response = self.substitute(search, replace, flags) if response is not None: responseType = ResponseType.Say if response.Type == 'ACTION': responseType = ResponseType.Do return IRCResponse(responseType, response.MessageString, message.ReplyTo) else: self.storeMessage(message) def substitute(self, search, replace, flags): messages = self.unmodifiedMessages if 'o' in flags else self.messages for message in reversed(messages): if 'g' in flags: count = 0 else: count = 1 if 'i' in flags: subFlags = re.IGNORECASE else: subFlags = 0 new = re.sub(search, replace, message.MessageString, count, subFlags) new = new[:300] if new != message.MessageString: newMessage = message newMessage.MessageString = new self.storeMessage(newMessage, False) return newMessage return None def storeMessage(self, message, unmodified=True): self.messages.append(message) self.messages = self.messages[-10:] if unmodified: self.unmodifiedMessages.append(message) self.unmodifiedMessages = self.unmodifiedMessages[-10:]
''' Created on Feb 14, 2014 @author: Tyranic-Moron ''' from IRCMessage import IRCMessage from IRCResponse import IRCResponse, ResponseType from Function import Function from GlobalVars import * import re # matches a sed-style regex pattern (taken from https://github.com/mossblaser/BeardBot/blob/master/modules/sed.py) # I stripped the unnecessary escapes by using a raw string instead sedRegex = re.compile(r"s/(?P<search>(\\\\|(\\[^\\])|[^\\/])+)/(?P<replace>(\\\\|(\\[^\\])|[^\\/])*)((/(?P<flags>.*))?)") class Instantiate(Function): Help = 's/search/replacement/flags - matches sed-like regex replacement patterns and attempts to execute them on the latest matching line from the last 10\n'\ 'flags are g (global), i (case-insensitive), o (only user messages). Example usage: "I\'d eat some tacos" -> s/some/all the/ -> "I\'d eat all the tacos"' messages = [] unmodifiedMessages = [] def GetResponse(self, message): if message.Type != 'PRIVMSG' and message.Type != 'ACTION': return match = sedRegex.match(message.MessageString) if match: search = match.group('search') replace = match.group('replace') flags = match.group('flags') if flags is None: flags = '' response = self.substitute(search, replace, flags) if response is not None: responseType = ResponseType.Say if response.Type == 'ACTION': responseType = ResponseType.Do return IRCResponse(responseType, response.MessageString, message.ReplyTo) else: self.storeMessage(message) def substitute(self, search, replace, flags): messages = self.unmodifiedMessages if 'o' in flags else self.messages for message in reversed(messages): if 'g' in flags: count = 0 else: count = 1 if 'i' in flags: subFlags = re.IGNORECASE else subFlags = 0 new = re.sub(search, replace, message.MessageString, count, subFlags) new = new[:300] if new != message.MessageString: newMessage = message newMessage.MessageString = new self.storeMessage(newMessage, False) return newMessage return None def storeMessage(self, message, unmodified=True): self.messages.append(message) self.messages = self.messages[-10:] if unmodified: self.unmodifiedMessages.append(message) self.unmodifiedMessages = self.unmodifiedMessages[-10:]
Python
0.999999
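The substitution above leans on two arguments of re.sub: count (0 means replace every occurrence, i.e. the g flag) and flags (re.IGNORECASE implements i). A tiny demonstration with an invented line:

import re

line = "I'd eat some tacos, Some tacos are great"

# count=1: only the first match is replaced (no 'g' flag).
print(re.sub('some', 'all the', line, 1))

# count=0 plus re.IGNORECASE: every match, case-insensitively ('gi').
print(re.sub('some', 'all the', line, 0, re.IGNORECASE))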
69b9c641f144633b94aca47212af446971286454
add tests
server_common/test_modules/test_autosave.py
server_common/test_modules/test_autosave.py
from __future__ import unicode_literals, absolute_import, print_function, division import unittest import shutil import os from server_common.autosave import AutosaveFile TEMP_FOLDER = os.path.join("C:\\", "instrument", "var", "tmp", "autosave_tests") class TestAutosave(unittest.TestCase): def setUp(self): self.autosave = AutosaveFile(service_name="unittests", file_name="test_file", folder=TEMP_FOLDER) try: os.makedirs(TEMP_FOLDER) except: pass def test_GIVEN_no_existing_file_WHEN_get_parameter_from_autosave_THEN_default_returned(self): default = object() self.assertEqual(self.autosave.read_parameter("some_random_parameter", default), default) def test_GIVEN_parameter_saved_WHEN_get_parameter_from_autosave_THEN_saved_value_returned(self): value = "test_value" self.autosave.write_parameter("parameter", value) self.assertEqual(self.autosave.read_parameter("parameter", None), value) def test_GIVEN_different_parameter_saved_WHEN_get_parameter_from_autosave_THEN_saved_value_returned(self): value = "test_value" self.autosave.write_parameter("other_parameter", value) self.assertEqual(self.autosave.read_parameter("parameter", None), None) def tearDown(self): try: shutil.rmtree(TEMP_FOLDER) except: pass
import unittest class TestAutosave(unittest.TestCase): def setUp(self): pass
Python
0
6a4f4031b0aac1c8859424703088df903746a6c8
change command doc string
dvc/command/get.py
dvc/command/get.py
import argparse import logging from .base import append_doc_link from .base import CmdBaseNoRepo from dvc.exceptions import DvcException logger = logging.getLogger(__name__) class CmdGet(CmdBaseNoRepo): def run(self): from dvc.repo import Repo try: Repo.get( self.args.url, path=self.args.path, out=self.args.out, rev=self.args.rev, ) return 0 except DvcException: logger.exception( "failed to get '{}' from '{}'".format( self.args.path, self.args.url ) ) return 1 def add_parser(subparsers, parent_parser): GET_HELP = "Download/copy files or directories from Git repository." get_parser = subparsers.add_parser( "get", parents=[parent_parser], description=append_doc_link(GET_HELP, "get"), help=GET_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) get_parser.add_argument( "url", help="URL of Git repository to download from." ) get_parser.add_argument( "path", help="Path to a file or directory within the repository." ) get_parser.add_argument( "-o", "--out", nargs="?", help="Destination path to copy/download files to.", ) get_parser.add_argument( "--rev", nargs="?", help="Repository git revision." ) get_parser.set_defaults(func=CmdGet)
import argparse import logging from .base import append_doc_link from .base import CmdBaseNoRepo from dvc.exceptions import DvcException logger = logging.getLogger(__name__) class CmdGet(CmdBaseNoRepo): def run(self): from dvc.repo import Repo try: Repo.get( self.args.url, path=self.args.path, out=self.args.out, rev=self.args.rev, ) return 0 except DvcException: logger.exception( "failed to get '{}' from '{}'".format( self.args.path, self.args.url ) ) return 1 def add_parser(subparsers, parent_parser): GET_HELP = "Download/copy files or directories from git repository." get_parser = subparsers.add_parser( "get", parents=[parent_parser], description=append_doc_link(GET_HELP, "get"), help=GET_HELP, formatter_class=argparse.RawDescriptionHelpFormatter, ) get_parser.add_argument( "url", help="URL of Git repository to download from." ) get_parser.add_argument( "path", help="Path to a file or directory within the repository." ) get_parser.add_argument( "-o", "--out", nargs="?", help="Destination path to copy/download files to.", ) get_parser.add_argument( "--rev", nargs="?", help="Repository git revision." ) get_parser.set_defaults(func=CmdGet)
Python
0.000002
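The add_parser/set_defaults(func=CmdGet) registration above is the standard argparse dispatch pattern: each sub-command attaches its handler, and the caller just invokes args.func. A generic sketch of the pattern; the command and argument names are illustrative, not dvc's actual CLI surface:

import argparse

def cmd_get(args):
    print('would fetch', args.path, 'from', args.url)
    return 0

parser = argparse.ArgumentParser(prog='tool')
subparsers = parser.add_subparsers()

get_parser = subparsers.add_parser('get', help='Download a path from a repository.')
get_parser.add_argument('url')
get_parser.add_argument('path')
get_parser.add_argument('-o', '--out', nargs='?')
get_parser.set_defaults(func=cmd_get)  # attach the handler to this sub-command

args = parser.parse_args(['get', 'https://example.com/repo.git', 'data/model.pkl'])
args.func(args)  # dispatch to the registered handler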
5ab7d4af67abb20a87f9b963e9ae53df65eea42f
fix self deleter
cogs/fun.py
cogs/fun.py
import asyncio

import discord
from discord.ext import commands


class Fun:
    """
    Fun and useful stuff
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True)
    async def marco(self, ctx):
        """ Says "polo" """
        await self.bot.say(self.bot.msg_prefix + "polo")

    @commands.command(pass_context=True)
    async def soon(self, ctx, *, message: str = ""):
        """ Makes a soon tm """
        await self.bot.delete_message(ctx.message)
        await self.bot.say("soon\u2122" + message)

    @commands.command(pass_context=True)
    async def give(self, ctx, *, message: str = ""):
        """ Gives stuff """
        await self.bot.delete_message(ctx.message)
        await self.bot.say("༼ つ ◕\\_◕ ༽つ " + message + " ༼ つ ◕\\_◕ ༽つ")

    @commands.command(pass_context=True)
    async def shrug(self, ctx, *, message: str = ""):
        """ Makes a shrug """
        await self.bot.delete_message(ctx.message)
        await self.bot.say("¯\_(ツ)_/¯ " + message)

    @commands.command(pass_context=True)
    async def lenny(self, ctx, *, message: str = ""):
        """ Makes a lenny face """
        await self.bot.delete_message(ctx.message)
        await self.bot.say("( ͡° ͜ʖ ͡°) " + message)

    @commands.command(pass_context=True, aliases=["d"])
    async def justdeleteme(self, ctx, count: int):
        """
        Deletes 'count' number of messages you have sent in the channel
        But only if they are in the first 1000 messages
        """
        count += 1
        iterator = self.bot.logs_from(ctx.message.channel, limit=1000)
        async for m in iterator:
            if isinstance(m, discord.Message):
                if m.author == ctx.message.author:
                    await self.bot.delete_message(m)
                    count -= 1
                    if count <= 0:
                        return

    @commands.command(pass_context=True, hidden=True)
    async def whois(self, ctx, *, ignore: str = ""):
        """ Let's just ignore that """
        to_del = await self.bot.say(self.bot.msg_prefix + "Use debug...")
        await asyncio.sleep(5)
        await self.bot.delete_message(to_del)

    def _calculate_mutual_servers(self, member: discord.Member):
        # Calculates mutual servers.
        serverlist = []
        for server in self.bot.servers:
            assert isinstance(server, discord.Server)
            if server.get_member(member.id):
                serverlist += [server.name]
        return serverlist

    def _safe_roles(self, roles: list):
        names = []
        for role in roles:
            if role.name == "@everyone":
                names.append("@\u200beveryone")  # u200b is invisible space
            else:
                names.append(role.name)
        return names


def setup(bot):
    bot.add_cog(Fun(bot))
import asyncio import discord from discord.ext import commands class Fun: """ Fun and useful stuff """ def __init__(self, bot): self.bot = bot @commands.command(pass_context=True) async def marco(self, ctx): """ Says "polo" """ await self.bot.say(self.bot.msg_prefix + "polo") @commands.command(pass_context=True) async def soon(self, ctx, *, message: str = ""): """ Makes a soon tm """ await self.bot.delete_message(ctx.message) await self.bot.say("soon\u2122" + message) @commands.command(pass_context=True) async def give(self, ctx, *, message: str = ""): """ Gives stuff """ await self.bot.delete_message(ctx.message) await self.bot.say("༼ つ ◕\\_◕ ༽つ " + message + " ༼ つ ◕\\_◕ ༽つ") @commands.command(pass_context=True) async def shrug(self, ctx, *, message: str = ""): """ Makes a shrug """ await self.bot.delete_message(ctx.message) await self.bot.say("¯\_(ツ)_/¯ " + message) @commands.command(pass_context=True) async def lenny(self, ctx, *, message: str = ""): """ Makes a lenny face """ await self.bot.delete_message(ctx.message) await self.bot.say("( ͡° ͜ʖ ͡°) " + message) @commands.command(pass_context=True, aliases=["d"]) async def justdeleteme(self, ctx, count: int): """ Deletes 'count' number of message you have sent in the channel But only if they are in the first 1000 messages """ count += 1 iterator = self.bot.logs_from(ctx.channel, limit=1000) async for m in iterator: if isinstance(m, discord.Message): if (m.author == ctx.author): await self.bot.delete_message(m) count -= 1 if count <= 0: return @commands.command(pass_context=True, hidden=True) async def whois(self, ctx, *, ingnore: str = ""): """ Let's just ingore that """ to_del = await self.bot.say(self.bot.msg_prefix + "Use debug...") await asyncio.sleep(5) await self.bot.delete_message(to_del) def _calculate_mutual_servers(self, member: discord.Member): # Calculates mutual servers. serverlist = [] for server in self.bot.servers: assert isinstance(server, discord.Server) if server.get_member(member.id): serverlist += [server.name] return serverlist def _safe_roles(self, roles: list): names = [] for role in roles: if role.name == "@everyone": names.append("@\u200beveryone") # u200b is invisible space else: names.append(role.name) return names def setup(bot): bot.add_cog(Fun(bot))
Python
0.000003
b0c83624004ebda4ea23f597d109d89ec319f1cf
fix tut2.py
doc/source/code/tut2.py
doc/source/code/tut2.py
from netpyne import specs, sim # Network parameters netParams = specs.NetParams() # object of class NetParams to store the network parameters ## Population parameters netParams.popParams['S'] = {'cellType': 'PYR', 'numCells': 20, 'cellModel': 'HH'} netParams.popParams['M'] = {'cellType': 'PYR', 'numCells': 20, 'cellModel': 'HH'} ## Cell property rules cellRule = {'conds': {'cellType': 'PYR'}, 'secs': {}} # cell rule dict cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism netParams.cellParams['PYRrule'] = cellRule # add dict to list of cell params ## Synaptic mechanism parameters netParams.synMechParams['exc'] = {'mod': 'Exp2Syn', 'tau1': 0.1, 'tau2': 5.0, 'e': 0} # excitatory synaptic mechanism # Stimulation parameters netParams.stimSourceParams['bkg'] = {'type': 'NetStim', 'rate': 10, 'noise': 0.5} netParams.stimTargetParams['bkg->PYR'] = {'source': 'bkg', 'conds': {'cellType': 'PYR'}, 'weight': 0.01, 'delay': 5, 'synMech': 'exc'} ## Cell connectivity rules netParams.connParams['S->M'] = { # S -> M label 'preConds': {'pop': 'S'}, # conditions of presyn cells 'postConds': {'pop': 'M'}, # conditions of postsyn cells 'probability': 0.5, # probability of connection 'weight': 0.01, # synaptic weight 'delay': 5, # transmission delay (ms) 'synMech': 'exc'} # synaptic mechanism # Simulation options simConfig = specs.SimConfig() # object of class SimConfig to store simulation configuration simConfig.duration = 1*1e3 # Duration of the simulation, in ms simConfig.dt = 0.025 # Internal integration timestep to use simConfig.verbose = False # Show detailed messages simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record simConfig.recordStep = 0.1 # Step size in ms to save data (eg. V traces, LFP, etc) simConfig.filename = 'model_output' # Set file output name simConfig.savePickle = False # Save params, network and sim output to pickle file simConfig.saveJson = True simConfig.analysis['plotRaster'] = True # Plot a raster simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections # Create network and run simulation sim.createSimulateAnalyze(netParams = netParams, simConfig = simConfig) # import pylab; pylab.show() # this line is only necessary in certain systems where figures appear empty # check model output sim.checkOutput('tut2')
from netpyne import specs, sim # Network parameters netParams = specs.NetParams() # object of class NetParams to store the network parameters ## Population parameters netParams.popParams['S'] = {'cellType': 'PYR', 'numCells': 20, 'cellModel': 'HH'} netParams.popParams['M'] = {'cellType': 'PYR', 'numCells': 20, 'cellModel': 'HH'} ## Cell property rules cellRule = {'conds': {'cellType': 'PYR'}, 'secs': {}} # cell rule dict cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}} # soma params dict cellRule['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0} # soma geometry cellRule['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} # soma hh mechanism netParams.cellParams['PYRrule'] = cellRule # add dict to list of cell params ## Synaptic mechanism parameters netParams.synMechParams['exc'] = {'mod': 'Exp2Syn', 'tau1': 0.1, 'tau2': 5.0, 'e': 0} # excitatory synaptic mechanism # Stimulation parameters netParams.stimSourceParams['bkg'] = {'type': 'NetStim', 'rate': 10, 'noise': 0.5} netParams.stimTargetParams['bkg->PYR'] = {'source': 'bkg', 'conds': {'cellType': 'PYR'}, 'weight': 0.01, 'delay': 5, 'synMech': 'exc'} ## Cell connectivity rules netParams.connParams['S->M'] = { # S -> M label 'preConds': {'pop': 'S'}, # conditions of presyn cells 'postConds': {'pop': 'M'}, # conditions of postsyn cells 'divergence': 9, # probability of connection 'weight': 0.01, # synaptic weight 'delay': 5, # transmission delay (ms) 'synMech': 'exc'} # synaptic mechanism # Simulation options simConfig = specs.SimConfig() # object of class SimConfig to store simulation configuration simConfig.duration = 1*1e3 # Duration of the simulation, in ms simConfig.dt = 0.025 # Internal integration timestep to use simConfig.verbose = False # Show detailed messages simConfig.recordTraces = {'V_soma':{'sec':'soma','loc':0.5,'var':'v'}} # Dict with traces to record simConfig.recordStep = 0.1 # Step size in ms to save data (eg. V traces, LFP, etc) simConfig.filename = 'model_output' # Set file output name simConfig.savePickle = False # Save params, network and sim output to pickle file simConfig.saveJson = True simConfig.analysis['plotRaster'] = True # Plot a raster simConfig.analysis['plotTraces'] = {'include': [1]} # Plot recorded traces for this list of cells simConfig.analysis['plot2Dnet'] = True # plot 2D visualization of cell positions and connections # Create network and run simulation sim.createSimulateAnalyze(netParams = netParams, simConfig = simConfig) # import pylab; pylab.show() # this line is only necessary in certain systems where figures appear empty # check model output sim.checkOutput('tut2')
Python
0.000001
100fa2f08656009e3fa2ee4fd66a85c5aca35f9d
Comment typo fix
sacad/rate_watcher.py
sacad/rate_watcher.py
""" This module provides a class with a context manager to help avoid overloading web servers. """ import collections import logging import os import sqlite3 import threading import time import urllib.parse import lockfile MIN_WAIT_TIME_S = 0.01 SUSPICIOUS_LOCK_AGE_S = 120 class WaitNeeded(Exception): """ Exception raised when access can not be granted without waiting. """ def __init__(self, wait_time_s): self.wait_s = wait_time_s class AccessRateWatcher: """ Access rate limiter, supporting concurrent access by threads and/or processes. """ thread_locks = collections.defaultdict(threading.Lock) thread_dict_lock = threading.Lock() def __init__(self, db_filepath, url, min_delay_between_accesses): self.domain = urllib.parse.urlsplit(url).netloc self.min_delay_between_accesses = min_delay_between_accesses self.connection = sqlite3.connect(db_filepath) with self.connection: self.connection.executescript("""PRAGMA journal_mode = MEMORY; PRAGMA synchronous = OFF; CREATE TABLE IF NOT EXISTS access_timestamp (domain TEXT PRIMARY KEY, timestamp FLOAT NOT NULL);""") self.lock_dir = os.path.join(os.path.dirname(db_filepath), "plocks") os.makedirs(self.lock_dir, exist_ok=True) def __enter__(self): self._raiseOrLock() self._access() def __exit__(self, exc_type, exc_value, traceback): self._releaseLock() def _access(self): """ Notify the watcher that the server is accessed. """ with self.connection: self.connection.execute("""INSERT OR REPLACE INTO access_timestamp (domain, timestamp) VALUES (?, ?)""", (self.domain, time.time(),)) def _raiseOrLock(self): """ Get lock or raise WaitNeeded exception. """ for try_lock in (True, False): with self.connection: last_access_time = self.connection.execute("""SELECT timestamp FROM access_timestamp WHERE domain = ?;""", (self.domain,)).fetchone() if last_access_time is not None: last_access_time = last_access_time[0] now = time.time() time_since_last_access = now - last_access_time if time_since_last_access < self.min_delay_between_accesses: time_to_wait = self.min_delay_between_accesses - time_since_last_access raise WaitNeeded(time_to_wait) if try_lock: locked = self._getLock() if locked: break # loop again to find wait time else: raise WaitNeeded(MIN_WAIT_TIME_S) def _getLock(self): with __class__.thread_dict_lock: tlock = __class__.thread_locks[self.domain] if tlock.acquire(blocking=False): plock = lockfile.FileLock(os.path.join(self.lock_dir, self.domain)) try: plock.acquire(timeout=0) except (lockfile.LockTimeout, lockfile.AlreadyLocked): # detect and break locks of dead processes lock_age = time.time() - os.path.getmtime(plock.lock_file) if lock_age > SUSPICIOUS_LOCK_AGE_S: logging.getLogger().warning("Breaking suspicious lock '%s' created %.2f seconds ago" % (plock.lock_file, lock_age)) plock.break_lock() tlock.release() except: tlock.release() raise else: return True else: # lock not available: wait for it, release it immediately and return as if locking fails # we do this to wait for the right amount of time but still re-read the cache with tlock: pass return False def _releaseLock(self): lockfile.FileLock(os.path.join(self.lock_dir, self.domain)).release() __class__.thread_locks[self.domain].release()
""" This module provides a class with a context manager to help avoid overloading web servers. """ import collections import logging import os import sqlite3 import threading import time import urllib.parse import lockfile MIN_WAIT_TIME_S = 0.01 SUSPICIOUS_LOCK_AGE_S = 120 class WaitNeeded(Exception): """ Exception raised when access can not be granted without waiting. """ def __init__(self, wait_time_s): self.wait_s = wait_time_s class AccessRateWatcher: """ Access rate limiter, supporting concurrent access by threads and/or processes. """ thread_locks = collections.defaultdict(threading.Lock) thread_dict_lock = threading.Lock() def __init__(self, db_filepath, url, min_delay_between_accesses): self.domain = urllib.parse.urlsplit(url).netloc self.min_delay_between_accesses = min_delay_between_accesses self.connection = sqlite3.connect(db_filepath) with self.connection: self.connection.executescript("""PRAGMA journal_mode = MEMORY; PRAGMA synchronous = OFF; CREATE TABLE IF NOT EXISTS access_timestamp (domain TEXT PRIMARY KEY, timestamp FLOAT NOT NULL);""") self.lock_dir = os.path.join(os.path.dirname(db_filepath), "plocks") os.makedirs(self.lock_dir, exist_ok=True) def __enter__(self): self._raiseOrLock() self._access() def __exit__(self, exc_type, exc_value, traceback): self._releaseLock() def _access(self): """ Notify the watcher that the server is accessed. """ with self.connection: self.connection.execute("""INSERT OR REPLACE INTO access_timestamp (domain, timestamp) VALUES (?, ?)""", (self.domain, time.time(),)) def _raiseOrLock(self): """ Get lock or raise WaitNeeded exception. """ for try_lock in (True, False): with self.connection: last_access_time = self.connection.execute("""SELECT timestamp FROM access_timestamp WHERE domain = ?;""", (self.domain,)).fetchone() if last_access_time is not None: last_access_time = last_access_time[0] now = time.time() time_since_last_access = now - last_access_time if time_since_last_access < self.min_delay_between_accesses: time_to_wait = self.min_delay_between_accesses - time_since_last_access raise WaitNeeded(time_to_wait) if try_lock: locked = self._getLock() if locked: break else: raise WaitNeeded(MIN_WAIT_TIME_S) def _getLock(self): with __class__.thread_dict_lock: tlock = __class__.thread_locks[self.domain] if tlock.acquire(blocking=False): plock = lockfile.FileLock(os.path.join(self.lock_dir, self.domain)) try: plock.acquire(timeout=0) except (lockfile.LockTimeout, lockfile.AlreadyLocked): # detect and break locks of dead processes lock_age = time.time() - os.path.getmtime(plock.lock_file) if lock_age > SUSPICIOUS_LOCK_AGE_S: logging.getLogger().warning("Breaking suspicious lock '%s' created %.2f seconds ago" % (plock.lock_file, lock_age)) plock.break_lock() tlock.release() except: tlock.release() raise else: return True else: # lock not availale: wait for it, release it immediately and return as if locking fails # we do this to wait for the right amount of time but still re-read the cache with tlock: pass return False def _releaseLock(self): lockfile.FileLock(os.path.join(self.lock_dir, self.domain)).release() __class__.thread_locks[self.domain].release()
Python
0
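Stripped of the sqlite table and the thread/file locks, the watcher's core is a comparison of "now" against the last recorded access, raising WaitNeeded with the remaining delay when the gap is too small. A minimal in-memory sketch of that gate (an illustration of the idea, not the module's actual concurrency handling):

import time

class WaitNeeded(Exception):
    def __init__(self, wait_time_s):
        self.wait_s = wait_time_s

class MinDelayGate:
    def __init__(self, min_delay_s):
        self.min_delay_s = min_delay_s
        self.last_access = None

    def access(self):
        now = time.time()
        if self.last_access is not None:
            elapsed = now - self.last_access
            if elapsed < self.min_delay_s:
                # Caller is expected to sleep for wait_s and retry.
                raise WaitNeeded(self.min_delay_s - elapsed)
        self.last_access = now

gate = MinDelayGate(min_delay_s=1.0)
gate.access()
try:
    gate.access()  # too soon: raises WaitNeeded
except WaitNeeded as e:
    time.sleep(e.wait_s)
    gate.access()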
e63c463a3200d9843bc5be6c1c3ee36fb267cbde
Update hyper space setting.
matchzoo/engine/param_table.py
matchzoo/engine/param_table.py
"""Parameters table class.""" import typing from matchzoo.engine import Param, hyper_spaces class ParamTable(object): """ Parameter table class. Example: >>> params = ParamTable() >>> params.add(Param('ham', 'Parma Ham')) >>> params.add(Param('egg', 'Over Easy')) >>> params['ham'] 'Parma Ham' >>> params['egg'] 'Over Easy' >>> print(params) ham Parma Ham egg Over Easy >>> params.add(Param('egg', 'Sunny side Up')) Traceback (most recent call last): ... ValueError: Parameter named egg already exists. To re-assign parameter egg value, use `params["egg"] = value` instead. """ def __init__(self): """Parameter table constrctor.""" self._params = {} def add(self, param: Param): """:param param: parameter to add.""" if not isinstance(param, Param): raise TypeError("Only accepts a Param instance.") if param.name in self._params: msg = f"Parameter named {param.name} already exists.\n" \ f"To re-assign parameter {param.name} value, " \ f"use `params[\"{param.name}\"] = value` instead." raise ValueError(msg) self._params[param.name] = param def get(self, key) -> Param: """:return: The parameter in the table named `key`.""" return self._params[key] def set(self, key, param: Param): """Set `key` to parameter `param`.""" if not isinstance(param, Param): raise ValueError self._params[key] = param @property def hyper_space(self) -> dict: """:return: Hyper space of the table, a valid `hyperopt` graph.""" full_space = {} for param in self: if param.hyper_space is not None: param_space = param.hyper_space if isinstance(param_space, hyper_spaces.HyperoptProxy): param_space = param_space(param.name) full_space[param.name] = param_space return full_space def __getitem__(self, key: str) -> typing.Any: """:return: The value of the parameter in the table named `key`.""" return self._params[key].value def __setitem__(self, key: str, value: typing.Any): """ Set the value of the parameter named `key`. :param key: Name of the parameter. :param value: New value of the parameter to set. """ self._params[key].value = value def __str__(self): """:return: Pretty formatted parameter table.""" return '\n'.join(param.name.ljust(30) + str(param.value) for param in self._params.values()) def __iter__(self) -> typing.Iterator: """:return: A iterator that iterates over all parameter instances.""" yield from self._params.values() def completed(self) -> bool: """ :return: `True` if all params are filled, `False` otherwise. Example: >>> import matchzoo >>> model = matchzoo.models.NaiveModel() >>> model.params.completed() False >>> model.guess_and_fill_missing_params(verbose=0) >>> model.params.completed() True """ return all(param for param in self) def keys(self) -> typing.KeysView: """:return: Parameter table keys.""" return self._params.keys() def __contains__(self, item): """:return: `True` if parameter in parameters.""" return item in self._params
"""Parameters table class.""" import typing from matchzoo.engine import Param class ParamTable(object): """ Parameter table class. Example: >>> params = ParamTable() >>> params.add(Param('ham', 'Parma Ham')) >>> params.add(Param('egg', 'Over Easy')) >>> params['ham'] 'Parma Ham' >>> params['egg'] 'Over Easy' >>> print(params) ham Parma Ham egg Over Easy >>> params.add(Param('egg', 'Sunny side Up')) Traceback (most recent call last): ... ValueError: Parameter named egg already exists. To re-assign parameter egg value, use `params["egg"] = value` instead. """ def __init__(self): """Parameter table constrctor.""" self._params = {} def add(self, param: Param): """:param param: parameter to add.""" if not isinstance(param, Param): raise TypeError("Only accepts a Param instance.") if param.name in self._params: msg = f"Parameter named {param.name} already exists.\n" \ f"To re-assign parameter {param.name} value, " \ f"use `params[\"{param.name}\"] = value` instead." raise ValueError(msg) self._params[param.name] = param def get(self, key) -> Param: """:return: The parameter in the table named `key`.""" return self._params[key] def set(self, key, param: Param): """Set `key` to parameter `param`.""" if not isinstance(param, Param): raise ValueError self._params[key] = param @property def hyper_space(self) -> dict: """:return: Hyper space of the table, a valid `hyperopt` graph.""" return { param.name: param.hyper_space for param in self._params.values() if param.hyper_space is not None } def __getitem__(self, key: str) -> typing.Any: """:return: The value of the parameter in the table named `key`.""" return self._params[key].value def __setitem__(self, key: str, value: typing.Any): """ Set the value of the parameter named `key`. :param key: Name of the parameter. :param value: New value of the parameter to set. """ self._params[key].value = value def __str__(self): """:return: Pretty formatted parameter table.""" return '\n'.join(param.name.ljust(30) + str(param.value) for param in self._params.values()) def __iter__(self) -> typing.Iterator: """:return: A iterator that iterates over all parameter instances.""" yield from self._params.values() def completed(self) -> bool: """ :return: `True` if all params are filled, `False` otherwise. Example: >>> import matchzoo >>> model = matchzoo.models.NaiveModel() >>> model.params.completed() False >>> model.guess_and_fill_missing_params(verbose=0) >>> model.params.completed() True """ return all(param for param in self) def keys(self) -> typing.KeysView: """:return: Parameter table keys.""" return self._params.keys() def __contains__(self, item): """:return: `True` if parameter in parameters.""" return item in self._params
Python
0
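The rewritten hyper_space property calls each HyperoptProxy with the parameter's name, because every node in a hyperopt search space needs a unique string label. A short sketch of that labelling requirement using plain hyperopt; this is generic hyperopt usage with an invented objective, not matchzoo's proxy internals:

from hyperopt import fmin, hp, tpe

# Each node carries a unique label; this is the role the parameter name
# plays when ParamTable assembles its full space.
space = {
    'units': hp.quniform('units', 8, 256, 8),
    'dropout': hp.uniform('dropout', 0.0, 0.5),
}

def objective(params):
    # Stand-in objective; a real one would train and evaluate a model.
    return (params['units'] - 64) ** 2 + params['dropout']

best = fmin(objective, space, algo=tpe.suggest, max_evals=10)
print(best)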
4a6846b969746b79f1acd0e0615232d97ed54b1f
replace import-time cluster dependencies (#1544)
frameworks/template/tests/test_sanity.py
frameworks/template/tests/test_sanity.py
import pytest import sdk_install import sdk_utils from tests import config @pytest.fixture(scope='module', autouse=True) def configure_package(configure_security): try: sdk_install.uninstall(config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME)) # note: this package isn't released to universe, so there's nothing to test_upgrade() with sdk_install.install( config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME), config.DEFAULT_TASK_COUNT, additional_options={"service": { "name": sdk_utils.get_foldered_name(config.SERVICE_NAME) } }) yield # let the test session execute finally: sdk_install.uninstall(config.PACKAGE_NAME, sdk_utils.get_foldered_name(config.SERVICE_NAME)) @pytest.mark.sanity @pytest.mark.smoke def test_install(): pass # package installed and appeared healthy!
import pytest import sdk_install import sdk_utils from tests import config FOLDERED_SERVICE_NAME = sdk_utils.get_foldered_name(config.SERVICE_NAME) @pytest.fixture(scope='module', autouse=True) def configure_package(configure_security): try: sdk_install.uninstall(config.PACKAGE_NAME, FOLDERED_SERVICE_NAME) # note: this package isn't released to universe, so there's nothing to test_upgrade() with sdk_install.install( config.PACKAGE_NAME, FOLDERED_SERVICE_NAME, config.DEFAULT_TASK_COUNT, additional_options={"service": { "name": FOLDERED_SERVICE_NAME } }) yield # let the test session execute finally: sdk_install.uninstall(config.PACKAGE_NAME, FOLDERED_SERVICE_NAME) @pytest.mark.sanity @pytest.mark.smoke def test_install(): pass # package installed and appeared healthy!
Python
0
0eef0efbe716feb3dc02fb45a756496d5517966c
Update docs.
matchzoo/models/naive_model.py
matchzoo/models/naive_model.py
"""Naive model with a simplest structure for testing purposes.""" import keras from matchzoo import engine class NaiveModel(engine.BaseModel): """ Naive model with a simplest structure for testing purposes. Bare minimum functioning model. The best choice to get things rolling. The worst choice to fit and evaluate performance. """ def build(self): """Build.""" x_in = self._make_inputs() x = keras.layers.concatenate(x_in) x_out = self._make_output_layer()(x) self._backend = keras.models.Model(inputs=x_in, outputs=x_out)
"""Naive model with a simplest structure for testing purposes.""" import keras from matchzoo import engine class NaiveModel(engine.BaseModel): """Naive model with a simplest structure for testing purposes.""" def build(self): """Build.""" x_in = self._make_inputs() x = keras.layers.concatenate(x_in) x_out = self._make_output_layer()(x) self._backend = keras.models.Model(inputs=x_in, outputs=x_out)
Python
0
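The naive model concatenates whatever inputs the base class provides and feeds them straight into an output layer. A free-standing Keras sketch of that shape; the input sizes and the dense sigmoid head are assumptions standing in for _make_inputs and _make_output_layer:

import keras

# Two fixed-length inputs standing in for a text pair.
x_in = [keras.layers.Input(shape=(30,)), keras.layers.Input(shape=(30,))]
x = keras.layers.concatenate(x_in)
x_out = keras.layers.Dense(1, activation='sigmoid')(x)  # assumed output head

model = keras.models.Model(inputs=x_in, outputs=x_out)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()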
0859bb58a4fa24f5e278e95da491a2b4409f0b2b
Tag 0.5.3
koordinates/__init__.py
koordinates/__init__.py
# -*- coding: utf-8 -*- """ Koordinates Python API Client Library :copyright: (c) Koordinates Limited. :license: BSD, see LICENSE for more details. """ __version__ = "0.5.3" from .exceptions import ( KoordinatesException, ClientError, ClientValidationError, InvalidAPIVersion, ServerError, BadRequest, AuthenticationError, Forbidden, NotFound, NotAllowed, Conflict, RateLimitExceeded, InternalServerError, ServiceUnvailable, ) from .client import Client from .layers import Layer, Table from .licenses import License from .metadata import Metadata from .publishing import Publish from .sets import Set from .sources import Source, UploadSource from .users import Group, User from .permissions import Permission from .exports import Export, CropLayer, DownloadError
# -*- coding: utf-8 -*- """ Koordinates Python API Client Library :copyright: (c) Koordinates Limited. :license: BSD, see LICENSE for more details. """ __version__ = "0.5.0" from .exceptions import ( KoordinatesException, ClientError, ClientValidationError, InvalidAPIVersion, ServerError, BadRequest, AuthenticationError, Forbidden, NotFound, NotAllowed, Conflict, RateLimitExceeded, InternalServerError, ServiceUnvailable, ) from .client import Client from .layers import Layer, Table from .licenses import License from .metadata import Metadata from .publishing import Publish from .sets import Set from .sources import Source, UploadSource from .users import Group, User from .permissions import Permission from .exports import Export, CropLayer, DownloadError
Python
0.000001
b6554b00fdb0387a27671eeb39589dc7e7109f6e
Add collecter function
app/main.py
app/main.py
from flask import Flask, request, jsonify
from urllib.request import urlopen
from bs4 import BeautifulSoup

app = Flask(__name__)
app.config.update(
    DEBUG=True
)


def collecter(url):
    """
    Scrape image links from a page and return the result as JSON.
    @param url URL to scrape
    @return dict of scraping results, keyed by a running counter
    """
    if url == "":
        return {}
    count = 0
    pic = {}
    html = urlopen(url)
    soup = BeautifulSoup(html, "html.parser")
    for a in soup.find_all("a"):
        text = str(a.string)
        if text.endswith("jpg") or text.endswith("png"):
            count += 1
            pic.update({count: text})
    return pic


@app.route("/")
def index():
    url = request.args.get('url', '')
    res = collecter(url)
    return jsonify(res)


if __name__ == "__main__":
    app.run()
from flask import Flask app = Flask(__name__) app.config.update( DEBUG=True ) @app.route("/") def index(): return "Hello python" if __name__ == "__main__": app.run()
Python
0.000001
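collecter walks every anchor and keeps those whose text ends in an image extension. The same filter over inline HTML, fully self-contained; the markup is invented for the demonstration:

from bs4 import BeautifulSoup

html = """
<a href="/a">cat.jpg</a>
<a href="/b">notes.txt</a>
<a href="/c">dog.png</a>
"""

soup = BeautifulSoup(html, 'html.parser')
pic = {}
count = 0
for a in soup.find_all('a'):
    text = str(a.string)
    if text.endswith('jpg') or text.endswith('png'):
        count += 1
        pic[count] = text

print(pic)  # {1: 'cat.jpg', 2: 'dog.png'}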
7b58f59ec288dd055cf931dd47c4e8e59bb9ad1d
update atx-agent version
uiautomator2/version.py
uiautomator2/version.py
# coding: utf-8
#

__apk_version__ = '1.1.5'
# 1.1.5 waitForExists use UiObject2 method first then fallback to UiObject.waitForExists
# 1.1.4 add ADB_EDITOR_CODE broadcast support, fix bug (toast capture caused the app to crash)
# 1.1.3 use thread to make watchers.watched faster, try to fix input method type multi
# 1.1.2 fix count error when have child && sync watched, to prevent watchers.remove error
# 1.1.1 support toast capture
# 1.1.0 update uiautomator-v18:2.1.2 -> uiautomator-v18:2.1.3 (This version fixed setWaitIdleTimeout not working bug)
# 1.0.14 catch NullException, add gps mock support
# 1.0.13 whatsinput support, but not very well
# 1.0.12 add toast support
# 1.0.11 add auto install support
# 1.0.10 fix service not started bug
# 1.0.9 fix apk version code and version name
# ERR: 1.0.8 bad version number. show ip on notification
# ERR: 1.0.7 bad version number. new input method, some bug fix

__atx_agent_version__ = '0.4.6'
# 0.4.6 fix download dns resolve error (sometimes)
# 0.4.5 add http log, change atx-agent -d into atx-agent server -d
# 0.4.4 this version is gone
# 0.4.3 ignore sigint to prevent atx-agent quit
# 0.4.2 hot fix, close upgrade-self
# 0.4.1 fix app-download time.Timer panic error, use safe-time.Timer instead.
# 0.4.0 add go-daemon lib. use safe-time.Timer to prevent panic error. this will make it run longer
# 0.3.6 support upload zip and unzip, fix minicap rotation error when atx-agent is killed -9
# 0.3.5 hot fix for session
# 0.3.4 fix session() sometimes can not get mainActivity error
# 0.3.3 /shell support timeout
# 0.3.2 fix dns resolve error when network changes
# 0.3.0 use github.com/codeskyblue/heartbeat library instead of websocket, add /whatsinput
# 0.2.1 support occupy /minicap connection
# 0.2.0 add session support
# 0.1.8 fix screenshot always the same image. (BUG in 0.1.7), add /shell/stream add timeout for /shell
# 0.1.7 fix dns resolve error in /install
# 0.1.6 change download logic. auto fix orientation
# 0.1.5 add singlefight for minicap and minitouch, proxy dial-timeout change 30 to 10
# 0.1.4 phone remote control
# 0.1.2 /download support
# 0.1.1 minicap buildin
# coding: utf-8 # __apk_version__ = '1.1.5' # 1.1.5 waitForExists use UiObject2 method first then fallback to UiObject.waitForExists # 1.1.4 add ADB_EDITOR_CODE broadcast support, fix bug (toast捕获导致app闪退) # 1.1.3 use thread to make watchers.watched faster, try to fix input method type multi # 1.1.2 fix count error when have child && sync watched, to prevent watchers.remove error # 1.1.1 support toast capture # 1.1.0 update uiautomator-v18:2.1.2 -> uiautomator-v18:2.1.3 (This version fixed setWaitIdleTimeout not working bug) # 1.0.14 catch NullException, add gps mock support # 1.0.13 whatsinput suppoort, but not very well # 1.0.12 add toast support # 1.0.11 add auto install support # 1.0.10 fix service not started bug # 1.0.9 fix apk version code and version name # ERR: 1.0.8 bad version number. show ip on notification # ERR: 1.0.7 bad version number. new input method, some bug fix __atx_agent_version__ = '0.4.5' # 0.4.5 add http log, change atx-agent -d into atx-agent server -d # 0.4.4 this version is gone # 0.4.3 ignore sigint to prevent atx-agent quit # 0.4.2 hot fix, close upgrade-self # 0.4.1 fix app-download time.Timer panic error, use safe-time.Timer instead. # 0.4.0 add go-daemon lib. use safe-time.Timer to prevent panic error. this will make it run longer # 0.3.6 support upload zip and unzip, fix minicap rotation error when atx-agent is killed -9 # 0.3.5 hot fix for session # 0.3.4 fix session() sometimes can not get mainActivity error # 0.3.3 /shell support timeout # 0.3.2 fix dns resolve error when network changes # 0.3.0 use github.com/codeskyblue/heartbeat library instead of websocket, add /whatsinput # 0.2.1 support occupy /minicap connection # 0.2.0 add session support # 0.1.8 fix screenshot always the same image. (BUG in 0.1.7), add /shell/stream add timeout for /shell # 0.1.7 fix dns resolve error in /install # 0.1.6 change download logic. auto fix orientation # 0.1.5 add singlefight for minicap and minitouch, proxy dial-timeout change 30 to 10 # 0.1.4 phone remote control # 0.1.2 /download support # 0.1.1 minicap buildin
Python
0
0c9accce7b3df8889ecf57b6df89a36628cb908c
add timeout for running scheduler
sbin/run_scheduler.py
sbin/run_scheduler.py
import subprocess import tempfile import time, os import re import sys # cd ~/.config/sublime-text-3/Packages/UnitTesting # python sbin/run_scheduler.py PACKAGE # script directory __dir__ = os.path.dirname(os.path.abspath(__file__)) version = int(subprocess.check_output(["subl","--version"]).decode('utf8').strip()[-4]) # sublime package directory if sys.platform == "darwin": sublime_package = os.path.expanduser("~/Library/Application Support/Sublime Text %d/Packages" % version) elif "linux" in sys.platform: sublime_package = os.path.expanduser("~/.config/sublime-text-%d/Packages" % version) sys.path.append(os.path.join(sublime_package, "UnitTesting")) from jsonio import * package = sys.argv[1] if len(sys.argv)>1 else "UnitTesting" outdir = os.path.join(sublime_package, "User", "UnitTesting", "tests_output") outfile = os.path.join(outdir, package) # remove output if os.path.exists(outfile): os.unlink(outfile) # add schedule jpath = os.path.join(sublime_package, "User", "UnitTesting", "schedule.json") j = jsonio(jpath) schedule = j.load() if not any([s['package']==package for s in schedule]): schedule.append({'package': package}) j.save(schedule) tasks = subprocess.check_output(['ps', 'xw']).decode('utf8') sublime_is_running = "Sublime" in tasks or "sublime_text" in tasks if sublime_is_running: subprocess.Popen(["subl", "-b", "--command", "unit_testing_run_scheduler"]) else: subprocess.Popen(["subl"]) # wait until the file has something startt = time.time() while (not os.path.exists(outfile) or os.stat(outfile).st_size == 0): sys.stdout.write('.') sys.stdout.flush() if time.time()-startt > 60: print("Timeout: Sublime Text is not responding") sys.exit(1) time.sleep(1) print("\nstart to read output") # todo: use notification instead of polling with open(outfile, 'r') as f: while True: result = f.read() m = re.search("^(OK|FAILED|ERROR)", result, re.MULTILINE) # break when OK or Failed if m: break time.sleep(0.2) f.seek(0) result = f.read() print(result) success = m.group(0)=="OK" if not success: sys.exit(1)
import subprocess import tempfile import time, os import re import sys # cd ~/.config/sublime-text-3/Packages/UnitTesting # python sbin/run_scheduler.py PACKAGE # script directory __dir__ = os.path.dirname(os.path.abspath(__file__)) version = int(subprocess.check_output(["subl","--version"]).decode('utf8').strip()[-4]) # sublime package directory if sys.platform == "darwin": sublime_package = os.path.expanduser("~/Library/Application Support/Sublime Text %d/Packages" % version) elif "linux" in sys.platform: sublime_package = os.path.expanduser("~/.config/sublime-text-%d/Packages" % version) sys.path.append(os.path.join(sublime_package, "UnitTesting")) from jsonio import * package = sys.argv[1] if len(sys.argv)>1 else "UnitTesting" outdir = os.path.join(sublime_package, "User", "UnitTesting", "tests_output") outfile = os.path.join(outdir, package) # remove output if os.path.exists(outfile): os.unlink(outfile) # add schedule jpath = os.path.join(sublime_package, "User", "UnitTesting", "schedule.json") j = jsonio(jpath) schedule = j.load() if not any([s['package']==package for s in schedule]): schedule.append({'package': package}) j.save(schedule) tasks = subprocess.check_output(['ps', 'xw']).decode('utf8') sublime_is_running = "Sublime" in tasks or "sublime_text" in tasks if sublime_is_running: subprocess.Popen(["subl", "-b", "--command", "unit_testing_run_scheduler"]) else: subprocess.Popen(["subl"]) # wait until the file has something while (not os.path.exists(outfile) or os.stat(outfile).st_size == 0): sys.stdout.write('.') sys.stdout.flush() time.sleep(1) print("\nstart to read output") # todo: use notification instead of polling with open(outfile, 'r') as f: while True: result = f.read() m = re.search("^(OK|FAILED|ERROR)", result, re.MULTILINE) # break when OK or Failed if m: break time.sleep(0.2) f.seek(0) result = f.read() print(result) success = m.group(0)=="OK" if not success: sys.exit(1)
Python
0.000001
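The added timeout is a wall-clock guard inside the polling loop: record a start time and bail out once the deadline passes. The same guard as a reusable helper; this is a generic sketch, not part of the original script:

import sys
import time

def wait_for(predicate, timeout_s=60.0, interval_s=1.0):
    """Poll predicate() until it returns True or timeout_s elapses."""
    start = time.time()
    while not predicate():
        if time.time() - start > timeout_s:
            return False
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(interval_s)
    return True

if not wait_for(lambda: False, timeout_s=2.0):
    print("\nTimeout: condition never became true")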
42f4ed206a9c79799b9bb0b13b829c8cf9c979e4
write to file
scraper/parse_dump.py
scraper/parse_dump.py
#!/usr/bin/python
# Simple script to parse the devpost dump and place results in a json
import os
import json
import multiprocessing
from multiprocessing import Pool
from bs4 import BeautifulSoup

OUTPUT_FNAME = "devpostdump.json"
DUMP_DIR = "output/"
projects = [os.path.join(DUMP_DIR, f) for f in os.listdir(DUMP_DIR)]
# projects = projects[:100]
projects_json = []

def process_project(inp):
    i, project = inp
    print "%d %s" % (i, project)
    proj_html = BeautifulSoup(open(project, 'r').read(), 'html.parser')
    proj_data = {}
    proj_data['name'] = proj_html.find(id='app-title').string
    proj_data['id'] = project[len(DUMP_DIR):]

    # Number of likes and comments
    num_likes = proj_html.find('span', { 'class' : 'ss-heart' }).next_sibling.next_sibling
    proj_data['num_likes'] = int(num_likes.string) if num_likes is not None else 0
    num_comments = proj_html.find('span', { 'class' : 'ss-quote' }).next_sibling.next_sibling
    proj_data['num_comments'] = int(num_comments.string) if num_comments is not None else 0

    # Length of the description
    proj_data['description_length'] = len(proj_html.find(id="app-details").get_text())

    # Number of contributors
    proj_data['num_contributors'] = len(proj_html.find_all('li', { 'class' : 'software-team-member' }))

    # Tags
    proj_data['tags'] = sorted([tag.string for tag in proj_html.find_all('span', { 'class' : 'cp-tag' })])

    # Hackathon details
    hackathon_deets = proj_html.find('div', { 'class' : 'software-list-content' })
    if hackathon_deets:
        proj_data['hackathon_name'] = hackathon_deets.find('a').string
        proj_data['num_prizes'] = len(hackathon_deets.find_all('span', { 'class' : 'winner' }))

    return proj_data

if __name__ == '__main__':
    num_cores = multiprocessing.cpu_count()
    p = Pool(num_cores)
    j = p.map(process_project, enumerate(projects[:1000]))
    print "Creating json file"
    with open(OUTPUT_FNAME, "w+") as f:
        json.dump(j, f)
#!/usr/bin/python # Simple script to parse the devpost dump and place results in a json import os import json from multiprocessing import Pool from bs4 import BeautifulSoup OUTPUT_FNAME="devpostdump.json" DUMP_DIR = "output/" projects = [os.path.join(DUMP_DIR, f) for f in os.listdir(DUMP_DIR)] # projects = projects[:100] projects_json = [] def process_project(inp): i, project = inp print "%d %s" % (i, project) proj_html = BeautifulSoup(open(project, 'r').read(), 'html.parser') proj_data = {} proj_data['name'] = proj_html.find(id='app-title').string proj_data['id'] = project[len(DUMP_DIR):] # Number of likes and comments num_likes = proj_html.find('span', { 'class' : 'ss-heart' }).next_sibling.next_sibling proj_data['num_likes'] = int(num_likes.string) if num_likes is not None else 0 num_comments = proj_html.find('span', { 'class' : 'ss-quote' }).next_sibling.next_sibling proj_data['num_comments'] = int(num_comments.string) if num_comments is not None else 0 # Length of the description proj_data['description_length'] = len(proj_html.find(id="app-details").get_text()) # Number of contributors proj_data['num_contributors'] = len(proj_html.find_all('li', { 'class' : 'software-team-member' })) # Tags proj_data['tags'] = sorted([tag.string for tag in proj_html.find_all('span', { 'class' : 'cp-tag' })]) # Hackathon details hackathon_deets = proj_html.find('div', { 'class' : 'software-list-content' }) if hackathon_deets: proj_data['hackathon_name'] = hackathon_deets.find('a').string proj_data['num_prizes'] = len(hackathon_deets.find_all('span', { 'class' : 'winner' })) return proj_data if __name__ == '__main__': num_cores = multiprocessing.cpu_count() p = Pool(num_cores) json = p.map(process_project, enumerate(projects[:1000]))
Python
0.000001
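For reference, a minimal standalone sketch (worker function and file name are illustrative) of the fan-out/collect pattern the fixed script relies on — Pool.map returns a plain list, and json.dump serializes it into an open file handle:

import json
from multiprocessing import Pool, cpu_count

def square(n):  # stand-in for a real worker such as process_project
    return n * n

if __name__ == '__main__':
    with Pool(cpu_count()) as pool:  # Pool as a context manager needs Python 3.3+
        results = pool.map(square, range(10))
    with open('results.json', 'w') as fh:
        json.dump(results, fh)       # dump writes to fh and returns None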
94182e97ed1635e3aa4993f3db69c248e16b7600
Undo previous commit
unnaturalcode/ucUser.py
unnaturalcode/ucUser.py
# Copyright 2013, 2014 Joshua Charles Campbell # # This file is part of UnnaturalCode. # # UnnaturalCode is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # UnnaturalCode is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>. from unnaturalcode.ucUtil import * from unnaturalcode.unnaturalCode import * from unnaturalcode.pythonSource import * from unnaturalcode.mitlmCorpus import * from unnaturalcode.sourceModel import * from unnaturalcode.genericSource import * import shutil class genericUser(object): def getHome(self): self.homeDir = os.path.expanduser("~") self.ucDir = os.getenv("UC_DATA", os.path.join(self.homeDir, ".unnaturalCode")) if not os.path.exists(self.ucDir): os.makedirs(self.ucDir) assert os.access(self.ucDir, os.X_OK & os.R_OK & os.W_OK) assert os.path.isdir(self.ucDir) def __init__(self, ngram_order=10): self.getHome() self.readCorpus = os.path.join(self.ucDir, 'genericCorpus') if not os.path.exists(self.readCorpus): os.makedirs(self.readCorpus) self.logFilePath = os.path.join(self.ucDir, 'genericLogFile') self.lm = genericSource self.basicSetup(ngram_order) def basicSetup(self, ngram_order=10): self.uc = unnaturalCode(logFilePath=self.logFilePath) # Oiugh... thank you, dependecy injection. self.cm = mitlmCorpus(readCorpus=self.readCorpus, writeCorpus=self.readCorpus, uc=self.uc, order=ngram_order) self.sm = sourceModel(cm=self.cm, language=self.lm) def release(self): self.cm.release() def delete(self): # Ain't gotta do nothing if the file doesn't exist. if os.path.exists(self.readCorpus): replacementPath = self.readCorpus + '.bak' shutil.move(self.readCorpus, replacementPath) class pyUser(genericUser): def __init__(self, ngram_order=10): self.getHome() self.readCorpus = os.path.join(self.ucDir, 'pyCorpus') if not os.path.exists(self.readCorpus): os.makedirs(self.readCorpus) self.logFilePath = os.path.join(self.ucDir, 'pyLogFile') self.lm = pythonSource self.basicSetup(ngram_order)
# Copyright 2013, 2014 Joshua Charles Campbell # # This file is part of UnnaturalCode. # # UnnaturalCode is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # UnnaturalCode is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>. from unnaturalcode.ucUtil import * from unnaturalcode.unnaturalCode import * from unnaturalcode.pythonSource import * from unnaturalcode.mitlmCorpus import * from unnaturalcode.sourceModel import * from unnaturalcode.genericSource import * import shutil class genericUser(object): def getHome(self): self.homeDir = os.path.expanduser("~") self.ucDir = os.getenv("UC_DATA", os.path.join(self.homeDir, ".unnaturalCode")) if not os.path.exists(self.ucDir): os.mknod(self.ucDir) assert os.access(self.ucDir, os.X_OK & os.R_OK & os.W_OK) assert os.path.isdir(self.ucDir) def __init__(self, ngram_order=10): self.getHome() self.readCorpus = os.path.join(self.ucDir, 'genericCorpus') if not os.path.exists(self.readCorpus): os.makedirs(self.readCorpus) self.logFilePath = os.path.join(self.ucDir, 'genericLogFile') self.lm = genericSource self.basicSetup(ngram_order) def basicSetup(self, ngram_order=10): self.uc = unnaturalCode(logFilePath=self.logFilePath) # Oiugh... thank you, dependecy injection. self.cm = mitlmCorpus(readCorpus=self.readCorpus, writeCorpus=self.readCorpus, uc=self.uc, order=ngram_order) self.sm = sourceModel(cm=self.cm, language=self.lm) def release(self): self.cm.release() def delete(self): # Ain't gotta do nothing if the file doesn't exist. if os.path.exists(self.readCorpus): replacementPath = self.readCorpus + '.bak' shutil.move(self.readCorpus, replacementPath) class pyUser(genericUser): def __init__(self, ngram_order=10): self.getHome() self.readCorpus = os.path.join(self.ucDir, 'pyCorpus') if not os.path.exists(self.readCorpus): os.mknod(self.readCorpus) self.logFilePath = os.path.join(self.ucDir, 'pyLogFile') self.lm = pythonSource self.basicSetup(ngram_order)
Python
0
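A quick sketch of why the undo matters: os.mknod creates a filesystem node (by default an empty regular file; Unix-only, and privileged on some platforms), so the directory checks in ucUser would fail, while os.makedirs creates the directory tree the code expects. Paths here are illustrative:

import os
import tempfile

base = tempfile.mkdtemp()
good = os.path.join(base, 'ucdata-dir')
os.makedirs(good)        # creates a real directory (and any missing parents)
assert os.path.isdir(good)

bad = os.path.join(base, 'ucdata-file')
os.mknod(bad)            # creates a plain file node, not a directory
assert not os.path.isdir(bad)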
c8a010e6e9a917c50843dd10303f8f9497b4687c
Bump version
waterbutler/__init__.py
waterbutler/__init__.py
__version__ = '0.2.3'
__import__("pkg_resources").declare_namespace(__name__)
__version__ = '0.2.2'
__import__("pkg_resources").declare_namespace(__name__)
Python
0
b0e3886ee24689f1eb249e0ed3c66d887b317f60
Delete table test
tst/test.py
tst/test.py
#!/usr/bin/python
import grpc
import keyvalue_pb2
import os
import sys

if __name__ == '__main__':
    conn_str = os.environ['GRPCROCKSDB_PORT'].split("/")[2]
    print "Connecting on: " + conn_str
    channel = grpc.insecure_channel(conn_str)
    stub = keyvalue_pb2.KeyValueStub(channel)
    create_table_res = stub.CreateTable(keyvalue_pb2.CreateTableReq(tablename='test-table-1'))
    put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1', item=keyvalue_pb2.Item(key='myKey', value='12345')))
    get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1', key='myKey'))
    assert get_res.item.value == "12345"
    try:
        put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1', item=keyvalue_pb2.Item(key='myKey', value='99999'), condition="hello"))
        print "Condition should not be met!"
        sys.exit(1)
    except Exception:
        pass
    get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1', key='myKey'))
    assert get_res.item.value == "12345"
    put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1', item=keyvalue_pb2.Item(key='myKey', value='99999'), condition="12345"))
    get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1', key='myKey'))
    assert get_res.item.value == "99999"
    delete_table_res = stub.DeleteTable(keyvalue_pb2.DeleteTableReq(tablename='test-table-1'))
#!/usr/bin/python
import grpc
import keyvalue_pb2
import os
import sys

if __name__ == '__main__':
    conn_str = os.environ['GRPCROCKSDB_PORT'].split("/")[2]
    print "Connecting on: " + conn_str
    channel = grpc.insecure_channel(conn_str)
    stub = keyvalue_pb2.KeyValueStub(channel)
    create_table_res = stub.CreateTable(keyvalue_pb2.CreateTableReq(tablename='test-table-1'))
    put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1', item=keyvalue_pb2.Item(key='myKey', value='12345')))
    get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1', key='myKey'))
    assert get_res.item.value == "12345"
    try:
        put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1', item=keyvalue_pb2.Item(key='myKey', value='99999'), condition="hello"))
        print "Condition should not be met!"
        sys.exit(1)
    except Exception:
        pass
    get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1', key='myKey'))
    assert get_res.item.value == "12345"
    put_res = stub.Put(keyvalue_pb2.PutReq(tablename='test-table-1', item=keyvalue_pb2.Item(key='myKey', value='99999'), condition="12345"))
    get_res = stub.Get(keyvalue_pb2.GetReq(tablename='test-table-1', key='myKey'))
    assert get_res.item.value == "99999"
Python
0.000002
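The conditional-put semantics this test exercises, as a plain-Python sketch (names here are illustrative, not the service's API): a write succeeds only when the stored value equals the supplied condition.

store = {}

def conditional_put(key, value, condition=None):
    # Reject the write when a condition is given and does not match.
    if condition is not None and store.get(key) != condition:
        raise ValueError("condition not met")
    store[key] = value

conditional_put("myKey", "12345")
try:
    conditional_put("myKey", "99999", condition="hello")  # rejected
except ValueError:
    pass
conditional_put("myKey", "99999", condition="12345")      # accepted
assert store["myKey"] == "99999"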
4437565016d0b1edc3b5a5f96d405cd0c41ca5b9
Use DataHandle timestep helpers in sample_project
smif/sample_project/models/energy_demand.py
smif/sample_project/models/energy_demand.py
"""Energy demand dummy model """ import numpy as np from smif.data_layer.data_handle import RelativeTimestep from smif.model.sector_model import SectorModel class EDMWrapper(SectorModel): """Energy model """ def initialise(self, initial_conditions): pass def simulate(self, data): # Get the current timestep now = data.current_timestep self.logger.info("EDMWrapper received inputs in %s", now) # Demonstrates how to get the value for a model parameter parameter_value = data.get_parameter('smart_meter_savings') self.logger.info('Smart meter savings: %s', parameter_value) # Demonstrates how to get the value for a model input # (defaults to the current time period) current_energy_demand = data.get_data('energy_demand') self.logger.info("Current energy demand in %s is %s", now, current_energy_demand) # Demonstrates how to get the value for a model input from the base # timeperiod base_energy_demand = data.get_data('energy_demand', RelativeTimestep.BASE) base_year = data.base_timestep self.logger.info("Base year energy demand in %s was %s", base_year, base_energy_demand) # Demonstrates how to get the value for a model input from the previous # timeperiod if now > base_year: prev_energy_demand = data.get_data('energy_demand', RelativeTimestep.PREVIOUS) prev_year = data.previous_timestep self.logger.info("Previous energy demand in %s was %s", prev_year, prev_energy_demand) # Pretend to call the 'energy model' # This code prints out debug logging messages for each input # defined in the energy_demand configuration for name in self.inputs.names: time_intervals = self.inputs[name].get_interval_names() regions = self.inputs[name].get_region_names() for i, region in enumerate(regions): for j, interval in enumerate(time_intervals): self.logger.info( "%s %s %s", interval, region, data.get_data(name)[i, j]) # Write pretend results to data handler data.set_results("cost", np.ones((3, 1)) * 3) data.set_results("water_demand", np.ones((3, 1)) * 3) self.logger.info("EDMWrapper produced outputs in %s", now) def extract_obj(self, results): return 0
"""Energy demand dummy model """ import numpy as np from smif.data_layer.data_handle import RelativeTimestep from smif.model.sector_model import SectorModel class EDMWrapper(SectorModel): """Energy model """ def initialise(self, initial_conditions): pass def simulate(self, data): # Get the current timestep now = data.current_timestep self.logger.info("EDMWrapper received inputs in %s", now) # Demonstrates how to get the value for a model parameter parameter_value = data.get_parameter('smart_meter_savings') self.logger.info('Smart meter savings: %s', parameter_value) # Demonstrates how to get the value for a model input # (defaults to the current time period) current_energy_demand = data.get_data('energy_demand') self.logger.info("Current energy demand in %s is %s", now, current_energy_demand) # Demonstrates how to get the value for a model input from the base # timeperiod base_energy_demand = data.get_data('energy_demand', RelativeTimestep.BASE) base_year = RelativeTimestep.BASE.resolve_relative_to(now, data.timesteps) self.logger.info("Base year energy demand in %s was %s", base_year, base_energy_demand) # Demonstrates how to get the value for a model input from the previous # timeperiod if now > base_year: prev_energy_demand = data.get_data('energy_demand', RelativeTimestep.PREVIOUS) prev_year = RelativeTimestep.PREVIOUS.resolve_relative_to( now, data.timesteps) self.logger.info("Previous energy demand in %s was %s", prev_year, prev_energy_demand) # Pretend to call the 'energy model' # This code prints out debug logging messages for each input # defined in the energy_demand configuration for name in self.inputs.names: time_intervals = self.inputs[name].get_interval_names() regions = self.inputs[name].get_region_names() for i, region in enumerate(regions): for j, interval in enumerate(time_intervals): self.logger.info( "%s %s %s", interval, region, data.get_data(name)[i, j]) # Write pretend results to data handler data.set_results("cost", np.ones((3, 1)) * 3) data.set_results("water_demand", np.ones((3, 1)) * 3) self.logger.info("EDMWrapper produced outputs in %s", now) def extract_obj(self, results): return 0
Python
0
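In plain terms, the swap replaces explicit resolution against data.timesteps with the equivalent convenience properties. A toy illustration (timestep values assumed) of what data.base_timestep and data.previous_timestep resolve to:

timesteps = [2010, 2015, 2020]   # assumed model horizon
now = 2020
base_timestep = timesteps[0]                              # 2010
previous_timestep = timesteps[timesteps.index(now) - 1]   # 2015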
d5fe2e21c8ed4e1dc66098e16011cb2f9094e573
Fix ConditionalJump to manually increment the PC
bytecode.py
bytecode.py
class BytecodeBase: autoincrement = True # For jump def __init__(self): # Eventually might want to add subclassed bytecodes here # Though __subclasses__ works quite well pass def execute(self, machine): pass class Push(BytecodeBase): def __init__(self, data): self.data = data def execute(self, machine): machine.push(self.data) class Pop(BytecodeBase): def execute(self, machine): return machine.pop() class ReadMemory(BytecodeBase): def __init__(self, index): self.index = index def execute(self, machine): machine.push(machine.read_memory(self.index)) class WriteMemory(BytecodeBase): def __init__(self, index, value): self.index, self.value = index, value def execute(self, machine): machine.write_memory(self.index, self.value) class Add(BytecodeBase): def execute(self, machine): a = machine.pop() b = machine.pop() machine.push(b+a) class Sub(BytecodeBase): def execute(self, machine): a = machine.pop() b = machine.pop() machine.push(b-a) class Mul(BytecodeBase): def execute(self, machine): a = machine.pop() b = machine.pop() machine.push(b*a) class Div(BytecodeBase): def execute(self, machine): a = machine.pop() b = machine.pop() machine.push(b/a) class Terminate(BytecodeBase): def execute(self, machine): machine.executing = False class Jump(BytecodeBase): def __init__(self, jump_to): self.autoincrement = False self.jump_to = jump_to def execute(self, machine): machine.pc = self.jump_to class ConditionalJump(BytecodeBase): def __init__(self, value, jump_to): self.autoincrement = False self.value = value self.jump_to = jump_to def execute(self, machine): val = machine.pop() machine.push(val) if val == self.value: machine.pc = self.jump_to else: machine.pc += 1 class Print(BytecodeBase): def execute(self, machine): val = machine.pop() machine.push(val) print(val) class WriteTop(BytecodeBase): def __init__(self, index): # We need this because we can't layer bytecodes # WriteMemory(Pop()) Fails because only WriteMemory gets executed self.index = index def execute(self, machine): machine.write_memory(self.index, machine.pop())
class BytecodeBase: autoincrement = True # For jump def __init__(self): # Eventually might want to add subclassed bytecodes here # Though __subclasses__ works quite well pass def execute(self, machine): pass class Push(BytecodeBase): def __init__(self, data): self.data = data def execute(self, machine): machine.push(self.data) class Pop(BytecodeBase): def execute(self, machine): return machine.pop() class ReadMemory(BytecodeBase): def __init__(self, index): self.index = index def execute(self, machine): machine.push(machine.read_memory(self.index)) class WriteMemory(BytecodeBase): def __init__(self, index, value): self.index, self.value = index, value def execute(self, machine): machine.write_memory(self.index, self.value) class Add(BytecodeBase): def execute(self, machine): a = machine.pop() b = machine.pop() machine.push(b+a) class Sub(BytecodeBase): def execute(self, machine): a = machine.pop() b = machine.pop() machine.push(b-a) class Mul(BytecodeBase): def execute(self, machine): a = machine.pop() b = machine.pop() machine.push(b*a) class Div(BytecodeBase): def execute(self, machine): a = machine.pop() b = machine.pop() machine.push(b/a) class Terminate(BytecodeBase): def execute(self, machine): machine.executing = False class Jump(BytecodeBase): def __init__(self, jump_to): self.autoincrement = False self.jump_to = jump_to def execute(self, machine): machine.pc = self.jump_to class ConditionalJump(BytecodeBase): def __init__(self, value, jump_to): self.value = value self.jump_to = jump_to def execute(self, machine): val = machine.pop() machine.push(val) if val == self.value: self.autoincrement = False machine.pc = self.jump_to class Print(BytecodeBase): def execute(self, machine): val = machine.pop() machine.push(val) print(val) class WriteTop(BytecodeBase): def __init__(self, index): # We need this because we can't layer bytecodes # WriteMemory(Pop()) Fails because only WriteMemory gets executed self.index = index def execute(self, machine): machine.write_memory(self.index, machine.pop())
Python
0.000001
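The fix is easiest to see with a toy interpreter loop (this machine is assumed for illustration; only the bytecode classes above are real). Because ConditionalJump now always sets autoincrement = False, the main loop never advances the pc for it, so the non-jumping branch must do machine.pc += 1 itself:

class ToyMachine:
    def __init__(self, program):
        self.program, self.stack, self.pc = program, [], 0
        self.executing = True

    def push(self, v):
        self.stack.append(v)

    def pop(self):
        return self.stack.pop()

    def run(self):
        while self.executing and self.pc < len(self.program):
            op = self.program[self.pc]
            op.execute(self)
            if op.autoincrement:   # ConditionalJump manages the pc itself
                self.pc += 1

prog = [Push(0), ConditionalJump(0, 3), Terminate(), Print()]
ToyMachine(prog).run()  # 0 matches the condition: jumps over Terminate, prints 0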
eeea990b6409085e38df4be4c137b9e42e354ec6
remove more target="_blank" for @tofumatt (bug 807049)
mkt/site/context_processors.py
mkt/site/context_processors.py
from django.conf import settings from django.contrib.auth.models import AnonymousUser from tower import ugettext as _ from access import acl import amo from amo.context_processors import get_collect_timings from amo.urlresolvers import reverse import mkt from zadmin.models import get_config def global_settings(request): """Store global Marketplace-wide info. used in the header.""" account_links = [] tools_links = [] context = {} tools_title = _('Tools') if request.user.is_authenticated() and hasattr(request, 'amo_user'): amo_user = request.amo_user account_links = [] context['is_reviewer'] = acl.check_reviewer(request) if getattr(request, 'can_view_consumer', True): account_links = [ # TODO: Coming soon with payments. # {'text': _('Account History'), # 'href': reverse('account.purchases')}, {'text': _('Account Settings'), 'href': reverse('account.settings')}, ] account_links += [ {'text': _('Change Password'), 'href': 'https://login.persona.org/signin'}, {'text': _('Log out'), 'href': reverse('users.logout')}, ] if '/developers/' not in request.path: tools_links.append({'text': _('Developer Hub'), 'href': reverse('ecosystem.landing')}) if amo_user.is_app_developer: tools_links.append({'text': _('My Submissions'), 'href': reverse('mkt.developers.apps')}) if '/reviewers/' not in request.path and context['is_reviewer']: tools_links.append({'text': _('Reviewer Tools'), 'href': reverse('reviewers.home')}) if acl.action_allowed(request, 'Localizers', '%'): tools_links.append({'text': _('Localizer Tools'), 'href': '/localizers'}) if acl.action_allowed(request, 'AccountLookup', '%'): tools_links.append({'text': _('Lookup Tool'), 'href': reverse('lookup.home')}) if acl.action_allowed(request, 'Admin', '%'): tools_links.append({'text': _('Admin Tools'), 'href': reverse('zadmin.home')}) context['amo_user'] = amo_user else: context['amo_user'] = AnonymousUser() is_walled = ('amo.middleware.LoginRequiredMiddleware' in settings.MIDDLEWARE_CLASSES) context.update(account_links=account_links, settings=settings, amo=amo, mkt=mkt, APP=amo.FIREFOX, tools_links=tools_links, tools_title=tools_title, ADMIN_MESSAGE=get_config('site_notice'), collect_timings_percent=get_collect_timings(), is_admin=acl.action_allowed(request, 'Addons', 'Edit'), is_walled=is_walled) return context
from django.conf import settings from django.contrib.auth.models import AnonymousUser from tower import ugettext as _ from access import acl import amo from amo.context_processors import get_collect_timings from amo.urlresolvers import reverse import mkt from zadmin.models import get_config def global_settings(request): """Store global Marketplace-wide info. used in the header.""" account_links = [] tools_links = [] context = {} tools_title = _('Tools') if request.user.is_authenticated() and hasattr(request, 'amo_user'): amo_user = request.amo_user account_links = [] context['is_reviewer'] = acl.check_reviewer(request) if getattr(request, 'can_view_consumer', True): account_links = [ # TODO: Coming soon with payments. # {'text': _('Account History'), # 'href': reverse('account.purchases')}, {'text': _('Account Settings'), 'href': reverse('account.settings')}, ] account_links += [ {'text': _('Change Password'), 'href': 'https://login.persona.org/signin'}, {'text': _('Log out'), 'href': reverse('users.logout')}, ] if '/developers/' not in request.path: tools_links.append({'text': _('Developer Hub'), 'href': reverse('ecosystem.landing'), 'target': '_blank'}) if amo_user.is_app_developer: tools_links.append({'text': _('My Submissions'), 'href': reverse('mkt.developers.apps'), 'target': '_blank'}) if '/reviewers/' not in request.path and context['is_reviewer']: tools_links.append({'text': _('Reviewer Tools'), 'href': reverse('reviewers.home')}) if acl.action_allowed(request, 'Localizers', '%'): tools_links.append({'text': _('Localizer Tools'), 'href': '/localizers'}) if acl.action_allowed(request, 'AccountLookup', '%'): tools_links.append({'text': _('Lookup Tool'), 'href': reverse('lookup.home')}) if acl.action_allowed(request, 'Admin', '%'): tools_links.append({'text': _('Admin Tools'), 'href': reverse('zadmin.home')}) context['amo_user'] = amo_user else: context['amo_user'] = AnonymousUser() is_walled = ('amo.middleware.LoginRequiredMiddleware' in settings.MIDDLEWARE_CLASSES) context.update(account_links=account_links, settings=settings, amo=amo, mkt=mkt, APP=amo.FIREFOX, tools_links=tools_links, tools_title=tools_title, ADMIN_MESSAGE=get_config('site_notice'), collect_timings_percent=get_collect_timings(), is_admin=acl.action_allowed(request, 'Addons', 'Edit'), is_walled=is_walled) return context
Python
0.00002
462813f8f10db550a4897bfcf20aa1d675543edb
Exclude system sources from test coverage
mesonbuild/scripts/coverage.py
mesonbuild/scripts/coverage.py
# Copyright 2017 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mesonbuild import environment import sys, os, subprocess def remove_dir_from_trace(lcov_command, covfile, dirname): tmpfile = covfile + '.tmp' subprocess.check_call([lcov_command, '--remove', covfile, dirname, '-o', tmpfile]) os.replace(tmpfile, covfile) def coverage(source_root, build_root, log_dir): (gcovr_exe, lcov_exe, genhtml_exe) = environment.find_coverage_tools() if gcovr_exe: subprocess.check_call([gcovr_exe, '-x', '-r', source_root, '-o', os.path.join(log_dir, 'coverage.xml'), ]) subprocess.check_call([gcovr_exe, '-r', source_root, '-o', os.path.join(log_dir, 'coverage.txt'), ]) if lcov_exe and genhtml_exe: htmloutdir = os.path.join(log_dir, 'coveragereport') covinfo = os.path.join(log_dir, 'coverage.info') initial_tracefile = covinfo + '.initial' run_tracefile = covinfo + '.run' subprocess.check_call([lcov_exe, '--directory', build_root, '--capture', '--initial', '--output-file', initial_tracefile]) subprocess.check_call([lcov_exe, '--directory', build_root, '--capture', '--output-file', run_tracefile, '--no-checksum', '--rc', 'lcov_branch_coverage=1', ]) # Join initial and test results. subprocess.check_call([lcov_exe, '-a', initial_tracefile, '-a', run_tracefile, '-o', covinfo]) remove_dir_from_trace(lcov_exe, covinfo, '/usr/include/*') remove_dir_from_trace(lcov_exe, covinfo, '/usr/local/include/*') remove_dir_from_trace(lcov_exe, covinfo, '/usr/src/*') subprocess.check_call([genhtml_exe, '--prefix', build_root, '--output-directory', htmloutdir, '--title', 'Code coverage', '--legend', '--show-details', '--branch-coverage', covinfo]) return 0 def run(args): if not os.path.isfile('build.ninja'): print('Coverage currently only works with the Ninja backend.') return 1 source_root, build_root, log_dir = args[:] return coverage(source_root, build_root, log_dir) if __name__ == '__main__': sys.exit(run(sys.argv[1:]))
# Copyright 2017 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mesonbuild import environment import sys, os, subprocess def remove_dir_from_trace(lcov_command, covfile, dirname): tmpfile = covfile + '.tmp' subprocess.check_call([lcov_command, '--remove', covfile, dirname, '-o', tmpfile]) os.replace(tmpfile, covfile) def coverage(source_root, build_root, log_dir): (gcovr_exe, lcov_exe, genhtml_exe) = environment.find_coverage_tools() if gcovr_exe: subprocess.check_call([gcovr_exe, '-x', '-r', source_root, '-o', os.path.join(log_dir, 'coverage.xml'), ]) subprocess.check_call([gcovr_exe, '-r', source_root, '-o', os.path.join(log_dir, 'coverage.txt'), ]) if lcov_exe and genhtml_exe: htmloutdir = os.path.join(log_dir, 'coveragereport') covinfo = os.path.join(log_dir, 'coverage.info') initial_tracefile = covinfo + '.initial' run_tracefile = covinfo + '.run' subprocess.check_call([lcov_exe, '--directory', build_root, '--capture', '--initial', '--output-file', initial_tracefile]) subprocess.check_call([lcov_exe, '--directory', build_root, '--capture', '--output-file', run_tracefile, '--no-checksum', '--rc', 'lcov_branch_coverage=1', ]) # Join initial and test results. subprocess.check_call([lcov_exe, '-a', initial_tracefile, '-a', run_tracefile, '-o', covinfo]) remove_dir_from_trace(lcov_exe, covinfo, '/usr/include/*') remove_dir_from_trace(lcov_exe, covinfo, '/usr/local/include/*') subprocess.check_call([genhtml_exe, '--prefix', build_root, '--output-directory', htmloutdir, '--title', 'Code coverage', '--legend', '--show-details', '--branch-coverage', covinfo]) return 0 def run(args): if not os.path.isfile('build.ninja'): print('Coverage currently only works with the Ninja backend.') return 1 source_root, build_root, log_dir = args[:] return coverage(source_root, build_root, log_dir) if __name__ == '__main__': sys.exit(run(sys.argv[1:]))
Python
0
5e2b2342f94933a9c3e853802471776731b232c8
Add boot trigger API route.
app/urls.py
app/urls.py
# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Define URLs and handlers to server them.""" import tornado.web import handlers.batch import handlers.bisect import handlers.boot import handlers.boot_trigger import handlers.count import handlers.defconf import handlers.job import handlers.lab import handlers.report import handlers.send import handlers.test_case import handlers.test_set import handlers.test_suite import handlers.token import handlers.upload import handlers.version _JOB_URL = tornado.web.url( r"/job[s]?/?(?P<id>.*)", handlers.job.JobHandler, name="job" ) _DEFCONF_URL = tornado.web.url( r"/defconfig[s]?/?(?P<id>.*)", handlers.defconf.DefConfHandler, name="defconf" ) _BOOT_URL = tornado.web.url( r"/boot[s]?/?(?P<id>.*)", handlers.boot.BootHandler, name="boot" ) _COUNT_URL = tornado.web.url( r"/count[s]?/?(?P<id>.*)", handlers.count.CountHandler, name="count" ) _TOKEN_URL = tornado.web.url( r"/token[s]?/?(?P<id>.*)", handlers.token.TokenHandler, name="token" ) _BATCH_URL = tornado.web.url( r"/batch", handlers.batch.BatchHandler, name="batch" ) _BISECT_URL = tornado.web.url( r"/bisect[s]?/?(?P<id>.*)", handlers.bisect.BisectHandler, name="bisect" ) _LAB_URL = tornado.web.url( r"/lab[s]?/?(?P<id>.*)", handlers.lab.LabHandler, name="lab" ) _VERSION_URL = tornado.web.url( r"/version", handlers.version.VersionHandler, name="version" ) _REPORT_URL = tornado.web.url( r"/report[s]?/?(?P<id>.*)", handlers.report.ReportHandler, name="response" ) _UPLOAD_URL = tornado.web.url( r"/upload/?(?P<path>.*)", handlers.upload.UploadHandler, name="upload" ) _SEND_URL = tornado.web.url( r"/send/?", handlers.send.SendHandler, name="send" ) _TEST_SUITE_URL = tornado.web.url( r"/test[s]?/suite[s]?/?(?P<id>.*)", handlers.test_suite.TestSuiteHandler, name="test-suite" ) _TEST_SET_URL = tornado.web.url( r"/test[s]?/set[s]?/?(?P<id>.*)", handlers.test_set.TestSetHandler, name="test-set" ) _TEST_CASE_URL = tornado.web.url( r"/test[s]?/case[s]?/?(?P<id>.*)", handlers.test_case.TestCaseHandler, name="test-case" ) _BOOT_TRIGGER_URL = tornado.web.url( r"/trigger/boot[s]?/?", handlers.boot_trigger.BootTriggerHandler, name="boot-trigger" ) APP_URLS = [ _BATCH_URL, _BISECT_URL, _BOOT_URL, _COUNT_URL, _DEFCONF_URL, _JOB_URL, _LAB_URL, _TOKEN_URL, _VERSION_URL, _REPORT_URL, _UPLOAD_URL, _SEND_URL, _TEST_SUITE_URL, _TEST_SET_URL, _TEST_CASE_URL, _BOOT_TRIGGER_URL ]
# This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Define URLs and handlers to server them.""" import tornado.web import handlers.batch import handlers.bisect import handlers.boot import handlers.count import handlers.defconf import handlers.job import handlers.lab import handlers.report import handlers.send import handlers.test_case import handlers.test_set import handlers.test_suite import handlers.token import handlers.upload import handlers.version _JOB_URL = tornado.web.url( r"/job[s]?/?(?P<id>.*)", handlers.job.JobHandler, name="job" ) _DEFCONF_URL = tornado.web.url( r"/defconfig[s]?/?(?P<id>.*)", handlers.defconf.DefConfHandler, name="defconf" ) _BOOT_URL = tornado.web.url( r"/boot[s]?/?(?P<id>.*)", handlers.boot.BootHandler, name="boot" ) _COUNT_URL = tornado.web.url( r"/count[s]?/?(?P<id>.*)", handlers.count.CountHandler, name="count" ) _TOKEN_URL = tornado.web.url( r"/token[s]?/?(?P<id>.*)", handlers.token.TokenHandler, name="token" ) _BATCH_URL = tornado.web.url( r"/batch", handlers.batch.BatchHandler, name="batch" ) _BISECT_URL = tornado.web.url( r"/bisect[s]?/?(?P<id>.*)", handlers.bisect.BisectHandler, name="bisect" ) _LAB_URL = tornado.web.url( r"/lab[s]?/?(?P<id>.*)", handlers.lab.LabHandler, name="lab" ) _VERSION_URL = tornado.web.url( r"/version", handlers.version.VersionHandler, name="version" ) _REPORT_URL = tornado.web.url( r"/report[s]?/?(?P<id>.*)", handlers.report.ReportHandler, name="response" ) _UPLOAD_URL = tornado.web.url( r"/upload/?(?P<path>.*)", handlers.upload.UploadHandler, name="upload" ) _SEND_URL = tornado.web.url( r"/send/?", handlers.send.SendHandler, name="send" ) _TEST_SUITE_URL = tornado.web.url( r"/test[s]?/suite[s]?/?(?P<id>.*)", handlers.test_suite.TestSuiteHandler, name="test-suite" ) _TEST_SET_URL = tornado.web.url( r"/test[s]?/set[s]?/?(?P<id>.*)", handlers.test_set.TestSetHandler, name="test-set" ) _TEST_CASE_URL = tornado.web.url( r"/test[s]?/case[s]?/?(?P<id>.*)", handlers.test_case.TestCaseHandler, name="test-case" ) APP_URLS = [ _BATCH_URL, _BISECT_URL, _BOOT_URL, _COUNT_URL, _DEFCONF_URL, _JOB_URL, _LAB_URL, _TOKEN_URL, _VERSION_URL, _REPORT_URL, _UPLOAD_URL, _SEND_URL, _TEST_SUITE_URL, _TEST_SET_URL, _TEST_CASE_URL ]
Python
0
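For context, a self-contained sketch of how one of these named URLSpecs is wired into an application (the handler body here is invented; only the route pattern and name come from the code above):

import tornado.web

class BootTriggerHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("boot trigger")  # placeholder body

application = tornado.web.Application([
    tornado.web.url(r"/trigger/boot[s]?/?", BootTriggerHandler,
                    name="boot-trigger"),
])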
96340529a8d5702ce8c880aa66966b2971b96449
change covariance estimation method from "shrunk" to "factor_analysis"
calc_cov.py
calc_cov.py
import mne
import sys

from mne import compute_covariance

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

from my_settings import *

reject = dict(grad=4000e-13,  # T / m (gradiometers)
              mag=4e-12,  # T (magnetometers)
              eeg=180e-6  #
              )

subject = sys.argv[1]

epochs = mne.read_epochs(epochs_folder + "%s_trial_start-epo.fif" % subject)
epochs.drop_bad_epochs(reject)
fig = epochs.plot_drop_log(subject=subject, show=False)
fig.savefig(epochs_folder + "pics/%s_drop_log.png" % subject)

# Make noise cov
cov = compute_covariance(epochs, tmin=None, tmax=-0.2,
                         method="factor_analysis")
mne.write_cov(mne_folder + "%s-cov.fif" % subject, cov)
import mne
import sys

from mne import compute_covariance

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

from my_settings import *

reject = dict(grad=4000e-13,  # T / m (gradiometers)
              mag=4e-12,  # T (magnetometers)
              eeg=180e-6  #
              )

subject = sys.argv[1]

epochs = mne.read_epochs(epochs_folder + "%s_trial_start-epo.fif" % subject)
epochs.drop_bad_epochs(reject)
fig = epochs.plot_drop_log(subject=subject, show=False)
fig.savefig(epochs_folder + "pics/%s_drop_log.png" % subject)

# Make noise cov
cov = compute_covariance(epochs, tmin=None, tmax=-0.2,
                         method="shrunk")
mne.write_cov(mne_folder + "%s-cov.fif" % subject, cov)
Python
0.000005
4be984747a41e5ab966b12afe9074a0e611faee2
Add license text to resampling.py
resampling.py
resampling.py
""" MIT License Copyright (c) 2017 Talha Can Havadar Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. @author Talha Can Havadar (talhaHavadar) """ import random from collections import Counter class ResamplingWheel(object): """ A Class implementation for resampling wheel Creates an imaginary wheel that consist of weighted portions. According to these weights, you can pick an index value. Index with more weights has more chance to be picked up. """ def __init__(self, initiate_with=None): self.wheel = [] self.max_weight = None self.is_resampled = False self.beta = 0.0 self.last_index = 0 if initiate_with is not None and isinstance(initiate_with, list): self.wheel = initiate_with self.length = len(self.wheel) if self.length > 0: self.max_weight = max(self.wheel) self.last_index = int(random.random() * self.length) def get_pick_index(self): """ Returns an index value according to given data. Given data's length and weights matter """ if not self.is_resampled: self.__resample__() while self.beta > self.wheel[self.last_index]: self.beta -= self.wheel[self.last_index] self.last_index = (self.last_index + 1) % self.length self.is_resampled = False return self.last_index def __resample__(self): self.beta += random.random() * 2.0 * self.max_weight self.is_resampled = True def __len__(self): return len(self.wheel) if __name__ == "__main__": DATA = [10, 11, 12, 13, 14] SAMPLING = ResamplingWheel([5, 2, 1, 1, 1]) SAMPLED = [] print("Length of the sampling wheel:", len(SAMPLING)) for i in range(100): index = SAMPLING.get_pick_index() print(DATA[index]) SAMPLED.append(DATA[index]) print(Counter(SAMPLED))
""" @author Talha Can Havadar (talhaHavadar) """ import random from collections import Counter class ResamplingWheel(object): """ A Class implementation for resampling wheel Creates an imaginary wheel that consist of weighted portions. According to these weights, you can pick an index value. Index with more weights has more chance to be picked up. """ def __init__(self, initiate_with=None): self.wheel = [] self.max_weight = None self.is_resampled = False self.beta = 0.0 self.last_index = 0 if initiate_with is not None and isinstance(initiate_with, list): self.wheel = initiate_with self.length = len(self.wheel) if self.length > 0: self.max_weight = max(self.wheel) self.last_index = int(random.random() * self.length) def get_pick_index(self): """ Returns an index value according to given data. Given data's length and weights matter """ if not self.is_resampled: self.__resample__() while self.beta > self.wheel[self.last_index]: self.beta -= self.wheel[self.last_index] self.last_index = (self.last_index + 1) % self.length self.is_resampled = False return self.last_index def __resample__(self): self.beta += random.random() * 2.0 * self.max_weight self.is_resampled = True def __len__(self): return len(self.wheel) if __name__ == "__main__": DATA = [10, 11, 12, 13, 14] SAMPLING = ResamplingWheel([5, 2, 1, 1, 1]) SAMPLED = [] print("Length of the sampling wheel:", len(SAMPLING)) for i in range(100): index = SAMPLING.get_pick_index() print(DATA[index]) SAMPLED.append(DATA[index]) print(Counter(SAMPLED))
Python
0
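A quick empirical check (assumed usage of the class above) that the wheel respects the weights — with weights [5, 2, 1, 1, 1], index 0 should win roughly half of all picks (5 out of a total weight of 10):

from collections import Counter

wheel = ResamplingWheel([5, 2, 1, 1, 1])
picks = Counter(wheel.get_pick_index() for _ in range(10000))
print(picks)  # expect index 0 near 5000, index 1 near 2000, the rest near 1000 each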
5fa9e88e9402a4ca12f2f54298d397bc7b54728b
Revert "deactivated test for non-existent 'references'"
web/tests/test_views.py
web/tests/test_views.py
from django.test import TestCase, Client from django.urls import reverse from web.views import index, about, compare, reference class TestViews(TestCase): def test_index_view_GET(self): url = reverse('index') response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'index.html') self.assertTemplateUsed(response, 'base.html') def test_about_view_GET(self): url = reverse('about') response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'about.html') self.assertTemplateUsed(response, 'base.html') def test_compare_view_GET(self): url = reverse('compare') + '?concept=data_types&lang1=python&lang2=java' response = self.client.get(url) self.assertEquals(response.status_code, 200) self.assertTemplateUsed(response, 'compare.html') self.assertTemplateUsed(response, 'base.html') def test_reference_view_GET(self): url = reverse('reference') + '?concept=data_types&lang=python' response = self.client.get(url) self.assertEquals(response.status_code, 200) self.assertTemplateUsed(response, 'reference.html') self.assertTemplateUsed(response, 'base.html')
from django.test import TestCase, Client from django.urls import reverse from web.views import index, about, compare, reference class TestViews(TestCase): def test_index_view_GET(self): url = reverse('index') response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'index.html') self.assertTemplateUsed(response, 'base.html') def test_about_view_GET(self): url = reverse('about') response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'about.html') self.assertTemplateUsed(response, 'base.html') def test_compare_view_GET(self): url = reverse('compare') + '?concept=data_types&lang1=python&lang2=java' response = self.client.get(url) self.assertEquals(response.status_code, 200) self.assertTemplateUsed(response, 'compare.html') self.assertTemplateUsed(response, 'base.html') def test_reference_view_GET(self): pass # Uncomment these tests when 'reference' section is made # url = reverse('reference') + '?concept=data_types&lang=python' # response = self.client.get(url) # self.assertEquals(response.status_code, 200) # self.assertTemplateUsed(response, 'reference.html') # self.assertTemplateUsed(response, 'base.html')
Python
0
a5fddaefdedef18b0b6b7d3b2ec65f64eaaaad65
fix date time bug (use the defined date_from/date_to variables instead of the undefined start_date/end_date)
clean_db.py
clean_db.py
import MySQLdb, config, urllib, cgi, datetime
from datetime import datetime, timedelta

sql = MySQLdb.connect(host="localhost", user=config.username, passwd=config.passwd, db=config.test_db)
sql.query("SELECT `id` FROM `feedurls`")
db_feed_query=sql.store_result()
rss_urls=db_feed_query.fetch_row(0)
table_name = "stories"

date_from = datetime.strptime(raw_input("start date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")
date_to = datetime.strptime(raw_input("end date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")

for rss_url_data in rss_urls:
    feed_id=rss_url_data[0]
    i = date_from
    while i <= date_to:
        print i.strftime("%d/%m/%Y")
        i += timedelta(days=1)  # advance one day per iteration
import MySQLdb, config, urllib, cgi, datetime
from datetime import datetime

sql = MySQLdb.connect(host="localhost", user=config.username, passwd=config.passwd, db=config.test_db)
sql.query("SELECT `id` FROM `feedurls`")
db_feed_query=sql.store_result()
rss_urls=db_feed_query.fetch_row(0)
table_name = "stories"

date_from = datetime.strptime(raw_input("start date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")
date_to = datetime.strptime(raw_input("end date inc. in form 'dd-mm-yyyy'"),"%d-%m-%Y")

for rss_url_data in rss_urls:
    feed_id=rss_url_data[0]
    i = start_date
    while i <= end_date:
        print end_dates
Python
0.000016
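The inclusive date-range loop the fixed script needs, as a standalone idiom (dates are examples; shown in Python 3 syntax):

from datetime import date, timedelta

d, end = date(2015, 1, 1), date(2015, 1, 3)
while d <= end:
    print(d.strftime("%d/%m/%Y"))
    d += timedelta(days=1)   # advance, otherwise the while loop never ends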
37fb65dd7763f7cbd1a53f613bbda16d739f11a3
Make `cctrluser create` work
cctrl/auth.py
cctrl/auth.py
# -*- coding: utf-8 -*- """ Copyright 2010 cloudControl UG (haftungsbeschraenkt) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __builtin__ import open, raw_input, range from exceptions import ImportError, ValueError from getpass import getpass import sys import os from cctrl.oshelpers import recode_input try: import json except ImportError: import simplejson as json from cctrl.error import messages, PasswordsDontMatchException from cctrl.settings import TOKEN_FILE_PATH, HOME_PATH def update_tokenfile(api): """ Because it is a real pain we don't want to ask developers for their username and password every time they call a method. Therefore we authenticate users via token for each request and only require email and password for a new token. A token is valid for a given period of time. Each successful API request resets the expiration time. """ if api.check_token(): write_tokenfile(api) return True return False def read_tokenfile(): """ Read the token from the token_file in TOKEN_FILE_PATH specified in cctrl.settings """ token = None if os.path.exists(TOKEN_FILE_PATH): token_file = open(TOKEN_FILE_PATH, "r") try: token = json.load(token_file) except ValueError: token = None token_file.close() return token def write_tokenfile(api): """ This method checks, if the .cloudControl directory inside the users home exists or is a file. If not, we create it and then write the token file. """ if os.path.isdir(HOME_PATH): pass elif os.path.isfile(HOME_PATH): print 'Error: ' + HOME_PATH + ' is a file, not a directory.' sys.exit(1) else: os.mkdir(HOME_PATH) tokenfile = open(TOKEN_FILE_PATH, "w") json.dump(api.get_token(), tokenfile) tokenfile.close() return True def delete_tokenfile(): """ We delete the tokenfile if we don't have a valid token to save. """ if os.path.exists(TOKEN_FILE_PATH): os.remove(TOKEN_FILE_PATH) return True return False def get_email(settings): sys.stderr.write(settings.login_name) sys.stderr.flush() email = raw_input() return email def get_password(create=False): password = None for i in range(3): password = recode_input(getpass('Password: ')) if create: password2 = recode_input(getpass('Password (again): ')) if password != password2: print messages['PasswordsDontMatch'] if i == 2: raise PasswordsDontMatchException() else: break else: break return password def get_credentials(settings, create=False): """ We use this to ask the user for his credentials in case we have no valid token. If create is true, the user is asked twice for the password, to make sure, that no typing error occurred. This is done three times after that a PasswordsDontMatchException is thrown. """ email = get_email(settings) password = get_password(create) return email, password
# -*- coding: utf-8 -*- """ Copyright 2010 cloudControl UG (haftungsbeschraenkt) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __builtin__ import open, raw_input, range from exceptions import ImportError, ValueError from getpass import getpass import sys import os from cctrl.oshelpers import recode_input try: import json except ImportError: import simplejson as json from cctrl.error import messages, PasswordsDontMatchException from cctrl.settings import TOKEN_FILE_PATH, HOME_PATH def update_tokenfile(api): """ Because it is a real pain we don't want to ask developers for their username and password every time they call a method. Therefore we authenticate users via token for each request and only require email and password for a new token. A token is valid for a given period of time. Each successful API request resets the expiration time. """ if api.check_token(): write_tokenfile(api) return True return False def read_tokenfile(): """ Read the token from the token_file in TOKEN_FILE_PATH specified in cctrl.settings """ token = None if os.path.exists(TOKEN_FILE_PATH): token_file = open(TOKEN_FILE_PATH, "r") try: token = json.load(token_file) except ValueError: token = None token_file.close() return token def write_tokenfile(api): """ This method checks, if the .cloudControl directory inside the users home exists or is a file. If not, we create it and then write the token file. """ if os.path.isdir(HOME_PATH): pass elif os.path.isfile(HOME_PATH): print 'Error: ' + HOME_PATH + ' is a file, not a directory.' sys.exit(1) else: os.mkdir(HOME_PATH) tokenfile = open(TOKEN_FILE_PATH, "w") json.dump(api.get_token(), tokenfile) tokenfile.close() return True def delete_tokenfile(): """ We delete the tokenfile if we don't have a valid token to save. """ if os.path.exists(TOKEN_FILE_PATH): os.remove(TOKEN_FILE_PATH) return True return False def get_email(settings): sys.stderr.write(settings.login_name) sys.stderr.flush() email = raw_input() return email def get_password(create=False): password = None for i in range(3): password = recode_input(getpass('Password: ')) if create: password2 = recode_input(getpass('Password (again): ')) if password != password2: print messages['PasswordsDontMatch'] if i == 2: raise PasswordsDontMatchException() else: break else: break return password def get_credentials(settings, create=False): """ We use this to ask the user for his credentials in case we have no valid token. If create is true, the user is asked twice for the password, to make sure, that no typing error occurred. This is done three times after that a PasswordsDontMatchException is thrown. """ email = get_email(); password = get_password(create) return email, password
Python
0.000003
d64460c8bbbe045dcdf9f737562a31d84044acce
Change package name to 'cirm' to avoid confusion.
rest/setup.py
rest/setup.py
# # Copyright 2012 University of Southern California # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from distutils.core import setup setup(name="cirm-rest", description="cirm web application", version="0.1", package_dir={"": "src"}, packages=["cirm"], requires=["web.py", "psycopg2"], classifiers=[ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", "Topic :: Internet :: WWW/HTTP", "Topic :: Software Development :: Libraries :: Python Modules", ], )
# # Copyright 2012 University of Southern California # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from distutils.core import setup setup(name="cirm-rest", description="cirm web application", version="0.1", package_dir={"": "src"}, packages=["cirmrest"], requires=["web.py", "psycopg2"], classifiers=[ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", "Topic :: Internet :: WWW/HTTP", "Topic :: Software Development :: Libraries :: Python Modules", ], )
Python
0
917d8e26a64a40de0a0b77085f1fa6d054af0ee8
Remove cleanup_testfn, no longer used.
conftest.py
conftest.py
import os import sys import platform import pytest collect_ignore = [] if platform.system() != 'Windows': collect_ignore.extend( [ 'distutils/msvc9compiler.py', ] ) @pytest.fixture def save_env(): orig = os.environ.copy() try: yield finally: for key in set(os.environ) - set(orig): del os.environ[key] for key, value in orig.items(): if os.environ.get(key) != value: os.environ[key] = value @pytest.fixture def needs_zlib(): pytest.importorskip('zlib') @pytest.fixture def distutils_logging_silencer(request): from distutils import log self = request.instance self.threshold = log.set_threshold(log.FATAL) # catching warnings # when log will be replaced by logging # we won't need such monkey-patch anymore self._old_log = log.Log._log log.Log._log = self._log self.logs = [] try: yield finally: log.set_threshold(self.threshold) log.Log._log = self._old_log @pytest.fixture def distutils_managed_tempdir(request): from distutils.tests import py38compat as os_helper self = request.instance self.old_cwd = os.getcwd() self.tempdirs = [] try: yield finally: # Restore working dir, for Solaris and derivatives, where rmdir() # on the current directory fails. os.chdir(self.old_cwd) while self.tempdirs: tmpdir = self.tempdirs.pop() os_helper.rmtree(tmpdir) @pytest.fixture def save_argv(): orig = sys.argv[:] try: yield finally: sys.argv[:] = orig @pytest.fixture def save_cwd(): orig = os.getcwd() try: yield finally: os.chdir(orig) @pytest.fixture def threshold_warn(): from distutils.log import set_threshold, WARN orig = set_threshold(WARN) yield set_threshold(orig) @pytest.fixture def pypirc(request, save_env, distutils_managed_tempdir): from distutils.core import PyPIRCCommand from distutils.core import Distribution self = request.instance self.tmp_dir = self.mkdtemp() os.environ['HOME'] = self.tmp_dir os.environ['USERPROFILE'] = self.tmp_dir self.rc = os.path.join(self.tmp_dir, '.pypirc') self.dist = Distribution() class command(PyPIRCCommand): def __init__(self, dist): super().__init__(dist) def initialize_options(self): pass finalize_options = initialize_options self._cmd = command # from pytest-dev/pytest#363 @pytest.fixture(scope="session") def monkeysession(request): from _pytest.monkeypatch import MonkeyPatch mpatch = MonkeyPatch() yield mpatch mpatch.undo() @pytest.fixture(autouse=True, scope="session") def suppress_path_mangle(monkeysession): """ Disable the path mangling in CCompiler. Workaround for #169. """ from distutils import ccompiler monkeysession.setattr( ccompiler.CCompiler, '_make_relative', staticmethod(lambda x: x) ) @pytest.fixture def temp_home(tmp_path, monkeypatch): var = 'USERPROFILE' if platform.system() == 'Windows' else 'HOME' monkeypatch.setenv(var, str(tmp_path)) return tmp_path
import os import sys import platform import shutil import pytest collect_ignore = [] if platform.system() != 'Windows': collect_ignore.extend( [ 'distutils/msvc9compiler.py', ] ) @pytest.fixture def save_env(): orig = os.environ.copy() try: yield finally: for key in set(os.environ) - set(orig): del os.environ[key] for key, value in orig.items(): if os.environ.get(key) != value: os.environ[key] = value @pytest.fixture def needs_zlib(): pytest.importorskip('zlib') @pytest.fixture def distutils_logging_silencer(request): from distutils import log self = request.instance self.threshold = log.set_threshold(log.FATAL) # catching warnings # when log will be replaced by logging # we won't need such monkey-patch anymore self._old_log = log.Log._log log.Log._log = self._log self.logs = [] try: yield finally: log.set_threshold(self.threshold) log.Log._log = self._old_log @pytest.fixture def distutils_managed_tempdir(request): from distutils.tests import py38compat as os_helper self = request.instance self.old_cwd = os.getcwd() self.tempdirs = [] try: yield finally: # Restore working dir, for Solaris and derivatives, where rmdir() # on the current directory fails. os.chdir(self.old_cwd) while self.tempdirs: tmpdir = self.tempdirs.pop() os_helper.rmtree(tmpdir) @pytest.fixture def save_argv(): orig = sys.argv[:] try: yield finally: sys.argv[:] = orig @pytest.fixture def save_cwd(): orig = os.getcwd() try: yield finally: os.chdir(orig) @pytest.fixture def threshold_warn(): from distutils.log import set_threshold, WARN orig = set_threshold(WARN) yield set_threshold(orig) @pytest.fixture def pypirc(request, save_env, distutils_managed_tempdir): from distutils.core import PyPIRCCommand from distutils.core import Distribution self = request.instance self.tmp_dir = self.mkdtemp() os.environ['HOME'] = self.tmp_dir os.environ['USERPROFILE'] = self.tmp_dir self.rc = os.path.join(self.tmp_dir, '.pypirc') self.dist = Distribution() class command(PyPIRCCommand): def __init__(self, dist): super().__init__(dist) def initialize_options(self): pass finalize_options = initialize_options self._cmd = command @pytest.fixture def cleanup_testfn(): from distutils.tests import py38compat as os_helper yield path = os_helper.TESTFN if os.path.isfile(path): os.remove(path) elif os.path.isdir(path): shutil.rmtree(path) # from pytest-dev/pytest#363 @pytest.fixture(scope="session") def monkeysession(request): from _pytest.monkeypatch import MonkeyPatch mpatch = MonkeyPatch() yield mpatch mpatch.undo() @pytest.fixture(autouse=True, scope="session") def suppress_path_mangle(monkeysession): """ Disable the path mangling in CCompiler. Workaround for #169. """ from distutils import ccompiler monkeysession.setattr( ccompiler.CCompiler, '_make_relative', staticmethod(lambda x: x) ) @pytest.fixture def temp_home(tmp_path, monkeypatch): var = 'USERPROFILE' if platform.system() == 'Windows' else 'HOME' monkeypatch.setenv(var, str(tmp_path)) return tmp_path
Python
0
d1137c56b59ef4fec06726fa0dda4854d0631e6d
delete tempfile after uploading screenshot
restclient.py
restclient.py
import json import requests import os from bs4 import BeautifulSoup from PyQt5.QtCore import * from PyQt5.QtWidgets import * from PyQt5.QtGui import * from ui.Ui_LoginDialog import Ui_LoginDialog def getLoginToken(address, email, password, timeout=15): """ attempt to get a login token. KeyError means invalid username or password""" client = requests.session() soup = BeautifulSoup(client.get(address, timeout=timeout).text, "html.parser") csrf = soup.find('input', { 'name': "csrf_token" })['value'] login_data = json.dumps({ "email": email, "password": password, "csrf_token": csrf }) r = client.post(address, data=login_data, headers={ "content-type": "application/json" }, timeout=timeout) ## if there's a login failure here, the server will report back whether the username or password was wrong. ## https://github.com/mattupstate/flask-security/issues/673 return r.json()['response']['user']['authentication_token'] def uploadFile(address, token, path, delete=True): """ KeyError means the upload failed """ r = requests.post(address, headers={ "Authentication-Token": token }, files={ "image": open(path, "rb") }) if delete: os.unlink(path) return r.json()['url'] class UploadThread(QThread): resultReady = pyqtSignal(str, object) def __init__(self, addr, token, path, parent=None): super(UploadThread, self).__init__(parent) self.addr = addr self.path = path self.token = token def run(self): url, error = None, None try: url = uploadFile(self.addr, self.token, self.path) except Exception as e: error = e self.resultReady.emit(url, error) class LoginThread(QThread): resultReady = pyqtSignal(str, object) def __init__(self, addr, email, password, parent=None): super(LoginThread, self).__init__(parent) self.addr = addr self.email = email self.password = password def run(self): token, error = None, None try: token = getLoginToken(self.addr, self.email, self.password) except Exception as e: error = e self.resultReady.emit(token, error) class LoginDialog(QDialog, Ui_LoginDialog): def __init__(self, parent): super(LoginDialog, self).__init__(parent) self.setupUi(self) self.loginToken = None self.thread = QThread(self) def accept(self): self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False) addr = QSettings(QSettings.IniFormat, QSettings.UserScope, "GliTch_ Is Mad Studios", "PostIt").value("internet/address") self.thread = LoginThread(addr + "/login", self.emailAddressLineEdit.text(), self.passwordLineEdit.text(), self) self.thread.resultReady.connect(self.gotToken) self.thread.start() def reject(self): if self.thread.isRunning(): self.thread.terminate() super().reject() def gotToken(self, token, error): self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True) if token and not error: self.loginToken = token super().accept() else: msg = '' if isinstance(error, KeyError): msg = "Invalid username or password." else: msg = str(error) QMessageBox.critical(self, "Login Failed", msg)
import json
import requests

from bs4 import BeautifulSoup

from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *

from ui.Ui_LoginDialog import Ui_LoginDialog


def getLoginToken(address, email, password, timeout=15):
    """ attempt to get a login token. KeyError means invalid username or password"""
    client = requests.session()
    soup = BeautifulSoup(client.get(address, timeout=timeout).text, "html.parser")
    csrf = soup.find('input', {'name': "csrf_token"})['value']

    login_data = json.dumps({
        "email": email,
        "password": password,
        "csrf_token": csrf
    })

    r = client.post(address, data=login_data,
                    headers={"content-type": "application/json"},
                    timeout=timeout).json()
    ## if there's a login failure here, the server will report back whether the username or password was wrong.
    ## https://github.com/mattupstate/flask-security/issues/673
    return r['response']['user']['authentication_token']


def uploadFile(address, token, path):
    """ KeyError means the upload failed """
    r = requests.post(address,
                      headers={"Authentication-Token": token},
                      files={"image": open(path, "rb")})
    return r.json()['url']


class UploadThread(QThread):
    resultReady = pyqtSignal(str, object)

    def __init__(self, addr, token, path, parent=None):
        super(UploadThread, self).__init__(parent)
        self.addr = addr
        self.path = path
        self.token = token

    def run(self):
        url, error = None, None
        try:
            url = uploadFile(self.addr, self.token, self.path)
        except Exception as e:
            error = e
        self.resultReady.emit(url, error)


class LoginThread(QThread):
    resultReady = pyqtSignal(str, object)

    def __init__(self, addr, email, password, parent=None):
        super(LoginThread, self).__init__(parent)
        self.addr = addr
        self.email = email
        self.password = password

    def run(self):
        token, error = None, None
        try:
            token = getLoginToken(self.addr, self.email, self.password)
        except Exception as e:
            error = e
        self.resultReady.emit(token, error)


class LoginDialog(QDialog, Ui_LoginDialog):
    def __init__(self, parent):
        super(LoginDialog, self).__init__(parent)
        self.setupUi(self)
        self.loginToken = None
        self.thread = QThread(self)

    def accept(self):
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
        addr = QSettings(QSettings.IniFormat, QSettings.UserScope,
                         "GliTch_ Is Mad Studios", "PostIt").value("internet/address")
        self.thread = LoginThread(addr + "/login",
                                  self.emailAddressLineEdit.text(),
                                  self.passwordLineEdit.text(), self)
        self.thread.resultReady.connect(self.gotToken)
        self.thread.start()

    def reject(self):
        if self.thread.isRunning():
            self.thread.terminate()
        super().reject()

    def gotToken(self, token, error):
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)
        if token and not error:
            self.loginToken = token
            super().accept()
        else:
            msg = ''
            if isinstance(error, KeyError):
                msg = "Invalid username or password."
            else:
                msg = str(error)
            QMessageBox.critical(self, "Login Failed", msg)
Python
0.000001
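A minimal sketch of the delete-after-upload pattern this commit introduces (the placeholder payload and upload step are illustrative, not part of the record):

import os
import tempfile

# write the screenshot to a temp file whose path outlives the handle
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as f:
    f.write(b'fake-png-bytes')  # stand-in payload
    path = f.name

try:
    pass  # upload `path` here, e.g. requests.post(..., files={'image': open(path, 'rb')})
finally:
    os.unlink(path)  # the cleanup the commit adds once the upload is done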
7484c8d4ab699ee16bc867cdff1e7ec699dbb142
Add profiling support to Melange. By assigning profile_main_as_logs or profile_main_as_html to the main variable you can turn on profiling. profile_main_as_logs logs profile data to the App Engine console logs; profile_main_as_html shows profile data as HTML at the bottom of the page. To profile a deployed app, just set the profiling function and deploy.
app/main.py
app/main.py
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__authors__ = [
    # alphabetical order by last name, please
    '"Augie Fackler" <[email protected]>',
]

import logging
import os
import sys

from google.appengine.ext.webapp import util

# Remove the standard version of Django.
for k in [k for k in sys.modules if k.startswith('django')]:
    del sys.modules[k]

# Force sys.path to have our own directory first, in case we want to import
# from it. This lets us replace the built-in Django
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
sys.path.insert(0, os.path.abspath('django.zip'))

ultimate_sys_path = None

# Force Django to reload its settings.
from django.conf import settings
settings._target = None

# Must set this env var before importing any part of Django
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

import django.core.handlers.wsgi
import django.core.signals
import django.db


# Log errors.
def log_exception(*args, **kwds):
    logging.exception('Exception in request:')


# Log all exceptions detected by Django.
django.core.signals.got_request_exception.connect(log_exception)

# Unregister the rollback event handler.
django.core.signals.got_request_exception.disconnect(
    django.db._rollback_on_exception)


def profile_main_as_html():
    """Main program for profiling. Profiling data added as HTML to the page.
    """
    import cProfile
    import pstats
    import StringIO

    prof = cProfile.Profile()
    prof = prof.runctx('real_main()', globals(), locals())
    stream = StringIO.StringIO()
    stats = pstats.Stats(prof, stream=stream)
    # stats.strip_dirs()  # Don't; too many modules are named __init__.py.

    # 'time', 'cumulative' or 'calls'
    stats.sort_stats('time')

    # Optional arg: how many to print
    stats.print_stats()

    # The rest is optional.
    # stats.print_callees()
    # stats.print_callers()
    print '\n<hr>'
    print '<h1>Profile data</h1>'
    print '<pre>'
    print stream.getvalue()[:1000000]
    print '</pre>'


def profile_main_as_logs():
    """Main program for profiling. Profiling data logged.
    """
    import cProfile
    import pstats
    import StringIO

    prof = cProfile.Profile()
    prof = prof.runctx("real_main()", globals(), locals())
    stream = StringIO.StringIO()
    stats = pstats.Stats(prof, stream=stream)
    stats.sort_stats('time')  # Or cumulative
    stats.print_stats(80)  # 80 = how many to print

    # The rest is optional.
    # stats.print_callees()
    # stats.print_callers()
    logging.info("Profile data:\n%s", stream.getvalue())


def real_main():
    """Main program without profiling.
    """
    global ultimate_sys_path
    if ultimate_sys_path is None:
        ultimate_sys_path = list(sys.path)
    else:
        sys.path[:] = ultimate_sys_path

    # Create a Django application for WSGI.
    application = django.core.handlers.wsgi.WSGIHandler()

    # Run the WSGI CGI handler with that application.
    util.run_wsgi_app(application)


main = real_main


if __name__ == '__main__':
    main()
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__authors__ = [
    # alphabetical order by last name, please
    '"Augie Fackler" <[email protected]>',
]

import logging
import os
import sys

from google.appengine.ext.webapp import util

# Remove the standard version of Django.
for k in [k for k in sys.modules if k.startswith('django')]:
    del sys.modules[k]

# Force sys.path to have our own directory first, in case we want to import
# from it. This lets us replace the built-in Django
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
sys.path.insert(0, os.path.abspath('django.zip'))

ultimate_sys_path = None

# Force Django to reload its settings.
from django.conf import settings
settings._target = None

# Must set this env var before importing any part of Django
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

import django.core.handlers.wsgi
import django.core.signals
import django.db


# Log errors.
def log_exception(*args, **kwds):
    logging.exception('Exception in request:')


# Log all exceptions detected by Django.
django.core.signals.got_request_exception.connect(log_exception)

# Unregister the rollback event handler.
django.core.signals.got_request_exception.disconnect(
    django.db._rollback_on_exception)


def main():
    global ultimate_sys_path
    if ultimate_sys_path is None:
        ultimate_sys_path = list(sys.path)
    else:
        sys.path[:] = ultimate_sys_path

    # Create a Django application for WSGI.
    application = django.core.handlers.wsgi.WSGIHandler()

    # Run the WSGI CGI handler with that application.
    util.run_wsgi_app(application)


if __name__ == '__main__':
    main()
Python
0
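The profiling hooks above wrap real_main with cProfile; here is a standalone Python 2 sketch of the same pattern, with a made-up noisy() function standing in for real_main:

import cProfile
import pstats
import StringIO  # Python 2, matching the record above


def noisy():
    return sum(i * i for i in xrange(100000))

prof = cProfile.Profile()
prof = prof.runctx('noisy()', globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
stats.sort_stats('time')  # most expensive functions first
stats.print_stats(10)     # top 10 entries only
print stream.getvalue()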
fc05512b3ad40f6571ee3d942e4829a19e2a465e
Add core.models.Sensor
sensor/core/models.py
sensor/core/models.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from django.db import models


class GenericSensor(models.Model):
    """Represents a sensor abstracting away the specifics of what it measures.

    A sensor measures one kind of thing. A physical device might have
    multiple logical sensors.
    """

    name = models.CharField(max_length=256)
    model = models.CharField(max_length=128)

    class Meta:
        unique_together = [('name', 'model')]


class Sensor(models.Model):
    """Base class for specific sensor types."""

    generic_sensor = models.OneToOneField(GenericSensor)

    class Meta:
        abstract = True
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from django.db import models


class GenericSensor(models.Model):
    """Represents a sensor abstracting away the specifics of what it measures.

    A sensor measures one kind of thing. A physical device might have
    multiple logical sensors.
    """

    name = models.CharField(max_length=256)
    model = models.CharField(max_length=128)

    class Meta:
        unique_together = [('name', 'model')]
Python
0.000002
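Because Sensor sets Meta.abstract = True, each concrete subclass gets its own table that carries the one-to-one link itself; a hypothetical subclass for illustration (TemperatureSensor and its field are invented, and Sensor is assumed importable from the record's models module):

from django.db import models

class TemperatureSensor(Sensor):
    # inherits `generic_sensor` from the abstract base; no extra join table is created
    unit = models.CharField(max_length=16, default='celsius')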
d9d9b993edc8baebf69b446d40f0a05260a041d5
Remove prints
emailauth/tests.py
emailauth/tests.py
from django.test import Client, TestCase

from emailauth import forms

c = Client()


class FormTests(TestCase):
    def test_creation_form(self):
        form_data = {'email': '[email protected]',
                     'password1': 'test1234',
                     'password2': 'test1234'}
        form = forms.UserCreationForm(form_data)
        # Testing if form is valid, and that the fields are working.
        self.assertTrue(form.is_valid())

    def test_form_save(self):
        form_data = {'email': '[email protected]',
                     'password1': 'test1234',
                     'password2': 'test1234'}
        form = forms.UserCreationForm(form_data)
        # Testing if form is valid, and that the fields are working.
        self.assertTrue(form.is_valid())
        user = form.save()
        # Testing if save function is returning properly
        self.assertEqual(str(user), '[email protected]')

    def test_not_identically_passwords(self):
        form_data = {'email': '[email protected]',
                     'password1': '1234test',
                     'password2': 'test1234'}
        form = forms.UserCreationForm(form_data)
        # Testing if form is invalid when passwords are not matching.
        self.assertFalse(form.is_valid())

    def test_register_by_post(self):
        # Testing register through post-request
        get_response = c.get('/register/')
        post_response_wrong = c.post('/register/', {
            'username': 'testUser',
            'password1': 'test1234',
            'password2': 'test1234',
        })
        post_response = c.post('/register/', {
            'email': '[email protected]',
            'password1': 'testPass1234',
            'password2': 'testPass1234',
        })
        self.assertEqual(get_response.status_code, 200)
        self.assertNotEqual(post_response_wrong.status_code, 302)
        self.assertEqual(post_response.status_code, 302)
from django.test import Client, TestCase

from emailauth import forms

c = Client()


class FormTests(TestCase):
    def test_creation_form(self):
        form_data = {'email': '[email protected]',
                     'password1': 'test1234',
                     'password2': 'test1234'}
        form = forms.UserCreationForm(form_data)
        # Testing if form is valid, and that the fields are working.
        self.assertTrue(form.is_valid())

    def test_form_save(self):
        form_data = {'email': '[email protected]',
                     'password1': 'test1234',
                     'password2': 'test1234'}
        form = forms.UserCreationForm(form_data)
        # Testing if form is valid, and that the fields are working.
        self.assertTrue(form.is_valid())
        user = form.save()
        # Testing if save function is returning properly
        self.assertEqual(str(user), '[email protected]')

    def test_not_identically_passwords(self):
        form_data = {'email': '[email protected]',
                     'password1': '1234test',
                     'password2': 'test1234'}
        form = forms.UserCreationForm(form_data)
        # Testing if form is invalid when passwords are not matching.
        self.assertFalse(form.is_valid())

    def test_register_by_post(self):
        # Testing register through post-request
        get_response = c.get('/register/')
        print(get_response.status_code)
        post_response_wrong = c.post('/register/', {
            'username': '[email protected]',
            'password1': 'test1234',
            'password2': 'test1234',
        })
        print(post_response_wrong.status_code)
        post_response = c.post('/register/', {
            'email': '[email protected]',
            'password1': 'testPass1234',
            'password2': 'testPass1234',
        })
        print(post_response.status_code)
        self.assertEqual(get_response.status_code, 200)
        self.assertNotEqual(post_response_wrong.status_code, 302)
        self.assertEqual(post_response.status_code, 302)
Python
0.000002
4c0325f92f542b9af7e504be55b7c7d79d1af3c8
Add the AfterRemoving pass type and pass-list setters to PassConfig
compiler.py
compiler.py
# -*- coding: utf-8 -*-

# This file is part of the pymfony package.
#
# (c) Alexandre Quercia <[email protected]>
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.

"""
"""

from __future__ import absolute_import;

from pymfony.component.system import (
    Object,
    interface,
);
from pymfony.component.dependency.exception import InvalidArgumentException;


@interface
class CompilerPassInterface(Object):
    """Interface that must be implemented by compilation passes
    """
    def process(self, container):
        """You can modify the container here before it is dumped to PHP code.

        @param container: ContainerBuilder
        """
        pass;


class PassConfig(Object):
    """Compiler Pass Configuration

    This class has a default configuration embedded.
    """
    TYPE_BEFORE_OPTIMIZATION = 'BeforeOptimization';
    TYPE_AFTER_REMOVING = 'AfterRemoving';

    def __init__(self):
        self.__mergePass = None;
        self.__beforeOptimizationPasses = list();
        self.__afterRemovingPasses = list();

    def getPasses(self):
        """Returns all passes in order to be processed.

        @return: list A list of all passes to process
        """
        passes = list();
        if self.__mergePass:
            passes.append(self.__mergePass);
        passes.extend(self.__beforeOptimizationPasses);
        passes.extend(self.__afterRemovingPasses);
        return passes;

    def addPass(self, cPass, cType=TYPE_BEFORE_OPTIMIZATION):
        """Adds a pass.

        @param cPass: CompilerPassInterface A Compiler pass
        @param cType: string The pass type

        @raise InvalidArgumentException: when a pass type doesn't exist
        """
        assert isinstance(cPass, CompilerPassInterface);

        getPropertyName = "get{0}Passes".format(cType);
        setPropertyName = "set{0}Passes".format(cType);
        if not hasattr(self, getPropertyName):
            raise InvalidArgumentException(
                'Invalid type "{0}".'.format(cType)
            );

        passes = getattr(self, getPropertyName)();
        passes.append(cPass);
        getattr(self, setPropertyName)(passes);

    def getMergePass(self):
        """Gets the Merge Pass.

        @return: CompilerPassInterface A merge pass
        """
        return self.__mergePass;

    def setMergePass(self, mergePass):
        """Sets the Merge Pass.

        @param mergePass: CompilerPassInterface A merge pass
        """
        assert isinstance(mergePass, CompilerPassInterface);

        self.__mergePass = mergePass;

    def getBeforeOptimizationPasses(self):
        """
        @return: list
        """
        return self.__beforeOptimizationPasses;

    def setBeforeOptimizationPasses(self, passes):
        """
        @param passes: list
        """
        self.__beforeOptimizationPasses = passes;

    def getAfterRemovingPasses(self):
        """
        @return: list
        """
        return self.__afterRemovingPasses;

    def setAfterRemovingPasses(self, passes):
        """
        @param passes: list
        """
        self.__afterRemovingPasses = passes;


class Compiler(Object):
    """This class is used to remove circular dependencies between individual passes.
    """
    def __init__(self):
        """Constructor.
        """
        self.__passConfig = PassConfig();

    def getPassConfig(self):
        """Returns the PassConfig.

        @return: PassConfig The PassConfig instance
        """
        return self.__passConfig;

    def addPass(self, cPass, cType=PassConfig.TYPE_BEFORE_OPTIMIZATION):
        """Adds a pass to the PassConfig.

        @param cPass: CompilerPassInterface A compiler pass
        @param cType: string The type of the pass
        """
        assert isinstance(cPass, CompilerPassInterface);

        self.__passConfig.addPass(cPass, cType);

    def compile(self, container):
        """Run the Compiler and process all Passes.

        @param container: ContainerBuilder
        """
        for cPass in self.__passConfig.getPasses():
            cPass.process(container);
# -*- coding: utf-8 -*-

# This file is part of the pymfony package.
#
# (c) Alexandre Quercia <[email protected]>
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.

"""
"""

from __future__ import absolute_import;

from pymfony.component.system import (
    Object,
    interface,
);
from pymfony.component.dependency.exception import InvalidArgumentException;


@interface
class CompilerPassInterface(Object):
    """Interface that must be implemented by compilation passes
    """
    def process(self, container):
        """You can modify the container here before it is dumped to PHP code.

        @param container: ContainerBuilder
        """
        pass;


class PassConfig(Object):
    """Compiler Pass Configuration

    This class has a default configuration embedded.
    """
    TYPE_BEFORE_OPTIMIZATION = 'BeforeOptimization';

    def __init__(self):
        self.__mergePass = None;
        self.__beforeOptimizationPasses = list();

    def getPasses(self):
        """Returns all passes in order to be processed.

        @return: list A list of all passes to process
        """
        passes = list();
        if self.__mergePass:
            passes.append(self.__mergePass);
        passes.extend(self.__beforeOptimizationPasses);
        return passes;

    def addPass(self, cPass, cType=TYPE_BEFORE_OPTIMIZATION):
        """Adds a pass.

        @param cPass: CompilerPassInterface A Compiler pass
        @param cType: string The pass type

        @raise InvalidArgumentException: when a pass type doesn't exist
        """
        assert isinstance(cPass, CompilerPassInterface);

        propertyName = "get{0}Passes".format(cType);
        if not hasattr(self, propertyName):
            raise InvalidArgumentException(
                'Invalid type "{0}".'.format(cType)
            );

        getattr(self, propertyName)().append(cPass);

    def getMergePass(self):
        """Gets the Merge Pass.

        @return: CompilerPassInterface A merge pass
        """
        return self.__mergePass;

    def setMergePass(self, mergePass):
        """Sets the Merge Pass.

        @param mergePass: CompilerPassInterface A merge pass
        """
        assert isinstance(mergePass, CompilerPassInterface);

        self.__mergePass = mergePass;

    def getBeforeOptimizationPasses(self):
        """
        @return: list
        """
        return self.__beforeOptimizationPasses;


class Compiler(Object):
    """This class is used to remove circular dependencies between individual passes.
    """
    def __init__(self):
        """Constructor.
        """
        self.__passConfig = PassConfig();

    def getPassConfig(self):
        """Returns the PassConfig.

        @return: PassConfig The PassConfig instance
        """
        return self.__passConfig;

    def addPass(self, cPass, cType=PassConfig.TYPE_BEFORE_OPTIMIZATION):
        """Adds a pass to the PassConfig.

        @param cPass: CompilerPassInterface A compiler pass
        @param cType: string The type of the pass
        """
        assert isinstance(cPass, CompilerPassInterface);

        self.__passConfig.addPass(cPass, cType);

    def compile(self, container):
        """Run the Compiler and process all Passes.

        @param container: ContainerBuilder
        """
        for cPass in self.__passConfig.getPasses():
            cPass.process(container);
Python
0
265e9added53d1eee1291b9e0b5a10bc7dfe19c8
Make sure we don't have section A before doing the extra round of manipulation
myuw_mobile/test/dao/canvas.py
myuw_mobile/test/dao/canvas.py
from django.test import TestCase
from django.test.client import RequestFactory
from myuw_mobile.dao.canvas import get_indexed_data_for_regid
from myuw_mobile.dao.canvas import get_indexed_by_decrosslisted
from myuw_mobile.dao.schedule import _get_schedule
from myuw_mobile.dao.term import get_current_quarter


class TestCanvas(TestCase):
    def test_crosslinks(self):
        with self.settings(
                RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File'):
            data = get_indexed_data_for_regid("12345678901234567890123456789012")

            physics = data['2013,spring,PHYS,121/A']
            self.assertEquals(physics.course_url,
                              'https://canvas.uw.edu/courses/149650')

            has_section_a = '2013,spring,TRAIN,100/A' in data
            self.assertFalse(has_section_a)

            train = data['2013,spring,TRAIN,100/B']
            self.assertEquals(train.course_url,
                              'https://canvas.uw.edu/courses/249650')

    def test_crosslinks_lookup(self):
        with self.settings(
                RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File'):
            data = get_indexed_data_for_regid("12345678901234567890123456789012")

            now_request = RequestFactory().get("/")
            now_request.session = {}
            term = get_current_quarter(now_request)
            schedule = _get_schedule("12345678901234567890123456789012", term)

            canvas_data_by_course_id = get_indexed_by_decrosslisted(
                data, schedule.sections)

            physics = data['2013,spring,PHYS,121/A']
            self.assertEquals(physics.course_url,
                              'https://canvas.uw.edu/courses/149650')

            train = data['2013,spring,TRAIN,100/A']
            self.assertEquals(train.course_url,
                              'https://canvas.uw.edu/courses/249650')
from django.test import TestCase
from django.test.client import RequestFactory
from myuw_mobile.dao.canvas import get_indexed_data_for_regid
from myuw_mobile.dao.canvas import get_indexed_by_decrosslisted
from myuw_mobile.dao.schedule import _get_schedule
from myuw_mobile.dao.term import get_current_quarter


class TestCanvas(TestCase):
    def test_crosslinks(self):
        with self.settings(
                RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File'):
            data = get_indexed_data_for_regid("12345678901234567890123456789012")

            physics = data['2013,spring,PHYS,121/A']
            self.assertEquals(physics.course_url,
                              'https://canvas.uw.edu/courses/149650')

            train = data['2013,spring,TRAIN,100/B']
            self.assertEquals(train.course_url,
                              'https://canvas.uw.edu/courses/249650')

    def test_crosslinks_lookup(self):
        with self.settings(
                RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File'):
            data = get_indexed_data_for_regid("12345678901234567890123456789012")

            now_request = RequestFactory().get("/")
            now_request.session = {}
            term = get_current_quarter(now_request)
            schedule = _get_schedule("12345678901234567890123456789012", term)

            canvas_data_by_course_id = get_indexed_by_decrosslisted(
                data, schedule.sections)

            physics = data['2013,spring,PHYS,121/A']
            self.assertEquals(physics.course_url,
                              'https://canvas.uw.edu/courses/149650')

            train = data['2013,spring,TRAIN,100/A']
            self.assertEquals(train.course_url,
                              'https://canvas.uw.edu/courses/249650')
Python
0.000001
ae948a2dfdd62af2ba98a0ee506ddd48504ee64b
bump version to 0.6-dev
validictory/__init__.py
validictory/__init__.py
#!/usr/bin/env python

from validictory.validator import SchemaValidator

__all__ = ['validate', 'SchemaValidator']
__version__ = '0.6.0-dev'


def validate(data, schema, validator_cls=SchemaValidator):
    '''
    Validates a parsed json document against the provided schema. If an
    error is found a ValueError is raised.

    ``data`` is a python dictionary object of parsed json data.

    ``schema`` is a python dictionary object representing the schema.

    If ``validator_cls`` is provided that class will be used to validate
    the given ``schema`` against the given ``data``. The given class should
    be a subclass of the SchemaValidator class.
    '''
    v = validator_cls()
    return v.validate(data, schema)


if __name__ == '__main__':
    import sys
    import json

    if len(sys.argv) == 2:
        if sys.argv[1] == "--help":
            raise SystemExit("%s SCHEMAFILE [INFILE]" % (sys.argv[0],))
        schemafile = open(sys.argv[1], 'rb')
        infile = sys.stdin
    elif len(sys.argv) == 3:
        schemafile = open(sys.argv[1], 'rb')
        infile = open(sys.argv[2], 'rb')
    else:
        raise SystemExit("%s SCHEMAFILE [INFILE]" % (sys.argv[0],))
    try:
        obj = json.load(infile)
        schema = json.load(schemafile)
        validate(obj, schema)
    except ValueError, e:
        raise SystemExit(e)
#!/usr/bin/env python

from validictory.validator import SchemaValidator

__all__ = ['validate', 'SchemaValidator']
__version__ = '0.5.0'


def validate(data, schema, validator_cls=SchemaValidator):
    '''
    Validates a parsed json document against the provided schema. If an
    error is found a ValueError is raised.

    ``data`` is a python dictionary object of parsed json data.

    ``schema`` is a python dictionary object representing the schema.

    If ``validator_cls`` is provided that class will be used to validate
    the given ``schema`` against the given ``data``. The given class should
    be a subclass of the SchemaValidator class.
    '''
    v = validator_cls()
    return v.validate(data, schema)


if __name__ == '__main__':
    import sys
    import json

    if len(sys.argv) == 2:
        if sys.argv[1] == "--help":
            raise SystemExit("%s SCHEMAFILE [INFILE]" % (sys.argv[0],))
        schemafile = open(sys.argv[1], 'rb')
        infile = sys.stdin
    elif len(sys.argv) == 3:
        schemafile = open(sys.argv[1], 'rb')
        infile = open(sys.argv[2], 'rb')
    else:
        raise SystemExit("%s SCHEMAFILE [INFILE]" % (sys.argv[0],))
    try:
        obj = json.load(infile)
        schema = json.load(schemafile)
        validate(obj, schema)
    except ValueError, e:
        raise SystemExit(e)
Python
0
295a6dd0c2af01161ee5da274719596f043fe21c
Use encode('utf8') instead of str(...).
applyCrf.py
applyCrf.py
#!/usr/bin/env python

"""This program will read a JSON file (such as
adjudicated_modeled_live_eyehair_100_03.json) and process it with CRF++.
The labels assigned by CRF++ are printed."""

import argparse
import sys
import scrapings
import crf_features as crff
import CRFPP


def main(argv=None):
    '''this is called if run from command line'''
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', help="Optional give debugging feedback.", required=False, action='store_true')
    parser.add_argument('-f', '--featlist', help="Required input file with features to be extracted, one feature entry per line.", required=True)
    parser.add_argument('-i', '--input', help="Required input file with Web scraping sentences in JSON format.", required=True)
    parser.add_argument('-m', '--model', help="Required input model file.", required=True)
    args = parser.parse_args()

    # Read the Web scrapings:
    s = scrapings.Scrapings(args.input)
    if args.debug:
        print "sentence count=%d" % s.sentenceCount()

    # Create a CrfFeatures object. This class provides a lot of services, but we'll use only a subset.
    c = crff.CrfFeatures(args.featlist)

    # Create a CRF++ processor.
    tagger = CRFPP.Tagger("-m " + args.model)

    for sidx in range(0, s.sentenceCount()):
        tokens = s.getAllTokens(sidx)
        if args.debug:
            print "len(tokens)=%d" % len(tokens)
        fc = c.featurizeSentence(tokens)
        if args.debug:
            print "len(fc)=%d" % len(fc)

        tagger.clear()
        for idx, token in enumerate(tokens):
            features = fc[idx]
            if args.debug:
                print "token#%d (%s) has %d features" % (idx, token, len(features))
            tf = token + ' ' + ' '.join(features)
            tagger.add(tf.encode('utf8'))
        tagger.parse()

        # tagger.size() returns the number of tokens that were added.
        # tagger.xsize() returns the number of features plus 1 (for the token).
        if args.debug:
            print "size=%d" % tagger.size()
            print "xsize=%d" % tagger.xsize()
            print "ysize=%d" % tagger.ysize()
            print "dsize=%d" % tagger.dsize()
            print "vlevel=%d" % tagger.vlevel()
            print "nbest=%d" % tagger.nbest()

        ntokens = tagger.size()
        if ntokens != len(tokens):
            print "received %d tokens , expected %d" % (ntokens, len(tokens))
        nfeatures = tagger.xsize()

        for tokenIdx in range(0, tagger.size()):
            if args.debug:
                for featureIdx in range(0, nfeatures):
                    print "x(%d, %d)=%s" % (tokenIdx, featureIdx, tagger.x(tokenIdx, featureIdx))
            # tagger.x(tokenIdx, 0) is the original token
            # tagger.yname(tagger.y(tokenIdx)) is the label assigned to that token.
            print "%s %s" % (tagger.x(tokenIdx, 0), tagger.yname(tagger.y(tokenIdx)))


# call main() if this is run as standalone
if __name__ == "__main__":
    sys.exit(main())
#!/usr/bin/env python

"""This program will read a JSON file (such as
adjudicated_modeled_live_eyehair_100_03.json) and process it with CRF++.
The labels assigned by CRF++ are printed."""

import argparse
import sys
import scrapings
import crf_features as crff
import CRFPP


def main(argv=None):
    '''this is called if run from command line'''
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', help="Optional give debugging feedback.", required=False, action='store_true')
    parser.add_argument('-f', '--featlist', help="Required input file with features to be extracted, one feature entry per line.", required=True)
    parser.add_argument('-i', '--input', help="Required input file with Web scraping sentences in JSON format.", required=True)
    parser.add_argument('-m', '--model', help="Required input model file.", required=True)
    args = parser.parse_args()

    # Read the Web scrapings:
    s = scrapings.Scrapings(args.input)
    if args.debug:
        print "sentence count=%d" % s.sentenceCount()

    # Create a CrfFeatures object. This class provides a lot of services, but we'll use only a subset.
    c = crff.CrfFeatures(args.featlist)

    # Create a CRF++ processor.
    tagger = CRFPP.Tagger("-m " + args.model)

    for sidx in range(0, s.sentenceCount()):
        tokens = s.getAllTokens(sidx)
        if args.debug:
            print "len(tokens)=%d" % len(tokens)
        fc = c.featurizeSentence(tokens)
        if args.debug:
            print "len(fc)=%d" % len(fc)

        tagger.clear()
        for idx, token in enumerate(tokens):
            features = fc[idx]
            if args.debug:
                print "token#%d (%s) has %d features" % (idx, token, len(features))
            tf = token + ' ' + ' '.join(features)
            tagger.add(str(tf))
        tagger.parse()

        # tagger.size() returns the number of tokens that were added.
        # tagger.xsize() returns the number of features plus 1 (for the token).
        if args.debug:
            print "size=%d" % tagger.size()
            print "xsize=%d" % tagger.xsize()
            print "ysize=%d" % tagger.ysize()
            print "dsize=%d" % tagger.dsize()
            print "vlevel=%d" % tagger.vlevel()
            print "nbest=%d" % tagger.nbest()

        ntokens = tagger.size()
        if ntokens != len(tokens):
            print "received %d tokens , expected %d" % (ntokens, len(tokens))
        nfeatures = tagger.xsize()

        for tokenIdx in range(0, tagger.size()):
            if args.debug:
                for featureIdx in range(0, nfeatures):
                    print "x(%d, %d)=%s" % (tokenIdx, featureIdx, tagger.x(tokenIdx, featureIdx))
            # tagger.x(tokenIdx, 0) is the original token
            # tagger.yname(tagger.y(tokenIdx)) is the label assigned to that token.
            print "%s %s" % (tagger.x(tokenIdx, 0), tagger.yname(tagger.y(tokenIdx)))


# call main() if this is run as standalone
if __name__ == "__main__":
    sys.exit(main())
Python
0.000001
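Why the commit swaps str(...) for encode('utf8'): in Python 2, str() implicitly encodes unicode as ASCII and blows up on non-ASCII tokens.

# Python 2 behaviour the commit works around
token = u'caf\xe9'
token.encode('utf8')  # -> 'caf\xc3\xa9', safe to hand to the C extension
str(token)            # raises UnicodeEncodeError ('ascii' codec, ordinal not in range(128))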
0cd3651810daceefa492bc303c74568d1a042ca6
Fix get_proxy_ticket method usage
django_cas_ng/models.py
django_cas_ng/models.py
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings

from .utils import (get_cas_client, get_user_from_session)

from importlib import import_module
from cas import CASError

SessionStore = import_module(settings.SESSION_ENGINE).SessionStore


class ProxyError(ValueError):
    pass


class ProxyGrantingTicket(models.Model):
    class Meta:
        unique_together = ('session_key', 'user')
    session_key = models.CharField(max_length=255, blank=True, null=True)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name="+",
        null=True,
        blank=True
    )
    pgtiou = models.CharField(max_length=255, null=True, blank=True)
    pgt = models.CharField(max_length=255, null=True, blank=True)
    date = models.DateTimeField(auto_now_add=True)

    @classmethod
    def clean_deleted_sessions(cls):
        for pgt in cls.objects.all():
            session = SessionStore(session_key=pgt.session_key)
            user = get_user_from_session(session)
            if not user.is_authenticated():
                pgt.delete()

    @classmethod
    def retrieve_pt(cls, request, service):
        """`request` should be the current HttpRequest object
        `service` a string representing the service for which we want to
        retrieve a ticket.
        The function returns a Proxy Ticket or raises `ProxyError`
        """
        try:
            pgt = cls.objects.get(
                user=request.user,
                session_key=request.session.session_key
            ).pgt
        except cls.DoesNotExist:
            raise ProxyError(
                "INVALID_TICKET",
                "No proxy ticket found for this HttpRequest object"
            )
        else:
            client = get_cas_client(service_url=service)
            try:
                return client.get_proxy_ticket(pgt)
            # change CASError to ProxyError nicely
            except CASError as error:
                raise ProxyError(*error.args)
            # just embed other errors
            except Exception as e:
                raise ProxyError(e)


class SessionTicket(models.Model):
    session_key = models.CharField(max_length=255)
    ticket = models.CharField(max_length=255)

    @classmethod
    def clean_deleted_sessions(cls):
        for st in cls.objects.all():
            session = SessionStore(session_key=st.session_key)
            user = get_user_from_session(session)
            if not user.is_authenticated():
                st.delete()
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings

from .utils import (get_cas_client, get_service_url, get_user_from_session)

from importlib import import_module
from cas import CASError

SessionStore = import_module(settings.SESSION_ENGINE).SessionStore


class ProxyError(ValueError):
    pass


class ProxyGrantingTicket(models.Model):
    class Meta:
        unique_together = ('session_key', 'user')
    session_key = models.CharField(max_length=255, blank=True, null=True)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name="+",
        null=True,
        blank=True
    )
    pgtiou = models.CharField(max_length=255, null=True, blank=True)
    pgt = models.CharField(max_length=255, null=True, blank=True)
    date = models.DateTimeField(auto_now_add=True)

    @classmethod
    def clean_deleted_sessions(cls):
        for pgt in cls.objects.all():
            session = SessionStore(session_key=pgt.session_key)
            user = get_user_from_session(session)
            if not user.is_authenticated():
                pgt.delete()

    @classmethod
    def retrieve_pt(cls, request, service):
        """`request` should be the current HttpRequest object
        `service` a string representing the service for which we want to
        retrieve a ticket.
        The function returns a Proxy Ticket or raises `ProxyError`
        """
        try:
            pgt = cls.objects.get(
                user=request.user,
                session_key=request.session.session_key
            ).pgt
        except cls.DoesNotExist:
            raise ProxyError(
                "INVALID_TICKET",
                "No proxy ticket found for this HttpRequest object"
            )
        else:
            service_url = get_service_url(request)
            client = get_cas_client(service_url=service_url)
            try:
                return client.get_proxy_ticket(pgt, service)
            # change CASError to ProxyError nicely
            except CASError as error:
                raise ProxyError(*error.args)
            # just embed other errors
            except Exception as e:
                raise ProxyError(e)


class SessionTicket(models.Model):
    session_key = models.CharField(max_length=255)
    ticket = models.CharField(max_length=255)

    @classmethod
    def clean_deleted_sessions(cls):
        for st in cls.objects.all():
            session = SessionStore(session_key=st.session_key)
            user = get_user_from_session(session)
            if not user.is_authenticated():
                st.delete()
Python
0.000004
db033a9560ee97b5281adbf05f3f452943d592d7
Add test_get_on_call and test_weekly
django_on_call/tests.py
django_on_call/tests.py
import datetime

from django.test import TestCase

from .models import OnCall


class SimpleTest(TestCase):
    def test_get_on_call(self):
        """Test the basic OnCall.get_on_call functionality
        """
        on_call = OnCall(slug='test', rule='on_call = "Alice"')
        self.assertEqual(on_call.get_on_call(), 'Alice')

    def test_weekly(self):
        """Test a week-on round robin
        """
        on_call = OnCall(slug='test', rule='\n'.join([
            'handlers = ["Alice", "Bob", "Charlie"]',
            'week = int(now.strftime("%W"))',
            'on_call = handlers[week % len(handlers)]',
        ]))
        for now, expected in [
            (datetime.datetime(2013, 1, 1), 'Alice'),
            (datetime.datetime(2013, 1, 8), 'Bob'),
            (datetime.datetime(2013, 1, 15), 'Charlie'),
            (datetime.datetime(2013, 1, 22), 'Alice'),
        ]:
            self.assertEqual(on_call.get_on_call(now=now), expected)
""" This file demonstrates writing tests using the unittest module. These will pass when you run "manage.py test". Replace this with more appropriate tests for your application. """ from django.test import TestCase class SimpleTest(TestCase): def test_basic_addition(self): """ Tests that 1 + 1 always equals 2. """ self.assertEqual(1 + 1, 2)
Python
0
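The rule under test rotates handlers by strftime week number; checking the arithmetic directly against the expected values in the record:

import datetime

handlers = ["Alice", "Bob", "Charlie"]
for day in (1, 8, 15, 22):
    now = datetime.datetime(2013, 1, day)
    week = int(now.strftime("%W"))          # weeks start on Monday, zero-based
    print(handlers[week % len(handlers)])   # Alice, Bob, Charlie, Alice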
781e20bc3f465bdaac50f0f2a637b037d892c054
Remove premature optimisation
src/registry.py
src/registry.py
from .formatters import *


class FormatRegistry():
    def __init__(self):
        self.__formatters = [
            ClangFormat(), ElmFormat(), GoFormat(), JavaScriptFormat(),
            PythonFormat(), RustFormat(), TerraformFormat()
        ]

    @property
    def all(self):
        return self.__formatters

    @property
    def enabled(self):
        return [x for x in self.all if x.format_on_save]

    def find(self, predicate, default=None):
        return next((x for x in self.all if predicate(x)), default)

    def by_view(self, view):
        source = view.scope_name(0).split(' ')[0]
        return self.find(lambda x: x.source == source)

    def by_name(self, name):
        return self.find(lambda x: x.name == name)
from .formatters import *


class FormatRegistry():
    def __init__(self):
        self.__registered_formatters = [
            ClangFormat(), ElmFormat(), GoFormat(), JavaScriptFormat(),
            PythonFormat(), RustFormat(), TerraformFormat()
        ]
        self.__source_formatter_lookup_table = {}
        for formatter in self.__registered_formatters:
            self.__source_formatter_lookup_table[formatter.source] = formatter

    @property
    def all(self):
        return self.__registered_formatters

    @property
    def enabled(self):
        return [x for x in self.all if x.format_on_save]

    def find(self, predicate, default=None):
        return next((x for x in self.all if predicate(x)), default)

    def by_view(self, view):
        source = view.scope_name(0).split(' ')[0]
        return self.__source_formatter_lookup_table.get(source)

    def by_name(self, name):
        return self.find(lambda x: x.name == name)
Python
0.00005
945e7d1ef165054891a0ac574d52f6a1c3b7a162
Add long help
code_gen.py
code_gen.py
import sys
import getopt

from config import CONFIG
from ida_code_gen import IdaCodeGen
from ida_parser import IdaInfoParser


def print_help():
    print 'Options:'
    print '  -d, --database    Path to database from arguments. Default = ' + CONFIG['database']
    print '  -o, --out_dir     Path to output directory for code generation. Default = ' + CONFIG['out_dir']
    print '  -v, --verbose     Verbose mode program. Default = ' + str(CONFIG['verbose'])
    print 'Example:'
    print '  python code_gen.py -v --database C:/ida_info.sqlite3 --out_dir C:/code_gen/'
    pass


def main(argv):
    try:
        opts, args = getopt.getopt(argv, 'hvdo', ['help', 'verbose', 'database=', 'out_dir='])
    except getopt.GetoptError:
        print_help()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print_help()
            sys.exit()
        if opt in ('-v', '--verbose'):
            CONFIG['verbose'] = True
            continue
        if opt in ('-d', '--database'):
            CONFIG['database'] = arg
            continue
        if opt in ('-o', '--out_dir'):
            CONFIG['out_dir'] = arg
            continue

    if CONFIG['verbose']:
        print 'database: ' + CONFIG['database']
        print 'out_dir: ' + CONFIG['out_dir']
        print 'verbose: ' + str(CONFIG['verbose'])

    parser = IdaInfoParser(CONFIG['database'])
    parser.start()

    code_gen = IdaCodeGen(CONFIG['database'], CONFIG['out_dir'])
    code_gen.start()


if __name__ == '__main__':
    main(sys.argv[1:])
import sys
import getopt

from config import CONFIG
from ida_code_gen import IdaCodeGen
from ida_parser import IdaInfoParser


def print_help():
    print 'Options:'
    print '  -d, --database    Path to database from arguments. Default = ' + CONFIG['database']
    print '  -o, --out_dir     Path to output directory for code generation. Default = ' + CONFIG['out_dir']
    print '  -v, --verbose     Verbose mode program. Default = ' + str(CONFIG['verbose'])
    print 'Example:'
    print '  python code_gen.py -v --database C:/ida_info.sqlite3 --out_dir C:/code_gen/'
    pass


def main(argv):
    try:
        opts, args = getopt.getopt(argv, 'hvdo', ['verbose', 'database=', 'out_dir='])
    except getopt.GetoptError:
        print_help()
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print_help()
            sys.exit()
        if opt in ('-v', '--verbose'):
            CONFIG['verbose'] = True
            continue
        if opt in ('-d', '--database'):
            CONFIG['database'] = arg
            continue
        if opt in ('-o', '--out_dir'):
            CONFIG['out_dir'] = arg
            continue

    if CONFIG['verbose']:
        print 'database: ' + CONFIG['database']
        print 'out_dir: ' + CONFIG['out_dir']
        print 'verbose: ' + str(CONFIG['verbose'])

    parser = IdaInfoParser(CONFIG['database'])
    parser.start()

    code_gen = IdaCodeGen(CONFIG['database'], CONFIG['out_dir'])
    code_gen.start()


if __name__ == '__main__':
    main(sys.argv[1:])
Python
0.000121
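A usage sketch of the getopt pattern in this record; note that short options taking a value need a trailing colon (e.g. 'hvd:o:'), which the record's 'hvdo' string omits:

import getopt

argv = ['-v', '--database', 'C:/ida_info.sqlite3']
opts, args = getopt.getopt(argv, 'hvd:o:',
                           ['help', 'verbose', 'database=', 'out_dir='])
print(opts)  # [('-v', ''), ('--database', 'C:/ida_info.sqlite3')]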
2ad94140360f893ad46b1b972e753f2a78b5f779
Use print function instead of print statement
example/example.py
example/example.py
# coding: utf-8
import json
import os

import lastpass

with open(os.path.join(os.path.dirname(__file__), 'credentials.json')) as f:
    credentials = json.load(f)

username = str(credentials['username'])
password = str(credentials['password'])

try:
    # First try without a multifactor password
    vault = lastpass.Vault.open_remote(username, password)
except lastpass.LastPassIncorrectGoogleAuthenticatorCodeError as e:
    # Get the code
    multifactor_password = input('Enter Google Authenticator code:')

    # And now retry with the code
    vault = lastpass.Vault.open_remote(username, password, multifactor_password)
except lastpass.LastPassIncorrectYubikeyPasswordError as e:
    # Get the code
    multifactor_password = input('Enter Yubikey password:')

    # And now retry with the code
    vault = lastpass.Vault.open_remote(username, password, multifactor_password)

for index, i in enumerate(vault.accounts):
    print("{} {} {} {} {} {} {}".format(index + 1, i.id, i.name, i.username, i.password, i.url, i.group))
# coding: utf-8
import json
import os

import lastpass

with open(os.path.join(os.path.dirname(__file__), 'credentials.json')) as f:
    credentials = json.load(f)

username = str(credentials['username'])
password = str(credentials['password'])

try:
    # First try without a multifactor password
    vault = lastpass.Vault.open_remote(username, password)
except lastpass.LastPassIncorrectGoogleAuthenticatorCodeError as e:
    # Get the code
    multifactor_password = input('Enter Google Authenticator code:')

    # And now retry with the code
    vault = lastpass.Vault.open_remote(username, password, multifactor_password)
except lastpass.LastPassIncorrectYubikeyPasswordError as e:
    # Get the code
    multifactor_password = input('Enter Yubikey password:')

    # And now retry with the code
    vault = lastpass.Vault.open_remote(username, password, multifactor_password)

for index, i in enumerate(vault.accounts):
    print index+1, i.id, i.name, i.username, i.password, i.url, i.group
Python
0.00093
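The diff above converts a Python 2 print statement into a call to the print function; the __future__ import gives the same syntax on both interpreters:

from __future__ import print_function  # a no-op on Python 3

print(1, 'two', 3.0)                 # -> 1 two 3.0
print('a', 'b', sep='-', end='!\n')  # -> a-b!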
cefa0a94582e40f92c48d6c91cf393c9b0310713
fix geojson in sources dir
validate.py
validate.py
import json
import re

import click
import jsonschema

import utils


@click.command()
@click.argument('schema', type=click.File('r'), required=True)
@click.argument('jsonfiles', type=click.Path(exists=True), required=True)
def validate(schema, jsonfiles):
    """Validate JSON files against a JSON schema.

    \b
    SCHEMA: JSON schema to validate against. Required.
    JSONFILE: JSON files to validate. Required.
    """
    schema = json.loads(schema.read())
    for path in utils.get_files(jsonfiles):
        if path.startswith('sources'):
            regex = r'sources/[A-Z]{2}/[A-Z]{2}/[a-z-]+.json'
        elif path.startswith('generated'):
            regex = r'generated/[A-Z]{2}/[A-Z]{2}/[a-z-]+.geojson'
        else:
            regex = r''
        if not re.compile(regex).match(path):
            raise AssertionError('Path does not match spec for ' + path)
        with open(path) as f:
            jsonfile = json.loads(f.read())
        jsonschema.validate(jsonfile, schema)


if __name__ == '__main__':
    validate()
import json
import re

import click
import jsonschema

import utils


@click.command()
@click.argument('schema', type=click.File('r'), required=True)
@click.argument('jsonfiles', type=click.Path(exists=True), required=True)
def validate(schema, jsonfiles):
    """Validate JSON files against a JSON schema.

    \b
    SCHEMA: JSON schema to validate against. Required.
    JSONFILE: JSON files to validate. Required.
    """
    schema = json.loads(schema.read())
    for path in utils.get_files(jsonfiles):
        regex = r'(sources|generated)/[A-Z]{2}/[A-Z]{2}/[a-z-]+.(geo)?json'
        if not re.compile(regex).match(path):
            raise AssertionError('Source path does not match spec for ' + path)
        with open(path) as f:
            jsonfile = json.loads(f.read())
        jsonschema.validate(jsonfile, schema)


if __name__ == '__main__':
    validate()
Python
0.000003
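How the two path regexes in the new validate() behave; the dots are unescaped in the record, so as a hedge the sketch escapes them to match only a literal '.':

import re

sources = re.compile(r'sources/[A-Z]{2}/[A-Z]{2}/[a-z-]+\.json')
generated = re.compile(r'generated/[A-Z]{2}/[A-Z]{2}/[a-z-]+\.geojson')

print(bool(sources.match('sources/US/CA/san-diego.json')))         # True
print(bool(generated.match('generated/US/CA/san-diego.geojson')))  # True
print(bool(sources.match('generated/US/CA/san-diego.geojson')))    # False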
6d35c533940db6a6d664546c2b97e5c12c92dcfe
remove yaml parser for bandap GMM
example/src/yml.py
example/src/yml.py
# -*- coding: utf-8 -*-

from __future__ import division, print_function, absolute_import

import os

import yaml


class SpeakerYML(object):
    def __init__(self, ymlf):
        # open yml file
        with open(ymlf) as yf:
            conf = yaml.safe_load(yf)

        # read parameter from yml file
        self.wav_fs = conf['wav']['fs']
        self.wav_bit = conf['wav']['bit']
        self.wav_framems = conf['wav']['framems']
        self.wav_shiftms = conf['wav']['shiftms']
        self.wav_fftl = conf['wav']['fftl']

        self.f0_minf0 = conf['f0']['minf0']
        self.f0_maxf0 = conf['f0']['maxf0']
        assert self.f0_minf0 < self.f0_maxf0, \
            "should be minf0 < maxf0 in yml file"

        self.mcep_dim = conf['mcep']['dim']
        self.mcep_alpha = conf['mcep']['alpha']
        self.power_dim = conf['power']['threshold']
        self.analyzer = conf['analyzer']

    def print_params(self):
        pass


class PairYML(object):
    def __init__(self, ymlf):
        # open yml file
        with open(ymlf) as yf:
            conf = yaml.safe_load(yf)

        self.jnt_n_iter = conf['jnt']['n_iter']

        self.GMM_mcep_n_mix = conf['GMM']['mcep']['n_mix']
        self.GMM_mcep_n_iter = conf['GMM']['mcep']['n_iter']
        self.GMM_mcep_covtype = conf['GMM']['mcep']['covtype']
        self.GMM_mcep_cvtype = conf['GMM']['mcep']['cvtype']

    def _read_training_list(self):
        if not os.path.exists(self.trlist):
            raise Exception('training file list does not exist.')

        # read training list
        self.trfiles = []
        with open(self.trlist, 'r') as f:
            for line in f:
                self.trfiles.append(line.rstrip().split(" "))

    def _read_evaluation_list(self):
        if not os.path.exists(self.evlist):
            raise Exception('evaluation file list does not exist.')

        self.evfiles = []
        with open(self.evlist, 'r') as f:
            for line in f:
                self.evfiles.append(line.rstrip())

    def print_params(self):
        pass
# -*- coding: utf-8 -*-

from __future__ import division, print_function, absolute_import

import os

import yaml


class SpeakerYML(object):
    def __init__(self, ymlf):
        # open yml file
        with open(ymlf) as yf:
            conf = yaml.safe_load(yf)

        # read parameter from yml file
        self.wav_fs = conf['wav']['fs']
        self.wav_bit = conf['wav']['bit']
        self.wav_framems = conf['wav']['framems']
        self.wav_shiftms = conf['wav']['shiftms']
        self.wav_fftl = conf['wav']['fftl']

        self.f0_minf0 = conf['f0']['minf0']
        self.f0_maxf0 = conf['f0']['maxf0']
        assert self.f0_minf0 < self.f0_maxf0, \
            "should be minf0 < maxf0 in yml file"

        self.mcep_dim = conf['mcep']['dim']
        self.mcep_alpha = conf['mcep']['alpha']
        self.power_dim = conf['power']['threshold']
        self.analyzer = conf['analyzer']

    def print_params(self):
        pass


class PairYML(object):
    def __init__(self, ymlf):
        # open yml file
        with open(ymlf) as yf:
            conf = yaml.safe_load(yf)

        self.jnt_n_iter = conf['jnt']['n_iter']

        self.GMM_mcep_n_mix = conf['GMM']['mcep']['n_mix']
        self.GMM_mcep_n_iter = conf['GMM']['mcep']['n_iter']
        self.GMM_mcep_covtype = conf['GMM']['mcep']['covtype']
        self.GMM_mcep_cvtype = conf['GMM']['mcep']['cvtype']

        self.GMM_bandap_n_mix = conf['GMM']['bandap']['n_mix']
        self.GMM_bandap_n_iter = conf['GMM']['bandap']['n_iter']
        self.GMM_bandap_covtype = conf['GMM']['bandap']['covtype']
        self.GMM_bandap_cvtype = conf['GMM']['bandap']['cvtype']

    def _read_training_list(self):
        if not os.path.exists(self.trlist):
            raise Exception('training file list does not exist.')

        # read training list
        self.trfiles = []
        with open(self.trlist, 'r') as f:
            for line in f:
                self.trfiles.append(line.rstrip().split(" "))

    def _read_evaluation_list(self):
        if not os.path.exists(self.evlist):
            raise Exception('evaluation file list does not exist.')

        self.evfiles = []
        with open(self.evlist, 'r') as f:
            for line in f:
                self.evfiles.append(line.rstrip())

    def print_params(self):
        pass
Python
0.000005
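With the bandap parser lines removed, those keys simply stop being read; a small sketch of how such a config looks to yaml.safe_load (the inline YAML here is invented for illustration):

import yaml

conf = yaml.safe_load("GMM:\n  mcep:\n    n_mix: 32\n")
print(conf['GMM']['mcep']['n_mix'])  # 32
print('bandap' in conf['GMM'])       # False -- accessing it would raise KeyError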
7e16a9feb88023a03363aee5be552a2f15b825fc
Fix wrong color for the waiting status
utils/templatetags/submission.py
utils/templatetags/submission.py
# coding=utf-8
def translate_result(value):
    results = {
        0: "Accepted",
        1: "Runtime Error",
        2: "Time Limit Exceeded",
        3: "Memory Limit Exceeded",
        4: "Compile Error",
        5: "Format Error",
        6: "Wrong Answer",
        7: "System Error",
        8: "Waiting"
    }
    return results[value]


def translate_id(submission_item):
    return submission_item["_id"]


def translate_language(value):
    return {1: "C", 2: "C++", 3: "Java"}[value]


def translate_result_class(value):
    if value == 0:
        return "success"
    elif value == 8:
        return "info"
    return "danger"


from django import template

register = template.Library()

register.filter("translate_result", translate_result)
register.filter("translate_id", translate_id)
register.filter("translate_language", translate_language)
register.filter("translate_result_class", translate_result_class)
# coding=utf-8
def translate_result(value):
    results = {
        0: "Accepted",
        1: "Runtime Error",
        2: "Time Limit Exceeded",
        3: "Memory Limit Exceeded",
        4: "Compile Error",
        5: "Format Error",
        6: "Wrong Answer",
        7: "System Error",
        8: "Waiting"
    }
    return results[value]


def translate_id(submission_item):
    return submission_item["_id"]


def translate_language(value):
    return {1: "C", 2: "C++", 3: "Java"}[value]


def translate_result_class(value):
    if value == 0:
        return "success"
    elif value == "8":
        return "info"
    return "danger"


from django import template

register = template.Library()

register.filter("translate_result", translate_result)
register.filter("translate_id", translate_id)
register.filter("translate_language", translate_language)
register.filter("translate_result_class", translate_result_class)
Python
0.000007
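The bug fixed above is a type mismatch: the template filter receives an int, and an int never compares equal to a str in Python.

value = 8
print(value == "8")  # False -- the old comparison, so Waiting fell through to "danger"
print(value == 8)    # True  -- the fixed comparison returns "info"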
d17a88ac9ef8e3806c7ac60d31df62a1041939cb
Add sum_of_spreads
muv/spatial.py
muv/spatial.py
""" Spatial statistics. """ __author__ = "Steven Kearnes" __copyright__ = "Copyright 2014, Stanford University" __license__ = "3-clause BSD" import numpy as np def spread(d, t): """ Calculate the spread between two sets of compounds. Given a matrix containing distances between two sets of compounds, A and B, calculate the fraction of compounds in set A that are closer than t to any compound in set B. Parameters ---------- d : ndarray Distance matrix with compounds from set A on first axis. t : float Distance threshold. """ s = np.mean(np.any(d < t, axis=1)) return s def sum_of_spreads(d, coeff, min_t=0, max_t=3, step=None): """ Calculate the sum of spreads across a range of distance thresholds. Parameters ---------- d : ndarray Distance matrix with compounds from set A on first axis. coeff : float Coefficient used to rescale distance thresholds. min_t : float, optional (default 0) Minimum distance threshold (before rescaling). max_t : float, optional (default 3) Maximum distance threshold (before rescaling). step : float, optional Step size for determining values to sample between min_t and max_t. If not provided, defaults to max_t / 500. """ if step is None: step = max_t / 500. n_steps = int((max_t - min_t) / step) thresholds = coeff * np.linspace(min_t, max_t, n_steps) ss = np.sum([spread(d, t) for t in thresholds]) return ss
""" Spatial statistics. """ __author__ = "Steven Kearnes" __copyright__ = "Copyright 2014, Stanford University" __license__ = "3-clause BSD" import numpy as np def spread(d, t): """ Calculate the spread between two sets of compounds. Given a matrix containing distances between two sets of compounds, A and B, calculate the fraction of compounds in set A that are closer than t to any compound in set B. Parameters ---------- d : ndarray Distance matrix with compounds from set A on first axis. t : float Distance threshold. """ p = np.mean(np.any(d < t, axis=1)) return p
Python
0.998996
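A small worked example of spread() on a toy distance matrix (the values are invented):

import numpy as np

d = np.array([[0.5, 2.0],
              [1.5, 1.2],
              [3.0, 4.0]])
# fraction of set-A rows with any distance below t=1.0: only the first row qualifies
print(np.mean(np.any(d < 1.0, axis=1)))  # 0.3333...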
e05736cd36bc595070dda78e91bcb1b4bcfd983c
Remove deprecated usage of `reflect` constructor param
microcosm_postgres/operations.py
microcosm_postgres/operations.py
""" Common database operations. """ from sqlalchemy import MetaData from sqlalchemy.exc import ProgrammingError from microcosm_postgres.migrate import main from microcosm_postgres.models import Model def stamp_head(graph): """ Stamp the database with the current head revision. """ main(graph, "stamp", "head") def get_current_head(graph): """ Get the current database head revision, if any. """ session = new_session(graph) try: result = session.execute("SELECT version_num FROM alembic_version") except ProgrammingError: return None else: return result.scalar() finally: session.close() def create_all(graph): """ Create all database tables. """ head = get_current_head(graph) if head is None: Model.metadata.create_all(graph.postgres) stamp_head(graph) def drop_all(graph): """ Drop all database tables. """ Model.metadata.drop_all(graph.postgres) drop_alembic_table(graph) def drop_alembic_table(graph): """ Drop the alembic version table. """ try: graph.postgres.execute("DROP TABLE alembic_version;") except ProgrammingError: return False else: return True # Cached database metadata instance _metadata = None def recreate_all(graph): """ Drop and add back all database tables, or reset all data associated with a database. Intended mainly for testing, where a test database may either need to be re-initialized or cleared out between tests """ global _metadata if _metadata is None: # First-run, the test database/metadata needs to be initialized drop_all(graph) create_all(graph) _metadata = MetaData(bind=graph.postgres) _metadata.reflect() return # Otherwise, truncate all existing tables connection = graph.postgres.connect() transaction = connection.begin() for table in reversed(_metadata.sorted_tables): connection.execute(table.delete()) transaction.commit() def new_session(graph, expire_on_commit=False): """ Create a new session. """ return graph.sessionmaker(expire_on_commit=expire_on_commit)
""" Common database operations. """ from sqlalchemy import MetaData from sqlalchemy.exc import ProgrammingError from microcosm_postgres.migrate import main from microcosm_postgres.models import Model def stamp_head(graph): """ Stamp the database with the current head revision. """ main(graph, "stamp", "head") def get_current_head(graph): """ Get the current database head revision, if any. """ session = new_session(graph) try: result = session.execute("SELECT version_num FROM alembic_version") except ProgrammingError: return None else: return result.scalar() finally: session.close() def create_all(graph): """ Create all database tables. """ head = get_current_head(graph) if head is None: Model.metadata.create_all(graph.postgres) stamp_head(graph) def drop_all(graph): """ Drop all database tables. """ Model.metadata.drop_all(graph.postgres) drop_alembic_table(graph) def drop_alembic_table(graph): """ Drop the alembic version table. """ try: graph.postgres.execute("DROP TABLE alembic_version;") except ProgrammingError: return False else: return True # Cached database metadata instance _metadata = None def recreate_all(graph): """ Drop and add back all database tables, or reset all data associated with a database. Intended mainly for testing, where a test database may either need to be re-initialized or cleared out between tests """ global _metadata if _metadata is None: # First-run, the test database/metadata needs to be initialized drop_all(graph) create_all(graph) _metadata = MetaData(bind=graph.postgres, reflect=True) return # Otherwise, truncate all existing tables connection = graph.postgres.connect() transaction = connection.begin() for table in reversed(_metadata.sorted_tables): connection.execute(table.delete()) transaction.commit() def new_session(graph, expire_on_commit=False): """ Create a new session. """ return graph.sessionmaker(expire_on_commit=expire_on_commit)
Python
0
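The deprecation being removed: MetaData(..., reflect=True) becomes an explicit reflect() call. A sketch against an in-memory engine (note that MetaData(bind=...) is itself deprecated in SQLAlchemy 1.4+, so this mirrors the record's older API):

from sqlalchemy import MetaData, create_engine

engine = create_engine('sqlite://')
metadata = MetaData(bind=engine)
metadata.reflect()  # replaces the deprecated reflect=True constructor argument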
78c5ef063a82d707b30eed4a6e02fcbc8976f4df
move sort code to the end, so the initial result will be sorted too
django_project/feti/views/landing_page.py
django_project/feti/views/landing_page.py
# coding=utf-8
"""FETI landing page view."""

__author__ = 'Christian Christelis <[email protected]>'
__date__ = '04/2015'
__license__ = "GPL"
__copyright__ = 'kartoza.com'

from collections import OrderedDict

from haystack.query import SearchQuerySet

from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext

from feti.models.campus import Campus
from feti.models.course import Course


def landing_page(request):
    """Serves the FETI landing page.

    :param request: A django request object.
    :type request: request

    :returns: Returns the landing page.
    :rtype: HttpResponse
    """
    # sort the campus alphabetically
    def campus_key(item):
        return item[0].long_description.strip().lower()

    search_terms = ''
    course_dict = OrderedDict()
    errors = None
    if request.GET:
        search_terms = request.GET.get('search_terms')
        if search_terms:
            campuses = SearchQuerySet().filter(content=search_terms).models(
                Campus)
            courses = SearchQuerySet().filter(content=search_terms).models(
                Course)
            for campus in [c.object for c in campuses]:
                if campus.incomplete:
                    continue
                course_dict[campus] = campus.courses.all()
            for course in [c.object for c in courses]:
                for campus in course.campus_set.all():
                    if campus in course_dict:
                        if course not in course_dict[campus]:
                            course_dict[campus].append(course)
                    else:
                        course_dict[campus] = [course]
        else:
            campuses = Campus.objects.filter(_complete=True).order_by(
                '_long_description')
            for campus in campuses:
                course_dict[campus] = campus.courses.all()
    else:
        campuses = Campus.objects.filter(_complete=True).order_by(
            '_long_description')
        for campus in campuses:
            course_dict[campus] = campus.courses.all()
    course_dict = OrderedDict(
        sorted(course_dict.items(), key=campus_key))
    context = {
        'course_dict': course_dict,
        'search_terms': search_terms,
        'errors': errors
    }
    return render(
        request,
        'feti/feti.html',
        context_instance=RequestContext(request, context))
# coding=utf-8
"""FETI landing page view."""
__author__ = 'Christian Christelis <[email protected]>'
__date__ = '04/2015'
__license__ = "GPL"
__copyright__ = 'kartoza.com'

from collections import OrderedDict

from haystack.query import SearchQuerySet
from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext

from feti.models.campus import Campus
from feti.models.course import Course


def landing_page(request):
    """Serves the FETI landing page.

    :param request: A django request object.
    :type request: request

    :returns: Returns the landing page.
    :rtype: HttpResponse
    """
    # sort the campus alphabetically
    def campus_key(item):
        return item[0].long_description

    search_terms = ''
    course_dict = OrderedDict()
    errors = None
    if request.GET:
        search_terms = request.GET.get('search_terms')
        if search_terms:
            campuses = SearchQuerySet().filter(content=search_terms).models(
                Campus)
            courses = SearchQuerySet().filter(content=search_terms).models(
                Course)
            for campus in [c.object for c in campuses]:
                if campus.incomplete:
                    continue
                course_dict[campus] = campus.courses.all()
            for course in [c.object for c in courses]:
                for campus in course.campus_set.all():
                    if campus in course_dict:
                        if course not in course_dict[campus]:
                            course_dict[campus].append(course)
                    else:
                        course_dict[campus] = [course]
            course_dict = OrderedDict(
                sorted(course_dict.items(), key=campus_key))
        else:
            campuses = Campus.objects.filter(_complete=True).order_by(
                '_long_description')
            for campus in campuses:
                course_dict[campus] = campus.courses.all()
    else:
        campuses = Campus.objects.filter(_complete=True).order_by(
            '_long_description')
        for campus in campuses:
            course_dict[campus] = campus.courses.all()

    context = {
        'course_dict': course_dict,
        'search_terms': search_terms,
        'errors': errors
    }
    return render(
        request,
        'feti/feti.html',
        context_instance=RequestContext(request, context))
Python
0
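A minimal sketch, independent of the Django models above, of the OrderedDict(sorted(...)) idiom that the commit moves to the end of the view; the campus names here are made up.

from collections import OrderedDict

course_dict = OrderedDict([("b campus ", ["welding"]), ("A Campus", ["nursing"])])

def campus_key(item):
    # item is a (campus, courses) pair; sort case-insensitively on the campus
    return item[0].strip().lower()

course_dict = OrderedDict(sorted(course_dict.items(), key=campus_key))
print(list(course_dict))  # ['A Campus', 'b campus ']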
c7f8fd75dd5b41a059b65e9cea54d875d1f57655
Change self to PortStatCollector.
src/collectors/portstat/portstat.py
src/collectors/portstat/portstat.py
""" The PortStatCollector collects metrics about ports listed in config file. ##### Dependencies * psutil """ from collections import Counter import psutil import diamond.collector class PortStatCollector(diamond.collector.Collector): def __init__(self, *args, **kwargs): super(PortStatCollector, self).__init__(*args, **kwargs) self.ports = {} for port_name, cfg in self.config['port'].items(): port_cfg = {} for key in ('number',): port_cfg[key] = cfg.get(key, []) self.ports[port_name] = port_cfg def get_default_config_help(self): config_help = super(PortStatCollector, self).get_default_config_help() config_help.update({ }) return config_help def get_default_config(self): config = super(PortStatCollector, self).get_default_config() config.update({ 'path': 'port', 'port': {}, }) return config @staticmethod def get_port_stats(port): """ Iterate over connections and count states for specified port :param port: port for which stats are collected :return: Counter with port states """ cnts = Counter() for c in psutil.net_connections(): c_port = c.laddr[1] if c_port != port: continue status = c.status.lower() cnts[status] += 1 return cnts def collect(self): """ Overrides the Collector.collect method """ for port_name, port_cfg in self.ports.iteritems(): port = int(port_cfg['number']) stats = PortStatCollector.get_port_stats(port) for stat_name, stat_value in stats.iteritems(): metric_name = '%s.%s' % (port_name, stat_name) self.publish(metric_name, stat_value)
""" The PortStatCollector collects metrics about ports listed in config file. ##### Dependencies * psutil """ from collections import Counter import psutil import diamond.collector class PortStatCollector(diamond.collector.Collector): def __init__(self, *args, **kwargs): super(PortStatCollector, self).__init__(*args, **kwargs) self.ports = {} for port_name, cfg in self.config['port'].items(): port_cfg = {} for key in ('number',): port_cfg[key] = cfg.get(key, []) self.ports[port_name] = port_cfg def get_default_config_help(self): config_help = super(PortStatCollector, self).get_default_config_help() config_help.update({ }) return config_help def get_default_config(self): config = super(PortStatCollector, self).get_default_config() config.update({ 'path': 'port', 'port': {}, }) return config @staticmethod def get_port_stats(port): """ Iterate over connections and count states for specified port :param port: port for which stats are collected :return: Counter with port states """ cnts = Counter() for c in psutil.net_connections(): c_port = c.laddr[1] if c_port != port: continue status = c.status.lower() cnts[status] += 1 return cnts def collect(self): """ Overrides the Collector.collect method """ for port_name, port_cfg in self.ports.iteritems(): port = int(port_cfg['number']) stats = self.get_port_stats(port) for stat_name, stat_value in stats.iteritems(): metric_name = '%s.%s' % (port_name, stat_name) self.publish(metric_name, stat_value)
Python
0
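A minimal standalone sketch of the per-port state counting that get_port_stats performs above, assuming psutil is installed; the port number is arbitrary.

from collections import Counter

import psutil

def port_states(port):
    counts = Counter()
    for conn in psutil.net_connections():
        # laddr is an (ip, port) pair; it can be empty for some sockets
        if conn.laddr and conn.laddr[1] == port:
            counts[conn.status.lower()] += 1
    return counts

print(port_states(22))  # e.g. Counter({'listen': 1})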
0744dba6a52c42dbe6f9ba360e5311a1f90c3550
Fix python 3 compatibility issue in DNSimple driver.
libcloud/common/dnsimple.py
libcloud/common/dnsimple.py
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcloud.utils.py3 import httplib

from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.base import JsonResponse


class DNSimpleDNSResponse(JsonResponse):

    def success(self):
        """
        Determine if our request was successful.

        The meaning of this can be arbitrary; did we receive OK status? Did
        the node get created? Were we authenticated?

        :rtype: ``bool``
        :return: ``True`` or ``False``
        """
        # response.success() only checks for 200 and 201 codes. Should we
        # add 204?
        return self.status in [httplib.OK, httplib.CREATED,
                               httplib.NO_CONTENT]


class DNSimpleDNSConnection(ConnectionUserAndKey):
    host = 'api.dnsimple.com'
    responseCls = DNSimpleDNSResponse

    def add_default_headers(self, headers):
        """
        Add headers that are necessary for every request

        This method adds ``token`` to the request.
        """
        # TODO: fijarse sobre que info se paso como parametro y en base
        # a esto, fijar el header
        headers['X-DNSimple-Token'] = '%s:%s' % (self.user_id, self.key)
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'
        return headers
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import httplib

from libcloud.common.base import ConnectionUserAndKey
from libcloud.common.base import JsonResponse


class DNSimpleDNSResponse(JsonResponse):

    def success(self):
        """
        Determine if our request was successful.

        The meaning of this can be arbitrary; did we receive OK status? Did
        the node get created? Were we authenticated?

        :rtype: ``bool``
        :return: ``True`` or ``False``
        """
        # response.success() only checks for 200 and 201 codes. Should we
        # add 204?
        return self.status in [httplib.OK, httplib.CREATED,
                               httplib.NO_CONTENT]


class DNSimpleDNSConnection(ConnectionUserAndKey):
    host = 'api.dnsimple.com'
    responseCls = DNSimpleDNSResponse

    def add_default_headers(self, headers):
        """
        Add headers that are necessary for every request

        This method adds ``token`` to the request.
        """
        # TODO: fijarse sobre que info se paso como parametro y en base
        # a esto, fijar el header
        headers['X-DNSimple-Token'] = '%s:%s' % (self.user_id, self.key)
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'
        return headers
Python
0
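The fix above swaps a bare `import httplib` for libcloud's compatibility shim. A minimal sketch of the idiom behind such shims: Python 3 renamed httplib to http.client, so a guarded import keeps the rest of the module version-agnostic.

try:
    import httplib  # Python 2
except ImportError:
    import http.client as httplib  # Python 3

print(httplib.OK, httplib.CREATED, httplib.NO_CONTENT)  # 200 201 204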
725b246a0bbb437a5a0efeb16b58d3942f3b14cc
Update the example client.
examples/client.py
examples/client.py
from twisted.internet import defer, endpoints, task

from txjason.netstring import JSONRPCClientFactory
from txjason.client import JSONRPCClientError

client = JSONRPCClientFactory('127.0.0.1', 7080)


@defer.inlineCallbacks
def main(reactor, description):
    endpoint = endpoints.clientFromString(reactor, description)
    client = JSONRPCClientFactory(endpoint)
    try:
        r = yield client.callRemote('bar.foo')
    except JSONRPCClientError as e:
        print e
    r = yield client.callRemote('bar.add', 1, 2)
    print "add result: %s" % str(r)
    r = yield client.callRemote('bar.whoami')
    print "whaomi result: %s" % str(r)


task.react(main, ['tcp:127.0.0.1:7080'])
from twisted.internet import reactor, defer
from txjason.netstring import JSONRPCClientFactory
from txjason.client import JSONRPCClientError

client = JSONRPCClientFactory('127.0.0.1', 7080)


@defer.inlineCallbacks
def stuff():
    try:
        r = yield client.callRemote('bar.foo')
    except JSONRPCClientError as e:
        print e
    r = yield client.callRemote('bar.add', 1, 2)
    print "add result: %s" % str(r)
    r = yield client.callRemote('bar.whoami')
    print "whaomi result: %s" % str(r)

reactor.callWhenRunning(stuff)
reactor.run()
Python
0
5dddadb98340fec6afda80fd1a8ee1eda907b60a
print exports to terminal
examples/export.py
examples/export.py
""" Demonstrates export console output """ from rich.console import Console from rich.table import Table console = Console(record=True) def print_table(): table = Table(title="Star Wars Movies") table.add_column("Released", style="cyan", no_wrap=True) table.add_column("Title", style="magenta") table.add_column("Box Office", justify="right", style="green") table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690") table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347") table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889") table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889") console.print(table) # Prints table print_table() # Get console output as text file1 = "table_export_plaintext.txt" text = console.export_text() with open(file1, "w") as file: file.write(text) print(f"Exported console output as plain text to {file1}") # Calling print_table again because console output buffer # is flushed once export function is called print_table() # Get console output as html # use clear=False so output is not flushed after export file2 = "table_export_html.html" html = console.export_html(clear=False) with open(file2, "w") as file: file.write(html) print(f"Exported console output as html to {file2}") # Export text output to table_export.txt file3 = "table_export_plaintext2.txt" console.save_text(file3, clear=False) print(f"Exported console output as plain text to {file3}") # Export html output to table_export.html file4 = "table_export_html2.html" console.save_html(file4) print(f"Exported console output as html to {file4}")
""" Demonstrates export console output """ from rich.console import Console from rich.table import Table console = Console(record=True) def print_table(): table = Table(title="Star Wars Movies") table.add_column("Released", style="cyan", no_wrap=True) table.add_column("Title", style="magenta") table.add_column("Box Office", justify="right", style="green") table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690") table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347") table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889") table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889") console.print(table, justify="center") # Prints table print_table() # Get console output as text text = console.export_text() with open("plaintext_export.txt", "w") as file: file.write(text) # Calling print_table again because console output buffer # is flushed once export function is called print_table() # Get console output as html # use clear=False so output is not flushed after export html = console.export_html(clear=False) with open("html_export.html", "w") as file: file.write(html) # Export text output to table_export.txt console.save_text("rich_export.txt", clear=False) # Export html output to table_export.html console.save_html("rich_export.html")
Python
0
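A minimal sketch of the record/export flow the example above relies on, assuming the rich package is installed: output printed while record=True is buffered and can be exported afterwards.

from rich.console import Console

console = Console(record=True)
console.print("[bold magenta]hello[/bold magenta]")

text = console.export_text(clear=False)  # keep the buffer for a second export
html = console.export_html()             # flushes the buffer by default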
1741c7258ebdcef412442cebab33409290496df0
Add network example
IoT/iot_utils.py
IoT/iot_utils.py
from __future__ import print_function
import sys, signal, atexit
import json

__author__ = 'KT Kirk'
__all__ = ['keys', 'atexit', 'signal']


## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
    raise SystemExit


# This function lets you run code on exit, including functions from myUVSensor
def exitHandler():
    print("Exiting")
    try:
        sys.exit(0)
    except KeyError:
        pass


# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)

# Load data.sparkfun.com keys file
with open("keys_n1YRX98dq9C6X0LrZdvD.json") as json_file:
    keys = json.load(json_file)
from __future__ import print_function
import sys, signal, atexit
import json

__author__ = 'KT Kirk'
__all__ = ['keys', 'atexit', 'signal']


## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
    raise SystemExit


# This function lets you run code on exit, including functions from myUVSensor
def exitHandler():
    print("Exiting")
    try:
        sys.exit(0)
    except KeyError:
        pass


# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)

# Load data.sparkfun.com keys file
with open("keys_n1YRX98dq9C6X0LrZdvD.json") as json_file:
    keys = json.load(json_file)
Python
0.000002
b07243a6fb11dbbd487ba37620f7c8f4fc89449a
bump version to v1.10.5
ndd/package.py
ndd/package.py
# -*- coding: utf-8 -*-
"""Template package file"""
__title__ = 'ndd'
__version__ = '1.10.5'
__author__ = 'Simone Marsili'
__summary__ = ''
__url__ = 'https://github.com/simomarsili/ndd'
__email__ = '[email protected]'
__license__ = 'BSD 3-Clause'
__copyright__ = 'Copyright (c) 2020, Simone Marsili'
__classifiers__ = [
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
]
# -*- coding: utf-8 -*-
"""Template package file"""
__title__ = 'ndd'
__version__ = '1.10.4'
__author__ = 'Simone Marsili'
__summary__ = ''
__url__ = 'https://github.com/simomarsili/ndd'
__email__ = '[email protected]'
__license__ = 'BSD 3-Clause'
__copyright__ = 'Copyright (c) 2020, Simone Marsili'
__classifiers__ = [
    'Development Status :: 3 - Alpha',
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
]
Python
0
7abd9b977368a189ca3f298e566dd1dd5b7a66d1
Update constant.py
vnpy/trader/constant.py
vnpy/trader/constant.py
""" General constant string used in VN Trader. """ from enum import Enum class Direction(Enum): """ Direction of order/trade/position. """ LONG = "多" SHORT = "空" NET = "净" class Offset(Enum): """ Offset of order/trade. """ NONE = "" OPEN = "开" CLOSE = "平" CLOSETODAY = "平今" CLOSEYESTERDAY = "平昨" class Status(Enum): """ Order status. """ SUBMITTING = "提交中" NOTTRADED = "未成交" PARTTRADED = "部分成交" ALLTRADED = "全部成交" CANCELLED = "已撤销" REJECTED = "拒单" class Product(Enum): """ Product class. """ EQUITY = "股票" FUTURES = "期货" OPTION = "期权" INDEX = "指数" FOREX = "外汇" SPOT = "现货" ETF = "ETF" BOND = "债券" WARRANT = "权证" SPREAD = "价差" FUND = "基金" class OrderType(Enum): """ Order type. """ LIMIT = "限价" MARKET = "市价" STOP = "STOP" FAK = "FAK" FOK = "FOK" class OptionType(Enum): """ Option type. """ CALL = "看涨期权" PUT = "看跌期权" class Exchange(Enum): """ Exchange. """ # Chinese CFFEX = "CFFEX" # China Financial Futures Exchange SHFE = "SHFE" # Shanghai Futures Exchange CZCE = "CZCE" # Zhengzhou Commodity Exchange DCE = "DCE" # Dalian Commodity Exchange INE = "INE" # Shanghai International Energy Exchange SSE = "SSE" # Shanghai Stock Exchange SZSE = "SZSE" # Shenzhen Stock Exchange SGE = "SGE" # Shanghai Gold Exchange WXE = "WXE" # Wuxi Steel Exchange # Global SMART = "SMART" # Smart Router for US stocks NYMEX = "NYMEX" # New York Mercantile Exchange COMEX = "COMEX" # a division of theNew York Mercantile Exchange GLOBEX = "GLOBEX" # Globex of CME IDEALPRO = "IDEALPRO" # Forex ECN of Interactive Brokers CME = "CME" # Chicago Mercantile Exchange ICE = "ICE" # Intercontinental Exchange SEHK = "SEHK" # Stock Exchange of Hong Kong HKFE = "HKFE" # Hong Kong Futures Exchange SGX = "SGX" # Singapore Global Exchange CBOT = "CBT" # Chicago Board of Trade DME = "DME" # Dubai Mercantile Exchange EUREX = "EUX" # Eurex Exchange APEX = "APEX" # Asia Pacific Exchange LME = "LME" # London Metal Exchange BMD = "BMD" # Bursa Malaysia Derivatives TOCOM = "TOCOM" # Tokyo Commodity Exchange EUNX = "EUNX" # Euronext Exchange KRX = "KRX" # Korean Exchange # CryptoCurrency BITMEX = "BITMEX" OKEX = "OKEX" HUOBI = "HUOBI" BITFINEX = "BITFINEX" BINANCE = "BINANCE" class Currency(Enum): """ Currency. """ USD = "USD" HKD = "HKD" CNY = "CNY" class Interval(Enum): """ Interval of bar data. """ MINUTE = "1m" HOUR = "1h" DAILY = "d" WEEKLY = "w"
""" General constant string used in VN Trader. """ from enum import Enum class Direction(Enum): """ Direction of order/trade/position. """ LONG = "多" SHORT = "空" NET = "净" class Offset(Enum): """ Offset of order/trade. """ NONE = "" OPEN = "开" CLOSE = "平" CLOSETODAY = "平今" CLOSEYESTERDAY = "平昨" class Status(Enum): """ Order status. """ SUBMITTING = "提交中" NOTTRADED = "未成交" PARTTRADED = "部分成交" ALLTRADED = "全部成交" CANCELLED = "已撤销" REJECTED = "拒单" class Product(Enum): """ Product class. """ EQUITY = "股票" FUTURES = "期货" OPTION = "期权" INDEX = "指数" FOREX = "外汇" SPOT = "现货" ETF = "ETF" BOND = "债券" WARRANT = "权证" SPREAD = "价差" FUND = "基金" class OrderType(Enum): """ Order type. """ LIMIT = "限价" MARKET = "市价" STOP = "STOP" FAK = "FAK" FOK = "FOK" class OptionType(Enum): """ Option type. """ CALL = "看涨期权" PUT = "看跌期权" class Exchange(Enum): """ Exchange. """ # Chinese CFFEX = "CFFEX" # China Financial Futures Exchange SHFE = "SHFE" # Shanghai Futures Exchange CZCE = "CZCE" # Zhengzhou Commodity Exchange DCE = "DCE" # Dalian Commodity Exchange INE = "INE" # Shanghai International Energy Exchange SSE = "SSE" # Shanghai Stock Exchange SZSE = "SZSE" # Shenzhen Stock Exchange SGE = "SGE" # Shanghai Gold Exchange WXE = "WXE" # Wuxi Steel Exchange # Global SMART = "SMART" # Smart Router for US stocks NYMEX = "NYMEX" # New York Mercantile Exchange COMEX = "COMEX" # a division of theNew York Mercantile Exchange GLOBEX = "GLOBEX" # Globex of CME IDEALPRO = "IDEALPRO" # Forex ECN of Interactive Brokers CME = "CME" # Chicago Mercantile Exchange ICE = "ICE" # Intercontinental Exchange SEHK = "SEHK" # Stock Exchange of Hong Kong HKFE = "HKFE" # Hong Kong Futures Exchange SGX = "SGX" # Singapore Global Exchange CBOT = "CBT" # Chicago Board of Trade DME = "DME" # Dubai Mercantile Exchange EUREX = "EUX" # Eurex Exchange APEX = "APEX" # Asia Pacific Exchange LME = "LME" # London Metal Exchange BMD = "BMD" # Bursa Malaysia Derivatives TOCOM = "TOCOM" # Tokyo Commodity Exchange EUNX = "EUNX" # Euronext Exchange KRX = "KRX" # Korean Exchange # CryptoCurrency BITMEX = "BITMEX" OKEX = "OKEX" HUOBI = "HUOBI" BITFINEX = "BITFINEX" class Currency(Enum): """ Currency. """ USD = "USD" HKD = "HKD" CNY = "CNY" class Interval(Enum): """ Interval of bar data. """ MINUTE = "1m" HOUR = "1h" DAILY = "d" WEEKLY = "w"
Python
0.000001
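A minimal sketch of how the Enum members added above behave: lookup by value and by name both work, which is how a string such as "BINANCE" round-trips through the constant.

from enum import Enum

class Exchange(Enum):
    BINANCE = "BINANCE"
    OKEX = "OKEX"

assert Exchange("BINANCE") is Exchange.BINANCE  # lookup by value
assert Exchange["OKEX"].value == "OKEX"         # lookup by name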
5848a9c64744eacf8d90a86335e948ed17ef8346
Correct path to workflows
src/prepare_asaim/import_workflows.py
src/prepare_asaim/import_workflows.py
#!/usr/bin/env python

import os

from bioblend import galaxy

admin_email = os.environ.get('GALAXY_DEFAULT_ADMIN_USER', '[email protected]')
admin_pass = os.environ.get('GALAXY_DEFAULT_ADMIN_PASSWORD', 'admin')

url = "http://localhost:8080"

gi = galaxy.GalaxyInstance(url=url, email=admin_email, password=admin_pass)

wf = galaxy.workflows.WorkflowClient(gi)

wf.import_workflow_from_local_path('asaim_main_workflow.ga')
wf.import_workflow_from_local_path('asaim_taxonomic_result_comparative_analysis.ga')
wf.import_workflow_from_local_path('asaim_functional_result_comparative_analysis.ga')
wf.import_workflow_from_local_path('asaim_go_slim_terms_comparative_analysis.ga')
wf.import_workflow_from_local_path('asaim_taxonomically_related_functional_result_comparative_analysis.ga')
#!/usr/bin/env python

import os

from bioblend import galaxy

admin_email = os.environ.get('GALAXY_DEFAULT_ADMIN_USER', '[email protected]')
admin_pass = os.environ.get('GALAXY_DEFAULT_ADMIN_PASSWORD', 'admin')

url = "http://localhost:8080"

gi = galaxy.GalaxyInstance(url=url, email=admin_email, password=admin_pass)

wf = galaxy.workflows.WorkflowClient(gi)

wf.import_workflow_from_local_path('/home/galaxy/asaim_main_workflow.ga')
wf.import_workflow_from_local_path('/home/galaxy/asaim_taxonomic_result_comparative_analysis.ga')
wf.import_workflow_from_local_path('/home/galaxy/asaim_functional_result_comparative_analysis.ga')
wf.import_workflow_from_local_path('/home/galaxy/asaim_go_slim_terms_comparative_analysis.ga')
wf.import_workflow_from_local_path('/home/galaxy/asaim_taxonomically_related_functional_result_comparative_analysis.ga')
Python
0.000018
0d31cbfd3042a1e7255ed833715112504fe608ae
Revert types
dshin/nn/types.py
dshin/nn/types.py
""" TensorFlow type annotation aliases. """ import typing import tensorflow as tf Value = typing.Union[tf.Variable, tf.Tensor] Values = typing.Sequence[Value] Named = typing.Union[tf.Variable, tf.Tensor, tf.Operation] NamedSeq = typing.Sequence[Named] Tensors = typing.Sequence[tf.Tensor] Variables = typing.Sequence[tf.Variable] Operations = typing.Sequence[tf.Operation]
""" TensorFlow type annotation aliases. """ import typing import tensorflow as tf Value = (tf.Variable, tf.Tensor) Values = typing.Sequence[Value] Named = (tf.Variable, tf.Tensor, tf.Operation) NamedSeq = typing.Sequence[Named] Tensors = typing.Sequence[tf.Tensor] Variables = typing.Sequence[tf.Variable] Operations = typing.Sequence[tf.Operation]
Python
0.000001
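A minimal sketch of the practical difference between the two alias styles in this record, on the Python versions this code targets: a tuple of types works with isinstance(), while typing.Union is an annotation-only construct that isinstance() rejects.

import typing

ValueTuple = (int, str)               # usable in runtime checks
ValueUnion = typing.Union[int, str]   # usable only as a type hint

assert isinstance(3, ValueTuple)
try:
    isinstance(3, ValueUnion)
except TypeError:
    print("typing.Union cannot be used in isinstance() checks")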
8a3ae1b809d886f647f13574cc9b416b17c27b7c
Remove VERSION variable from api.py
duckduckpy/api.py
duckduckpy/api.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from __init__ import __version__
from collections import namedtuple

from duckduckpy.utils import camel_to_snake_case


SERVER_HOST = 'api.duckduckgo.com'
USER_AGENT = 'duckduckpy {0}'.format(__version__)

ICON_KEYS = set(['URL', 'Width', 'Height'])
RESULT_KEYS = set(['FirstURL', 'Icon', 'Result', 'Text'])
CONTENT_KEYS = set(['data_type', 'label', 'sort_order', 'value', 'wiki_order'])
META_KEYS = set(['data_type', 'label', 'value'])
INFOBOX_KEYS = set(['content', 'meta'])
RESPONSE_KEYS = set([
    'Redirect', 'Definition', 'ImageWidth', 'Infobox', 'RelatedTopics',
    'ImageHeight', 'Heading', 'Answer', 'AbstractText', 'Type',
    'ImageIsLogo', 'DefinitionSource', 'AbstractURL', 'Abstract',
    'DefinitionURL', 'Results', 'Entity', 'AnswerType', 'AbstractSource',
    'Image'])

camel_to_snake_case_set = lambda seq: set(map(camel_to_snake_case, seq))

Icon = namedtuple('Icon', camel_to_snake_case_set(ICON_KEYS))
Result = namedtuple('Result', camel_to_snake_case_set(RESULT_KEYS))
Content = namedtuple('Content', camel_to_snake_case_set(CONTENT_KEYS))
Meta = namedtuple('Meta', camel_to_snake_case_set(META_KEYS))
Infobox = namedtuple('Infobox', camel_to_snake_case_set(INFOBOX_KEYS))
Response = namedtuple('Response', camel_to_snake_case_set(RESPONSE_KEYS))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from collections import namedtuple

from utils import camel_to_snake_case


SERVER_HOST = 'api.duckduckgo.com'
VERSION = '0.1-alpha'
USER_AGENT = 'duckduckpy {0}'.format(VERSION)

ICON_KEYS = set(['URL', 'Width', 'Height'])
RESULT_KEYS = set(['FirstURL', 'Icon', 'Result', 'Text'])
CONTENT_KEYS = set(['data_type', 'label', 'sort_order', 'value', 'wiki_order'])
META_KEYS = set(['data_type', 'label', 'value'])
INFOBOX_KEYS = set(['content', 'meta'])
RESPONSE_KEYS = set([
    'Redirect', 'Definition', 'ImageWidth', 'Infobox', 'RelatedTopics',
    'ImageHeight', 'Heading', 'Answer', 'AbstractText', 'Type',
    'ImageIsLogo', 'DefinitionSource', 'AbstractURL', 'Abstract',
    'DefinitionURL', 'Results', 'Entity', 'AnswerType', 'AbstractSource',
    'Image'])

camel_to_snake_case_set = lambda seq: set(map(camel_to_snake_case, seq))

Icon = namedtuple('Icon', camel_to_snake_case_set(ICON_KEYS))
Result = namedtuple('Result', camel_to_snake_case_set(RESULT_KEYS))
Content = namedtuple('Content', camel_to_snake_case_set(CONTENT_KEYS))
Meta = namedtuple('Meta', camel_to_snake_case_set(META_KEYS))
Infobox = namedtuple('Infobox', camel_to_snake_case_set(INFOBOX_KEYS))
Response = namedtuple('Response', camel_to_snake_case_set(RESPONSE_KEYS))
Python
0.000002
ee5e9d09a02e52714291a44148be4722f8e495ac
Revert "Take Abode camera snapshot before fetching latest image" (#68626)
homeassistant/components/abode/camera.py
homeassistant/components/abode/camera.py
"""Support for Abode Security System cameras.""" from __future__ import annotations from datetime import timedelta from typing import Any, cast from abodepy.devices import CONST, AbodeDevice as AbodeDev from abodepy.devices.camera import AbodeCamera as AbodeCam import abodepy.helpers.timeline as TIMELINE import requests from requests.models import Response from homeassistant.components.camera import Camera from homeassistant.config_entries import ConfigEntry from homeassistant.core import Event, HomeAssistant from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.util import Throttle from . import AbodeDevice, AbodeSystem from .const import DOMAIN, LOGGER MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=90) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Set up Abode camera devices.""" data: AbodeSystem = hass.data[DOMAIN] entities = [] for device in data.abode.get_devices(generic_type=CONST.TYPE_CAMERA): entities.append(AbodeCamera(data, device, TIMELINE.CAPTURE_IMAGE)) async_add_entities(entities) class AbodeCamera(AbodeDevice, Camera): """Representation of an Abode camera.""" _device: AbodeCam def __init__(self, data: AbodeSystem, device: AbodeDev, event: Event) -> None: """Initialize the Abode device.""" AbodeDevice.__init__(self, data, device) Camera.__init__(self) self._event = event self._response: Response | None = None async def async_added_to_hass(self) -> None: """Subscribe Abode events.""" await super().async_added_to_hass() self.hass.async_add_executor_job( self._data.abode.events.add_timeline_callback, self._event, self._capture_callback, ) signal = f"abode_camera_capture_{self.entity_id}" self.async_on_remove(async_dispatcher_connect(self.hass, signal, self.capture)) def capture(self) -> bool: """Request a new image capture.""" return cast(bool, self._device.capture()) @Throttle(MIN_TIME_BETWEEN_UPDATES) def refresh_image(self) -> None: """Find a new image on the timeline.""" if self._device.refresh_image(): self.get_image() def get_image(self) -> None: """Attempt to download the most recent capture.""" if self._device.image_url: try: self._response = requests.get(self._device.image_url, stream=True) self._response.raise_for_status() except requests.HTTPError as err: LOGGER.warning("Failed to get camera image: %s", err) self._response = None else: self._response = None def camera_image( self, width: int | None = None, height: int | None = None ) -> bytes | None: """Get a camera image.""" self.refresh_image() if self._response: return self._response.content return None def turn_on(self) -> None: """Turn on camera.""" self._device.privacy_mode(False) def turn_off(self) -> None: """Turn off camera.""" self._device.privacy_mode(True) def _capture_callback(self, capture: Any) -> None: """Update the image with the device then refresh device.""" self._device.update_image_location(capture) self.get_image() self.schedule_update_ha_state() @property def is_on(self) -> bool: """Return true if on.""" return cast(bool, self._device.is_on)
"""Support for Abode Security System cameras.""" from __future__ import annotations from datetime import timedelta from typing import Any, cast from abodepy.devices import CONST, AbodeDevice as AbodeDev from abodepy.devices.camera import AbodeCamera as AbodeCam import abodepy.helpers.timeline as TIMELINE import requests from requests.models import Response from homeassistant.components.camera import Camera from homeassistant.config_entries import ConfigEntry from homeassistant.core import Event, HomeAssistant from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.util import Throttle from . import AbodeDevice, AbodeSystem from .const import DOMAIN, LOGGER MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=90) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Set up Abode camera devices.""" data: AbodeSystem = hass.data[DOMAIN] entities = [] for device in data.abode.get_devices(generic_type=CONST.TYPE_CAMERA): entities.append(AbodeCamera(data, device, TIMELINE.CAPTURE_IMAGE)) async_add_entities(entities) class AbodeCamera(AbodeDevice, Camera): """Representation of an Abode camera.""" _device: AbodeCam def __init__(self, data: AbodeSystem, device: AbodeDev, event: Event) -> None: """Initialize the Abode device.""" AbodeDevice.__init__(self, data, device) Camera.__init__(self) self._event = event self._response: Response | None = None async def async_added_to_hass(self) -> None: """Subscribe Abode events.""" await super().async_added_to_hass() self.hass.async_add_executor_job( self._data.abode.events.add_timeline_callback, self._event, self._capture_callback, ) signal = f"abode_camera_capture_{self.entity_id}" self.async_on_remove(async_dispatcher_connect(self.hass, signal, self.capture)) def capture(self) -> bool: """Request a new image capture.""" return cast(bool, self._device.capture()) @Throttle(MIN_TIME_BETWEEN_UPDATES) def refresh_image(self) -> None: """Find a new image on the timeline.""" if self._device.refresh_image(): self.get_image() def get_image(self) -> None: """Attempt to download the most recent capture.""" if self._device.image_url: try: self._response = requests.get(self._device.image_url, stream=True) self._response.raise_for_status() except requests.HTTPError as err: LOGGER.warning("Failed to get camera image: %s", err) self._response = None else: self._response = None def camera_image( self, width: int | None = None, height: int | None = None ) -> bytes | None: """Get a camera image.""" if not self.capture(): return None self.refresh_image() if self._response: return self._response.content return None def turn_on(self) -> None: """Turn on camera.""" self._device.privacy_mode(False) def turn_off(self) -> None: """Turn off camera.""" self._device.privacy_mode(True) def _capture_callback(self, capture: Any) -> None: """Update the image with the device then refresh device.""" self._device.update_image_location(capture) self.get_image() self.schedule_update_ha_state() @property def is_on(self) -> bool: """Return true if on.""" return cast(bool, self._device.is_on)
Python
0
70f588282e1777945e113e73dbca83f77355f0f9
Test git permission
driver/omni_driver.py
driver/omni_driver.py
import driver
import lib.lib as lib
from hardware.dmcc_motor import DMCCMotorSet


class OmniDriver(driver.Driver):
    #Vijay was here
    #Chad was here
import driver
import lib.lib as lib
from hardware.dmcc_motor import DMCCMotorSet


class OmniDriver(driver.Driver):
    #Vijay was here
Python
0
a3f12245163a9165f45f4ee97b6e4e67cdd29783
Update decipher.py
decipher.py
decipher.py
#
# decipher.py (c) Luis Hoderlein
#
# BUILT: Apr 21, 2016
#
# This program can brute force Cesarian ciphers
# It gives you all possible outputs, meaning you still have to chose the output you want
#

# imports
import string

# adds padding to make output inline
def pad(num):
    if num < 10:
        return "0"+str(num)
    else:
        return str(num)

# declare vars + ask for input
raw_txt = raw_input("Enter ciphertext: ")
raw_int = []
txt = ""
spaces = []

# make all lower case (necessary)
raw_txt = raw_txt.lower()

# log spaces + remove them
for i in range(0, len(raw_txt)):
    if raw_txt[i] != " ":
        txt = txt + raw_txt[i]
    else:
        spaces.append(i);

# turn chars into ints
for i in range(0, len(txt)):
    raw_int.append(string.lowercase.index(txt[i]))

# loop through every possible solution (26 of them), using i has cipher number
# and print all possible solution + add the spaces again
# to prevent some weird bug, possible int has to be reassigned every time
for i in range(0, 26):
    possible_int = []
    for j in range(0, len(raw_int)):
        possible_int.append(raw_int[j])
    possible_txt = ""
    for j in range(0, len(possible_int)):
        possible_int[j] = possible_int[j]+i
        if possible_int[j] >= 26:
            possible_int[j] = possible_int[j] - 26
        possible_txt = possible_txt + string.lowercase[possible_int[j]]
    del possible_int
    for j in range(0, len(spaces)):
        possible_txt = possible_txt[:spaces[j]] + " " +possible_txt[spaces[j]:]
    print "Solution "+pad(i)+" is: "+possible_txt
#
# decipher.py (c) Luis Hoderlein
#
# BUILT: Apr 21, 2016
#
# This program can brute force Cesarian ciphers
# It gives you all possible outputs, meaning you still have to chose the output you want
#

import string

def pad(num):
    if num < 10:
        return "0"+str(num)
    else:
        return str(num)

raw_txt = raw_input("Enter ciphertext: ")
raw_int = []
txt = ""
spaces = []

raw_txt = raw_txt.lower()

for i in range(0, len(raw_txt)):
    if raw_txt[i] != " ":
        txt = txt + raw_txt[i]
    else:
        spaces.append(i);

for i in range(0, len(txt)):
    raw_int.append(string.lowercase.index(txt[i]))

for i in range(0, 26):
    possible_int = []
    for j in range(0, len(raw_int)):
        possible_int.append(raw_int[j])
    possible_txt = ""
    for j in range(0, len(possible_int)):
        possible_int[j] = possible_int[j]+i
        if possible_int[j] >= 26:
            possible_int[j] = possible_int[j] - 26
        possible_txt = possible_txt + string.lowercase[possible_int[j]]
    del possible_int
    for j in range(0, len(spaces)):
        possible_txt = possible_txt[:spaces[j]] + " " +possible_txt[spaces[j]:]
    print "Solution "+pad(i)+" is "+possible_txt
Python
0
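A minimal Python 3 sketch of the same brute force in a more idiomatic form, assuming the usual lowercase alphabet: each of the 26 shifts is applied with a translation table, and spaces pass through untouched.

import string

def caesar_candidates(ciphertext):
    lower = string.ascii_lowercase
    for shift in range(26):
        # Map each letter to the letter `shift` positions further on.
        table = str.maketrans(lower, lower[shift:] + lower[:shift])
        yield shift, ciphertext.lower().translate(table)

for shift, candidate in caesar_candidates("uryyb jbeyq"):
    print("Solution %02d is: %s" % (shift, candidate))  # shift 13 -> hello world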
71e96782caff8543c2e859226bd0b77a79a55040
fix gate
e3nn_jax/_gate.py
e3nn_jax/_gate.py
from functools import partial

import jax
import jax.numpy as jnp

from e3nn_jax import IrrepsData, elementwise_tensor_product, scalar_activation
from e3nn_jax.util.decorators import overload_for_irreps_without_data


@partial(jax.jit, static_argnums=(1, 2, 3, 4))
def _gate(input: IrrepsData, even_act, odd_act, even_gate_act, odd_gate_act) -> IrrepsData:
    scalars, gated = input, None
    for j, (_, ir) in enumerate(input.irreps):
        if ir.l > 0:
            scalars, gated = input.split([j])
            break
    assert scalars.irreps.lmax == 0

    # No gates:
    if gated is None:
        return scalar_activation(scalars, [even_act if ir.p == 1 else odd_act for _, ir in scalars.irreps])

    # Get the scalar gates:
    gates = None
    for i in range(j + 1):
        if scalars.irreps[i:].num_irreps == gated.irreps.num_irreps:
            scalars, gates = scalars.split([i])
            break

    if gates is None:
        raise ValueError(f"Gate: did not manage to split the input {input.irreps} into scalars, gates ({scalars.irreps}) and gated ({gated.irreps}).")

    scalars = scalar_activation(scalars, [even_act if ir.p == 1 else odd_act for _, ir in scalars.irreps])
    gates = scalar_activation(gates, [even_gate_act if ir.p == 1 else odd_gate_act for _, ir in gates.irreps])

    return IrrepsData.cat([scalars, elementwise_tensor_product(gates, gated)])


@overload_for_irreps_without_data((0,))
def gate(input: IrrepsData, even_act=None, odd_act=None, even_gate_act=None, odd_gate_act=None) -> IrrepsData:
    r"""Gate activation function.

    The input is split into scalars that are activated separately, scalars that are used as gates, and non-scalars
    that are multiplied by the gates.

    List of assumptions:

    - The scalars are on the left side of the input.
    - The gate scalars are on the right side of the scalars.

    Args:
        input (IrrepsData): Input data.
        acts: The list of activation functions. Its length must be equal to the number of scalar blocks in the input.

    Returns:
        IrrepsData: Output data.

    Examples:
        The 3 even scalars are used as gates.
        >>> gate("12x0e + 3x0e + 2x1e + 1x2e")
        12x0e+2x1e+1x2e

        Odd scalars used as gates change the parity of the gated quantities:
        >>> gate("12x0e + 3x0o + 2x1e + 1x2e")
        12x0e+2x1o+1x2o

        Without anything to gate, all the scalars are activated:
        >>> gate("12x0e + 3x0o")
        12x0e+3x0o
    """
    assert isinstance(input, IrrepsData)
    if even_act is None:
        even_act = jax.nn.gelu
    if odd_act is None:
        odd_act = lambda x: (1 - jnp.exp(-x**2)) * x
    if even_gate_act is None:
        even_gate_act = jax.nn.sigmoid
    if odd_gate_act is None:
        odd_gate_act = jax.nn.tanh

    return _gate(input, even_act, odd_act, even_gate_act, odd_gate_act)
from functools import partial

import jax
import jax.numpy as jnp

from e3nn_jax import IrrepsData, elementwise_tensor_product, scalar_activation
from e3nn_jax.util.decorators import overload_for_irreps_without_data


@partial(jax.jit, static_argnums=(1, 2, 3, 4))
def _gate(input: IrrepsData, even_act, odd_act, even_gate_act, odd_gate_act) -> IrrepsData:
    # split l=0 vs l>0
    j = 0
    for j, (_, ir) in enumerate(input.irreps):
        if ir.l > 0:
            break
    scalars, gated = input.split([j])
    assert scalars.irreps.lmax == 0

    # apply scalar activation if there is no gate
    if gated.irreps.dim == 0:
        scalars = scalar_activation(scalars, [even_act if ir.p == 1 else odd_act for _, ir in scalars.irreps])
        return scalars

    # extract gates from scalars
    gates = None
    for i in range(j + 1):
        if scalars.irreps[i:].num_irreps == gated.irreps.num_irreps:
            scalars, gates = scalars.split([i])
            break

    if gates is None:
        raise ValueError(f"Gate: did not manage to split the input {input.irreps} into scalars, gates and gated.")

    scalars = scalar_activation(scalars, [even_act if ir.p == 1 else odd_act for _, ir in scalars.irreps])
    gates = scalar_activation(gates, [even_gate_act if ir.p == 1 else odd_gate_act for _, ir in gates.irreps])

    return IrrepsData.cat([scalars, elementwise_tensor_product(gates, gated)])


@overload_for_irreps_without_data((0,))
def gate(input: IrrepsData, even_act=None, odd_act=None, even_gate_act=None, odd_gate_act=None) -> IrrepsData:
    r"""Gate activation function.

    The input is split into scalars that are activated separately, scalars that are used as gates, and non-scalars
    that are multiplied by the gates.

    List of assumptions:

    - The scalars are on the left side of the input.
    - The gate scalars are on the right side of the scalars.

    Args:
        input (IrrepsData): Input data.
        acts: The list of activation functions. Its length must be equal to the number of scalar blocks in the input.

    Returns:
        IrrepsData: Output data.

    Examples:
        >>> gate("12x0e + 3x0e + 2x1e + 1x2e")
        12x0e+2x1e+1x2e
    """
    assert isinstance(input, IrrepsData)
    if even_act is None:
        even_act = jax.nn.gelu
    if odd_act is None:
        odd_act = lambda x: (1 - jnp.exp(-x**2)) * x
    if even_gate_act is None:
        even_gate_act = jax.nn.sigmoid
    if odd_gate_act is None:
        odd_gate_act = jax.nn.tanh

    return _gate(input, even_act, odd_act, even_gate_act, odd_gate_act)
Python
0.000001
f421b2997494ca546c6479e4246456e56b816e60
Add Robert EVT ID too
libpebble2/util/hardware.py
libpebble2/util/hardware.py
__author__ = 'katharine'


class PebbleHardware(object):
    UNKNOWN = 0
    TINTIN_EV1 = 1
    TINTIN_EV2 = 2
    TINTIN_EV2_3 = 3
    TINTIN_EV2_4 = 4
    TINTIN_V1_5 = 5
    BIANCA = 6
    SNOWY_EVT2 = 7
    SNOWY_DVT = 8
    SPALDING_EVT = 9
    BOBBY_SMILES = 10
    SPALDING = 11
    SILK_EVT = 12
    ROBERT_EVT = 13
    SILK = 14

    TINTIN_BB = 0xFF
    TINTIN_BB2 = 0xFE
    SNOWY_BB = 0xFD
    SNOWY_BB2 = 0xFC
    SPALDING_BB2 = 0xFB
    SILK_BB = 0xFA
    ROBERT_BB = 0xF9
    SILK_BB2 = 0xF8

    PLATFORMS = {
        UNKNOWN: 'unknown',
        TINTIN_EV1: 'aplite',
        TINTIN_EV2: 'aplite',
        TINTIN_EV2_3: 'aplite',
        TINTIN_EV2_4: 'aplite',
        TINTIN_V1_5: 'aplite',
        BIANCA: 'aplite',
        SNOWY_EVT2: 'basalt',
        SNOWY_DVT: 'basalt',
        BOBBY_SMILES: 'basalt',
        SPALDING_EVT: 'chalk',
        SPALDING: 'chalk',
        SILK_EVT: 'diorite',
        SILK: 'diorite',
        TINTIN_BB: 'aplite',
        TINTIN_BB2: 'aplite',
        SNOWY_BB: 'basalt',
        SNOWY_BB2: 'basalt',
        SPALDING_BB2: 'chalk',
        SILK_BB: 'diorite',
        ROBERT_BB: 'emery',
        SILK_BB2: 'diorite',
    }

    @classmethod
    def hardware_platform(cls, hardware):
        return cls.PLATFORMS.get(hardware, 'unknown')
__author__ = 'katharine'


class PebbleHardware(object):
    UNKNOWN = 0
    TINTIN_EV1 = 1
    TINTIN_EV2 = 2
    TINTIN_EV2_3 = 3
    TINTIN_EV2_4 = 4
    TINTIN_V1_5 = 5
    BIANCA = 6
    SNOWY_EVT2 = 7
    SNOWY_DVT = 8
    SPALDING_EVT = 9
    BOBBY_SMILES = 10
    SPALDING = 11
    SILK_EVT = 12
    SILK = 14

    TINTIN_BB = 0xFF
    TINTIN_BB2 = 0xFE
    SNOWY_BB = 0xFD
    SNOWY_BB2 = 0xFC
    SPALDING_BB2 = 0xFB
    SILK_BB = 0xFA
    ROBERT_BB = 0xF9
    SILK_BB2 = 0xF8

    PLATFORMS = {
        UNKNOWN: 'unknown',
        TINTIN_EV1: 'aplite',
        TINTIN_EV2: 'aplite',
        TINTIN_EV2_3: 'aplite',
        TINTIN_EV2_4: 'aplite',
        TINTIN_V1_5: 'aplite',
        BIANCA: 'aplite',
        SNOWY_EVT2: 'basalt',
        SNOWY_DVT: 'basalt',
        BOBBY_SMILES: 'basalt',
        SPALDING_EVT: 'chalk',
        SPALDING: 'chalk',
        SILK_EVT: 'diorite',
        SILK: 'diorite',
        TINTIN_BB: 'aplite',
        TINTIN_BB2: 'aplite',
        SNOWY_BB: 'basalt',
        SNOWY_BB2: 'basalt',
        SPALDING_BB2: 'chalk',
        SILK_BB: 'diorite',
        ROBERT_BB: 'emery',
        SILK_BB2: 'diorite',
    }

    @classmethod
    def hardware_platform(cls, hardware):
        return cls.PLATFORMS.get(hardware, 'unknown')
Python
0
d9af336506fcca40cbc5ebf337268cfd16459c4f
Use iter_log in example.
examples/ra_log.py
examples/ra_log.py
#!/usr/bin/python
# Demonstrates how to iterate over the log of a Subversion repository.

from subvertpy.ra import RemoteAccess

conn = RemoteAccess("svn://svn.samba.org/subvertpy/trunk")

for (changed_paths, rev, revprops, has_children) in conn.iter_log(
        paths=None, start=0, end=conn.get_latest_revnum(),
        discover_changed_paths=True):
    print "=" * 79
    print "%d:" % rev
    print "Revision properties:"
    for entry in revprops.items():
        print "  %s: %s" % entry
    print ""

    print "Changed paths"
    for path, (action, from_path, from_rev) in changed_paths.iteritems():
        print "  %s (%s)" % (path, action)
#!/usr/bin/python
# Demonstrates how to iterate over the log of a Subversion repository.

from subvertpy.ra import RemoteAccess

conn = RemoteAccess("svn://svn.gnome.org/svn/gnome-specimen/trunk")

def cb(changed_paths, rev, revprops, has_children=None):
    print "=" * 79
    print "%d:" % rev
    print "Revision properties:"
    for entry in revprops.items():
        print "  %s: %s" % entry
    print ""

    print "Changed paths"
    for path, (action, from_path, from_rev) in changed_paths.iteritems():
        print "  %s (%s)" % (path, action)

conn.get_log(callback=cb, paths=None, start=0, end=conn.get_latest_revnum(),
             discover_changed_paths=True)
Python
0
1d0d28ebdda25a7dc579857063d47c5042e6c02b
Enable south for the docs site.
django_docs/settings.py
django_docs/settings.py
# Settings for docs.djangoproject.com
from django_www.common_settings import *

### Django settings

CACHE_MIDDLEWARE_KEY_PREFIX = 'djangodocs'

INSTALLED_APPS = [
    'django.contrib.sitemaps',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'djangosecure',
    'haystack',
    'south',
    'docs',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.cache.UpdateCacheMiddleware',
    'djangosecure.middleware.SecurityMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware'
]

TEMPLATE_CONTEXT_PROCESSORS = [
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    'docs.context_processors.recent_release',
    'django.core.context_processors.request',
]

ROOT_URLCONF = 'django_docs.urls'

SITE_ID = 2

### Docs settings
if PRODUCTION:
    DOCS_BUILD_ROOT = BASE.parent.child('docbuilds')
else:
    DOCS_BUILD_ROOT = '/tmp/djangodocs'

### Haystack settings
HAYSTACK_SITECONF = 'docs.search_sites'
if PRODUCTION:
    HAYSTACK_SEARCH_ENGINE = 'xapian'
    HAYSTACK_XAPIAN_PATH = BASE.parent.child('djangodocs.index')
else:
    HAYSTACK_SEARCH_ENGINE = 'whoosh'
    HAYSTACK_WHOOSH_PATH = '/tmp/djangodocs.index'

### South settings
SOUTH_TESTS_MIGRATE = False

### Enable optional components
if DEBUG:
    try:
        import debug_toolbar
    except ImportError:
        pass
    else:
        INSTALLED_APPS.append('debug_toolbar')
        INTERNAL_IPS = ['127.0.0.1']
        MIDDLEWARE_CLASSES.insert(
            MIDDLEWARE_CLASSES.index('django.middleware.common.CommonMiddleware') + 1,
            'debug_toolbar.middleware.DebugToolbarMiddleware')

# Log errors to Sentry instead of email, if available.
if 'sentry_dsn' in SECRETS:
    INSTALLED_APPS.append('raven.contrib.django')
    SENTRY_DSN = SECRETS['sentry_dsn']
    LOGGING["loggers"]["django.request"]["handlers"].remove("mail_admins")
# Settings for docs.djangoproject.com
from django_www.common_settings import *

### Django settings

CACHE_MIDDLEWARE_KEY_PREFIX = 'djangodocs'

INSTALLED_APPS = [
    'django.contrib.sitemaps',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'djangosecure',
    'haystack',
    'docs',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.cache.UpdateCacheMiddleware',
    'djangosecure.middleware.SecurityMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware'
]

TEMPLATE_CONTEXT_PROCESSORS = [
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    'docs.context_processors.recent_release',
    'django.core.context_processors.request',
]

ROOT_URLCONF = 'django_docs.urls'

SITE_ID = 2

### Docs settings
if PRODUCTION:
    DOCS_BUILD_ROOT = BASE.parent.child('docbuilds')
else:
    DOCS_BUILD_ROOT = '/tmp/djangodocs'

### Haystack settings
HAYSTACK_SITECONF = 'docs.search_sites'
if PRODUCTION:
    HAYSTACK_SEARCH_ENGINE = 'xapian'
    HAYSTACK_XAPIAN_PATH = BASE.parent.child('djangodocs.index')
else:
    HAYSTACK_SEARCH_ENGINE = 'whoosh'
    HAYSTACK_WHOOSH_PATH = '/tmp/djangodocs.index'

### Enable optional components
if DEBUG:
    try:
        import debug_toolbar
    except ImportError:
        pass
    else:
        INSTALLED_APPS.append('debug_toolbar')
        INTERNAL_IPS = ['127.0.0.1']
        MIDDLEWARE_CLASSES.insert(
            MIDDLEWARE_CLASSES.index('django.middleware.common.CommonMiddleware') + 1,
            'debug_toolbar.middleware.DebugToolbarMiddleware')

# Log errors to Sentry instead of email, if available.
if 'sentry_dsn' in SECRETS:
    INSTALLED_APPS.append('raven.contrib.django')
    SENTRY_DSN = SECRETS['sentry_dsn']
    LOGGING["loggers"]["django.request"]["handlers"].remove("mail_admins")
Python
0
3434c404d8ab3d42bed4756338f1b8dba3a10255
split debug_plot into debug and plot
src/settings.py
src/settings.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

debug = False
debug_plot = False
plot = False

# CE hack is ON
CE = True


def plt_show():
    from matplotlib import pyplot as plt
    if debug_plot or (debug and plot):
        plt.show()
    else:
        plt.close()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

debug = False
debug_plot = False
plot = False

# CE hack is ON
CE = True


def plt_show():
    from matplotlib import pyplot as plt
    if debug_plot:
        plt.show()
    else:
        plt.close()
Python
0.998673
2dc0ac43b50c61aa10576779a8228ff578c37068
Use get_user_model
src/auditlog/middleware.py
src/auditlog/middleware.py
from __future__ import unicode_literals

import threading
import time

from django.contrib.auth import get_user_model
from django.db.models.signals import pre_save
from django.utils.functional import curry

from auditlog.models import LogEntry
from auditlog.compat import is_authenticated

# Use MiddlewareMixin when present (Django >= 1.10)
try:
    from django.utils.deprecation import MiddlewareMixin
except ImportError:
    MiddlewareMixin = object

threadlocal = threading.local()


class AuditlogMiddleware(MiddlewareMixin):
    """
    Middleware to couple the request's user to log items. This is accomplished by currying the signal receiver with
    the user from the request (or None if the user is not authenticated).
    """

    def process_request(self, request):
        """
        Gets the current user from the request and prepares and connects a signal receiver with the user already
        attached to it.
        """
        # Initialize thread local storage
        threadlocal.auditlog = {
            'signal_duid': (self.__class__, time.time()),
            'remote_addr': request.META.get('REMOTE_ADDR'),
        }

        # In case of proxy, set 'original' address
        if request.META.get('HTTP_X_FORWARDED_FOR'):
            threadlocal.auditlog['remote_addr'] = request.META.get('HTTP_X_FORWARDED_FOR').split(',')[0]

        # Connect signal for automatic logging
        if hasattr(request, 'user') and is_authenticated(request.user):
            set_actor = curry(self.set_actor, user=request.user, signal_duid=threadlocal.auditlog['signal_duid'])
            pre_save.connect(set_actor, sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'], weak=False)

    def process_response(self, request, response):
        """
        Disconnects the signal receiver to prevent it from staying active.
        """
        if hasattr(threadlocal, 'auditlog'):
            pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])

        return response

    def process_exception(self, request, exception):
        """
        Disconnects the signal receiver to prevent it from staying active in case of an exception.
        """
        if hasattr(threadlocal, 'auditlog'):
            pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])

        return None

    @staticmethod
    def set_actor(user, sender, instance, signal_duid, **kwargs):
        """
        Signal receiver with an extra, required 'user' kwarg. This method becomes a real (valid) signal receiver when
        it is curried with the actor.
        """
        if hasattr(threadlocal, 'auditlog'):
            if signal_duid != threadlocal.auditlog['signal_duid']:
                return
            if sender == LogEntry and isinstance(user, get_user_model()) and instance.actor is None:
                instance.actor = user

            instance.remote_addr = threadlocal.auditlog['remote_addr']
from __future__ import unicode_literals

import threading
import time

from django.conf import settings
from django.db.models.signals import pre_save
from django.utils.functional import curry
from django.apps import apps

from auditlog.models import LogEntry
from auditlog.compat import is_authenticated

# Use MiddlewareMixin when present (Django >= 1.10)
try:
    from django.utils.deprecation import MiddlewareMixin
except ImportError:
    MiddlewareMixin = object

threadlocal = threading.local()


class AuditlogMiddleware(MiddlewareMixin):
    """
    Middleware to couple the request's user to log items. This is accomplished by currying the signal receiver with
    the user from the request (or None if the user is not authenticated).
    """

    def process_request(self, request):
        """
        Gets the current user from the request and prepares and connects a signal receiver with the user already
        attached to it.
        """
        # Initialize thread local storage
        threadlocal.auditlog = {
            'signal_duid': (self.__class__, time.time()),
            'remote_addr': request.META.get('REMOTE_ADDR'),
        }

        # In case of proxy, set 'original' address
        if request.META.get('HTTP_X_FORWARDED_FOR'):
            threadlocal.auditlog['remote_addr'] = request.META.get('HTTP_X_FORWARDED_FOR').split(',')[0]

        # Connect signal for automatic logging
        if hasattr(request, 'user') and is_authenticated(request.user):
            set_actor = curry(self.set_actor, user=request.user, signal_duid=threadlocal.auditlog['signal_duid'])
            pre_save.connect(set_actor, sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'], weak=False)

    def process_response(self, request, response):
        """
        Disconnects the signal receiver to prevent it from staying active.
        """
        if hasattr(threadlocal, 'auditlog'):
            pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])

        return response

    def process_exception(self, request, exception):
        """
        Disconnects the signal receiver to prevent it from staying active in case of an exception.
        """
        if hasattr(threadlocal, 'auditlog'):
            pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])

        return None

    @staticmethod
    def set_actor(user, sender, instance, signal_duid, **kwargs):
        """
        Signal receiver with an extra, required 'user' kwarg. This method becomes a real (valid) signal receiver when
        it is curried with the actor.
        """
        if hasattr(threadlocal, 'auditlog'):
            if signal_duid != threadlocal.auditlog['signal_duid']:
                return
            try:
                app_label, model_name = settings.AUTH_USER_MODEL.split('.')
                auth_user_model = apps.get_model(app_label, model_name)
            except ValueError:
                auth_user_model = apps.get_model('auth', 'user')
            if sender == LogEntry and isinstance(user, auth_user_model) and instance.actor is None:
                instance.actor = user

            instance.remote_addr = threadlocal.auditlog['remote_addr']
Python
0.000004
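A minimal sketch of what the change above relies on: Django's get_user_model() resolves settings.AUTH_USER_MODEL (a custom model or the default auth.User) at call time, replacing the manual split-and-apps.get_model lookup from the old version.

from django.contrib.auth import get_user_model

def is_user_instance(obj):
    # Works unchanged whether or not the project swaps in a custom user model.
    return isinstance(obj, get_user_model())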
00c14e981807668b09a5d6a2e71fe8872291acad
Add admin support for attachments
django_mailbox/admin.py
django_mailbox/admin.py
from django.conf import settings
from django.contrib import admin

from django_mailbox.models import MessageAttachment, Message, Mailbox


def get_new_mail(mailbox_admin, request, queryset):
    for mailbox in queryset.all():
        mailbox.get_new_mail()
get_new_mail.short_description = 'Get new mail'


class MailboxAdmin(admin.ModelAdmin):
    list_display = (
        'name',
        'uri',
        'from_email',
        'active',
    )
    actions = [get_new_mail]


class MessageAttachmentAdmin(admin.ModelAdmin):
    pass


class MessageAdmin(admin.ModelAdmin):
    list_display = (
        'subject',
        'processed',
        'mailbox',
        'outgoing',
    )
    ordering = ['-processed']
    list_filter = (
        'mailbox',
        'outgoing',
    )
    raw_id_fields = (
        'in_reply_to',
    )

if getattr(settings, 'DJANGO_MAILBOX_ADMIN_ENABLED', True):
    admin.site.register(Message, MessageAdmin)
    admin.site.register(MessageAttachmentAdmin, MessageAttachment)
    admin.site.register(Mailbox, MailboxAdmin)
from django.conf import settings
from django.contrib import admin

from django_mailbox.models import Message, Mailbox


def get_new_mail(mailbox_admin, request, queryset):
    for mailbox in queryset.all():
        mailbox.get_new_mail()
get_new_mail.short_description = 'Get new mail'


class MailboxAdmin(admin.ModelAdmin):
    list_display = (
        'name',
        'uri',
        'from_email',
        'active',
    )
    actions = [get_new_mail]


class MessageAdmin(admin.ModelAdmin):
    list_display = (
        'subject',
        'processed',
        'mailbox',
        'outgoing',
    )
    ordering = ['-processed']
    list_filter = (
        'mailbox',
        'outgoing',
    )
    raw_id_fields = (
        'in_reply_to',
    )

if getattr(settings, 'DJANGO_MAILBOX_ADMIN_ENABLED', True):
    admin.site.register(Message, MessageAdmin)
    admin.site.register(Mailbox, MailboxAdmin)
Python
0
48c880a35c899929da33f20e9cd4ee7e4fd8bc7e
Set a custom name template including the replica set
servers/mongo/data.py
servers/mongo/data.py
from .. import Server
import logging


class MongoDataNode(Server):

    log = logging.getLogger('Servers.MongoDataNode')
    log.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s [%(name)s] %(levelname)s: %(message)s',
        datefmt='%H:%M:%S')
    ch.setFormatter(formatter)
    log.addHandler(ch)

    def __init__(self, dry=None, verbose=None, size=None, cluster=None,
                 environment=None, ami=None, region=None, role=None,
                 keypair=None, availability_zone=None,
                 security_groups=None, block_devices=None,
                 replica_set=None, replica_set_index=None):

        super(MongoDataNode, self).__init__(dry, verbose, size, cluster,
                                            environment, ami, region, role,
                                            keypair, availability_zone,
                                            security_groups, block_devices)

        self.replica_set = replica_set
        self.replica_set_index = replica_set_index

    def configure(self):

        super(MongoDataNode, self).configure()

        if self.replica_set is None:
            self.log.warn('No replica set provided')
            self.replica_set = 1

        self.log.info('Using replica set {set}'.format(set=self.replica_set))

        if self.replica_set_index is None:
            self.log.warn('No replica set set index provided')
            self.replica_set_index = 1

        self.log.info('Using replica set index {index}'.format(
            index=self.replica_set_index))

    @property
    def name(self):

        try:
            return self.unique_name
        except Exception:
            pass

        template = '{envcl}-rs{set}-{zone}-{index}'

        name = template.format(envcl=self.envcl, set=self.replica_set,
                               zone=self.availability_zone[-1:],
                               index=self.replica_set_index)

        self.unique_name = name

        self.log.info('Using node name {name}'.format(name=name))

        return name
from .. import Server import logging class MongoDataNode(Server): log = logging.getLogger('Servers.MongoDataNode') log.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) formatter = logging.Formatter( '%(asctime)s [%(name)s] %(levelname)s: %(message)s', datefmt = '%H:%M:%S') ch.setFormatter(formatter) log.addHandler(ch) def __init__(self, dry = None, verbose = None, size = None, cluster = None, environment = None, ami = None, region = None, role = None, keypair = None, availability_zone = None, security_groups = None, block_devices = None, replica_set = None, replica_set_index = None): super(MongoDataNode, self).__init__(dry, verbose, size, cluster, environment, ami, region, role, keypair, availability_zone, security_groups, block_devices) self.replica_set = replica_set self.replica_set_index = replica_set_index def configure(self): super(MongoDataNode, self).configure() if self.replica_set is None: self.log.warn('No replica set provided') self.replica_set = 1 self.log.info('Using replica set {set}'.format(set=self.replica_set)) if self.replica_set_index is None: self.log.warn('No replica set index provided') self.replica_set_index = 1 self.log.info('Using replica set index {index}'.format( index=self.replica_set_index))
Python
0
71289d3a22476001421454ff736ea03742e43158
Add basic parser
vumi_twilio_api/twilml_parser.py
vumi_twilio_api/twilml_parser.py
import xml.etree.ElementTree as ET class Verb(object): """Represents a single verb in TwilML. """ def __init__(self, verb, attributes={}, nouns={}): self.verb = verb self.attributes = attributes self.nouns = nouns class TwilMLParseError(Exception): """Raised when trying to parse invalid TwilML""" class TwilMLParser(object): """Parser for TwilML""" def parse_xml(self, xml): """Parses TwilML and returns a list of :class:`Verb` objects""" verbs = [] root = ET.fromstring(xml) if root.tag != "Response": raise TwilMLParseError( "Invalid root %r. Should be 'Response'." % root.tag) for child in root: parser = getattr( self, '_parse_%s' % child.tag, self._parse_default) verbs.append(parser(child)) return verbs def _parse_default(self, element): raise TwilMLParseError("Unable to parse verb %r" % element.tag)
class Verb(object): """Represents a single verb in TwilML. """ def __init__(self, verb, attributes={}, nouns={}): self.verb = verb self.attributes = attributes self.nouns = nouns
Python
0.000334
84642bab00aecbb061789fc9e8a5d5103e3e9e42
add getdict
panoramisk/message.py
panoramisk/message.py
from . import utils from urllib.parse import unquote class Message(utils.CaseInsensitiveDict): """Handle both Responses and Events with the same api: .. >>> resp = Message({'Response': 'Follows'}, 'Response body') >>> event = Message({'Event': 'MeetmeEnd', 'Meetme': '4242'}) Responses: .. code-block:: python >>> bool(resp.success) True >>> resp <Message Response='Follows' content='Response body'> >>> print(resp.content) Response body >>> for line in resp.iter_lines(): ... print(resp.content) Response body Events: .. code-block:: python >>> print(event['meetme']) 4242 >>> print(event.meetme) 4242 >>> event.unknown_header '' """ quoted_keys = ['result'] success_responses = ['Success', 'Follows', 'Goodbye'] def __init__(self, headers, content=''): super(Message, self).__init__(headers, content=content) self.manager = None @property def id(self): if 'commandid' in self: return self['commandid'] elif 'actionid' in self: return self['actionid'] return None @property def action_id(self): if 'actionid' in self: return self['actionid'] return None @property def success(self): """return True if a response status is Success or Follows: .. code-block:: python >>> resp = Message({'Response': 'Success'}) >>> print(resp.success) True >>> resp['Response'] = 'Failed' >>> resp.success False """ if 'event' in self: return True if self.response in self.success_responses: return True return False def __repr__(self): message = ' '.join(['%s=%r' % i for i in sorted(self.items())]) return '<Message {0}>'.format(message) def iter_lines(self): """Iter over response body""" for line in self.content.split('\n'): yield line def parsed_result(self): """Get parsed result of AGI command""" if 'Result' in self: return utils.parse_agi_result(self['Result']) else: raise ValueError('No result in %r' % self) def getdict(self, key): """Convert a multi values header to a case-insensitive dict: .. code-block:: python >>> resp = Message({ ... 'Response': 'Success', ... 'ChanVariable': [ ... 'FROM_DID=', 'SIPURI=sip:[email protected]:4242'], ... }) >>> print(resp.chanvariable) ['FROM_DID=', 'SIPURI=sip:[email protected]:4242'] >>> value = resp.getdict('chanvariable') >>> print(value['sipuri']) sip:[email protected]:4242 """ values = self.get(key, None) if not isinstance(values, list): raise TypeError("{0} must be a list. got {1}".format(key, values)) result = utils.CaseInsensitiveDict() for item in values: k, v = item.split('=', 1) result[k] = v return result @classmethod def from_line(cls, line): mlines = line.split(utils.EOL) headers = {} content = '' has_body = ('Response: Follows', 'Response: Fail') if mlines[0].startswith(has_body): content = mlines.pop() while not content and mlines: content = mlines.pop() for mline in mlines: if ': ' in mline: k, v = mline.split(': ', 1) if k.lower() in cls.quoted_keys: v = unquote(v).strip() if k in headers: o = headers.setdefault(k, []) if not isinstance(o, list): o = [o] o.append(v) headers[k] = o else: headers[k] = v if 'Event' in headers or 'Response' in headers: return cls(headers, content)
from . import utils from urllib.parse import unquote class Message(utils.CaseInsensitiveDict): """Handle both Responses and Events with the same api: .. >>> resp = Message({'Response': 'Follows'}, 'Response body') >>> event = Message({'Event': 'MeetmeEnd', 'Meetme': '4242'}) Responses: .. code-block:: python >>> bool(resp.success) True >>> resp <Message Response='Follows' content='Response body'> >>> print(resp.content) Response body >>> for line in resp.iter_lines(): ... print(resp.content) Response body Events: .. code-block:: python >>> print(event['meetme']) 4242 >>> print(event.meetme) 4242 >>> event.unknown_header '' """ quoted_keys = ['result'] success_responses = ['Success', 'Follows', 'Goodbye'] def __init__(self, headers, content=''): super(Message, self).__init__(headers, content=content) self.manager = None @property def id(self): if 'commandid' in self: return self['commandid'] elif 'actionid' in self: return self['actionid'] return None @property def action_id(self): if 'actionid' in self: return self['actionid'] return None @property def success(self): """return True if a response status is Success or Follows: .. code-block:: python >>> resp = Message({'Response': 'Success'}) >>> print(resp.success) True >>> resp['Response'] = 'Failed' >>> resp.success False """ if 'event' in self: return True if self.response in self.success_responses: return True return False def __repr__(self): message = ' '.join(['%s=%r' % i for i in sorted(self.items())]) return '<Message {0}>'.format(message) def iter_lines(self): """Iter over response body""" for line in self.content.split('\n'): yield line def parsed_result(self): """Get parsed result of AGI command""" if 'Result' in self: return utils.parse_agi_result(self['Result']) else: raise ValueError('No result in %r' % self) @classmethod def from_line(cls, line): mlines = line.split(utils.EOL) headers = {} content = '' has_body = ('Response: Follows', 'Response: Fail') if mlines[0].startswith(has_body): content = mlines.pop() while not content and mlines: content = mlines.pop() for mline in mlines: if ': ' in mline: k, v = mline.split(': ', 1) if k.lower() in cls.quoted_keys: v = unquote(v).strip() if k in headers: o = headers.setdefault(k, []) if not isinstance(o, list): o = [o] o.append(v) headers[k] = o else: headers[k] = v if 'Event' in headers or 'Response' in headers: return cls(headers, content)
Python
0.000001
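The getdict commit above boils down to splitting each 'KEY=VALUE' entry of a multi-value header once on '='. A minimal sketch of that idea, using a plain dict and made-up header values in place of panoramisk's CaseInsensitiveDict:

values = ["FROM_DID=", "SIPURI=sip:7000@192.0.2.1"]  # made-up header values

result = {}
for item in values:
    key, value = item.split("=", 1)
    result[key] = value

print(result["SIPURI"])  # -> sip:7000@192.0.2.1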
948ce666053eee9fbdfd7f14e9f02e0aa6bdd18d
list[:limit] works fine if limit=None
djangofeeds/feedutil.py
djangofeeds/feedutil.py
from django.utils.text import truncate_html_words from djangofeeds import conf from datetime import datetime from djangofeeds.optimization import BeaconDetector import time from datetime import datetime, timedelta _beacon_detector = BeaconDetector() def entries_by_date(entries, limit=None): """Sort the feed entries by date :param entries: Entries given from :mod:`feedparser`. :param limit: Limit number of posts. """ now = datetime.now() def date_entry_tuple(entry, counter): """Find the most current date entry tuple.""" if "date_parsed" in entry: return (entry["date_parsed"].encode("utf-8"), entry) if "updated_parsed" in entry: return (entry["updated_parsed"].encode("utf-8"), entry) if "published_parsed" in entry: return (entry["published_parsed"].encode("utf-8"), entry) return (now - timedelta(seconds=(counter * 30)), entry) sorted_entries = [date_entry_tuple(entry, counter) for counter, entry in enumerate(entries)] sorted_entries.sort() sorted_entries.reverse() return [entry for _date, entry in sorted_entries[:limit]] def find_post_content(feed_obj, entry): """Find the correct content field for a post.""" try: content = entry["content"][0]["value"] except (IndexError, KeyError): content = entry.get("description") or entry.get("summary", "") try: #content = _beacon_detector.stripsafe(content) content = truncate_html_words(content, conf.DEFAULT_ENTRY_WORD_LIMIT) except UnicodeDecodeError: content = "" return content def date_to_datetime(field_name): """Given a post field, convert its :mod:`feedparser` date tuple to :class:`datetime.datetime` objects. :param field_name: The post field to use. """ def _parsed_date_to_datetime(feed_obj, entry): """generated below""" if field_name in entry: try: time_ = time.mktime(entry[field_name]) date = datetime.fromtimestamp(time_) except TypeError: date = datetime.now() return date return datetime.now() _parsed_date_to_datetime.__doc__ = \ """Convert %s to :class:`datetime.datetime` object""" % field_name return _parsed_date_to_datetime
from django.utils.text import truncate_html_words from djangofeeds import conf from datetime import datetime from djangofeeds.optimization import BeaconDetector import time from datetime import datetime, timedelta _beacon_detector = BeaconDetector() def entries_by_date(entries, limit=None): """Sort the feed entries by date :param entries: Entries given from :mod:`feedparser`. :param limit: Limit number of posts. """ now = datetime.now() def date_entry_tuple(entry, counter): """Find the most current date entry tuple.""" if "date_parsed" in entry: return (entry["date_parsed"].encode("utf-8"), entry) if "updated_parsed" in entry: return (entry["updated_parsed"].encode("utf-8"), entry) if "published_parsed" in entry: return (entry["published_parsed"].encode("utf-8"), entry) return (now - timedelta(seconds=(counter * 30)), entry) sorted_entries = [date_entry_tuple(entry, counter) for counter, entry in enumerate(entries)] sorted_entries.sort() sorted_entries.reverse() return [entry for (date, entry) in sorted_entries[slice(0, limit)]] def find_post_content(feed_obj, entry): """Find the correct content field for a post.""" try: content = entry["content"][0]["value"] except (IndexError, KeyError): content = entry.get("description") or entry.get("summary", "") try: #content = _beacon_detector.stripsafe(content) content = truncate_html_words(content, conf.DEFAULT_ENTRY_WORD_LIMIT) except UnicodeDecodeError: content = "" return content def date_to_datetime(field_name): """Given a post field, convert its :mod:`feedparser` date tuple to :class:`datetime.datetime` objects. :param field_name: The post field to use. """ def _parsed_date_to_datetime(feed_obj, entry): """generated below""" if field_name in entry: try: time_ = time.mktime(entry[field_name]) date = datetime.fromtimestamp(time_) except TypeError: date = datetime.now() return date return datetime.now() _parsed_date_to_datetime.__doc__ = \ """Convert %s to :class:`datetime.datetime` object""" % field_name return _parsed_date_to_datetime
Python
0.999999
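The fix above rests on a slicing detail: a slice with a None bound is open-ended, so list[:limit] already handles limit=None and the explicit slice(0, limit) was redundant. A quick demonstration:

entries = [3, 1, 2]

print(entries[:None])        # -> [3, 1, 2]; a None bound keeps everything
print(entries[:2])           # -> [3, 1]
print(entries[slice(0, 2)])  # the equivalent spelling the old code used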
a49095bf078603e046288629aa8497f031ed6bd3
Add transpose_join, which joins 2 infinite lists by transposing the next elements
node/divide.py
node/divide.py
#!/usr/bin/env python from nodes import Node from type.type_infinite_list import DummyList class Divide(Node): char = "/" args = 2 results = 1 @Node.test_func([4, 2], [2]) @Node.test_func([2, 4], [0.5]) def func(self, a: Node.number, b: Node.number): """a/b. floating point division. For integer division, see `f`""" return a/b @Node.test_func(["test", "t"], [2]) @Node.test_func([(3, 1, 2, 1, 3), 3], [2]) def count(self, a: Node.indexable, b): """a.count(b)""" return a.count(b) @Node.test_func([[4, 4, 2, 2, 9, 9], [1, 2, 3]], [[[4], [4, 2], [2, 9, 9]]]) def split_length(self, inp: Node.indexable, lengths: Node.sequence): """Split inp into sections length lengths""" rtn = [[]] cur_length = 0 for i in inp: if cur_length != len(lengths) and len(rtn[-1]) == lengths[cur_length]: cur_length += 1 rtn.append([]) rtn[-1].append(i) return [rtn] def time_int_div(self, a: Node.clock, b: Node.number): return a.divide_int(b) def time_int_div_2(self, a: Node.number, b: Node.clock): return b.divide_int(a) def time_div(self, a: Node.clock, b: Node.clock): return b.divide_time(a) def transpose_inf_list(self, a: Node.infinite, b: Node.infinite): def transpose(): while 1: yield next(a) yield next(b) return DummyList(transpose())
#!/usr/bin/env python from nodes import Node class Divide(Node): """ Takes two items from the stack and divides them """ char = "/" args = 2 results = 1 @Node.test_func([4,2], [2]) @Node.test_func([2,4], [0.5]) def func(self, a: Node.number, b: Node.number): """a/b. floating point division. For integer division, see `f`""" return a/b @Node.test_func(["test", "t"], [2]) @Node.test_func([(3,1,2,1,3), 3], [2]) def count(self, a: Node.indexable, b): """a.count(b)""" return a.count(b) @Node.test_func([[4, 4, 2, 2, 9, 9], [1, 2, 3]], [[[4], [4, 2], [2, 9, 9]]]) def split_length(self, inp: Node.indexable, lengths: Node.sequence): """Split inp into sections length lengths""" rtn = [[]] cur_length = 0 for i in inp: if cur_length != len(lengths) and len(rtn[-1]) == lengths[cur_length]: cur_length += 1 rtn.append([]) rtn[-1].append(i) return [rtn] def time_int_div(self, a: Node.clock, b: Node.number): return a.divide_int(b) def time_int_div_2(self, a: Node.number, b: Node.clock): return b.divide_int(a) def time_div(self, a: Node.clock, b: Node.clock): return b.divide_time(a)
Python
0.000001
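The transpose_inf_list method added above alternates elements drawn from two infinite iterators. A self-contained sketch of that interleaving, using plain generators and itertools instead of the codebase's DummyList wrapper:

from itertools import count, islice

def transpose(a, b):
    # Alternate between the two (possibly infinite) iterators forever.
    while True:
        yield next(a)
        yield next(b)

merged = transpose(count(0), count(100))
print(list(islice(merged, 6)))  # -> [0, 100, 1, 101, 2, 102]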
e869d59dddf6e574155a4c5307b184d46e145d7c
Delete Feeds/Posts and retry query if MultipleObjectsReturned
djangofeeds/managers.py
djangofeeds/managers.py
from django.db import models from django.db.models.query import QuerySet from djangofeeds.utils import truncate_field_data import sys DEFAULT_POST_LIMIT = 5 def update_with_dict(obj, fields): set_value = lambda (name, val): setattr(obj, name, val) map(set_value, fields.items()) obj.save() return obj class ExtendedQuerySet(QuerySet): def update_or_create(self, **kwargs): try: obj, created = self.get_or_create(**kwargs) except self.model.MultipleObjectsReturned: sys.stderr.write("djfeedsMultipleObjectsReturned: %s" % ( str(kwargs))) self.filter(**kwargs).delete() obj, created = self.get_or_create(**kwargs) if not created: fields = dict(kwargs.pop("defaults", {})) fields.update(kwargs) update_with_dict(obj, fields) return obj class ExtendedManager(models.Manager): def get_query_set(self): return ExtendedQuerySet(self.model) def update_or_create(self, **kwargs): return self.get_query_set().update_or_create(**kwargs) FeedManager = ExtendedManager CategoryManager = ExtendedManager EnclosureManager = ExtendedManager class PostManager(ExtendedManager): """Manager class for Posts""" def all_by_order(self, limit=DEFAULT_POST_LIMIT): ordering = self.model._meta.ordering return self.all().order_by(*ordering)[:limit] def update_post(self, feed_obj, **fields): fields = truncate_field_data(self.model, fields) if fields.get("guid"): # Unique on guid, feed post = self.update_or_create(guid=fields["guid"], feed=feed_obj, defaults=fields) else: # Unique on title, feed, date_published lookup_fields = dict(date_published=fields["date_published"], title=fields["title"], feed=feed_obj) try: return self.update_or_create(defaults=fields, **lookup_fields) except self.model.MultipleObjectsReturned: dupe = self._find_duplicate_post(lookup_fields, fields) if dupe: return update_with_dict(dupe, fields) else: return self.create(**fields) def _find_duplicate_post(self, lookup_fields, fields): # If any of these fields matches, it's a dupe. # Compare in order, because you want to compare short fields # before having to match the content. cmp_fields = ("author", "link", "content") range = self.filter(**lookup_fields).iterator() for possible in range: for field in cmp_fields: orig_attr = getattr(possible, field, None) this_attr = fields.get(field) if orig_attr == this_attr: return possible
from django.db import models from django.db.models.query import QuerySet from djangofeeds.utils import truncate_field_data DEFAULT_POST_LIMIT = 5 def update_with_dict(obj, fields): set_value = lambda (name, val): setattr(obj, name, val) map(set_value, fields.items()) obj.save() return obj class ExtendedQuerySet(QuerySet): def update_or_create(self, **kwargs): obj, created = self.get_or_create(**kwargs) if not created: fields = dict(kwargs.pop("defaults", {})) fields.update(kwargs) update_with_dict(obj, fields) return obj class ExtendedManager(models.Manager): def get_query_set(self): return ExtendedQuerySet(self.model) def update_or_create(self, **kwargs): return self.get_query_set().update_or_create(**kwargs) FeedManager = ExtendedManager CategoryManager = ExtendedManager EnclosureManager = ExtendedManager class PostManager(ExtendedManager): """Manager class for Posts""" def all_by_order(self, limit=DEFAULT_POST_LIMIT): ordering = self.model._meta.ordering return self.all().order_by(*ordering)[:limit] def update_post(self, feed_obj, **fields): fields = truncate_field_data(self.model, fields) if fields.get("guid"): # Unique on guid, feed post = self.update_or_create(guid=fields["guid"], feed=feed_obj, defaults=fields) else: # Unique on title, feed, date_published lookup_fields = dict(date_published=fields["date_published"], title=fields["title"], feed=feed_obj) try: return self.update_or_create(defaults=fields, **lookup_fields) except self.model.MultipleObjectsReturned: dupe = self._find_duplicate_post(lookup_fields, fields) if dupe: return update_with_dict(dupe, fields) else: return self.create(**fields) def _find_duplicate_post(self, lookup_fields, fields): # If any of these fields matches, it's a dupe. # Compare in order, because you want to compare short fields # before having to match the content. cmp_fields = ("author", "link", "content") range = self.filter(**lookup_fields).iterator() for possible in range: for field in cmp_fields: orig_attr = getattr(possible, field, None) this_attr = fields.get(field) if orig_attr == this_attr: return possible
Python
0
7c9a4b72f59d902ab5daa43b7675641a2e81ebb7
Switch to "templates" terminology for VM images/templates.
xblock_skytap/skytap.py
xblock_skytap/skytap.py
""" """ # Imports ########################################################### from __future__ import absolute_import import skytap as skytap_library from xblock.core import XBlock from xblock.fields import Scope, String from xblock.fragment import Fragment from xblockutils.resources import ResourceLoader from xblockutils.studio_editable import StudioEditableXBlockMixin from .default_data import DEFAULT_DATA from .utils import _ # Globals ########################################################### loader = ResourceLoader(__name__) # Functions ######################################################### def get_projects(): """ """ return ('Dummy project A', 'Dummy project B', 'Dummy project C') def get_templates(): """ """ return ('Dummy template A', 'Dummy template B', 'Dummy template C') def get_vms(): """ """ return ('Dummy VM A', 'Dummy VM B', 'Dummy VM C') def get_subscription_types(): """ """ return ('All', 'Dummy subscription A', 'Dummy subscription B', 'Dummy subscription C') # Classes ########################################################### class SkytapXBlock(StudioEditableXBlockMixin, XBlock): """ """ display_name = String( display_name=_("Title"), help=_("The title of this problem. Displayed to learners as a tooltip in the navigation bar."), scope=Scope.settings, default=_("Skytap XBlock"), ) project = String( display_name=_("Project"), help=_("Skytap project to pull templates from."), scope=Scope.settings, values=get_projects, ) templates = String( display_name=_("Templates"), help=_("List of templates belonging to this exercise environment."), scope=Scope.settings, values=get_templates, ) vms = String( display_name=_("VMs"), help=_("List of VMs to start for selected template."), scope=Scope.settings, values=get_vms, ) subscription_types = String( display_name=_("Subscription types"), help=_("List of subscription types that may access this exercise environment."), scope=Scope.settings, values=get_subscription_types, ) organization_rules = String( display_name=_("Organization rules"), help=_( "Rules that define custom behavior for specific organizations. " "To apply a rule to an organization, add one or more identifiers below the rule name." ), scope=Scope.settings, default=DEFAULT_DATA, multiline_editor=True, ) editable_fields = ("display_name", "project", "templates", "subscription_types", "organization_rules") def student_view(self, context): """ """ context = context.copy() if context else {} users = skytap_library.Users() context['users'] = users.json() fragment = Fragment() fragment.add_content(loader.render_template("templates/skytap.html", context)) fragment.add_javascript_url( self.runtime.local_resource_url(self, "public/js/src/skytap.js") ) fragment.initialize_js("SkytapXBlock") return fragment
""" """ # Imports ########################################################### from __future__ import absolute_import import skytap as skytap_library from xblock.core import XBlock from xblock.fields import Scope, String from xblock.fragment import Fragment from xblockutils.resources import ResourceLoader from xblockutils.studio_editable import StudioEditableXBlockMixin from .default_data import DEFAULT_DATA from .utils import _ # Globals ########################################################### loader = ResourceLoader(__name__) # Functions ######################################################### def get_projects(): """ """ return ('Dummy project A', 'Dummy project B', 'Dummy project C') def get_vm_images(): """ """ return ('Dummy image A', 'Dummy image B', 'Dummy image C') def get_vms(): """ """ return ('Dummy VM A', 'Dummy VM B', 'Dummy VM C') def get_subscription_types(): """ """ return ('All', 'Dummy subscription A', 'Dummy subscription B', 'Dummy subscription C') # Classes ########################################################### class SkytapXBlock(StudioEditableXBlockMixin, XBlock): """ """ display_name = String( display_name=_("Title"), help=_("The title of this problem. Displayed to learners as a tooltip in the navigation bar."), scope=Scope.settings, default=_("Skytap XBlock"), ) project = String( display_name=_("Project"), help=_("Skytap project to pull VM images/templates from."), scope=Scope.settings, values=get_projects, ) vm_images = String( display_name=_("VM images/Templates"), help=_("List of VM images/templates belonging to this exercise environment."), scope=Scope.settings, values=get_vm_images, ) vms = String( display_name=_("VMs"), help=_("List of VMs to start for selected template."), scope=Scope.settings, values=get_vms, ) subscription_types = String( display_name=_("Subscription types"), help=_("List of subscription types that may access this exercise environment."), scope=Scope.settings, values=get_subscription_types, ) organization_rules = String( display_name=_("Organization rules"), help=_( "Rules that define custom behavior for specific organizations. " "To apply a rule to an organization, add one or more identifiers below the rule name." ), scope=Scope.settings, default=DEFAULT_DATA, multiline_editor=True, ) editable_fields = ("display_name", "project", "vm_images", "subscription_types", "organization_rules") def student_view(self, context): """ """ context = context.copy() if context else {} users = skytap_library.Users() context['users'] = users.json() fragment = Fragment() fragment.add_content(loader.render_template("templates/skytap.html", context)) fragment.add_javascript_url( self.runtime.local_resource_url(self, "public/js/src/skytap.js") ) fragment.initialize_js("SkytapXBlock") return fragment
Python
0
87d792fda8763f49d83ce274015f3a436a0c89cc
send message after stuff is started
dusty/commands/run.py
dusty/commands/run.py
from ..compiler import (compose as compose_compiler, nginx as nginx_compiler, port_spec as port_spec_compiler, spec_assembler) from ..systems import compose, hosts, nginx, virtualbox def start_local_env(): """ This command will use the compilers to get compose specs and will pass those specs to the systems that need them. Those systems will in turn launch the services needed to make the local environment go""" assembled_spec = spec_assembler.get_assembled_specs() port_spec = port_spec_compiler.get_port_spec_document(assembled_spec) nginx_config = nginx_compiler.get_nginx_configuration_spec(port_spec) compose_config = compose_compiler.get_compose_dict(assembled_spec, port_spec) hosts.update_hosts_file_from_port_spec(port_spec) virtualbox.update_virtualbox_port_forwarding_from_port_spec(port_spec) nginx.update_nginx_from_config(nginx_config) compose.update_running_containers_from_spec(compose_config) yield "Your local environment is now started"
from ..compiler import (compose as compose_compiler, nginx as nginx_compiler, port_spec as port_spec_compiler, spec_assembler) from ..systems import compose, hosts, nginx, virtualbox def start_local_env(): """ This command will use the compilers to get compose specs and will pass those specs to the systems that need them. Those systems will in turn launch the services needed to make the local environment go""" assembled_spec = spec_assembler.get_assembled_specs() port_spec = port_spec_compiler.get_port_spec_document(assembled_spec) nginx_config = nginx_compiler.get_nginx_configuration_spec(port_spec) compose_config = compose_compiler.get_compose_dict(assembled_spec, port_spec) hosts.update_hosts_file_from_port_spec(port_spec) virtualbox.update_virtualbox_port_forwarding_from_port_spec(port_spec) nginx.update_nginx_from_config(nginx_config) compose.update_running_containers_from_spec(compose_config)
Python
0
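The change above makes the command yield its status line so callers can stream progress. A minimal sketch of that generator-command pattern; start_services and its body are illustrative, not dusty's actual API:

def start_services():
    # ... perform each startup step here ...
    yield "Your local environment is now started"

for message in start_services():
    print(message)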
7f2ac925b2343e57ad7f4a6d79ee24e14c8f4d78
Add a Bazel rule assignment_notebook().
exercises/defs.bzl
exercises/defs.bzl
# TODO(salikh): Implement the automatic tar rules too def assignment_notebook_macro( name, srcs, language = None, visibility = ["//visibility:private"]): """ Defines a rule for student notebook and autograder generation from a master notebook. Arguments: name: srcs: the file name of the input notebook should end in '-master.ipynb'. """ language_opt = "" if language: language_opt = " --language=" + language native.genrule( name = name + "_student", srcs = srcs, outs = [name + '-student.ipynb'], cmd = """$(location //go/cmd/assign) --input="$<" --output="$@" --preamble=$(location //exercises:preamble.py) --command=student""" + language_opt, tools = [ "//go/cmd/assign", "//exercises:preamble.py", ], ) autograder_output = name + '-autograder' native.genrule( name = name + "_autograder", srcs = srcs, outs = [autograder_output], cmd = """$(location //go/cmd/assign) --input="$<" --output="$@" --command=autograder""" + language_opt, tools = [ "//go/cmd/assign", ], ) def _assignment_notebook_impl(ctx): print("src = ", ctx.attr.src) print("src.path = ", ctx.file.src.path) outs = [] languages = ctx.attr.languages inputs = [ctx.file.src] preamble_opt = "" if ctx.file.preamble: preamble_opt = " --preamble='" + ctx.file.preamble.path + "'" inputs.append(ctx.file.preamble) if len(languages) == 0: # Force the language-agnostic notebook generation by default. languages = [""] for lang in languages: outfile = ctx.label.name + ("-" + lang if lang else "") + "-student.ipynb" out = ctx.actions.declare_file(outfile) outs.append(out) language_opt = "" if lang: language_opt = " -language='" + lang + "'" print(" command = " + ctx.executable._assign.path + " --command=student --input='" + ctx.file.src.path + "'" + " --output='" + out.path + "'" + language_opt + preamble_opt) ctx.actions.run_shell( inputs = inputs, outputs = [out], tools = [ctx.executable._assign], progress_message = "Running %s" % ctx.executable._assign.path, command = ctx.executable._assign.path + " --command=student --input='" + ctx.file.src.path + "'" + " --output='" + out.path + "'" + language_opt + preamble_opt, ) return [DefaultInfo(files = depset(outs))] # Defines a rule for student notebook and autograder # generation from a master notebook. # # Arguments: # name: assignment_notebook = rule( implementation = _assignment_notebook_impl, attrs = { # Specifies the list of languages to generate student notebooks. # If omitted, defaults to empty list, which means that a # single language-agnostic notebook will be generated. # It is also possible to generate language-agnostic notebook # (skipping filtering by language) by adding an empty string # value to languages. "languages": attr.string_list(default=[], mandatory=False), # The file name of the input notebook. "src": attr.label( mandatory=True, allow_single_file=True), # If present, specifies the label of the preamble file. "preamble": attr.label( default=None, mandatory=False, allow_single_file=True), "_assign": attr.label( default = Label("//go/cmd/assign"), allow_single_file = True, executable = True, cfg = "host", ), }, )
# TODO(salikh): Implement the automatic tar rules too def assignment_notebook_macro( name, srcs, language = None, visibility = ["//visibility:private"]): """ Defines a rule for student notebook and autograder generation from a master notebook. Arguments: name: srcs: the file name of the input notebook should end in '-master.ipynb'. """ language_opt = "" if language: language_opt = " --language=" + language native.genrule( name = name + "_student", srcs = srcs, outs = [name + '-student.ipynb'], cmd = """$(location //go/cmd/assign) --input="$<" --output="$@" --preamble=$(location //exercises:preamble.py) --command=student""" + language_opt, tools = [ "//go/cmd/assign", "//exercises:preamble.py", ], ) autograder_output = name + '-autograder' native.genrule( name = name + "_autograder", srcs = srcs, outs = [autograder_output], cmd = """$(location //go/cmd/assign) --input="$<" --output="$@" --command=autograder""" + language_opt, tools = [ "//go/cmd/assign", ], )
Python
0
fd92c0b2964bce5d56b9bf41e84bfde24fec0b78
raise default post limit to 25
djangofeeds/managers.py
djangofeeds/managers.py
from datetime import timedelta, datetime from django.db import models from django.db.models.query import QuerySet from django.core.exceptions import MultipleObjectsReturned from djangofeeds.utils import truncate_field_data """ .. data:: DEFAULT_POST_LIMIT The default limit of number of posts to keep in a feed. Default is 25 posts. """ DEFAULT_POST_LIMIT = 25 def update_with_dict(obj, fields): """Update and save a model from the values of a :class:`dict`.""" set_value = lambda (name, val): setattr(obj, name, val) map(set_value, fields.items()) obj.save() return obj class ExtendedQuerySet(QuerySet): def update_or_create(self, **kwargs): obj, created = self.get_or_create(**kwargs) if not created: fields = dict(kwargs.pop("defaults", {})) fields.update(kwargs) update_with_dict(obj, fields) return obj def since(self, interval): """Return all the feeds refreshed since a specified amount of seconds.""" threshold = datetime.now() - timedelta(seconds=interval) return self.filter(date_last_refresh__lt=threshold) def ratio(self, min=None, max=None): """Select feeds based on ratio. :param min: Don't include feeds with a ratio lower than this. :param max: Don't include feeds with a ratio higher than this. """ query = {} if min is not None: query["ratio__gt"] = min if max is not None: query["ratio__lt"] = max return self.filter(**query) def frequency(self, min=None, max=None): """Select feeds based on update frequency. :param min: Don't include feeds with a frequency lower than this. :param max: Don't include feeds with a frequency higher than this. """ query = {} if min is not None: query["freq__gt"] = min if max is not None: query["freq__lt"] = max return self.filter(**query) class ExtendedManager(models.Manager): """Manager supporting :meth:`update_or_create`.""" def get_query_set(self): return ExtendedQuerySet(self.model) def update_or_create(self, **kwargs): return self.get_query_set().update_or_create(**kwargs) class FeedManager(ExtendedManager): """Manager for :class:`djangofeeds.models.Feed`.""" def since(self, interval): return self.get_query_set().since(interval) def ratio(self, *args, **kwargs): return self.get_query_set().ratio(*args, **kwargs) def frequency(self, *args, **kwargs): return self.get_query_set().frequency(*args, **kwargs) class PostManager(ExtendedManager): """Manager class for Posts""" def all_by_order(self, limit=DEFAULT_POST_LIMIT): """Get posts using the default sort order.""" ordering = self.model._meta.ordering return self.all().order_by(*ordering)[:limit] def update_or_create(self, feed_obj, **fields): """Update post with new values.""" super_update = super(PostManager, self).update_or_create defaults = truncate_field_data(self.model, fields) try: return super_update(guid=fields["guid"], feed=feed_obj, defaults=defaults) except MultipleObjectsReturned: self.filter(guid=fields["guid"], feed=feed_obj).delete() return super_update(guid=fields["guid"], feed=feed_obj, defaults=defaults) class CategoryManager(ExtendedManager): pass class EnclosureManager(ExtendedManager): pass
from datetime import timedelta, datetime from django.db import models from django.db.models.query import QuerySet from django.core.exceptions import MultipleObjectsReturned from djangofeeds.utils import truncate_field_data """ .. data:: DEFAULT_POST_LIMIT The default limit of number of posts to keep in a feed. Default is 5 posts. """ DEFAULT_POST_LIMIT = 5 def update_with_dict(obj, fields): """Update and save a model from the values of a :class:`dict`.""" set_value = lambda (name, val): setattr(obj, name, val) map(set_value, fields.items()) obj.save() return obj class ExtendedQuerySet(QuerySet): def update_or_create(self, **kwargs): obj, created = self.get_or_create(**kwargs) if not created: fields = dict(kwargs.pop("defaults", {})) fields.update(kwargs) update_with_dict(obj, fields) return obj def since(self, interval): """Return all the feeds refreshed since a specified amount of seconds.""" threshold = datetime.now() - timedelta(seconds=interval) return self.filter(date_last_refresh__lt=threshold) def ratio(self, min=None, max=None): """Select feeds based on ratio. :param min: Don't include feeds with a ratio lower than this. :param max: Don't include feeds with a ratio higher than this. """ query = {} if min is not None: query["ratio__gt"] = min if max is not None: query["ratio__lt"] = max return self.filter(**query) def frequency(self, min=None, max=None): """Select feeds based on update frequency. :param min: Don't include feeds with a frequency lower than this. :param max: Don't include feeds with a frequency higher than this. """ query = {} if min is not None: query["freq__gt"] = min if max is not None: query["freq__lt"] = max return self.filter(**query) class ExtendedManager(models.Manager): """Manager supporting :meth:`update_or_create`.""" def get_query_set(self): return ExtendedQuerySet(self.model) def update_or_create(self, **kwargs): return self.get_query_set().update_or_create(**kwargs) class FeedManager(ExtendedManager): """Manager for :class:`djangofeeds.models.Feed`.""" def since(self, interval): return self.get_query_set().since(interval) def ratio(self, *args, **kwargs): return self.get_query_set().ratio(*args, **kwargs) def frequency(self, *args, **kwargs): return self.get_query_set().frequency(*args, **kwargs) class PostManager(ExtendedManager): """Manager class for Posts""" def all_by_order(self, limit=DEFAULT_POST_LIMIT): """Get posts using the default sort order.""" ordering = self.model._meta.ordering return self.all().order_by(*ordering)[:limit] def update_or_create(self, feed_obj, **fields): """Update post with new values.""" super_update = super(PostManager, self).update_or_create defaults = truncate_field_data(self.model, fields) try: return super_update(guid=fields["guid"], feed=feed_obj, defaults=defaults) except MultipleObjectsReturned: self.filter(guid=fields["guid"], feed=feed_obj).delete() return super_update(guid=fields["guid"], feed=feed_obj, defaults=defaults) class CategoryManager(ExtendedManager): pass class EnclosureManager(ExtendedManager): pass
Python
0
f274f927d600989db1d485212d116166695e6edd
Use keyword arguments for readability
scell/core.py
scell/core.py
""" scell.core ~~~~~~~~~~ Provides abstractions over lower level APIs and file objects and their interests. """ from select import select as _select from collections import namedtuple def select(rl, wl, timeout=None): """ Returns the file objects ready for reading/writing from the read-list (*rl*) and write-list (*wl*), subject to *timeout* in seconds. :param rl: Objects interested in readability. :param wl: Objects interested in writability. :param timeout: Maximum blocking time in seconds, *None* for no timeout. """ if not (rl or wl): return [], [] readers, writers, _ = _select(rl, wl, (), timeout) return readers, writers class Monitored(namedtuple('_Monitored', 'fp,wants_read,wants_write,callback')): """ Represents the interests of a file handle *fp*, and whether it *wants_read* and or *wants_write*, as well as an attached *callback*. """ __slots__ = () class Event(namedtuple('_Event', 'monitored,readable,writable,fp,callback,ready')): """ Represents the readability or writability of a *monitored* file object. """ __slots__ = () def __new__(cls, monitored, readable, writable): ready = ( readable >= monitored.wants_read and writable >= monitored.wants_write ) return super(Event, cls).__new__( cls, monitored, readable, writable, fp=monitored.fp, callback=monitored.callback, ready=ready, )
""" scell.core ~~~~~~~~~~ Provides abstractions over lower level APIs and file objects and their interests. """ from select import select as _select from collections import namedtuple def select(rl, wl, timeout=None): """ Returns the file objects ready for reading/writing from the read-list (*rl*) and write-list (*wl*), subject to *timeout* in seconds. :param rl: Objects interested in readability. :param wl: Objects interested in writability. :param timeout: Maximum blocking time in seconds, *None* for no timeout. """ if not (rl or wl): return [], [] readers, writers, _ = _select(rl, wl, (), timeout) return readers, writers class Monitored(namedtuple('_Monitored', 'fp,wants_read,wants_write,callback')): """ Represents the interests of a file handle *fp*, and whether it *wants_read* and or *wants_write*, as well as an attached *callback*. """ __slots__ = () class Event(namedtuple('_Event', 'monitored,readable,writable,fp,callback,ready')): """ Represents the readability or writability of a *monitored* file object. """ __slots__ = () def __new__(cls, monitored, readable, writable): ready = ( readable >= monitored.wants_read and writable >= monitored.wants_write ) return super(Event, cls).__new__( cls, monitored, readable, writable, monitored.fp, monitored.callback, ready, )
Python
0.000001
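The readability change above passes namedtuple fields to __new__ by keyword rather than by position, so a later reordering of the field list cannot silently scramble values. A small sketch of the same pattern on a toy Event type:

from collections import namedtuple

Base = namedtuple("Base", "fp callback ready")

class Event(Base):
    __slots__ = ()

    def __new__(cls, fp, ready):
        # Keyword arguments make the field mapping explicit.
        return super(Event, cls).__new__(cls, fp=fp, callback=None, ready=ready)

e = Event("file.txt", True)
print(e.callback, e.ready)  # -> None True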
e7cce08f32516bc8b15df7eee0c285eebe795cab
Make it easier to filter on multiple field values
explorer/search.py
explorer/search.py
from . import config from .document import Document import requests from time import time def perform_search(**params): response = requests.get( config.GOVUK_SEARCH_API, params=params, auth=config.AUTH, ) return response.json() def fetch_documents(scope): documents = perform_search(**fetch_document_args(scope)) facets = {} for field in Document.FACET_FIELDS: start = time() facet_results = perform_search(**fetch_facet_args(scope, field)) facets[field] = facet_results["facets"][field] print "Fetched %s facet in %fs" % (field, time() - start) return present_documents(documents, facets) def fetch_lots_of_documents(scope, max_documents): fetched = 0 search_args = fetch_document_args(scope) while fetched < max_documents: search_args["start"] = fetched documents = perform_search(**search_args).get("results", []) if len(documents) == 0: break for document in documents: yield Document(document) fetched += 1 def fetch_document_args(scope): args = scope.search_args() args["count"] = 1000 args["fields"] = ",".join(Document.DISPLAY_FIELDS) return args def fetch_facet_args(scope, facet_field): args = scope.search_args() args["count"] = 0 args["facet_" + facet_field] = "1000,scope:exclude_field_filter" return args def present_documents(documents, facets): return { "count": documents["total"], "documents": [Document(document) for document in documents["results"] ], "facets": facets, }
from . import config from .document import Document import requests from time import time def perform_search(**params): response = requests.get( config.GOVUK_SEARCH_API, params=params, auth=config.AUTH, ) return response.json() def fetch_documents(scope): documents = perform_search(**fetch_document_args(scope)) facets = {} for field in Document.FACET_FIELDS: start = time() facet_results = perform_search(**fetch_facet_args(scope, field)) facets[field] = facet_results["facets"][field] print "Fetched %s facet in %fs" % (field, time() - start) return present_documents(documents, facets) def fetch_lots_of_documents(scope, max_documents): fetched = 0 search_args = fetch_document_args(scope) while fetched < max_documents: search_args["start"] = fetched documents = perform_search(**search_args).get("results", []) if len(documents) == 0: break for document in documents: yield Document(document) fetched += 1 def fetch_document_args(scope): args = scope.search_args() args["count"] = 1000 args["fields"] = ",".join(Document.DISPLAY_FIELDS) return args def fetch_facet_args(scope, facet_field): args = scope.search_args() args["count"] = 0 args["facet_" + facet_field] = "1000,scope:all_filters" return args def present_documents(documents, facets): return { "count": documents["total"], "documents": [Document(document) for document in documents["results"] ], "facets": facets, }
Python
0.000001
10d0b7c452c8d9d5893cfe612e0beaa738f61628
Add to template builtins only if add_to_builtins is available (Django <= 1.8)
easy_pjax/__init__.py
easy_pjax/__init__.py
#-*- coding: utf-8 -*- """ Register filter so it is available for use in the `extends` template tag (The `extends` tag must come first in a template, so regular `load` is not an option). """ from __future__ import absolute_import, division, print_function, unicode_literals __version__ = "1.2.0" has_add_to_builtins = True try: from django.template import add_to_builtins except ImportError: try: # import path changed in 1.8 from django.template.base import add_to_builtins except ImportError: has_add_to_builtins = False if has_add_to_builtins: add_to_builtins("easy_pjax.templatetags.pjax_tags")
#-*- coding: utf-8 -*- """ Register filter so it is available for use in the `extends` template tag (The `extends` tag must come first in a template, so regular `load` is not an option). """ from __future__ import absolute_import, division, print_function, unicode_literals __version__ = "1.2.0" try: from django.template import add_to_builtins except ImportError: # import path changed in 1.8 from django.template.base import add_to_builtins add_to_builtins("easy_pjax.templatetags.pjax_tags")
Python
0