code: string (51 to 2.34k characters)
docstring: string (11 to 171 characters)
def update_project_actions(self):
    if self.recent_projects:
        self.clear_recent_projects_action.setEnabled(True)
    else:
        self.clear_recent_projects_action.setEnabled(False)
    active = bool(self.get_active_project_path())
    self.close_project_action.setEnabled(active)
    self.delete_project_action.setEnabled(active)
    self.edit_project_preferences_action.setEnabled(active)
Update actions of the Projects menu
def _check_consistent_units_orbitInput(self, orb):
    if self._roSet and orb._roSet:
        assert m.fabs(self._ro - orb._ro) < 10.**-10., \
            'Physical conversion for the actionAngle object is not consistent with that of the Orbit given to it'
    if self._voSet and orb._voSet:
        assert m.fabs(self._vo - orb._vo) < 10.**-10., \
            'Physical conversion for the actionAngle object is not consistent with that of the Orbit given to it'
    return None
Internal function to check that the set of units for this object is consistent with that for an input orbit
def euler_options(fn):
    euler_functions = cheat, generate, preview, skip, verify, verify_all
    for option in reversed(euler_functions):
        name, docstring = option.__name__, option.__doc__
        kwargs = {'flag_value': option, 'help': docstring}
        flag = '--%s' % name.replace('_', '-')
        flags = [flag] if '_' in name else [flag, '-%s' % name[0]]
        fn = click.option('option', *flags, **kwargs)(fn)
    return fn
Decorator to link CLI options with their appropriate functions
def ascent(self):
    total_ascent = 0.0
    altitude_data = self.altitude_points()
    for i in range(len(altitude_data) - 1):
        diff = altitude_data[i + 1] - altitude_data[i]
        if diff > 0.0:
            total_ascent += diff
    return total_ascent
Returns ascent of workout in meters
def _zom_index(lexer):
    tok = next(lexer)
    if isinstance(tok, COMMA):
        first = _expect_token(lexer, {IntegerToken}).value
        rest = _zom_index(lexer)
        return (first,) + rest
    else:
        lexer.unpop_token(tok)
        return tuple()
Return zero or more indices.
def remove_outliers(series, stddev):
    return series[(series - series.mean()).abs() < stddev * series.std()]
Remove the outliers from a series.
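A minimal usage sketch (assuming pandas is available); the data and threshold below are hypothetical.

import pandas as pd

# Hypothetical data: eight typical readings and one extreme value.
s = pd.Series([1.0, 2.0, 2.0, 3.0, 2.0, 1.0, 3.0, 2.0, 100.0])

# Keep only values within 2 standard deviations of the mean;
# the extreme reading (100.0) falls outside and is removed.
print(remove_outliers(s, 2))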
def update_attribute(self, attr, value):
    update = [fapi._attr_up(attr, value)]
    r = fapi.update_workspace_attributes(self.namespace, self.name,
                                         update, self.api_url)
    fapi._check_response_code(r, 200)
Set the value of a workspace attribute.
def list_network_ip_availabilities(self, retrieve_all=True, **_params):
    return self.list('network_ip_availabilities',
                     self.network_ip_availabilities_path,
                     retrieve_all, **_params)
Fetches IP availability information for all networks.
def mod(x, y, context=None):
    return _apply_function_in_current_context(
        BigFloat,
        mpfr_mod,
        (
            BigFloat._implicit_convert(x),
            BigFloat._implicit_convert(y),
        ),
        context,
    )
Return the remainder of x divided by y, with sign matching that of y.
def process_token(self, tok):
    if tok[0].__str__() in ('Token.Comment.Multiline', 'Token.Comment',
                            'Token.Literal.String.Doc'):
        self.comments += tok[1].count('\n') + 1
    elif tok[0].__str__() in ('Token.Comment.Single',):
        self.comments += 1
    elif (self.contains_code and tok[0].__str__().startswith('Token.Text')
          and tok[1].count(u'\n')):
        self.contains_code = False
        self.sloc += 1
    elif tok[0].__str__() == 'Token.Comment.Preproc' and tok[1].count(u'\n'):
        self.contains_code = False
        self.sloc += 1
    elif tok[0][0] in token_types:
        self.contains_code = True
count comments and non-empty lines that contain code
def link_version(self, source, target):
    if not hasattr(target, VERSION_ID):
        logger.warn("No initial version found for '{}'"
                    .format(repr(target)))
        return
    if not hasattr(source, REFERENCE_VERSIONS):
        source.reference_versions = {}
    target_uid = api.get_uid(target)
    source.reference_versions[target_uid] = target.version_id
    source._p_changed = 1
Link the current version of the target on the source
def _ascii_tree(self, indent: str, no_types: bool, val_count: bool) -> str:
    def suffix(sn):
        return f" {{{sn.val_count}}}\n" if val_count else "\n"
    if not self.children:
        return ""
    cs = []
    for c in self.children:
        cs.extend(c._flatten())
    cs.sort(key=lambda x: x.qual_name)
    res = ""
    for c in cs[:-1]:
        res += (indent + c._tree_line(no_types) + suffix(c) +
                c._ascii_tree(indent + "| ", no_types, val_count))
    return (res + indent + cs[-1]._tree_line(no_types) + suffix(cs[-1]) +
            cs[-1]._ascii_tree(indent + " ", no_types, val_count))
Return the receiver's subtree as ASCII art.
def example_sync_client(api_client):
    try:
        pprint(api_client.echo())
    except errors.RequestError as exc:
        log.exception('Exception occurred: %s', exc)
Example use of the sync client.
def log_likelihood(C, T):
    C = C.tocsr()
    T = T.tocsr()
    ind = scipy.nonzero(C)
    relT = np.array(T[ind])[0, :]
    relT = np.log(relT)
    relC = np.array(C[ind])[0, :]
    return relT.dot(relC)
Implementation of the log-likelihood of C given T.
def display(self, messages, sig="", debug=False):
    full_message = "".join(
        sig + line for line in " ".join(
            str(msg) for msg in messages
        ).splitlines(True)
    )
    if not full_message:
        full_message = sig.rstrip()
    if debug:
        printerr(full_message)
    else:
        print(full_message)
Prints an iterator of messages.
def _flush_graph_val(self):
    if not self._graphvals2set:
        return
    delafter = {}
    for graph, key, branch, turn, tick, value in self._graphvals2set:
        if (graph, key, branch) in delafter:
            delafter[graph, key, branch] = min((
                (turn, tick),
                delafter[graph, key, branch]
            ))
        else:
            delafter[graph, key, branch] = (turn, tick)
    self.sqlmany(
        'del_graph_val_after',
        *((graph, key, branch, turn, turn, tick)
          for ((graph, key, branch), (turn, tick)) in delafter.items())
    )
    self.sqlmany('graph_val_insert', *self._graphvals2set)
    self._graphvals2set = []
Send all new and changed graph values to the database.
def _finalize(self):
    container = {}
    try:
        for name in self._traces:
            container[name] = self._traces[name]._trace
        container['_state_'] = self._state_
        file = open(self.filename, 'w+b')
        std_pickle.dump(container, file)
        file.close()
    except AttributeError:
        pass
Dump traces using cPickle.
def _get_shipped_from(row):
    try:
        spans = row.find('div', {'id': 'coltextR2'}).find_all('span')
        if len(spans) < 2:
            return None
        return spans[1].string
    except AttributeError:
        return None
Get where package was shipped from.
def check_has_path(self, api):
    if not hasattr(api, 'path'):
        msg = 'The Api class "{}" lacks a `path` attribute.'
        return [msg.format(api.__name__)]
An API class must have a `path` attribute.
async def async_delete_all_keys(session, host, port, api_key, api_keys=[]):
    url = 'http://{}:{}/api/{}/config'.format(host, str(port), api_key)
    response = await async_request(session.get, url)
    api_keys.append(api_key)
    for key in response['whitelist'].keys():
        if key not in api_keys:
            await async_delete_api_key(session, host, port, key)
Delete all API keys except for the ones provided to the method.
def make_random_key() -> Text:
    r = SystemRandom()
    allowed = \
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+/[]'
    return ''.join([r.choice(allowed) for _ in range(0, 50)])
Generates a secure random string
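A brief usage note: the function above relies on SystemRandom (from the random module) and Text (from typing); a single call returns a 50-character key.

key = make_random_key()
print(len(key))   # 50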
def OnHelpSize(self, event):
    size = event.GetSize()
    config["help_window_size"] = repr((size.width, size.height))
    event.Skip()
Help window size event handler stores size in config
def calc_csd(self):
    CSDarray = np.array([])
    CSDdict = {}
    i = 0
    for y in self.y:
        fil = os.path.join(self.populations_path,
                           self.output_file.format(y, 'CSD.h5'))
        f = h5py.File(fil)
        if i == 0:
            CSDarray = np.zeros((len(self.y), f['data'].shape[0],
                                 f['data'].shape[1]))
        CSDarray[i, ] = f['data'].value
        CSDdict.update({y: f['data'].value})
        f.close()
        i += 1
    return CSDdict, CSDarray.sum(axis=0)
Sum all the CSD contributions from every layer.
def recommend(self, client_data, limit, extra_data={}):
    guids = self._curated_wl.get_randomized_guid_sample(limit)
    results = [(guid, 1.0) for guid in guids]
    log_data = (client_data["client_id"], str(guids))
    self.logger.info(
        "Curated recommendations client_id: [%s], guids: [%s]" % log_data
    )
    return results
Curated recommendations are just random selections
def limit_roles(self):
    new_roles = {}
    roles = self.options.limit.split(",")
    for key, value in self.roles.iteritems():
        for role in roles:
            role = role.strip()
            if key == role:
                new_roles[key] = value
    self.roles = new_roles
Limit the roles being scanned.
def _read_conf_file(path):
    log.debug('Reading configuration from %s', path)
    with salt.utils.files.fopen(path, 'r') as conf_file:
        try:
            conf_opts = salt.utils.yaml.safe_load(conf_file) or {}
        except salt.utils.yaml.YAMLError as err:
            message = 'Error parsing configuration file: {0} - {1}'.format(path, err)
            log.error(message)
            raise salt.exceptions.SaltConfigurationError(message)

        if not isinstance(conf_opts, dict):
            message = 'Error parsing configuration file: {0} - conf ' \
                      'should be a document, not {1}.'.format(path, type(conf_opts))
            log.error(message)
            raise salt.exceptions.SaltConfigurationError(message)

        if 'id' in conf_opts:
            if not isinstance(conf_opts['id'], six.string_types):
                conf_opts['id'] = six.text_type(conf_opts['id'])
            else:
                conf_opts['id'] = salt.utils.data.decode(conf_opts['id'])
        return conf_opts
Read in a config file from a given path and process it into a dictionary
def send_response_message(self, request_id, meta, body):
    self.response_messages.append((request_id, meta, body))
Add the response to the deque.
def cluster_del_slots(self, slot, *slots):
    slots = (slot,) + slots
    if not all(isinstance(s, int) for s in slots):
        raise TypeError("All parameters must be of type int")
    fut = self.execute(b'CLUSTER', b'DELSLOTS', *slots)
    return wait_ok(fut)
Set hash slots as unbound in receiving node.
def to_json(self, skip_nulls=True):
    return json.dumps(self.to_dict(skip_nulls=skip_nulls))
Convert object to a json string
async def processNodeInBox(self):
    while self.nodeInBox:
        m = self.nodeInBox.popleft()
        await self.process_one_node_message(m)
Process the messages in the node inbox asynchronously.
def complete_hosts(self, text, line, begidx, endidx):
    "Tab-complete 'creds' commands."
    commands = ["add", "remove", "dc"]
    mline = line.partition(' ')[2]
    offs = len(mline) - len(text)
    return [s[offs:] for s in commands if s.startswith(mline)]
Tab-complete 'creds' commands.
def _load_raster_text(self, raster_path):
    with open(raster_path, 'r') as f:
        self.rasterText = f.read()
    lines = self.rasterText.split('\n')
    for line in lines[0:6]:
        spline = line.split()
        if 'north' in spline[0].lower():
            self.north = float(spline[1])
        elif 'south' in spline[0].lower():
            self.south = float(spline[1])
        elif 'east' in spline[0].lower():
            self.east = float(spline[1])
        elif 'west' in spline[0].lower():
            self.west = float(spline[1])
        elif 'rows' in spline[0].lower():
            self.rows = int(spline[1])
        elif 'cols' in spline[0].lower():
            self.columns = int(spline[1])
Loads a GRASS ASCII raster into the object.
def _setuintbe(self, uintbe, length=None):
    if length is not None and length % 8 != 0:
        raise CreationError("Big-endian integers must be whole-byte. "
                            "Length = {0} bits.", length)
    self._setuint(uintbe, length)
Set the bitstring to a big-endian unsigned int interpretation.
def nickmask(prefix: str, kwargs: Dict[str, Any]) -> None:
    if "!" in prefix and "@" in prefix:
        kwargs["nick"], remainder = prefix.split("!", 1)
        kwargs["user"], kwargs["host"] = remainder.split("@", 1)
    else:
        kwargs["host"] = prefix
Store nick, user, and host in kwargs if the prefix is in the correct format.
def shutdown(self):
    'Close the hub connection'
    log.info("shutting down")
    self._peer.go_down(reconnect=False, expected=True)
Close the hub connection
def _get_proposed_values(self):
    momentum_bar = self.momentum + 0.5 * self.stepsize * self.grad_log_position
    position_bar = self.position + self.stepsize * momentum_bar
    grad_log, _ = self.grad_log_pdf(position_bar,
                                    self.model).get_gradient_log_pdf()
    momentum_bar = momentum_bar + 0.5 * self.stepsize * grad_log
    return position_bar, momentum_bar, grad_log
Method to perform time splitting using leapfrog
def _parse_spectra(self, line):
    if line in ['\n', '\r\n', '//\n', '//\r\n', '', '//']:
        self.start_spectra = False
        self.current_id_meta += 1
        self.collect_meta = True
        return
    splist = line.split()
    if len(splist) > 2 and not self.ignore_additional_spectra_info:
        additional_info = ''.join(map(str, splist[2:len(splist)]))
    else:
        additional_info = ''
    srow = (
        self.current_id_spectra, float(splist[0]), float(splist[1]),
        additional_info, self.current_id_meta)
    self.spectra_all.append(srow)
    self.current_id_spectra += 1
Parse and store the spectral details
def jpegrescan(ext_args):
    args = copy.copy(_JPEGRESCAN_ARGS)
    if Settings.jpegrescan_multithread:
        args += ['-t']
    if Settings.destroy_metadata:
        args += ['-s']
    args += [ext_args.old_filename, ext_args.new_filename]
    extern.run_ext(args)
    return _JPEG_FORMAT
Run the EXTERNAL program jpegrescan.
def sync_role_definitions(self):
    from superset import conf
    logging.info('Syncing role definition')

    self.create_custom_permissions()

    self.set_role('Admin', self.is_admin_pvm)
    self.set_role('Alpha', self.is_alpha_pvm)
    self.set_role('Gamma', self.is_gamma_pvm)
    self.set_role('granter', self.is_granter_pvm)
    self.set_role('sql_lab', self.is_sql_lab_pvm)

    if conf.get('PUBLIC_ROLE_LIKE_GAMMA', False):
        self.set_role('Public', self.is_gamma_pvm)

    self.create_missing_perms()
    self.get_session.commit()
    self.clean_perms()
Inits the Superset application with security roles and such
def _family_notes_path(family, data_dir):
    data_dir = fix_data_dir(data_dir)
    family = family.lower()
    if family not in get_families(data_dir):
        raise RuntimeError("Family '{}' does not exist".format(family))
    file_name = 'NOTES.' + family.lower()
    file_path = os.path.join(data_dir, file_name)
    return file_path
Form a path to the notes for a family
def _log_error(self, message):
    key = (self.feature_name, self.target.get('formula'))
    self.environment.log_feature_error(key, "ERROR: " + message)
Log an error for the feature
def _check_valgrind(xml_file):
    log(_("checking for valgrind errors..."))
    xml = ET.ElementTree(file=xml_file)

    reported = set()
    for error in xml.iterfind("error"):
        kind = error.find("kind").text
        what = error.find("xwhat/text" if kind.startswith("Leak_") else "what").text

        msg = ["\t", what]
        for frame in error.iterfind("stack/frame"):
            obj = frame.find("obj")
            if obj is not None and internal.run_dir in Path(obj.text).parents:
                file, line = frame.find("file"), frame.find("line")
                if file is not None and line is not None:
                    msg.append(f": ({_('file')}: {file.text}, {_('line')}: {line.text})")
                break
        msg = "".join(msg)

        if msg not in reported:
            log(msg)
            reported.add(msg)

    if reported:
        raise Failure(_("valgrind tests failed; rerun with --log for more information."))
Log and report any errors encountered by valgrind.
def replication_factor(self, cluster='main'):
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    return int(self.config.get(cluster, 'replication_factor'))
Return the replication factor for a cluster as an integer.
def _get_a2(bbar, dbar, slip_moment, mmax):
    return ((dbar - bbar) / bbar) * (slip_moment / _scale_moment(mmax))
Returns the A2 value defined in II.4 of Table 2
def train_input_fn(params):
    file_pattern = os.path.join(getattr(params, "data_dir", ""),
                                "*encoded-train*")
    return _read_and_batch_from_files(
        file_pattern, params.batch_size, params.max_length,
        params.num_cpu_cores, shuffle=True, repeat=params.repeat_dataset)
Load and return dataset of batched examples for use during training.
def nodes_ali(c_obj):
    ali_nodes = []
    try:
        ali_nodes = c_obj.list_nodes()
    except BaseHTTPError as e:
        abort_err("\r HTTP Error with AliCloud: {}".format(e))
    ali_nodes = adj_nodes_ali(ali_nodes)
    return ali_nodes
Get node objects from AliCloud.
def sorts_query(sortables):
    stmts = []
    for sortable in sortables:
        if sortable.desc:
            stmts.append('{} DESC'.format(sortable.field))
        else:
            stmts.append('{} ASC'.format(sortable.field))
    return ' ORDER BY {}'.format(', '.join(stmts))
Turn the Sortables into a SQL ORDER BY query
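A minimal usage sketch; Sortable here is a hypothetical stand-in for the library's sortable objects, assumed to expose `field` and `desc` attributes.

from collections import namedtuple

Sortable = namedtuple('Sortable', ['field', 'desc'])

print(sorts_query([Sortable('created_at', True), Sortable('name', False)]))
# -> " ORDER BY created_at DESC, name ASC"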
def step(self, action):
    total_reward = 0.0
    done = None
    for i in range(self._skip):
        obs, reward, done, info = self.env.step(action)
        if i == self._skip - 2:
            self._obs_buffer[0] = obs
        if i == self._skip - 1:
            self._obs_buffer[1] = obs
        total_reward += reward
        if done:
            break
    max_frame = self._obs_buffer.max(axis=0)
    return max_frame, total_reward, done, info
Repeat action, sum reward, and max over last observations.
def _add_fcp(self, fcp):
    try:
        LOG.info("fcp %s found in CONF.volume.fcp_list, add it to db" % fcp)
        self.db.new(fcp)
    except Exception:
        LOG.info("failed to add fcp %s into db", fcp)
Add the fcp to the db and initialize it, if it is in the fcp list but not yet in the db.
def load_each(*loaders):
    def _load_each(metadata):
        return merge(
            loader(metadata)
            for loader in loaders
        )
    return _load_each
Loader factory that combines a series of loaders.
def calc_bin(self, _bin=None):
    if _bin is None:
        try:
            _bin = bins.bins(self.start, self.end, one=True)
        except TypeError:
            _bin = None
    return _bin
Calculate the smallest UCSC genomic bin that will contain this feature.
def check_output(args, env=None, sp=subprocess):
    log.debug('calling %s with env %s', args, env)
    output = sp.check_output(args=args, env=env)
    log.debug('output: %r', output)
    return output
Call an external binary and return its stdout.
def add_host(host):
    p = new_prefix()
    p.prefix = str(host['ipaddr'])
    p.type = "host"
    p.description = host['description']
    p.node = host['fqdn']
    p.avps = {}
    if 'additional' in host:
        p.comment = host['additional']
    if len(host['location']) > 0:
        p.avps['location'] = host['location']
    if len(host['mac']) > 0:
        p.avps['mac'] = host['mac']
    if len(host['phone']) > 0:
        p.avps['phone'] = host['phone']
    if len(host['user']) > 0:
        p.avps['user'] = host['user']
    return p
Put your host information in the prefix object.
def StreamInChunks(self, callback=None, finish_callback=None,
                   additional_headers=None):
    self.StreamMedia(callback=callback, finish_callback=finish_callback,
                     additional_headers=additional_headers,
                     use_chunks=True)
Stream the entire download in chunks.
def put(self, path, data, **options):
    data, options = self._update_request(data, options)
    return self.request('put', path, data=data, **options)
Parses PUT request options and dispatches a request
def as_dict(self, replace_value_names=True):
    r = RootSectionTerm(doc=self)
    for s in self:
        for t in s:
            r.terms.append(t)
    return r.as_dict(replace_value_names)
Iterate, link terms and convert to a dict
def OnMouseMotion(self, event):
    grid = self.grid
    pos_x, pos_y = grid.CalcUnscrolledPosition(event.GetPosition())

    row = grid.YToRow(pos_y)
    col = grid.XToCol(pos_x)
    tab = grid.current_table

    key = row, col, tab
    merge_area = self.grid.code_array.cell_attributes[key]["merge_area"]
    if merge_area is not None:
        top, left, bottom, right = merge_area
        row, col = top, left

    grid.actions.on_mouse_over((row, col, tab))
    event.Skip()
Mouse motion event handler
def suggest(self, utility_function):
    if len(self._space) == 0:
        return self._space.array_to_params(self._space.random_sample())

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        self._gp.fit(self._space.params, self._space.target)

    suggestion = acq_max(
        ac=utility_function.utility,
        gp=self._gp,
        y_max=self._space.target.max(),
        bounds=self._space.bounds,
        random_state=self._random_state
    )
    return self._space.array_to_params(suggestion)
Most promising point to probe next.
def unit_vector(x):
    y = np.array(x, dtype='float')
    return y / norm(y)
Return a unit vector in the same direction as x.
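A short usage sketch, assuming `norm` in the function above refers to numpy.linalg.norm.

import numpy as np
from numpy.linalg import norm

v = unit_vector([3.0, 4.0])
print(v)          # [0.6 0.8]
print(norm(v))    # 1.0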
def date_decoder(dic):
    if '__date__' in dic:
        try:
            d = datetime.date(
                **{c: v for c, v in dic.items() if not c == "__date__"})
        except (TypeError, ValueError):
            raise json.JSONDecodeError("Corrupted date format !", str(dic), 1)
    elif '__datetime__' in dic:
        try:
            d = datetime.datetime(
                **{c: v for c, v in dic.items() if not c == "__datetime__"})
        except (TypeError, ValueError):
            raise json.JSONDecodeError("Corrupted datetime format !", str(dic), 1)
    else:
        return dic
    return d
Add python types decoding. See JsonEncoder
def factory(codes, base=_Exception):
    if not issubclass(base, _Exception):
        raise FactoryException(
            "Invalid class passed as parent: Must be a subclass of an "
            "Exception class created with this function",
            FactoryException.INVALID_EXCEPTION_CLASS, intended_parent=base)

    class Error(base):
        pass

    if isinstance(codes, (list, set, tuple, frozenset)):
        codes = {e: e for e in codes}
    if not isinstance(codes, dict):
        raise FactoryException(
            "Factory codes must be a dict str -> object",
            FactoryException.INVALID_CODES_LIST, intended_codes=codes)

    for code, value in codes.items():
        try:
            setattr(Error, code, value)
        except TypeError:
            raise FactoryException(
                "Cannot set class attribute: (%r) -> (%r)" % (code, value),
                FactoryException.INVALID_CODE_VALUE,
                attribute=code, value=value)
    return Error
Creates a custom exception class with arbitrary error codes and arguments.
def find_root(self):
    node = self
    while node.parent is not None:
        node = node.parent
    return node
Finds the outermost context.
def check_failed_login(self):
    last_attempt = self.get_last_failed_access_attempt()

    if not last_attempt:
        user_access = self._FailedAccessAttemptModel(ip_address=self.ip)
    elif last_attempt:
        user_access = last_attempt

    if self.request.method == 'POST':
        if self.username is None:
            raise DobermanImproperlyConfigured(
                "Bad username form field, if you are using a custom field please configure: "
                "DOBERMAN_USERNAME_FORM_FIELD via settings."
            )
        if self.response.status_code != 302:
            user_access.user_agent = self.request.META.get(
                'HTTP_USER_AGENT', '<unknown user agent>')[:255]
            user_access.username = self.username
            user_access.failed_attempts += 1
            user_access.params_get = self.request.GET
            user_access.params_post = self.request.POST

            if user_access.failed_attempts >= self.max_failed_attempts:
                user_access.is_locked = True
            user_access.save()
        elif self.response.status_code == 302 and not user_access.is_locked:
            user_access.is_expired = True
            user_access.save()

    return user_access
Private method: check failed logins; used by the watch_login decorator.
def expected_part_size(self, part_number):
    last_part = self.multipart.last_part_number
    if part_number == last_part:
        return self.multipart.last_part_size
    elif part_number >= 0 and part_number < last_part:
        return self.multipart.chunk_size
    else:
        raise MultipartInvalidPartNumber()
Get expected part size for a particular part number.
def paint_pattern(self):
    x = 0
    while x < self.width:
        y = 0
        while y < self.height:
            self.paint_cube(x, y)
            y += self.cube_size
        x += self.cube_size
Paints all the cubes.
def rename_retract_ar_transition(portal):
    logger.info("Renaming 'retract_ar' transition to 'invalidate'")
    wf_tool = api.get_tool("portal_workflow")
    workflow = wf_tool.getWorkflowById("bika_ar_workflow")

    if "invalidate" not in workflow.transitions:
        workflow.transitions.addTransition("invalidate")
    transition = workflow.transitions.invalidate
    transition.setProperties(
        title="Invalidate",
        new_state_id="invalid",
        after_script_name="",
        actbox_name="Invalidate",
    )
    guard = transition.guard or Guard()
    guard_props = {"guard_permissions": "BIKA: Retract",
                   "guard_roles": "",
                   "guard_expr": "python:here.guard_cancelled_object()"}
    guard.changeFromProperties(guard_props)
    transition.guard = guard

    for state in workflow.states.values():
        if 'retract_ar' in state.transitions:
            trans = filter(lambda id: id != 'retract_ar', state.transitions)
            trans += ('invalidate', )
            state.transitions = trans

    if "retract_ar" in workflow.transitions:
        workflow.transitions.deleteTransitions(["retract_ar"])
Renames retract_ar transition to invalidate
def default_select(identifier, all_entry_points):
    if len(all_entry_points) == 0:
        raise PluginMissingError(identifier)
    elif len(all_entry_points) == 1:
        return all_entry_points[0]
    elif len(all_entry_points) > 1:
        raise AmbiguousPluginError(all_entry_points)
Return the sole entry point, or raise an exception when entry points are missing or ambiguous.
def run(self, ket: State) -> State:
    res = [op.run(ket) for op in self.operators]
    probs = [asarray(ket.norm()) * w for ket, w in zip(res, self.weights)]
    probs = np.asarray(probs)
    probs /= np.sum(probs)
    newket = np.random.choice(res, p=probs)
    return newket.normalize()
Apply the action of this Kraus quantum operation upon a state
def closeLog(self):
    self._logPtr.close()
    if self._namePtr:
        self._namePtr.close()
    self.log = 0
Closes the log file.
def random_point_triangle(triangle, use_int_coords=True):
    xs, ys = triangle.exterior.coords.xy
    A, B, C = zip(xs[:-1], ys[:-1])
    r1, r2 = np.random.rand(), np.random.rand()
    rx, ry = (1 - sqrt(r1)) * np.asarray(A) \
        + sqrt(r1) * (1 - r2) * np.asarray(B) \
        + sqrt(r1) * r2 * np.asarray(C)
    if use_int_coords:
        rx, ry = round(rx), round(ry)
        return Point(int(rx), int(ry))
    return Point(rx, ry)
Selects a random point in the interior of a triangle.
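A usage sketch, assuming shapely and numpy are available as in the function above; the triangle coordinates are hypothetical.

from math import sqrt
import numpy as np
from shapely.geometry import Point, Polygon

tri = Polygon([(0, 0), (10, 0), (0, 10)])
p = random_point_triangle(tri)                        # Point with integer coordinates
q = random_point_triangle(tri, use_int_coords=False)  # Point with float coordinates
print(p, q)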
def api_related(self, query):
    url = "{0}/{1}/related/?format=json".format(self.base_url, query)
    response = requests.get(url, headers=self.headers, verify=self.verify_ssl)
    if response.status_code == 200:
        return response.json()
    else:
        self.error('Received status code: {0} from Soltra Server. Content:\n{1}'.format(
            response.status_code, response.text)
        )
Find related objects through SoltraEdge API
def load_settings_file(self, settings_file=None):
    if not settings_file:
        settings_file = self.get_json_or_yaml_settings()
    if not os.path.isfile(settings_file):
        raise ClickException("Please configure your zappa_settings file or call `zappa init`.")

    path, ext = os.path.splitext(settings_file)
    if ext == '.yml' or ext == '.yaml':
        with open(settings_file) as yaml_file:
            try:
                self.zappa_settings = yaml.load(yaml_file)
            except ValueError:
                raise ValueError("Unable to load the Zappa settings YAML. It may be malformed.")
    elif ext == '.toml':
        with open(settings_file) as toml_file:
            try:
                self.zappa_settings = toml.load(toml_file)
            except ValueError:
                raise ValueError("Unable to load the Zappa settings TOML. It may be malformed.")
    else:
        with open(settings_file) as json_file:
            try:
                self.zappa_settings = json.load(json_file)
            except ValueError:
                raise ValueError("Unable to load the Zappa settings JSON. It may be malformed.")
Load our settings file.
def _ranging_attributes(attributes, param_class):
    next_attributes = {param_class.next_in_enumeration(attribute)
                       for attribute in attributes}
    in_first = attributes.difference(next_attributes)
    in_second = next_attributes.difference(attributes)
    if len(in_first) == 1 and len(in_second) == 1:
        for x in attributes:
            if {param_class.next_in_enumeration(x)} == in_second:
                return next(iter(in_first)), x
    return None, None
Checks if there is a continuous range
def dump(self):
    for modpath in sorted(self.map):
        title = 'Used by %s' % modpath
        print('\n' + title + '\n' + '-' * len(title))
        for origin in sorted(self.get_used_origins(modpath)):
            print(' %s' % origin)
Prints out the contents of the usage map.
def use_comparative_asset_view(self):
    self._object_views['asset'] = COMPARATIVE
    for session in self._get_provider_sessions():
        try:
            session.use_comparative_asset_view()
        except AttributeError:
            pass
Pass through to provider AssetLookupSession.use_comparative_asset_view
def pil_image3d(input, size=(800, 600), pcb_rotate=(0, 0, 0), timeout=20,
                showgui=False):
    f = tempfile.NamedTemporaryFile(suffix='.png', prefix='eagexp_')
    output = f.name
    export_image3d(input, output=output, size=size, pcb_rotate=pcb_rotate,
                   timeout=timeout, showgui=showgui)
    im = Image.open(output)
    return im
Same as export_image3d, but no output file is written; a PIL image object is returned instead.
def _perspective_warp(c, magnitude:partial(uniform,size=8)=0, invert=False):
    "Apply warp of `magnitude` to `c`."
    magnitude = magnitude.view(4, 2)
    targ_pts = [[x + m for x, m in zip(xs, ms)]
                for xs, ms in zip(_orig_pts, magnitude)]
    return _do_perspective_warp(c, targ_pts, invert)
Apply warp of `magnitude` to `c`.
def currentRepoTreeItemChanged(self):
    currentItem, currentIndex = self.getCurrentItem()

    hasCurrent = currentIndex.isValid()
    assert hasCurrent == (currentItem is not None), \
        "If the current index is valid, currentItem may not be None"

    if hasCurrent:
        logger.info("Adding rti to collector: {}".format(currentItem.nodePath))
        self.collector.setRti(currentItem)

    self.currentItemActionGroup.setEnabled(hasCurrent)
    isTopLevel = hasCurrent and self.model().isTopLevelIndex(currentIndex)
    self.topLevelItemActionGroup.setEnabled(isTopLevel)
    self.openItemAction.setEnabled(currentItem is not None
                                   and currentItem.hasChildren()
                                   and not currentItem.isOpen)
    self.closeItemAction.setEnabled(currentItem is not None
                                    and currentItem.hasChildren()
                                    and currentItem.isOpen)

    logger.debug("Emitting sigRepoItemChanged: {}".format(currentItem))
    self.sigRepoItemChanged.emit(currentItem)
Called to update the GUI when a repo tree item has changed or a new one was selected.
def cli(env, identifier):
    mgr = SoftLayer.ObjectStorageManager(env.client)
    credential_limit = mgr.limit_credential(identifier)
    table = formatting.Table(['limit'])
    table.add_row([
        credential_limit,
    ])
    env.fout(table)
Credential limits for this IBM Cloud Object Storage account.
def indent_func(input_):
    if isinstance(input_, six.string_types):
        lbl = input_
        return _indent_decor(lbl)
    elif isinstance(input_, (bool, tuple)):
        func = input_
        return func
    else:
        func = input_
        lbl = '[' + meta_util_six.get_funcname(func) + ']'
        return _indent_decor(lbl)(func)
Takes either no arguments or an alias label
def conns(self, value: Set[str]) -> None:
    if not self._conns == value:
        old = self._conns
        self._conns = value
        ins = value - old
        outs = old - value
        logger.display("{}'s connections changed from {} to {}"
                       .format(self, old, value))
        self._connsChanged(ins, outs)
Updates the set of connections of this node if it has changed.
def transform_using_this_method(original_sample):
    new_sample = original_sample.copy()
    new_data = new_sample.data
    new_data['Y2-A'] = log(new_data['Y2-A'])
    new_data = new_data.dropna()
    new_sample.data = new_data
    return new_sample
This function implements a log transformation on the data.
def view(grid):
    "Show a grid human-readably."
    p_mark, q_mark = player_marks(grid)
    return grid_format % tuple(p_mark if by_p else q_mark if by_q else '.'
                               for by_p, by_q in zip(*map(player_bits, grid)))
Show a grid human-readably.
def getblockhash(self, index: int) -> str:
    return cast(str, self.api_fetch('getblockhash?index=' + str(index)))
Returns the hash of the block at the given index; index 0 is the genesis block.
def node_stat_copy(self, node_or_char, node=None):
    if node is None:
        node = node_or_char
    else:
        node = self._real.character[node_or_char].node[node]
    return {
        k: v.unwrap()
        if hasattr(v, 'unwrap') and not hasattr(v, 'no_unwrap')
        else v
        for (k, v) in node.items()
        if k not in {
            'character', 'name', 'arrival_time', 'next_arrival_time'
        }
    }
Return a node's stats, prepared for pickling, in a dictionary.
def del_label(self, name):
    labels_tag = self.root[0]
    labels_tag.remove(self._find_label(name))
Delete a label by name.
def live_scores(self, live_scores):
    scores = sorted(live_scores, key=lambda x: x["league"])
    for league, games in groupby(scores, key=lambda x: x["league"]):
        self.league_header(league)
        for game in games:
            self.scores(self.parse_result(game), add_new_line=False)
            click.secho(' %s' % Stdout.utc_to_local(game["time"],
                                                    use_12_hour_format=False),
                        fg=self.colors.TIME)
            click.echo()
Prints the live scores in a pretty format
def dispatch_request(self, *args, **kwargs):
    if self.validation:
        specs = {}
        attrs = flasgger.constants.OPTIONAL_FIELDS + [
            'parameters', 'definitions', 'responses',
            'summary', 'description'
        ]
        for attr in attrs:
            specs[attr] = getattr(self, attr)
        definitions = {}
        specs.update(convert_schemas(specs, definitions))
        specs['definitions'] = definitions
        flasgger.utils.validate(
            specs=specs, validation_function=self.validation_function)
    return super(SwaggerView, self).dispatch_request(*args, **kwargs)
If validation=True perform validation
def _extract_options(orig_script):
    first = (orig_script + '\n').splitlines()[0]
    match = _first_line_re().match(first)
    options = match.group(1) or '' if match else ''
    return options.strip()
Extract any options from the first line of the script.
def _displaystr2num(st):
    num = None
    for s, n in [('DFP-', 16), ('TV-', 8), ('CRT-', 0)]:
        if st.startswith(s):
            try:
                curnum = int(st[len(s):])
                if 0 <= curnum <= 7:
                    num = n + curnum
                    break
            except Exception:
                pass
    if num is not None:
        return num
    else:
        raise ValueError('Unrecognised display name: ' + st)
Return a display number from a string
def link_head(self, node):
    assert not node.tail
    old_head = self.head
    if old_head:
        assert old_head.tail == self
        old_head.tail = node
    node.head = old_head
    node.tail = self
    self.head = node
Add a node to the head.
def handle_user(self, params):
    params = params.split(' ', 3)
    if len(params) != 4:
        raise IRCError.from_name(
            'needmoreparams', 'USER :Not enough parameters')
    user, mode, unused, realname = params
    self.user = user
    self.mode = mode
    self.realname = realname
    return ''
Handle the USER command which identifies the user to the server.
def load_decorate(package):
    from acorn.logging.decoration import set_decorating, decorating
    origdecor = decorating
    set_decorating(True)

    import sys
    from importlib import import_module
    apack = import_module(package)

    from acorn.logging.decoration import decorate
    decorate(apack)
    sys.modules["acorn.{}".format(package)] = apack

    from acorn.logging.decoration import set_decorating
    set_decorating(origdecor)
    return apack
Imports and decorates the package with the specified name.
def format_datetime(time):
    user_time_zone = timezone.get_current_timezone()
    if time.tzinfo is None:
        time = time.replace(tzinfo=pytz.utc)
        user_time_zone = pytz.timezone(getattr(settings,
                                               'USER_TIME_ZONE', 'GMT'))
    time = time.astimezone(user_time_zone)
    return time.strftime("%b %d, %Y %H:%M")
Formats a date, converting the time to the user timezone if one is specified
def difference_update(self, other):
    return self.client.sdiffstore(self.name, [self.name, other.name])
Remove all elements of another set from this set.
def _concat_same_dtype(self, to_concat, name):
    attribs = self._get_attributes_dict()
    attribs['name'] = name

    if len({str(x.dtype) for x in to_concat}) != 1:
        raise ValueError('to_concat must have the same tz')

    new_data = type(self._values)._concat_same_type(to_concat).asi8

    is_diff_evenly_spaced = len(unique_deltas(new_data)) == 1
    if not is_period_dtype(self) and not is_diff_evenly_spaced:
        attribs['freq'] = None

    return self._simple_new(new_data, **attribs)
Concatenate to_concat which has the same class.
def parse_rss_bytes(data: bytes) -> RSSChannel:
    root = parse_xml(BytesIO(data)).getroot()
    return _parse_rss(root)
Parse an RSS feed from a byte-string containing XML data.
def _init_kws(self, **kws_usr):
    kws_self = {}
    user_keys = set(kws_usr)
    for objname, expset in self.exp_keys.items():
        usrkeys_curr = user_keys.intersection(expset)
        kws_self[objname] = get_kwargs(kws_usr, usrkeys_curr, usrkeys_curr)
    dpi = str(kws_self['dag'].get('dpi', self.dflts['dpi']))
    kws_self['dag']['dpi'] = dpi
    return kws_self
Return a dict containing user-specified plotting options.
def train_agent(real_env, learner, world_model_dir, hparams, epoch):
    initial_frame_chooser = rl_utils.make_initial_frame_chooser(
        real_env, hparams.frame_stack_size, hparams.simulation_random_starts,
        hparams.simulation_flip_first_random_for_beginning
    )
    env_fn = rl.make_simulated_env_fn_from_hparams(
        real_env, hparams, batch_size=hparams.simulated_batch_size,
        initial_frame_chooser=initial_frame_chooser,
        model_dir=world_model_dir,
        sim_video_dir=os.path.join(
            learner.agent_model_dir, "sim_videos_{}".format(epoch)
        )
    )
    base_algo_str = hparams.base_algo
    train_hparams = trainer_lib.create_hparams(hparams.base_algo_params)
    if hparams.wm_policy_param_sharing:
        train_hparams.optimizer_zero_grads = True

    rl_utils.update_hparams_from_hparams(
        train_hparams, hparams, base_algo_str + "_"
    )

    final_epoch = hparams.epochs - 1
    is_special_epoch = (epoch + 3) == final_epoch or (epoch + 7) == final_epoch
    is_final_epoch = epoch == final_epoch
    env_step_multiplier = 3 if is_final_epoch else 2 if is_special_epoch else 1
    learner.train(
        env_fn, train_hparams, simulated=True, save_continuously=True,
        epoch=epoch, env_step_multiplier=env_step_multiplier
    )
Train the PPO agent in the simulated environment.
def _read_pyMatch(fn, precursors):
    with open(fn) as handle:
        reads = defaultdict(realign)
        for line in handle:
            query_name, seq, chrom, reference_start, end, mism, add = line.split()
            reference_start = int(reference_start)
            if query_name not in reads:
                reads[query_name].sequence = seq
            iso = isomir()
            iso.align = line
            iso.start = reference_start
            iso.subs, iso.add = _realign(reads[query_name].sequence,
                                         precursors[chrom], reference_start)
            logger.debug("%s %s %s %s %s" % (query_name, reference_start,
                                             chrom, iso.subs, iso.add))
            if len(iso.subs) > 1:
                continue
            reads[query_name].set_precursor(chrom, iso)

    reads = _clean_hits(reads)
    return reads
read pyMatch file and perform realignment of hits